from typing import Annotated, Sequence, TypedDict
import functools
import operator
from bertopic import BERTopic
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langgraph.graph import END, StateGraph
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_experimental.tools import PythonREPLTool
# Initialize tools
tavily_tool = TavilySearchResults(max_results=5)
python_repl_tool = PythonREPLTool()
# Load BERTopic model
topic_model = BERTopic.load("topic_model")
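
# Sketch (an assumption, not part of the original file): "topic_model" is
# expected to be a BERTopic model fitted and saved beforehand, e.g.:
#
#     from bertopic import BERTopic
#     docs = [...]  # hypothetical corpus of review texts
#     topic_model = BERTopic().fit(docs)
#     topic_model.save("topic_model")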
# Function to create an agent
def create_agent(llm: ChatOpenAI, tools: list, system_prompt: str):
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    agent = create_openai_tools_agent(llm, tools, prompt)
    executor = AgentExecutor(agent=agent, tools=tools)
    return executor
# Function to define an agent node
def agent_node(state, agent, name):
    result = agent.invoke(state)
    return {"messages": [HumanMessage(content=result["output"], name=name)]}
# Define the Viewer agent using the BERTopic model
def viewer_agent(state):
    review = state["messages"][-1].content
    # transform returns (topics, probabilities); take the topic of the first doc
    index = topic_model.transform([review])[0][0]
    # Fall back to a fixed label so HumanMessage never receives None
    answer = topic_model.topic_labels_.get(index, "Unknown topic")
    return {"messages": [HumanMessage(content=answer, name="Viewer")]}
# Define AgentState
class AgentState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], operator.add]
    next: str
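
# Note: because "messages" is annotated with operator.add, LangGraph merges the
# lists returned by nodes instead of overwriting them, e.g. a node returning
# {"messages": [msg_b]} on top of {"messages": [msg_a]} yields [msg_a, msg_b].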
# Create LLM for the supervisor
llm = ChatOpenAI(model="gpt-4-1106-preview")
# Define the system prompt for the supervisor
system_prompt = (
"You are a supervisor tasked with managing a conversation between the following workers: Researcher, Coder, Viewer. "
"Given the following user request, respond with the worker to act next. Each worker will perform a task and respond with their results and status. "
"When finished, respond with FINISH. If the request seems like a product review or sentiment analysis, route it to the Viewer."
)
# Define options
options = ["FINISH", "Researcher", "Coder", "Viewer"]
# Define the function for routing
function_def = {
"name": "route",
"description": "Select the next role.",
"parameters": {
"title": "routeSchema",
"type": "object",
"properties": {
"next": {
"title": "Next",
"anyOf": [
{"enum": options},
],
}
},
"required": ["next"],
},
}
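
# This JSON Schema constrains the function-call argument "next" to the values
# in "options", so the supervisor always returns a valid worker name or FINISH.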
# Define the prompt for the supervisor
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        (
            "system",
            "Given the conversation above, who should act next? Or should we FINISH? Select one of: {options}",
        ),
    ]
).partial(options=str(options))
# Create the supervisor chain
supervisor_chain = (
    prompt
    | llm.bind_functions(functions=[function_def], function_call="route")
    | JsonOutputFunctionsParser()
)
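
# The supervisor chain yields a dict such as {"next": "Coder"} or
# {"next": "FINISH"}. A quick standalone check (a sketch; uncommenting it makes
# a live OpenAI call):
#
#     # supervisor_chain.invoke({"messages": [HumanMessage(content="Plot this CSV")]})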
# Define agents
research_agent = create_agent(llm, [tavily_tool], "You are a web researcher.")
research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")
code_agent = create_agent(
    llm,
    [python_repl_tool],
    "You may generate safe python code to analyze data and generate charts using matplotlib.",
)
code_node = functools.partial(agent_node, agent=code_agent, name="Coder")
# viewer_agent needs no extra arguments bound, so use it directly as the node
viewer_node = viewer_agent
# Create the workflow
workflow = StateGraph(AgentState)
workflow.add_node("Researcher", research_node)
workflow.add_node("Coder", code_node)
workflow.add_node("Viewer", viewer_node)
workflow.add_node("supervisor", supervisor_chain)
# Add edges for each agent to report back to the supervisor
members = ["Researcher", "Coder", "Viewer"]
for member in members:
    workflow.add_edge(member, "supervisor")
# Add conditional edges
conditional_map = {k: k for k in members}
conditional_map["FINISH"] = END
workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map)
# Set the entry point
workflow.set_entry_point("supervisor")
# Compile the workflow
graph = workflow.compile()
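
# Optional sanity check (assumes the grandalf package is installed): print an
# ASCII rendering of the compiled graph to verify the supervisor/worker wiring.
# graph.get_graph().print_ascii()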
# Testing the workflow
for s in graph.stream(
    {
        "messages": [
            HumanMessage(content="write a report of gopal who worked in 3 k technologies")
        ]
    }
):
    if "__end__" not in s:
        print(s)
        print("----")