Rohit108 committed on
Commit
76aa4b0
·
verified ·
1 Parent(s): 8f2d6a1

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -152
app.py DELETED
@@ -1,152 +0,0 @@
1
- from typing import Annotated, Sequence, TypedDict
2
- import functools
3
- import operator
4
- from bertopic import BERTopic
5
- from langchain.agents import AgentExecutor, create_openai_tools_agent
6
- from langchain_core.messages import BaseMessage, HumanMessage
7
- from langchain_openai import ChatOpenAI
8
- from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
9
- from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
10
- from langgraph.graph import END, StateGraph
11
- from langchain_community.tools.tavily_search import TavilySearchResults
12
- from langchain_experimental.tools import PythonREPLTool
13
-
14
# Initialize tools
# tavily_tool: web search used by the Researcher agent (top 5 hits per query).
# python_repl_tool: Python REPL used by the Coder agent.
tavily_tool = TavilySearchResults(max_results=5)
python_repl_tool = PythonREPLTool()

# Load BERTopic model
# Pre-trained model loaded from the local "topic_model" path; used by the
# Viewer agent to label review-like messages with a topic.
topic_model = BERTopic.load("topic_model")
20
-
21
# Factory for a tool-calling worker agent.
def create_agent(llm: ChatOpenAI, tools: list, system_prompt: str):
    """Build an ``AgentExecutor`` wrapping an OpenAI-tools agent.

    The prompt layout is: the given system message, then the running
    conversation (``messages``), then the agent's scratchpad.
    """
    message_layout = [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
    agent_prompt = ChatPromptTemplate.from_messages(message_layout)
    tools_agent = create_openai_tools_agent(llm, tools, agent_prompt)
    return AgentExecutor(agent=tools_agent, tools=tools)
33
-
34
# Graph-node adapter: run an agent and re-wrap its answer as a named message.
def agent_node(state, agent, name):
    """Invoke *agent* on *state* and return its output as a message from *name*."""
    output = agent.invoke(state)["output"]
    return {"messages": [HumanMessage(content=output, name=name)]}
38
-
39
# Define the Viewer agent using the BERTopic model
def viewer_agent(state):
    """Label the latest message with its BERTopic topic.

    Reads the most recent message in ``state["messages"]``, maps it to a
    topic id via ``topic_model.transform``, and returns the topic's
    human-readable label as a message named "Viewer".
    """
    review = state["messages"][-1].content
    # transform returns (topics, probabilities); take the first document's topic id.
    index = topic_model.transform([review])[0][0]
    # BUGFIX: .get(index) with no default returns None when the id is absent
    # from topic_labels_ (e.g. the -1 outlier topic), and
    # HumanMessage(content=None) then raises. Fall back to a readable label.
    answer = topic_model.topic_labels_.get(index, "Unknown topic")
    return {"messages": [HumanMessage(content=answer, name="Viewer")]}
45
-
46
# Define AgentState
class AgentState(TypedDict):
    # Conversation history; operator.add makes LangGraph append messages
    # returned by nodes instead of replacing the whole list.
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # Name of the node the supervisor routed to next ("FINISH" ends the run).
    next: str
50
-
51
# Create LLM for the supervisor
# One shared ChatOpenAI instance is reused for the supervisor and both
# tool-using workers below.
llm = ChatOpenAI(model="gpt-4-1106-preview")

# Define the system prompt for the supervisor
# Instructs the model to pick the next worker (or FINISH), with review /
# sentiment requests routed to the Viewer.
system_prompt = (
    "You are a supervisor tasked with managing a conversation between the following workers: Researcher, Coder, Viewer. "
    "Given the following user request, respond with the worker to act next. Each worker will perform a task and respond with their results and status. "
    "When finished, respond with FINISH. If the request seems like a product review or sentiment analysis, route it to the Viewer."
)
60
-
61
# Routing choices the supervisor may return; FINISH terminates the graph.
options = ["FINISH", "Researcher", "Coder", "Viewer"]

# OpenAI function-calling schema the supervisor is forced to invoke: a single
# "next" field constrained to one of the routing choices.
_next_field = {
    "title": "Next",
    "anyOf": [
        {"enum": options},
    ],
}
function_def = {
    "name": "route",
    "description": "Select the next role.",
    "parameters": {
        "title": "routeSchema",
        "type": "object",
        "properties": {"next": _next_field},
        "required": ["next"],
    },
}
82
-
83
# Supervisor prompt: system instructions, the conversation so far, then a
# final system nudge asking which worker (or FINISH) should act next.
_routing_question = "Given the conversation above, who should act next? Or should we FINISH? Select one of: {options}"
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        ("system", _routing_question),
    ]
).partial(options=str(options), members="Researcher, Coder, Viewer")

# Supervisor chain: prompt -> LLM forced to call the "route" function ->
# parsed JSON of the form {"next": <worker or FINISH>}.
supervisor_chain = (
    prompt
    | llm.bind_functions(functions=[function_def], function_call="route")
    | JsonOutputFunctionsParser()
)
101
-
102
# Define agents
# Researcher: web search via the Tavily tool.
research_agent = create_agent(llm, [tavily_tool], "You are a web researcher.")
research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")

# Coder: Python REPL for data analysis and matplotlib charts.
code_agent = create_agent(
    llm,
    [python_repl_tool],
    "You may generate safe python code to analyze data and generate charts using matplotlib.",
)
code_node = functools.partial(agent_node, agent=code_agent, name="Coder")

# FIX: functools.partial(viewer_agent) with no bound arguments is a no-op
# wrapper around the function; reference it directly instead.
viewer_node = viewer_agent
114
-
115
# Assemble the LangGraph workflow: a supervisor node that routes between the
# three worker nodes until it answers FINISH.
workflow = StateGraph(AgentState)
members = ["Researcher", "Coder", "Viewer"]

workflow.add_node("supervisor", supervisor_chain)
workflow.add_node("Researcher", research_node)
workflow.add_node("Coder", code_node)
workflow.add_node("Viewer", viewer_node)

# Every worker reports back to the supervisor after acting.
for worker in members:
    workflow.add_edge(worker, "supervisor")

# The supervisor's "next" field selects the following node; FINISH maps to END.
conditional_map = {name: name for name in members}
conditional_map["FINISH"] = END
workflow.add_conditional_edges("supervisor", lambda state: state["next"], conditional_map)

# Execution always starts at the supervisor.
workflow.set_entry_point("supervisor")

# Compile the workflow into a runnable graph.
graph = workflow.compile()
137
-
138
# Smoke-test the workflow with a single user request, printing each
# intermediate step (the terminal "__end__" event is skipped).
initial_state = {
    "messages": [
        HumanMessage(content="write a report of gopal who worked in 3 k technologies")
    ]
}
for step in graph.stream(initial_state):
    if "__end__" in step:
        continue
    print(step)
    print("----")
149
-
150
-
151
-
152
-