wishwakankanamg committed on
Commit
ffa0300
·
1 Parent(s): 81027bf
Files changed (2) hide show
  1. agent.py +4 -16
  2. app.py +6 -75
agent.py CHANGED
@@ -216,23 +216,11 @@ def build_graph(provider: str = "groq"):
216
  if __name__ == "__main__":
217
  question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
218
  # Build the graph
219
- graph = build_graph(provider="huggingface") # This needs to actually use the provider
220
  # Run the graph
221
- initial_messages = [sys_msg, HumanMessage(content=question)] # PREPEND sys_msg HERE
222
-
223
- print("Invoking graph with initial messages:")
224
- for m in initial_messages:
225
  m.pretty_print()
226
- print("-" * 20)
227
-
228
- result_messages = graph.invoke({"messages": initial_messages})
229
-
230
- print("\nFinal Graph Output:")
231
- if result_messages and "messages" in result_messages:
232
- for m in result_messages["messages"]:
233
- m.pretty_print()
234
- else:
235
- print("Graph did not return expected messages structure.")
236
- print(f"Raw output: {result_messages}")
237
 
238
 
 
216
if __name__ == "__main__":
    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
    # Assemble the agent graph once, then drive it with a single question.
    graph = build_graph(provider="groq")
    # Seed the conversation with the user's question and execute the graph.
    result = graph.invoke({"messages": [HumanMessage(content=question)]})
    for msg in result["messages"]:
        msg.pretty_print()
 
 
 
 
 
 
 
 
 
 
 
225
 
226
 
app.py CHANGED
@@ -21,84 +21,15 @@ class BasicAgent:
21
  """A langgraph agent."""
22
  def __init__(self):
23
  print("BasicAgent initialized.")
24
- # Assumes build_graph() and sys_msg are available from your agent.py
25
- # You need to import them here.
26
- # from your_agent_file import build_graph, sys_msg as agent_system_message
27
- # self.graph = build_graph()
28
- # self.system_message = agent_system_message
29
-
30
- # Let's assume for now agent.py is structured to be imported like this:
31
- # Option 1: if agent.py defines them globally
32
- # from agent import build_graph, sys_msg
33
- # self.graph = build_graph(provider="huggingface") # Or your desired provider
34
- # self.sys_msg_for_graph = sys_msg
35
-
36
- # Option 2: If build_graph also returns the sys_msg or if sys_msg is part of the graph object
37
- # This depends on how you refactor agent.py for importability
38
-
39
- # For this example, I'll assume you import them directly:
40
- from agent import build_graph, sys_msg as agent_sys_msg # Make sure agent.py is in PYTHONPATH or same dir
41
-
42
- self.graph = build_graph(provider="huggingface") # Specify the provider
43
- self.agent_system_message = agent_sys_msg
44
-
45
 
46
  def __call__(self, question: str) -> str:
47
  print(f"Agent received question (first 50 chars): {question[:50]}...")
48
-
49
- # INCORRECT INVOCATION:
50
- # messages = [HumanMessage(content=question)] # <-- MISSING SYSTEM MESSAGE
51
- # messages = self.graph.invoke({"messages": messages})
52
-
53
- # CORRECTED INVOCATION:
54
- # You MUST include the system message that was loaded in agent.py
55
- initial_graph_messages = [self.agent_system_message, HumanMessage(content=question)]
56
-
57
- print("Invoking graph with (system_message + human_question):")
58
- # Optional: print the messages being sent for debugging
59
- # for m in initial_graph_messages:
60
- # m.pretty_print()
61
- # print("-" * 20)
62
-
63
- try:
64
- graph_output = self.graph.invoke({"messages": initial_graph_messages})
65
- except Exception as e:
66
- # This will catch the StopIteration if it propagates from the graph
67
- print(f"ERROR during graph.invoke: {e}")
68
- # Depending on how your outer loop handles errors,
69
- # you might want to return a specific error string or re-raise
70
- # For the Hugging Face course, it expects the AGENT ERROR string.
71
- # The run_and_submit_all function already handles this by catching exceptions from agent().
72
- raise # Re-raise the exception to be caught by run_and_submit_all
73
-
74
- # Parsing the answer
75
- if graph_output and "messages" in graph_output and graph_output["messages"]:
76
- final_ai_message = graph_output["messages"][-1] # Get the last message
77
-
78
- # Debug: print the final AI message object
79
- # print("Final AI Message Object from Graph:")
80
- # final_ai_message.pretty_print()
81
-
82
- if hasattr(final_ai_message, 'content'):
83
- raw_answer = str(final_ai_message.content)
84
-
85
- # Your specific parsing: "answer = messages['messages'][-1].content; return answer[14:]"
86
- # This assumes the answer ALWAYS starts with "FINAL ANSWER: " (14 characters)
87
- if raw_answer.upper().startswith("FINAL ANSWER: "):
88
- answer = raw_answer[14:].strip() # Remove "FINAL ANSWER: " and leading/trailing whitespace
89
- else:
90
- # The LLM didn't follow the "FINAL ANSWER: " format
91
- print(f"Warning: LLM output did not start with 'FINAL ANSWER: '. Raw output: '{raw_answer}'")
92
- answer = raw_answer # Return the raw answer if format is not met, or handle as error
93
- else:
94
- answer = "Agent Error: Final message from graph has no content."
95
- print(f"Final message object was: {final_ai_message}")
96
- else:
97
- answer = "Agent Error: Graph did not return expected messages structure."
98
- print(f"Raw graph output: {graph_output}")
99
-
100
- print(f"Agent returning answer: {answer}")
101
- return answer
102
 
103
 
104
  def run_and_submit_all( profile: gr.OAuthProfile | None):
 
21
  """A langgraph agent."""
22
  def __init__(self):
23
  print("BasicAgent initialized.")
24
+ self.graph = build_graph()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
  def __call__(self, question: str) -> str:
27
  print(f"Agent received question (first 50 chars): {question[:50]}...")
28
+ # Wrap the question in a HumanMessage from langchain_core
29
+ messages = [HumanMessage(content=question)]
30
+ messages = self.graph.invoke({"messages": messages})
31
+ answer = messages['messages'][-1].content
32
+ return answer[14:]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
 
35
  def run_and_submit_all( profile: gr.OAuthProfile | None):