wishwakankanamg committed on
Commit ef17b20 · 1 Parent(s): 2a66609

tool called debug print

Files changed (4)
  1. __pycache__/graph.cpython-310.pyc +0 -0
  2. app.log +69 -0
  3. app.py +2 -4
  4. graph.py +102 -47
__pycache__/graph.cpython-310.pyc CHANGED
Binary files a/__pycache__/graph.cpython-310.pyc and b/__pycache__/graph.cpython-310.pyc differ
 
app.log CHANGED
@@ -49913,3 +49913,72 @@ For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/er
49913
  2025-06-06 14:43:36:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49914
  2025-06-06 15:19:39:__main__:INFO: Starting the interface
49915
  2025-06-06 15:20:37:__main__:INFO: Starting the interface
49916
+ 2025-06-06 15:32:33:__main__:INFO: Starting the interface
49917
+ 2025-06-06 15:34:41:__main__:INFO: Starting the interface
49918
+ 2025-06-06 15:35:07:__main__:INFO: Starting the interface
49919
+ 2025-06-06 15:35:26:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49920
+ 2025-06-06 15:35:39:__main__:INFO: Prompt: You are a helpful assistant.
49921
+ 2025-06-06 15:35:41:__main__:ERROR: Exception occurred
49922
+ Traceback (most recent call last):
49923
+ File "/home/user/app/app.py", line 85, in chat_fn
49924
+ async for stream_mode, chunk in graph.astream(
49925
+ File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
49926
+ async for _ in runner.atick(
49927
+ File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
49928
+ _panic_or_proceed(
49929
+ File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
49930
+ raise exc
49931
+ File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
49932
+ return await task.proc.ainvoke(task.input, config)
49933
+ File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
49934
+ input = await step.ainvoke(input, config, **kwargs)
49935
+ File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
49936
+ ret = await self.afunc(*args, **kwargs)
49937
+ File "/home/user/app/graph.py", line 248, in brainstorming_node
49938
+ chain = brainstorming_system_prompt_template | llm_with_tools
49939
+ NameError: name 'brainstorming_system_prompt_template' is not defined. Did you mean: 'brainstorming_system_prompt'?
49940
+ 2025-06-06 15:35:59:__main__:INFO: Starting the interface
49941
+ 2025-06-06 15:36:04:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49942
+ 2025-06-06 15:36:10:__main__:INFO: Prompt: You are a helpful assistant.
49943
+ 2025-06-06 15:36:15:__main__:ERROR: Exception occurred
49944
+ Traceback (most recent call last):
49945
+ File "/home/user/app/app.py", line 85, in chat_fn
49946
+ async for stream_mode, chunk in graph.astream(
49947
+ File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
49948
+ async for _ in runner.atick(
49949
+ File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
49950
+ _panic_or_proceed(
49951
+ File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
49952
+ raise exc
49953
+ File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
49954
+ return await task.proc.ainvoke(task.input, config)
49955
+ File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
49956
+ input = await step.ainvoke(input, config, **kwargs)
49957
+ File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
49958
+ ret = await self.afunc(*args, **kwargs)
49959
+ File "/home/user/app/graph.py", line 248, in brainstorming_node
49960
+ chain = brainstorming_system_prompt_template | llm_with_tools
49961
+ NameError: name 'brainstorming_system_prompt_template' is not defined. Did you mean: 'brainstorming_system_prompt'?
49962
+ 2025-06-06 15:36:35:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49963
+ 2025-06-06 15:38:34:__main__:INFO: Starting the interface
49964
+ 2025-06-06 15:38:40:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49965
+ 2025-06-06 15:38:45:__main__:INFO: Prompt: You are a helpful assistant.
49966
+ 2025-06-06 15:39:40:__main__:ERROR: Exception occurred
49967
+ Traceback (most recent call last):
49968
+ File "/home/user/app/app.py", line 85, in chat_fn
49969
+ async for stream_mode, chunk in graph.astream(
49970
+ File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
49971
+ raise GraphRecursionError(msg)
49972
+ langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
49973
+ For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
49974
+ 2025-06-06 15:55:25:__main__:INFO: Starting the interface
49975
+ 2025-06-06 15:55:30:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49976
+ 2025-06-06 15:55:53:__main__:INFO: Prompt: You are a helpful assistant.
49977
+ 2025-06-06 15:58:00:__main__:ERROR: Exception occurred
49978
+ Traceback (most recent call last):
49979
+ File "/home/user/app/app.py", line 85, in chat_fn
49980
+ async for stream_mode, chunk in graph.astream(
49981
+ File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
49982
+ raise GraphRecursionError(msg)
49983
+ langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
49984
+ For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
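The GraphRecursionError entries above point at the `recursion_limit` config key. A minimal sketch of how that key could be passed where app.py streams the graph (the input payload, stream modes, helper name, and the `from graph import graph` import are illustrative assumptions, not code from this commit):

# Sketch only: raising LangGraph's recursion limit for one streamed run.
from graph import graph  # assumes graph.py exposes the compiled graph as `graph`

async def run_once(user_text: str):
    config = {
        "recursion_limit": 50,  # the log shows a limit of 20 being hit; LangGraph's default is 25
    }
    async for stream_mode, chunk in graph.astream(
        {"messages": [("user", user_text)]},  # placeholder input, not the app's real payload
        config=config,
        stream_mode=["updates", "messages"],
    ):
        print(stream_mode, chunk)
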
app.py CHANGED
@@ -691,10 +691,8 @@ if __name__ == "__main__":
691
  # ],
692
  # # queue=False # Consider if queueing is needed or affects load order for this
693
  # )
694
- @demo.load(inputs=[browser_state], outputs=[username, password])
695
- def load_browser_state(data):
696
- return data[0], data[1]
697
-
698
  @app.load( # Or demo.load
699
  inputs=[
700
  is_new_user_for_greeting,
 
691
  # ],
692
  # # queue=False # Consider if queueing is needed or affects load order for this
693
  # )
694
+
695
+
 
 
696
  @app.load( # Or demo.load
697
  inputs=[
698
  is_new_user_for_greeting,
graph.py CHANGED
@@ -104,6 +104,9 @@ class GraphProcessingState(BaseModel):
104
  search_enabled: bool = Field(default=True, description="Whether to enable search tools")
105
  next_stage: str = Field(default="", description="The next stage to execute, decided by the guidance node.")
106
 
107
  # Completion flags for each stage
108
  idea_complete: bool = Field(default=False)
109
  brainstorming_complete: bool = Field(default=False)
@@ -213,66 +216,106 @@ async def guidance_node(state: GraphProcessingState, config=None):
213
  }
214
 
215
  async def brainstorming_node(state: GraphProcessingState, config=None):
216
- print("\n--- Brainstorming Node ---")
217
  if not model:
218
  return {"messages": [AIMessage(content="Model not available for brainstorming.")]}
219
 
220
- # Define the system prompt for brainstorming
221
- # This prompt guides the LLM on its role, constraints, and how to signal completion or use tools.
222
  brainstorming_system_prompt = ChatPromptTemplate.from_messages(
223
  [
224
  ("system",
225
  "You are a brainstorming assistant for DIY projects. Your goal is to help the user finalize a single, specific DIY project idea. "
226
  "The project idea MUST satisfy these critical criteria:\n"
227
  "1. Buildable by an average person with basic DIY skills.\n"
228
- "2. Uses only materials and basic tools commonly available in general hardware stores, craft stores, or supermarkets worldwide. (e.g., wood, screws, glue, paint, fabric, cardboard, basic hand tools like screwdrivers, hammers, saws, drills. AVOID specialized electronic components, 3D printing, specific brand items not universally available, or complex machinery.)\n"
 
 
229
  "3. The final product should be a tangible item.\n\n"
230
  "Your interaction flow:\n"
231
  "- Engage with the user to understand their interests or initial thoughts.\n"
232
  "- Propose ideas or refine user's suggestions to meet the criteria.\n"
233
- "- If an idea proposed by you or the user clearly meets all criteria and you believe it's a good final choice, respond ONLY with the exact phrase: 'IDEA FINALIZED: [Name of the Idea]'. Example: 'IDEA FINALIZED: Simple Wooden Spice Rack'. Do not add any other text before or after this phrase if you use it.\n"
234
- "- If you need more information from the user to refine an idea, or if their current suggestion doesn't meet the criteria, ask clarifying questions. Guide them towards simpler, more accessible options.\n"
235
- "- If you are stuck, the user's request is too vague for you to make progress towards a suitable idea, or they propose something clearly outside the 'universally available materials' constraint, and you need their direct input to clarify fundamental aspects, you should use the 'human_assistance' tool. Frame your request for the human clearly. For example: 'To ensure the idea is simple enough, could you tell me what kind of tools you are comfortable using?' or 'The idea of a custom electronic gadget is interesting, but finding parts globally is hard. Could we think of a non-electronic version, or what's the core function you want from it that we could achieve with simpler materials?'"
236
  ),
237
  MessagesPlaceholder(variable_name="messages"),
238
  ]
239
  )
240
 
241
- # Bind tools that this node can use
242
- # For brainstorming, human_assistance is key. Search might also be useful.
243
  node_tools = [human_assistance]
244
- if state.search_enabled and tavily_search_tool: # Check if tavily_search_tool was initialized
245
  node_tools.append(tavily_search_tool)
246
 
247
  llm_with_tools = model.bind_tools(node_tools)
248
- chain = brainstorming_system_prompt_template | llm_with_tools
249
-
250
- # Get the current messages from the state
251
- current_messages = state.messages
252
- # Log current messages for debugging
253
- # print("Messages sent to brainstorming LLM:")
254
- # for msg in current_messages:
255
- # print(f" {msg.type}: {msg.content}")
256
 
257
- response_message = await chain.ainvoke({"messages": current_messages}, config=config)
258
- # print(f"Brainstorming LLM raw response: {response_message}")
259
 
 
260
 
261
- updates = {"messages": [response_message]} # Add the AI's response to the history
262
-
263
- # Check for the finalization signal in the AI's response content
264
  if isinstance(response_message, AIMessage) and response_message.content:
265
- if response_message.content.startswith("IDEA FINALIZED:"):
 
 
266
  updates["brainstorming_complete"] = True
267
- # Optionally, extract the idea name and store it if needed elsewhere in the state
268
- # idea_name = response_message.content.replace("IDEA FINALIZED:", "").strip()
269
- # updates["finalized_idea_name"] = idea_name # If you add this to GraphProcessingState
270
- print(f"✅ Brainstorming complete! Idea: {response_message.content}")
271
- elif response_message.tool_calls:
 
 
272
  print(f"🛠️ Brainstorming node initiated tool call(s): {response_message.tool_calls}")
 
273
  else:
274
- print(f"💬 Brainstorming node continues discussion: {response_message.content}")
 
 
275
 
276
 
277
  return updates
278
 
@@ -310,6 +353,32 @@ async def planning_node(state: GraphProcessingState, config=None):
310
  "messages": response
311
  }
312
 
313
 
314
  def guidance_routing(state: GraphProcessingState) -> str:
315
 
@@ -358,29 +427,15 @@ def define_workflow() -> CompiledStateGraph:
358
  workflow.add_node("guidance_node", guidance_node)
359
  workflow.add_node("brainstorming_node", brainstorming_node)
360
 
361
-
362
- # workflow.add_node("chatbot", chatbot)
363
-
364
  # Edges
365
  workflow.add_edge("tools", "guidance_node")
366
- # workflow.add_edge("planning_node", "assistant_node")
367
  workflow.add_edge("planning_node", "guidance_node")
368
  workflow.add_edge("brainstorming_node", "guidance_node")
 
 
369
 
370
-
371
-
372
-
373
- # workflow.add_conditional_edges(
374
- # "chatbot",
375
- # tools_condition,
376
- # )
377
-
378
- # workflow.add_edge("tools", "chatbot")
379
- # workflow.add_edge(START, "chatbot")
380
-
381
- # Conditional routing
382
- # workflow.add_conditional_edges("assistant_node", assistant_routing)
383
  workflow.add_conditional_edges("guidance_node", guidance_routing)
 
384
 
385
  # # Set end nodes
386
  workflow.set_entry_point("guidance_node")
 
104
  search_enabled: bool = Field(default=True, description="Whether to enable search tools")
105
  next_stage: str = Field(default="", description="The next stage to execute, decided by the guidance node.")
106
 
107
+ tool_call_required: bool = Field(default=False, description="Whether a tool should be called from brainstorming.")
108
+ loop_brainstorming: bool = Field(default=False, description="Whether to loop back to brainstorming for further iteration.")
109
+
110
  # Completion flags for each stage
111
  idea_complete: bool = Field(default=False)
112
  brainstorming_complete: bool = Field(default=False)
 
216
  }
217
 
218
  async def brainstorming_node(state: GraphProcessingState, config=None):
219
+ print("\n--- Brainstorming Node (Debug) ---")
220
+
221
+ print(f"Prompt: {state.prompt}")
222
+ for message in state.messages:
223
+ if isinstance(message, HumanMessage):
224
+ print(f"Human: {message.content}")
225
+ elif isinstance(message, AIMessage):
226
+ if message.content:
227
+ if isinstance(message.content, list):
228
+ texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
229
+ if texts:
230
+ print(f"AI: {' '.join(texts)}")
231
+ elif isinstance(message.content, str):
232
+ print(f"AI: {message.content}")
233
+ elif isinstance(message, SystemMessage):
234
+ print(f"System: {message.content}")
235
+ elif isinstance(message, ToolMessage):
236
+ print(f"Tool: {message.content}")
237
+
238
+ print(f"Tools Enabled: {state.tools_enabled}")
239
+ print(f"Search Enabled: {state.search_enabled}")
240
+ print(f"Next Stage: {state.next_stage}")
241
+
242
+ print(f"Idea Complete: {state.idea_complete}")
243
+ print(f"Brainstorming Complete: {state.brainstorming_complete}")
244
+ print(f"Planning Complete: {state.planning_complete}")
245
+ print(f"Drawing Complete: {state.drawing_complete}")
246
+ print(f"Product Searching Complete: {state.product_searching_complete}")
247
+ print(f"Purchasing Complete: {state.purchasing_complete}")
248
+
249
  if not model:
250
  return {"messages": [AIMessage(content="Model not available for brainstorming.")]}
251
 
252
+ # Define the system prompt guiding the brainstorming assistant
 
253
  brainstorming_system_prompt = ChatPromptTemplate.from_messages(
254
  [
255
  ("system",
256
  "You are a brainstorming assistant for DIY projects. Your goal is to help the user finalize a single, specific DIY project idea. "
257
  "The project idea MUST satisfy these critical criteria:\n"
258
  "1. Buildable by an average person with basic DIY skills.\n"
259
+ "2. Uses only materials and basic tools commonly available in general hardware stores, craft stores, or supermarkets worldwide. "
260
+ "(e.g., wood, screws, glue, paint, fabric, cardboard, basic hand tools like screwdrivers, hammers, saws, drills. "
261
+ "AVOID specialized electronic components, 3D printing, specific brand items not universally available, or complex machinery.)\n"
262
  "3. The final product should be a tangible item.\n\n"
263
  "Your interaction flow:\n"
264
  "- Engage with the user to understand their interests or initial thoughts.\n"
265
  "- Propose ideas or refine user's suggestions to meet the criteria.\n"
266
+ "- If an idea proposed by you or the user clearly meets all criteria and you believe it's a good final choice, respond ONLY with the exact phrase: "
267
+ "'IDEA FINALIZED: [Name of the Idea]'. Example: 'IDEA FINALIZED: Simple Wooden Spice Rack'. "
268
+ "Do not add any other text before or after this phrase if you use it.\n"
269
+ "- If you need more information from the user to refine an idea, or if their current suggestion doesn't meet the criteria, ask clarifying questions. "
270
+ "Guide them towards simpler, more accessible options.\n"
271
+ "- If you are stuck, the user's request is too vague for you to make progress towards a suitable idea, or they propose something clearly outside the "
272
+ "'universally available materials' constraint, and you need their direct input to clarify fundamental aspects, you should use the 'human_assistance' tool. "
273
+ "Frame your request for the human clearly. For example: 'To ensure the idea is simple enough, could you tell me what kind of tools you are comfortable using?' "
274
+ "or 'The idea of a custom electronic gadget is interesting, but finding parts globally is hard. Could we think of a non-electronic version, or what's the core function "
275
+ "you want from it that we could achieve with simpler materials?'"
276
  ),
277
  MessagesPlaceholder(variable_name="messages"),
278
  ]
279
  )
280
 
281
+ # Tools allowed for brainstorming
 
282
  node_tools = [human_assistance]
283
+ if state.search_enabled and tavily_search_tool: # only add search tool if enabled and initialized
284
  node_tools.append(tavily_search_tool)
285
 
286
  llm_with_tools = model.bind_tools(node_tools)
287
+ chain = brainstorming_system_prompt | llm_with_tools
288
 
289
+ # Pass current messages from the state to the chain
290
+ response_message = await chain.ainvoke({"messages": state.messages}, config=config)
291
 
292
+ updates = {"messages": [response_message]}
293
 
294
  if isinstance(response_message, AIMessage) and response_message.content:
295
+ content = response_message.content.strip()
296
+
297
+ if content.startswith("IDEA FINALIZED:"):
298
  updates["brainstorming_complete"] = True
299
+ updates["tool_call_required"] = False
300
+ updates["loop_brainstorming"] = False
301
+ print(f" Brainstorming complete! Idea: {content}")
302
+
303
+ elif getattr(response_message, "tool_calls", None):
304
+ updates["tool_call_required"] = True
305
+ updates["loop_brainstorming"] = False
306
  print(f"🛠️ Brainstorming node initiated tool call(s): {response_message.tool_calls}")
307
+
308
  else:
309
+ updates["tool_call_required"] = False
310
+ updates["loop_brainstorming"] = True
311
+ print(f"💬 Brainstorming node continues discussion: {content}")
312
 
313
+ else:
314
+ # If no proper response, keep looping brainstorming
315
+ updates["tool_call_required"] = False
316
+ updates["loop_brainstorming"] = True
317
+
318
+ print("\n--- End Brainstorming Node Debug ---")
319
 
320
  return updates
321
 
 
353
  "messages": response
354
  }
355
 
356
+ def brainstorming_routing(state: GraphProcessingState) -> str:
357
+ print("\n--- brainstorming_routing Edge (Debug via print) ---") # Added a newline for clarity
358
+ print(f"Prompt: {state.prompt}")
359
+ # print(f"Message: {state.messages}")
360
+ print(f"Tools Enabled: {state.tools_enabled}")
361
+ print(f"Search Enabled: {state.search_enabled}")
362
+ print(f"Next Stage: {state.next_stage}")
363
+
364
+
365
+ # Log boolean completion flags
366
+ print(f"Idea Complete: {state.idea_complete}")
367
+ print(f"Brainstorming Complete: {state.brainstorming_complete}")
368
+ print(f"Planning Complete: {state.planning_complete}")
369
+ print(f"Drawing Complete: {state.drawing_complete}")
370
+ print(f"Product Searching Complete: {state.product_searching_complete}")
371
+ print(f"Purchasing Complete: {state.purchasing_complete}")
372
+ print("--- End Guidance Node Debug ---") # Added for clarity
373
+ if state.tool_call_required:
374
+ print('calling tools for brainstorming')
375
+ return "tools"
376
+ elif state.loop_brainstorming:
377
+ print('returning back to brainstorming at the route')
378
+ return "brainstorming_node"
379
+ else:
380
+ print('all good in brainstorming route going back to guidance')
381
+ return "guidance_node"
382
 
383
  def guidance_routing(state: GraphProcessingState) -> str:
384
 
 
427
  workflow.add_node("guidance_node", guidance_node)
428
  workflow.add_node("brainstorming_node", brainstorming_node)
429
 
430
  # Edges
431
  workflow.add_edge("tools", "guidance_node")
 
432
  workflow.add_edge("planning_node", "guidance_node")
433
  workflow.add_edge("brainstorming_node", "guidance_node")
434
+ workflow.add_edge("brainstorming_node", "tools")
435
+ workflow.add_edge("tools", "brainstorming_node")
436
 
437
  workflow.add_conditional_edges("guidance_node", guidance_routing)
438
+ workflow.add_conditional_edges("brainstorming_node", brainstorming_routing)
439
 
440
  # # Set end nodes
441
  workflow.set_entry_point("guidance_node")
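For reference, LangGraph's `add_conditional_edges` also accepts an explicit path map, so the labels returned by a routing function such as `brainstorming_routing` do not have to match node names. A minimal sketch reusing the names from the diff above (illustrative only, not part of this commit):

# Sketch: the same conditional routing with the label-to-node mapping spelled out.
# Assumes `workflow`, `brainstorming_routing`, and the nodes registered as
# "tools", "brainstorming_node", and "guidance_node" in graph.py.
workflow.add_conditional_edges(
    "brainstorming_node",
    brainstorming_routing,
    {
        "tools": "tools",                            # execute the bound tools next
        "brainstorming_node": "brainstorming_node",  # loop for another brainstorming turn
        "guidance_node": "guidance_node",            # hand control back to guidance
    },
)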