wishwakankanamg committed on
Commit
e11925e
·
1 Parent(s): f21c09d

first attempt to finalize node setup

Files changed (3)
  1. __pycache__/graph.cpython-310.pyc +0 -0
  2. app.log +29 -0
  3. graph.py +197 -138
__pycache__/graph.cpython-310.pyc CHANGED
Binary files a/__pycache__/graph.cpython-310.pyc and b/__pycache__/graph.cpython-310.pyc differ
 
app.log CHANGED
@@ -49882,3 +49882,32 @@ For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/er
49882
  2025-06-06 14:35:30:__main__:INFO: Prompt: You are a helpful assistant.
49883
  2025-06-06 14:35:39:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49884
  2025-06-06 14:36:20:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49885
+ 2025-06-06 14:36:35:__main__:INFO: Starting the interface
49886
+ 2025-06-06 14:37:12:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49887
+ 2025-06-06 14:37:17:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49888
+ 2025-06-06 14:37:31:__main__:INFO: Prompt: You are a helpful assistant.
49889
+ 2025-06-06 14:38:10:__main__:ERROR: Exception occurred
49890
+ Traceback (most recent call last):
49891
+ File "/home/user/app/app.py", line 85, in chat_fn
49892
+ async for stream_mode, chunk in graph.astream(
49893
+ File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
49894
+ raise GraphRecursionError(msg)
49895
+ langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
49896
+ For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
49897
+ 2025-06-06 14:38:58:__main__:INFO: Prompt: You are a helpful assistant.
49898
+ 2025-06-06 14:39:30:__main__:INFO: Starting the interface
49899
+ 2025-06-06 14:39:51:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49900
+ 2025-06-06 14:39:53:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49901
+ 2025-06-06 14:40:01:__main__:INFO: Prompt: You are a helpful assistant.
49902
+ 2025-06-06 14:40:41:__main__:ERROR: Exception occurred
49903
+ Traceback (most recent call last):
49904
+ File "/home/user/app/app.py", line 85, in chat_fn
49905
+ async for stream_mode, chunk in graph.astream(
49906
+ File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
49907
+ raise GraphRecursionError(msg)
49908
+ langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
49909
+ For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
49910
+ 2025-06-06 14:42:57:__main__:INFO: Starting the interface
49911
+ 2025-06-06 14:43:07:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49912
+ 2025-06-06 14:43:08:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
49913
+ 2025-06-06 14:43:36:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
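
The two GraphRecursionError traces above show graph.astream stopping after 20 supersteps because guidance_node keeps routing back without any stage ever completing. Below is a minimal sketch of a call site that raises the limit, mirroring the graph.astream call from the traceback; the limit of 50, the example message, and the run_once wrapper are illustrative assumptions, not taken from app.py.

    from langchain_core.messages import HumanMessage
    from graph import graph  # graph.py exposes the compiled graph as `graph`

    # recursion_limit is a standard LangGraph RunnableConfig key; 50 is an
    # arbitrary example value (the log shows the current limit is 20).
    config = {"recursion_limit": 50}

    async def run_once():
        # Passing a list of stream modes makes astream yield (mode, chunk)
        # tuples, matching the unpacking used in chat_fn.
        async for stream_mode, chunk in graph.astream(
            {"messages": [HumanMessage(content="Help me pick a DIY project")]},
            config=config,
            stream_mode=["updates", "messages"],
        ):
            print(stream_mode, chunk)

Raising the limit only buys more iterations; the loop still has to terminate via guidance_routing returning END or one of the completion flags flipping to True.
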
graph.py CHANGED
@@ -113,108 +113,7 @@ class GraphProcessingState(BaseModel):
113
  purchasing_complete: bool = Field(default=False)
114
 
115
 
116
- async def planning_node(state: GraphProcessingState, config=None):
117
- # Define the system prompt for planning
118
- planning_prompt = "Based on the user's idea, create a detailed step-by-step plan to build the DIY product."
119
-
120
- # Combine the planning prompt with any existing prompts
121
- if state.prompt:
122
- final_prompt = "\n".join([planning_prompt, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
123
- else:
124
- final_prompt = "\n".join([planning_prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
125
-
126
- # Create the prompt template
127
- prompt = ChatPromptTemplate.from_messages(
128
- [
129
- ("system", final_prompt),
130
- MessagesPlaceholder(variable_name="messages"),
131
- ]
132
- )
133
 
134
- # Bind tools if necessary
135
- assistant_tools = []
136
- if state.tools_enabled.get("download_website_text", True):
137
- assistant_tools.append(download_website_text)
138
- if search_enabled and state.tools_enabled.get("tavily_search_results_json", True):
139
- assistant_tools.append(tavily_search_tool)
140
- assistant_model = model.bind_tools(assistant_tools)
141
-
142
- # Create the chain and invoke it
143
- chain = prompt | assistant_model
144
- response = await chain.ainvoke({"messages": state.messages}, config=config)
145
-
146
- return {
147
- "messages": response
148
- }
149
-
150
- async def assistant_node(state: GraphProcessingState, config=None):
151
- print("\n--- Assistance Node (Debug via print) ---") # Added a newline for clarity
152
-
153
-
154
- print(f"Prompt: {state.prompt}")
155
-
156
- print(f"Tools Enabled: {state.tools_enabled}")
157
- print(f"Search Enabled: {state.search_enabled}")
158
- print(f"Next Stage: {state.next_stage}")
159
-
160
-
161
- # Log boolean completion flags
162
- print(f"Idea Complete: {state.idea_complete}")
163
- print(f"Brainstorming Complete: {state.brainstorming_complete}")
164
- print(f"Planning Complete: {state.planning_complete}")
165
- print(f"Drawing Complete: {state.drawing_complete}")
166
- print(f"Product Searching Complete: {state.product_searching_complete}")
167
- print(f"Purchasing Complete: {state.purchasing_complete}")
168
- print("--- End Guidance Node Debug ---") # Added for clarity
169
- print(f"\nMessage: {state.messages}")
170
- assistant_tools = []
171
- if state.tools_enabled.get("download_website_text", True):
172
- assistant_tools.append(download_website_text)
173
- if search_enabled and state.tools_enabled.get("tavily_search_results_json", True):
174
- assistant_tools.append(tavily_search_tool)
175
- assistant_model = model.bind_tools(assistant_tools)
176
- if state.prompt:
177
- final_prompt = "\n".join([state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
178
- else:
179
- final_prompt = ASSISTANT_SYSTEM_PROMPT_BASE
180
-
181
- prompt = ChatPromptTemplate.from_messages(
182
- [
183
- ("system", final_prompt),
184
- MessagesPlaceholder(variable_name="messages"),
185
- ]
186
- )
187
- chain = prompt | assistant_model
188
- response = await chain.ainvoke({"messages": state.messages}, config=config)
189
-
190
- for msg in response:
191
- if isinstance(msg, HumanMessage):
192
- print("Human:", msg.content)
193
- elif isinstance(msg, AIMessage):
194
- if isinstance(msg.content, list):
195
- ai_texts = [part.get("text", "") for part in msg.content if isinstance(part, dict)]
196
- print("AI:", " ".join(ai_texts))
197
- else:
198
- print("AI:", msg.content)
199
-
200
- idea_complete = evaluate_idea_completion(response)
201
-
202
- return {
203
- "messages": response,
204
- "idea_complete": idea_complete
205
- }
206
-
207
- # message = llm_with_tools.invoke(state["messages"])
208
- # Because we will be interrupting during tool execution,
209
- # we disable parallel tool calling to avoid repeating any
210
- # tool invocations when we resume.
211
- assert len(response.tool_calls) <= 1
212
- idea_complete = evaluate_idea_completion(response)
213
-
214
- return {
215
- "messages": response,
216
- "idea_complete": idea_complete
217
- }
218
 
219
  async def guidance_node(state: GraphProcessingState, config=None):
220
  # Use logger.debug() for verbose debugging information
@@ -313,36 +212,108 @@ async def guidance_node(state: GraphProcessingState, config=None):
313
  "next_stage": incomplete[0] if incomplete else None
314
  }
315
 
316
- # def assistant_cond_edge(state: GraphProcessingState):
317
- # last_message = state.messages[-1]
318
- # if hasattr(last_message, "tool_calls") and last_message.tool_calls:
319
- # logger.info(f"Tool call detected: {last_message.tool_calls}")
320
- # return "tools"
321
- # return END
322
- def assistant_routing(state: GraphProcessingState) -> str:
323
- last_message = state.messages[-1]
324
- if hasattr(last_message, "tool_calls") and last_message.tool_calls:
325
- logger.info("Tool call detected. Routing to 'tools' node.")
326
- return "tools"
327
- elif state.idea_complete:
328
- logger.info("Idea is complete. Routing to 'planning_node'.")
329
- return "planning_node"
330
  else:
331
- logger.info("Idea is incomplete. Routing back to 'assistant_node'.")
332
- return "assistant_node"
333
 
334
- def guidance_routing(state: GraphProcessingState) -> str:
335
- # Use logger.debug() for verbose debugging information
336
- # This allows you to control its visibility via logging configuration
337
- print("\n--- Guidance Routing Edge (Debug via print) ---") # Added a newline for clarity
338
 
339
- # Log the entire state as a dictionary (Pydantic's .model_dump())
340
- # This is very comprehensive. For large states, consider logging parts.
341
- # logger.debug(f"Full State: {state.model_dump(mode='json')}") # mode='json' makes it JSON-serializable
342
- # Or for a more Pythonic dict:
343
- # print(f"Full State Dict: {state.model_dump()}")
344

345

  print(f"Prompt: {state.prompt}")
347
  # print(f"Message: {state.messages}")
348
  print(f"Tools Enabled: {state.tools_enabled}")
@@ -358,19 +329,21 @@ def guidance_routing(state: GraphProcessingState) -> str:
358
  print(f"Product Searching Complete: {state.product_searching_complete}")
359
  print(f"Purchasing Complete: {state.purchasing_complete}")
360
  print("--- End Guidance Node Debug ---") # Added for clarity
 
361
  next_stage = state.next_stage
362
  if next_stage == "brainstorming":
363
- return "assistant_node"
 
364
  elif next_stage == "planning":
365
- return "planning_node"
366
- elif next_stage == "drawing":
367
- return "tools"
368
- elif next_stage == "product_searching":
369
- return "assistant_node"
370
- elif next_stage == "purchasing":
371
- return "assistant_node"
372
 
373
- return END
374
 
375
 
376
 
@@ -380,10 +353,11 @@ def define_workflow() -> CompiledStateGraph:
380
  workflow = StateGraph(GraphProcessingState)
381
 
382
  # Add nodes
383
- workflow.add_node("assistant_node", assistant_node)
384
  workflow.add_node("tools", ToolNode(tools))
385
  workflow.add_node("planning_node", planning_node)
386
  workflow.add_node("guidance_node", guidance_node)
 
 
387
 
388
  # workflow.add_node("chatbot", chatbot)
389
 
@@ -391,7 +365,8 @@ def define_workflow() -> CompiledStateGraph:
391
  workflow.add_edge("tools", "guidance_node")
392
  # workflow.add_edge("planning_node", "assistant_node")
393
  workflow.add_edge("planning_node", "guidance_node")
394
- workflow.add_edge("assistant_node", "guidance_node")
 
395
 
396
 
397
 
@@ -423,4 +398,88 @@ def define_workflow() -> CompiledStateGraph:
423
 
424
  return compiled_graph
425
 
426
- graph = define_workflow()
113
  purchasing_complete: bool = Field(default=False)
114

115

116

117

118
  async def guidance_node(state: GraphProcessingState, config=None):
119
  # Use logger.debug() for verbose debugging information
 
212
  "next_stage": incomplete[0] if incomplete else None
213
  }
214
 
215
+ async def brainstorming_node(state: GraphProcessingState, config=None):
216
+ print("\n--- Brainstorming Node ---")
217
+ if not model:
218
+ return {"messages": [AIMessage(content="Model not available for brainstorming.")]}
219
+
220
+ # Define the system prompt for brainstorming
221
+ # This prompt guides the LLM on its role, constraints, and how to signal completion or use tools.
222
+ brainstorming_system_prompt = ChatPromptTemplate.from_messages(
223
+ [
224
+ ("system",
225
+ "You are a brainstorming assistant for DIY projects. Your goal is to help the user finalize a single, specific DIY project idea. "
226
+ "The project idea MUST satisfy these critical criteria:\n"
227
+ "1. Buildable by an average person with basic DIY skills.\n"
228
+ "2. Uses only materials and basic tools commonly available in general hardware stores, craft stores, or supermarkets worldwide. (e.g., wood, screws, glue, paint, fabric, cardboard, basic hand tools like screwdrivers, hammers, saws, drills. AVOID specialized electronic components, 3D printing, specific brand items not universally available, or complex machinery.)\n"
229
+ "3. The final product should be a tangible item.\n\n"
230
+ "Your interaction flow:\n"
231
+ "- Engage with the user to understand their interests or initial thoughts.\n"
232
+ "- Propose ideas or refine user's suggestions to meet the criteria.\n"
233
+ "- If an idea proposed by you or the user clearly meets all criteria and you believe it's a good final choice, respond ONLY with the exact phrase: 'IDEA FINALIZED: [Name of the Idea]'. Example: 'IDEA FINALIZED: Simple Wooden Spice Rack'. Do not add any other text before or after this phrase if you use it.\n"
234
+ "- If you need more information from the user to refine an idea, or if their current suggestion doesn't meet the criteria, ask clarifying questions. Guide them towards simpler, more accessible options.\n"
235
+ "- If you are stuck, the user's request is too vague for you to make progress towards a suitable idea, or they propose something clearly outside the 'universally available materials' constraint, and you need their direct input to clarify fundamental aspects, you should use the 'human_assistance' tool. Frame your request for the human clearly. For example: 'To ensure the idea is simple enough, could you tell me what kind of tools you are comfortable using?' or 'The idea of a custom electronic gadget is interesting, but finding parts globally is hard. Could we think of a non-electronic version, or what's the core function you want from it that we could achieve with simpler materials?'"
236
+ ),
237
+ MessagesPlaceholder(variable_name="messages"),
238
+ ]
239
+ )
240
+
241
+ # Bind tools that this node can use
242
+ # For brainstorming, human_assistance is key. Search might also be useful.
243
+ node_tools = [human_assistance]
244
+ if state.search_enabled and tavily_search_tool: # Check if tavily_search_tool was initialized
245
+ node_tools.append(tavily_search_tool)
246
+
247
+ llm_with_tools = model.bind_tools(node_tools)
248
+ chain = brainstorming_system_prompt | llm_with_tools
249
+
250
+ # Get the current messages from the state
251
+ current_messages = state.messages
252
+ # Log current messages for debugging
253
+ # print("Messages sent to brainstorming LLM:")
254
+ # for msg in current_messages:
255
+ # print(f" {msg.type}: {msg.content}")
256
+
257
+ response_message = await chain.ainvoke({"messages": current_messages}, config=config)
258
+ # print(f"Brainstorming LLM raw response: {response_message}")
259
+
260
+
261
+ updates = {"messages": [response_message]} # Add the AI's response to the history
262
+
263
+ # Check for the finalization signal in the AI's response content
264
+ if isinstance(response_message, AIMessage) and response_message.content:
265
+ if response_message.content.startswith("IDEA FINALIZED:"):
266
+ updates["brainstorming_complete"] = True
267
+ # Optionally, extract the idea name and store it if needed elsewhere in the state
268
+ # idea_name = response_message.content.replace("IDEA FINALIZED:", "").strip()
269
+ # updates["finalized_idea_name"] = idea_name # If you add this to GraphProcessingState
270
+ print(f"✅ Brainstorming complete! Idea: {response_message.content}")
271
+ elif response_message.tool_calls:
272
+ print(f"🛠️ Brainstorming node initiated tool call(s): {response_message.tool_calls}")
273
+ else:
274
+ print(f"💬 Brainstorming node continues discussion: {response_message.content}")
275
+
276
+
277
+ return updates
278
+
279
+ async def planning_node(state: GraphProcessingState, config=None):
280
+ # Define the system prompt for planning
281
+ planning_prompt = "Based on the user's idea, create a detailed step-by-step plan to build the DIY product."
282
+
283
+ # Combine the planning prompt with any existing prompts
284
+ if state.prompt:
285
+ final_prompt = "\n".join([planning_prompt, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
286
  else:
287
+ final_prompt = "\n".join([planning_prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
 
288
 
289
+ # Create the prompt template
290
+ prompt = ChatPromptTemplate.from_messages(
291
+ [
292
+ ("system", final_prompt),
293
+ MessagesPlaceholder(variable_name="messages"),
294
+ ]
295
+ )
296
 
297
+ # Bind tools if necessary
298
+ assistant_tools = []
299
+ if state.tools_enabled.get("download_website_text", True):
300
+ assistant_tools.append(download_website_text)
301
+ if search_enabled and state.tools_enabled.get("tavily_search_results_json", True):
302
+ assistant_tools.append(tavily_search_tool)
303
+ assistant_model = model.bind_tools(assistant_tools)
304
 
305
+ # Create the chain and invoke it
306
+ chain = prompt | assistant_model
307
+ response = await chain.ainvoke({"messages": state.messages}, config=config)
308
 
309
+ return {
310
+ "messages": response
311
+ }
312
+
313
+
314
+ def guidance_routing(state: GraphProcessingState) -> str:
315
+
316
+ print("\n--- Guidance Routing Edge (Debug via print) ---") # Added a newline for clarity
317
  print(f"Prompt: {state.prompt}")
318
  # print(f"Message: {state.messages}")
319
  print(f"Tools Enabled: {state.tools_enabled}")
 
329
  print(f"Product Searching Complete: {state.product_searching_complete}")
330
  print(f"Purchasing Complete: {state.purchasing_complete}")
331
  print("--- End Guidance Node Debug ---") # Added for clarity
332
+
333
  next_stage = state.next_stage
334
  if next_stage == "brainstorming":
335
+ return "brainstorming_node"
336
+
337
  elif next_stage == "planning":
338
+ # return "planning_node"
339
+ # elif next_stage == "drawing":
340
+ # return "drawing_node"
341
+ # elif next_stage == "product_searching":
342
+ # return "product_searching"
343
+ # elif next_stage == "purchasing":
344
+ # return "purchasing_node"
345
 
346
+ return END
347
 
348
 
349
 
 
353
  workflow = StateGraph(GraphProcessingState)
354
 
355
  # Add nodes
 
356
  workflow.add_node("tools", ToolNode(tools))
357
  workflow.add_node("planning_node", planning_node)
358
  workflow.add_node("guidance_node", guidance_node)
359
+ workflow.add_node("brainstorming_node", brainstorming_node)
360
+
361
 
362
  # workflow.add_node("chatbot", chatbot)
363
 
 
365
  workflow.add_edge("tools", "guidance_node")
366
  # workflow.add_edge("planning_node", "assistant_node")
367
  workflow.add_edge("planning_node", "guidance_node")
368
+ workflow.add_edge("brainstorming_node", "guidance_node")
369
+
370
 
371
 
372
 
 
398
 
399
  return compiled_graph
400
 
401
+ graph = define_workflow()
402
+
403
+
404
+
405
+
406
+
407
+
408
+
409
+
410
+
411
+
412
+
413
+
414
+
415
+
416
+
417
+
418
+ # async def assistant_node(state: GraphProcessingState, config=None):
419
+ # print("\n--- Assistance Node (Debug via print) ---") # Added a newline for clarity
420
+
421
+
422
+ # print(f"Prompt: {state.prompt}")
423
+
424
+ # print(f"Tools Enabled: {state.tools_enabled}")
425
+ # print(f"Search Enabled: {state.search_enabled}")
426
+ # print(f"Next Stage: {state.next_stage}")
427
+
428
+
429
+ # # Log boolean completion flags
430
+ # print(f"Idea Complete: {state.idea_complete}")
431
+ # print(f"Brainstorming Complete: {state.brainstorming_complete}")
432
+ # print(f"Planning Complete: {state.planning_complete}")
433
+ # print(f"Drawing Complete: {state.drawing_complete}")
434
+ # print(f"Product Searching Complete: {state.product_searching_complete}")
435
+ # print(f"Purchasing Complete: {state.purchasing_complete}")
436
+ # print("--- End Guidance Node Debug ---") # Added for clarity
437
+ # print(f"\nMessage: {state.messages}")
438
+ # assistant_tools = []
439
+ # if state.tools_enabled.get("download_website_text", True):
440
+ # assistant_tools.append(download_website_text)
441
+ # if search_enabled and state.tools_enabled.get("tavily_search_results_json", True):
442
+ # assistant_tools.append(tavily_search_tool)
443
+ # assistant_model = model.bind_tools(assistant_tools)
444
+ # if state.prompt:
445
+ # final_prompt = "\n".join([state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
446
+ # else:
447
+ # final_prompt = ASSISTANT_SYSTEM_PROMPT_BASE
448
+
449
+ # prompt = ChatPromptTemplate.from_messages(
450
+ # [
451
+ # ("system", final_prompt),
452
+ # MessagesPlaceholder(variable_name="messages"),
453
+ # ]
454
+ # )
455
+ # chain = prompt | assistant_model
456
+ # response = await chain.ainvoke({"messages": state.messages}, config=config)
457
+
458
+ # for msg in response:
459
+ # if isinstance(msg, HumanMessage):
460
+ # print("Human:", msg.content)
461
+ # elif isinstance(msg, AIMessage):
462
+ # if isinstance(msg.content, list):
463
+ # ai_texts = [part.get("text", "") for part in msg.content if isinstance(part, dict)]
464
+ # print("AI:", " ".join(ai_texts))
465
+ # else:
466
+ # print("AI:", msg.content)
467
+
468
+ # idea_complete = evaluate_idea_completion(response)
469
+
470
+ # return {
471
+ # "messages": response,
472
+ # "idea_complete": idea_complete
473
+ # }
474
+
475
+ # # message = llm_with_tools.invoke(state["messages"])
476
+ # # Because we will be interrupting during tool execution,
477
+ # # we disable parallel tool calling to avoid repeating any
478
+ # # tool invocations when we resume.
479
+ # assert len(response.tool_calls) <= 1
480
+ # idea_complete = evaluate_idea_completion(response)
481
+
482
+ # return {
483
+ # "messages": response,
484
+ # "idea_complete": idea_complete
485
+ # }
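
The hunks above do not show how guidance_node itself is routed to the new nodes. Below is a minimal sketch of the wiring this commit appears to assume, using LangGraph's conditional-edge API; the START edge, the target list, and the MemorySaver checkpointer are assumptions, not taken from this diff.

    from langgraph.graph import START, END
    from langgraph.checkpoint.memory import MemorySaver

    # guidance_routing currently returns "brainstorming_node" or END, so the
    # router's possible targets are listed explicitly; "planning_node" and
    # "tools" are placeholders for the still-commented-out branches.
    workflow.add_edge(START, "guidance_node")
    workflow.add_conditional_edges(
        "guidance_node",
        guidance_routing,
        ["brainstorming_node", "planning_node", "tools", END],
    )
    compiled_graph = workflow.compile(checkpointer=MemorySaver())

Since brainstorming_node and planning_node both edge back into guidance_node, the graph only terminates when guidance_routing returns END, which makes the completion flags (e.g. the "IDEA FINALIZED:" check setting brainstorming_complete) load-bearing for avoiding the recursion errors recorded in app.log.
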