wishwakankanamg committed
Commit 11d7bdc
1 Parent(s): d9db98e
Files changed (4)
  1. __pycache__/graph.cpython-310.pyc +0 -0
  2. app.log +164 -0
  3. graph.png +0 -0
  4. graph.py +348 -254
__pycache__/graph.cpython-310.pyc CHANGED
Binary files a/__pycache__/graph.cpython-310.pyc and b/__pycache__/graph.cpython-310.pyc differ
 
app.log CHANGED
@@ -50797,3 +50797,167 @@ anthropic.BadRequestError: Error code: 400 - {'type': 'error', 'error': {'type':
 2025-06-06 21:18:43:__main__:INFO: Prompt: You are a helpful assistant.
 2025-06-06 21:27:34:__main__:INFO: Prompt: You are a helpful assistant.
 2025-06-06 21:27:48:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 21:39:54:__main__:INFO: Starting the interface
+2025-06-06 21:40:00:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 21:40:04:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 21:40:10:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 368, in brainstorming_node
+    content = response_message.content.strip()
+AttributeError: 'list' object has no attribute 'strip'
+2025-06-06 22:34:00:__main__:INFO: Starting the interface
+2025-06-06 22:34:11:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 22:34:21:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 22:34:26:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 368, in brainstorming_node
+    content = response_message.content.strip()
+AttributeError: 'list' object has no attribute 'strip'
+2025-06-06 22:34:41:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 22:40:24:__main__:INFO: Starting the interface
+2025-06-06 22:40:33:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 22:40:41:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 22:47:29:__main__:INFO: Starting the interface
+2025-06-06 22:47:36:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 22:47:51:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 22:48:20:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
+    raise GraphRecursionError(msg)
+langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
+For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
+2025-06-06 23:09:32:__main__:INFO: Starting the interface
+2025-06-06 23:09:39:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:09:44:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 23:10:01:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
+    raise GraphRecursionError(msg)
+langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
+For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
+2025-06-06 23:10:03:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:16:42:__main__:INFO: Starting the interface
+2025-06-06 23:17:03:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:17:10:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:17:20:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 23:17:52:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
+    raise GraphRecursionError(msg)
+langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
+For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
+2025-06-06 23:23:37:__main__:INFO: Starting the interface
+2025-06-06 23:23:44:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:23:48:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 23:24:56:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
+    raise GraphRecursionError(msg)
+langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
+For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
+2025-06-06 23:28:59:__main__:INFO: Starting the interface
+2025-06-06 23:29:04:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:29:08:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 23:29:57:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
+    raise GraphRecursionError(msg)
+langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
+For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
+2025-06-06 23:31:00:__main__:INFO: Starting the interface
+2025-06-06 23:31:05:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:31:06:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:31:14:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 23:32:28:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
+    raise GraphRecursionError(msg)
+langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
+For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
+2025-06-06 23:33:00:__main__:INFO: Starting the interface
+2025-06-06 23:33:05:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:33:13:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 23:33:35:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
+    raise GraphRecursionError(msg)
+langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
+For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
+2025-06-06 23:38:07:__main__:INFO: Starting the interface
+2025-06-06 23:38:14:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:38:29:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 23:41:05:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 23:53:54:__main__:INFO: Starting the interface
+2025-06-06 23:54:01:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:54:13:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 23:54:14:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
+    raise GraphRecursionError(msg)
+langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
+For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
+2025-06-06 23:54:27:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:57:18:__main__:INFO: Starting the interface
+2025-06-06 23:57:24:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 23:57:29:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 23:57:30:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
+    raise GraphRecursionError(msg)
+langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
+For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
+2025-06-06 23:57:38:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 23:57:39:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
+    raise GraphRecursionError(msg)
+langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
+For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
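
Two failures dominate the added log lines. The `AttributeError: 'list' object has no attribute 'strip'` comes from `brainstorming_node` calling `.strip()` directly on `AIMessage.content`, which Anthropic models frequently return as a list of content blocks rather than a plain string; the graph.py changes below normalize this. The repeated `GraphRecursionError` entries carry LangGraph's own remedy: pass a larger `recursion_limit` in the run config. A minimal sketch of how `chat_fn` might do that (the input shape, stream modes, and the limit of 50 are illustrative assumptions, not the app's actual code):

```python
# Hedged sketch: raising the recursion limit for a single run of the compiled graph.
# `graph` is the CompiledStateGraph built by define_workflow() in graph.py.
async def chat_fn(message: str, history: list):
    config = {"recursion_limit": 50}  # the log shows runs stopping at 20
    async for stream_mode, chunk in graph.astream(
        {"messages": [("user", message)]},        # assumed input shape
        config=config,
        stream_mode=["updates", "messages"],      # list form yields (mode, chunk) pairs
    ):
        yield stream_mode, chunk
```

Raising the limit only buys headroom, though: the likelier root cause here is a cycle between `guidance_node` and `brainstorming_node` that never reaches a stop condition, which the edge rewiring in graph.py below is evidently iterating on.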
graph.png CHANGED
graph.py CHANGED
@@ -129,124 +129,35 @@ class GraphProcessingState(BaseModel):
 
 
 
-# async def guidance_node(state: GraphProcessingState, config=None):
-#     print("\n--- Guidance Node (Debug via print) ---")
-
-#     print(f"Prompt: {state.prompt}")
-#     for message in state.messages:
-#         if isinstance(message, HumanMessage):
-#             print(f"Human: {message.content}")
-#         elif isinstance(message, AIMessage):
-#             if message.content:
-#                 if isinstance(message.content, list):
-#                     texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
-#                     if texts:
-#                         print(f"AI: {' '.join(texts)}")
-#                 elif isinstance(message.content, str):
-#                     print(f"AI: {message.content}")
-#         elif isinstance(message, SystemMessage):
-#             print(f"System: {message.content}")
-#         elif isinstance(message, ToolMessage):
-#             print(f"Tool: {message.content}")
-
-#     print(f"Tools Enabled: {state.tools_enabled}")
-#     print(f"Search Enabled: {state.search_enabled}")
-#     print(f"Next Stage: {state.next_stage}")
 
 
-#     print(f"Brainstorming Complete: {state.brainstorming_complete}")
-
-
-#     guidance_node.count = getattr(guidance_node, 'count', 0) + 1
-#     print('\nGuidance Node called count', guidance_node.count)
-#     print("\n--- End Guidance Node Debug ---")
-
-#     stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
-#     completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
-#     incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]
-
-#     if not incomplete:
-#         print("All stages complete!")
-#         # Handle case where all stages are complete
-#         # You might want to return a message and end, or set proposed_next_stage to a special value
-#         ai_all_complete_msg = AIMessage(content="All DIY project stages are complete!")
-#         return {
-#             "messages": current_messages + [ai_all_complete_msg],
-#             "next_stage": "end_project", # Or None, or a final summary node
-#             "pending_approval_stage": None,
-#         }
-#     else:
-#         # THIS LINE DEFINES THE VARIABLE
-#         proposed_next_stage = incomplete[0]
-
-#     print(f"Proposed next stage: {proposed_next_stage}")
-
-#     status_summary = f"Completed stages: {completed}\nIncomplete stages: {incomplete}"
-
-#     guidance_prompt_text = (
-#         "You are the Guiding Assistant for a DIY project. Your primary responsibility is to determine the next logical step "
-#         "and then **obtain the user's explicit approval** before proceeding.\n\n"
-#         f"CURRENT PROJECT STATUS:\n{status_summary}\n\n"
-#         f"Based on the status, the most logical next stage appears to be: **'{proposed_next_stage}'**.\n\n"
-#         "YOUR TASK:\n"
-#         f"1. Formulate a clear and concise question for the user, asking if they agree to proceed to the **'{proposed_next_stage}'** stage. For example: 'It looks like '{proposed_next_stage}' is next. Shall we proceed with that?' or 'Are you ready to move on to {proposed_next_stage}?'\n"
-#         "2. **You MUST use the 'human_assistance' tool to ask this question.** Do not answer directly. Invoke the tool with your question.\n"
-#         "Example of tool usage (though you don't write this, you *call* the tool):\n"
-#         "Tool Call: human_assistance(query='The next stage is planning. Do you want to proceed with planning?')\n\n"
-#         "Consider the user's most recent message if it provides any preference."
-#     )
-
-#     if state.prompt:
-#         final_prompt = "\n".join([guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
-#     else:
-#         final_prompt = "\n".join([guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])
-
-#     prompt = ChatPromptTemplate.from_messages(
-#         [
-#             ("system", final_prompt),
-#             MessagesPlaceholder(variable_name="messages"),
-#         ]
-#     )
-
-#     assistant_model = model.bind_tools([human_assistance])
-
-#     chain = prompt | assistant_model
 
-#     try:
-#         response = await chain.ainvoke({"messages": state.messages}, config=config)
 
-#         for msg in response:
-#             if isinstance(msg, HumanMessage):
-#                 print("Human:", msg.content)
-#             elif isinstance(msg, AIMessage):
-#                 if isinstance(msg.content, list):
-#                     ai_texts = [part.get("text", "") for part in msg.content if isinstance(part, dict)]
-#                     print("AI:", " ".join(ai_texts))
-#                 else:
-#                     print("AI:", msg.content)
 
-#         # Check for tool calls in the response
-#         if hasattr(response, "tool_calls"):
-#             for tool_call in response.tool_calls:
-#                 tool_name = tool_call['name']
-#                 if tool_name == "human_assistance":
-#                     query = tool_call['args']['query']
-#                     print(f"Human input needed: {query}")
-#                     # Handle human assistance tool call
-#                     # You can pause execution and wait for user input here
 
-#         return {
-#             "messages": [response],
-#             "next_stage": incomplete[0] if incomplete else "brainstorming"
-#         }
-#     except Exception as e:
-#         print(f"Error in guidance node: {e}")
-#         return {
-#             "messages": [AIMessage(content="Error in guidance node.")],
-#             "next_stage": "brainstorming"
-#         }
 
-async def guidance_node(state: GraphProcessingState, config=None):
+async def guidance_node(state: GraphProcessingState, config=None):
+    print("\n--- Guidance Node (Debug via print) ---") # Added a newline for clarity
+
+    print(f"Prompt: {state.prompt}")
+    for message in state.messages:
+        if isinstance(message, HumanMessage):
+            print(f"Human: {message.content}")
+        elif isinstance(message, AIMessage):
+            # Check if content is non-empty
+            if message.content:
+                # If content is a list (e.g., list of dicts), extract text
+                if isinstance(message.content, list):
+                    texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
+                    if texts:
+                        print(f"AI: {' '.join(texts)}")
+                elif isinstance(message.content, str):
+                    print(f"AI: {message.content}")
+        elif isinstance(message, SystemMessage):
+            print(f"System: {message.content}")
+        elif isinstance(message, ToolMessage):
+            print(f"Tool: {message.content}")
+
+    # Log boolean completion flags
     # Define the order of stages
     stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
 
@@ -254,6 +165,10 @@ async def guidance_node(state: GraphProcessingState, config=None):
     completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
     incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]
 
+    print(f"Tools Enabled: {state.tools_enabled}")
+    print(f"Search Enabled: {state.search_enabled}")
+    print(f"Next Stage: {state.next_stage}")
+
     # Determine the next stage
     if not incomplete:
         # All stages are complete
@@ -272,7 +187,7 @@ async def guidance_node(state: GraphProcessingState, config=None):
     }
 
 async def brainstorming_node(state: GraphProcessingState, config=None):
-    print("\n--- Guidance Node (Debug via print) ---") # Added a newline for clarity
+    print("\n--- brainstorming Node (Debug via print) ---") # Added a newline for clarity
 
 
     print(f"Prompt: {state.prompt}")
@@ -296,16 +211,12 @@ async def brainstorming_node(state: GraphProcessingState, config=None):
 
     print(f"Tools Enabled: {state.tools_enabled}")
     print(f"Search Enabled: {state.search_enabled}")
-    print(f"Next Stage: {state.next_stage}")
 
+    print(f"Next Stage: {state.next_stage}")
 
     # Log boolean completion flags
-    print(f"Idea Complete: {state.idea_complete}")
-    print(f"Brainstorming Complete: {state.brainstorming_complete}")
-    print(f"Planning Complete: {state.planning_complete}")
-    print(f"Drawing Complete: {state.drawing_complete}")
-    print(f"Product Searching Complete: {state.product_searching_complete}")
-    print(f"Purchasing Complete: {state.purchasing_complete}")
+    print(f"is Brainstorming Complete: {state.brainstorming_complete}")
+
 
     # Check if model is available
     if not model:
@@ -321,78 +232,168 @@ async def brainstorming_node(state: GraphProcessingState, config=None):
     if not filtered_messages:
         filtered_messages.append(AIMessage(content="No valid messages provided."))
 
-    # Define the system prompt guiding the brainstorming assistant
-    brainstorming_system_prompt = ChatPromptTemplate.from_messages(
+    stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
+    completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
+    incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]
+
+    if not incomplete:
+        print("All stages complete!")
+        # Handle case where all stages are complete
+        # You might want to return a message and end, or set proposed_next_stage to a special value
+        ai_all_complete_msg = AIMessage(content="All DIY project stages are complete!")
+        return {
+            "messages": current_messages + [ai_all_complete_msg],
+            "next_stage": "end_project", # Or None, or a final summary node
+            "pending_approval_stage": None,
+        }
+    else:
+        # THIS LINE DEFINES THE VARIABLE
+        proposed_next_stage = incomplete[0]
+
+    print(f"Proposed next stage: {proposed_next_stage}")
+
+    status_summary = f"Completed stages: {completed}\nIncomplete stages: {incomplete}"
+
+    guidance_prompt_text = (
+        "You are the Guiding Assistant for a DIY project. Your primary responsibility is to determine the next logical step "
+        "and then **obtain the user's explicit approval** before proceeding.\n\n"
+        f"CURRENT PROJECT STATUS:\n{status_summary}\n\n"
+        f"Based on the status, the most logical next stage appears to be: **'{proposed_next_stage}'**.\n\n"
+        "YOUR TASK:\n"
+        f"1. Formulate a clear and concise question for the user, asking if they agree to proceed to the **'{proposed_next_stage}'** stage. For example: 'It looks like '{proposed_next_stage}' is next. Shall we proceed with that?' or 'Are you ready to move on to {proposed_next_stage}?'\n"
+        "2. **You MUST use the 'human_assistance' tool to ask this question.** Do not answer directly. Invoke the tool with your question.\n"
+        "Example of tool usage (though you don't write this, you *call* the tool):\n"
+        "Tool Call: human_assistance(query='The next stage is planning. Do you want to proceed with planning?')\n\n"
+        "Consider the user's most recent message if it provides any preference."
+    )
+
+
+    if state.prompt:
+        final_prompt = "\n".join([guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
+    else:
+        final_prompt = "\n".join([guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])
+
+    prompt = ChatPromptTemplate.from_messages(
         [
-            ("system",
-             "You are a brainstorming assistant for DIY projects. Your goal is to help the user finalize a single, specific DIY project idea. "
-             "The project idea MUST satisfy these critical criteria:\n"
-             "1. Buildable by an average person with basic DIY skills.\n"
-             "2. Uses only materials and basic tools commonly available in general hardware stores, craft stores, or supermarkets worldwide. "
-             "(e.g., wood, screws, glue, paint, fabric, cardboard, basic hand tools like screwdrivers, hammers, saws, drills. "
-             "AVOID specialized electronic components, 3D printing, specific brand items not universally available, or complex machinery.)\n"
-             "3. The final product should be a tangible item.\n\n"
-             "Your interaction flow:\n"
-             "- Engage with the user to understand their interests or initial thoughts.\n"
-             "- Propose ideas or refine user's suggestions to meet the criteria.\n"
-             "- If an idea proposed by you or the user clearly meets all criteria and you believe it's a good final choice, respond ONLY with the exact phrase: "
-             "'IDEA FINALIZED: [Name of the Idea]'. Example: 'IDEA FINALIZED: Simple Wooden Spice Rack'. "
-             "Do not add any other text before or after this phrase if you use it.\n"
-             "- If you need more information from the user to refine an idea, or if their current suggestion doesn't meet the criteria, ask clarifying questions. "
-             "Guide them towards simpler, more accessible options.\n"
-             "- If you are stuck, the user's request is too vague for you to make progress towards a suitable idea, or they propose something clearly outside the "
-             "'universally available materials' constraint, and you need their direct input to clarify fundamental aspects, you should use the 'human_assistance' tool. "
-             "Frame your request for the human clearly. For example: 'To ensure the idea is simple enough, could you tell me what kind of tools you are comfortable using?' "
-             "or 'The idea of a custom electronic gadget is interesting, but finding parts globally is hard. Could we think of a non-electronic version, or what's the core function "
-             "you want from it that we could achieve with simpler materials?'"
-            ),
+            ("system", final_prompt),
             MessagesPlaceholder(variable_name="messages"),
         ]
     )
 
+    assistant_model = model.bind_tools([human_assistance])
+
+    # Define the system prompt guiding the brainstorming assistant
+    # brainstorming_system_prompt = ChatPromptTemplate.from_messages(
+    #     [
+    #         ("system",
+    #          "You are a brainstorming assistant for DIY projects. Your goal is to help the user finalize a single, specific DIY project idea. "
+    #          "The project idea MUST satisfy these critical criteria:\n"
+    #          "1. Buildable by an average person with basic DIY skills.\n"
+    #          "2. Uses only materials and basic tools commonly available in general hardware stores, craft stores, or supermarkets worldwide. "
+    #          "(e.g., wood, screws, glue, paint, fabric, cardboard, basic hand tools like screwdrivers, hammers, saws, drills. "
+    #          "AVOID specialized electronic components, 3D printing, specific brand items not universally available, or complex machinery.)\n"
+    #          "3. The final product should be a tangible item.\n\n"
+    #          "Your interaction flow:\n"
+    #          "- Engage with the user to understand their interests or initial thoughts.\n"
+    #          "- Propose ideas or refine user's suggestions to meet the criteria.\n"
+    #          "- If an idea proposed by you or the user clearly meets all criteria and you believe it's a good final choice, respond ONLY with the exact phrase: "
+    #          "2. **You MUST use the 'human_assistance' tool to ask this question.** Do not answer directly. Invoke the tool with your question.\n"
+    #          "Example of tool usage (though you don't write this, you *call* the tool):\n"
+    #          "Tool Call: human_assistance(query='The next stage is planning. Do you want to proceed with planning?')\n\n"
+    #          "Consider the user's most recent message if it provides any preference."
+    #          "'IDEA FINALIZED: [Name of the Idea]'. Example: 'IDEA FINALIZED: Simple Wooden Spice Rack'. "
+    #          "Do not add any other text before or after this phrase if you use it.\n"
+    #         ),
+    #         MessagesPlaceholder(variable_name="messages"),
+    #     ]
+    # )
+
     # Tools allowed for brainstorming
     node_tools = [human_assistance]
     if state.search_enabled and tavily_search_tool: # only add search tool if enabled and initialized
         node_tools.append(tavily_search_tool)
 
     llm_with_tools = model.bind_tools(node_tools)
-    chain = brainstorming_system_prompt | llm_with_tools
+    chain = prompt | llm_with_tools
 
     # Pass filtered messages to the chain
-    response_message = await chain.ainvoke({"messages": filtered_messages}, config=config)
-
-    updates = {"messages": [response_message]}
-    print('response from brainstorm', response_message)
-
-    if isinstance(response_message, AIMessage) and response_message.content:
-        content = response_message.content.strip()
-
-        if content.startswith("IDEA FINALIZED:"):
-            print('final idea')
-            updates["brainstorming_complete"] = True
-            updates["tool_call_required"] = False
-            updates["loop_brainstorming"] = False
-            print(f"✅ Brainstorming complete! Idea: {content}")
-
-        elif getattr(response_message, "tool_calls", None):
-            print('tool call requested')
-            updates["tool_call_required"] = True
-            updates["loop_brainstorming"] = False
-            print(f"🛠️ Brainstorming node initiated tool call(s): {response_message.tool_calls}")
+    try:
+
+        response = await chain.ainvoke({"messages": filtered_messages}, config=config)
+
+        for msg in response:
+            if isinstance(msg, HumanMessage):
+                print("Human:", msg.content)
+            elif isinstance(msg, AIMessage):
+                if isinstance(msg.content, list):
+                    ai_texts = [part.get("text", "") for part in msg.content if isinstance(part, dict)]
+                    print("AI:", " ".join(ai_texts))
+                else:
+                    print("AI:", msg.content)
+
+        if hasattr(response, "tool_calls"):
+            for tool_call in response.tool_calls:
+                tool_name = tool_call['name']
+                if tool_name == "human_assistance":
+                    query = tool_call['args']['query']
+                    print(f"Human input needed: {query}")
+
+        updates = {"messages": [response]}
+        print('response from brainstorm', response)
+
+        if isinstance(response, AIMessage) and response.content:
+            if isinstance(response.content, str):
+                content = response.content.strip()
+            elif isinstance(response.content, list):
+                texts = [item.get("text", "") for item in response.content if isinstance(item, dict)]
+                content = " ".join(texts).strip()
+            else:
+                content = str(response.content).strip()
+
+            if content.startswith("IDEA FINALIZED:"):
+                print('✅ final idea')
+                updates.update({
+                    "brainstorming_complete": True,
+                    "tool_call_required": False,
+                    "loop_brainstorming": False,
+                })
+
+            else:
+                tool_calls = getattr(response, "tool_calls", None)
+
+                if tool_calls:
+                    print('🛠️ tool call requested')
+                    updates.update({
+                        "tool_call_required": True,
+                        "loop_brainstorming": False,
+                    })
+
+                    for tool_call in tool_calls:
+                        if isinstance(tool_call, dict) and 'name' in tool_call and 'args' in tool_call:
+                            print(f"🔧 Tool Call (Dict): {tool_call.get('name')}, Args: {tool_call.get('args')}")
+                        else:
+                            print(f"🔧 Unknown tool_call format: {tool_call}")
+                else:
+                    print('💬 keep brainstorming')
+                    updates.update({
+                        "tool_call_required": False,
+                        "loop_brainstorming": True,
+                    })
+                    print(f"Brainstorming continues: {content}")
 
         else:
-            print('keep brainstorming')
+            # If no proper response, keep looping brainstorming
             updates["tool_call_required"] = False
             updates["loop_brainstorming"] = True
-            print(f"💬 Brainstorming node continues discussion: {content}")
 
-    else:
-        # If no proper response, keep looping brainstorming
-        updates["tool_call_required"] = False
-        updates["loop_brainstorming"] = True
-
-    print("\n--- End Brainstorming Node Debug ---")
-    return updates
+        print("\n--- End Brainstorming Node Debug ---")
+        return updates
+    except Exception as e:
+        print(f"Error in guidance node: {e}")
+        return {
+            "messages": [AIMessage(content="Error in guidance node.")],
+            "next_stage": "brainstorming"
+        }
 
 def brainstorming_routing(state: GraphProcessingState) -> str:
     print("\n--- brainstorming_routing Edge (Debug via print) ---") # Added a newline for clarity
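
The `try` block above is the commit's response to the `AttributeError` in app.log: `response.content` can be a plain string or a list of content blocks, so it is normalized before `.strip()` is called. The same normalization could be factored into a small helper; a sketch follows (the name `message_text` is hypothetical, not part of this repo):

```python
from langchain_core.messages import AIMessage

def message_text(message: AIMessage) -> str:
    """Flatten AIMessage.content (a str, or a list of content blocks) to plain text."""
    content = message.content
    if isinstance(content, str):
        return content.strip()
    if isinstance(content, list):
        # Anthropic-style blocks look like {"type": "text", "text": "..."}.
        return " ".join(
            block.get("text", "") for block in content if isinstance(block, dict)
        ).strip()
    return str(content).strip()

# Usage inside brainstorming_node (sketch):
#     content = message_text(response)
#     if content.startswith("IDEA FINALIZED:"):
#         ...
```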
@@ -422,96 +423,29 @@ def brainstorming_routing(state: GraphProcessingState) -> str:
         return "guidance_node"
 
 
-async def planning_node(state: GraphProcessingState, config=None):
-    # Define the system prompt for planning
-    planning_prompt = "Based on the user's idea, create a detailed step-by-step plan to build the DIY product."
-
-    # Combine the planning prompt with any existing prompts
-    if state.prompt:
-        final_prompt = "\n".join([planning_prompt, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
-    else:
-        final_prompt = "\n".join([planning_prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
-
-    # Create the prompt template
-    prompt = ChatPromptTemplate.from_messages(
-        [
-            ("system", final_prompt),
-            MessagesPlaceholder(variable_name="messages"),
-        ]
-    )
-
-    # Bind tools if necessary
-    assistant_tools = []
-    if state.tools_enabled.get("download_website_text", True):
-        assistant_tools.append(download_website_text)
-    if search_enabled and state.tools_enabled.get("tavily_search_results_json", True):
-        assistant_tools.append(tavily_search_tool)
-    assistant_model = model.bind_tools(assistant_tools)
-
-    # Create the chain and invoke it
-    chain = prompt | assistant_model
-    response = await chain.ainvoke({"messages": state.messages}, config=config)
-
-    return {
-        "messages": response
-    }
-
-def custom_route_after_guidance(state: GraphProcessingState) -> Literal["execute_tools", "proceed_to_next_stage"]:
-    """
-    Checks the last message from the 'guidance_node'.
-    If it's an AIMessage with tool_calls, routes to 'execute_tools'.
-    Otherwise, routes to 'proceed_to_next_stage' (which could be a router itself
-    or directly to the state.next_stage node if it's set).
-    """
-    print("\n--- Custom Route After Guidance Condition ---")
-    messages = state['messages'] if 'messages' in state else []
-    if not messages:
-        print("No messages found in state. Defaulting to proceed_to_next_stage.")
-        return "proceed_to_next_stage" # Or handle as an error/specific state
-
-    last_message = messages[-1]
-    print(f"Last message type: {type(last_message)}")
-
-    if isinstance(last_message, AIMessage):
-        if hasattr(last_message, "tool_calls") and last_message.tool_calls:
-            # Ensure tool_calls is not None and not an empty list
-            print(f"AIMessage has tool_calls: {last_message.tool_calls}")
-            return "execute_tools"
-        else:
-            print("AIMessage, but no tool_calls or tool_calls is empty.")
-            # If next_stage was set by guidance_node (e.g. after approval), we'd use that.
-            # For simplicity here, we just assume a generic "proceed"
-            return "proceed_to_next_stage"
-    else:
-        print(f"Last message is not an AIMessage (type: {type(last_message)}). Proceeding to next stage.")
-        return "proceed_to_next_stage"
-
-    print("--- End Custom Route After Guidance Condition ---")
+# def route_brainstorming(state):
+#     if state.get("tool_call_required"):
+#         return "tools"
+#     else:
+#         return "guidance_node"
 
 def guidance_routing(state: GraphProcessingState) -> str:
 
-    print("\n--- Guidance Routing Edge (Debug via print) ---") # Added a newline for clarity
+    print("\n--- Guidance Routing (Debug via print) ---") # Added a newline for clarity
     print(f"Prompt: {state.prompt}")
     # print(f"Message: {state.messages}")
     print(f"Tools Enabled: {state.tools_enabled}")
     print(f"Search Enabled: {state.search_enabled}")
     print(f"Next Stage: {state.next_stage}")
 
-
-    # Log boolean completion flags
-    print(f"Idea Complete: {state.idea_complete}")
-    print(f"Brainstorming Complete: {state.brainstorming_complete}")
-    print(f"Planning Complete: {state.planning_complete}")
-    print(f"Drawing Complete: {state.drawing_complete}")
-    print(f"Product Searching Complete: {state.product_searching_complete}")
-    print(f"Purchasing Complete: {state.purchasing_complete}")
-    print("--- End Guidance Node Debug ---") # Added for clarity
 
     next_stage = state.next_stage
     if next_stage == "brainstorming":
         return "brainstorming_node"
 
     elif next_stage == "planning":
+        print('\n may day may day may day may day may day')
         # return "planning_node"
     # elif next_stage == "drawing":
     #     return "drawing_node"
@@ -531,16 +465,13 @@ def define_workflow() -> CompiledStateGraph:
 
     # Add nodes
     workflow.add_node("tools", DebugToolNode(tools))
-    workflow.add_node("planning_node", planning_node)
+
     workflow.add_node("guidance_node", guidance_node)
     workflow.add_node("brainstorming_node", brainstorming_node)
+    # workflow.add_node("planning_node", planning_node)
 
     # Edges
-    workflow.add_edge("tools", "guidance_node")
-    # workflow.add_edge("planning_node", "guidance_node")
-    # workflow.add_edge("brainstorming_node", "guidance_node")
-    # workflow.add_edge("brainstorming_node", "tools")
-    workflow.add_edge("tools", "guidance_node")
+
     workflow.add_conditional_edges(
         "guidance_node",
         guidance_routing,
@@ -548,6 +479,15 @@ def define_workflow() -> CompiledStateGraph:
             "brainstorming_node" : "brainstorming_node",
         }
     )
+
+    workflow.add_conditional_edges(
+        "brainstorming_node",
+        tools_condition,
+    )
+    workflow.add_edge("tools", "guidance_node")
+    workflow.add_edge("brainstorming_node", "guidance_node")
+
+
     # workflow.add_conditional_edges(
     #     "guidance_node", # The source node
    #     custom_route_after_guidance, # Your custom condition function
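
Note what this wiring does: `tools_condition` (from `langgraph.prebuilt`) routes `brainstorming_node` to `tools` when the last AI message carries tool calls and to `END` otherwise, while the unconditional `workflow.add_edge("brainstorming_node", "guidance_node")` schedules guidance after every brainstorming step, and `tools` feeds back into `guidance_node`. Those overlapping paths can keep the guidance/brainstorming cycle alive, which is consistent with the `GraphRecursionError` entries in app.log. For contrast, a loop-free sketch of the standard tool-calling pattern, returning tool results to the node that requested them (illustrative only, not the commit's final design):

```python
from langgraph.graph import StateGraph, START, END
from langgraph.prebuilt import ToolNode, tools_condition

# Sketch under assumed names from this file (GraphProcessingState, guidance_node,
# brainstorming_node, guidance_routing, tools). Illustrative wiring only.
workflow = StateGraph(GraphProcessingState)
workflow.add_node("guidance_node", guidance_node)
workflow.add_node("brainstorming_node", brainstorming_node)
workflow.add_node("tools", ToolNode(tools))

workflow.add_edge(START, "guidance_node")
workflow.add_conditional_edges(
    "guidance_node", guidance_routing, {"brainstorming_node": "brainstorming_node"}
)
# Route to tools only when the model asked for them; otherwise end the turn
# instead of unconditionally re-entering guidance_node.
workflow.add_conditional_edges(
    "brainstorming_node", tools_condition, {"tools": "tools", END: END}
)
workflow.add_edge("tools", "brainstorming_node")  # resume the caller with tool results
graph = workflow.compile()
```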
@@ -666,4 +606,158 @@ graph = define_workflow()
 
 
 
-#
+#
+
+
+# async def planning_node(state: GraphProcessingState, config=None):
+#     # Define the system prompt for planning
+#     planning_prompt = "Based on the user's idea, create a detailed step-by-step plan to build the DIY product."
+
+#     # Combine the planning prompt with any existing prompts
+#     if state.prompt:
+#         final_prompt = "\n".join([planning_prompt, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
+#     else:
+#         final_prompt = "\n".join([planning_prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
+
+#     # Create the prompt template
+#     prompt = ChatPromptTemplate.from_messages(
+#         [
+#             ("system", final_prompt),
+#             MessagesPlaceholder(variable_name="messages"),
+#         ]
+#     )
+
+#     # Bind tools if necessary
+#     assistant_tools = []
+#     if state.tools_enabled.get("download_website_text", True):
+#         assistant_tools.append(download_website_text)
+#     if search_enabled and state.tools_enabled.get("tavily_search_results_json", True):
+#         assistant_tools.append(tavily_search_tool)
+#     assistant_model = model.bind_tools(assistant_tools)
+
+#     # Create the chain and invoke it
+#     chain = prompt | assistant_model
+#     response = await chain.ainvoke({"messages": state.messages}, config=config)
+
+#     return {
+#         "messages": response
+#     }
+
+
+
+# async def guidance_node(state: GraphProcessingState, config=None):
+#     print("\n--- Guidance Node (Debug via print) ---")
+
+#     print(f"Prompt: {state.prompt}")
+#     for message in state.messages:
+#         if isinstance(message, HumanMessage):
+#             print(f"Human: {message.content}")
+#         elif isinstance(message, AIMessage):
+#             if message.content:
+#                 if isinstance(message.content, list):
+#                     texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
+#                     if texts:
+#                         print(f"AI: {' '.join(texts)}")
+#                 elif isinstance(message.content, str):
+#                     print(f"AI: {message.content}")
+#         elif isinstance(message, SystemMessage):
+#             print(f"System: {message.content}")
+#         elif isinstance(message, ToolMessage):
+#             print(f"Tool: {message.content}")
+
+#     print(f"Tools Enabled: {state.tools_enabled}")
+#     print(f"Search Enabled: {state.search_enabled}")
+#     print(f"Next Stage: {state.next_stage}")
+
+
+#     print(f"Brainstorming Complete: {state.brainstorming_complete}")
+
+
+#     guidance_node.count = getattr(guidance_node, 'count', 0) + 1
+#     print('\nGuidance Node called count', guidance_node.count)
+#     print("\n--- End Guidance Node Debug ---")
+
+#     stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
+#     completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
+#     incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]
+
+#     if not incomplete:
+#         print("All stages complete!")
+#         # Handle case where all stages are complete
+#         # You might want to return a message and end, or set proposed_next_stage to a special value
+#         ai_all_complete_msg = AIMessage(content="All DIY project stages are complete!")
+#         return {
+#             "messages": current_messages + [ai_all_complete_msg],
+#             "next_stage": "end_project", # Or None, or a final summary node
+#             "pending_approval_stage": None,
+#         }
+#     else:
+#         # THIS LINE DEFINES THE VARIABLE
+#         proposed_next_stage = incomplete[0]
+
+#     print(f"Proposed next stage: {proposed_next_stage}")
+
+#     status_summary = f"Completed stages: {completed}\nIncomplete stages: {incomplete}"
+
+#     guidance_prompt_text = (
+#         "You are the Guiding Assistant for a DIY project. Your primary responsibility is to determine the next logical step "
+#         "and then **obtain the user's explicit approval** before proceeding.\n\n"
+#         f"CURRENT PROJECT STATUS:\n{status_summary}\n\n"
+#         f"Based on the status, the most logical next stage appears to be: **'{proposed_next_stage}'**.\n\n"
+#         "YOUR TASK:\n"
+#         f"1. Formulate a clear and concise question for the user, asking if they agree to proceed to the **'{proposed_next_stage}'** stage. For example: 'It looks like '{proposed_next_stage}' is next. Shall we proceed with that?' or 'Are you ready to move on to {proposed_next_stage}?'\n"
+#         "2. **You MUST use the 'human_assistance' tool to ask this question.** Do not answer directly. Invoke the tool with your question.\n"
+#         "Example of tool usage (though you don't write this, you *call* the tool):\n"
+#         "Tool Call: human_assistance(query='The next stage is planning. Do you want to proceed with planning?')\n\n"
+#         "Consider the user's most recent message if it provides any preference."
+#     )
+
+#     if state.prompt:
+#         final_prompt = "\n".join([guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
+#     else:
+#         final_prompt = "\n".join([guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])
+
+#     prompt = ChatPromptTemplate.from_messages(
+#         [
+#             ("system", final_prompt),
+#             MessagesPlaceholder(variable_name="messages"),
+#         ]
+#     )
+
+#     assistant_model = model.bind_tools([human_assistance])
+
+#     chain = prompt | assistant_model
+
+#     try:
+#         response = await chain.ainvoke({"messages": state.messages}, config=config)
+
+#         for msg in response:
+#             if isinstance(msg, HumanMessage):
+#                 print("Human:", msg.content)
+#             elif isinstance(msg, AIMessage):
+#                 if isinstance(msg.content, list):
+#                     ai_texts = [part.get("text", "") for part in msg.content if isinstance(part, dict)]
+#                     print("AI:", " ".join(ai_texts))
+#                 else:
+#                     print("AI:", msg.content)
+
+#         # Check for tool calls in the response
+#         if hasattr(response, "tool_calls"):
+#             for tool_call in response.tool_calls:
+#                 tool_name = tool_call['name']
+#                 if tool_name == "human_assistance":
+#                     query = tool_call['args']['query']
+#                     print(f"Human input needed: {query}")
+#                     # Handle human assistance tool call
+#                     # You can pause execution and wait for user input here
+
+#         return {
+#             "messages": [response],
+#             "next_stage": incomplete[0] if incomplete else "brainstorming"
+#         }
+#     except Exception as e:
+#         print(f"Error in guidance node: {e}")
+#         return {
+#             "messages": [AIMessage(content="Error in guidance node.")],
+#             "next_stage": "brainstorming"
+#         }