Commit 6a1ad3d · Parent(s): 212150e

fix

Files changed:
- __pycache__/graph.cpython-310.pyc +0 -0
- app.log +307 -0
- app.py +36 -6
- graph.py +111 -105
__pycache__/graph.cpython-310.pyc
CHANGED
Binary files a/__pycache__/graph.cpython-310.pyc and b/__pycache__/graph.cpython-310.pyc differ
app.log
CHANGED
@@ -49994,3 +49994,310 @@ Traceback (most recent call last):
     raise GraphRecursionError(msg)
 langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
 For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
+2025-06-06 17:07:08:__main__:INFO: Starting the interface
+2025-06-06 17:07:16:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 17:07:26:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 17:07:31:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 85, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 327, in brainstorming_node
+    test_tool_call = ToolCall(name="human_assistance", args={"question": "What materials do you have?"})
+NameError: name 'ToolCall' is not defined
+2025-06-06 17:09:30:__main__:INFO: Starting the interface
+2025-06-06 17:09:33:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 17:09:44:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 17:09:47:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 85, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 328, in brainstorming_node
+    test_message = AIMessage(content=None, tool_calls=[test_tool_call])
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/messages/ai.py", line 187, in __init__
+    super().__init__(content=content, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/messages/base.py", line 71, in __init__
+    super().__init__(content=content, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/load/serializable.py", line 130, in __init__
+    super().__init__(*args, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/pydantic/main.py", line 214, in __init__
+    validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/messages/ai.py", line 224, in _backwards_compat_tool_calls
+    values["tool_calls"] = [
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/messages/ai.py", line 225, in <listcomp>
+    create_tool_call(**{k: v for k, v in tc.items() if k != "type"})
+TypeError: tool_call() missing 1 required keyword-only argument: 'id'
+2025-06-06 17:13:34:__main__:INFO: Starting the interface
+2025-06-06 17:13:43:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 17:13:59:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 17:14:03:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 85, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 334, in brainstorming_node
+    test_message = AIMessage(content=None, tool_calls=[test_tool_call])
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/messages/ai.py", line 187, in __init__
+    super().__init__(content=content, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/messages/base.py", line 71, in __init__
+    super().__init__(content=content, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/load/serializable.py", line 130, in __init__
+    super().__init__(*args, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/pydantic/main.py", line 214, in __init__
+    validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
+pydantic_core._pydantic_core.ValidationError: 2 validation errors for AIMessage
+content.str
+  Input should be a valid string [type=string_type, input_value=None, input_type=NoneType]
+    For further information visit https://errors.pydantic.dev/2.10/v/string_type
+content.list[union[str,dict[any,any]]]
+  Input should be a valid list [type=list_type, input_value=None, input_type=NoneType]
+    For further information visit https://errors.pydantic.dev/2.10/v/list_type
+2025-06-06 17:18:55:__main__:INFO: Starting the interface
+2025-06-06 17:19:09:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 17:19:19:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 17:19:22:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 85, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 210, in guidance_node
+    response = await chain.ainvoke({"messages": state.messages}, config=config)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 3089, in ainvoke
+    input_ = await coro_with_context(part(), context, create_task=True)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 5444, in ainvoke
+    return await self.bound.ainvoke(
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 394, in ainvoke
+    llm_result = await self.agenerate_prompt(
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 968, in agenerate_prompt
+    return await self.agenerate(
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 926, in agenerate
+    raise exceptions[0]
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 1083, in _agenerate_with_cache
+    async for chunk in self._astream(messages, stop=stop, **kwargs):
+  File "/usr/local/lib/python3.10/site-packages/langchain_anthropic/chat_models.py", line 1394, in _astream
+    _handle_anthropic_bad_request(e)
+  File "/usr/local/lib/python3.10/site-packages/langchain_anthropic/chat_models.py", line 1374, in _astream
+    stream = await self._acreate(payload)
+  File "/usr/local/lib/python3.10/site-packages/langchain_anthropic/chat_models.py", line 1322, in _acreate
+    return await self._async_client.messages.create(**payload)
+  File "/usr/local/lib/python3.10/site-packages/anthropic/resources/messages/messages.py", line 2229, in create
+    return await self._post(
+  File "/usr/local/lib/python3.10/site-packages/anthropic/_base_client.py", line 1819, in post
+    return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
+  File "/usr/local/lib/python3.10/site-packages/anthropic/_base_client.py", line 1626, in request
+    raise self._make_status_error_from_response(err.response) from None
+anthropic.BadRequestError: Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'messages.1: `tool_use` ids were found without `tool_result` blocks immediately after: 5463c2ea-8caf-48fc-a755-e23181439455. Each `tool_use` block must have a corresponding `tool_result` block in the next message.'}}
+2025-06-06 17:44:15:__main__:INFO: Starting the interface
+2025-06-06 17:44:21:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 17:44:29:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 17:44:38:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 85, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 210, in guidance_node
+    response = await chain.ainvoke({"messages": state.messages}, config=config)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 3089, in ainvoke
+    input_ = await coro_with_context(part(), context, create_task=True)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 5444, in ainvoke
+    return await self.bound.ainvoke(
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 394, in ainvoke
+    llm_result = await self.agenerate_prompt(
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 968, in agenerate_prompt
+    return await self.agenerate(
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 926, in agenerate
+    raise exceptions[0]
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 1083, in _agenerate_with_cache
+    async for chunk in self._astream(messages, stop=stop, **kwargs):
+  File "/usr/local/lib/python3.10/site-packages/langchain_anthropic/chat_models.py", line 1394, in _astream
+    _handle_anthropic_bad_request(e)
+  File "/usr/local/lib/python3.10/site-packages/langchain_anthropic/chat_models.py", line 1374, in _astream
+    stream = await self._acreate(payload)
+  File "/usr/local/lib/python3.10/site-packages/langchain_anthropic/chat_models.py", line 1322, in _acreate
+    return await self._async_client.messages.create(**payload)
+  File "/usr/local/lib/python3.10/site-packages/anthropic/resources/messages/messages.py", line 2229, in create
+    return await self._post(
+  File "/usr/local/lib/python3.10/site-packages/anthropic/_base_client.py", line 1819, in post
+    return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
+  File "/usr/local/lib/python3.10/site-packages/anthropic/_base_client.py", line 1626, in request
+    raise self._make_status_error_from_response(err.response) from None
+anthropic.BadRequestError: Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'messages.1: `tool_use` ids were found without `tool_result` blocks immediately after: fa20aec0-e08a-42dc-b122-09be53ba1b26. Each `tool_use` block must have a corresponding `tool_result` block in the next message.'}}
+2025-06-06 17:49:43:__main__:INFO: Starting the interface
+2025-06-06 17:49:48:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 17:50:04:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 17:50:09:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 85, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 261, in brainstorming_node
+    result = human_assistance("What materials do you have?")
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/_api/deprecation.py", line 191, in warning_emitting_wrapper
+    return wrapped(*args, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/tools/base.py", line 893, in __call__
+    return self.run(tool_input, callbacks=callbacks)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/tools/base.py", line 771, in run
+    raise error_to_raise
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/tools/base.py", line 740, in run
+    response = context.run(self._run, *tool_args, **tool_kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/tools/structured.py", line 95, in _run
+    raise NotImplementedError(msg)
+NotImplementedError: StructuredTool does not support sync invocation.
+2025-06-06 17:59:36:__main__:INFO: Starting the interface
+2025-06-06 17:59:43:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 18:02:02:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 18:02:06:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 261, in brainstorming_node
+    result = await human_assistance.ainvoke({"query": "What materials do you have?"})
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/tools/structured.py", line 66, in ainvoke
+    return await super().ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/tools/base.py", line 520, in ainvoke
+    return await self.arun(tool_input, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/tools/base.py", line 884, in arun
+    raise error_to_raise
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/tools/base.py", line 853, in arun
+    response = await coro_with_context(coro, context)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/tools/structured.py", line 110, in _arun
+    return await self.coroutine(*args, **kwargs)
+  File "/home/user/app/graph.py", line 65, in human_assistance
+    human_response = await interrupt({"query": query})  # async wait
+  File "/usr/local/lib/python3.10/site-packages/langgraph/types.py", line 523, in interrupt
+    conf = get_config()["configurable"]
+  File "/usr/local/lib/python3.10/site-packages/langgraph/config.py", line 29, in get_config
+    raise RuntimeError("Called get_config outside of a runnable context")
+RuntimeError: Called get_config outside of a runnable context
+2025-06-06 18:08:07:__main__:INFO: Starting the interface
+2025-06-06 18:08:16:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 18:08:33:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 18:08:36:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 235, in brainstorming_node
+    from langgraph.types import ToolCall
+ImportError: cannot import name 'ToolCall' from 'langgraph.types' (/usr/local/lib/python3.10/site-packages/langgraph/types.py)
+2025-06-06 18:12:07:__main__:INFO: Starting the interface
+2025-06-06 18:12:15:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 18:12:24:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 18:13:00:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
+    raise GraphRecursionError(msg)
+langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
+For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
+2025-06-06 18:14:57:__main__:INFO: Starting the interface
+2025-06-06 18:15:04:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 18:16:24:__main__:INFO: Starting the interface
+2025-06-06 18:16:36:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 18:17:54:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 18:18:36:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2677, in astream
+    raise GraphRecursionError(msg)
+langgraph.errors.GraphRecursionError: Recursion limit of 20 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
+For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
+2025-06-06 18:18:39:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 18:31:54:__main__:INFO: Starting the interface
+2025-06-06 18:32:00:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 18:32:07:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 18:34:27:__main__:INFO: Starting the interface
+2025-06-06 18:34:31:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 18:35:28:__main__:INFO: Prompt: You are a helpful assistant.
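Note: the log above traces one debugging session: a NameError from an unimported ToolCall, a tool_call() missing its keyword-only `id`, a None content rejected by pydantic, an Anthropic 400 for a `tool_use` block with no matching `tool_result`, a sync call into an async-only StructuredTool, an interrupt() outside the graph runtime, and recursion-limit loops. For reference, a minimal sketch (not from the commit; the uuid-based id is an assumption) of a hand-built tool-calling AIMessage that passes the validations seen here:

    import uuid

    from langchain_core.messages import AIMessage, ToolCall

    # `id` is mandatory: tool_call() raised "missing 1 required keyword-only
    # argument: 'id'" above when it was omitted.
    call = ToolCall(
        name="human_assistance",
        args={"query": "What materials do you have?"},
        id=str(uuid.uuid4()),  # hypothetical id; any unique string works
    )

    # content must be a string or a list, never None (see the pydantic
    # ValidationError above), so use "" for a pure tool-call message.
    msg = AIMessage(content="", tool_calls=[call])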
app.py
CHANGED
@@ -66,12 +66,24 @@ async def chat_fn(user_input: str, history: dict, input_graph_state: dict, uuid:
     }
     if prompt:
         input_graph_state["prompt"] = prompt
-
-
-
-
-
-
+
+    if input_graph_state.get("awaiting_human_input"):
+        input_graph_state["messages"].append(
+            ToolMessage(
+                tool_call_id=input_graph_state.pop("human_assistance_tool_id"),
+                content=user_input
+            )
+        )
+        input_graph_state["awaiting_human_input"] = False
+    else:
+        # New user message
+        if "messages" not in input_graph_state:
+            input_graph_state["messages"] = []
+        input_graph_state["messages"].append(
+            HumanMessage(user_input[:USER_INPUT_MAX_LENGTH])
+        )
+        input_graph_state["messages"] = input_graph_state["messages"][-TRIM_MESSAGE_LENGTH:]
+
     config = RunnableConfig(
         recursion_limit=20,
         run_name="user_chat",
@@ -93,18 +105,34 @@ async def chat_fn(user_input: str, history: dict, input_graph_state: dict, uuid:
             if hasattr(last_message, "tool_calls"):
                 for msg_tool_call in last_message.tool_calls:
                     tool_name: str = msg_tool_call['name']
+
                     if tool_name == "tavily_search_results_json":
                         query = msg_tool_call['args']['query']
                         waiting_output_seq.append(f"Searching for '{query}'...")
                         yield "\n".join(waiting_output_seq), gr.skip(), gr.skip()
+
                     # download_website_text is the name of the function defined in graph.py
                     elif tool_name == "download_website_text":
                         url = msg_tool_call['args']['url']
                         waiting_output_seq.append(f"Downloading text from '{url}'...")
                         yield "\n".join(waiting_output_seq), gr.skip(), gr.skip()
+
+                    elif tool_name == "human_assistance":
+                        query = msg_tool_call["args"]["query"]
+                        waiting_output_seq.append(f"👤 Human input needed: {query}")
+
+                        # Save state to resume after user provides input
+                        input_graph_state["awaiting_human_input"] = True
+                        input_graph_state["human_assistance_tool_id"] = msg_tool_call["id"]
+
+                        # Show input textbox to user and pause graph execution
+                        yield "\n".join(waiting_output_seq), gr.Textbox(visible=True, label="Your input"), gr.skip()
+                        return  # Pause execution, resume in next call
+
                     else:
                         waiting_output_seq.append(f"Running {tool_name}...")
                         yield "\n".join(waiting_output_seq), gr.skip(), gr.skip()
+
             elif stream_mode == "messages":
                 msg, metadata = chunk
                 # print("output: ", msg, metadata)
@@ -119,10 +147,12 @@ async def chat_fn(user_input: str, history: dict, input_graph_state: dict, uuid:
                         current_chunk_text += block.get("text", "")
                     elif isinstance(block, str):  # Fallback if content is list of strings
                         current_chunk_text += block
+
 
                 if current_chunk_text:  # Only add and yield if there's actually text
                     output += current_chunk_text
                     yield output, gr.skip(), gr.skip()
+
                 # Trigger for asking follow up questions
                 # + store the graph state for next iteration
                 # yield output, dict(final_state), gr.skip()
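Note: the two app.py hunks form a pair. When the stream surfaces a human_assistance tool call, chat_fn stashes the tool-call id and returns; on the next invocation the user's reply is appended as a ToolMessage with that same tool_call_id, which is exactly the tool_result pairing that the Anthropic 400 errors in the log were demanding. A minimal sketch of the pairing rule in isolation (message values are illustrative):

    from langchain_core.messages import AIMessage, ToolMessage

    # An assistant turn that requests a tool must be followed by a tool turn
    # answering the same id, or Anthropic rejects the request with a 400.
    assistant_turn = AIMessage(
        content="",
        tool_calls=[{"name": "human_assistance", "args": {"query": "Materials?"}, "id": "call-1"}],
    )
    tool_turn = ToolMessage(tool_call_id="call-1", content="I have plywood and screws.")

    history = [assistant_turn, tool_turn]  # valid ordering for the Anthropic API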
graph.py
CHANGED
@@ -4,9 +4,11 @@ import logging
 import os
 from typing import Annotated
 from typing_extensions import TypedDict
+import uuid
+
 
 import aiohttp
-from langchain_core.messages import AIMessage, HumanMessage, AnyMessage
+from langchain_core.messages import AIMessage, HumanMessage, AnyMessage, ToolCall
 
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.tools import tool
@@ -58,9 +60,9 @@ def evaluate_idea_completion(response) -> bool:
     return all(keyword in response_text for keyword in required_keywords)
 
 @tool
-def human_assistance(query: str) -> str:
+async def human_assistance(query: str) -> str:
     """Request assistance from a human."""
-    human_response = interrupt({"query": query})
+    human_response = await interrupt({"query": query})  # async wait
     return human_response["data"]
 
 @tool
@@ -223,118 +225,113 @@ async def guidance_node(state: GraphProcessingState, config=None):
     }
 
 async def brainstorming_node(state: GraphProcessingState, config=None):
-    print("\n---
-
+    print("\n--- brainstorming_node (Debug via print) ---")
     print(f"Prompt: {state.prompt}")
+
+    # Log the content of each message
     for message in state.messages:
-        if isinstance(message, HumanMessage):
-            print(f"
-        elif isinstance(message, AIMessage):
-            if message.content:
-                if isinstance(message.content, list):
-                    texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
-                    if texts:
-                        print(f"AI: {' '.join(texts)}")
-                elif isinstance(message.content, str):
-                    print(f"AI: {message.content}")
-        elif isinstance(message, SystemMessage):
-            print(f"System: {message.content}")
-        elif isinstance(message, ToolMessage):
-            print(f"Tool: {message.content}")
+        if isinstance(message, (HumanMessage, AIMessage, SystemMessage, ToolMessage)):
+            print(f"Message Content: {message.content}")
 
-    print(f"Tools
+    print(f"Tools called: {state.tool_call_required}")
     print(f"Search Enabled: {state.search_enabled}")
     print(f"Next Stage: {state.next_stage}")
 
+    # Log boolean completion flags
     print(f"Idea Complete: {state.idea_complete}")
     print(f"Brainstorming Complete: {state.brainstorming_complete}")
     print(f"Planning Complete: {state.planning_complete}")
     print(f"Drawing Complete: {state.drawing_complete}")
    print(f"Product Searching Complete: {state.product_searching_complete}")
     print(f"Purchasing Complete: {state.purchasing_complete}")
+    print("--- End Guidance Node Debug ---")
 
-    # if
-
+    # Check if model is available
+    if not model:
+        return {"messages": [AIMessage(content="Model not available for brainstorming.")]}
 
-    # # Define the system prompt guiding the brainstorming assistant
-    # brainstorming_system_prompt = ChatPromptTemplate.from_messages(
-    #     [
-    #         ("system",
-    #          "You are a brainstorming assistant for DIY projects. Your goal is to help the user finalize a single, specific DIY project idea. "
-    #          "The project idea MUST satisfy these critical criteria:\n"
-    #          "1. Buildable by an average person with basic DIY skills.\n"
-    #          "2. Uses only materials and basic tools commonly available in general hardware stores, craft stores, or supermarkets worldwide. "
-    #          "(e.g., wood, screws, glue, paint, fabric, cardboard, basic hand tools like screwdrivers, hammers, saws, drills. "
-    #          "AVOID specialized electronic components, 3D printing, specific brand items not universally available, or complex machinery.)\n"
-    #          "3. The final product should be a tangible item.\n\n"
-    #          "Your interaction flow:\n"
-    #          "- Engage with the user to understand their interests or initial thoughts.\n"
-    #          "- Propose ideas or refine user's suggestions to meet the criteria.\n"
-    #          "- If an idea proposed by you or the user clearly meets all criteria and you believe it's a good final choice, respond ONLY with the exact phrase: "
-    #          "'IDEA FINALIZED: [Name of the Idea]'. Example: 'IDEA FINALIZED: Simple Wooden Spice Rack'. "
-    #          "Do not add any other text before or after this phrase if you use it.\n"
-    #          "- If you need more information from the user to refine an idea, or if their current suggestion doesn't meet the criteria, ask clarifying questions. "
-    #          "Guide them towards simpler, more accessible options.\n"
-    #          "- If you are stuck, the user's request is too vague for you to make progress towards a suitable idea, or they propose something clearly outside the "
-    #          "'universally available materials' constraint, and you need their direct input to clarify fundamental aspects, you should use the 'human_assistance' tool. "
-    #          "Frame your request for the human clearly. For example: 'To ensure the idea is simple enough, could you tell me what kind of tools you are comfortable using?' "
-    #          "or 'The idea of a custom electronic gadget is interesting, but finding parts globally is hard. Could we think of a non-electronic version, or what's the core function "
-    #          "you want from it that we could achieve with simpler materials?'"
-    #         ),
-    #         MessagesPlaceholder(variable_name="messages"),
-    #     ]
-    # )
+    # Filter out messages with empty content
+    filtered_messages = [
+        message for message in state.messages
+        if isinstance(message, (HumanMessage, AIMessage, SystemMessage, ToolMessage)) and message.content
+    ]
 
-    # # Tools allowed for brainstorming
-    # node_tools = [human_assistance]
-    # if state.search_enabled and tavily_search_tool: # only add search tool if enabled and initialized
-    #     node_tools.append(tavily_search_tool)
+    # Ensure there is at least one message with content
+    if not filtered_messages:
+        filtered_messages.append(AIMessage(content="No valid messages provided."))
 
-    # llm_with_tools = model.bind_tools(node_tools)
-    # chain = brainstorming_system_prompt | llm_with_tools
+    # Define the system prompt guiding the brainstorming assistant
+    brainstorming_system_prompt = ChatPromptTemplate.from_messages(
+        [
+            ("system",
+             "You are a brainstorming assistant for DIY projects. Your goal is to help the user finalize a single, specific DIY project idea. "
+             "The project idea MUST satisfy these critical criteria:\n"
+             "1. Buildable by an average person with basic DIY skills.\n"
+             "2. Uses only materials and basic tools commonly available in general hardware stores, craft stores, or supermarkets worldwide. "
+             "(e.g., wood, screws, glue, paint, fabric, cardboard, basic hand tools like screwdrivers, hammers, saws, drills. "
+             "AVOID specialized electronic components, 3D printing, specific brand items not universally available, or complex machinery.)\n"
+             "3. The final product should be a tangible item.\n\n"
+             "Your interaction flow:\n"
+             "- Engage with the user to understand their interests or initial thoughts.\n"
+             "- Propose ideas or refine user's suggestions to meet the criteria.\n"
+             "- If an idea proposed by you or the user clearly meets all criteria and you believe it's a good final choice, respond ONLY with the exact phrase: "
+             "'IDEA FINALIZED: [Name of the Idea]'. Example: 'IDEA FINALIZED: Simple Wooden Spice Rack'. "
+             "Do not add any other text before or after this phrase if you use it.\n"
+             "- If you need more information from the user to refine an idea, or if their current suggestion doesn't meet the criteria, ask clarifying questions. "
+             "Guide them towards simpler, more accessible options.\n"
+             "- If you are stuck, the user's request is too vague for you to make progress towards a suitable idea, or they propose something clearly outside the "
+             "'universally available materials' constraint, and you need their direct input to clarify fundamental aspects, you should use the 'human_assistance' tool. "
+             "Frame your request for the human clearly. For example: 'To ensure the idea is simple enough, could you tell me what kind of tools you are comfortable using?' "
+             "or 'The idea of a custom electronic gadget is interesting, but finding parts globally is hard. Could we think of a non-electronic version, or what's the core function "
+             "you want from it that we could achieve with simpler materials?'"
+            ),
+            MessagesPlaceholder(variable_name="messages"),
+        ]
+    )
 
-    # # Pass current messages from the state to the chain
-    # response_message = await chain.ainvoke({"messages": state.messages}, config=config)
+    # Tools allowed for brainstorming
+    node_tools = [human_assistance]
+    if state.search_enabled and tavily_search_tool:  # only add search tool if enabled and initialized
+        node_tools.append(tavily_search_tool)
 
-    # updates = {"messages": [response_message]}
+    llm_with_tools = model.bind_tools(node_tools)
+    chain = brainstorming_system_prompt | llm_with_tools
 
-    # if isinstance(response_message, AIMessage) and response_message.content:
-    #     content = response_message.content.strip()
+    # Pass filtered messages to the chain
+    response_message = await chain.ainvoke({"messages": filtered_messages}, config=config)
 
-    #     if content.startswith("IDEA FINALIZED:"):
-    #         updates["brainstorming_complete"] = True
-    #         updates["tool_call_required"] = False
-    #         updates["loop_brainstorming"] = False
-    #         print(f"✅ Brainstorming complete! Idea: {content}")
+    updates = {"messages": [response_message]}
+    print('response from brainstorm', response_message)
 
-    #     elif getattr(response_message, "tool_calls", None):
-    #         updates["tool_call_required"] = True
-    #         updates["loop_brainstorming"] = False
-    #         print(f"🛠️ Brainstorming node initiated tool call(s): {response_message.tool_calls}")
+    if isinstance(response_message, AIMessage) and response_message.content:
+        content = response_message.content.strip()
 
-    #     else:
-    #         updates["tool_call_required"] = False
-    #         updates["loop_brainstorming"] = True
-    #         print(f"💬 Brainstorming node continues discussion: {content}")
+        if content.startswith("IDEA FINALIZED:"):
+            print('final idea')
+            updates["brainstorming_complete"] = True
+            updates["tool_call_required"] = False
+            updates["loop_brainstorming"] = False
+            print(f"✅ Brainstorming complete! Idea: {content}")
 
-    # else:
-    #     # If no proper response, keep looping brainstorming
-    #     updates["tool_call_required"] = False
-    #     updates["loop_brainstorming"] = True
+        elif getattr(response_message, "tool_calls", None):
+            print('tool call requested')
+            updates["tool_call_required"] = True
+            updates["loop_brainstorming"] = False
+            print(f"🛠️ Brainstorming node initiated tool call(s): {response_message.tool_calls}")
 
-    # print("\n--- End Brainstorming Node Debug ---")
+        else:
+            print('keep brainstorming')
+            updates["tool_call_required"] = False
+            updates["loop_brainstorming"] = True
+            print(f"💬 Brainstorming node continues discussion: {content}")
 
-    test_tool_call = ToolCall(name="human_assistance", args={"question": "What materials do you have?"})
-    test_message = AIMessage(content=None, tool_calls=[test_tool_call])
-
-
-
-
-
-
-
+    else:
+        # If no proper response, keep looping brainstorming
+        updates["tool_call_required"] = False
+        updates["loop_brainstorming"] = True
+
+    print("\n--- End Brainstorming Node Debug ---")
+    return updates
 
 def brainstorming_routing(state: GraphProcessingState) -> str:
     print("\n--- brainstorming_routing Edge (Debug via print) ---")  # Added a newline for clarity
@@ -449,13 +446,17 @@ def define_workflow() -> CompiledStateGraph:
 
     # Edges
    workflow.add_edge("tools", "guidance_node")
-    workflow.add_edge("planning_node", "guidance_node")
-    workflow.add_edge("brainstorming_node", "guidance_node")
-    workflow.add_edge("brainstorming_node", "tools")
-    workflow.add_edge("tools", "brainstorming_node")
-
+    # workflow.add_edge("planning_node", "guidance_node")
+    # workflow.add_edge("brainstorming_node", "guidance_node")
+    # workflow.add_edge("brainstorming_node", "tools")
+    # workflow.add_edge("tools", "brainstorming_node")
+
+    workflow.add_conditional_edges(
+        "brainstorming_node",
+        tools_condition,
+    )
     workflow.add_conditional_edges("guidance_node", guidance_routing)
-    workflow.add_conditional_edges("brainstorming_node", brainstorming_routing)
+    # workflow.add_conditional_edges("brainstorming_node", brainstorming_routing)
 
     # # Set end nodes
     workflow.set_entry_point("guidance_node")
@@ -558,3 +559,8 @@ graph = define_workflow()
 # "messages": response,
 # "idea_complete": idea_complete
 # }
+
+
+
+
+#
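Note: one caveat the log already hints at. In langgraph, interrupt() is a plain synchronous call that only works inside a node executed by the graph runtime with a checkpointer configured; invoking the tool directly (as in the StructuredTool and get_config failures above) bypasses that context, and awaiting it is unnecessary. The standard pattern, sketched here from the langgraph docs rather than from this commit, pauses with interrupt() and resumes with Command:

    from langgraph.types import Command, interrupt

    def ask_human(query: str) -> str:
        # Pauses the graph run; on resume, returns the value passed to Command.
        answer = interrupt({"query": query})
        return answer["data"]

    # Resuming later, once the user has typed a reply:
    # graph.invoke(Command(resume={"data": user_reply}), config=config)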