Spaces: Runtime error
Commit d9db98e · Parent: b8b2aff
stiff
Browse files:
- __pycache__/graph.cpython-310.pyc +0 -0
- app.log +165 -0
- graph.py +143 -112
__pycache__/graph.cpython-310.pyc
CHANGED
Binary files a/__pycache__/graph.cpython-310.pyc and b/__pycache__/graph.cpython-310.pyc differ
app.log
CHANGED
@@ -50632,3 +50632,168 @@ Traceback (most recent call last):
   File "/home/user/app/graph.py", line 145, in guidance_node
     elif isinstance(message, SystemMessage):
 NameError: name 'SystemMessage' is not defined
+2025-06-06 21:03:31:__main__:INFO: Starting the interface
+2025-06-06 21:03:37:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 21:03:42:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 21:03:42:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 147, in guidance_node
+    elif isinstance(message, SystemMessage):
+NameError: name 'SystemMessage' is not defined
+2025-06-06 21:03:50:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 21:03:53:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 21:04:10:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 21:04:26:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 21:05:15:__main__:INFO: Starting the interface
+2025-06-06 21:05:20:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 21:05:25:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 21:05:25:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 149, in guidance_node
+    elif isinstance(message, ToolMessage):
+NameError: name 'ToolMessage' is not defined. Did you mean: 'AnyMessage'?
+2025-06-06 21:05:56:__main__:INFO: Starting the interface
+2025-06-06 21:06:01:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 21:06:17:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 21:06:17:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 678, in ainvoke
+    input = await step.ainvoke(input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/graph/branch.py", line 196, in _aroute
+    result = await self.path.ainvoke(value, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 433, in ainvoke
+    ret = await coro
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/runnables/config.py", line 616, in run_in_executor
+    return await asyncio.get_running_loop().run_in_executor(
+  File "/usr/local/lib/python3.10/concurrent/futures/thread.py", line 58, in run
+    result = self.fn(*self.args, **self.kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/runnables/config.py", line 607, in wrapper
+    return func(*args, **kwargs)
+  File "/home/user/app/graph.py", line 442, in custom_route_after_guidance
+    messages = state.get("messages", [])
+  File "/usr/local/lib/python3.10/site-packages/pydantic/main.py", line 891, in __getattr__
+    raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}')
+AttributeError: 'GraphProcessingState' object has no attribute 'get'
+2025-06-06 21:09:02:__main__:INFO: Starting the interface
+2025-06-06 21:09:07:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 21:09:46:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 21:09:46:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 678, in ainvoke
+    input = await step.ainvoke(input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/graph/branch.py", line 196, in _aroute
+    result = await self.path.ainvoke(value, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 433, in ainvoke
+    ret = await coro
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/runnables/config.py", line 616, in run_in_executor
+    return await asyncio.get_running_loop().run_in_executor(
+  File "/usr/local/lib/python3.10/concurrent/futures/thread.py", line 58, in run
+    result = self.fn(*self.args, **self.kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/runnables/config.py", line 607, in wrapper
+    return func(*args, **kwargs)
+  File "/home/user/app/graph.py", line 443, in custom_route_after_guidance
+    if not messages:
+NameError: name 'messages' is not defined. Did you mean: 'essages'?
+2025-06-06 21:10:22:__main__:INFO: Starting the interface
+2025-06-06 21:10:28:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 21:11:30:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 21:11:30:__main__:ERROR: Exception occurred
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 97, in chat_fn
+    async for stream_mode, chunk in graph.astream(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 2655, in astream
+    async for _ in runner.atick(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 400, in atick
+    _panic_or_proceed(
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/runner.py", line 509, in _panic_or_proceed
+    raise exc
+  File "/usr/local/lib/python3.10/site-packages/langgraph/pregel/retry.py", line 136, in arun_with_retry
+    return await task.proc.ainvoke(task.input, config)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 676, in ainvoke
+    input = await step.ainvoke(input, config, **kwargs)
+  File "/usr/local/lib/python3.10/site-packages/langgraph/utils/runnable.py", line 440, in ainvoke
+    ret = await self.afunc(*args, **kwargs)
+  File "/home/user/app/graph.py", line 428, in planning_node
+    response = await chain.ainvoke({"messages": state.messages}, config=config)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 3089, in ainvoke
+    input_ = await coro_with_context(part(), context, create_task=True)
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 5444, in ainvoke
+    return await self.bound.ainvoke(
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 394, in ainvoke
+    llm_result = await self.agenerate_prompt(
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 968, in agenerate_prompt
+    return await self.agenerate(
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 926, in agenerate
+    raise exceptions[0]
+  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 1083, in _agenerate_with_cache
+    async for chunk in self._astream(messages, stop=stop, **kwargs):
+  File "/usr/local/lib/python3.10/site-packages/langchain_anthropic/chat_models.py", line 1394, in _astream
+    _handle_anthropic_bad_request(e)
+  File "/usr/local/lib/python3.10/site-packages/langchain_anthropic/chat_models.py", line 1374, in _astream
+    stream = await self._acreate(payload)
+  File "/usr/local/lib/python3.10/site-packages/langchain_anthropic/chat_models.py", line 1322, in _acreate
+    return await self._async_client.messages.create(**payload)
+  File "/usr/local/lib/python3.10/site-packages/anthropic/resources/messages/messages.py", line 2229, in create
+    return await self._post(
+  File "/usr/local/lib/python3.10/site-packages/anthropic/_base_client.py", line 1819, in post
+    return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
+  File "/usr/local/lib/python3.10/site-packages/anthropic/_base_client.py", line 1626, in request
+    raise self._make_status_error_from_response(err.response) from None
+anthropic.BadRequestError: Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'messages.0.content.1: unexpected `tool_use_id` found in `tool_result` blocks: toolu_01MvUi4ZEnFZ2cN39bLVinQ2. Each `tool_result` block must have a corresponding `tool_use` block in the previous message.'}}
+2025-06-06 21:18:33:__main__:INFO: Starting the interface
+2025-06-06 21:18:38:__main__:INFO: Greeting added for new user via handle_initial_greeting_load.
+2025-06-06 21:18:43:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 21:27:34:__main__:INFO: Prompt: You are a helpful assistant.
+2025-06-06 21:27:48:__main__:INFO: Prompt: You are a helpful assistant.
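The first two tracebacks above are plain missing-import NameErrors: guidance_node type-checks messages against SystemMessage and ToolMessage, but graph.py only imported AIMessage, HumanMessage, AnyMessage, and ToolCall. The graph.py hunk below is the actual fix; as a minimal sketch (the describe() helper is hypothetical, the import path is the real langchain_core one):

from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,  # was missing -> NameError: name 'SystemMessage' is not defined
    ToolMessage,    # was missing -> NameError: name 'ToolMessage' is not defined
)

def describe(message) -> str:
    # Hypothetical helper mirroring the isinstance dispatch in guidance_node.
    if isinstance(message, HumanMessage):
        return f"Human: {message.content}"
    elif isinstance(message, SystemMessage):
        return f"System: {message.content}"
    elif isinstance(message, ToolMessage):
        return f"Tool: {message.content}"
    elif isinstance(message, AIMessage):
        return f"AI: {message.content}"
    return str(message)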
graph.py
CHANGED
@@ -8,7 +8,7 @@ import uuid
 
 
 import aiohttp
-from langchain_core.messages import AIMessage, HumanMessage, AnyMessage, ToolCall
+from langchain_core.messages import AIMessage, HumanMessage, AnyMessage, ToolCall, SystemMessage, ToolMessage
 
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.tools import tool
@@ -23,7 +23,7 @@ from langgraph.prebuilt import ToolNode
 from langgraph.checkpoint.memory import MemorySaver
 from langgraph.types import Command, interrupt
 
-from typing import TypedDict, List, Optional
+from typing import TypedDict, List, Optional, Literal
 
 class State(TypedDict):
     messages: Annotated[list, add_messages]
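Context for the Literal addition: custom_route_after_guidance (changed further down) annotates its return type with Literal, the usual way to type a LangGraph router so its possible path names are explicit. A sketch of the pattern, with a hypothetical router body:

from typing import Literal

def route_after_guidance(state) -> Literal["execute_tools", "proceed_to_next_stage"]:
    # Hypothetical body: go to the tool node when the last AI message carries
    # pending tool calls, otherwise move on to the next stage.
    messages = getattr(state, "messages", None) or []
    last = messages[-1] if messages else None
    if last is not None and getattr(last, "tool_calls", None):
        return "execute_tools"
    return "proceed_to_next_stage"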
@@ -129,121 +129,146 @@ class GraphProcessingState(BaseModel):
 
 
 
-async def guidance_node(state: GraphProcessingState, config=None):
-    print("\n--- Guidance Node (Debug via print) ---")
-
-    print(f"Prompt: {state.prompt}")
-    for message in state.messages:
-        if isinstance(message, HumanMessage):
-            print(f"Human: {message.content}")
-        elif isinstance(message, AIMessage):
-            if message.content:
-                if isinstance(message.content, list):
-                    texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
-                    if texts:
-                        print(f"AI: {' '.join(texts)}")
-                elif isinstance(message.content, str):
-                    print(f"AI: {message.content}")
-        elif isinstance(message, SystemMessage):
-            print(f"System: {message.content}")
-        elif isinstance(message, ToolMessage):
-            print(f"Tool: {message.content}")
-
-    print(f"Tools Enabled: {state.tools_enabled}")
-    print(f"Search Enabled: {state.search_enabled}")
-    print(f"Next Stage: {state.next_stage}")
-
-    print(f"Brainstorming Complete: {state.brainstorming_complete}")
-
-    guidance_node.count = getattr(guidance_node, 'count', 0) + 1
-    print('\nGuidance Node called count', guidance_node.count)
-    print("\n--- End Guidance Node Debug ---")
-
-    stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
-    completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
-    incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]
-
-    if not incomplete:
-        print("All stages complete!")
-        # Handle case where all stages are complete
-        # You might want to return a message and end, or set proposed_next_stage to a special value
-        ai_all_complete_msg = AIMessage(content="All DIY project stages are complete!")
-        return {
-            "messages": current_messages + [ai_all_complete_msg],
-            "next_stage": "end_project",  # Or None, or a final summary node
-            "pending_approval_stage": None,
-        }
-    else:
-        # THIS LINE DEFINES THE VARIABLE
-        proposed_next_stage = incomplete[0]
-
-        print(f"Proposed next stage: {proposed_next_stage}")
-
-        status_summary = f"Completed stages: {completed}\nIncomplete stages: {incomplete}"
-
-        guidance_prompt_text = (
-            "You are the Guiding Assistant for a DIY project. Your primary responsibility is to determine the next logical step "
-            "and then **obtain the user's explicit approval** before proceeding.\n\n"
-            f"CURRENT PROJECT STATUS:\n{status_summary}\n\n"
-            f"Based on the status, the most logical next stage appears to be: **'{proposed_next_stage}'**.\n\n"
-            "YOUR TASK:\n"
-            f"1. Formulate a clear and concise question for the user, asking if they agree to proceed to the **'{proposed_next_stage}'** stage. For example: 'It looks like '{proposed_next_stage}' is next. Shall we proceed with that?' or 'Are you ready to move on to {proposed_next_stage}?'\n"
-            "2. **You MUST use the 'human_assistance' tool to ask this question.** Do not answer directly. Invoke the tool with your question.\n"
-            "Example of tool usage (though you don't write this, you *call* the tool):\n"
-            "Tool Call: human_assistance(query='The next stage is planning. Do you want to proceed with planning?')\n\n"
-            "Consider the user's most recent message if it provides any preference."
-        )
-
-        if state.prompt:
-            final_prompt = "\n".join([guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
-        else:
-            final_prompt = "\n".join([guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])
-
-        prompt = ChatPromptTemplate.from_messages(
-            [
-                ("system", final_prompt),
-                MessagesPlaceholder(variable_name="messages"),
-            ]
-        )
-
-        assistant_model = model.bind_tools([human_assistance])
-
-        chain = prompt | assistant_model
-
-        try:
-            response = await chain.ainvoke({"messages": state.messages}, config=config)
-
-            for msg in response:
-                if isinstance(msg, HumanMessage):
-                    print("Human:", msg.content)
-                elif isinstance(msg, AIMessage):
-                    if isinstance(msg.content, list):
-                        ai_texts = [part.get("text", "") for part in msg.content if isinstance(part, dict)]
-                        print("AI:", " ".join(ai_texts))
-                    else:
-                        print("AI:", msg.content)
-
-            # Check for tool calls in the response
-            if hasattr(response, "tool_calls"):
-                for tool_call in response.tool_calls:
-                    tool_name = tool_call['name']
-                    if tool_name == "human_assistance":
-                        query = tool_call['args']['query']
-                        print(f"Human input needed: {query}")
-                        # Handle human assistance tool call
-                        # You can pause execution and wait for user input here
-
-            return {
-                "messages": [response],
-                "next_stage": incomplete[0] if incomplete else "brainstorming"
-            }
-        except Exception as e:
-            print(f"Error in guidance node: {e}")
-            return {
-                "messages": [AIMessage(content="Error in guidance node.")],
-                "next_stage": "brainstorming"
-            }
+# async def guidance_node(state: GraphProcessingState, config=None):
+#     print("\n--- Guidance Node (Debug via print) ---")
+
+#     print(f"Prompt: {state.prompt}")
+#     for message in state.messages:
+#         if isinstance(message, HumanMessage):
+#             print(f"Human: {message.content}")
+#         elif isinstance(message, AIMessage):
+#             if message.content:
+#                 if isinstance(message.content, list):
+#                     texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
+#                     if texts:
+#                         print(f"AI: {' '.join(texts)}")
+#                 elif isinstance(message.content, str):
+#                     print(f"AI: {message.content}")
+#         elif isinstance(message, SystemMessage):
+#             print(f"System: {message.content}")
+#         elif isinstance(message, ToolMessage):
+#             print(f"Tool: {message.content}")
+
+#     print(f"Tools Enabled: {state.tools_enabled}")
+#     print(f"Search Enabled: {state.search_enabled}")
+#     print(f"Next Stage: {state.next_stage}")
+
+#     print(f"Brainstorming Complete: {state.brainstorming_complete}")
+
+#     guidance_node.count = getattr(guidance_node, 'count', 0) + 1
+#     print('\nGuidance Node called count', guidance_node.count)
+#     print("\n--- End Guidance Node Debug ---")
+
+#     stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
+#     completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
+#     incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]
+
+#     if not incomplete:
+#         print("All stages complete!")
+#         # Handle case where all stages are complete
+#         # You might want to return a message and end, or set proposed_next_stage to a special value
+#         ai_all_complete_msg = AIMessage(content="All DIY project stages are complete!")
+#         return {
+#             "messages": current_messages + [ai_all_complete_msg],
+#             "next_stage": "end_project",  # Or None, or a final summary node
+#             "pending_approval_stage": None,
+#         }
+#     else:
+#         # THIS LINE DEFINES THE VARIABLE
+#         proposed_next_stage = incomplete[0]
+
+#         print(f"Proposed next stage: {proposed_next_stage}")
+
+#         status_summary = f"Completed stages: {completed}\nIncomplete stages: {incomplete}"
+
+#         guidance_prompt_text = (
+#             "You are the Guiding Assistant for a DIY project. Your primary responsibility is to determine the next logical step "
+#             "and then **obtain the user's explicit approval** before proceeding.\n\n"
+#             f"CURRENT PROJECT STATUS:\n{status_summary}\n\n"
+#             f"Based on the status, the most logical next stage appears to be: **'{proposed_next_stage}'**.\n\n"
+#             "YOUR TASK:\n"
+#             f"1. Formulate a clear and concise question for the user, asking if they agree to proceed to the **'{proposed_next_stage}'** stage. For example: 'It looks like '{proposed_next_stage}' is next. Shall we proceed with that?' or 'Are you ready to move on to {proposed_next_stage}?'\n"
+#             "2. **You MUST use the 'human_assistance' tool to ask this question.** Do not answer directly. Invoke the tool with your question.\n"
+#             "Example of tool usage (though you don't write this, you *call* the tool):\n"
+#             "Tool Call: human_assistance(query='The next stage is planning. Do you want to proceed with planning?')\n\n"
+#             "Consider the user's most recent message if it provides any preference."
+#         )
+
+#         if state.prompt:
+#             final_prompt = "\n".join([guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
+#         else:
+#             final_prompt = "\n".join([guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])
+
+#         prompt = ChatPromptTemplate.from_messages(
+#             [
+#                 ("system", final_prompt),
+#                 MessagesPlaceholder(variable_name="messages"),
+#             ]
+#         )
+
+#         assistant_model = model.bind_tools([human_assistance])
+
+#         chain = prompt | assistant_model
+
+#         try:
+#             response = await chain.ainvoke({"messages": state.messages}, config=config)
+
+#             for msg in response:
+#                 if isinstance(msg, HumanMessage):
+#                     print("Human:", msg.content)
+#                 elif isinstance(msg, AIMessage):
+#                     if isinstance(msg.content, list):
+#                         ai_texts = [part.get("text", "") for part in msg.content if isinstance(part, dict)]
+#                         print("AI:", " ".join(ai_texts))
+#                     else:
+#                         print("AI:", msg.content)
+
+#             # Check for tool calls in the response
+#             if hasattr(response, "tool_calls"):
+#                 for tool_call in response.tool_calls:
+#                     tool_name = tool_call['name']
+#                     if tool_name == "human_assistance":
+#                         query = tool_call['args']['query']
+#                         print(f"Human input needed: {query}")
+#                         # Handle human assistance tool call
+#                         # You can pause execution and wait for user input here
+
+#             return {
+#                 "messages": [response],
+#                 "next_stage": incomplete[0] if incomplete else "brainstorming"
+#             }
+#         except Exception as e:
+#             print(f"Error in guidance node: {e}")
+#             return {
+#                 "messages": [AIMessage(content="Error in guidance node.")],
+#                 "next_stage": "brainstorming"
+#             }
+
+async def guidance_node(state: GraphProcessingState, config=None):
+    # Define the order of stages
+    stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
+
+    # Identify completed and incomplete stages
+    completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
+    incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]
+
+    # Determine the next stage
+    if not incomplete:
+        # All stages are complete
+        return {
+            "messages": [AIMessage(content="All DIY project stages are complete!")],
+            "next_stage": "end_project",
+            "pending_approval_stage": None,
+        }
+    else:
+        # Set the next stage to the first incomplete stage
+        next_stage = incomplete[0]
+        return {
+            "messages": [],
+            "next_stage": next_stage,
+            "pending_approval_stage": None,
+        }
 
 async def brainstorming_node(state: GraphProcessingState, config=None):
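The rewritten guidance_node is now a pure routing step: no prompt, no model call, just "first incomplete stage wins". A sketch of calling it in isolation, assuming graph.py is importable and with GraphProcessingState trimmed to the flags the node actually reads:

import asyncio
from pydantic import BaseModel

from graph import guidance_node  # the rewritten node from this commit

class DemoState(BaseModel):
    # Stand-in for GraphProcessingState: only the completion flags are read.
    brainstorming_complete: bool = True
    planning_complete: bool = False
    drawing_complete: bool = False
    product_searching_complete: bool = False
    purchasing_complete: bool = False

result = asyncio.run(guidance_node(DemoState()))
print(result)  # expected: {'messages': [], 'next_stage': 'planning', 'pending_approval_stage': None}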
@@ -439,7 +464,7 @@ def custom_route_after_guidance(state: GraphProcessingState) -> Literal["execute_tools", "proceed_to_next_stage"]:
     or directly to the state.next_stage node if it's set).
     """
     print("\n--- Custom Route After Guidance Condition ---")
-    messages = state
+    messages = state['messages'] if 'messages' in state else []
     if not messages:
         print("No messages found in state. Defaulting to proceed_to_next_stage.")
         return "proceed_to_next_stage"  # Or handle as an error/specific state
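A caution on the replacement line, not addressed by this commit: GraphProcessingState is a pydantic BaseModel (that is what raised the AttributeError in the log), and a BaseModel does not support dict-style access either. 'messages' in state falls back to pydantic's __iter__, which yields (name, value) pairs, so the membership test is likely always False here and the router would always see an empty list. A hedged sketch of attribute-style access that matches how the rest of graph.py reads state:

def read_messages(state) -> list:
    # Works for both a pydantic state model and a plain dict state.
    if isinstance(state, dict):
        return state.get("messages", [])
    return getattr(state, "messages", None) or []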
@@ -515,18 +540,24 @@ def define_workflow() -> CompiledStateGraph:
     # workflow.add_edge("planning_node", "guidance_node")
     # workflow.add_edge("brainstorming_node", "guidance_node")
     # workflow.add_edge("brainstorming_node", "tools")
-
-
+    workflow.add_edge("tools", "guidance_node")
     workflow.add_conditional_edges(
-        "guidance_node",                # The source node
-        custom_route_after_guidance,    # Your custom condition function
-        {
-            # "Path name": "Destination node name"
-            "execute_tools": "tools",   # If function returns "execute_tools"
-            "proceed_to_next_stage": "planning_node"  # If function returns "proceed_to_next_stage"
-            # Or this could be another router, or END
-        }
+        "guidance_node",
+        guidance_routing,
+        {
+            "brainstorming_node" : "brainstorming_node",
+        }
     )
+    # workflow.add_conditional_edges(
+    #     "guidance_node",                # The source node
+    #     custom_route_after_guidance,    # Your custom condition function
+    #     {
+    #         # "Path name": "Destination node name"
+    #         "execute_tools": "tools",   # If function returns "execute_tools"
+    #         "proceed_to_next_stage": "planning_node"  # If function returns "proceed_to_next_stage"
+    #         # Or this could be another router, or END
+    #     }
+    # )
     # workflow.add_conditional_edges("guidance_node", guidance_routing)
     # workflow.add_conditional_edges("brainstorming_node", brainstorming_routing)
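One failure in the log that this commit does not touch is the Anthropic 400. It means the request contained a tool_result content block whose tool_use_id had no matching tool_use block in the immediately preceding assistant message; in LangChain terms, every ToolMessage must directly follow the AIMessage whose tool_calls entry it answers, with matching ids. A sketch of a well-formed pair (the id is a placeholder, not the one from the log):

from langchain_core.messages import AIMessage, HumanMessage, ToolMessage

history = [
    HumanMessage(content="What's next for my project?"),
    AIMessage(
        content="",
        tool_calls=[{
            "name": "human_assistance",
            "args": {"query": "Proceed to planning?"},
            "id": "toolu_placeholder",  # hypothetical id
        }],
    ),
    # Must come right after the AIMessage above and reuse its tool call id.
    ToolMessage(content="yes, proceed", tool_call_id="toolu_placeholder"),
]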