Commit 807e22d
Parent(s): 4279819
first valid commit
- .gitattributes +35 -0
- README.md +14 -0
- app.log +0 -0
- app.py +855 -0
- app.sh +8 -0
- brainstorm_test.txt +165 -0
- brainstorming_system_prompt.txt +56 -0
- curls +26 -0
- graph.py +1298 -0
- greeting_prompt.txt +2 -0
- logging-config.json +35 -0
- requirements.txt +21 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,14 @@
+---
+title: DIY Assistant
+emoji: 💬
+colorFrom: yellow
+colorTo: purple
+sdk: gradio
+sdk_version: 5.0.1
+app_file: app.py
+pinned: false
+license: mit
+short_description: DIY assistant
+---
+
+An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
app.log
ADDED
The diff for this file is too large to render.
See raw diff
app.py
ADDED
@@ -0,0 +1,855 @@
+import os
+import logging
+import logging.config
+from typing import Any
+from uuid import uuid4, UUID
+import json
+import sys
+
+import gradio as gr
+from dotenv import load_dotenv
+from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage
+from langgraph.types import RunnableConfig
+from pydantic import BaseModel
+from pathlib import Path
+
+
+import subprocess
+
+# def update_repo():
+#     try:
+#         subprocess.run(["git", "fetch", "origin"], check=True)
+#         subprocess.run(["git", "reset", "--hard", "origin/main"], check=True)
+#         subprocess.run([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"], check=True)
+#         subprocess.run([sys.executable, "app.py"], check=True)
+#     except Exception as e:
+#         print(f"Git update failed: {e}")
+
+# update_repo()
+
+load_dotenv()
+
+
+
+# There are tools set here dependent on environment variables
+from graph import graph, weak_model, search_enabled  # noqa
+
+FOLLOWUP_QUESTION_NUMBER = 3
+TRIM_MESSAGE_LENGTH = 16  # Includes tool messages
+USER_INPUT_MAX_LENGTH = 10000  # Characters
+
+# We need the same secret for data persistance
+# If you store sensitive data, you should store your secret in .env
+BROWSER_STORAGE_SECRET = "itsnosecret"
+
+with open('logging-config.json', 'r') as fh:
+    config = json.load(fh)
+logging.config.dictConfig(config)
+logger = logging.getLogger(__name__)
+
+def load_initial_greeting(filepath="greeting_prompt.txt") -> str:
+    """
+    Loads the initial greeting message from a specified text file.
+    """
+    try:
+        with open(filepath, "r", encoding="utf-8") as f:
+            return f.read().strip()
+    except FileNotFoundError:
+        # Use a logger if you have one configured, otherwise print
+        # logger.warning(f"Warning: Prompt file '{filepath}' not found.")
+        print(f"Warning: Prompt file '{filepath}' not found. Using default.")
+        return "Welcome to the application! (Default Greeting)"
+
+async def chat_fn(user_input: str, history: dict, input_graph_state: dict, uuid: UUID, prompt: str, search_enabled: bool, download_website_text_enabled: bool):
+    """
+    Args:
+        user_input (str): The user's input message
+        history (dict): The history of the conversation in gradio
+        input_graph_state (dict): The current state of the graph. This includes tool call history
+        uuid (UUID): The unique identifier for the current conversation. This can be used in conjunction with langgraph or for memory
+        prompt (str): The system prompt
+    Yields:
+        str: The output message
+        dict|Any: The final state of the graph
+        bool|Any: Whether to trigger follow up questions
+
+    We do not use gradio history in the graph since we want the ToolMessage in the history
+    ordered properly. GraphProcessingState.messages is used as history instead
+    """
+    try:
+        logger.info(f"Prompt: {prompt}")
+        input_graph_state["tools_enabled"] = {
+            "download_website_text": download_website_text_enabled,
+            "tavily_search_results_json": search_enabled,
+        }
+        if prompt:
+            input_graph_state["prompt"] = prompt
+
+        if input_graph_state.get("awaiting_human_input"):
+            input_graph_state["messages"].append(
+                ToolMessage(
+                    tool_call_id=input_graph_state.pop("human_assistance_tool_id"),
+                    content=user_input
+                )
+            )
+            input_graph_state["awaiting_human_input"] = False
+        else:
+            # New user message
+            if "messages" not in input_graph_state:
+                input_graph_state["messages"] = []
+            input_graph_state["messages"].append(
+                HumanMessage(user_input[:USER_INPUT_MAX_LENGTH])
+            )
+            input_graph_state["messages"] = input_graph_state["messages"][-TRIM_MESSAGE_LENGTH:]
+
+        config = RunnableConfig(
+            recursion_limit=20,
+            run_name="user_chat",
+            configurable={"thread_id": uuid}
+        )
+
+        output: str = ""
+        final_state: dict | Any = {}
+        waiting_output_seq: list[str] = []
+
+        async for stream_mode, chunk in graph.astream(
+            input_graph_state,
+            config=config,
+            stream_mode=["values", "messages"],
+        ):
+            if stream_mode == "values":
+                final_state = chunk
+                last_message = chunk["messages"][-1]
+                if hasattr(last_message, "tool_calls"):
+                    for msg_tool_call in last_message.tool_calls:
+                        tool_name: str = msg_tool_call['name']
+
+                        if tool_name == "tavily_search_results_json":
+                            query = msg_tool_call['args']['query']
+                            waiting_output_seq.append(f"Searching for '{query}'...")
+                            yield "\n".join(waiting_output_seq), gr.skip(), gr.skip()
+
+                        # download_website_text is the name of the function defined in graph.py
+                        elif tool_name == "download_website_text":
+                            url = msg_tool_call['args']['url']
+                            waiting_output_seq.append(f"Downloading text from '{url}'...")
+                            yield "\n".join(waiting_output_seq), gr.skip(), gr.skip()
+
+                        elif tool_name == "human_assistance":
+                            query = msg_tool_call["args"]["query"]
+                            waiting_output_seq.append(f"🤖: {query}")
+
+                            # Save state to resume after user provides input
+                            input_graph_state["awaiting_human_input"] = True
+                            input_graph_state["human_assistance_tool_id"] = msg_tool_call["id"]
+
+                            # Indicate that human input is needed
+                            yield "\n".join(waiting_output_seq), input_graph_state, gr.skip(), True
+                            return  # Pause execution, resume in next call
+
+                        else:
+                            waiting_output_seq.append(f"Running {tool_name}...")
+                            yield "\n".join(waiting_output_seq), gr.skip(), gr.skip()
+
+            elif stream_mode == "messages":
+                msg, metadata = chunk
+                # print("output: ", msg, metadata)
+                # assistant_node is the name we defined in the langgraph graph
+                if metadata.get('langgraph_node') == "assistant_node":  # Use .get for safety
+                    current_chunk_text = ""
+                    if isinstance(msg.content, str):
+                        current_chunk_text = msg.content
+                    elif isinstance(msg.content, list):
+                        for block in msg.content:
+                            if isinstance(block, dict) and block.get("type") == "text":
+                                current_chunk_text += block.get("text", "")
+                            elif isinstance(block, str):  # Fallback if content is list of strings
+                                current_chunk_text += block
+
+
+                    if current_chunk_text:  # Only add and yield if there's actually text
+                        output += current_chunk_text
+                        yield output, gr.skip(), gr.skip()
+
+        # Trigger for asking follow up questions
+        # + store the graph state for next iteration
+        # yield output, dict(final_state), gr.skip()
+        yield output + " ", dict(final_state), True
+    except Exception:
+        logger.exception("Exception occurred")
+        user_error_message = "There was an error processing your request. Please try again."
+        yield user_error_message, gr.skip(), False
+
+def clear():
+    return dict(), uuid4()
+
+class FollowupQuestions(BaseModel):
+    """Model for langchain to use for structured output for followup questions"""
+    questions: list[str]
+
+async def populate_followup_questions(end_of_chat_response: bool, messages: dict[str, str], uuid: UUID):
+    """
+    This function gets called a lot due to the asynchronous nature of streaming
+
+    Only populate followup questions if streaming has completed and the message is coming from the assistant
+    """
+    if not end_of_chat_response or not messages or messages[-1]["role"] != "assistant":
+        return *[gr.skip() for _ in range(FOLLOWUP_QUESTION_NUMBER)], False
+    config = RunnableConfig(
+        run_name="populate_followup_questions",
+        configurable={"thread_id": uuid}
+    )
+    weak_model_with_config = weak_model.with_config(config)
+    follow_up_questions = await weak_model_with_config.with_structured_output(FollowupQuestions).ainvoke([
+        ("system", f"suggest {FOLLOWUP_QUESTION_NUMBER} followup questions for the user to ask the assistant. Refrain from asking personal questions."),
+        *messages,
+    ])
+    if len(follow_up_questions.questions) != FOLLOWUP_QUESTION_NUMBER:
+        raise ValueError("Invalid value of followup questions")
+    buttons = []
+    for i in range(FOLLOWUP_QUESTION_NUMBER):
+        buttons.append(
+            gr.Button(follow_up_questions.questions[i], visible=True, elem_classes="chat-tab"),
+        )
+    return *buttons, False
+
+async def summarize_chat(end_of_chat_response: bool, messages: dict, sidebar_summaries: dict, uuid: UUID):
+    """Summarize chat for tab names"""
+    # print("\n------------------------")
+    # print("not end_of_chat_response", not end_of_chat_response)
+    # print("not messages", not messages)
+    # if messages:
+    #     print("messages[-1][role] != assistant", messages[-1]["role"] != "assistant")
+    # print("isinstance(sidebar_summaries, type(lambda x: x))", isinstance(sidebar_summaries, type(lambda x: x)))
+    # print("uuid in sidebar_summaries", uuid in sidebar_summaries)
+    should_return = (
+        not end_of_chat_response or
+        not messages or
+        messages[-1]["role"] != "assistant" or
+        # This is a bug with gradio
+        isinstance(sidebar_summaries, type(lambda x: x)) or
+        # Already created summary
+        uuid in sidebar_summaries
+    )
+    if should_return:
+        return gr.skip(), gr.skip()
+
+    filtered_messages = []
+    for msg in messages:
+        if isinstance(msg, dict) and msg.get("content") and msg["content"].strip():
+            filtered_messages.append(msg)
+
+    # If we don't have any valid messages after filtering, provide a default summary
+    if not filtered_messages:
+        if uuid not in sidebar_summaries:
+            sidebar_summaries[uuid] = "Chat History"
+        return sidebar_summaries, False
+
+
+    config = RunnableConfig(
+        run_name="summarize_chat",
+        configurable={"thread_id": uuid}
+    )
+    try:
+        weak_model_with_config = weak_model.with_config(config)
+        summary_response = await weak_model_with_config.ainvoke([
+            ("system", "summarize this chat in 7 tokens or less. Refrain from using periods"),
+            *filtered_messages,
+        ])
+
+        if uuid not in sidebar_summaries:
+            sidebar_summaries[uuid] = summary_response.content
+    except Exception as e:
+        logger.error(f"Error summarizing chat: {e}")
+        # Provide a fallback summary if an error occurs
+        if uuid not in sidebar_summaries:
+            sidebar_summaries[uuid] = "Previous Chat"
+
+    return sidebar_summaries, False
+
+async def new_tab(uuid, gradio_graph, messages, tabs, prompt, sidebar_summaries):
+    new_uuid = uuid4()
+    new_graph = {}
+    if uuid not in sidebar_summaries:
+        sidebar_summaries, _ = await summarize_chat(True, messages, sidebar_summaries, uuid)
+    tabs[uuid] = {
+        "graph": gradio_graph,
+        "messages": messages,
+        "prompt": prompt,
+    }
+    suggestion_buttons = []
+    for _ in range(FOLLOWUP_QUESTION_NUMBER):
+        suggestion_buttons.append(gr.Button(visible=False))
+    new_messages = {}
+
+    # --- MODIFICATION FOR GREETING IN EVERY NEW CHAT ---
+    greeting_text = load_initial_greeting()  # Get the greeting
+    # `gr.Chatbot` expects a list of tuples or list of dicts.
+    # For `type="messages"`, it's list of dicts: [{"role": "assistant", "content": "Hello"}]
+    # Or list of tuples: [(None, "Hello")]
+    # Let's assume your chatbot is configured for list of tuples (None, bot_message) for initial messages
+    new_chat_messages_for_display = [{"role": "assistant", "content": greeting_text}]
+    # If your chat_interface.chatbot_value expects list of dicts:
+    # new_messages_history = [{"role": "assistant", "content": greeting_text}]
+    # --- END MODIFICATION ---
+
+    new_prompt = "You are a helpful assistant."
+    return new_uuid, new_graph, new_chat_messages_for_display, tabs, new_prompt, sidebar_summaries, *suggestion_buttons
+
+def switch_tab(selected_uuid, tabs, gradio_graph, uuid, messages, prompt):
+    # I don't know of another way to lookup uuid other than
+    # by the button value
+
+    # Save current state
+    if messages:
+        tabs[uuid] = {
+            "graph": gradio_graph,
+            "messages": messages,
+            "prompt": prompt
+        }
+
+    if selected_uuid not in tabs:
+        logger.error(f"Could not find the selected tab in offloaded_tabs_data_storage {selected_uuid}")
+        return gr.skip(), gr.skip(), gr.skip(), gr.skip()
+    selected_tab_state = tabs[selected_uuid]
+    selected_graph = selected_tab_state["graph"]
+    selected_messages = selected_tab_state["messages"]
+    selected_prompt = selected_tab_state.get("prompt", "")
+    suggestion_buttons = []
+    for _ in range(FOLLOWUP_QUESTION_NUMBER):
+        suggestion_buttons.append(gr.Button(visible=False))
+    return selected_graph, selected_uuid, selected_messages, tabs, selected_prompt, *suggestion_buttons
+
+def delete_tab(current_chat_uuid, selected_uuid, sidebar_summaries, tabs):
+    output_messages = gr.skip()
+    if current_chat_uuid == selected_uuid:
+        output_messages = dict()
+    if selected_uuid in tabs:
+        del tabs[selected_uuid]
+    if selected_uuid in sidebar_summaries:
+        del sidebar_summaries[selected_uuid]
+    return sidebar_summaries, tabs, output_messages
+
+def submit_edit_tab(selected_uuid, sidebar_summaries, text):
+    sidebar_summaries[selected_uuid] = text
+    return sidebar_summaries, ""
+
+def load_mesh(mesh_file_name):
+    return mesh_file_name
+
+def display_initial_greeting(is_new_user_state_value: bool):
+    """
+    Determines if a greeting should be displayed and returns the UI updates.
+    It also returns the new state for 'is_new_user_for_greeting'.
+    """
+    if is_new_user_state_value:
+        greeting_message_text = load_initial_greeting()
+        # For a chatbot, the history is a list of tuples: [(user_msg, bot_msg)]
+        # For an initial message from the bot, user_msg is None.
+        initial_chat_history = [(None, greeting_message_text)]
+        updated_is_new_user_flag = False  # Greeting shown, so set to False
+        return initial_chat_history, updated_is_new_user_flag
+    else:
+        # Not a new user (or already greeted), so no initial message in chat history
+        # and the flag remains False.
+        return [], False
+
+def get_sorted_3d_model_examples():
+    examples_dir = Path("./generated_3d_models")
+    if not examples_dir.exists():
+        return []
+
+    # Get all 3D model files with desired extensions
+    model_files = [
+        file for file in examples_dir.glob("*")
+        if file.suffix.lower() in {".obj", ".glb", ".gltf"}
+    ]
+
+    # Sort files by creation time (latest first)
+    sorted_files = sorted(
+        model_files,
+        key=lambda x: x.stat().st_ctime,
+        reverse=True
+    )
+
+    # Convert to format [[path1], [path2], ...]
+    return [[str(file)] for file in sorted_files]
+
+
+CSS = """
+footer {visibility: hidden}
+.followup-question-button {font-size: 12px }
+.chat-tab {
+    font-size: 12px;
+    padding-inline: 0;
+}
+.chat-tab.active {
+    background-color: #654343;
+}
+#new-chat-button { background-color: #0f0f11; color: white; }
+
+.tab-button-control {
+    min-width: 0;
+    padding-left: 0;
+    padding-right: 0;
+}
+
+.sidebar-collapsed {
+    display: none !important;
+}
+
+.wrap.sidebar-parent {
+    min-height: 2400px !important;
+    height: 2400px !important;
+}
+
+#main-app {
+    height: 4600px; /* or 800px, or 100% */
+    overflow-y: auto; /* optional if you want it scrollable */\
+    padding-top:2000px;
+}
+
+"""
+
+# We set the ChatInterface textbox id to chat-textbox for this to work
+TRIGGER_CHATINTERFACE_BUTTON = """
+function triggerChatButtonClick() {
+
+    // Find the div with id "chat-textbox"
+    const chatTextbox = document.getElementById("chat-textbox");
+
+    if (!chatTextbox) {
+        console.error("Error: Could not find element with id 'chat-textbox'");
+        return;
+    }
+
+    // Find the button that is a descendant of the div
+    const button = chatTextbox.querySelector("button");
+
+    if (!button) {
+        console.error("Error: No button found inside the chat-textbox element");
+        return;
+    }
+
+    // Trigger the click event
+    button.click();
+}"""
+
+
+
+TOGGLE_SIDEBAR_JS = """
+function toggleSidebarVisibility() {
+    console.log("Called the side bar funnction");
+    const sidebar = document.querySelector(".sidebar svelte-7y53u7 open");
+    if (!sidebar) {
+        console.error("Error: Could not find the sidebar element");
+        return;
+    }
+    sidebar.classList.toggle("sidebar-collapsed");
+}
+"""
+
+if __name__ == "__main__":
+    logger.info("Starting the interface")
+    with gr.Blocks(title="DIYO is here", fill_height=True, css=CSS, elem_id="main-app") as demo:
+        is_new_user_for_greeting = gr.State(True)
+        chatbot_message_storage = gr.State([])
+        current_prompt_state = gr.BrowserState(
+            storage_key="current_prompt_state",
+            secret=BROWSER_STORAGE_SECRET,
+        )
+        current_uuid_state = gr.BrowserState(
+            uuid4,
+            storage_key="current_uuid_state",
+            secret=BROWSER_STORAGE_SECRET,
+        )
+        current_langgraph_state = gr.BrowserState(
+            dict(),
+            storage_key="current_langgraph_state",
+            secret=BROWSER_STORAGE_SECRET,
+        )
+        end_of_assistant_response_state = gr.State(
+            bool(),
+        )
+        # [uuid] -> summary of chat
+        sidebar_names_state = gr.BrowserState(
+            dict(),
+            storage_key="sidebar_names_state",
+            secret=BROWSER_STORAGE_SECRET,
+        )
+        # [uuid] -> {"graph": gradio_graph, "messages": messages}
+        offloaded_tabs_data_storage = gr.BrowserState(
+            dict(),
+            storage_key="offloaded_tabs_data_storage",
+            secret=BROWSER_STORAGE_SECRET,
+        )
+
+        chatbot_message_storage = gr.BrowserState(
+            [],
+            storage_key="chatbot_message_storage",
+            secret=BROWSER_STORAGE_SECRET,
+        )
+
+        with gr.Row(elem_classes="header-margin"):
+            # Add the decorated header with ASCII art
+            gr.Markdown("""
+            <div style="display: flex; align-items: center; justify-content: center; text-align: center; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 15px; margin-bottom: 20px; color: white; box-shadow: 0 4px 15px rgba(0,0,0,0.2);">
+
+            ╔══════════════════════════════════════════════════════════════════════════════════════════════╗
+            ║ ║
+            ║ █████╗ ██████╗ ███████╗███╗ ██╗████████╗ ██████╗ ██╗██╗ ██╗ ██████╗ ║
+            ║ ██╔══██╗██╔════╝ ██╔════╝████╗ ██║╚══██╔══╝ ██╔══██╗██║╚██╗ ██╔╝██╔═══██╗ ║
+            ║ ███████║██║ ███╗█████╗ ██╔██╗ ██║ ██║ ██║ ██║██║ ╚████╔╝ ██║ ██║ ║
+            ║ ██╔══██║██║ ██║██╔══╝ ██║╚██╗██║ ██║ ██║ ██║██║ ╚██╔╝ ██║ ██║ ║
+            ║ ██║ ██║╚██████╔╝███████╗██║ ╚████║ ██║ ██████╔╝██║ ██║ ╚██████╔╝ ║
+            ║ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═══╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ║
+            ║ ║
+            ╚══════════════════════════════════════════════════════════════════════════════════════════════╝
+
+            Let's build things, break boundaries with the help of AI!
+            </div>
+            """)
+
+        with gr.Row():
+            prompt_textbox = gr.Textbox(show_label=False, interactive=True)
+
+        with gr.Row():
+            checkbox_search_enabled = gr.Checkbox(
+                value=True,
+                label="Enable search",
+                show_label=True,
+                visible=search_enabled,
+                scale=1,
+            )
+            checkbox_download_website_text = gr.Checkbox(
+                value=True,
+                show_label=True,
+                label="Enable downloading text from urls",
+                scale=1,
+            )
+        with gr.Row():
+            with gr.Column(scale=2):
+                model_3d_output = gr.Model3D(
+                    clear_color=[0.0, 0.0, 0.0, 0.0],
+                    label="3D Model",
+                    height=400  # Adjust height to align better with chatbot
+                )
+            with gr.Column(scale=1):
+                # Input for the 3D model
+                # Using UploadButton is often clearer for users than a clickable Model3D input
+                model_3d_upload_button = gr.UploadButton(
+                    "Upload 3D Model (.obj, .glb, .gltf)",
+                    file_types=[".obj", ".glb", ".gltf"],
+                    # scale=0  # make it take less space if needed
+                )
+                model_3d_upload_button.upload(
+                    fn=load_mesh,
+                    inputs=model_3d_upload_button,
+                    outputs=model_3d_output
+                )
+                gr.Examples(
+                    label="Example 3D Models",
+                    examples=get_sorted_3d_model_examples(),
+                    inputs=model_3d_upload_button,  # Dummy input for examples to load into Model3D
+                    outputs=model_3d_output,
+                    fn=load_mesh,
+                    cache_examples=True  # Caching might be useful
+                )
+        with gr.Row():
+            multimodal = False
+            textbox_component = (
+                gr.MultimodalTextbox if multimodal else gr.Textbox
+            )
+
+            textbox = textbox_component(
+                show_label=False,
+                label="Message",
+                placeholder="Type a message...",
+                scale=1,
+                autofocus=True,
+                submit_btn=True,
+                stop_btn=True,
+                elem_id="chat-textbox",
+                lines=1,
+            )
+            chatbot = gr.Chatbot(
+                type="messages",
+                scale=0,
+                show_copy_button=True,
+                height=400,
+                editable="all",
+                elem_classes="main-chatbox"
+            )
+        with gr.Row():
+            followup_question_buttons = []
+            for i in range(FOLLOWUP_QUESTION_NUMBER):
+                btn = gr.Button(f"Button {i+1}", visible=False)
+                followup_question_buttons.append(btn)
+
+
+
+        tab_edit_uuid_state = gr.State(
+            str()
+        )
+        prompt_textbox.change(lambda prompt: prompt, inputs=[prompt_textbox], outputs=[current_prompt_state])
+        with gr.Sidebar() as sidebar:
+            @gr.render(inputs=[tab_edit_uuid_state, end_of_assistant_response_state, sidebar_names_state, current_uuid_state, chatbot, offloaded_tabs_data_storage])
+            def render_chats(tab_uuid_edit, end_of_chat_response, sidebar_summaries, active_uuid, messages, tabs):
+                current_tab_button_text = ""
+                if active_uuid not in sidebar_summaries:
+                    current_tab_button_text = "Current Chat"
+                elif active_uuid not in tabs:
+                    current_tab_button_text = sidebar_summaries[active_uuid]
+                if current_tab_button_text:
+                    unique_id = f"current-tab-{active_uuid}-{uuid4()}"
+                    gr.Button(
+                        current_tab_button_text,
+                        elem_classes=["chat-tab", "active"],
+                        elem_id=unique_id  # Add unique elem_id
+                    )
+                for chat_uuid, tab in reversed(tabs.items()):
+                    elem_classes = ["chat-tab"]
+                    if chat_uuid == active_uuid:
+                        elem_classes.append("active")
+                    button_uuid_state = gr.State(chat_uuid)
+                    with gr.Row():
+                        clear_tab_button = gr.Button(
+                            "🗑",
+                            scale=0,
+                            elem_classes=["tab-button-control"],
+                            elem_id=f"delete-btn-{chat_uuid}-{uuid4()}"  # Add unique ID
+                        )
+                        clear_tab_button.click(
+                            fn=delete_tab,
+                            inputs=[
+                                current_uuid_state,
+                                button_uuid_state,
+                                sidebar_names_state,
+                                offloaded_tabs_data_storage
+                            ],
+                            outputs=[
+                                sidebar_names_state,
+                                offloaded_tabs_data_storage,
+                                chat_interface.chatbot_value
+                            ]
+                        )
+                        chat_button_text = sidebar_summaries.get(chat_uuid)
+                        if not chat_button_text:
+                            chat_button_text = str(chat_uuid)
+                        if chat_uuid != tab_uuid_edit:
+                            set_edit_tab_button = gr.Button(
+                                "✎",
+                                scale=0,
+                                elem_classes=["tab-button-control"],
+                                elem_id=f"edit-btn-{chat_uuid}-{uuid4()}"  # Add unique ID
+                            )
+                            set_edit_tab_button.click(
+                                fn=lambda x: x,
+                                inputs=[button_uuid_state],
+                                outputs=[tab_edit_uuid_state]
+                            )
+                            chat_tab_button = gr.Button(
+                                chat_button_text,
+                                elem_id=f"chat-{chat_uuid}-{uuid4()}",  # Add truly unique ID
+                                elem_classes=elem_classes,
+                                scale=2
+                            )
+                            chat_tab_button.click(
+                                fn=switch_tab,
+                                inputs=[
+                                    button_uuid_state,
+                                    offloaded_tabs_data_storage,
+                                    current_langgraph_state,
+                                    current_uuid_state,
+                                    chatbot,
+                                    prompt_textbox
+                                ],
+                                outputs=[
+                                    current_langgraph_state,
+                                    current_uuid_state,
+                                    chat_interface.chatbot_value,
+                                    offloaded_tabs_data_storage,
+                                    prompt_textbox,
+                                    *followup_question_buttons
+                                ]
+                            )
+                        else:
+                            chat_tab_text = gr.Textbox(
+                                chat_button_text,
+                                scale=2,
+                                interactive=True,
+                                show_label=False,
+                                elem_id=f"edit-text-{chat_uuid}-{uuid4()}"  # Add unique ID
+                            )
+                            chat_tab_text.submit(
+                                fn=submit_edit_tab,
+                                inputs=[
+                                    button_uuid_state,
+                                    sidebar_names_state,
+                                    chat_tab_text
+                                ],
+                                outputs=[
+                                    sidebar_names_state,
+                                    tab_edit_uuid_state
+                                ]
+                            )
+            # )
+            # return chat_tabs, sidebar_summaries
+            new_chat_button = gr.Button("New Chat", elem_id="new-chat-button")
+        chatbot.clear(fn=clear, outputs=[current_langgraph_state, current_uuid_state])
+
+        chat_interface = gr.ChatInterface(
+            chatbot=chatbot,
+            fn=chat_fn,
+            additional_inputs=[
+                current_langgraph_state,
+                current_uuid_state,
+                prompt_textbox,
+                checkbox_search_enabled,
+                checkbox_download_website_text,
+            ],
+            additional_outputs=[
+                current_langgraph_state,
+                end_of_assistant_response_state
+            ],
+            type="messages",
+            multimodal=multimodal,
+            textbox=textbox,
+        )
+
+        new_chat_button.click(
+            new_tab,
+            inputs=[
+                current_uuid_state,
+                current_langgraph_state,
+                chatbot,
+                offloaded_tabs_data_storage,
+                prompt_textbox,
+                sidebar_names_state,
+            ],
+            outputs=[
+                current_uuid_state,
+                current_langgraph_state,
+                chat_interface.chatbot_value,
+                offloaded_tabs_data_storage,
+                prompt_textbox,
+                sidebar_names_state,
+                *followup_question_buttons,
+            ]
+        )
+
+
+        def click_followup_button(btn):
+            buttons = [gr.Button(visible=False) for _ in range(len(followup_question_buttons))]
+            return btn, *buttons
+
+
+        for btn in followup_question_buttons:
+            btn.click(
+                fn=click_followup_button,
+                inputs=[btn],
+                outputs=[
+                    chat_interface.textbox,
+                    *followup_question_buttons
+                ]
+            ).success(lambda: None, js=TRIGGER_CHATINTERFACE_BUTTON)
+
+        chatbot.change(
+            fn=populate_followup_questions,
+            inputs=[
+                end_of_assistant_response_state,
+                chatbot,
+                current_uuid_state
+            ],
+            outputs=[
+                *followup_question_buttons,
+                end_of_assistant_response_state
+            ],
+            trigger_mode="multiple"
+        )
+        chatbot.change(
+            fn=summarize_chat,
+            inputs=[
+                end_of_assistant_response_state,
+                chatbot,
+                sidebar_names_state,
+                current_uuid_state
+            ],
+            outputs=[
+                sidebar_names_state,
+                end_of_assistant_response_state
+            ],
+            trigger_mode="multiple"
+        )
+        chatbot.change(
+            fn=lambda x: x,
+            inputs=[chatbot],
+            outputs=[chatbot_message_storage],
+            trigger_mode="always_last"
+        )
+
+        @demo.load(  # Or demo.load
+            inputs=[
+                is_new_user_for_greeting,
+                chatbot_message_storage  # Pass the current stored messages
+            ],
+            outputs=[
+                chatbot_message_storage,  # Update the stored messages with the greeting
+                is_new_user_for_greeting  # Update the flag
+            ]
+        )
+        def handle_initial_greeting_load(current_is_new_user_flag: bool, existing_chat_history: list):
+            """
+            This function is called by the @app.load decorator above.
+            It decides whether to add a greeting to the chat history.
+            """
+            # You can either put the logic directly here, or call the globally defined one.
+            # Option 1: Call the globally defined function (cleaner if it's complex)
+            # Make sure 'display_initial_greeting_on_load' is defined globally in your app.py
+            # For this example, I'm assuming 'display_initial_greeting_on_load' is the one we defined earlier:
+            # def display_initial_greeting_on_load(current_is_new_user_flag: bool, existing_chat_history: list):
+            #     if current_is_new_user_flag:
+            #         greeting_message_text = load_initial_greeting()  # from graph.py
+            #         greeting_entry = (None, greeting_message_text)
+            #         if not isinstance(existing_chat_history, list): existing_chat_history = []
+            #         updated_chat_history = [greeting_entry] + existing_chat_history
+            #         updated_is_new_user_flag = False
+            #         logger.info("Greeting added for new user.")
+            #         return updated_chat_history, updated_is_new_user_flag
+            #     else:
+            #         logger.info("Not a new user or already greeted, no greeting added.")
+            #         return existing_chat_history, False
+            #
+            #     return display_initial_greeting_on_load(current_is_new_user_flag, existing_chat_history)
+
+            # Option 2: Put logic directly here (if simple enough)
+            if current_is_new_user_flag:
+                greeting_message_text = load_initial_greeting()  # Make sure load_initial_greeting is imported
+                greeting_entry = {"role": "assistant", "content": greeting_message_text}
+                # Ensure existing_chat_history is a list before concatenation
+                if not isinstance(existing_chat_history, list):
+                    existing_chat_history = []
+                updated_chat_history = [greeting_entry] + existing_chat_history
+                updated_is_new_user_flag = False
+                logger.info("Greeting added for new user via handle_initial_greeting_load.")
+                return updated_chat_history, updated_is_new_user_flag
+            else:
+                logger.info("Not a new user or already greeted (handle_initial_greeting_load path).")
+                return existing_chat_history, False
+
+        @demo.load(inputs=[chatbot_message_storage], outputs=[chat_interface.chatbot_value])
+        def load_messages(messages):
+            return messages
+
+        @demo.load(inputs=[current_prompt_state], outputs=[prompt_textbox])
+        def load_prompt(current_prompt):
+            return current_prompt
+
+
+    demo.launch(server_name="127.0.0.1", server_port=8080, share=True)
+
+    # demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
+
+
+
+
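
Note on the logging setup: app.py reads logging-config.json and hands it to logging.config.dictConfig (lines 45-48 above). That JSON file is part of this commit, but its diff is not rendered on this page, so the sketch below only illustrates the general shape of a dictConfig mapping that the loading code would accept; the formatter, handler names, and levels are assumptions, not the committed file's contents.

# Minimal sketch of a dictConfig-compatible setup (assumed, not the real logging-config.json).
import logging
import logging.config

ASSUMED_LOGGING_CONFIG = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "simple": {"format": "%(asctime)s %(levelname)s %(name)s: %(message)s"},
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "level": "INFO",
            "formatter": "simple",
        },
        "file": {
            "class": "logging.FileHandler",
            "filename": "app.log",  # app.log also appears in this commit; linking the two is an assumption
            "level": "DEBUG",
            "formatter": "simple",
        },
    },
    "root": {"level": "DEBUG", "handlers": ["console", "file"]},
}

logging.config.dictConfig(ASSUMED_LOGGING_CONFIG)
logging.getLogger(__name__).info("Logging configured")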
app.sh
ADDED
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+echo "Fetching latest code from origin/main..."
+git fetch origin
+git reset --hard origin/main
+
+echo "Starting the app..."
+python app.py  # or whatever your main entry point is
brainstorm_test.txt
ADDED
@@ -0,0 +1,165 @@
+system_prompt: |-
+You are a creative and helpful AI assistant acting as a **DIY Project Brainstorming Facilitator**. Your primary goal is to collaborate with the user to finalize **ONE specific, viable DIY project idea**. You will achieve this by understanding user preferences, suggesting ideas, refining them collaboratively, and using the `human_assistance` tool for direct user interaction and clarification.
+
+f"CURRENT PROJECT STATUS:\n{status_summary}\n\n"
+f"Based on the status, the most logical next stage appears to be: **'{proposed_next_stage}'**.\n\n"
+
+just trying to run some code and and
+
+**Critical Criteria for the Final DIY Project Idea (MUST be met):**
+1. **Buildable:** Achievable by an average person with basic DIY skills.
+2. **Common Materials/Tools:** Uses only materials (e.g., wood, screws, glue, paint, fabric, cardboard) and basic hand tools (e.g., screwdrivers, hammers, saws, drills) commonly available in general hardware stores, craft stores, or supermarkets worldwide.
+3. **Avoid Specializations:** Explicitly AVOID projects requiring specialized electronic components, 3D printing, specific brand items not universally available, or complex machinery.
+4. **Tangible Product:** The final result must be a physical, tangible item.
+
+**Your Process for Each Brainstorming Interaction Cycle:**
+
+1. **THOUGHT:**
+* First, clearly state your understanding of the user's current input or the state of the brainstorming (e.g., "User is looking for initial ideas," "User proposed an idea that needs refinement against criteria," "We are close to finalizing an idea.").
+* Outline your plan for this interaction turn. This usually involves:
+* Engaging with the user's latest message.
+* Proposing a new idea or refining an existing one to meet the **Critical Criteria**.
+* Identifying if a question to the user is needed.
+* **Tool Identification (`human_assistance`):** If you need to ask the user a question to:
+* Understand their interests or initial thoughts.
+* Clarify their preferences or skill level (gently).
+* Get feedback on a proposed idea.
+* Refine an idea to meet criteria.
+You MUST state your intention to use the `human_assistance` tool and clearly formulate the question you will pass as the `query` argument.
+* **Idea Finalization Check:** If you believe a current idea, discussed with the user, clearly meets ALL **Critical Criteria** and the user seems positive, note your intention to output the `IDEA FINALIZED` signal.
+
+2. **TOOL USE (`human_assistance` - If Necessary):**
+* If your plan requires asking the user a question, you will then invoke the `human_assistance` tool with your formulated query.
+* (Agent Builder Note: The LLM will output a tool call here. The system executes it.)
+
+3. **RESPONSE SYNTHESIS / IDEA FINALIZATION:**
+* After any necessary tool use (or if no tool was needed for this turn), synthesize your response.
+* **If an idea is finalized:** When you determine that a specific project idea meets ALL **Critical Criteria** and the user has positively engaged with it, your response for this turn MUST BE *ONLY* the exact phrase:
+`IDEA FINALIZED: [Name of the Idea]`
+(Example: `IDEA FINALIZED: Simple Wooden Spice Rack`)
+Do not add any other text before or after this phrase if you use it. This signals the end of brainstorming.
+* **If brainstorming continues (no finalization yet):**
+* Provide your conversational response, suggestions, or refinements.
+* If you didn't use a tool in step 2 but are now responding, ensure your response is engaging and moves the brainstorming forward.
+* If you just made a tool call for `human_assistance`, your main output here might be the tool call itself, or a very brief lead-in text if the system allows. Await the user's response to your question (which will come as a new message).
+
+**General Guidelines for Your Interaction:**
+* **Collaborative & Iterative:** Work *with* the user. It's a conversation.
+* **Criteria Focused:** Always gently guide ideas towards meeting all **Critical Criteria**. If a user's idea doesn't fit, explain why clearly and kindly, then suggest alternatives or modifications.
+* **One Main Idea at a Time:** To avoid confusion, try to focus the discussion on one main project idea or a small set of comparable alternatives at any given time.
+* **User-Centric:** Your goal is to help the user find a project *they* will be happy and successful with.
+* **Clarity:** Be clear in your suggestions and questions.
+* **Tool Protocol:** When you decide to use `human_assistance`, formulate the tool call correctly. Do not try to answer the question you intend to ask the user.
+
+---
+**Examples of How You Should Operate in Brainstorming Mode:**
+
+**Example 1: Initial Interaction, User is Vague**
+User Message: "I want to make something for my room."
+
+THOUGHT:
+The user has a general interest (something for their room) but no specific idea. I need to gather more information about their preferences or suggest some categories while keeping the DIY criteria in mind. I will use `human_assistance` to ask a clarifying question.
+My plan is to:
+1. Acknowledge their interest.
+2. Ask for more details about what kind of item or style they like for their room, using the `human_assistance` tool.
+Tool: `human_assistance`
+Argument `query`: "That's a great starting point! To help me suggest some ideas, what kind of item are you thinking of for your room? For example, are you looking for some decor, a small piece of furniture, or something for organization?"
+
+(LLM then generates the tool call for `human_assistance` with the above query.)
+
+RESPONSE SYNTHESIS:
+(The system would show the tool call. If a textual response is also generated by the LLM before the tool call, it might be: "Okay, let's brainstorm some ideas for your room!")
+
+---
+**Example 2: User Suggests an Idea that Needs Refinement**
+User Message: "I'd like to build a custom LED matrix display powered by a Raspberry Pi."
+
+THOUGHT:
+The user's idea ("custom LED matrix display powered by a Raspberry Pi") involves specialized electronic components (LED matrix, Raspberry Pi) which violates one of the Critical Criteria. I need to gently explain this and suggest an alternative that fits the criteria, or ask if they are open to a non-electronic project with a similar theme if possible.
+My plan is to:
+1. Acknowledge the coolness of their idea.
+2. Explain the constraint regarding specialized electronics.
+3. Propose a simpler, non-electronic decorative item or ask if they have other interests that fit the criteria using `human_assistance`.
+Tool: `human_assistance`
+Argument `query`: "A custom LED display sounds like a very cool project! For the kinds of DIY projects we're focusing on here, we try to stick to materials and tools easily found in regular hardware or craft stores, avoiding specialized electronics like Raspberry Pis for now. Would you be interested in brainstorming a different kind of decorative item for your room, perhaps using wood, paint, or fabric? Or is there another type of project you've been thinking about?"
+
+(LLM then generates the tool call.)
+
+RESPONSE SYNTHESIS:
+(Similar to above, LLM outputs the tool call.)
+
+---
+**Example 3: Idea Converging and Meeting Criteria**
+User Message: "Okay, a simple hanging planter made from reclaimed wood sounds good. I think I can manage that."
+
+THOUGHT:
+The user is positive about "simple hanging planter made from reclaimed wood."
+Let's check criteria:
+1. Buildable: "Simple hanging planter" sounds buildable with basic skills.
+2. Common Materials/Tools: "Reclaimed wood" is common, and tools would be basic (saw, drill, screws/rope).
+3. Avoid Specializations: No electronics or complex machinery.
+4. Tangible Product: Yes, it's a physical planter.
+All criteria seem met, and the user sounds agreeable. I should output the `IDEA FINALIZED` signal.
+
+RESPONSE SYNTHESIS:
+IDEA FINALIZED: Simple hanging planter from reclaimed wood
+
+---
+**Example 4: Brainstorming without immediate tool use**
+User Message: (After AI asked "What kind of materials do you like?") "I like working with wood, but I'm a beginner."
+
+THOUGHT:
+The user likes wood and is a beginner. I should suggest a simple wooden project that fits the criteria. I don't need to ask another question immediately. I can make a direct suggestion.
+My plan is to:
+1. Acknowledge their preference and skill level.
+2. Propose 1-2 simple beginner-friendly wooden project ideas that meet all criteria.
+No tool needed for this specific turn.
+
+RESPONSE SYNTHESIS:
+That's great! Wood is very versatile. For a beginner, how about a simple wooden coaster set, a small picture frame, or perhaps a basic bookend? These projects use minimal materials and are good for practicing fundamental skills. Do any of those sound interesting, or would you like other suggestions?
+Use code with caution.
+How this maps to your brainstorming_node code:
+guidance_prompt_text becomes this new system prompt:
+Instead of your current guidance_prompt_text which is a partial instruction, you would replace it entirely with the detailed prompt above.
+# In your brainstorming_node.py (or wherever model components are defined)
+BRAINSTORMING_SYSTEM_PROMPT = """
+You are a creative and helpful AI assistant acting as a DIY Project Brainstorming Facilitator...
+... (rest of the detailed prompt above) ...
+"""
+Use code with caution.
+Python
+Constructing final_prompt:
+Your code currently does:
+final_prompt = "\n".join([guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
+With the new approach, ASSISTANT_SYSTEM_PROMPT_BASE (the original methodical prompt) is no longer suitable to be appended directly as it would conflict.
+You should simplify this to:
+# In your brainstorming_node function
+# Remove the old guidance_prompt_text variable from within the function if BRAINSTORMING_SYSTEM_PROMPT is defined globally/imported
+
+system_message_content = BRAINSTORMING_SYSTEM_PROMPT
+if state.prompt:  # If there's a new user message relevant to this turn
+system_message_content += f"\n\nConsider the user's latest input: {state.prompt}"
+
+prompt = ChatPromptTemplate.from_messages(
+[
+("system", system_message_content),
+MessagesPlaceholder(variable_name="messages"),  # for conversation history
+]
+)
+Use code with caution.
+Python
+The state.prompt (if it represents the very latest user query/message for the current turn) can be appended to the system prompt or, more typically, be the last HumanMessage in the state.messages list. The MessagesPlaceholder will handle the history.
+Tool Binding:
+Your existing assistant_model = model.bind_tools([human_assistance]) is correct. The new prompt now clearly instructs the LLM when and how to call human_assistance.
+Logic for IDEA FINALIZED::
+Your existing code:
+if content.startswith("IDEA FINALIZED:"):
+print('✅ final idea')
+updates.update({
+"brainstorming_complete": True,
+# ...
+})
+Use code with caution.
+Python
+This perfectly matches the new prompt's instruction for signaling finalization.
+By using this tailored system prompt, the LLM will have a much clearer understanding of its role, goals, constraints, and expected output format for the brainstorming phase, making it more effective in guiding the user to a suitable DIY project idea.
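
Lines 121-165 of brainstorm_test.txt above describe how this facilitator prompt is meant to be wired into the brainstorming node. The condensed sketch below pulls those snippets together; it assumes the model, the human_assistance tool, and a state object with prompt and messages fields that live in graph.py (whose diff is not shown in this section), so treat it as illustrative rather than the committed implementation.

# Sketch only: consolidates the snippets quoted in brainstorm_test.txt.
# `model`, `human_assistance`, and the state object come from graph.py, which is not shown here,
# so they are passed in as parameters for illustration.
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

BRAINSTORMING_SYSTEM_PROMPT = "...full facilitator prompt from above..."

async def brainstorming_node(state, model, human_assistance):
    system_message_content = BRAINSTORMING_SYSTEM_PROMPT
    if state.prompt:  # latest user input relevant to this turn
        system_message_content += f"\n\nConsider the user's latest input: {state.prompt}"

    prompt = ChatPromptTemplate.from_messages([
        ("system", system_message_content),
        MessagesPlaceholder(variable_name="messages"),  # conversation history
    ])

    assistant_model = model.bind_tools([human_assistance])
    chain = prompt | assistant_model
    response = await chain.ainvoke({"messages": state.messages})

    updates = {"messages": state.messages + [response]}
    content = response.content if isinstance(response.content, str) else ""
    if content.startswith("IDEA FINALIZED:"):
        updates["brainstorming_complete"] = True
    return updates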
brainstorming_system_prompt.txt
ADDED
@@ -0,0 +1,56 @@
+=# Assume this is defined globally or imported
+# This is the detailed prompt I provided in the previous answer.
+# Make sure this string ITSELF does not contain unresolved f-string placeholders like {status_summary}
+# or accidental placeholders like {\n "brainstorming_complete"} or {state.prompt}
+BRAINSTORMING_FACILITATOR_SYSTEM_PROMPT = """
+You are a creative and helpful AI assistant acting as a **DIY Project Brainstorming Facilitator**. Your primary goal is to collaborate with the user to finalize **ONE specific, viable DIY project idea**. You will achieve this by understanding user preferences, suggesting ideas, refining them collaboratively, and using the `human_assistance` tool for direct user interaction and clarification.
+
+**Critical Criteria for the Final DIY Project Idea (MUST be met):**
+1. **Buildable:** Achievable by an average person with basic DIY skills.
+2. **Common Materials/Tools:** Uses only materials (e.g., wood, screws, glue, paint, fabric, cardboard) and basic hand tools (e.g., screwdrivers, hammers, saws, drills) commonly available in general hardware stores, craft stores, or supermarkets worldwide.
+3. **Avoid Specializations:** Explicitly AVOID projects requiring specialized electronic components, 3D printing, specific brand items not universally available, or complex machinery.
+4. **Tangible Product:** The final result must be a physical, tangible item.
+
+**Your Process for Each Brainstorming Interaction Cycle:**
+
+1. **THOUGHT:**
+* First, clearly state your understanding of the user's current input or the state of the brainstorming (e.g., "User is looking for initial ideas," "User proposed an idea that needs refinement against criteria," "We are close to finalizing an idea.").
+* Outline your plan for this interaction turn. This usually involves:
+* Engaging with the user's latest message.
+* Proposing a new idea or refining an existing one to meet the **Critical Criteria**.
+* Identifying if a question to the user is needed.
+* **Tool Identification (`human_assistance`):** If you need to ask the user a question to:
+* Understand their interests or initial thoughts.
+* Clarify their preferences or skill level (gently).
+* Get feedback on a proposed idea.
+* Refine an idea to meet criteria.
+You MUST state your intention to use the `human_assistance` tool and clearly formulate the question you will pass as the `query` argument.
+* **Idea Finalization Check:** If you believe a current idea, discussed with the user, clearly meets ALL **Critical Criteria** and the user seems positive, note your intention to output the `IDEA FINALIZED` signal.
+
+2. **TOOL USE (`human_assistance` - If Necessary):**
+* If your plan requires asking the user a question, you will then invoke the `human_assistance` tool with your formulated query.
+* (Agent Builder Note: The LLM will output a tool call here. The system executes it.)
+
+3. **RESPONSE SYNTHESIS / IDEA FINALIZATION:**
+* After any necessary tool use (or if no tool was needed for this turn), synthesize your response.
+* **If an idea is finalized:** When you determine that a specific project idea meets ALL **Critical Criteria** and the user has positively engaged with it, your response for this turn MUST BE *ONLY* the exact phrase:
+`IDEA FINALIZED: [Name of the Idea]`
+(Example: `IDEA FINALIZED: Simple Wooden Spice Rack`)
+Do not add any other text before or after this phrase if you use it. This signals the end of brainstorming.
+* **If brainstorming continues (no finalization yet):**
+* Provide your conversational response, suggestions, or refinements.
+* If you didn't use a tool in step 2 but are now responding, ensure your response is engaging and moves the brainstorming forward.
+* If you just made a tool call for `human_assistance`, your main output here might be the tool call itself, or a very brief lead-in text if the system allows. Await the user's response to your question (which will come as a new message).
+
+**General Guidelines for Your Interaction:**
+* **Collaborative & Iterative:** Work *with* the user. It's a conversation.
+* **Criteria Focused:** Always gently guide ideas towards meeting all **Critical Criteria**. If a user's idea doesn't fit, explain why clearly and kindly, then suggest alternatives or modifications.
+* **One Main Idea at a Time:** To avoid confusion, try to focus the discussion on one main project idea or a small set of comparable alternatives at any given time.
+* **User-Centric:** Your goal is to help the user find a project *they* will be happy and successful with.
+* **Clarity:** Be clear in your suggestions and questions.
+* **Tool Protocol:** When you decide to use `human_assistance`, formulate the tool call correctly. Do not try to answer the question you intend to ask the user.
+
+---
+**Examples of How You Should Operate in Brainstorming Mode:** (Include examples as before)
+... (rest of the prompt) ...
+"""
curls
ADDED
@@ -0,0 +1,26 @@
1 |
+
PowerShell 7.5.1
|
2 |
+
PS C:\Users\jayan> curl "https://wishwa-code--trellis-3d-model-generate-dev.modal.run/?image_url=A chair looking like a avocado?q=80&w=1974&auto=format&fit=crop&ixlib=rb-4.1.0&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D&simplify=0.95&texture_size=1024&sparse_sampling_steps=12&sparse_sampling_cfg=7.5&slat_sampling_steps=12&slat_sampling_cfg=3&seed=42&output_format=glb" --output model1.glb
|
3 |
+
curl: (3) URL rejected: Malformed input to a URL function
|
4 |
+
PS C:\Users\jayan> curl "https://wishwa-code--trellis-3d-model-generate-dev.modal.run/?image_url=A%chair%looking%like%a%avocado?q=80&w=1974&auto=format&fit=crop&ixlib=rb-4.1.0&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D&simplify=0.95&texture_size=1024&sparse_sampling_steps=12&sparse_sampling_cfg=7.5&slat_sampling_steps=12&slat_sampling_cfg=3&seed=42&output_format=glb" --output model1.glb
|
5 |
+
% Total % Received % Xferd Average Speed Time Time Time Current
|
6 |
+
Dload Upload Total Spent Left Speed
|
7 |
+
100 90 100 90 0 0 1 0 0:01:30 0:00:50 0:00:40 20
|
8 |
+
PS C:\Users\jayan> curl "https://wishwa-code--trellis-3d-model-generate-dev.modal.run/?image_url=A%chair%looking%like%a%avocado?q=80&w=1974&auto=format&fit=crop&ixlib=rb-4.1.0&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D&simplify=0.95&texture_size=1024&sparse_sampling_steps=12&sparse_sampling_cfg=7.5&slat_sampling_steps=12&slat_sampling_cfg=3&seed=42&output_format=glb" --output model1.glb
|
9 |
+
% Total % Received % Xferd Average Speed Time Time Time Current
|
10 |
+
Dload Upload Total Spent Left Speed
|
11 |
+
0 0 0 0 0 0 0 0 --:--:-- 0:00:50 --:--:-- 0
|
12 |
+
PS C:\Users\jayan> ^C
|
13 |
+
PS C:\Users\jayan> ^C
|
14 |
+
PS C:\Users\jayan> ^C
|
15 |
+
PS C:\Users\jayan> curl "https://wishwa-code--trellis-3d-model-generate-dev.modal.run/?image_url=A%chair%looking%like%a%avocado?q=80&w=1974&auto=format&fit=crop&ixlib=rb-4.1.0&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D&simplify=0.95&texture_size=1024&sparse_sampling_steps=12&sparse_sampling_cfg=7.5&slat_sampling_steps=12&slat_sampling_cfg=3&seed=42&output_format=glb" --output model1.glb
|
16 |
+
% Total % Received % Xferd Average Speed Time Time Time Current
|
17 |
+
Dload Upload Total Spent Left Speed
|
18 |
+
100 90 100 90 0 0 1 0 0:01:30 0:01:07 0:00:23 20
|
19 |
+
PS C:\Users\jayan> curl "https://wishwa-code--trellis-3d-model-generate-dev.modal.run/?image_url=https://images.unsplash.com/photo-1748973750733-d037dded16dd?q=80&w=1974&auto=format&fit=crop&ixlib=rb-4.1.0&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D&simplify=0.95&texture_size=1024&sparse_sampling_steps=12&sparse_sampling_cfg=7.5&slat_sampling_steps=12&slat_sampling_cfg=3&seed=42&output_format=glb" --output model1.glb
|
20 |
+
% Total % Received % Xferd Average Speed Time Time Time Current
|
21 |
+
Dload Upload Total Spent Left Speed
|
22 |
+
100 1820k 100 1820k 0 0 18953 0 0:01:38 0:01:38 --:--:-- 140k
|
23 |
+
PS C:\Users\jayan>
|
24 |
+
|
25 |
+
|
26 |
+
|
graph.py
ADDED
@@ -0,0 +1,1298 @@
1 |
+
|
2 |
+
|
3 |
+
import logging
|
4 |
+
import os
|
5 |
+
import uuid
|
6 |
+
import aiohttp
|
7 |
+
import json
|
8 |
+
import httpx
|
9 |
+
|
10 |
+
from typing import Annotated
|
11 |
+
from typing import TypedDict, List, Optional, Literal
|
12 |
+
|
13 |
+
from typing_extensions import TypedDict
|
14 |
+
from pydantic import BaseModel, Field
|
15 |
+
from trafilatura import extract
|
16 |
+
|
17 |
+
from langchain_core.messages import AIMessage, HumanMessage, AnyMessage, ToolCall, SystemMessage, ToolMessage
|
18 |
+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
19 |
+
from langchain_core.tools import tool
|
20 |
+
|
21 |
+
from langchain_community.tools import TavilySearchResults
|
22 |
+
|
23 |
+
from langgraph.graph.state import CompiledStateGraph
|
24 |
+
from langgraph.graph import StateGraph, START, END, add_messages
|
25 |
+
|
26 |
+
from langgraph.prebuilt import ToolNode
|
27 |
+
from langgraph.prebuilt import ToolNode, tools_condition
|
28 |
+
|
29 |
+
from langgraph.checkpoint.memory import MemorySaver
|
30 |
+
|
31 |
+
from langgraph.types import Command, interrupt
|
32 |
+
|
33 |
+
from langchain_anthropic import ChatAnthropic
|
34 |
+
from langchain_openai import ChatOpenAI
|
35 |
+
|
36 |
+
from mistralai import Mistral
|
37 |
+
from langchain.chat_models import init_chat_model
|
38 |
+
from langchain_core.messages.utils import convert_to_openai_messages
|
39 |
+
|
40 |
+
|
41 |
+
|
42 |
+
class State(TypedDict):
|
43 |
+
messages: Annotated[list, add_messages]
|
44 |
+
|
45 |
+
class DebugToolNode(ToolNode):
|
46 |
+
async def invoke(self, state, config=None):
|
47 |
+
print("🛠️ ToolNode activated")
|
48 |
+
print(f"Available tools: {[tool.name for tool in self.tool_map.values()]}")
|
49 |
+
print(f"Tool calls in last message: {state.messages[-1].tool_calls}")
|
50 |
+
return await super().invoke(state, config)
|
51 |
+
|
52 |
+
|
53 |
+
logger = logging.getLogger(__name__)
|
54 |
+
ASSISTANT_SYSTEM_PROMPT_BASE = """"""
|
55 |
+
search_enabled = bool(os.environ.get("TAVILY_API_KEY"))
|
56 |
+
|
57 |
+
try:
|
58 |
+
with open('brainstorming_system_prompt.txt', 'r') as file:
|
59 |
+
brainstorming_system_prompt = file.read()
|
60 |
+
except FileNotFoundError:
|
61 |
+
print("File 'system_prompt.txt' not found!")
|
62 |
+
except Exception as e:
|
63 |
+
print(f"Error reading file: {e}")
|
64 |
+
|
65 |
+
def evaluate_idea_completion(response) -> bool:
|
66 |
+
"""
|
67 |
+
Evaluates whether the assistant's response indicates a complete DIY project idea.
|
68 |
+
You can customize the logic based on your specific criteria.
|
69 |
+
"""
|
70 |
+
# Example logic: Check if the response contains certain keywords
|
71 |
+
required_keywords = ["materials", "dimensions", "tools", "steps"]
|
72 |
+
|
73 |
+
# Determine the type of response and extract text accordingly
|
74 |
+
if isinstance(response, dict):
|
75 |
+
# If response is a dictionary, extract values and join them into a single string
|
76 |
+
response_text = ' '.join(str(value).lower() for value in response.values())
|
77 |
+
elif isinstance(response, str):
|
78 |
+
# If response is a string, convert it to lowercase
|
79 |
+
response_text = response.lower()
|
80 |
+
else:
|
81 |
+
# If response is of an unexpected type, convert it to string and lowercase
|
82 |
+
response_text = str(response).lower()
|
83 |
+
|
84 |
+
return all(keyword in response_text for keyword in required_keywords)
|
85 |
+
|
86 |
+
@tool
|
87 |
+
async def human_assistance(query: str) -> str:
|
88 |
+
"""Request assistance from a human."""
|
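# interrupt() pauses this graph run and surfaces {"query": ...} to the caller;
# the run is later resumed with Command(resume={"data": <human reply>}), which becomes human_response.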
89 |
+
human_response = interrupt({"query": query})  # interrupt() is synchronous; it pauses the graph until resumed
|
90 |
+
return human_response["data"]
|
91 |
+
|
92 |
+
@tool
|
93 |
+
async def download_website_text(url: str) -> str:
|
94 |
+
"""Download the text from a website"""
|
95 |
+
try:
|
96 |
+
async with aiohttp.ClientSession() as session:
|
97 |
+
async with session.get(url) as response:
|
98 |
+
response.raise_for_status()
|
99 |
+
downloaded = await response.text()
|
100 |
+
result = extract(downloaded, include_formatting=True, include_links=True, output_format='json', with_metadata=True)
|
101 |
+
return result or "No text found on the website"
|
102 |
+
except Exception as e:
|
103 |
+
logger.error(f"Failed to download {url}: {str(e)}")
|
104 |
+
return f"Error retrieving website content: {str(e)}"
|
105 |
+
|
106 |
+
@tool
|
107 |
+
async def finalize_idea() -> str:
|
108 |
+
"""Marks the brainstorming phase as complete. This function does nothing else."""
|
109 |
+
return "Brainstorming finalized."
|
110 |
+
|
111 |
+
tools = [download_website_text, human_assistance, finalize_idea]
|
112 |
+
memory = MemorySaver()
|
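# In-memory checkpointer: stores per-thread graph state so interrupted runs can be resumed within the same process.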
113 |
+
|
114 |
+
|
115 |
+
if search_enabled:
|
116 |
+
tavily_search_tool = TavilySearchResults(
|
117 |
+
max_results=5,
|
118 |
+
search_depth="advanced",
|
119 |
+
include_answer=True,
|
120 |
+
include_raw_content=True,
|
121 |
+
)
|
122 |
+
tools.append(tavily_search_tool)
|
123 |
+
else:
|
124 |
+
print("TAVILY_API_KEY environment variable not found. Websearch disabled")
|
125 |
+
|
126 |
+
weak_model = ChatOpenAI(
|
127 |
+
model="gpt-4o",
|
128 |
+
temperature=0,
|
129 |
+
max_tokens=None,
|
130 |
+
timeout=None,
|
131 |
+
max_retries=2,
|
132 |
+
# api_key="...", # if you prefer to pass api key in directly instaed of using env vars
|
133 |
+
# base_url="...",
|
134 |
+
# organization="...",
|
135 |
+
# other params...
|
136 |
+
)
|
137 |
+
|
138 |
+
api_key = os.environ["MISTRAL_API_KEY"]
|
139 |
+
model = "mistral-large-latest"
|
140 |
+
|
141 |
+
client = Mistral(api_key=api_key)
|
142 |
+
|
143 |
+
|
144 |
+
# ChatAnthropic(
|
145 |
+
# model="claude-3-5-sonnet-20240620",
|
146 |
+
# temperature=0,
|
147 |
+
# max_tokens=1024,
|
148 |
+
# timeout=None,
|
149 |
+
# max_retries=2,
|
150 |
+
# # other params...
|
151 |
+
# )
|
152 |
+
search_enabled = bool(os.environ.get("TAVILY_API_KEY"))
|
153 |
+
|
154 |
+
if not os.environ.get("OPENAI_API_KEY"):
|
155 |
+
print('OpenAI API key not found')
|
156 |
+
|
157 |
+
prompt_planning_model = ChatOpenAI(
|
158 |
+
model="gpt-4o",
|
159 |
+
temperature=0,
|
160 |
+
max_tokens=None,
|
161 |
+
timeout=None,
|
162 |
+
max_retries=2,
|
163 |
+
# api_key="...", # if you prefer to pass api key in directly instaed of using env vars
|
164 |
+
# base_url="...",
|
165 |
+
# organization="...",
|
166 |
+
# other params...
|
167 |
+
)
|
168 |
+
|
169 |
+
threed_object_gen_model = ChatOpenAI(
|
170 |
+
model="gpt-4o",
|
171 |
+
temperature=0,
|
172 |
+
max_tokens=None,
|
173 |
+
timeout=None,
|
174 |
+
max_retries=2,
|
175 |
+
# api_key="...", # if you prefer to pass api key in directly instaed of using env vars
|
176 |
+
# base_url="...",
|
177 |
+
# organization="...",
|
178 |
+
# other params...
|
179 |
+
)
|
180 |
+
|
181 |
+
model = weak_model
|
182 |
+
assistant_model = weak_model
|
183 |
+
|
184 |
+
class GraphProcessingState(BaseModel):
|
185 |
+
# user_input: str = Field(default_factory=str, description="The original user input")
|
186 |
+
messages: Annotated[list[AnyMessage], add_messages] = Field(default_factory=list)
|
187 |
+
prompt: str = Field(default_factory=str, description="The prompt to be used for the model")
|
188 |
+
tools_enabled: dict = Field(default_factory=dict, description="The tools enabled for the assistant")
|
189 |
+
search_enabled: bool = Field(default=True, description="Whether to enable search tools")
|
190 |
+
next_stage: str = Field(default="", description="The next stage to execute, decided by the guidance node.")
|
191 |
+
|
192 |
+
tool_call_required: bool = Field(default=False, description="Whether a tool should be called from brainstorming.")
|
193 |
+
loop_brainstorming: bool = Field(default=False, description="Whether to loop back to brainstorming for further iteration.")
|
194 |
+
|
195 |
+
# Completion flags for each stage
|
196 |
+
idea_complete: bool = Field(default=False)
|
197 |
+
brainstorming_complete: bool = Field(default=False)
|
198 |
+
planning_complete: bool = Field(default=False)
|
199 |
+
drawing_complete: bool = Field(default=False)
|
200 |
+
product_searching_complete: bool = Field(default=False)
|
201 |
+
purchasing_complete: bool = Field(default=False)
|
202 |
+
|
203 |
+
|
204 |
+
generated_image_url_from_dalle: str = Field(default="", description="URL of the image generated by DALL-E.")
|
205 |
+
|
206 |
+
|
207 |
+
|
208 |
+
async def guidance_node(state: GraphProcessingState, config=None):
|
209 |
+
|
210 |
+
# print(f"Prompt: {state.prompt}")
|
211 |
+
# print(f"Prompt: {state.prompt}")
|
212 |
+
# # print(f"Message: {state.messages}")
|
213 |
+
# print(f"Tools Enabled: {state.tools_enabled}")
|
214 |
+
# print(f"Search Enabled: {state.search_enabled}")
|
215 |
+
# for message in state.messages:
|
216 |
+
# print(f'\ncomplete message', message)
|
217 |
+
# if isinstance(message, HumanMessage):
|
218 |
+
# print(f"Human: {message.content}\n")
|
219 |
+
# elif isinstance(message, AIMessage):
|
220 |
+
# # Check if content is non-empty
|
221 |
+
# if message.content:
|
222 |
+
# # If content is a list (e.g., list of dicts), extract text
|
223 |
+
# if isinstance(message.content, list):
|
224 |
+
# texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
|
225 |
+
# if texts:
|
226 |
+
# print(f"AI: {' '.join(texts)}\n")
|
227 |
+
# elif isinstance(message.content, str):
|
228 |
+
# print(f"AI: {message.content}")
|
229 |
+
# elif isinstance(message, SystemMessage):
|
230 |
+
# print(f"System: {message.content}\n")
|
231 |
+
# elif isinstance(message, ToolMessage):
|
232 |
+
# print(f"Tool: {message.content}\n")
|
233 |
+
print("\n🕵️♀️🕵️♀️ | start | progress checking nodee \n") # Added a newline for clarity
|
234 |
+
|
235 |
+
# print(f"Prompt: {state.prompt}\n")
|
236 |
+
|
237 |
+
if state.messages:
|
238 |
+
last_message = state.messages[-1]
|
239 |
+
|
240 |
+
if isinstance(last_message, HumanMessage):
|
241 |
+
print(f"🧑 Human: {last_message.content}\n")
|
242 |
+
elif isinstance(last_message, AIMessage):
|
243 |
+
if last_message.content:
|
244 |
+
if isinstance(last_message.content, list):
|
245 |
+
texts = [item.get('text', '') for item in last_message.content if isinstance(item, dict) and 'text' in item]
|
246 |
+
if texts:
|
247 |
+
print(f"🤖 AI: {' '.join(texts)}\n")
|
248 |
+
elif isinstance(last_message.content, str):
|
249 |
+
print(f"🤖 AI: {last_message.content}\n")
|
250 |
+
elif isinstance(last_message, SystemMessage):
|
251 |
+
print(f"⚙️ System: {last_message.content}\n")
|
252 |
+
elif isinstance(last_message, ToolMessage):
|
253 |
+
print(f"🛠️ Tool: {last_message.content}\n")
|
254 |
+
else:
|
255 |
+
print("\n(No messages found.)")
|
256 |
+
|
257 |
+
|
258 |
+
# Log boolean completion flags
|
259 |
+
# Define the order of stages
|
260 |
+
stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
|
261 |
+
|
262 |
+
# Identify completed and incomplete stages
|
263 |
+
completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
|
264 |
+
incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]
|
265 |
+
|
266 |
+
|
267 |
+
|
268 |
+
# Determine the next stage
|
269 |
+
if not incomplete:
|
270 |
+
# All stages are complete
|
271 |
+
return {
|
272 |
+
"messages": [AIMessage(content="All DIY project stages are complete!")],
|
273 |
+
"next_stage": "end_project",
|
274 |
+
"pending_approval_stage": None,
|
275 |
+
}
|
276 |
+
else:
|
277 |
+
# Set the next stage to the first incomplete stage
|
278 |
+
next_stage = incomplete[0]
|
279 |
+
print(f"Next Stage: {state.next_stage}")
|
280 |
+
print("\n🕵️♀️🕵️♀️ | end | progress checking nodee \n") # Added a newline for clarity
|
281 |
+
return {
|
282 |
+
"messages": [],
|
283 |
+
"next_stage": next_stage,
|
284 |
+
"pending_approval_stage": None,
|
285 |
+
}
|
286 |
+
|
287 |
+
def guidance_routing(state: GraphProcessingState) -> str:
|
288 |
+
|
289 |
+
print("\n🔀🔀 Routing checkpoint 🔀🔀\n")
|
290 |
+
|
291 |
+
print(f"Next Stage: {state.next_stage}\n")
|
292 |
+
|
293 |
+
print(f"Brainstorming complete: {state.brainstorming_complete}")
|
294 |
+
print(f"Prompt planing: {state.planning_complete}")
|
295 |
+
print(f"Drwaing 3d model: {state.drawing_complete}")
|
296 |
+
print(f"Finding products: {state.product_searching_complete}\n")
|
297 |
+
|
298 |
+
|
299 |
+
|
300 |
+
next_stage = state.next_stage
|
301 |
+
if next_stage == "brainstorming":
|
302 |
+
return "brainstorming_node"
|
303 |
+
|
304 |
+
elif next_stage == "planning":
|
305 |
+
# return "generate_3d_node"
|
306 |
+
return "prompt_planning_node"
|
307 |
+
elif next_stage == "drawing":
|
308 |
+
return "generate_3d_node"
|
309 |
+
elif next_stage == "product_searching":
|
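# product_searching has no node yet; this branch only dumps debug output and falls through to the final return END.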
310 |
+
print('\n may day may day may day may day may day')
|
311 |
+
|
312 |
+
print(f"Prompt: {state.prompt}")
|
313 |
+
print(f"Prompt: {state.prompt}")
|
314 |
+
# print(f"Message: {state.messages}")
|
315 |
+
print(f"Tools Enabled: {state.tools_enabled}")
|
316 |
+
print(f"Search Enabled: {state.search_enabled}")
|
317 |
+
for message in state.messages:
|
318 |
+
print(f'\ncomplete message', message)
|
319 |
+
if isinstance(message, HumanMessage):
|
320 |
+
print(f"Human: {message.content}\n")
|
321 |
+
elif isinstance(message, AIMessage):
|
322 |
+
# Check if content is non-empty
|
323 |
+
if message.content:
|
324 |
+
# If content is a list (e.g., list of dicts), extract text
|
325 |
+
if isinstance(message.content, list):
|
326 |
+
texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
|
327 |
+
if texts:
|
328 |
+
print(f"AI: {' '.join(texts)}\n")
|
329 |
+
elif isinstance(message.content, str):
|
330 |
+
print(f"AI: {message.content}")
|
331 |
+
elif isinstance(message, SystemMessage):
|
332 |
+
print(f"System: {message.content}\n")
|
333 |
+
elif isinstance(message, ToolMessage):
|
334 |
+
print(f"Tool: {message.content}\n")
|
335 |
+
# return "drawing_node"
|
336 |
+
# elif next_stage == "product_searching":
|
337 |
+
# return "product_searching"
|
338 |
+
# elif next_stage == "purchasing":
|
339 |
+
# return "purchasing_node"
|
340 |
+
return END
|
341 |
+
|
342 |
+
async def brainstorming_node(state: GraphProcessingState, config=None):
|
343 |
+
print("\n🧠🧠 | start | brainstorming Node \n") # Added a newline for clarity
|
344 |
+
|
345 |
+
|
346 |
+
# Check if model is available
|
347 |
+
if not model:
|
348 |
+
return {"messages": [AIMessage(content="Model not available for brainstorming.")]}
|
349 |
+
|
350 |
+
# Filter out messages with empty content
|
351 |
+
filtered_messages = [
|
352 |
+
message for message in state.messages
|
353 |
+
if isinstance(message, (HumanMessage, AIMessage, SystemMessage, ToolMessage)) and message.content
|
354 |
+
]
|
355 |
+
|
356 |
+
# Ensure there is at least one message with content
|
357 |
+
if not filtered_messages:
|
358 |
+
filtered_messages.append(AIMessage(content="No valid messages provided."))
|
359 |
+
|
360 |
+
stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
|
361 |
+
completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
|
362 |
+
incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]
|
363 |
+
|
364 |
+
if not incomplete:
|
365 |
+
print("All stages complete!")
|
366 |
+
# Handle case where all stages are complete
|
367 |
+
# You might want to return a message and end, or set proposed_next_stage to a special value
|
368 |
+
ai_all_complete_msg = AIMessage(content="All DIY project stages are complete!")
|
369 |
+
return {
|
370 |
+
"messages": current_messages + [ai_all_complete_msg],
|
371 |
+
"next_stage": "end_project", # Or None, or a final summary node
|
372 |
+
"pending_approval_stage": None,
|
373 |
+
}
|
374 |
+
else:
|
375 |
+
# THIS LINE DEFINES THE VARIABLE
|
376 |
+
proposed_next_stage = incomplete[0]
|
377 |
+
|
378 |
+
guidance_prompt_text = (
|
379 |
+
"""
|
380 |
+
You are a warm, encouraging, and knowledgeable AI assistant, acting as a **Creative DIY Collaborator**. Your primary goal is to guide the user through a friendly and inspiring conversation to finalize **ONE specific, viable DIY project idea**. While we want to be efficient, the top priority is making the user feel heard, understood, and confident in their final choice.
|
381 |
+
|
382 |
+
⚠️ Your core directive remains speed and convergence: If you identify an idea that clearly meets ALL **Critical Criteria** and the user seems positive or neutral, you must suggest finalizing it **immediately**. Do NOT delay by offering too many alternatives once a solid candidate emerges. Your goal is to converge on a "good enough" idea the user is happy with, not to explore every possibility.
|
383 |
+
|
384 |
+
**Your Conversational Style & Strategy:**
|
385 |
+
1. **Be an Active Listener:** Start by acknowledging and validating the user's input. Show you understand their core desire (e.g., "That sounds like a fun goal! Creating a custom piece for your living room is always rewarding.").
|
386 |
+
2. **Ask Inspiring, Open-Ended Questions:** Instead of generic questions, make them feel personal and insightful.
|
387 |
+
* *Instead of:* "What do you want to build?"
|
388 |
+
* *Try:* "What part of your home are you dreaming of improving?" or "Are you thinking of a gift for someone special, or a project just for you?"
|
389 |
+
3. **Act as a Knowledgeable Guide:** When a user is unsure, proactively suggest appealing ideas based on their subtle clues. Connect their interests to tangible projects.
|
390 |
+
* *Example:* If the user mentions liking plants and having a small balcony, you could suggest: "That's great! We could think about a vertical herb garden to save space, or maybe some simple, stylish hanging macrame planters. Does either of those spark your interest?"
|
391 |
+
4. **Guide, Don't Just Gatekeep:** When an idea *almost* meets the criteria, don't just reject it. Gently guide it towards feasibility.
|
392 |
+
* *Example:* "A full-sized dining table might require some specialized tools. How about we adapt that idea into a beautiful, buildable coffee table or a set of side tables using similar techniques?"
|
393 |
+
|
394 |
+
**Critical Criteria for the Final DIY Project Idea (Your non-negotiable checklist):**
|
395 |
+
1. **Buildable:** Achievable by an average person with basic DIY skills.
|
396 |
+
2. **Common Materials/Tools:** Uses only materials (e.g., wood, screws, glue, paint, fabric, cardboard) and basic hand tools (e.g., screwdrivers, hammers, saws, drills) commonly available in general hardware stores, craft stores, or supermarkets worldwide.
|
397 |
+
3. **Avoid Specializations:** Explicitly AVOID projects requiring specialized electronic components, 3D printing, specific brand items not universally available, or complex machinery.
|
398 |
+
4. **Tangible Product:** The final result must be a physical, tangible item.
|
399 |
+
|
400 |
+
**Your Internal Process (How you think on each turn):**
|
401 |
+
|
402 |
+
1. **THOUGHT:**
|
403 |
+
* Clearly state your understanding of the user’s current input and conversational state.
|
404 |
+
* Outline your plan: Engage with their latest input using your **Conversational Style**. Propose or refine an idea to meet the **Critical Criteria**.
|
405 |
+
* **Tool Identification (`human_assistance`):** Decide if you need to ask a question. The question should be formulated according to the "Inspiring, Open-Ended Questions" principle. Clearly state your intention to use the `human_assistance` tool with the exact friendly and natural-sounding question as the `query`.
|
406 |
+
* **Idea Finalization Check:** Check if the current idea satisfies ALL **Critical Criteria**. If yes, and the user shows no objection, move to finalize immediately. Remember: **good enough is final enough**.
|
407 |
+
|
408 |
+
2. **TOOL USE (`human_assistance` - If Needed):**
|
409 |
+
* Invoke `human_assistance` with your well-formulated, friendly query.
|
410 |
+
|
411 |
+
3. **RESPONSE SYNTHESIS / IDEA FINALIZATION:**
|
412 |
+
* **If an idea is finalized:** Respond *only* with the exact phrase:
|
413 |
+
`IDEA FINALIZED: [Name of the Idea]`
|
414 |
+
(e.g., `IDEA FINALIZED: Simple Wooden Spice Rack`)
|
415 |
+
* **If brainstorming continues:**
|
416 |
+
* Provide your engaging suggestions or refinements based on your **Conversational Style**.
|
417 |
+
* Await the user response.
|
418 |
+
|
419 |
+
**General Guidelines (Your core principles):**
|
420 |
+
* **Empathy Over Pure Efficiency:** A positive, collaborative experience is the primary goal. Don't rush the user if they are still exploring.
|
421 |
+
* **Criteria Focused:** Always gently guide ideas toward the **Critical Criteria**.
|
422 |
+
* **One Main Idea at a Time:** Focus the conversation on a single project idea to avoid confusion.
|
423 |
+
* **Rapid Convergence:** Despite the friendly tone, always be looking for the fastest path to a final, viable idea.
|
424 |
+
"""
|
425 |
+
)
|
426 |
+
|
427 |
+
|
428 |
+
|
429 |
+
if state.prompt:
|
430 |
+
final_prompt = "\n".join([ guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
|
431 |
+
else:
|
432 |
+
final_prompt = "\n".join([ guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])
|
433 |
+
|
434 |
+
prompt = ChatPromptTemplate.from_messages(
|
435 |
+
[
|
436 |
+
("system", final_prompt),
|
437 |
+
MessagesPlaceholder(variable_name="messages"),
|
438 |
+
]
|
439 |
+
)
|
440 |
+
|
441 |
+
# Tools allowed for brainstorming
|
442 |
+
node_tools = [human_assistance]
|
443 |
+
if state.search_enabled and tavily_search_tool: # only add search tool if enabled and initialized
|
444 |
+
node_tools.append(tavily_search_tool)
|
445 |
+
|
446 |
+
|
447 |
+
|
448 |
+
|
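# Tool schemas hand-written in the Mistral/OpenAI function-calling JSON format, mirroring the @tool definitions above.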
449 |
+
mistraltools = [
|
450 |
+
{
|
451 |
+
"type": "function",
|
452 |
+
"function": {
|
453 |
+
"name": "human_assistance",
|
454 |
+
"description": "Ask a question from the user",
|
455 |
+
"parameters": {
|
456 |
+
"type": "object",
|
457 |
+
"properties": {
|
458 |
+
"query": {
|
459 |
+
"type": "string",
|
460 |
+
"query": "The transaction id.",
|
461 |
+
}
|
462 |
+
},
|
463 |
+
"required": ["query"],
|
464 |
+
},
|
465 |
+
},
|
466 |
+
},
|
467 |
+
{
|
468 |
+
"type": "function",
|
469 |
+
"function": {
|
470 |
+
"name": "finalize_idea",
|
471 |
+
"description": "Handles finalized ideas. Saves or dispatches the confirmed idea for the next steps. but make sure you give your response with key word IDEA FINALIZED",
|
472 |
+
"parameters": {
|
473 |
+
"type": "object",
|
474 |
+
"properties": {
|
475 |
+
"idea_name": {
|
476 |
+
"type": "string",
|
477 |
+
"description": "The name of the finalized DIY idea.",
|
478 |
+
}
|
479 |
+
},
|
480 |
+
"required": ["idea_name"]
|
481 |
+
}
|
482 |
+
}
|
483 |
+
}
|
484 |
+
]
|
485 |
+
llm = init_chat_model("mistral-large-latest", model_provider="mistralai")
|
486 |
+
|
487 |
+
llm_with_tools = llm.bind_tools(mistraltools)
|
488 |
+
chain = prompt | llm_with_tools
|
489 |
+
|
490 |
+
openai_messages = convert_to_openai_messages(state.messages)
|
491 |
+
|
492 |
+
openai_messages_with_prompt = [
|
493 |
+
{"role": "system", "content": final_prompt}, # your guidance prompt
|
494 |
+
*openai_messages # history you’ve already converted
|
495 |
+
]
|
496 |
+
|
497 |
+
print('open ai formatted', openai_messages_with_prompt[-1])
|
498 |
+
|
499 |
+
for msg in openai_messages_with_prompt:
|
500 |
+
print(msg)
|
501 |
+
|
502 |
+
mistralmodel = "mistral-saba-2502"
|
503 |
+
|
504 |
+
# Pass filtered messages to the chain
|
505 |
+
try:
|
506 |
+
|
507 |
+
# response = await chain.ainvoke({"messages": filtered_messages}, config=config)
|
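# tool_choice="any" forces the model to answer with a tool call (human_assistance or finalize_idea) rather than plain text;
# parallel_tool_calls=False keeps it to a single call per turn.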
508 |
+
response = client.chat.complete(
|
509 |
+
model = mistralmodel,
|
510 |
+
messages = openai_messages_with_prompt,
|
511 |
+
tools = mistraltools,
|
512 |
+
tool_choice = "any",
|
513 |
+
parallel_tool_calls = False,
|
514 |
+
)
|
515 |
+
|
516 |
+
mistral_message = response.choices[0].message
|
517 |
+
tool_call = response.choices[0].message.tool_calls[0]
|
518 |
+
function_name = tool_call.function.name
|
519 |
+
function_params = json.loads(tool_call.function.arguments)
|
520 |
+
|
521 |
+
ai_message = AIMessage(
|
522 |
+
content=mistral_message.content or "", # Use empty string if blank
|
523 |
+
additional_kwargs={
|
524 |
+
"tool_calls": [
|
525 |
+
{
|
526 |
+
"id": tool_call.id,
|
527 |
+
"function": {
|
528 |
+
"name": tool_call.function.name,
|
529 |
+
"arguments": tool_call.function.arguments,
|
530 |
+
},
|
531 |
+
"type": "function", # Add this if your chain expects it
|
532 |
+
}
|
533 |
+
]
|
534 |
+
}
|
535 |
+
)
|
536 |
+
|
537 |
+
updates = {
|
538 |
+
"messages": [ai_message],
|
539 |
+
"tool_calls": [
|
540 |
+
{
|
541 |
+
"name": function_name,
|
542 |
+
"arguments": function_params,
|
543 |
+
}
|
544 |
+
],
|
545 |
+
"next": function_name,
|
546 |
+
}
|
547 |
+
|
548 |
+
print("\nfunction_name: ", function_name, "\nfunction_params: ", function_params)
|
549 |
+
print('\n🔍 response from brainstorm\n', updates)
|
550 |
+
|
551 |
+
if function_name == "finalize_idea":
|
552 |
+
print('finalizing idea')
|
553 |
+
state.brainstorming_complete = True
|
554 |
+
updates["brainstorming_complete"] = True
|
555 |
+
|
556 |
+
|
557 |
+
if isinstance(response, AIMessage) and response.content:
|
558 |
+
print(' Identified last AI message', response)
|
559 |
+
if isinstance(response.content, str):
|
560 |
+
content = response.content.strip()
|
561 |
+
elif isinstance(response.content, list):
|
562 |
+
texts = [item.get("text", "") for item in response.content if isinstance(item, dict)]
|
563 |
+
content = " ".join(texts).strip()
|
564 |
+
else:
|
565 |
+
content = str(response.content).strip()
|
566 |
+
|
567 |
+
print('content for idea finalizing:', content)
|
568 |
+
if "finalize_idea:" in content: # Use 'in' instead of 'startswith'
|
569 |
+
print('✅ final idea')
|
570 |
+
updates.update({
|
571 |
+
"brainstorming_complete": True,
|
572 |
+
"tool_call_required": False,
|
573 |
+
"loop_brainstorming": False,
|
574 |
+
})
|
575 |
+
return updates
|
576 |
+
|
577 |
+
else:
|
578 |
+
# tool_calls = getattr(response, "tool_calls", None)
|
579 |
+
|
580 |
+
|
581 |
+
if tool_call:
|
582 |
+
print('🛠️ tool call requested at brainstorming node')
|
583 |
+
updates.update({
|
584 |
+
"tool_call_required": True,
|
585 |
+
"loop_brainstorming": False,
|
586 |
+
})
|
587 |
+
|
588 |
+
if tool_call:
|
589 |
+
tool_call = response.choices[0].message.tool_calls[0]
|
590 |
+
function_name = tool_call.function.name
|
591 |
+
function_params = json.loads(tool_call.function.arguments)
|
592 |
+
print("\nfunction_name: ", function_name, "\nfunction_params: ", function_params)
|
593 |
+
# for tool_call in response.tool_calls:
|
594 |
+
# tool_name = tool_call['name']
|
595 |
+
# if tool_name == "human_assistance":
|
596 |
+
# query = tool_call['args']['query']
|
597 |
+
# print(f"Human input needed: {query}")
|
598 |
+
|
599 |
+
# for tool_call in tool_calls:
|
600 |
+
# if isinstance(tool_call, dict) and 'name' in tool_call and 'args' in tool_call:
|
601 |
+
# print(f"🔧 Tool Call (Dict): {tool_call.get('name')}, Args: {tool_call.get('args')}")
|
602 |
+
# else:
|
603 |
+
# print(f"🔧 Unknown tool_call format: {tool_call}")
|
604 |
+
else:
|
605 |
+
print('💬 decided to keep brainstorming')
|
606 |
+
updates.update({
|
607 |
+
"tool_call_required": False,
|
608 |
+
"loop_brainstorming": True,
|
609 |
+
})
|
610 |
+
print(f"Brainstorming continues: {content}")
|
611 |
+
|
612 |
+
else:
|
613 |
+
# If no proper response, keep looping brainstorming
|
614 |
+
updates["tool_call_required"] = False
|
615 |
+
updates["loop_brainstorming"] = True
|
616 |
+
|
617 |
+
print("\n🧠🧠 | end | brainstorming Node \n")
|
618 |
+
return updates
|
619 |
+
except Exception as e:
|
620 |
+
print(f"Error: {e}")
|
621 |
+
return {
|
622 |
+
"messages": [AIMessage(content="Error.")],
|
623 |
+
"next_stage": "brainstorming"
|
624 |
+
}
|
625 |
+
|
626 |
+
|
627 |
+
async def prompt_planning_node(state: GraphProcessingState, config=None):
|
628 |
+
print("\n🚩🚩 | start | prompt planing Node \n")
|
629 |
+
# Ensure we have a model
|
630 |
+
if not model:
|
631 |
+
return {"messages": [AIMessage(content="Model not available for planning.")]}
|
632 |
+
|
633 |
+
|
634 |
+
filtered_messages = state.messages
|
635 |
+
|
636 |
+
# Filter out empty messages
|
637 |
+
# filtered_messages = [
|
638 |
+
# msg for msg in state.messages
|
639 |
+
# if isinstance(msg, (HumanMessage, AIMessage, SystemMessage, ToolMessage)) and msg.content
|
640 |
+
# ]
|
641 |
+
# filtered_messages = []
|
642 |
+
|
643 |
+
# for msg in state.messages:
|
644 |
+
# if isinstance(msg, ToolMessage):
|
645 |
+
# # 🛠️ ToolMessage needs to be paired with a prior assistant message that called the tool
|
646 |
+
# tool_name = msg.name or "unknown_tool"
|
647 |
+
# tool_call_id = msg.tool_call_id or "tool_call_id_missing"
|
648 |
+
|
649 |
+
# # Simulated assistant message that initiated the tool call
|
650 |
+
# fake_assistant_msg = AIMessage(
|
651 |
+
# content="",
|
652 |
+
# additional_kwargs={
|
653 |
+
# "tool_calls": [
|
654 |
+
# {
|
655 |
+
# "id": tool_call_id,
|
656 |
+
# "type": "function",
|
657 |
+
# "function": {
|
658 |
+
# "name": tool_name,
|
659 |
+
# "arguments": json.dumps({"content": msg.content or ""}),
|
660 |
+
# }
|
661 |
+
# }
|
662 |
+
# ]
|
663 |
+
# }
|
664 |
+
# )
|
665 |
+
|
666 |
+
# # Append both in correct sequence
|
667 |
+
# filtered_messages.append(fake_assistant_msg)
|
668 |
+
# filtered_messages.append(msg)
|
669 |
+
|
670 |
+
# elif isinstance(msg, (HumanMessage, AIMessage, SystemMessage)) and msg.content:
|
671 |
+
# filtered_messages.append(msg)
|
672 |
+
|
673 |
+
# Fallback if list ends up empty
|
674 |
+
if not filtered_messages:
|
675 |
+
filtered_messages.append(AIMessage(content="No valid messages provided."))
|
676 |
+
|
677 |
+
|
678 |
+
# Define the system prompt for planning
|
679 |
+
guidance_prompt_text = """
|
680 |
+
You are a creative and helpful AI assistant acting as a **DIY Project Brainstorming & 3D-Prompt Generator**. Your mission is to collaborate with the user to:
|
681 |
+
|
682 |
+
1. Brainstorm and refine one specific, viable DIY project idea.
|
683 |
+
2. Identify the single key component from that idea that should be 3D-modeled.
|
684 |
+
3. Produce a final, precise text prompt for an OpenAI 3D-generation endpoint.
|
685 |
+
|
686 |
+
---
|
687 |
+
**Critical Criteria for the DIY Project** (must be met):
|
688 |
+
• Buildable by an average person with only basic DIY skills.
|
689 |
+
• Uses common materials/tools (e.g., wood, screws, glue, paint; hammer, saw, drill).
|
690 |
+
• No specialized electronics, 3D printers, or proprietary parts.
|
691 |
+
• Results in a tangible, physical item.
|
692 |
+
|
693 |
+
---
|
694 |
+
**Available Tools**
|
695 |
+
• human_assistance – ask the user clarifying questions.
|
696 |
+
• (optional) your project-specific search tool – look up inspiration or standard dimensions if needed.
|
697 |
+
|
698 |
+
---
|
699 |
+
**When the DIY idea is fully detailed and meets all criteria, output exactly and only:**
|
700 |
+
|
701 |
+
ACCURATE PROMPT FOR MODEL GENERATING: [Your final single-paragraph prompt here]
|
702 |
+
"""
|
703 |
+
|
704 |
+
# Build final prompt
|
705 |
+
if state.prompt:
|
706 |
+
final_prompt = "\n".join([guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
|
707 |
+
else:
|
708 |
+
final_prompt = "\n".join([guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])
|
709 |
+
|
710 |
+
prompt = ChatPromptTemplate.from_messages([
|
711 |
+
("system", final_prompt),
|
712 |
+
MessagesPlaceholder(variable_name="messages"),
|
713 |
+
])
|
714 |
+
|
715 |
+
# Bind tools
|
716 |
+
node_tools = [human_assistance]
|
717 |
+
if state.search_enabled and tavily_search_tool:
|
718 |
+
node_tools.append(tavily_search_tool)
|
719 |
+
|
720 |
+
llm_with_tools = prompt_planning_model.bind_tools(node_tools)
|
721 |
+
chain = prompt | llm_with_tools
|
722 |
+
|
723 |
+
# print(' 👾👾👾👾Debugging the request going in to prompt planing model')
|
724 |
+
# print("Prompt: ", prompt)
|
725 |
+
# print("chain: ", chain)
|
726 |
+
|
727 |
+
for msg in filtered_messages:
|
728 |
+
print('✨msg : ',msg)
|
729 |
+
print('\n')
|
730 |
+
|
731 |
+
try:
|
732 |
+
response = await chain.ainvoke({"messages": filtered_messages}, config=config)
|
733 |
+
|
734 |
+
print('\nresponse ->: ', response)
|
735 |
+
|
736 |
+
# Log any required human assistance query
|
737 |
+
if hasattr(response, "tool_calls"):
|
738 |
+
for call in response.tool_calls:
|
739 |
+
if call.get("name") == "human_assistance":
|
740 |
+
print(f"Human input needed: {call['args']['query']}")
|
741 |
+
|
742 |
+
|
743 |
+
|
744 |
+
updates = {"messages": [response]}
|
745 |
+
|
746 |
+
# Extract response text
|
747 |
+
content = ""
|
748 |
+
if isinstance(response.content, str):
|
749 |
+
content = response.content.strip()
|
750 |
+
elif isinstance(response.content, list):
|
751 |
+
content = " ".join(item.get("text","") for item in response.content if isinstance(item, dict)).strip()
|
752 |
+
|
753 |
+
# Check for the finalization signal in the response content
|
754 |
+
if "ACCURATE PROMPT FOR MODEL GENERATING" in content:
|
755 |
+
dalle_prompt_text = content.replace("ACCURATE PROMPT FOR MODEL GENERATING:", "").strip()
|
756 |
+
print(f"\n🤖🤖🤖🤖Extracted DALL-E prompt: {dalle_prompt_text}")
|
757 |
+
|
758 |
+
generated_image_url = None
|
759 |
+
generated_3d_model_url = None # This will store the final 3D model URL
|
760 |
+
|
761 |
+
# --- START: New code for DALL-E and Trellis API calls ---
|
762 |
+
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
|
763 |
+
if not OPENAI_API_KEY:
|
764 |
+
print("Error: OPENAI_API_KEY environment variable not set.")
|
765 |
+
updates["messages"].append(AIMessage(content="OpenAI API key not configured. Cannot generate image."))
|
766 |
+
else:
|
767 |
+
# 1. Call DALL-E API
|
768 |
+
dalle_api_url = "https://api.openai.com/v1/images/generations"
|
769 |
+
dalle_headers = {
|
770 |
+
"Content-Type": "application/json",
|
771 |
+
"Authorization": f"Bearer {OPENAI_API_KEY}"
|
772 |
+
}
|
773 |
+
|
774 |
+
_model_to_use_for_dalle_call = "dall-e-2" # <<< IMPORTANT: Set this to "dall-e-2" or "dall-e-3"
|
775 |
+
|
776 |
+
|
777 |
+
_processed_prompt_text = dalle_prompt_text # Start with the original prompt
|
778 |
+
_prompt_was_trimmed_or_issue_found = False
|
779 |
+
_warning_or_error_message_for_updates = None
|
780 |
+
|
781 |
+
max_prompt_lengths = {
|
782 |
+
"dall-e-2": 1000,
|
783 |
+
"dall-e-3": 4000,
|
784 |
+
"gpt-image-1": 32000 # Included for completeness, though payload is for DALL-E
|
785 |
+
}
|
786 |
+
|
787 |
+
if not _processed_prompt_text: # Check for empty prompt
|
788 |
+
_message = f"Error: The DALL-E prompt for model '{_model_to_use_for_dalle_call}' cannot be empty. API call will likely fail."
|
789 |
+
print(f"\n🛑🛑🛑🛑 {_message}")
|
790 |
+
_warning_or_error_message_for_updates = _message
|
791 |
+
_prompt_was_trimmed_or_issue_found = True
|
792 |
+
# NOTE: OpenAI API will return an error for an empty prompt.
|
793 |
+
# If you want to prevent the call entirely here, you could add:
|
794 |
+
# updates["messages"].append(AIMessage(content=_message))
|
795 |
+
# return # or raise an exception
|
796 |
+
|
797 |
+
elif _model_to_use_for_dalle_call in max_prompt_lengths:
|
798 |
+
_max_len = max_prompt_lengths[_model_to_use_for_dalle_call]
|
799 |
+
_original_len = len(_processed_prompt_text)
|
800 |
+
|
801 |
+
if _original_len > _max_len:
|
802 |
+
_processed_prompt_text = _processed_prompt_text[:_max_len]
|
803 |
+
_message = (
|
804 |
+
f"Warning: Prompt for DALL-E ({_model_to_use_for_dalle_call}) was {_original_len} characters. "
|
805 |
+
f"It has been TRUNCATED to the maximum of {_max_len} characters."
|
806 |
+
)
|
807 |
+
print(f"\n⚠️⚠️⚠️⚠️ {_message}")
|
808 |
+
_warning_or_error_message_for_updates = _message
|
809 |
+
_prompt_was_trimmed_or_issue_found = True
|
810 |
+
else:
|
811 |
+
# Model specified in _model_to_use_for_dalle_call is not in our length check dictionary
|
812 |
+
_message = (
|
813 |
+
f"Notice: Model '{_model_to_use_for_dalle_call}' not found in pre-defined prompt length limits. "
|
814 |
+
"Proceeding with the original prompt. API may reject if prompt is too long for this model."
|
815 |
+
)
|
816 |
+
print(f"\nℹ️ℹ️ℹ️ℹ️ {_message}")
|
817 |
+
# You might not want to add this specific notice to 'updates["messages"]' unless it's critical
|
818 |
+
# _warning_or_error_message_for_updates = _message
|
819 |
+
# _prompt_was_trimmed_or_issue_found = True # Or not, depending on how you view this
|
820 |
+
|
821 |
+
# Add warning/error to updates if one was generated
|
822 |
+
if _warning_or_error_message_for_updates:
|
823 |
+
# Check if 'updates' and 'AIMessage' are available in the current scope to avoid errors
|
824 |
+
if 'updates' in locals() and isinstance(updates, dict) and 'messages' in updates and 'AIMessage' in globals():
|
825 |
+
updates["messages"].append(AIMessage(content=_warning_or_error_message_for_updates))
|
826 |
+
elif 'updates' in globals() and isinstance(updates, dict) and 'messages' in updates: # If AIMessage isn't defined, just append string
|
827 |
+
updates["messages"].append(_warning_or_error_message_for_updates)
|
828 |
+
|
829 |
+
|
830 |
+
# --- Prompt Trimming Logic END ---
|
831 |
+
|
832 |
+
dalle_payload = {
|
833 |
+
"model": _model_to_use_for_dalle_call, # Use the model determined above
|
834 |
+
"prompt": _processed_prompt_text, # Use the processed (potentially trimmed) prompt
|
835 |
+
"n": 1,
|
836 |
+
"size": "1024x1024"
|
837 |
+
# You can add other DALL-E 3 specific params if _model_to_use_for_dalle_call is "dall-e-3"
|
838 |
+
# e.g., "quality": "hd", "style": "vivid"
|
839 |
+
}
|
840 |
+
|
841 |
+
print(f"\n🤖🤖🤖🤖Calling DALL-E with prompt: {dalle_prompt_text}")
|
842 |
+
async with aiohttp.ClientSession() as session:
|
843 |
+
try:
|
844 |
+
async with session.post(dalle_api_url, headers=dalle_headers, json=dalle_payload) as dalle_response:
|
845 |
+
dalle_response.raise_for_status() # Raise an exception for HTTP errors
|
846 |
+
dalle_data = await dalle_response.json()
|
847 |
+
if dalle_data.get("data") and len(dalle_data["data"]) > 0:
|
848 |
+
generated_image_url = dalle_data["data"][0].get("url")
|
849 |
+
print(f"DALL-E generated image URL: {generated_image_url}")
|
850 |
+
updates["messages"].append(AIMessage(content=f"Image generated by DALL-E: {generated_image_url}"))
|
851 |
+
else:
|
852 |
+
print("Error: DALL-E API did not return image data.")
|
853 |
+
updates["messages"].append(AIMessage(content="Failed to get image from DALL-E."))
|
854 |
+
except aiohttp.ClientError as e:
|
855 |
+
print(f"DALL-E API call error: {e}")
|
856 |
+
updates["messages"].append(AIMessage(content=f"Error calling DALL-E: {e}"))
|
857 |
+
except json.JSONDecodeError as e:
|
858 |
+
print(f"DALL-E API JSON decode error: {e}. Response: {await dalle_response.text()}")
|
859 |
+
updates["messages"].append(AIMessage(content=f"Error decoding DALL-E response: {e}"))
|
860 |
+
except Exception as e:
|
861 |
+
print(f"Unexpected error during DALL-E processing: {e}")
|
862 |
+
updates["messages"].append(AIMessage(content=f"Unexpected error with DALL-E: {e}"))
|
863 |
+
|
864 |
+
updates.update({
|
865 |
+
"generated_image_url_from_dalle": generated_image_url,
|
866 |
+
"planning_complete": True,
|
867 |
+
"tool_call_required": False,
|
868 |
+
"loop_planning": False,
|
869 |
+
})
|
870 |
+
else:
|
871 |
+
# Check if a tool call was requested
|
872 |
+
if getattr(response, "tool_calls", None):
|
873 |
+
updates.update({
|
874 |
+
"tool_call_required": True,
|
875 |
+
"loop_planning": False,
|
876 |
+
})
|
877 |
+
else:
|
878 |
+
updates.update({
|
879 |
+
"tool_call_required": False,
|
880 |
+
"loop_planning": True,
|
881 |
+
})
|
882 |
+
|
883 |
+
print("\n🚩🚩 | end | prompt planing Node \n")
|
884 |
+
return updates
|
885 |
+
|
886 |
+
except Exception as e:
|
887 |
+
print(f"Error in prompt_planning node: {e}")
|
888 |
+
return {
|
889 |
+
"messages": [AIMessage(content="Error in prompt_planning node.")],
|
890 |
+
"next_stage": state.next_stage or "planning"
|
891 |
+
}
|
892 |
+
|
893 |
+
async def generate_3d_node(state: GraphProcessingState, config=None):
|
894 |
+
print("\n🚀🚀🚀 | start | Generate 3D Node ��🚀🚀\n")
|
895 |
+
# 1. Get the image URL
|
896 |
+
# The image URL comes from the DALL-E step stored in state.
|
897 |
+
# In a real scenario, you might get this from the state:
|
898 |
+
# image_url = state.get("image_url_for_3d")
|
899 |
+
# if not image_url:
|
900 |
+
# print("No image_url_for_3d found in state.")
|
901 |
+
# return {"messages": [AIMessage(content="No image URL found for 3D generation.")]}
|
902 |
+
|
903 |
+
hardcoded_image_url = state.generated_image_url_from_dalle
|
904 |
+
print(f"Using hardcoded image_url: {hardcoded_image_url}")
|
905 |
+
|
906 |
+
# 2. Define API endpoint and parameters
|
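# The Trellis service takes the DALL-E image URL plus sampling settings as query-string parameters and returns the binary .glb file.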
907 |
+
api_base_url = "https://wishwa-code--trellis-3d-model-generate-dev.modal.run/"
|
908 |
+
params = {
|
909 |
+
"image_url": hardcoded_image_url,
|
910 |
+
"simplify": "0.95",
|
911 |
+
"texture_size": "1024",
|
912 |
+
"sparse_sampling_steps": "12",
|
913 |
+
"sparse_sampling_cfg": "7.5",
|
914 |
+
"slat_sampling_steps": "12",
|
915 |
+
"slat_sampling_cfg": "3",
|
916 |
+
"seed": "42",
|
917 |
+
"output_format": "glb"
|
918 |
+
}
|
919 |
+
|
920 |
+
# Create a directory to store generated models if it doesn't exist
|
921 |
+
output_dir = "generated_3d_models"
|
922 |
+
os.makedirs(output_dir, exist_ok=True)
|
923 |
+
|
924 |
+
# 3. Attempt generation with retries
|
925 |
+
for attempt in range(1, 4):  # up to 3 attempts, matching the retry checks and error messages below
|
926 |
+
print(f"Attempt {attempt} to call 3D generation API...")
|
927 |
+
try:
|
928 |
+
# Note: The API call can take a long time (1.5 mins in your curl example)
|
929 |
+
# Ensure your HTTP client timeout is sufficient.
|
930 |
+
# httpx default timeout is 5 seconds, which is too short.
|
931 |
+
async with httpx.AsyncClient(timeout=120.0) as client: # Timeout set to 120 seconds
|
932 |
+
response = await client.get(api_base_url, params=params)
|
933 |
+
response.raise_for_status() # Raises an HTTPStatusError for 4XX/5XX responses
|
934 |
+
|
935 |
+
# Successfully got a response
|
936 |
+
if response.status_code == 200:
|
937 |
+
# Assuming the response body is the .glb file content
|
938 |
+
file_name = f"model_{uuid.uuid4()}.glb"
|
939 |
+
file_path = os.path.join(output_dir, file_name)
|
940 |
+
|
941 |
+
with open(file_path, "wb") as f:
|
942 |
+
f.write(response.content)
|
943 |
+
|
944 |
+
print(f"Success: 3D model saved to {file_path}")
|
945 |
+
return {
|
946 |
+
"messages": [AIMessage(content=f"3D object generation successful: {file_path}")],
|
947 |
+
"generate_3d_complete": True,
|
948 |
+
"three_d_model_path": file_path,
|
949 |
+
"next_stage": state.get("next_stage") or 'end' # Use .get for safer access
|
950 |
+
}
|
951 |
+
else:
|
952 |
+
# This case might not be reached if raise_for_status() is used effectively,
|
953 |
+
# but good for explicit handling.
|
954 |
+
error_message = f"API returned status {response.status_code}: {response.text}"
|
955 |
+
print(error_message)
|
956 |
+
if attempt == 3: # Last attempt
|
957 |
+
return {"messages": [AIMessage(content=f"Failed to generate 3D object. Last error: {error_message}")]}
|
958 |
+
|
959 |
+
except httpx.HTTPStatusError as e:
|
960 |
+
error_message = f"HTTP error occurred: {e.response.status_code} - {e.response.text}"
|
961 |
+
print(error_message)
|
962 |
+
if attempt == 3:
|
963 |
+
return {"messages": [AIMessage(content=f"Failed to generate 3D object after 3 attempts. Last HTTP error: {error_message}")]}
|
964 |
+
except httpx.RequestError as e: # Catches network errors, timeout errors etc.
|
965 |
+
error_message = f"Request error occurred: {str(e)}"
|
966 |
+
print(error_message)
|
967 |
+
if attempt == 3:
|
968 |
+
return {"messages": [AIMessage(content=f"Failed to generate 3D object after 3 attempts. Last request error: {error_message}")]}
|
969 |
+
except Exception as e:
|
970 |
+
error_message = f"An unexpected error occurred: {str(e)}"
|
971 |
+
print(error_message)
|
972 |
+
if attempt == 3:
|
973 |
+
return {"messages": [AIMessage(content=f"Failed to generate 3D object after 3 attempts. Last unexpected error: {error_message}")]}
|
974 |
+
|
975 |
+
if attempt < 2:
|
976 |
+
print("Retrying...")
|
977 |
+
else:
|
978 |
+
print("Max retries reached.")
|
979 |
+
|
980 |
+
|
981 |
+
# Failed after retries (this path should ideally be covered by returns in the loop)
|
982 |
+
return {"messages": [AIMessage(content="Failed to generate a valid 3D object after 3 attempts.")]}
|
983 |
+
|
def define_workflow() -> CompiledStateGraph:
    """Defines the workflow graph."""
    # Initialize the graph
    workflow = StateGraph(GraphProcessingState)

    # Add nodes
    workflow.add_node("tools", DebugToolNode(tools))
    workflow.add_node("guidance_node", guidance_node)
    workflow.add_node("brainstorming_node", brainstorming_node)
    workflow.add_node("prompt_planning_node", prompt_planning_node)
    workflow.add_node("generate_3d_node", generate_3d_node)
    # workflow.add_node("planning_node", planning_node)

    # Edges
    workflow.add_conditional_edges(
        "guidance_node",
        guidance_routing,
        {
            "brainstorming_node": "brainstorming_node",
            "prompt_planning_node": "prompt_planning_node",
            "generate_3d_node": "generate_3d_node"
        }
    )
    workflow.add_conditional_edges(
        "brainstorming_node",
        tools_condition,
    )
    workflow.add_conditional_edges(
        "prompt_planning_node",
        tools_condition,
    )
    workflow.add_edge("tools", "guidance_node")
    workflow.add_edge("brainstorming_node", "guidance_node")
    workflow.add_edge("prompt_planning_node", "guidance_node")
    workflow.add_edge("generate_3d_node", "guidance_node")

    # workflow.add_conditional_edges(
    #     "guidance_node",              # The source node
    #     custom_route_after_guidance,  # Your custom condition function
    #     {
    #         # "Path name": "Destination node name"
    #         "execute_tools": "tools",                 # If the function returns "execute_tools"
    #         "proceed_to_next_stage": "planning_node"  # If the function returns "proceed_to_next_stage"
    #         # Or this could be another router, or END
    #     }
    # )
    # workflow.add_conditional_edges("guidance_node", guidance_routing)
    # workflow.add_conditional_edges("brainstorming_node", brainstorming_routing)

    # Set the entry point
    workflow.set_entry_point("guidance_node")
    # workflow.set_finish_point("assistant_node")

    compiled_graph = workflow.compile(checkpointer=memory)
    try:
        img_bytes = compiled_graph.get_graph().draw_mermaid_png()
        with open("graph.png", "wb") as f:
            f.write(img_bytes)
        print("Graph image saved as graph.png")
    except Exception as e:
        print("Can't render the graph:")
        print(e)

    return compiled_graph


graph = define_workflow()
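Not shown in this part of the file is how the Gradio layer drives the compiled graph. A minimal invocation sketch, assuming the `graph` object above and a per-conversation thread_id for the checkpointer (the thread id and user message below are placeholders):

import asyncio
from langchain_core.messages import HumanMessage

async def run_once() -> None:
    # Each conversation gets its own thread_id so the checkpointer can resume state.
    config = {"configurable": {"thread_id": "demo-thread"}}  # placeholder session id
    result = await graph.ainvoke(
        {"messages": [HumanMessage(content="I want to build a birdhouse")]},
        config=config,
    )
    # The nodes append AIMessage objects to the shared "messages" state.
    print(result["messages"][-1].content)

# asyncio.run(run_once())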

# async def assistant_node(state: GraphProcessingState, config=None):
#     print("\n--- Assistance Node (Debug via print) ---")  # Added a newline for clarity
#
#     print(f"Prompt: {state.prompt}")
#
#     print(f"Tools Enabled: {state.tools_enabled}")
#     print(f"Search Enabled: {state.search_enabled}")
#     print(f"Next Stage: {state.next_stage}")
#
#     # Log boolean completion flags
#     print(f"Idea Complete: {state.idea_complete}")
#     print(f"Brainstorming Complete: {state.brainstorming_complete}")
#     print(f"Planning Complete: {state.planning_complete}")
#     print(f"Drawing Complete: {state.drawing_complete}")
#     print(f"Product Searching Complete: {state.product_searching_complete}")
#     print(f"Purchasing Complete: {state.purchasing_complete}")
#     print("--- End Guidance Node Debug ---")  # Added for clarity
#     print(f"\nMessage: {state.messages}")
#     assistant_tools = []
#     if state.tools_enabled.get("download_website_text", True):
#         assistant_tools.append(download_website_text)
#     if search_enabled and state.tools_enabled.get("tavily_search_results_json", True):
#         assistant_tools.append(tavily_search_tool)
#     assistant_model = model.bind_tools(assistant_tools)
#     if state.prompt:
#         final_prompt = "\n".join([state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
#     else:
#         final_prompt = ASSISTANT_SYSTEM_PROMPT_BASE
#
#     prompt = ChatPromptTemplate.from_messages(
#         [
#             ("system", final_prompt),
#             MessagesPlaceholder(variable_name="messages"),
#         ]
#     )
#     chain = prompt | assistant_model
#     response = await chain.ainvoke({"messages": state.messages}, config=config)
#
#     for msg in response:
#         if isinstance(msg, HumanMessage):
#             print("Human:", msg.content)
#         elif isinstance(msg, AIMessage):
#             if isinstance(msg.content, list):
#                 ai_texts = [part.get("text", "") for part in msg.content if isinstance(part, dict)]
#                 print("AI:", " ".join(ai_texts))
#             else:
#                 print("AI:", msg.content)
#
#     idea_complete = evaluate_idea_completion(response)
#
#     return {
#         "messages": response,
#         "idea_complete": idea_complete
#     }
#
#     # message = llm_with_tools.invoke(state["messages"])
#     # Because we will be interrupting during tool execution,
#     # we disable parallel tool calling to avoid repeating any
#     # tool invocations when we resume.
#     assert len(response.tool_calls) <= 1
#     idea_complete = evaluate_idea_completion(response)
#
#     return {
#         "messages": response,
#         "idea_complete": idea_complete
#     }


#


# async def planning_node(state: GraphProcessingState, config=None):
#     # Define the system prompt for planning
#     planning_prompt = "Based on the user's idea, create a detailed step-by-step plan to build the DIY product."
#
#     # Combine the planning prompt with any existing prompts
#     if state.prompt:
#         final_prompt = "\n".join([planning_prompt, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
#     else:
#         final_prompt = "\n".join([planning_prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
#
#     # Create the prompt template
#     prompt = ChatPromptTemplate.from_messages(
#         [
#             ("system", final_prompt),
#             MessagesPlaceholder(variable_name="messages"),
#         ]
#     )
#
#     # Bind tools if necessary
#     assistant_tools = []
#     if state.tools_enabled.get("download_website_text", True):
#         assistant_tools.append(download_website_text)
#     if search_enabled and state.tools_enabled.get("tavily_search_results_json", True):
#         assistant_tools.append(tavily_search_tool)
#     assistant_model = model.bind_tools(assistant_tools)
#
#     # Create the chain and invoke it
#     chain = prompt | assistant_model
#     response = await chain.ainvoke({"messages": state.messages}, config=config)
#
#     return {
#         "messages": response
#     }


# async def guidance_node(state: GraphProcessingState, config=None):
#     print("\n--- Guidance Node (Debug via print) ---")
#
#     print(f"Prompt: {state.prompt}")
#     for message in state.messages:
#         if isinstance(message, HumanMessage):
#             print(f"Human: {message.content}")
#         elif isinstance(message, AIMessage):
#             if message.content:
#                 if isinstance(message.content, list):
#                     texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
#                     if texts:
#                         print(f"AI: {' '.join(texts)}")
#                 elif isinstance(message.content, str):
#                     print(f"AI: {message.content}")
#         elif isinstance(message, SystemMessage):
#             print(f"System: {message.content}")
#         elif isinstance(message, ToolMessage):
#             print(f"Tool: {message.content}")
#
#     print(f"Tools Enabled: {state.tools_enabled}")
#     print(f"Search Enabled: {state.search_enabled}")
#     print(f"Next Stage: {state.next_stage}")
#
#     print(f"Brainstorming Complete: {state.brainstorming_complete}")
#
#     guidance_node.count = getattr(guidance_node, 'count', 0) + 1
#     print('\nGuidance Node called count', guidance_node.count)
#     print("\n--- End Guidance Node Debug ---")
#
#     stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
#     completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
#     incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]
#
#     if not incomplete:
#         print("All stages complete!")
#         # Handle case where all stages are complete
#         # You might want to return a message and end, or set proposed_next_stage to a special value
#         ai_all_complete_msg = AIMessage(content="All DIY project stages are complete!")
#         return {
#             "messages": current_messages + [ai_all_complete_msg],
#             "next_stage": "end_project",  # Or None, or a final summary node
#             "pending_approval_stage": None,
#         }
#     else:
#         # THIS LINE DEFINES THE VARIABLE
#         proposed_next_stage = incomplete[0]
#
#     print(f"Proposed next stage: {proposed_next_stage}")
#
#     status_summary = f"Completed stages: {completed}\nIncomplete stages: {incomplete}"
#
#     guidance_prompt_text = (
#         "You are the Guiding Assistant for a DIY project. Your primary responsibility is to determine the next logical step "
#         "and then **obtain the user's explicit approval** before proceeding.\n\n"
#         f"CURRENT PROJECT STATUS:\n{status_summary}\n\n"
#         f"Based on the status, the most logical next stage appears to be: **'{proposed_next_stage}'**.\n\n"
#         "YOUR TASK:\n"
#         f"1. Formulate a clear and concise question for the user, asking if they agree to proceed to the **'{proposed_next_stage}'** stage. For example: 'It looks like '{proposed_next_stage}' is next. Shall we proceed with that?' or 'Are you ready to move on to {proposed_next_stage}?'\n"
#         "2. **You MUST use the 'human_assistance' tool to ask this question.** Do not answer directly. Invoke the tool with your question.\n"
#         "Example of tool usage (though you don't write this, you *call* the tool):\n"
#         "Tool Call: human_assistance(query='The next stage is planning. Do you want to proceed with planning?')\n\n"
#         "Consider the user's most recent message if it provides any preference."
#     )
#
#     if state.prompt:
#         final_prompt = "\n".join([guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
#     else:
#         final_prompt = "\n".join([guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])
#
#     prompt = ChatPromptTemplate.from_messages(
#         [
#             ("system", final_prompt),
#             MessagesPlaceholder(variable_name="messages"),
#         ]
#     )
#
#     assistant_model = model.bind_tools([human_assistance])
#
#     chain = prompt | assistant_model
#
#     try:
#         response = await chain.ainvoke({"messages": state.messages}, config=config)
#
#         for msg in response:
#             if isinstance(msg, HumanMessage):
#                 print("Human:", msg.content)
#             elif isinstance(msg, AIMessage):
#                 if isinstance(msg.content, list):
#                     ai_texts = [part.get("text", "") for part in msg.content if isinstance(part, dict)]
#                     print("AI:", " ".join(ai_texts))
#                 else:
#                     print("AI:", msg.content)
#
#         # Check for tool calls in the response
#         if hasattr(response, "tool_calls"):
#             for tool_call in response.tool_calls:
#                 tool_name = tool_call['name']
#                 if tool_name == "human_assistance":
#                     query = tool_call['args']['query']
#                     print(f"Human input needed: {query}")
#                     # Handle human assistance tool call
#                     # You can pause execution and wait for user input here
#
#         return {
#             "messages": [response],
#             "next_stage": incomplete[0] if incomplete else "brainstorming"
#         }
#     except Exception as e:
#         print(f"Error in guidance node: {e}")
#         return {
#             "messages": [AIMessage(content="Error in guidance node.")],
#             "next_stage": "brainstorming"
#         }
greeting_prompt.txt
ADDED
@@ -0,0 +1,2 @@
Hi! I am හොදබාස් (Hodabas), your friendly DIY helper.
Looking to build something awesome today? Let's find the right tools and materials together!
logging-config.json
ADDED
@@ -0,0 +1,35 @@
{
    "version": 1,
    "disable_existing_loggers": false,
    "formatters": {
        "verbose": {
            "format": "%(asctime)s:%(name)s:%(levelname)s: %(message)s",
            "datefmt": "%Y-%m-%d %H:%M:%S"
        }
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "level": "INFO",
            "formatter": "verbose"
        },
        "file": {
            "class": "logging.FileHandler",
            "filename": "app.log",
            "level": "INFO",
            "formatter": "verbose",
            "encoding": "utf-8"
        }
    },
    "loggers": {
        "httpx": {
            "level": "WARNING",
            "handlers": ["console"],
            "propagate": false
        }
    },
    "root": {
        "level": "INFO",
        "handlers": ["console", "file"]
    }
}
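app.py presumably loads this file at startup. A minimal sketch of doing so with the standard library (the file name matches the one committed here):

import json
import logging.config

# Load the dictConfig-style configuration committed as logging-config.json.
with open("logging-config.json", "r", encoding="utf-8") as f:
    logging.config.dictConfig(json.load(f))

logging.getLogger(__name__).info("Logging configured from logging-config.json")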
requirements.txt
ADDED
@@ -0,0 +1,21 @@
Pillow
gradio==5.33.0
langchain
langchain-community
langchain-core
langchain-anthropic
langchain-google-genai
langchain-huggingface
langchain-groq
langchain-tavily
langchain-chroma
langgraph
huggingface_hub
python-dotenv
graphviz
trafilatura==2.0.0
pydantic==2.10.6
langchain-community
langchain_openai
mistralai
langchain-mistralai