Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
@@ -54,19 +54,23 @@ def call_model(prompt: str, model, tokenizer) -> str:
|
|
54 |
class AgentState(TypedDict):
|
55 |
messages: List[Dict[str, str]]
|
56 |
html: str
|
|
|
57 |
final_prompt: str
|
58 |
feedback: str
|
59 |
iteration: int
|
60 |
done: bool
|
61 |
timings: Dict[str, float]
|
62 |
|
63 |
-
def agent(
|
64 |
start = time.time()
|
65 |
model, tokenizer = load_agent_model(**AGENT_MODEL_CONFIG[agent_key])
|
66 |
|
67 |
-
latest_input =
|
68 |
-
|
69 |
-
|
|
|
|
|
|
|
70 |
response = call_model(prompt, model, tokenizer)
|
71 |
state["messages"].append({"role": agent_key, "content": response})
|
72 |
state["timings"][timing_label] = time.time() - start
|
@@ -74,16 +78,17 @@ def agent(prompt_template, state: AgentState, agent_key: str, timing_label: str)
|
|
74 |
return response
|
75 |
|
76 |
PROMPTS = {
|
77 |
-
"product_manager": "
|
78 |
-
"project_manager": "
|
79 |
-
"software_engineer": "
|
80 |
-
"qa_engineer": "
|
81 |
}
|
82 |
|
83 |
def generate_ui(user_prompt: str, max_iter: int):
|
84 |
state: AgentState = {
|
85 |
"messages": [{"role": "user", "content": user_prompt}],
|
86 |
"html": "",
|
|
|
87 |
"final_prompt": "",
|
88 |
"feedback": "",
|
89 |
"iteration": 0,
|
@@ -92,23 +97,28 @@ def generate_ui(user_prompt: str, max_iter: int):
|
|
92 |
}
|
93 |
|
94 |
workflow = StateGraph(AgentState)
|
|
|
95 |
workflow.add_node("product_manager", lambda s: {
|
96 |
"messages": s["messages"] + [{
|
97 |
"role": "product_manager",
|
98 |
-
"content": agent(PROMPTS["product_manager"], s, "product_manager", "product_manager")
|
99 |
-
}]
|
|
|
100 |
})
|
|
|
101 |
workflow.add_node("project_manager", lambda s: {
|
102 |
"messages": s["messages"] + [{
|
103 |
"role": "project_manager",
|
104 |
-
"content": (
|
105 |
}],
|
106 |
-
"final_prompt":
|
107 |
})
|
|
|
108 |
workflow.add_node("software_engineer", lambda s: {
|
109 |
-
"html": (
|
110 |
-
"messages": s["messages"] + [{"role": "software_engineer", "content":
|
111 |
})
|
|
|
112 |
def qa_fn(s):
|
113 |
feedback = agent(PROMPTS["qa_engineer"], s, "qa_engineer", "qa_engineer")
|
114 |
done = "APPROVED" in feedback or s["iteration"] >= max_iter
|
@@ -118,6 +128,7 @@ def generate_ui(user_prompt: str, max_iter: int):
|
|
118 |
"iteration": s["iteration"] + 1,
|
119 |
"messages": s["messages"] + [{"role": "qa_engineer", "content": feedback}]
|
120 |
}
|
|
|
121 |
workflow.add_node("qa_engineer", qa_fn)
|
122 |
|
123 |
workflow.add_edge("product_manager", "project_manager")
|
|
|
54 |
class AgentState(TypedDict):
    """Shared state threaded through the multi-agent LangGraph workflow."""

    messages: List[Dict[str, str]]  # chat transcript: [{"role": ..., "content": ...}]
    html: str                       # latest HTML produced by the software_engineer node
    refined_request: str            # product_manager's refinement of the user request
    final_prompt: str               # project_manager's build prompt for the engineer
    feedback: str                   # qa_engineer's review feedback
    iteration: int                  # QA review rounds completed so far
    done: bool                      # True once QA approves or max iterations reached
    timings: Dict[str, float]       # per-agent elapsed time in seconds
|
63 |
|
64 |
+
def agent(template: str, state: AgentState, agent_key: str, timing_label: str):
    """Run one agent step: build its prompt, call its model, record the reply.

    Args:
        template: Prompt template with optional ``{user_input}``, ``{html}``
            and ``{final_prompt}`` placeholders (filled via ``str.format``).
        state: Shared workflow state; mutated in place (``messages`` gets the
            response appended, ``timings`` gets the elapsed time).
        agent_key: Key into ``AGENT_MODEL_CONFIG`` and the role recorded on
            the appended message.
        timing_label: Key under which the elapsed seconds are stored in
            ``state["timings"]``.

    Returns:
        The raw text response from the model.
    """
    # perf_counter() is monotonic; time.time() can jump backwards/forwards
    # with wall-clock adjustments and would corrupt the recorded durations.
    start = time.perf_counter()
    model, tokenizer = load_agent_model(**AGENT_MODEL_CONFIG[agent_key])

    # Most refined input wins; empty strings are falsy, so the chain falls
    # through until a non-empty value is found, ending at the latest message.
    latest_input = (
        state.get("final_prompt")
        or state.get("refined_request")
        or state["messages"][-1]["content"]
    )
    prompt = template.format(
        user_input=latest_input,
        html=state.get("html", ""),
        final_prompt=state.get("final_prompt", ""),
    )
    response = call_model(prompt, model, tokenizer)
    # NOTE(review): the graph nodes also append this same response to
    # "messages" in their returned state — appending here too looks like it
    # duplicates transcript entries; confirm against the workflow wiring.
    state["messages"].append({"role": agent_key, "content": response})
    state["timings"][timing_label] = time.perf_counter() - start
    return response
|
79 |
|
80 |
# Per-agent prompt templates; placeholders are substituted by agent() via
# str.format (user_input / final_prompt / html).
PROMPTS = dict(
    product_manager="{user_input}",
    project_manager="{user_input}",
    software_engineer="{final_prompt}",
    qa_engineer="{html}",
)
|
86 |
|
87 |
def generate_ui(user_prompt: str, max_iter: int):
|
88 |
state: AgentState = {
|
89 |
"messages": [{"role": "user", "content": user_prompt}],
|
90 |
"html": "",
|
91 |
+
"refined_request": "",
|
92 |
"final_prompt": "",
|
93 |
"feedback": "",
|
94 |
"iteration": 0,
|
|
|
97 |
}
|
98 |
|
99 |
workflow = StateGraph(AgentState)
|
100 |
+
|
101 |
workflow.add_node("product_manager", lambda s: {
|
102 |
"messages": s["messages"] + [{
|
103 |
"role": "product_manager",
|
104 |
+
"content": (pm := agent(PROMPTS["product_manager"], s, "product_manager", "product_manager"))
|
105 |
+
}],
|
106 |
+
"refined_request": pm
|
107 |
})
|
108 |
+
|
109 |
workflow.add_node("project_manager", lambda s: {
|
110 |
"messages": s["messages"] + [{
|
111 |
"role": "project_manager",
|
112 |
+
"content": (pr := agent(PROMPTS["project_manager"], s, "project_manager", "project_manager"))
|
113 |
}],
|
114 |
+
"final_prompt": pr
|
115 |
})
|
116 |
+
|
117 |
workflow.add_node("software_engineer", lambda s: {
|
118 |
+
"html": (html := agent(PROMPTS["software_engineer"], s, "software_engineer", "software_engineer")),
|
119 |
+
"messages": s["messages"] + [{"role": "software_engineer", "content": html}]
|
120 |
})
|
121 |
+
|
122 |
def qa_fn(s):
|
123 |
feedback = agent(PROMPTS["qa_engineer"], s, "qa_engineer", "qa_engineer")
|
124 |
done = "APPROVED" in feedback or s["iteration"] >= max_iter
|
|
|
128 |
"iteration": s["iteration"] + 1,
|
129 |
"messages": s["messages"] + [{"role": "qa_engineer", "content": feedback}]
|
130 |
}
|
131 |
+
|
132 |
workflow.add_node("qa_engineer", qa_fn)
|
133 |
|
134 |
workflow.add_edge("product_manager", "project_manager")
|