spandana30 commited on
Commit
e004259
·
verified ·
1 Parent(s): 2834a6c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +175 -137
app.py CHANGED
@@ -1,17 +1,60 @@
1
  import streamlit as st
2
  import os
3
  import time
4
- import base64
 
 
 
5
  from typing import Dict, List, TypedDict
6
  from langgraph.graph import StateGraph, END
7
- from huggingface_hub import InferenceClient
8
 
9
- # Individual clients per role
10
- product_manager_client = InferenceClient("unsloth/mistral-7b-bnb-4bit", token=st.secrets["HF_TOKEN"])
11
- project_manager_client = InferenceClient("unsloth/gemma-3-1b-it", token=st.secrets["HF_TOKEN"])
12
- software_architect_client = InferenceClient("unsloth/c4ai-command-r-08-2024-bnb-4bit", token=st.secrets["HF_TOKEN"])
13
- software_engineer_client = InferenceClient("codellama/CodeLlama-7b-hf", token=st.secrets["HF_TOKEN"])
14
- qa_client = InferenceClient("codellama/CodeLlama-7b-hf", token=st.secrets["HF_TOKEN"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
  class AgentState(TypedDict):
17
  messages: List[Dict[str, str]]
@@ -19,156 +62,151 @@ class AgentState(TypedDict):
19
  project_plan: str
20
  design_specs: str
21
  html: str
22
- css: str
23
  feedback: str
24
  iteration: int
25
  done: bool
26
  timings: Dict[str, float]
27
 
28
- PRODUCT_MANAGER_PROMPT = """You're a Product Manager. Interpret this user request:
29
- {user_request}
30
- Define the high-level product goals, features, and user stories."""
31
-
32
- PROJECT_MANAGER_PROMPT = """You're a Project Manager. Based on this feature list:
33
- {product_vision}
34
- Create a project plan with key milestones and task assignments."""
35
-
36
- DESIGNER_PROMPT = """You're a UI designer. Create design specs for:
37
- {project_plan}
38
- Include:
39
- 1. Color palette (primary, secondary, accent)
40
- 2. Font choices
41
- 3. Layout structure
42
- 4. Component styles
43
- Don't write code - just design guidance."""
44
-
45
- ENGINEER_PROMPT = """Create a complete HTML page with embedded CSS for:
46
- {design_specs}
47
- Requirements:
48
- 1. Full HTML document with <!DOCTYPE>
49
- 2. CSS inside <style> tags in head
50
- 3. Mobile-responsive
51
- 4. Semantic HTML
52
- 5. Ready-to-use (will work when saved as .html)
53
- Output JUST the complete HTML file content:"""
54
-
55
- QA_PROMPT = """Review this website:
56
- {html}
57
- Check for:
58
- 1. Visual quality
59
- 2. Responsiveness
60
- 3. Functionality
61
- Reply \"APPROVED\" if perfect, or suggest improvements."""
62
-
63
- def time_agent(agent_func, state: AgentState, label: str):
64
  start = time.time()
65
- result = agent_func(state)
66
- duration = time.time() - start
67
- result["timings"] = state["timings"]
68
- result["timings"][label] = duration
69
- return result
70
-
71
- def product_manager_agent(state: AgentState):
72
- vision = product_manager_client.text_generation(
73
- PRODUCT_MANAGER_PROMPT.format(user_request=state["messages"][-1]["content"]),
74
- max_new_tokens=1000, temperature=0.3, return_full_text=False
75
- )
76
- return {"product_vision": vision, "messages": state["messages"] + [{"role": "product_manager", "content": vision}]}
77
-
78
- def project_manager_agent(state: AgentState):
79
- plan = project_manager_client.text_generation(
80
- PROJECT_MANAGER_PROMPT.format(product_vision=state["product_vision"]),
81
- max_new_tokens=1000, temperature=0.3, return_full_text=False
82
  )
83
- return {"project_plan": plan, "messages": state["messages"] + [{"role": "project_manager", "content": plan}]}
84
-
85
- def designer_agent(state: AgentState):
86
- specs = software_architect_client.text_generation(
87
- DESIGNER_PROMPT.format(project_plan=state["project_plan"]),
88
- max_new_tokens=1000, temperature=0.3, return_full_text=False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  )
90
- return {"design_specs": specs, "messages": state["messages"] + [{"role": "designer", "content": specs}]}
91
-
92
- def engineer_agent(state: AgentState):
93
- html = software_engineer_client.text_generation(
94
- ENGINEER_PROMPT.format(design_specs=state["design_specs"]),
95
- max_new_tokens=3000, temperature=0.3, return_full_text=False
96
- )
97
- if not html.strip().startswith("<!DOCTYPE"):
98
- html = f"""<!DOCTYPE html><html><head><meta charset='UTF-8'><meta name='viewport' content='width=device-width, initial-scale=1.0'><title>Generated UI</title></head><body>{html}</body></html>"""
99
- return {"html": html, "messages": state["messages"] + [{"role": "software_engineer", "content": html}]}
100
-
101
- def qa_agent(state: AgentState, max_iter: int):
102
- feedback = qa_client.text_generation(
103
- QA_PROMPT.format(html=state["html"]),
104
- max_new_tokens=1000, temperature=0.3, return_full_text=False
105
- )
106
- done = "APPROVED" in feedback or state["iteration"] >= max_iter
107
- return {"feedback": feedback, "done": done, "iteration": state["iteration"] + 1,
108
- "messages": state["messages"] + [{"role": "qa", "content": feedback}]}
109
-
110
- def generate_ui(user_request: str, max_iter: int):
111
- state = {"messages": [{"role": "user", "content": user_request}], "product_vision": "", "project_plan": "",
112
- "design_specs": "", "html": "", "css": "", "feedback": "", "iteration": 0, "done": False, "timings": {}}
113
 
114
  workflow = StateGraph(AgentState)
115
- workflow.add_node("product_manager", lambda s: time_agent(product_manager_agent, s, "product_manager"))
116
- workflow.add_node("project_manager", lambda s: time_agent(project_manager_agent, s, "project_manager"))
117
- workflow.add_node("designer", lambda s: time_agent(designer_agent, s, "designer"))
118
- workflow.add_node("software_engineer", lambda s: time_agent(engineer_agent, s, "software_engineer"))
119
- workflow.add_node("qa", lambda s: time_agent(lambda x: qa_agent(x, max_iter), s, "qa"))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
 
121
  workflow.add_edge("product_manager", "project_manager")
122
  workflow.add_edge("project_manager", "designer")
123
  workflow.add_edge("designer", "software_engineer")
124
- workflow.add_edge("software_engineer", "qa")
125
- workflow.add_conditional_edges("qa", lambda s: END if s["done"] else "software_engineer")
126
  workflow.set_entry_point("product_manager")
127
 
128
  app = workflow.compile()
129
- total_start = time.time()
130
  final_state = app.invoke(state)
131
- return final_state["html"], final_state, time.time() - total_start
132
 
133
  def main():
134
- st.set_page_config(page_title="Multi-Agent Collaboration", layout="wide")
135
- st.title("🀝 Multi-Agent Collaboration")
136
- with st.sidebar:
137
- max_iter = st.slider("Max QA Iterations", 1, 5, 2)
138
-
139
- prompt = st.text_area("πŸ“ Describe the UI you want:", "A coffee shop landing page with hero, menu, and contact form.", height=150)
140
-
141
- if st.button("πŸš€ Generate UI"):
142
  with st.spinner("Agents working..."):
143
- html, final_state, total_time = generate_ui(prompt, max_iter)
144
- st.success("βœ… UI Generated Successfully!")
145
- st.components.v1.html(html, height=600, scrolling=True)
146
-
147
- st.subheader("πŸ“₯ Download HTML")
148
- b64 = base64.b64encode(html.encode()).decode()
149
- st.markdown(f'<a href="data:file/html;base64,{b64}" download="ui.html">Download HTML</a>', unsafe_allow_html=True)
150
-
151
- st.subheader("🧠 Agent Communication Log")
152
- history_text = ""
153
- for msg in final_state["messages"]:
154
- role = msg["role"].replace("_", " ").title()
155
- content = msg["content"]
156
- history_text += f"---\n{role}:\n{content}\n\n"
157
- st.text_area("Agent Dialogue", value=history_text, height=300)
158
-
159
- b64_hist = base64.b64encode(history_text.encode()).decode()
160
- st.markdown(
161
- f'<a href="data:file/txt;base64,{b64_hist}" download="agent_communication.txt" '
162
- 'style="padding: 0.4em 1em; background: #4CAF50; color: white; border-radius: 0.3em; text-decoration: none;">'
163
- 'πŸ“₯ Download Communication Log</a>',
164
- unsafe_allow_html=True
165
- )
166
-
167
- st.subheader("πŸ“Š Performance")
168
- st.write(f"⏱️ Total Time: {total_time:.2f} seconds")
169
- st.write(f"πŸ” Iterations: {final_state['iteration']}")
170
- for stage in ["product_manager", "project_manager", "designer", "software_engineer", "qa"]:
171
- st.write(f"🧩 {stage.title().replace('_', ' ')} Time: {final_state['timings'].get(stage, 0):.2f}s")
172
 
173
  if __name__ == "__main__":
174
  main()
 
1
  import streamlit as st
2
  import os
3
  import time
4
+ import gc
5
+ import torch
6
+ from transformers import AutoTokenizer, AutoModelForCausalLM
7
+ from peft import PeftModel
8
  from typing import Dict, List, TypedDict
9
  from langgraph.graph import StateGraph, END
 
10
 
11
# Hugging Face access token, read from the environment (set via Space secrets).
HF_TOKEN = os.getenv("HF_TOKEN")

# Maps each agent role to the base model it runs on plus the LoRA adapter
# that specializes it for that role.
AGENT_MODEL_CONFIG = {
    "product_manager": {
        "base_id": "unsloth/mistral-7b-bnb-4bit",
        "adapter_id": "spandana30/product-manager-mistral"
    },
    "project_manager": {
        "base_id": "unsloth/gemma-3-1b-it",
        "adapter_id": "spandana30/project-manager-gemma"
    },
    # NOTE(review): the designer role reuses the project-manager adapter —
    # confirm this is intentional (no dedicated designer adapter yet?).
    "designer": {
        "base_id": "unsloth/gemma-3-1b-it",
        "adapter_id": "spandana30/project-manager-gemma"
    },
    # Fixed repo-id casing: the published repo is "codellama/CodeLlama-7b-hf";
    # the previous "codellama/CodeLLaMA-7b-hf" spelling does not match the Hub
    # repo name (the pre-refactor code used the correct casing).
    "software_engineer": {
        "base_id": "codellama/CodeLlama-7b-hf",
        "adapter_id": "spandana30/software-engineer-codellama"
    },
    "qa_engineer": {
        "base_id": "codellama/CodeLlama-7b-hf",
        "adapter_id": "spandana30/software-engineer-codellama"
    }
}
35
+
36
@st.cache_resource
def load_agent_model(base_id, adapter_id):
    """Load a 4-bit quantized base model, attach its LoRA adapter, and return (model, tokenizer).

    Decorated with @st.cache_resource so each (base_id, adapter_id) pair is
    downloaded and loaded only once per Streamlit server process, then shared
    across reruns and sessions.
    """
    # NOTE(review): passing `load_in_4bit=True` directly is deprecated in
    # recent transformers releases in favor of a BitsAndBytesConfig passed as
    # `quantization_config` — confirm against the pinned transformers version.
    base_model = AutoModelForCausalLM.from_pretrained(
        base_id,
        torch_dtype=torch.float16,
        device_map="auto",   # let accelerate place layers on available devices
        load_in_4bit=True,
        token=HF_TOKEN
    )
    # Wrap the base model with the role-specific PEFT/LoRA adapter weights.
    model = PeftModel.from_pretrained(base_model, adapter_id, token=HF_TOKEN)
    # Tokenizer comes from the adapter repo — assumed to match the base
    # model's tokenizer; verify the adapter repo actually ships one.
    tokenizer = AutoTokenizer.from_pretrained(adapter_id, token=HF_TOKEN)
    # eval() disables dropout etc. for deterministic inference.
    return model.eval(), tokenizer
48
+
49
def call_model(prompt: str, model, tokenizer) -> str:
    """Run greedy generation on `model` and return only the newly generated text.

    Bug fix: the previous version decoded the whole output sequence, so every
    response began with an echo of the prompt (the pre-refactor code used
    `return_full_text=False` to avoid exactly this). The prompt tokens are now
    sliced off before decoding.
    """
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(model.device)
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        outputs = model.generate(
            **inputs,
            max_new_tokens=1024,
            # Greedy decoding; the old `temperature=0.3` kwarg was ignored
            # (and warned) when do_sample=False, so it is dropped.
            do_sample=False
        )
    # Keep only the tokens generated after the prompt.
    generated = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(generated, skip_special_tokens=True)
58
 
59
  class AgentState(TypedDict):
60
  messages: List[Dict[str, str]]
 
62
  project_plan: str
63
  design_specs: str
64
  html: str
 
65
  feedback: str
66
  iteration: int
67
  done: bool
68
  timings: Dict[str, float]
69
 
70
def agent(template: str, state: AgentState, agent_key: str, timing_label: str) -> str:
    """Run one role-agent: load its model, build its prompt, generate, and return the response.

    Bug fix: the previous version also appended the response to
    state["messages"] in place here, while every graph node appends the same
    message again in its returned state update — so each agent message
    appeared twice in the conversation log. Appending is now left to the
    graph nodes only.
    """
    st.write(f'🛠 Running agent: {agent_key}')
    start = time.time()
    model, tokenizer = load_agent_model(**AGENT_MODEL_CONFIG[agent_key])

    # Fill every known placeholder; each template only consumes the fields
    # relevant to its role, the rest format to empty strings harmlessly.
    prompt = template.format(
        user_request=state["messages"][0]["content"],
        product_vision=state.get("product_vision", ""),
        project_plan=state.get("project_plan", ""),
        design_specs=state.get("design_specs", ""),
        html=state.get("html", "")
    )
    st.write(f'📤 Prompt for {agent_key}:', prompt)

    response = call_model(prompt, model, tokenizer)
    st.write(f'📥 Response from {agent_key}:', response[:500])

    # NOTE(review): timings are recorded by mutating the shared state dict
    # because graph nodes never return a "timings" update — confirm this
    # in-place write survives the LangGraph state merge in the pinned version.
    state["timings"][timing_label] = time.time() - start
    gc.collect()  # encourage release of activation memory between agent calls
    return response
91
+
92
# Role prompt templates. Placeholders are filled in by agent(); each template
# only consumes the fields relevant to its role.
# Bug fix: the doubled backslashes ("\\n", "\\\"") embedded literal backslash-n
# text in the prompts instead of real newlines/quotes; single escapes restore
# the intended multi-line prompt text.
PROMPTS = {
    "product_manager": (
        "You're a Product Manager. Interpret this user request:\n"
        "{user_request}\n"
        "Define the high-level product goals, features, and user stories."
    ),
    "project_manager": (
        "You're a Project Manager. Based on this feature list:\n"
        "{product_vision}\n"
        "Create a project plan with key milestones and task assignments."
    ),
    "designer": (
        "You're a UI designer. Create design specs for:\n"
        "{project_plan}\n"
        "Include:\n"
        "1. Color palette (primary, secondary, accent)\n"
        "2. Font choices\n"
        "3. Layout structure\n"
        "4. Component styles\n"
        "Don't write code - just design guidance."
    ),
    "software_engineer": (
        "Create a complete HTML page with embedded CSS for:\n"
        "{design_specs}\n"
        "Requirements:\n"
        "1. Full HTML document with <!DOCTYPE>\n"
        "2. CSS inside <style> tags in head\n"
        "3. Mobile-responsive\n"
        "4. Semantic HTML\n"
        "5. Ready-to-use (will work when saved as .html)\n"
        "Output JUST the complete HTML file content:"
    ),
    "qa_engineer": (
        "Review this website:\n"
        "{html}\n"
        "Check for:\n"
        "1. Visual quality\n"
        "2. Responsiveness\n"
        "3. Functionality\n"
        "Reply \"APPROVED\" if perfect, or suggest improvements."
    )
}
134
+
135
def generate_ui(user_prompt: str, max_iter: int):
    """Build the multi-agent LangGraph pipeline, run it, and return the final state.

    The pipeline is product_manager -> project_manager -> designer ->
    software_engineer -> qa_engineer, with QA looping back to the engineer
    until it approves or `max_iter` iterations have run.
    """
    state: AgentState = {
        "messages": [{"role": "user", "content": user_prompt}],
        "product_vision": "",
        "project_plan": "",
        "design_specs": "",
        "html": "",
        "feedback": "",
        "iteration": 0,
        "done": False,
        "timings": {}
    }

    # Named node functions in place of the walrus-in-lambda style: each node
    # runs its agent and returns the partial state update LangGraph merges in.
    def product_manager_node(s):
        vision = agent(PROMPTS["product_manager"], s, "product_manager", "product_manager")
        return {
            "messages": s["messages"] + [{"role": "product_manager", "content": vision}],
            "product_vision": vision
        }

    def project_manager_node(s):
        plan = agent(PROMPTS["project_manager"], s, "project_manager", "project_manager")
        return {
            "messages": s["messages"] + [{"role": "project_manager", "content": plan}],
            "project_plan": plan
        }

    def designer_node(s):
        specs = agent(PROMPTS["designer"], s, "designer", "designer")
        return {
            "messages": s["messages"] + [{"role": "designer", "content": specs}],
            "design_specs": specs
        }

    def software_engineer_node(s):
        page = agent(PROMPTS["software_engineer"], s, "software_engineer", "software_engineer")
        return {
            "html": page,
            "messages": s["messages"] + [{"role": "software_engineer", "content": page}]
        }

    def qa_node(s):
        feedback = agent(PROMPTS["qa_engineer"], s, "qa_engineer", "qa_engineer")
        finished = "APPROVED" in feedback or s["iteration"] >= max_iter
        return {
            "feedback": feedback,
            "done": finished,
            "iteration": s["iteration"] + 1,
            "messages": s["messages"] + [{"role": "qa_engineer", "content": feedback}]
        }

    workflow = StateGraph(AgentState)
    workflow.add_node("product_manager", product_manager_node)
    workflow.add_node("project_manager", project_manager_node)
    workflow.add_node("designer", designer_node)
    workflow.add_node("software_engineer", software_engineer_node)
    workflow.add_node("qa_engineer", qa_node)

    workflow.add_edge("product_manager", "project_manager")
    workflow.add_edge("project_manager", "designer")
    workflow.add_edge("designer", "software_engineer")
    workflow.add_edge("software_engineer", "qa_engineer")
    # Loop back to the engineer until QA marks the state done.
    workflow.add_conditional_edges("qa_engineer", lambda s: END if s["done"] else "software_engineer")
    workflow.set_entry_point("product_manager")

    app = workflow.compile()
    final_state = app.invoke(state)
    return final_state
195
 
196
def main():
    """Streamlit entry point: collect a UI request, run the agent pipeline, render results.

    Fix: restores emoji string literals that had been corrupted into UTF-8
    mojibake (e.g. "πŸš€" for the rocket emoji); logic is unchanged.
    """
    st.set_page_config(page_title="Multi-Agent UI Generator", layout="wide")
    st.title("🧠 Multi-Agent UI Generation System")
    max_iter = st.sidebar.slider("Max QA Iterations", 1, 5, 2)
    prompt = st.text_area("What UI do you want to build?", "A coffee shop landing page with a hero image, menu, and contact form.", height=150)
    if st.button("🚀 Generate"):
        with st.spinner("Agents working..."):
            final = generate_ui(prompt, max_iter)
            st.success("✅ UI Generated")
            st.subheader("🔍 Final Output")
            # Render the generated page inline in a sandboxed iframe.
            st.components.v1.html(final["html"], height=600, scrolling=True)
            st.subheader("🧠 Agent Messages")
            for msg in final["messages"]:
                st.markdown(f"**{msg['role'].title()}**:\n```\n{msg['content']}\n```")


if __name__ == "__main__":
    main()