spandana30 committed
Commit facf100 · verified · 1 Parent(s): 52591b5

Update app.py

Files changed (1)
  1. app.py  +85 -141
app.py CHANGED
@@ -1,19 +1,16 @@
-# Multi-agent UI generator using direct model.generate() instead of pipeline()
-
 import streamlit as st
 import time
-import base64
 from typing import Dict, List, TypedDict
 from langgraph.graph import StateGraph, END
-from transformers import AutoModelForCausalLM, AutoTokenizer
-from peft import PeftModel
-import torch
-import os
-
-st.set_page_config(page_title="Multi-Agent Collaboration", layout="wide")
 
 HF_TOKEN = os.getenv("HF_TOKEN")
 
 AGENT_MODEL_CONFIG = {
     "product_manager": {
         "base": "unsloth/gemma-3-1b-it",
@@ -23,168 +20,115 @@ AGENT_MODEL_CONFIG = {
         "base": "unsloth/gemma-3-1b-it",
         "adapter": "spandana30/project-manager-gemma"
     },
-    "software_architect": {
-        "base": "unsloth/c4ai-command-r-08-2024-bnb-4bit",
-        "adapter": "spandana30/software-architect-cohere"
-    },
     "software_engineer": {
-        "base": "codellama/CodeLLaMA-7b-hf",
-        "adapter": "spandana30/software-engineer-codellama"
-    },
-    "qa": {
-        "base": "codellama/CodeLLaMA-7b-hf",
-        "adapter": "spandana30/software-engineer-codellama"
     },
 }
 
 @st.cache_resource
-def get_model_and_tokenizer(role: str):
-    cfg = AGENT_MODEL_CONFIG[role]
-    try:
-        st.write(f"🔍 Loading model for {role}: {cfg['base']} + {cfg['adapter']}")
-        base_model = AutoModelForCausalLM.from_pretrained(
-            cfg["base"], torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, device_map="auto", token=HF_TOKEN
-        )
-        model = PeftModel.from_pretrained(base_model, cfg["adapter"], token=HF_TOKEN)
-        tokenizer = AutoTokenizer.from_pretrained(cfg["adapter"], token=HF_TOKEN)
-        return model, tokenizer
-    except Exception as e:
-        st.error(f"❌ Failed to load model for {role}\nError: {e}")
-        raise
 
 class AgentState(TypedDict):
     messages: List[Dict[str, str]]
-    user_request: str
-    refined_request: str
-    scoped_request: str
-    design_specs: str
     html: str
     feedback: str
     iteration: int
     done: bool
     timings: Dict[str, float]
 
-def run_pipeline(role: str, prompt: str):
-    model, tokenizer = get_model_and_tokenizer(role)
-    inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(model.device)
-    with torch.no_grad():
-        output = model.generate(
-            **inputs,
-            max_new_tokens=1024,
-            do_sample=False
-        )
-    return tokenizer.decode(output[0], skip_special_tokens=True).strip()
 
 PROMPTS = {
-    "product_manager": """You're a Product Manager. Refine and clarify this request:
-{user_request}
-Ensure it's clear, feasible, and user-focused. Output the revised request only.""",
-    "project_manager": """You're a Project Manager. Given this refined request:
-{refined_request}
-Break it down into scope and constraints. Output the scoped request only.""",
-    "designer": """You're a UI designer. Create design specs for:
-{scoped_request}
-Include color palette, font, layout, and component styles. No code.""",
-    "software_engineer": """Create a full HTML page with embedded CSS for:
-{design_specs}
-Requirements:
-- Semantic, responsive HTML
-- Embedded CSS in <style> tag
-- Output complete HTML only.""",
-    "qa": """Review this webpage:
-{html}
-Is it visually appealing, responsive, and functional? Reply "APPROVED" or suggest improvements."""
 }
 
-def time_agent(agent_func, state: AgentState, label: str):
-    start = time.time()
-    result = agent_func(state)
-    result["timings"] = state["timings"]
-    result["timings"][label] = time.time() - start
-    return result
-
-def product_manager_agent(state: AgentState):
-    revised = run_pipeline("product_manager", PROMPTS["product_manager"].format(user_request=state["user_request"]))
-    return {"refined_request": revised, "messages": state["messages"] + [{"role": "product_manager", "content": revised}]}
-
-def project_manager_agent(state: AgentState):
-    scoped = run_pipeline("project_manager", PROMPTS["project_manager"].format(refined_request=state["refined_request"]))
-    return {"scoped_request": scoped, "messages": state["messages"] + [{"role": "project_manager", "content": scoped}]}
-
-def designer_agent(state: AgentState):
-    specs = run_pipeline("product_manager", PROMPTS["designer"].format(scoped_request=state["scoped_request"]))
-    return {"design_specs": specs, "messages": state["messages"] + [{"role": "designer", "content": specs}]}
-
-def engineer_agent(state: AgentState):
-    html = run_pipeline("software_engineer", PROMPTS["software_engineer"].format(design_specs=state["design_specs"]))
-    return {"html": html, "messages": state["messages"] + [{"role": "software_engineer", "content": html}]}
-
-def qa_agent(state: AgentState, max_iter: int):
-    feedback = run_pipeline("qa", PROMPTS["qa"].format(html=state["html"]))
-    done = "APPROVED" in feedback or state["iteration"] >= max_iter
-    return {"feedback": feedback, "done": done, "iteration": state["iteration"] + 1,
-            "messages": state["messages"] + [{"role": "qa", "content": feedback}]}
-
-def generate_ui(user_request: str, max_iter: int):
-    state = {"messages": [{"role": "user", "content": user_request}],
-             "user_request": user_request,
-             "refined_request": "", "scoped_request": "", "design_specs": "",
-             "html": "", "feedback": "", "iteration": 0, "done": False, "timings": {}}
 
     workflow = StateGraph(AgentState)
-    workflow.add_node("product_manager", lambda s: time_agent(product_manager_agent, s, "product_manager"))
-    workflow.add_node("project_manager", lambda s: time_agent(project_manager_agent, s, "project_manager"))
-    workflow.add_node("designer", lambda s: time_agent(designer_agent, s, "designer"))
-    workflow.add_node("software_engineer", lambda s: time_agent(engineer_agent, s, "software_engineer"))
-    workflow.add_node("qa", lambda s: time_agent(lambda x: qa_agent(x, max_iter), s, "qa"))
 
     workflow.add_edge("product_manager", "project_manager")
-    workflow.add_edge("project_manager", "designer")
-    workflow.add_edge("designer", "software_engineer")
-    workflow.add_edge("software_engineer", "qa")
-    workflow.add_conditional_edges("qa", lambda s: END if s["done"] else "software_engineer")
     workflow.set_entry_point("product_manager")
 
     app = workflow.compile()
-    total_start = time.time()
     final_state = app.invoke(state)
-    return final_state["html"], final_state, time.time() - total_start
 
 def main():
-    st.title("🤖 Multi-Agent UI Generator")
-    with st.sidebar:
-        max_iter = st.slider("Max QA Iterations", 1, 5, 2)
-
-    prompt = st.text_area("📝 Describe the UI you want:", "A coffee shop landing page with hero, menu, and contact form.", height=150)
-
     if st.button("🚀 Generate UI"):
         with st.spinner("Agents working..."):
-            html, final_state, total_time = generate_ui(prompt, max_iter)
-            st.success("✅ UI Generated Successfully!")
-            st.components.v1.html(html, height=600, scrolling=True)
-
-            b64 = base64.b64encode(html.encode()).decode()
-            st.markdown(f'<a href="data:file/html;base64,{b64}" download="ui.html">📥 Download HTML</a>', unsafe_allow_html=True)
-
-            st.subheader("🧠 Agent Communication Log")
-            history_text = ""
-            for msg in final_state["messages"]:
-                role = msg["role"].replace("_", " ").title()
-                content = msg["content"]
-                history_text += f"---\n{role}:\n{content}\n\n"
-            st.text_area("Agent Dialogue", value=history_text, height=300)
-
-            b64_hist = base64.b64encode(history_text.encode()).decode()
-            st.markdown(
-                f'<a href="data:file/txt;base64,{b64_hist}" download="agent_communication.txt">📥 Download Communication Log</a>',
-                unsafe_allow_html=True)
-
-            st.subheader("📊 Performance")
-            st.write(f"⏱️ Total Time: {total_time:.2f} seconds")
-            st.write(f"🔁 Iterations: {final_state['iteration']}")
-            for stage in ["product_manager", "project_manager", "designer", "software_engineer", "qa"]:
-                st.write(f"🧩 {stage.replace('_', ' ').title()} Time: {final_state['timings'].get(stage, 0):.2f}s")
 
 if __name__ == "__main__":
     main()
 import streamlit as st
+import os
 import time
+import gc
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from peft import PeftModel
 from typing import Dict, List, TypedDict
 from langgraph.graph import StateGraph, END
 
 HF_TOKEN = os.getenv("HF_TOKEN")
 
+# Agent model config: all agents use the same Gemma base
 AGENT_MODEL_CONFIG = {
     "product_manager": {
         "base": "unsloth/gemma-3-1b-it",
 
         "base": "unsloth/gemma-3-1b-it",
         "adapter": "spandana30/project-manager-gemma"
     },
     "software_engineer": {
+        "base": "unsloth/gemma-3-1b-it",
+        "adapter": "spandana30/project-manager-gemma"
     },
+    "qa_engineer": {
+        "base": "unsloth/gemma-3-1b-it",
+        "adapter": "spandana30/project-manager-gemma"
+    }
 }
 
 @st.cache_resource
+def load_agent_model(base_id, adapter_id):
+    base_model = AutoModelForCausalLM.from_pretrained(
+        base_id,
+        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+        device_map="auto" if torch.cuda.is_available() else None,
+        token=HF_TOKEN
+    )
+    model = PeftModel.from_pretrained(base_model, adapter_id, token=HF_TOKEN)
+    tokenizer = AutoTokenizer.from_pretrained(adapter_id, token=HF_TOKEN)
+    return model.eval(), tokenizer
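
Note: st.cache_resource memoizes on the argument values, and every entry in AGENT_MODEL_CONFIG above uses the identical (base_id, adapter_id) pair, so the four "agents" resolve to one shared model in memory. A minimal sanity check, assuming the config stays as committed:

    m1, _ = load_agent_model(**AGENT_MODEL_CONFIG["product_manager"])
    m2, _ = load_agent_model(**AGENT_MODEL_CONFIG["qa_engineer"])
    assert m1 is m2  # same cached object, loaded once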
+
+def call_model(prompt: str, model, tokenizer) -> str:
+    inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(model.device)
+    with torch.no_grad():  # inference only; avoids keeping autograd buffers
+        outputs = model.generate(
+            **inputs,
+            max_new_tokens=512,
+            do_sample=False  # greedy decoding; a temperature setting would be ignored here
+        )
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
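
Note: decoding outputs[0] returns the prompt followed by the completion, so each agent's "response" still carries its own instructions. Inside call_model one could slice the prompt off before decoding; a sketch using the locals defined above:

    prompt_len = inputs["input_ids"].shape[1]
    completion = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)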
 
 class AgentState(TypedDict):
     messages: List[Dict[str, str]]
     html: str
     feedback: str
     iteration: int
     done: bool
     timings: Dict[str, float]
 
+def agent(prompt_template, state: AgentState, agent_key: str, timing_label: str):
+    start = time.time()
+    model, tokenizer = load_agent_model(**AGENT_MODEL_CONFIG[agent_key])
+    # str.format cannot index a list with [-1], so the template fields are
+    # passed explicitly rather than via .format(**state)
+    prompt = prompt_template.format(
+        last_message=state["messages"][-1]["content"], html=state["html"]
+    )
+    response = call_model(prompt, model, tokenizer)
+    # the graph nodes append the response to messages; appending here as well
+    # would record every message twice
+    state["timings"][timing_label] = time.time() - start
+    gc.collect()
+    return response
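
Note: gc.collect() reclaims Python objects but does not release cached CUDA memory. If GPU pressure is a concern between agent calls, one could also empty the CUDA cache; a sketch, assuming no other process relies on the cached blocks:

    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # release cached CUDA allocations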
 
 
 PROMPTS = {
+    "product_manager": "You're a Product Manager. Refine this user request:\n{last_message}",
+    "project_manager": "You're a Project Manager. Break down this refined request:\n{last_message}",
+    "software_engineer": "You're a Software Engineer. Generate HTML+CSS code for:\n{last_message}",
+    "qa_engineer": "You're a QA Engineer. Review this HTML:\n{html}\nGive feedback or reply APPROVED."
 }
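
Note: the templates as committed used "{messages[-1][content]}" with .format(**state), which fails at runtime because str.format treats [-1] as the string key "-1", not a negative index; hence the {last_message} placeholder above. A one-line reproduction:

    "{messages[-1]}".format(messages=["a", "b"])  # raises TypeError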
 
+def generate_ui(user_prompt: str, max_iter: int):
+    state: AgentState = {
+        "messages": [{"role": "user", "content": user_prompt}],
+        "html": "",
+        "feedback": "",
+        "iteration": 0,
+        "done": False,
+        "timings": {}
+    }
 
     workflow = StateGraph(AgentState)
+    workflow.add_node("product_manager", lambda s: {"messages": s["messages"] + [{"role": "product_manager", "content": agent(PROMPTS["product_manager"], s, "product_manager", "product_manager")}]})
+    workflow.add_node("project_manager", lambda s: {"messages": s["messages"] + [{"role": "project_manager", "content": agent(PROMPTS["project_manager"], s, "project_manager", "project_manager")}]})
+    def engineer_fn(s):
+        # generate first so the logged message is the new HTML, not the stale s["html"]
+        html = agent(PROMPTS["software_engineer"], s, "software_engineer", "software_engineer")
+        return {
+            "html": html,
+            "messages": s["messages"] + [{"role": "software_engineer", "content": html}]
+        }
+    workflow.add_node("software_engineer", engineer_fn)
+    def qa_fn(s):
+        feedback = agent(PROMPTS["qa_engineer"], s, "qa_engineer", "qa_engineer")
+        done = "APPROVED" in feedback or s["iteration"] >= max_iter
+        return {
+            "feedback": feedback,
+            "done": done,
+            "iteration": s["iteration"] + 1,
+            "messages": s["messages"] + [{"role": "qa_engineer", "content": feedback}]
+        }
+    workflow.add_node("qa_engineer", qa_fn)
 
     workflow.add_edge("product_manager", "project_manager")
+    workflow.add_edge("project_manager", "software_engineer")
+    workflow.add_edge("software_engineer", "qa_engineer")
+    workflow.add_conditional_edges("qa_engineer", lambda s: END if s["done"] else "software_engineer")
     workflow.set_entry_point("product_manager")
 
     app = workflow.compile()
     final_state = app.invoke(state)
+    return final_state
 
 def main():
+    st.set_page_config(page_title="Multi-Agent UI Generator", layout="wide")
+    st.title("Multi-Agent Collaboration")
+    max_iter = st.sidebar.slider("Max QA Iterations", 1, 5, 2)
+    prompt = st.text_area("Describe your UI:", "A landing page for a coffee shop with a hero image, menu, and contact form.", height=150)
 
     if st.button("🚀 Generate UI"):
         with st.spinner("Agents working..."):
+            final = generate_ui(prompt, max_iter)
+            st.success("✅ UI Generated")
+            st.subheader("🔍 Output HTML")
+            st.components.v1.html(final["html"], height=600, scrolling=True)
+            st.subheader("🧠 Agent Messages")
+            for msg in final["messages"]:
+                st.markdown(f"**{msg['role'].title()}**:\n```\n{msg['content']}\n```")
 
 if __name__ == "__main__":
     main()
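
A quick way to exercise the graph wiring without downloading any weights is to stub the generation step. A minimal sketch, assuming this file is importable as a module named app (the stub returning "APPROVED" is a test fixture for illustration, not part of the commit):

    import app
    app.load_agent_model = lambda base_id, adapter_id: (None, None)  # skip model loading
    app.call_model = lambda prompt, model, tokenizer: "APPROVED <html></html>"
    final = app.generate_ui("a test page", max_iter=1)
    assert final["done"] and final["iteration"] == 1  # QA approves on the first pass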