spandana30 committed on
Commit
3079b0f
Β·
verified Β·
1 Parent(s): a42a7cf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -70
app.py CHANGED
@@ -1,115 +1,148 @@
 
1
 
2
  import streamlit as st
3
- import os
4
  import time
5
  import base64
6
  from typing import Dict, List, TypedDict
7
  from langgraph.graph import StateGraph, END
8
- from huggingface_hub import InferenceClient
9
-
10
- # Use Mistral model only
11
- client = InferenceClient(
12
- model="mistralai/Mistral-7B-Instruct-v0.2",
13
- token=st.secrets["HF_TOKEN"]
14
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
  class AgentState(TypedDict):
17
  messages: List[Dict[str, str]]
 
 
 
18
  design_specs: str
19
  html: str
20
- css: str
21
  feedback: str
22
  iteration: int
23
  done: bool
24
  timings: Dict[str, float]
25
 
26
- DESIGNER_PROMPT = """You're a UI designer. Create design specs for:
 
 
 
 
 
27
  {user_request}
28
- Include:
29
- 1. Color palette (primary, secondary, accent)
30
- 2. Font choices
31
- 3. Layout structure
32
- 4. Component styles
33
- Don't write code - just design guidance."""
34
-
35
- ENGINEER_PROMPT = """Create a complete HTML page with embedded CSS for:
36
  {design_specs}
37
  Requirements:
38
- 1. Full HTML document with <!DOCTYPE>
39
- 2. CSS inside <style> tags in head
40
- 3. Mobile-responsive
41
- 4. Semantic HTML
42
- 5. Ready-to-use (will work when saved as .html)
43
- Output JUST the complete HTML file content:"""
44
-
45
- QA_PROMPT = """Review this website:
46
  {html}
47
- Check for:
48
- 1. Visual quality
49
- 2. Responsiveness
50
- 3. Functionality
51
- Reply "APPROVED" if perfect, or suggest improvements."""
52
 
53
  def time_agent(agent_func, state: AgentState, label: str):
54
  start = time.time()
55
  result = agent_func(state)
56
- duration = time.time() - start
57
  result["timings"] = state["timings"]
58
- result["timings"][label] = duration
59
  return result
60
 
 
 
 
 
 
 
 
 
61
  def designer_agent(state: AgentState):
62
- specs = call_model(DESIGNER_PROMPT.format(user_request=state["messages"][-1]["content"]))
63
  return {"design_specs": specs, "messages": state["messages"] + [{"role": "designer", "content": specs}]}
64
 
65
  def engineer_agent(state: AgentState):
66
- html = call_model(ENGINEER_PROMPT.format(design_specs=state["design_specs"]))
67
- if not html.strip().startswith("<!DOCTYPE"):
68
- html = f"""<!DOCTYPE html>
69
- <html><head><meta charset='UTF-8'><meta name='viewport' content='width=device-width, initial-scale=1.0'>
70
- <title>Generated UI</title></head><body>{html}</body></html>"""
71
  return {"html": html, "messages": state["messages"] + [{"role": "software_engineer", "content": html}]}
72
 
73
  def qa_agent(state: AgentState, max_iter: int):
74
- feedback = call_model(QA_PROMPT.format(html=state["html"]))
75
  done = "APPROVED" in feedback or state["iteration"] >= max_iter
76
  return {"feedback": feedback, "done": done, "iteration": state["iteration"] + 1,
77
  "messages": state["messages"] + [{"role": "qa", "content": feedback}]}
78
 
79
- def call_model(prompt: str, max_retries=3) -> str:
80
- for attempt in range(max_retries):
81
- try:
82
- return client.text_generation(
83
- prompt,
84
- max_new_tokens=3000,
85
- temperature=0.3,
86
- return_full_text=False
87
- )
88
- except Exception as e:
89
- st.error(f"Model call failed (attempt {attempt+1}): {e}")
90
- time.sleep(2)
91
- return "<html><body><h1>Error generating UI</h1></body></html>"
92
-
93
  def generate_ui(user_request: str, max_iter: int):
94
- state = {"messages": [{"role": "user", "content": user_request}], "design_specs": "", "html": "",
95
- "css": "", "feedback": "", "iteration": 0, "done": False, "timings": {}}
 
 
96
 
97
  workflow = StateGraph(AgentState)
 
 
98
  workflow.add_node("designer", lambda s: time_agent(designer_agent, s, "designer"))
99
  workflow.add_node("software_engineer", lambda s: time_agent(engineer_agent, s, "software_engineer"))
100
  workflow.add_node("qa", lambda s: time_agent(lambda x: qa_agent(x, max_iter), s, "qa"))
 
 
 
101
  workflow.add_edge("designer", "software_engineer")
102
  workflow.add_edge("software_engineer", "qa")
103
  workflow.add_conditional_edges("qa", lambda s: END if s["done"] else "software_engineer")
104
- workflow.set_entry_point("designer")
 
105
  app = workflow.compile()
106
  total_start = time.time()
107
  final_state = app.invoke(state)
108
  return final_state["html"], final_state, time.time() - total_start
109
 
110
  def main():
111
- st.set_page_config(page_title="Multi-Agent Collaboration", layout="wide")
112
- st.title("🀝 Multi-Agent Collaboration")
113
  with st.sidebar:
114
  max_iter = st.slider("Max QA Iterations", 1, 5, 2)
115
 
@@ -121,11 +154,9 @@ def main():
121
  st.success("βœ… UI Generated Successfully!")
122
  st.components.v1.html(html, height=600, scrolling=True)
123
 
124
- st.subheader("πŸ“₯ Download HTML")
125
  b64 = base64.b64encode(html.encode()).decode()
126
- st.markdown(f'<a href="data:file/html;base64,{b64}" download="ui.html">Download HTML</a>', unsafe_allow_html=True)
127
 
128
- # Communication History
129
  st.subheader("🧠 Agent Communication Log")
130
  history_text = ""
131
  for msg in final_state["messages"]:
@@ -134,19 +165,16 @@ def main():
134
  history_text += f"---\n{role}:\n{content}\n\n"
135
  st.text_area("Agent Dialogue", value=history_text, height=300)
136
 
137
- # Download Chat Log
138
  b64_hist = base64.b64encode(history_text.encode()).decode()
139
  st.markdown(
140
- f'<a href="data:file/txt;base64,{b64_hist}" download="agent_communication.txt" '
141
- 'style="padding: 0.4em 1em; background: #4CAF50; color: white; border-radius: 0.3em; text-decoration: none;">'
142
- 'πŸ“₯ Download Communication Log</a>',
143
- unsafe_allow_html=True
144
- )
145
  st.subheader("πŸ“Š Performance")
146
  st.write(f"⏱️ Total Time: {total_time:.2f} seconds")
147
  st.write(f"πŸ” Iterations: {final_state['iteration']}")
148
- for stage in ["designer", "software_engineer", "qa"]:
149
- st.write(f"🧩 {stage.title().replace('_', ' ')} Time: {final_state['timings'].get(stage, 0):.2f}s")
150
 
151
  if __name__ == "__main__":
152
  main()
 
1
# Updated multi-agent UI generation system with custom fine-tuned LoRA adapters

import streamlit as st
import time
import base64
from typing import Dict, List, TypedDict
from langgraph.graph import StateGraph, END
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from peft import PeftModel, PeftConfig
import torch

st.set_page_config(page_title="Multi-Agent Collaboration", layout="wide")

# Agent model loading config: each role maps to a base checkpoint plus the
# LoRA adapter fine-tuned for that role.
AGENT_MODEL_CONFIG = {
    "product_manager": {
        "base": "mistralai/Mistral-7B-Instruct-v0.2",
        "adapter": "spandana30/product-manager-mistral"
    },
    "project_manager": {
        "base": "google/gemma-1.1-7b-it",
        "adapter": "spandana30/project-manager-gemma"
    },
    "software_architect": {
        "base": "cohere/command-r",  # update if you have a local base version
        "adapter": "spandana30/software-architect-cohere"
    },
    "software_engineer": {
        "base": "codellama/CodeLlama-7b-Instruct-hf",
        "adapter": "spandana30/software-engineer-codellama"
    },
    # NOTE(review): QA intentionally reuses the software-engineer adapter —
    # confirm a dedicated QA adapter is not available.
    "qa": {
        "base": "codellama/CodeLlama-7b-Instruct-hf",
        "adapter": "spandana30/software-engineer-codellama"
    },
}

@st.cache_resource
def load_agent_model(base_id, adapter_id):
    """Load a base causal LM, attach its LoRA adapter, and wrap both in a
    text-generation pipeline.

    Cached with st.cache_resource so each (base, adapter) pair is loaded at
    most once per Streamlit server process.
    """
    base_model = AutoModelForCausalLM.from_pretrained(
        base_id, torch_dtype=torch.float16, device_map="auto"
    )
    model = PeftModel.from_pretrained(base_model, adapter_id)
    # Adapter repos often ship without a tokenizer; fall back to the base
    # model's tokenizer so loading does not fail outright.
    try:
        tokenizer = AutoTokenizer.from_pretrained(adapter_id)
    except Exception:
        tokenizer = AutoTokenizer.from_pretrained(base_id)
    return pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=1024)

# Eagerly build one pipeline per role at import time. NOTE(review): this loads
# several 7B models up front — heavy on memory; confirm the host can hold them.
AGENT_PIPELINES = {
    role: load_agent_model(cfg["base"], cfg["adapter"])
    for role, cfg in AGENT_MODEL_CONFIG.items()
}
 
53
class AgentState(TypedDict):
    """Shared state threaded through every node of the LangGraph workflow."""
    messages: List[Dict[str, str]]  # role-tagged log of every agent's output
    user_request: str               # raw request typed by the user
    refined_request: str            # product manager's clarified request
    scoped_request: str             # project manager's scoped request
    design_specs: str               # designer's output (no code)
    html: str                       # engineer's full HTML page
    feedback: str                   # latest QA verdict
    iteration: int                  # number of QA rounds completed
    done: bool                      # True once QA approves or the cap is hit
    timings: Dict[str, float]       # per-agent wall-clock seconds
64
 
65
def run_pipeline(role: str, prompt: str):
    """Run `prompt` through the pipeline registered for `role`; return only the
    model's continuation, stripped of surrounding whitespace.

    Fix: `return_full_text=False` — by default a transformers text-generation
    pipeline echoes the prompt at the head of `generated_text`, so the original
    call fed prompt + completion to every downstream agent.
    """
    outputs = AGENT_PIPELINES[role](prompt, do_sample=False, return_full_text=False)
    return outputs[0]['generated_text'].strip()
68
+
69
# One prompt template per agent role; each placeholder is filled with a field
# from AgentState before the template is sent to that role's pipeline.
PROMPTS = {
    # Turns the raw user request into a clear, feasible one.
    "product_manager": """You're a Product Manager. Refine and clarify this request:
{user_request}
Ensure it's clear, feasible, and user-focused. Output the revised request only.""",
    # Converts the refined request into scope + constraints.
    "project_manager": """You're a Project Manager. Given this refined request:
{refined_request}
Break it down into scope and constraints. Output the scoped request only.""",
    # Produces design guidance only — no code.
    "designer": """You're a UI designer. Create design specs for:
{scoped_request}
Include color palette, font, layout, and component styles. No code.""",
    # Renders the specs as a standalone HTML document.
    "software_engineer": """Create a full HTML page with embedded CSS for:
{design_specs}
Requirements:
- Semantic, responsive HTML
- Embedded CSS in <style> tag
- Output complete HTML only.""",
    # The literal "APPROVED" token is what qa_agent checks to stop the loop.
    "qa": """Review this webpage:
{html}
Is it visually appealing, responsive, and functional? Reply "APPROVED" or suggest improvements."""
}
 
 
 
89
 
90
def time_agent(agent_func, state: AgentState, label: str):
    """Run one agent node and record its wall-clock duration under `label`.

    The shared timings dict from `state` is carried into the node's result so
    every agent accumulates into the same mapping.
    """
    started = time.time()
    updates = agent_func(state)
    elapsed = time.time() - started
    updates["timings"] = state["timings"]
    updates["timings"][label] = elapsed
    return updates
96
 
97
def product_manager_agent(state: AgentState):
    """Product-manager node: refine the raw user request."""
    prompt = PROMPTS["product_manager"].format(user_request=state["user_request"])
    revised = run_pipeline("product_manager", prompt)
    log = state["messages"] + [{"role": "product_manager", "content": revised}]
    return {"refined_request": revised, "messages": log}
100
+
101
def project_manager_agent(state: AgentState):
    """Project-manager node: break the refined request into scope/constraints."""
    prompt = PROMPTS["project_manager"].format(refined_request=state["refined_request"])
    scoped = run_pipeline("project_manager", prompt)
    log = state["messages"] + [{"role": "project_manager", "content": scoped}]
    return {"scoped_request": scoped, "messages": log}
104
+
105
def designer_agent(state: AgentState):
    """Designer node: produce design specs from the scoped request.

    NOTE(review): this runs the designer prompt through the "product_manager"
    pipeline — AGENT_MODEL_CONFIG declares no dedicated "designer" model, so a
    role key of "designer" would KeyError. Confirm the reuse is intentional.
    """
    prompt = PROMPTS["designer"].format(scoped_request=state["scoped_request"])
    specs = run_pipeline("product_manager", prompt)
    log = state["messages"] + [{"role": "designer", "content": specs}]
    return {"design_specs": specs, "messages": log}
108
 
109
def engineer_agent(state: AgentState):
    """Engineer node: render the design specs into a complete HTML page.

    Fix: the rewrite dropped the previous version's fallback that wrapped
    fragment output in a full document; restore it so preview and download
    always receive a valid standalone page.
    """
    html = run_pipeline("software_engineer", PROMPTS["software_engineer"].format(design_specs=state["design_specs"]))
    if not html.strip().startswith("<!DOCTYPE"):
        # Model returned only a fragment — wrap it in a minimal document shell.
        html = (
            "<!DOCTYPE html>\n"
            "<html><head><meta charset='UTF-8'>"
            "<meta name='viewport' content='width=device-width, initial-scale=1.0'>"
            "<title>Generated UI</title></head>"
            f"<body>{html}</body></html>"
        )
    return {"html": html, "messages": state["messages"] + [{"role": "software_engineer", "content": html}]}
112
 
113
def qa_agent(state: AgentState, max_iter: int):
    """QA node: review the HTML; mark done on approval or when the cap is hit."""
    feedback = run_pipeline("qa", PROMPTS["qa"].format(html=state["html"]))
    approved = "APPROVED" in feedback
    finished = approved or state["iteration"] >= max_iter
    return {
        "feedback": feedback,
        "done": finished,
        "iteration": state["iteration"] + 1,
        "messages": state["messages"] + [{"role": "qa", "content": feedback}],
    }
118
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
def generate_ui(user_request: str, max_iter: int):
    """Build and run the agent graph; return (html, final_state, total_seconds)."""
    initial_state = {
        "messages": [{"role": "user", "content": user_request}],
        "user_request": user_request,
        "refined_request": "",
        "scoped_request": "",
        "design_specs": "",
        "html": "",
        "feedback": "",
        "iteration": 0,
        "done": False,
        "timings": {},
    }

    workflow = StateGraph(AgentState)
    # Every node is wrapped by time_agent so its runtime lands in state["timings"].
    workflow.add_node("product_manager", lambda s: time_agent(product_manager_agent, s, "product_manager"))
    workflow.add_node("project_manager", lambda s: time_agent(project_manager_agent, s, "project_manager"))
    workflow.add_node("designer", lambda s: time_agent(designer_agent, s, "designer"))
    workflow.add_node("software_engineer", lambda s: time_agent(engineer_agent, s, "software_engineer"))
    workflow.add_node("qa", lambda s: time_agent(lambda x: qa_agent(x, max_iter), s, "qa"))

    # Linear hand-off, with a QA -> engineer loop until approval or the cap.
    workflow.add_edge("product_manager", "project_manager")
    workflow.add_edge("project_manager", "designer")
    workflow.add_edge("designer", "software_engineer")
    workflow.add_edge("software_engineer", "qa")
    workflow.add_conditional_edges("qa", lambda s: END if s["done"] else "software_engineer")
    workflow.set_entry_point("product_manager")

    app = workflow.compile()
    started = time.time()
    final_state = app.invoke(initial_state)
    return final_state["html"], final_state, time.time() - started
143
 
144
  def main():
145
+ st.title("πŸ€– Multi-Agent UI Generator")
 
146
  with st.sidebar:
147
  max_iter = st.slider("Max QA Iterations", 1, 5, 2)
148
 
 
154
  st.success("βœ… UI Generated Successfully!")
155
  st.components.v1.html(html, height=600, scrolling=True)
156
 
 
157
  b64 = base64.b64encode(html.encode()).decode()
158
+ st.markdown(f'<a href="data:file/html;base64,{b64}" download="ui.html">πŸ“₯ Download HTML</a>', unsafe_allow_html=True)
159
 
 
160
  st.subheader("🧠 Agent Communication Log")
161
  history_text = ""
162
  for msg in final_state["messages"]:
 
165
  history_text += f"---\n{role}:\n{content}\n\n"
166
  st.text_area("Agent Dialogue", value=history_text, height=300)
167
 
 
168
  b64_hist = base64.b64encode(history_text.encode()).decode()
169
  st.markdown(
170
+ f'<a href="data:file/txt;base64,{b64_hist}" download="agent_communication.txt">πŸ“₯ Download Communication Log</a>',
171
+ unsafe_allow_html=True)
172
+
 
 
173
  st.subheader("πŸ“Š Performance")
174
  st.write(f"⏱️ Total Time: {total_time:.2f} seconds")
175
  st.write(f"πŸ” Iterations: {final_state['iteration']}")
176
+ for stage in ["product_manager", "project_manager", "designer", "software_engineer", "qa"]:
177
+ st.write(f"🧩 {stage.replace('_', ' ').title()} Time: {final_state['timings'].get(stage, 0):.2f}s")
178
 
179
  if __name__ == "__main__":
180
  main()