spandana30 committed
Commit cd1de35 · verified · 1 parent: 3748474

Upload 3 files

Files changed (3)
  1. README (1).md +33 -0
  2. app.py +138 -0
  3. requirements.txt +3 -0
README (1).md ADDED
@@ -0,0 +1,33 @@
+ ---
+ title: Multi-Agent UI Generator
+ emoji: 🤝
+ colorFrom: indigo
+ colorTo: teal
+ sdk: streamlit
+ sdk_version: "1.32.0"
+ app_file: app.py
+ pinned: false
+ ---
+
+ # Multi-Agent UI Generator 🚀
+
+ This is a Streamlit-based collaborative system where agents (Designer, Software Engineer, QA) work together to generate production-ready UI code.
+
+ ## Features
+
+ - Natural language prompt → design specs
+ - Generate a complete HTML/CSS UI from the specs
+ - QA feedback loop until the UI is approved
+ - Live preview and downloads for the HTML and agent logs
+
+ ## Instructions
+
+ 1. Create a Hugging Face Space (Streamlit SDK)
+ 2. Upload `app.py`, `requirements.txt`, and this `README.md`
+ 3. Set your Hugging Face token in the Space Secrets as `HF_TOKEN`
+
+ ## Powered by
+
+ - LangGraph 🧠
+ - Hugging Face Inference API 💬
+ - Streamlit 🚀
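Note on step 3 of the instructions: `app.py` reads the token via `st.secrets["HF_TOKEN"]`, which is populated by the Space secret of that name. The following is only a hedged sketch, not part of the uploaded files, showing how the same lookup could fall back to an environment variable for local runs (the fallback variable name `HF_TOKEN` is an assumption):

```python
# Sketch only (not part of this commit): read the token from Space Secrets or a
# local .streamlit/secrets.toml when available, otherwise fall back to an
# environment variable exported in the shell.
import os
import streamlit as st
from huggingface_hub import InferenceClient

def get_hf_token() -> str:
    try:
        return st.secrets["HF_TOKEN"]    # Space Secrets / local secrets.toml
    except Exception:
        return os.environ["HF_TOKEN"]    # assumed local fallback

client = InferenceClient(
    model="mistralai/Mistral-7B-Instruct-v0.2",
    token=get_hf_token(),
)
```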
app.py ADDED
@@ -0,0 +1,138 @@
+
+ import streamlit as st
+ import os
+ import time
+ import random
+ import base64
+ from typing import Dict, List, TypedDict
+ from io import StringIO
+ from langgraph.graph import StateGraph, END
+ from huggingface_hub import InferenceClient
+
+ client = InferenceClient(
+     model="mistralai/Mistral-7B-Instruct-v0.2",
+     token=st.secrets["HF_TOKEN"]
+ )
+
+ class AgentState(TypedDict):
+     messages: List[Dict[str, str]]
+     design_specs: str
+     html: str
+     css: str
+     feedback: str
+     iteration: int
+     done: bool
+     timings: Dict[str, float]
+
+ DESIGNER_PROMPT = """You're a UI designer. Create design specs for:
+ {user_request}
+
+ Include:
+ 1. Color palette (primary, secondary, accent)
+ 2. Font choices
+ 3. Layout structure
+ 4. Component styles
+
+ Don't write code - just design guidance."""
+
+ ENGINEER_PROMPT = """Create a complete HTML page with embedded CSS for:
+ {design_specs}
+
+ Requirements:
+ 1. Full HTML document with <!DOCTYPE>
+ 2. CSS inside <style> tags in head
+ 3. Mobile-responsive
+ 4. Semantic HTML
+ 5. Ready-to-use (will work when saved as .html)
+
+ Output JUST the complete HTML file content:"""
+
+ QA_PROMPT = """Review this website:
+ {html}
+
+ Check for:
+ 1. Visual quality
+ 2. Responsiveness
+ 3. Functionality
+
+ Reply "APPROVED" if perfect, or suggest improvements."""
+
+ def time_agent(agent_func, state: AgentState, label: str):
+     start = time.time()
+     result = agent_func(state)
+     duration = time.time() - start
+     result["timings"] = state["timings"]
+     result["timings"][label] = duration
+     return result
+
+ def designer_agent(state: AgentState):
+     specs = call_model(DESIGNER_PROMPT.format(user_request=state["messages"][-1]["content"]))
+     return {"design_specs": specs, "messages": state["messages"] + [{"role": "designer", "content": specs}]}
+
+ def engineer_agent(state: AgentState):
+     html = call_model(ENGINEER_PROMPT.format(design_specs=state["design_specs"]))
+     if not html.strip().startswith("<!DOCTYPE"):
+         html = f"""<!DOCTYPE html>
+ <html><head><meta charset='UTF-8'><meta name='viewport' content='width=device-width, initial-scale=1.0'>
+ <title>Generated UI</title></head><body>{html}</body></html>"""
+     return {"html": html, "messages": state["messages"] + [{"role": "software_engineer", "content": html}]}
+
+ def qa_agent(state: AgentState, max_iter: int):
+     feedback = call_model(QA_PROMPT.format(html=state["html"]))
+     done = "APPROVED" in feedback or state["iteration"] >= max_iter
+     return {"feedback": feedback, "done": done, "iteration": state["iteration"] + 1,
+             "messages": state["messages"] + [{"role": "qa", "content": feedback}]}
+
+ def call_model(prompt: str, max_retries=3) -> str:
+     for attempt in range(max_retries):
+         try:
+             return client.text_generation(prompt, max_new_tokens=3000, temperature=0.3, return_full_text=False)
+         except Exception:
+             time.sleep(2)
+     return "<html><body><h1>Error generating UI</h1></body></html>"
+
+ def generate_ui(user_request: str, max_iter: int):
+     state = {"messages": [{"role": "user", "content": user_request}], "design_specs": "", "html": "",
+              "css": "", "feedback": "", "iteration": 0, "done": False, "timings": {}}
+
+     workflow = StateGraph(AgentState)
+     workflow.add_node("designer", lambda s: time_agent(designer_agent, s, "designer"))
+     workflow.add_node("software_engineer", lambda s: time_agent(engineer_agent, s, "software_engineer"))
+     workflow.add_node("qa", lambda s: time_agent(lambda x: qa_agent(x, max_iter), s, "qa"))
+     workflow.add_edge("designer", "software_engineer")
+     workflow.add_edge("software_engineer", "qa")
+     workflow.add_conditional_edges("qa", lambda s: END if s["done"] else "software_engineer")
+     workflow.set_entry_point("designer")
+     app = workflow.compile()
+     total_start = time.time()
+     final_state = app.invoke(state)
+     return final_state["html"], final_state, time.time() - total_start
+
+ def main():
+     st.set_page_config(page_title="Multi-Agent Collaboration", layout="wide")
+     st.title("🤝 Multi-Agent Collaboration")
+     with st.sidebar:
+         st.header("Settings")
+         max_iter = st.slider("Max QA Iterations", 1, 5, 2)
+     prompt = st.text_area("Describe the UI you want:", "A clean dashboard with 3 summary cards and a sidebar", height=150)
+     if st.button("Generate UI"):
+         with st.spinner("Working..."):
+             html, final_state, total_time = generate_ui(prompt, max_iter)
+         st.success("UI Generated Successfully!")
+         st.components.v1.html(html, height=600, scrolling=True)
+         st.subheader("📥 Download HTML")
+         b64 = base64.b64encode(html.encode()).decode()
+         st.markdown(f'<a href="data:file/html;base64,{b64}" download="ui.html">Download HTML</a>', unsafe_allow_html=True)
+         st.subheader("🧠 Agent Log")
+         history_text = "".join([f"---\n{m['role'].title()}:\n{m['content']}\n\n" for m in final_state["messages"]])
+         st.text_area("Conversation", history_text, height=300)
+         b64_hist = base64.b64encode(history_text.encode()).decode()
+         st.markdown(f'<a href="data:file/txt;base64,{b64_hist}" download="agent_log.txt">Download Log</a>', unsafe_allow_html=True)
+         st.subheader("📊 Performance")
+         st.write(f"Total Time: {total_time:.2f} seconds")
+         st.write(f"Iterations: {final_state['iteration']}")
+         for stage in ["designer", "software_engineer", "qa"]:
+             st.write(f"{stage.replace('_', ' ').title()}: {final_state['timings'].get(stage, 0):.2f}s")
+
+ if __name__ == "__main__":
+     main()
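The QA feedback loop in `app.py` hinges on `add_conditional_edges`: after the `qa` node runs, the graph either ends or routes back to `software_engineer`, depending on `done`. Here is a minimal, self-contained sketch of that looping pattern, with a toy state and nodes and approval hard-coded after two passes purely for illustration:

```python
# Minimal sketch of the QA loop above: "qa" routes back to
# "software_engineer" until it sets done=True, then the graph ends.
from typing import TypedDict
from langgraph.graph import StateGraph, END

class LoopState(TypedDict):
    iteration: int
    done: bool

def engineer(state: LoopState) -> dict:
    return {}  # in app.py this is where the HTML gets (re)generated

def qa(state: LoopState) -> dict:
    i = state["iteration"] + 1
    return {"iteration": i, "done": i >= 2}  # "approve" after two passes, for illustration

g = StateGraph(LoopState)
g.add_node("software_engineer", engineer)
g.add_node("qa", qa)
g.set_entry_point("software_engineer")
g.add_edge("software_engineer", "qa")
g.add_conditional_edges("qa", lambda s: END if s["done"] else "software_engineer")

print(g.compile().invoke({"iteration": 0, "done": False}))
# -> {'iteration': 2, 'done': True}
```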
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ streamlit
+ huggingface_hub
+ langgraph