spandana30 committed on
Commit
ed6f467
Β·
verified Β·
1 Parent(s): a7524b6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -19
app.py CHANGED
@@ -4,12 +4,13 @@ import time
4
  import base64
5
  from typing import Dict, List, TypedDict
6
  from langgraph.graph import StateGraph, END
7
- from huggingface_hub import InferenceClient
8
 
9
- client = InferenceClient(
10
- model="mistralai/Mistral-7B-Instruct-v0.2",
11
- token=st.secrets["HF_TOKEN"]
12
- )
 
13
 
14
  class AgentState(TypedDict):
15
  messages: List[Dict[str, str]]
@@ -57,18 +58,12 @@ Check for:
57
  Reply "APPROVED" if perfect, or suggest improvements."""
58
 
59
  def call_model(prompt: str, max_retries=3) -> str:
60
- for attempt in range(max_retries):
61
- try:
62
- return client.text_generation(
63
- prompt,
64
- max_new_tokens=3000,
65
- temperature=0.3,
66
- return_full_text=False
67
- )
68
- except Exception as e:
69
- st.error(f"Model call failed (attempt {attempt+1}): {str(e)}")
70
- st.write("\n\n**Full Error:**", e)
71
- st.stop() # Force exit to show error
72
  return "<html><body><h1>Error generating UI</h1></body></html>"
73
 
74
  def time_agent(agent_func, state: AgentState, label: str):
@@ -135,8 +130,6 @@ def main():
135
  with st.sidebar:
136
  max_iter = st.slider("Max QA Iterations", 1, 5, 2)
137
 
138
- st.write("\nπŸ” HF_TOKEN found:", st.secrets.get("HF_TOKEN", "❌ Missing!"))
139
-
140
  prompt = st.text_area("πŸ“ Describe the UI you want:", "A coffee shop landing page with hero, menu, and contact form.", height=150)
141
 
142
  if st.button("πŸš€ Generate UI"):
 
4
  import base64
5
  from typing import Dict, List, TypedDict
6
  from langgraph.graph import StateGraph, END
7
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load CodeLLaMA locally.
# NOTE(review): Streamlit re-executes this whole script on every user
# interaction, so an uncached module-level from_pretrained() would reload
# the 7B model on each rerun. st.cache_resource loads it once per process.
model_id = "codellama/CodeLlama-7b-hf"


@st.cache_resource(show_spinner="Loading CodeLLaMA model...")
def _load_local_pipeline(repo_id: str):
    """Load the model and tokenizer once and build a text-generation pipeline.

    Returns (model, tokenizer, generator) so the module-level names the
    rest of the file relies on stay available.
    """
    model = AutoModelForCausalLM.from_pretrained(repo_id, device_map="auto")
    tokenizer = AutoTokenizer.from_pretrained(repo_id)
    generator = pipeline(
        "text-generation", model=model, tokenizer=tokenizer, device_map="auto"
    )
    return model, tokenizer, generator


# Keep the same module-level names the original code exposed.
model, tokenizer, generator = _load_local_pipeline(model_id)
 
15
  class AgentState(TypedDict):
16
  messages: List[Dict[str, str]]
 
58
  Reply "APPROVED" if perfect, or suggest improvements."""
59
 
60
def call_model(prompt: str, max_retries=3) -> str:
    """Generate text from the local CodeLLaMA pipeline.

    Retries up to ``max_retries`` times on failure, surfacing each error in
    the Streamlit UI. Always returns a string: either the model completion
    or a minimal HTML error page as a fallback.

    Fixes vs. the previous revision:
    - ``max_retries`` was accepted but ignored (no retry loop); restored.
    - ``st.stop()`` inside ``except`` made the fallback ``return`` unreachable
      and killed the app on the first transient failure; removed.
    - the pipeline defaults to ``return_full_text=True`` (prompt echoed back
      in the output); pass ``return_full_text=False`` to match the behavior
      of the hosted InferenceClient this code replaced.
    """
    for attempt in range(max_retries):
        try:
            outputs = generator(
                prompt,
                max_new_tokens=1000,
                temperature=0.3,
                return_full_text=False,
            )
            return outputs[0]["generated_text"]
        except Exception as e:  # broad on purpose: report any generation failure to the UI
            st.error(f"Local model call failed (attempt {attempt + 1}): {str(e)}")
    # All attempts failed: hand back a placeholder page instead of halting the app.
    return "<html><body><h1>Error generating UI</h1></body></html>"
68
 
69
  def time_agent(agent_func, state: AgentState, label: str):
 
130
  with st.sidebar:
131
  max_iter = st.slider("Max QA Iterations", 1, 5, 2)
132
 
 
 
133
  prompt = st.text_area("πŸ“ Describe the UI you want:", "A coffee shop landing page with hero, menu, and contact form.", height=150)
134
 
135
  if st.button("πŸš€ Generate UI"):