Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
#
|
2 |
|
3 |
import streamlit as st
|
4 |
import time
|
@@ -8,9 +8,12 @@ from langgraph.graph import StateGraph, END
|
|
8 |
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
9 |
from peft import PeftModel, PeftConfig
|
10 |
import torch
|
|
|
11 |
|
12 |
st.set_page_config(page_title="Multi-Agent Collaboration", layout="wide")
|
13 |
|
|
|
|
|
14 |
# Agent model loading config
|
15 |
AGENT_MODEL_CONFIG = {
|
16 |
"product_manager": {
|
@@ -22,7 +25,7 @@ AGENT_MODEL_CONFIG = {
|
|
22 |
"adapter": "spandana30/project-manager-gemma"
|
23 |
},
|
24 |
"software_architect": {
|
25 |
-
"base": "cohere/command-r",
|
26 |
"adapter": "spandana30/software-architect-cohere"
|
27 |
},
|
28 |
"software_engineer": {
|
@@ -39,10 +42,10 @@ AGENT_MODEL_CONFIG = {
|
|
39 |
|
40 |
def load_agent_model(base_id, adapter_id):
    """Build a text-generation pipeline from a base model plus a PEFT adapter.

    Args:
        base_id: Hugging Face model id of the base causal-LM checkpoint.
        adapter_id: Hugging Face repo id of the fine-tuned PEFT/LoRA adapter;
            the tokenizer is also loaded from this repo.

    Returns:
        A ``transformers`` text-generation pipeline capped at 1024 new tokens.
    """
    # Base weights in fp16; device_map="auto" lets accelerate place layers
    # on whatever devices are available.
    base = AutoModelForCausalLM.from_pretrained(
        base_id,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    # Stack the adapter on top of the (frozen) base model.
    adapted = PeftModel.from_pretrained(base, adapter_id)
    tok = AutoTokenizer.from_pretrained(adapter_id)
    return pipeline(
        "text-generation",
        model=adapted,
        tokenizer=tok,
        max_new_tokens=1024,
    )
|
47 |
|
48 |
AGENT_PIPELINES = {
|
@@ -177,4 +180,4 @@ def main():
|
|
177 |
st.write(f"🧩 {stage.replace('_', ' ').title()} Time: {final_state['timings'].get(stage, 0):.2f}s")
|
178 |
|
179 |
if __name__ == "__main__":
|
180 |
-
main()
|
|
|
1 |
+
# Multi-agent UI generator with Hugging Face token-based gated model support
|
2 |
|
3 |
import streamlit as st
|
4 |
import time
|
|
|
8 |
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
9 |
from peft import PeftModel, PeftConfig
|
10 |
import torch
|
11 |
+
import os
|
12 |
|
13 |
st.set_page_config(page_title="Multi-Agent Collaboration", layout="wide")
|
14 |
|
15 |
+
HF_TOKEN = os.getenv("HF_TOKEN")
|
16 |
+
|
17 |
# Agent model loading config
|
18 |
AGENT_MODEL_CONFIG = {
|
19 |
"product_manager": {
|
|
|
25 |
"adapter": "spandana30/project-manager-gemma"
|
26 |
},
|
27 |
"software_architect": {
|
28 |
+
"base": "cohere/command-r",
|
29 |
"adapter": "spandana30/software-architect-cohere"
|
30 |
},
|
31 |
"software_engineer": {
|
|
|
42 |
|
43 |
def load_agent_model(base_id, adapter_id):
    """Build a text-generation pipeline from a base model plus a PEFT adapter.

    All hub downloads pass ``token=HF_TOKEN`` (module-level, read from the
    ``HF_TOKEN`` env var) so gated repos can be fetched.

    Args:
        base_id: Hugging Face model id of the base causal-LM checkpoint.
        adapter_id: Hugging Face repo id of the fine-tuned PEFT/LoRA adapter;
            the tokenizer is also loaded from this repo.

    Returns:
        A ``transformers`` text-generation pipeline capped at 1024 new tokens.
    """
    # Base weights in fp16; device_map="auto" lets accelerate place layers
    # on whatever devices are available.
    base = AutoModelForCausalLM.from_pretrained(
        base_id,
        torch_dtype=torch.float16,
        device_map="auto",
        token=HF_TOKEN,
    )
    # Stack the adapter on top of the (frozen) base model.
    adapted = PeftModel.from_pretrained(base, adapter_id, token=HF_TOKEN)
    tok = AutoTokenizer.from_pretrained(adapter_id, token=HF_TOKEN)
    return pipeline(
        "text-generation",
        model=adapted,
        tokenizer=tok,
        max_new_tokens=1024,
    )
|
50 |
|
51 |
AGENT_PIPELINES = {
|
|
|
180 |
st.write(f"🧩 {stage.replace('_', ' ').title()} Time: {final_state['timings'].get(stage, 0):.2f}s")
|
181 |
|
182 |
if __name__ == "__main__":
|
183 |
+
main()
|