Update app.py
app.py
CHANGED
@@ -1,51 +1,94 @@
(Previous version: 51 lines, five models: DistilGPT-2, GPT2 (Small), DialoGPT-small, OPT-350M, Bloom-560M.)

import gradio as gr
from transformers import pipeline

# ===============================
# Load open-source text generation models
# ===============================
models = {
    "DistilGPT-2": "distilgpt2",
    "GPT2 (Small)": "gpt2",
    "DialoGPT-small": "microsoft/DialoGPT-small",
    "OPT-350M": "facebook/opt-350m",
    "Bloom-560M": "bigscience/bloom-560m",
    "GPT-Neo-125M": "EleutherAI/gpt-neo-125M",
    "Falcon-RW-1B": "tiiuae/falcon-rw-1b",
    "Flan-T5-Small": "google/flan-t5-small",
    "Flan-T5-Base": "google/flan-t5-base",
    "Phi-2": "microsoft/phi-2"
}

# Seq2seq checkpoints (Flan-T5) need the text2text-generation pipeline;
# the remaining causal LMs use text-generation.
generators = {name: pipeline("text-generation", model=mdl)
              if "flan" not in mdl.lower() and "bart" not in mdl.lower()
              else pipeline("text2text-generation", model=mdl)
              for name, mdl in models.items()}

# Summarizer model
summarizer = pipeline("text2text-generation", model="google/flan-t5-base")

# ===============================
# Function to query all models
# ===============================
def compare_models(user_input, max_new_tokens=100, temperature=0.7):
    raw_outputs = {}
    clean_outputs = {}
    for name, generator in generators.items():
        try:
            # Compare the task string exactly: "text-generation" is a substring
            # of "text2text-generation", so a substring check would match both.
            if generator.task == "text-generation":
                output = generator(
                    user_input,
                    max_new_tokens=max_new_tokens,
                    temperature=temperature,
                    do_sample=True  # sampling must be on for temperature to take effect
                )[0]["generated_text"]
            else:  # Flan models etc.
                output = generator(user_input, max_new_tokens=max_new_tokens)[0]["generated_text"]

            raw_outputs[name] = output

            # Summarize the answer to improve clarity
            summary = summarizer("Summarize this: " + output, max_new_tokens=60)[0]["generated_text"]
            clean_outputs[name] = summary

        except Exception as e:
            raw_outputs[name] = f"⚠️ Error: {str(e)}"
            clean_outputs[name] = "N/A"

    # Return one value per output component (raw boxes first, then summaries),
    # matching outputs=raw_boxes + clean_boxes in the click/submit handlers.
    return [raw_outputs[m] for m in models] + [clean_outputs[m] for m in models]

# ===============================
# Gradio UI
# ===============================
with gr.Blocks(css="style.css") as demo:
    gr.Markdown("## 🤖 Open-Source Model Comparator\n"
                "Compare outputs from multiple open-source LLMs side by side.\n"
                "Includes a raw output and a cleaned summary (via Flan-T5).")

    with gr.Row():
        user_input = gr.Textbox(label="Your prompt", placeholder="Try: 'Explain quantum computing in simple terms'", lines=2)
        generate_btn = gr.Button("Generate", variant="primary")

    with gr.Row():
        max_tokens = gr.Slider(20, 200, value=100, step=10, label="Max new tokens")
        temp = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Creativity (temperature)")

    gr.Markdown("### 🔎 Raw Outputs")
    with gr.Row():
        raw_boxes = [gr.Textbox(label=name, elem_classes="output-box", interactive=False) for name in models.keys()]

    gr.Markdown("### ✨ Cleaned Summaries (Flan-T5)")
    with gr.Row():
        clean_boxes = [gr.Textbox(label=f"{name} (Summary)", elem_classes="output-box", interactive=False) for name in models.keys()]

    examples = [
        ["Explain quantum computing in simple terms."],
        ["Write a haiku about autumn leaves."],
        ["What are the pros and cons of nuclear energy?"],
        ["Describe a futuristic city in the year 2200."],
        ["Write a funny short story about a robot learning to cook."]
    ]
    gr.Examples(examples=examples, inputs=[user_input])

    generate_btn.click(compare_models, inputs=[user_input, max_tokens, temp], outputs=raw_boxes + clean_boxes)
    user_input.submit(compare_models, inputs=[user_input, max_tokens, temp], outputs=raw_boxes + clean_boxes)

if __name__ == "__main__":
    demo.launch()
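As a quick sanity check outside the Gradio UI, compare_models can also be called directly. The sketch below is hypothetical and not part of the committed file; it only illustrates how the flat return value splits back into raw outputs and summaries, in the same order as the models dict.

# Hypothetical local test, not part of the committed app.py.
# compare_models returns one flat list: raw outputs first, then the Flan-T5 summaries.
outputs = compare_models("Explain quantum computing in simple terms.",
                         max_new_tokens=40, temperature=0.7)
raw, summaries = outputs[:len(models)], outputs[len(models):]
for name, raw_text, summary in zip(models, raw, summaries):
    print(f"{name}\n  raw: {raw_text[:120]}\n  summary: {summary}\n")

The Space will also need at least gradio, transformers, and torch declared in its requirements.txt, and the heavier checkpoints (Falcon-RW-1B, Phi-2) are large downloads that may not fit on the smallest Space hardware.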