ZENLLC committed on
Commit
9de1645
·
verified ·
1 Parent(s): 34cf01a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -27
app.py CHANGED
@@ -2,65 +2,73 @@ import gradio as gr
2
  from transformers import pipeline
3
 
4
  # ===============================
5
- # Load open-source text generation models
6
  # ===============================
7
- models = {
8
  "DistilGPT-2": "distilgpt2",
9
- "GPT2 (Small)": "gpt2",
10
- "DialoGPT-small": "microsoft/DialoGPT-small",
11
- "OPT-350M": "facebook/opt-350m",
12
  "Bloom-560M": "bigscience/bloom-560m",
13
- "GPT-Neo-125M": "EleutherAI/gpt-neo-125M",
14
- "Falcon-RW-1B": "tiiuae/falcon-rw-1b",
15
- "Flan-T5-Small": "google/flan-t5-small",
16
  "Flan-T5-Base": "google/flan-t5-base",
17
  "Phi-2": "microsoft/phi-2"
18
  }
19
 
20
- generators = {name: pipeline("text-generation", model=mdl)
21
- if "flan" not in mdl.lower() and "bart" not in mdl.lower()
22
- else pipeline("text2text-generation", model=mdl)
23
- for name, mdl in models.items()}
24
 
25
- # Summarizer model
26
- summarizer = pipeline("text2text-generation", model="google/flan-t5-base")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
  # ===============================
29
- # Function to query all models
30
  # ===============================
31
  def compare_models(user_input, max_new_tokens=100, temperature=0.7):
32
- raw_outputs = {}
33
- clean_outputs = {}
34
- for name, generator in generators.items():
35
  try:
36
- if "text-generation" in generator.task:
 
37
  output = generator(
38
  user_input,
39
  max_new_tokens=max_new_tokens,
40
  temperature=temperature
41
  )[0]["generated_text"]
42
- else: # Flan models etc
43
  output = generator(user_input, max_new_tokens=max_new_tokens)[0]["generated_text"]
44
 
45
  raw_outputs[name] = output
46
 
47
- # Summarize the answer to improve clarity
48
- summary = summarizer("Summarize this: " + output, max_new_tokens=60)[0]["generated_text"]
49
  clean_outputs[name] = summary
50
 
51
  except Exception as e:
52
  raw_outputs[name] = f"⚠️ Error: {str(e)}"
53
  clean_outputs[name] = "N/A"
54
 
55
- return [raw_outputs[m] for m in models.keys()], [clean_outputs[m] for m in models.keys()]
56
 
57
  # ===============================
58
  # Gradio UI
59
  # ===============================
60
  with gr.Blocks(css="style.css") as demo:
61
  gr.Markdown("## 🤖 Open-Source Model Comparator\n"
62
- "Compare outputs from multiple open-source LLMs side by side.\n"
63
- "Includes a raw output and a cleaned summary (via Flan-T5).")
64
 
65
  with gr.Row():
66
  user_input = gr.Textbox(label="Your prompt", placeholder="Try: 'Explain quantum computing in simple terms'", lines=2)
@@ -72,11 +80,11 @@ with gr.Blocks(css="style.css") as demo:
72
 
73
  gr.Markdown("### 🔎 Raw Outputs")
74
  with gr.Row():
75
- raw_boxes = [gr.Textbox(label=name, elem_classes="output-box", interactive=False) for name in models.keys()]
76
 
77
  gr.Markdown("### ✨ Cleaned Summaries (Flan-T5)")
78
  with gr.Row():
79
- clean_boxes = [gr.Textbox(label=f"{name} (Summary)", elem_classes="output-box", interactive=False) for name in models.keys()]
80
 
81
  examples = [
82
  ["Explain quantum computing in simple terms."],
 
2
  from transformers import pipeline
3
 
4
  # ===============================
5
+ # Model dictionary (lazy loaded)
6
  # ===============================
7
# ===============================
# Model dictionary (lazy loaded)
# ===============================
# Display label -> Hugging Face checkpoint id. Pipelines are built on
# first use (see get_model) so startup stays fast and memory stays low.
model_names = {
    "DistilGPT-2": "distilgpt2",
    "Bloom-560M": "bigscience/bloom-560m",
    "OPT-350M": "facebook/opt-350m",
    "Flan-T5-Base": "google/flan-t5-base",
    "Phi-2": "microsoft/phi-2"
}

# Cache of already-instantiated pipelines, keyed by display label.
loaded_models = {}
# Shared Flan-T5 cleanup pipeline, created lazily by get_summarizer().
summarizer = None  # Flan-T5 for cleanup
 
 
17
 
18
+ # ===============================
19
+ # Lazy-load helper
20
+ # ===============================
21
def get_model(name):
    """Return the generation pipeline for *name*, creating it on first use.

    Checkpoints whose id contains "flan" are seq2seq models and get a
    "text2text-generation" pipeline; every other checkpoint gets a causal
    "text-generation" pipeline. Instances are cached in ``loaded_models``.
    """
    if name in loaded_models:
        return loaded_models[name]
    checkpoint = model_names[name]
    task = "text2text-generation" if "flan" in checkpoint.lower() else "text-generation"
    loaded_models[name] = pipeline(task, model=checkpoint)
    return loaded_models[name]
29
+
30
def get_summarizer():
    """Return the shared Flan-T5 summarizer pipeline, building it lazily."""
    global summarizer
    if summarizer is not None:
        return summarizer
    summarizer = pipeline("text2text-generation", model="google/flan-t5-base")
    return summarizer
35
 
36
  # ===============================
37
+ # Compare function
38
  # ===============================
39
# ===============================
# Compare function
# ===============================
def compare_models(user_input, max_new_tokens=100, temperature=0.7):
    """Run the prompt through every configured model and summarize each answer.

    Args:
        user_input: Prompt text sent to every model.
        max_new_tokens: Generation budget per model.
        temperature: Sampling temperature for the causal (text-generation) models.

    Returns:
        Two lists aligned with ``model_names`` order: the raw generations and
        the Flan-T5 summaries ("N/A" wherever a model raised an error).
    """
    raw_outputs, clean_outputs = {}, {}
    for name in model_names.keys():
        try:
            generator = get_model(name)
            if generator.task == "text-generation":
                # do_sample=True is required for temperature to take effect;
                # without it transformers ignores temperature and decodes greedily.
                output = generator(
                    user_input,
                    max_new_tokens=max_new_tokens,
                    temperature=temperature,
                    do_sample=True
                )[0]["generated_text"]
            else:  # text2text-generation (Flan)
                output = generator(user_input, max_new_tokens=max_new_tokens)[0]["generated_text"]

            raw_outputs[name] = output

            # Summarize the raw answer with Flan-T5 to improve clarity.
            summary = get_summarizer()("Summarize this: " + output, max_new_tokens=60)[0]["generated_text"]
            clean_outputs[name] = summary

        except Exception as e:
            # One failing model must not break the whole comparison grid.
            raw_outputs[name] = f"⚠️ Error: {str(e)}"
            clean_outputs[name] = "N/A"

    return [raw_outputs[m] for m in model_names.keys()], [clean_outputs[m] for m in model_names.keys()]
64
 
65
  # ===============================
66
  # Gradio UI
67
  # ===============================
68
  with gr.Blocks(css="style.css") as demo:
69
  gr.Markdown("## 🤖 Open-Source Model Comparator\n"
70
+ "Compare outputs from open-source LLMs side by side.\n"
71
+ "Raw output + a cleaned summary from Flan-T5.")
72
 
73
  with gr.Row():
74
  user_input = gr.Textbox(label="Your prompt", placeholder="Try: 'Explain quantum computing in simple terms'", lines=2)
 
80
 
81
  gr.Markdown("### 🔎 Raw Outputs")
82
  with gr.Row():
83
+ raw_boxes = [gr.Textbox(label=name, elem_classes="output-box", interactive=False) for name in model_names.keys()]
84
 
85
  gr.Markdown("### ✨ Cleaned Summaries (Flan-T5)")
86
  with gr.Row():
87
+ clean_boxes = [gr.Textbox(label=f"{name} (Summary)", elem_classes="output-box", interactive=False) for name in model_names.keys()]
88
 
89
  examples = [
90
  ["Explain quantum computing in simple terms."],