madankn79 committed
Commit 6ed741d · 1 Parent(s): 861709c
Files changed (1)
  1. app.py +15 -20
app.py CHANGED
@@ -1,9 +1,8 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
-# Supported summarization models
+# Model choices ordered by accuracy
 model_choices = {
-    # 🥇 High Accuracy Models
     "Pegasus (google/pegasus-xsum)": "google/pegasus-xsum",
     "BigBird-Pegasus (google/bigbird-pegasus-large-arxiv)": "google/bigbird-pegasus-large-arxiv",
     "LongT5 Large (google/long-t5-tglobal-large)": "google/long-t5-tglobal-large",
@@ -12,8 +11,6 @@ model_choices = {
     "LED (allenai/led-base-16384)": "allenai/led-base-16384",
     "T5 Large (t5-large)": "t5-large",
     "Flan-T5 Large (google/flan-t5-large)": "google/flan-t5-large",
-
-    # ⚖️ Balanced (Speed vs Accuracy)
     "DistilBART CNN (sshleifer/distilbart-cnn-12-6)": "sshleifer/distilbart-cnn-12-6",
     "DistilBART XSum (mrm8488/distilbart-xsum-12-6)": "mrm8488/distilbart-xsum-12-6",
     "T5 Base (t5-base)": "t5-base",
@@ -21,16 +18,13 @@ model_choices = {
     "BART CNN SamSum (philschmid/bart-large-cnn-samsum)": "philschmid/bart-large-cnn-samsum",
     "T5 SamSum (knkarthick/pegasus-samsum)": "knkarthick/pegasus-samsum",
     "LongT5 Base (google/long-t5-tglobal-base)": "google/long-t5-tglobal-base",
-
-    # ⚡ Lighter / Faster Models
     "T5 Small (t5-small)": "t5-small",
     "MBART (facebook/mbart-large-cc25)": "facebook/mbart-large-cc25",
-    "MarianMT (Helsinki-NLP/opus-mt-en-ro)": "Helsinki-NLP/opus-mt-en-ro",  # not trained for summarization, just as placeholder
-    "Falcon Instruct (tiiuae/falcon-7b-instruct)": "tiiuae/falcon-7b-instruct",  # general-purpose, not summarization-specific
-    "BART ELI5 (yjernite/bart_eli5)": "yjernite/bart_eli5"  # trained for explain-like-I'm-5
+    "MarianMT (Helsinki-NLP/opus-mt-en-ro)": "Helsinki-NLP/opus-mt-en-ro",
+    "Falcon Instruct (tiiuae/falcon-7b-instruct)": "tiiuae/falcon-7b-instruct",
+    "BART ELI5 (yjernite/bart_eli5)": "yjernite/bart_eli5"
 }
 
-# Cache for loaded models/tokenizers
 model_cache = {}
 
 def load_model(model_name):
@@ -40,38 +34,39 @@ def load_model(model_name):
         model_cache[model_name] = (tokenizer, model)
     return model_cache[model_name]
 
-# Summarization function
-def summarize_text(input_text, model_label):
+def summarize_text(input_text, model_label, char_limit):
     if not input_text.strip():
         return "Please enter some text."
 
     model_name = model_choices[model_label]
     tokenizer, model = load_model(model_name)
 
-    if "t5" in model_name.lower():
+    if "t5" in model_name.lower() or "flan" in model_name.lower():
         input_text = "summarize: " + input_text
 
     inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
+
     summary_ids = model.generate(
         inputs["input_ids"],
-        max_length=20,  # Approximate for 65 characters
+        max_length=20,  # Still approximate; can be tuned per model
        min_length=5,
         do_sample=False
     )
-    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
 
-    return summary[:65]  # Ensure character limit
+    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
+    return summary[:char_limit]  # Enforce character limit
 
 # Gradio UI
 iface = gr.Interface(
     fn=summarize_text,
     inputs=[
         gr.Textbox(lines=6, label="Enter text to summarize"),
-        gr.Dropdown(choices=list(model_choices.keys()), label="Choose summarization model", value="Pegasus (google/pegasus-xsum)")
+        gr.Dropdown(choices=list(model_choices.keys()), label="Choose summarization model", value="Pegasus (google/pegasus-xsum)"),
+        gr.Slider(minimum=30, maximum=200, value=65, step=1, label="Max Character Limit")
     ],
-    outputs=gr.Textbox(lines=2, label="Summary (max 65 characters)"),
-    title="Short Text Summarizer",
-    description="Summarizes input text to under 65 characters using a selected model."
+    outputs=gr.Textbox(lines=3, label="Summary (truncated to character limit)"),
+    title="Multi-Model Text Summarizer",
+    description="Summarize text using different Hugging Face models with a user-defined character limit."
 )
 
 iface.launch()
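Note that the hunk for load_model opens at its last two lines, so the cache check and the from_pretrained calls themselves sit outside the diff context. For reference, here is a minimal sketch of what that helper presumably looks like, reconstructed from the visible tail and the imports at the top of app.py; everything above the cache assignment is an assumption, not the committed code.

# Assumed shape of load_model; only its final two lines appear in the diff above.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_cache = {}

def load_model(model_name):
    # Load each checkpoint once and reuse the (tokenizer, model) pair across requests.
    if model_name not in model_cache:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
        model_cache[model_name] = (tokenizer, model)
    return model_cache[model_name]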
 
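The committed summarize_text still generates with a fixed max_length=20 tokens and then slices the decoded string to char_limit, which can cut the final word in half. The in-line comment ("can be tuned per model") points toward deriving the generation budget from the slider instead; the sketch below does that using a rough 4-characters-per-token assumption. summarize_to_char_limit, the 4:1 ratio, and the choice of t5-small as the test checkpoint are illustration choices, not part of the commit.

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

MODEL_NAME = "t5-small"  # smallest checkpoint from the app's dropdown, used here for a quick test

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)

def summarize_to_char_limit(text, char_limit=65):
    # T5-style models expect the task prefix, as in the app.
    inputs = tokenizer("summarize: " + text, return_tensors="pt", truncation=True)
    # Rough assumption: about 4 characters per generated token for English prose.
    max_new = max(8, char_limit // 4)
    summary_ids = model.generate(
        inputs["input_ids"],
        max_new_tokens=max_new,
        min_length=5,
        do_sample=False
    )
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary[:char_limit]  # same final hard cut the app applies

print(summarize_to_char_limit("Gradio lets you wrap a Python function in a small web UI with just a few lines of code.", char_limit=80))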