Light-Dav committed on
Commit
c13ab36
·
verified ·
1 Parent(s): 51ebb55

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -11
app.py CHANGED
@@ -1,5 +1,6 @@
1
  import gradio as gr
2
  from transformers import pipeline
 
3
 
4
  # --- Model Loading ---
5
  # Using the model ID you've been working with, which is fine-tuned on an English dataset (IMDB).
@@ -8,8 +9,9 @@ MODEL_ID = "Light-Dav/sentiment-analysis-full-project"
8
 
9
  try:
10
  # Attempt to load the pipeline. This needs to be outside the function for efficiency.
11
- # The 'return_all_scores=True' is important for getting scores for all labels.
12
- sentiment_analyzer = pipeline("sentiment-analysis", model=MODEL_ID, return_all_scores=True)
 
13
  model_loaded_successfully = True
14
  except Exception as e:
15
  print(f"Error loading model: {e}")
@@ -81,21 +83,24 @@ def interpret_sentiment(label, score):
81
  description = ""
82
  color_class = ""
83
 
84
- if label.lower() == "positive": # Your model might output 'LABEL_2' or 'POS'
 
 
 
85
  emoji = "😊"
86
  description = "This text expresses a **highly positive** sentiment." if score > 0.9 else "This text expresses a **positive** sentiment."
87
  color_class = "sentiment-positive"
88
- elif label.lower() == "negative": # Your model might output 'LABEL_0' or 'NEG'
89
  emoji = "😠"
90
  description = "This text expresses a **highly negative** sentiment." if score > 0.9 else "This text expresses a **negative** sentiment."
91
  color_class = "sentiment-negative"
92
- elif label.lower() == "neutral": # Your model might output 'LABEL_1' or 'NEUTRAL'
93
  emoji = "😐"
94
  description = "This text expresses a **neutral** sentiment."
95
  color_class = "sentiment-neutral"
96
  else:
97
  emoji = "❓"
98
- description = "Could not confidently determine sentiment."
99
  color_class = ""
100
 
101
  return f"<div class='sentiment-display {color_class}'>{emoji} {label.upper()} ({score:.2f})</div>" + \
@@ -118,8 +123,8 @@ def analyze_sentiment(text):
118
  }
119
 
120
  try:
121
- # The pipeline returns a list of dictionaries if return_all_scores=True
122
- # e.g., [[{'label': 'LABEL_0', 'score': 0.9}, {'label': 'LABEL_1', 'score': 0.05}]]
123
  results = sentiment_analyzer(text)[0] # Get the first (and only) list of results
124
 
125
  # Sort results by score in descending order
@@ -178,26 +183,34 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo: # Using gr.Block
178
  ["The meeting concluded without any major decisions."]
179
  ],
180
  inputs=text_input,
 
 
181
  cache_examples=True # Caching examples for faster loading
182
  )
183
 
184
  with gr.Column(scale=3):
185
  gr.Markdown("## 📈 Analysis Results 📉")
186
- overall_sentiment_output = gr.HTML(label="Overall Sentiment") # Using HTML to render custom styled sentiment
187
- confidence_scores_output = gr.Label(num_top_classes=3, label="Confidence Scores") # Default Gradio Label
188
- raw_output = gr.JSON(label="Raw Model Output", visible=False) # For debugging/advanced users
 
 
 
189
 
190
  # --- Event Listeners ---
 
191
  text_input.change(
192
  fn=analyze_sentiment,
193
  inputs=text_input,
194
  outputs=[overall_sentiment_output, confidence_scores_output, raw_output],
195
  # live=True # Uncomment for live updates as user types (can be resource intensive)
196
  )
 
197
  analyze_btn.click(
198
  fn=analyze_sentiment,
199
  inputs=text_input,
200
  outputs=[overall_sentiment_output, confidence_scores_output, raw_output]
201
  )
202
 
 
203
  demo.launch()
 
1
  import gradio as gr
2
  from transformers import pipeline
3
+ import os
4
 
5
  # --- Model Loading ---
6
  # Using the model ID you've been working with, which is fine-tuned on an English dataset (IMDB).
 
9
 
10
  try:
11
  # Attempt to load the pipeline. This needs to be outside the function for efficiency.
12
+ # Using top_k=None to get all scores (equivalent to return_all_scores=True in older versions).
13
+ # This also addresses the UserWarning about `return_all_scores` deprecation.
14
+ sentiment_analyzer = pipeline("sentiment-analysis", model=MODEL_ID, top_k=None)
15
  model_loaded_successfully = True
16
  except Exception as e:
17
  print(f"Error loading model: {e}")
 
83
  description = ""
84
  color_class = ""
85
 
86
+ # IMPORTANT: Adjust 'LABEL_0', 'LABEL_1', 'LABEL_2' to your model's actual output labels
87
+ # Check your model's config.json on Hugging Face Hub under 'id2label' or 'label2id'
88
+ # Example: "id2label": {"0": "negative", "1": "neutral", "2": "positive"}
89
+ if label.lower() == "positive" or label.lower() == "label_2": # Assuming LABEL_2 is positive
90
  emoji = "😊"
91
  description = "This text expresses a **highly positive** sentiment." if score > 0.9 else "This text expresses a **positive** sentiment."
92
  color_class = "sentiment-positive"
93
+ elif label.lower() == "negative" or label.lower() == "label_0": # Assuming LABEL_0 is negative
94
  emoji = "😠"
95
  description = "This text expresses a **highly negative** sentiment." if score > 0.9 else "This text expresses a **negative** sentiment."
96
  color_class = "sentiment-negative"
97
+ elif label.lower() == "neutral" or label.lower() == "label_1": # Assuming LABEL_1 is neutral
98
  emoji = "😐"
99
  description = "This text expresses a **neutral** sentiment."
100
  color_class = "sentiment-neutral"
101
  else:
102
  emoji = "❓"
103
+ description = "Could not confidently determine sentiment. Unexpected label."
104
  color_class = ""
105
 
106
  return f"<div class='sentiment-display {color_class}'>{emoji} {label.upper()} ({score:.2f})</div>" + \
 
123
  }
124
 
125
  try:
126
+ # The pipeline returns a list of lists of dictionaries if top_k=None
127
+ # e.g., [[{'label': 'LABEL_0', 'score': 0.9}, {'label': 'LABEL_1', 'score': 0.05}, ...]]
128
  results = sentiment_analyzer(text)[0] # Get the first (and only) list of results
129
 
130
  # Sort results by score in descending order
 
183
  ["The meeting concluded without any major decisions."]
184
  ],
185
  inputs=text_input,
186
+ fn=analyze_sentiment, # <-- CORRECTED: Function to run for examples
187
+ outputs=[overall_sentiment_output, confidence_scores_output, raw_output], # <-- CORRECTED: Outputs for examples
188
  cache_examples=True # Caching examples for faster loading
189
  )
190
 
191
  with gr.Column(scale=3):
192
  gr.Markdown("## 📈 Analysis Results 📉")
193
+ # Using gr.HTML for "Overall Sentiment" to allow custom CSS and rich content
194
+ overall_sentiment_output = gr.HTML(label="Overall Sentiment")
195
+ # Using gr.Label for "Confidence Scores" as it's designed for classification outputs
196
+ confidence_scores_output = gr.Label(num_top_classes=3, label="Confidence Scores")
197
+ # Using gr.JSON for raw output, useful for debugging, but hidden by default
198
+ raw_output = gr.JSON(label="Raw Model Output", visible=False)
199
 
200
  # --- Event Listeners ---
201
+ # Trigger analysis when text input changes (optional, can be resource intensive if live=True)
202
  text_input.change(
203
  fn=analyze_sentiment,
204
  inputs=text_input,
205
  outputs=[overall_sentiment_output, confidence_scores_output, raw_output],
206
  # live=True # Uncomment for live updates as user types (can be resource intensive)
207
  )
208
+ # Trigger analysis when the button is clicked
209
  analyze_btn.click(
210
  fn=analyze_sentiment,
211
  inputs=text_input,
212
  outputs=[overall_sentiment_output, confidence_scores_output, raw_output]
213
  )
214
 
215
+ # Launch the Gradio application
216
  demo.launch()