mskov committed on
Commit
5864c05
·
1 Parent(s): 22b7cff

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -112,7 +112,9 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
112
  print("keys ", classification_output.keys())
113
 
114
  # formatted_classification_output = "\n".join([f"{key}: {value}" for key, value in classification_output.items()])
115
- label_score_pairs = [(label, score) for label, score in zip(classification_output['labels'], classification_output['scores'])]
 
 
116
 
117
 
118
 
@@ -124,7 +126,7 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
124
  else:
125
  affirm = ""
126
 
127
- return toxicity_score, label_score_pairs, transcribed_text, affirm
128
  # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
129
  else:
130
  threshold = slider_logic(slider)
 
112
  print("keys ", classification_output.keys())
113
 
114
  # formatted_classification_output = "\n".join([f"{key}: {value}" for key, value in classification_output.items()])
115
+ # label_score_pairs = [(label, score) for label, score in zip(classification_output['labels'], classification_output['scores'])]
116
+ label_score_dict = {label: score for label, score in zip(classification_output['labels'], classification_output['scores'])}
117
+
118
 
119
 
120
 
 
126
  else:
127
  affirm = ""
128
 
129
+ return toxicity_score, label_score_dict, transcribed_text, affirm
130
  # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
131
  else:
132
  threshold = slider_logic(slider)