Update app.py
app.py
CHANGED
@@ -51,25 +51,39 @@ def preprocess_text(text):
 def predict_sentiment(text):
     if not text:
         return 0.0
-    encoded_input = tokenizer(
-        text.split(),
-        return_tensors='pt',
-        padding=True,
-        truncation=True,
-        max_length=512
-    )
-    input_ids, attention_mask = encoded_input["input_ids"], encoded_input["attention_mask"]
-    with torch.no_grad():
-        score = score_model(input_ids, attention_mask)[0].item()
-
-
-    k = 20
-    midpoint = 0.7
-
-    scaled_score = 1 / (1 + np.exp(-k * (score - midpoint)))
-    final_output = scaled_score * 100
-
-    return final_output
+    # encoded_input = tokenizer(
+    #     text.split(),
+    #     return_tensors='pt',
+    #     padding=True,
+    #     truncation=True,
+    #     max_length=512
+    # )
+    # input_ids, attention_mask = encoded_input["input_ids"], encoded_input["attention_mask"]
+    # with torch.no_grad():
+    #     score = score_model(input_ids, attention_mask)[0].item()
+
+
+    # k = 20
+    # midpoint = 0.7
+
+    # scaled_score = 1 / (1 + np.exp(-k * (score - midpoint)))
+    # final_output = scaled_score * 100
+
+    # return 1-final_output
+    text = preprocess_text(text)
+    encoded_input = tokenizer(text, return_tensors='pt')
+    output = model(**encoded_input)
+    scores = output[0][0].detach().numpy()
+    scores = softmax(scores)
+    ranking = np.argsort(scores)
+    ranking = ranking[::-1]
+    negative_id = -1
+    for idx, label in config.id2label.items():
+        if label.lower() == 'positive':
+            negative_id = idx
+    negative_score = scores[negative_id]
+
+    return (float(negative_score))*100
 
 
 
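For context, the added path follows the usual Hugging Face sequence-classification recipe: tokenize, run the model, softmax the logits, look up the index whose config.id2label entry is 'positive', and return that probability scaled to 0-100 (the diff keeps the names negative_id / negative_score even though they hold the 'positive' label's index and score). Below is a minimal, self-contained sketch of that flow; the checkpoint name and the preprocess_text stub are assumptions, since this hunk does not show where tokenizer, model, and config are created.

# Sketch only: the checkpoint and preprocess_text below are assumptions,
# not necessarily the Space's actual setup.
from scipy.special import softmax
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

MODEL = "cardiffnlp/twitter-roberta-base-sentiment-latest"  # assumed checkpoint
tokenizer = AutoTokenizer.from_pretrained(MODEL)
config = AutoConfig.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

def preprocess_text(text):
    # Stand-in for the Space's own preprocess_text (defined above this hunk).
    return text.strip()

def predict_sentiment(text):
    if not text:
        return 0.0
    text = preprocess_text(text)
    encoded_input = tokenizer(text, return_tensors='pt')
    output = model(**encoded_input)
    scores = softmax(output[0][0].detach().numpy())  # logits -> probabilities
    # Index of the label literally named 'positive' in the model's config.
    positive_id = next(i for i, lbl in config.id2label.items() if lbl.lower() == 'positive')
    return float(scores[positive_id]) * 100  # positive-class probability, 0-100

print(predict_sentiment("I really enjoyed this!"))

Unlike the removed version, which squashed a single regression score through a sigmoid (k = 20, midpoint = 0.7), this path returns the classifier's own probability for the 'positive' label as a percentage.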