neelsahu committed
Commit · a474a48 · Parent(s): a80a3a6
short form to Full form
app.py CHANGED
```diff
@@ -43,18 +43,18 @@ def predict_abusive_lang(text):
         prediction = model.predict(text)
         print("prediction ", prediction)
         if len(prediction)!=0 and prediction[0]==0:
-            return ["
+            return ["Not Abusive", cleaned_text]
         elif len(prediction)!=0 and prediction[0]==1:
-            return ["
+            return ["Abusive",cleaned_text]
         else :
             return ["Please write something in the comment box..","No cleaned text"]
     elif lang=='hi':
         print("using transformers for Hindi text")
         scores = predict_hindi_text(text)
         if scores[1] > scores[0]: # If score for abusive class is higher
-            return ["
+            return ["Abusive", text]
         else:
-            return ["
+            return ["Not Abusive", text]
     else:
         return ["UN","No cleaned text"]
```
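The hunk calls `predict_hindi_text`, which is defined elsewhere in app.py and not shown in this diff. For orientation only, here is a minimal sketch of what such a helper could look like with the Hugging Face `transformers` library, assuming a binary sequence-classification checkpoint whose index 1 is the abusive class; the checkpoint name below is a placeholder, not the model app.py actually loads:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Placeholder checkpoint name: the real model used by app.py is not visible in this diff.
CHECKPOINT = "some-org/hindi-abuse-classifier"

tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
hindi_model = AutoModelForSequenceClassification.from_pretrained(CHECKPOINT)

def predict_hindi_text(text):
    """Return per-class probabilities; index 1 is assumed to be the abusive class."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = hindi_model(**inputs).logits
    # Softmax over the class logits gives a probability per class.
    return torch.softmax(logits, dim=-1)[0].tolist()
```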
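After this commit, callers receive the full-form labels. A quick usage sketch, assuming the rest of app.py (the English model and the language detection that sets `lang`) is loaded:

```python
label, cleaned = predict_abusive_lang("यह एक टिप्पणी है")
print(label)  # "Abusive" or "Not Abusive" for English/Hindi input
```

Note that the unsupported-language branch still returns the abbreviated "UN", so the rename to full forms covers only the English and Hindi paths.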