Update app.py
app.py
CHANGED
@@ -2,28 +2,49 @@ import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
 import torch

-# Load
+# Load the model
 model_name = "roberta-base-openai-detector"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSequenceClassification.from_pretrained(model_name)

+# Detection logic
 def detect_ai(text):
     inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
     with torch.no_grad():
         outputs = model(**inputs)
     probs = torch.nn.functional.softmax(outputs.logits, dim=1)
-    … (lines 15-29 of the previous version were removed here; their content is truncated in this view)
+    ai_score = round(probs[0][1].item() * 100, 2)
+    human_score = round(probs[0][0].item() * 100, 2)
+
+    if ai_score > 80:
+        verdict = "⚠️ Likely AI-generated"
+    elif human_score > 80:
+        verdict = "✅ Likely Human-written"
+    else:
+        verdict = "❓ Unclear – Mixed Content"
+
+    return {
+        "AI-generated (%)": ai_score,
+        "Human-written (%)": human_score,
+        "Verdict": verdict
+    }
+
+# Build UI with Blocks
+with gr.Blocks(css="footer {display: none !important;}") as demo:
+    gr.Markdown("""
+    # 🔍 AI Text Detector
+    **Check if a text was written by AI or a human**
+    Using `roberta-base-openai-detector` – Powered by 🤗 Hugging Face + Gradio
+    """)
+
+    with gr.Row():
+        input_box = gr.Textbox(label="Paste your text", placeholder="Enter at least 40 words...", lines=10)
+
+    with gr.Row():
+        analyze_btn = gr.Button("🔍 Analyze Text")
+
+    output_json = gr.JSON(label="📊 Detection Result")
+
+    analyze_btn.click(fn=detect_ai, inputs=input_box, outputs=output_json)
+
+demo.launch()
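
One detail worth noting about the new detect_ai: it reads the AI probability from probs[0][1] and the human probability from probs[0][0], which assumes a particular class order for roberta-base-openai-detector. Below is a minimal sketch of an index-agnostic variant that keys the scores by the model's own label names via model.config.id2label. It reuses the tokenizer and model objects loaded in app.py; the function name detect_ai_by_label and the sample input are illustrative only, not part of the Space.

import torch

def detect_ai_by_label(text):
    # Tokenize and score exactly as detect_ai does above.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.nn.functional.softmax(logits, dim=1)[0]
    # model.config.id2label maps each class index to its label name,
    # so nothing is hard-coded about which index means "AI" vs. "human".
    return {model.config.id2label[i]: round(p.item() * 100, 2)
            for i, p in enumerate(probs)}

# Hypothetical usage; the sample text is illustrative only:
# print(detect_ai_by_label("Paste a paragraph of at least 40 words here..."))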