from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import gradio as gr
import numpy as np

MODEL_NAME = "cardiffnlp/twitter-xlm-roberta-base-sentiment"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)

# Label order matches the model head: 0 = negative, 1 = neutral, 2 = positive
labels = ['Negative', 'Neutral', 'Positive']
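# Optional sketch: the label names could also be read from the checkpoint itself,
# assuming its config ships human-readable names (this varies between checkpoints):
# labels = [model.config.id2label[i] for i in range(model.config.num_labels)]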


def classify_sentiment(text):
    # Tokenize the input, truncating to the model's maximum sequence length
    inputs = tokenizer(text, return_tensors="pt", truncation=True)

    # Inference only: no gradient tracking needed
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits

    # Convert logits to class probabilities
    probs = torch.nn.functional.softmax(logits, dim=1).numpy()[0]

    # gr.Label expects a {label: probability} mapping
    return {labels[i]: float(probs[i]) for i in range(len(labels))}
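

# Optional quick sanity check before launching the UI (example input is illustrative):
# print(classify_sentiment("I love this!"))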


# Gradio UI: free-text input, ranked label output showing all three class probabilities
iface = gr.Interface(
    fn=classify_sentiment,
    inputs=gr.Textbox(lines=3, placeholder="Enter text..."),
    outputs=gr.Label(num_top_classes=3),
    title="Twitter Sentiment Classifier",
    description="Uses CardiffNLP's multilingual RoBERTa model to classify text as positive, neutral, or negative.",
)

# share=True additionally exposes a temporary public link alongside the local server
iface.launch(share=True)