import torch
import torch.nn.functional as F

from .model_loader import get_model_tokenizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def classify_text(text: str):
    """Classify a piece of text as human-written or AI-generated.

    Returns a dict with the predicted label and the model's confidence as a percentage.
    """
    model, tokenizer = get_model_tokenizer()
    # Make sure the model sits on the same device as the inputs and is in eval mode
    # (a no-op if the loader already handles this).
    model = model.to(device).eval()

    # Tokenize, truncating long inputs to the model's 512-token limit.
    inputs = tokenizer(text, return_tensors='pt', padding=True, truncation=True, max_length=512)
    inputs = {k: v.to(device) for k, v in inputs.items()}

    with torch.no_grad():
        outputs = model(**inputs)
        # Some wrappers return raw logits; Hugging Face models return an output object with .logits.
        logits = outputs if isinstance(outputs, torch.Tensor) else outputs.logits
        probs = F.softmax(logits, dim=1)
        pred = torch.argmax(probs, dim=1).item()
        prob_percent = probs[0][pred].item() * 100

    return {"label": "Human" if pred == 0 else "AI", "confidence": round(prob_percent, 2)}
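

# Minimal usage sketch, assuming this module lives inside a package so the relative
# import of .model_loader resolves (e.g. run as `python -m <package>.<module>`;
# package and module names here are placeholders, not part of the original code).
if __name__ == "__main__":
    result = classify_text("The quick brown fox jumps over the lazy dog.")
    print(result)  # prints a dict like {"label": ..., "confidence": ...}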