from typing import Tuple

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "KevSun/Engessay_grading_ML"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)


@torch.no_grad()
def grade_Engessay_grading_ML(question: str, answer: str) -> Tuple[float, str]:
    # Concatenate the prompt and the essay into a single input sequence.
    text = f"{question} {answer}"
    # Truncate over-length essays so the input fits the model's maximum sequence length.
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    outputs = model(**inputs)

    # The model emits one regression logit per rubric dimension.
    predictions = outputs.logits.squeeze()
    predicted_scores = predictions.numpy()

    # Apply the linear rescaling and round each dimension to the nearest 0.5 point.
    scaled_scores = 2.25 * predicted_scores - 1.25
    rounded_scores = [round(score * 2) / 2 for score in scaled_scores]

    labels = [
        "cohesion",
        "syntax",
        "vocabulary",
        "phraseology",
        "grammar",
        "conventions",
    ]

    # Overall score is the mean of the per-dimension scores, also rounded to 0.5.
    overall_score = round(sum(rounded_scores) / len(rounded_scores) * 2) / 2

    # Build a per-dimension breakdown as the feedback comment.
    comment = ""
    for label, score in zip(labels, rounded_scores):
        comment += f"{label}: {score}\n"

    return overall_score, comment
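

# Minimal usage sketch: the prompt and essay below are made-up placeholders, and
# running this requires the model weights to be downloadable from the Hugging Face Hub.
if __name__ == "__main__":
    sample_question = "Describe a challenge you overcame and what you learned from it."
    sample_answer = (
        "Last year I struggled with public speaking. By joining a debate club and "
        "practicing every week, I slowly became confident presenting in front of others."
    )
    score, feedback = grade_Engessay_grading_ML(sample_question, sample_answer)
    print(f"Overall score: {score}")
    print(feedback)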