from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import torch

# Step 1: Set device
device = "cuda" if torch.cuda.is_available() else "cpu"

# Step 2: Load model & tokenizer
try:
    model_name = "ibm-granite/granite-3.3-2b-instruct"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        device=0 if device == "cuda" else -1,
        max_new_tokens=700
    )
    print("✅ Model and tokenizer loaded successfully.")
except Exception as e:
    print(f"❌ Error loading model/tokenizer: {e}")
    generator = None

# Utility function to generate text
def generate_response(prompt):
    if generator is None:
        return "❌ Error: Model not loaded."
    # return_full_text=False strips the echoed prompt so only the
    # newly generated quiz text is returned
    response = generator(prompt, return_full_text=False)
    return response[0]["generated_text"]

# Functionality 1: Generate Quiz
def generate_quiz(subject: str, score: int, num_questions: int):
    prompt = f"""
You are an expert tutor.
Topic: {subject}
Student Score: {score}/10

Generate {num_questions} multiple-choice questions to help improve the student's understanding of the topic '{subject}'.

Each question must:
- Be relevant and based only on the topic: '{subject}'
- Be logically sound and factually correct
- Have 4 answer options labeled (A–D)
- All options should be plausible and follow the same format or pattern
- Avoid giving away the correct answer by formatting (e.g., using acronyms only in one option)
- Clearly mark the correct answer at the end of each question like this: Correct Answer: B

Use this exact format:
Qn: <question text>
A. <option A>
B. <option B>
C. <option C>
D. <option D>
Correct Answer: <letter>
"""
    return generate_response(prompt)
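
# Example usage: a minimal smoke test of the quiz generator, run only when this
# file is executed directly. This is a sketch, not part of the app wiring; the
# topic, score, and question count below are illustrative placeholders.
if __name__ == "__main__":
    sample_quiz = generate_quiz("Photosynthesis", score=6, num_questions=3)
    print(sample_quiz)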