Anusha831 committed on
Commit
2fb15ce
·
verified ·
1 Parent(s): 9b015fd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +93 -0
app.py CHANGED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import torch

# Step 1: Pick the compute device — GPU when available, otherwise CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Step 2: Load tokenizer and model explicitly, then wrap them in a
# text-generation pipeline. On any failure (network, OOM, bad model id)
# we fall back to generator=None so the UI can report the error instead
# of crashing at import time.
try:
    model_name = "ibm-granite/granite-3.3-2b-instruct"

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        # transformers pipelines take a device index: 0 = first GPU, -1 = CPU.
        device=0 if device == "cuda" else -1,
        max_new_tokens=500,
    )
    # Fixed mojibake in the success message ("βœ…" was a mis-encoded ✅).
    print("✅ Model and tokenizer loaded successfully.")
except Exception as e:
    print(f"❌ Error loading model/tokenizer: {e}")
    generator = None
27
# Step 3: Define generation functions
def generate_quiz(subject: str, score: int, num_questions: int):
    """Generate a multiple-choice quiz for *subject* via the LLM pipeline.

    Args:
        subject: Topic to quiz on (e.g. "Algebra").
        score: The student's prior score out of 10, included in the prompt
            as context for the model.
        num_questions: How many MCQs to ask for.

    Returns:
        The raw generated text from the pipeline, or an error string when
        the model failed to load at startup.
    """
    if generator is None:
        return "❌ Error: Model not loaded."

    # NOTE: fixed a typo in the prompt ("student;s understaning" ->
    # "student's understanding") so the instruction sent to the LLM is clean.
    prompt = f"""
You are an expert tutor.

Topic: {subject}
Student Score: {score}/10

Generate {num_questions} multiple-choice questions to help the student's understanding of the topic '{subject}'.

Each question must:
- Be relevant and based only on the topic: '{subject}'
- Be logically sound and factually correct
- Have 4 answer options labeled (A–D)
- All options should be plausible and follow the same format or pattern
- Avoid giving away the correct answer by formatting (e.g., using acronyms only in one option)
- Clearly mark the correct answer at the end of each question like this: Correct Answer: B

Use this exact format:

Qn: <question>
A. <option A>
B. <option B>
C. <option C>
D. <option D>
Correct Answer: <correct option letter>
"""

    output = generator(prompt)
    # Pipeline returns a list of dicts; the text (prompt + completion) is
    # under "generated_text".
    return output[0]["generated_text"]
60
+
61
def generate_feedback(score):
    """Ask the LLM for a friendly feedback message for a score out of 10.

    Returns the raw generated text, or an error string when the model was
    never loaded.
    """
    if generator is None:
        return "❌ Error: Model not loaded."

    prompt = f"""
A student scored {score}/10 on a recent test.
Provide a friendly, personalized feedback message including suggestions to improve further.
"""
    result = generator(prompt)
    first = result[0]
    return first["generated_text"]
71
+
72
+ # Step 4: Gradio Interface
73
def run_all(subject, score, num_questions):
    """Handle one Gradio submit: build the quiz and the feedback together."""
    return (
        generate_quiz(subject, score, num_questions),
        generate_feedback(score),
    )
77
+
78
# Gradio UI: three inputs (topic, prior score, question count) mapped to
# two copyable text outputs (quiz, feedback).
ui_inputs = [
    gr.Textbox(label="Enter Topic (e.g., Algebra)"),
    gr.Slider(0, 10, step=1, label="Score (out of 10)"),
    gr.Slider(1, 10, step=1, label="Number of Questions"),
]
ui_outputs = [
    gr.Textbox(label="Generated Quiz", show_copy_button=True),
    gr.Textbox(label="Personalized Feedback", show_copy_button=True),
]

interface = gr.Interface(
    fn=run_all,
    inputs=ui_inputs,
    outputs=ui_outputs,
    title="EduTutor AI – Personalized Learning & Assessment",
    description="AI-powered quiz and feedback generator using IBM Granite LLM",
)

interface.launch(debug=True)