File size: 3,333 Bytes
f4b4b3e
1a2993b
 
ee303fb
 
 
 
dabc3da
ee303fb
 
 
 
 
 
 
 
f4b4b3e
1a2993b
 
 
 
ee303fb
1a2993b
 
ee303fb
1a2993b
 
ee303fb
1a2993b
ee303fb
 
 
f4b4b3e
ee303fb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1a2993b
ee303fb
 
1a2993b
ee303fb
 
 
 
1a2993b
 
f4b4b3e
ee303fb
1a2993b
ee303fb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1a2993b
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import gradio as gr
import pandas as pd
import os
import google.generativeai as genai
import fitz  # PyMuPDF to read PDF

# Load Gemini API key from Hugging Face secret
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))

# Model handle used by evaluate_prompt below.
# NOTE(review): the original file was truncated here and `model` was never
# defined although evaluate_prompt calls model.generate_content(); restored
# with a current default model name — confirm the intended model for this
# deployment.
model = genai.GenerativeModel("gemini-1.5-flash")


def extract_text_from_pdf(pdf_file):
    """Return the concatenated text of every page of an uploaded PDF.

    NOTE(review): the original `def` line was lost in truncation; this header
    is reconstructed to match the dangling body (page iteration accumulating
    `text`) and its call sites, which pass a Gradio file object.

    Args:
        pdf_file: Gradio file object exposing a filesystem path via `.name`.

    Returns:
        All page text joined together, with surrounding whitespace stripped.
    """
    text = ""
    # Context manager ensures the document handle is closed even on error.
    with fitz.open(pdf_file.name) as doc:
        for page in doc:
            text += page.get_text()
    return text.strip()

# Gemini prompting logic
def evaluate_prompt(question, solution, instructions):
    """Ask Gemini to grade a single student solution.

    Builds a grading prompt from the evaluator instructions, the question,
    and the student's answer, then returns the model's verdict text
    (expected shape: "Correct?: [Yes/No]" followed by "Reasoning: ...").

    Args:
        question: the problem statement.
        solution: the student's attempted solution.
        instructions: evaluator guidelines to prepend to the prompt.

    Returns:
        The model's response text with surrounding whitespace stripped.
    """
    grading_request = f"""
You are an expert math evaluator. Follow these instructions carefully:

{instructions}

---

**Question:**
{question}

**Student's Solution:**
{solution}

Evaluate the solution. Say whether it's correct or not. If incorrect, give a clear and detailed explanation of why it's wrong and how to fix it.

Respond in this format:
Correct?: [Yes/No]
Reasoning: <explanation>
"""
    reply = model.generate_content(grading_request)
    return reply.text.strip()

# Handler for single text input
def evaluate_text_inputs(question, solution, instructions_file, instructions_text):
    """Gradio handler for the single-evaluation tab.

    Instruction precedence: pasted text wins, then an uploaded PDF,
    otherwise empty instructions.

    Bug fix: the original one-liner
        instructions_text or extract_text_from_pdf(f) if instructions_file else ""
    parses as `(text or pdf) if file else ""` (the conditional binds last),
    so pasted instructions were silently discarded whenever no PDF was
    uploaded. Rewritten as an explicit if/elif chain.

    Args:
        question: problem statement from the textbox.
        solution: student solution from the textbox.
        instructions_file: optional Gradio PDF upload.
        instructions_text: optional pasted instructions.

    Returns:
        The evaluation text produced by evaluate_prompt.
    """
    if instructions_text:
        instructions = instructions_text
    elif instructions_file:
        instructions = extract_text_from_pdf(instructions_file)
    else:
        instructions = ""
    return evaluate_prompt(question, solution, instructions)

# Handler for CSV batch input
def evaluate_csv(csv_file, instructions_file, instructions_text):
    """Gradio handler for the batch tab: grade every row of a CSV.

    Expects columns `question` and `solution`; writes the verdicts into a
    new `evaluation` column and saves the annotated CSV next to the app.

    Args:
        csv_file: Gradio file object for the uploaded CSV.
        instructions_file: optional Gradio PDF upload with instructions.
        instructions_text: optional pasted instructions.

    Returns:
        Path of the written results CSV ("evaluated_results.csv").
    """
    # Bug fix: same precedence bug as evaluate_text_inputs — the original
    # `a or b if c else ""` ignored pasted text unless a PDF was uploaded.
    if instructions_text:
        instructions = instructions_text
    elif instructions_file:
        instructions = extract_text_from_pdf(instructions_file)
    else:
        instructions = ""

    df = pd.read_csv(csv_file.name)

    evaluations = []
    for _, row in df.iterrows():
        question = row.get('question', '')
        solution = row.get('solution', '')
        try:
            evaluations.append(evaluate_prompt(question, solution, instructions))
        except Exception as e:
            # Keep the batch going; record the per-row failure in the output.
            evaluations.append(f"Error: {e}")

    df['evaluation'] = evaluations
    output_path = "evaluated_results.csv"
    df.to_csv(output_path, index=False)
    return output_path

# Gradio UI
# Two tabs share the same pair of instruction inputs (free text or PDF):
#   1) grade a single question/solution pair,
#   2) grade a whole CSV and return an annotated copy for download.
with gr.Blocks() as demo:
    gr.Markdown("## πŸ€– Solution Evaluator (Gemini API)")
    gr.Markdown("Evaluate student solutions using Google's Gemini API.")

    with gr.Tab("πŸ“ Single Evaluation"):
        with gr.Row():
            question_input = gr.Textbox(label="Question", lines=3)
            solution_input = gr.Textbox(label="Solution", lines=6)
        with gr.Row():
            instructions_text = gr.Textbox(label="Instructions (text)", lines=6)
            instructions_file = gr.File(label="Upload Instructions PDF", file_types=[".pdf"])
        output_single = gr.Textbox(label="Evaluation Result", lines=8)
        btn_single = gr.Button("Evaluate Solution")
        # Input order must match evaluate_text_inputs(question, solution, file, text).
        btn_single.click(fn=evaluate_text_inputs,
                         inputs=[question_input, solution_input, instructions_file, instructions_text],
                         outputs=output_single)

    with gr.Tab("πŸ“„ Batch CSV Evaluation"):
        csv_file = gr.File(label="Upload CSV (columns: question, solution)", file_types=[".csv"])
        inst_text = gr.Textbox(label="Instructions (text)", lines=6)
        inst_pdf = gr.File(label="Upload Instructions PDF", file_types=[".pdf"])
        # evaluate_csv returns a file path, which gr.File renders as a download.
        output_csv = gr.File(label="Download Evaluated CSV")
        btn_batch = gr.Button("Evaluate All")
        btn_batch.click(fn=evaluate_csv,
                        inputs=[csv_file, inst_pdf, inst_text],
                        outputs=output_csv)

# Blocking call: starts the web server for the app.
demo.launch()