import gradio as gr
import pandas as pd
import os
import google.generativeai as genai
import fitz # PyMuPDF to read PDF
# Load Gemini API key from Hugging Face secret
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))

# Gemini model handle; "gemini-1.5-flash" is an assumed model name, swap in the model you intend to use
model = genai.GenerativeModel("gemini-1.5-flash")
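# Note: GEMINI_API_KEY must be present in the environment before any request is made,
# either as a Hugging Face Space secret or exported locally when running the app yourself.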
def extract_text_from_pdf(pdf_file):
    doc = fitz.open(pdf_file.name)  # Gradio file uploads expose a temp-file path via .name
    text = ""
    for page in doc:
        text += page.get_text()
    return text.strip()
# Gemini prompting logic
def evaluate_prompt(question, solution, instructions):
    prompt = f"""
You are an expert math evaluator. Follow these instructions carefully:
{instructions}
---
**Question:**
{question}
**Student's Solution:**
{solution}
Evaluate the solution. Say whether it's correct or not. If incorrect, give a clear and detailed explanation of why it's wrong and how to fix it.
Respond in this format:
Correct?: [Yes/No]
Reasoning: <explanation>
"""
    response = model.generate_content(prompt)
    return response.text.strip()
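# Illustrative only: given the format requested in the prompt above, a typical reply
# is two lines along these (hypothetical) lines:
#   Correct?: No
#   Reasoning: <explanation of the mistake and how to fix it>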
# Handler for single text input
def evaluate_text_inputs(question, solution, instructions_file, instructions_text):
    # Prefer typed instructions; otherwise fall back to the uploaded PDF, if any
    instructions = instructions_text or (extract_text_from_pdf(instructions_file) if instructions_file else "")
    result = evaluate_prompt(question, solution, instructions)
    return result
# Handler for CSV batch input
def evaluate_csv(csv_file, instructions_file, instructions_text):
    # Prefer typed instructions; otherwise fall back to the uploaded PDF, if any
    instructions = instructions_text or (extract_text_from_pdf(instructions_file) if instructions_file else "")
    df = pd.read_csv(csv_file.name)
    evaluations = []
    for _, row in df.iterrows():
        q = row.get('question', '')
        s = row.get('solution', '')
        try:
            eval_result = evaluate_prompt(q, s, instructions)
            evaluations.append(eval_result)
        except Exception as e:
            evaluations.append(f"Error: {e}")
    df['evaluation'] = evaluations
    output_path = "evaluated_results.csv"
    df.to_csv(output_path, index=False)
    return output_path
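# Hypothetical example of the expected input CSV: only the 'question' and 'solution'
# columns are read, and an 'evaluation' column is appended to the downloadable output.
#   question,solution
#   "Solve 2x + 3 = 7","x = 2"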
# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 Solution Evaluator (Gemini API)")
    gr.Markdown("Evaluate student solutions using Google's Gemini API.")

    with gr.Tab("📝 Single Evaluation"):
        with gr.Row():
            question_input = gr.Textbox(label="Question", lines=3)
            solution_input = gr.Textbox(label="Solution", lines=6)
        with gr.Row():
            instructions_text = gr.Textbox(label="Instructions (text)", lines=6)
            instructions_file = gr.File(label="Upload Instructions PDF", file_types=[".pdf"])
        output_single = gr.Textbox(label="Evaluation Result", lines=8)
        btn_single = gr.Button("Evaluate Solution")
        btn_single.click(fn=evaluate_text_inputs,
                         inputs=[question_input, solution_input, instructions_file, instructions_text],
                         outputs=output_single)

    with gr.Tab("📄 Batch CSV Evaluation"):
        csv_file = gr.File(label="Upload CSV (columns: question, solution)", file_types=[".csv"])
        inst_text = gr.Textbox(label="Instructions (text)", lines=6)
        inst_pdf = gr.File(label="Upload Instructions PDF", file_types=[".pdf"])
        output_csv = gr.File(label="Download Evaluated CSV")
        btn_batch = gr.Button("Evaluate All")
        btn_batch.click(fn=evaluate_csv,
                        inputs=[csv_file, inst_pdf, inst_text],
                        outputs=output_csv)

demo.launch()