# PDF Flashcard Generator — Gradio app (deployed as a Hugging Face Space)
import asyncio
import os
import tempfile
from pathlib import Path

import google.generativeai as genai
import gradio as gr
from dotenv import load_dotenv

from flashcard import (
    generate_flashcards_from_pdf,
    FlashcardSet,
)

# Load environment variables from a local .env file.
load_dotenv()
# Fail fast with a KeyError if GEMINI_API_KEY is not set.
genai.configure(api_key=os.environ["GEMINI_API_KEY"])

# Most recently generated flashcard set, kept in memory between turns.
# NOTE(review): this is module-level state shared by ALL users of the app;
# for a multi-user deployment, per-session state (gr.State) would be safer.
current_flashcards = None
def create_flashcard_text(flashcards: FlashcardSet) -> str:
    """Render a flashcard set as a readable multi-line chat message.

    Args:
        flashcards: Set to render; must expose ``topic``, ``total_cards``
            and ``cards`` (each card having ``question``, ``answer`` and
            an integer ``difficulty``).

    Returns:
        A string listing every card, followed by a short menu of
        follow-up actions the user can request.
    """
    lines = [f"📚 Generated {flashcards.total_cards} flashcards about: {flashcards.topic}\n"]
    for idx, card in enumerate(flashcards.cards, 1):
        # Difficulty rendered as a row of stars (e.g. 3 -> ⭐⭐⭐).
        lines.append(f"\n--- Flashcard {idx} (Difficulty: {'⭐' * card.difficulty}) ---")
        lines.append(f"Q: {card.question}")
        lines.append(f"A: {card.answer}")
    lines.append("\n\nYou can ask me to:")
    lines.append("• Modify specific flashcards")
    lines.append("• Generate more flashcards")
    lines.append("• Change difficulty levels")
    lines.append("• Export to Anki")
    return "\n".join(lines)
async def handle_modification_request(text: str, flashcards: FlashcardSet) -> str:
    """Ask Gemini how to modify the current flashcards per a user request.

    Args:
        text: The user's free-form modification request.
        flashcards: The flashcard set currently held in memory.

    Returns:
        The model's suggestion text.

    Raises:
        Whatever the Gemini API raises (quota, auth, network); callers
        are expected to catch and surface these.
    """
    # NOTE(review): 'gemini-pro' is a legacy model name — confirm it is
    # still served, or move the model id into configuration.
    model = genai.GenerativeModel('gemini-pro')
    # Include the full rendered card list so suggestions are grounded in
    # the actual current content.
    prompt = f"""Given the following flashcards and user request, suggest how to modify the flashcards.
Current flashcards:
{create_flashcard_text(flashcards)}
User request: {text}
Please provide specific suggestions for modifications."""
    response = await model.generate_content_async(prompt)
    return response.text
def _chat_entry(role: str, content: str) -> dict:
    """Build one chat message in the Gradio role/content ("messages") format."""
    return {"role": role, "content": content}


async def process_message(message: dict, history: list) -> tuple[str, list]:
    """Handle one chat turn: a file upload and/or a text message.

    Args:
        message: MultimodalTextbox payload with optional "files" (list of
            local file paths) and "text" keys.
        history: Current chat history as a list of role/content dicts.

    Returns:
        ``("", updated_history)`` — the empty string clears the input box.
    """
    global current_flashcards

    # A file upload takes priority over any accompanying text; only the
    # first uploaded file is processed (we return inside the loop).
    if message.get("files"):
        for file_path in message["files"]:
            upload_note = f"Uploaded: {Path(file_path).name}"
            # Case-insensitive so "NOTES.PDF" is accepted too.
            if not file_path.lower().endswith(".pdf"):
                return "", history + [
                    _chat_entry("user", upload_note),
                    _chat_entry("assistant", "Please upload a PDF file."),
                ]
            try:
                current_flashcards = await async_process_pdf(file_path)
                reply = create_flashcard_text(current_flashcards)
            except Exception as e:
                # Surface the failure in chat rather than crashing the UI.
                reply = f"Error processing PDF: {str(e)}"
            return "", history + [
                _chat_entry("user", upload_note),
                _chat_entry("assistant", reply),
            ]

    # Plain text: treat it as a modification request against the cards
    # generated earlier in this process.
    if message.get("text"):
        user_message = message["text"].strip()
        if current_flashcards:
            try:
                reply = await handle_modification_request(user_message, current_flashcards)
            except Exception as e:
                reply = f"Error processing request: {str(e)}"
        else:
            reply = "Please upload a PDF file first to generate flashcards."
        return "", history + [
            _chat_entry("user", user_message),
            _chat_entry("assistant", reply),
        ]

    # Neither a file nor text was provided.
    return "", history + [
        _chat_entry("assistant", "Please upload a PDF file or send a message."),
    ]
def export_to_anki(flashcards: FlashcardSet) -> "str | None":
    """Write the flashcards to an Anki-importable tab-separated text file.

    Args:
        flashcards: The set to export; a falsy value is a no-op.

    Returns:
        Path of the written temp file, or ``None`` when there is nothing
        to export.

    Note:
        The temp file is created with ``delete=False``; the caller (or
        the OS temp cleanup) is responsible for removing it.
    """
    if not flashcards:
        return None
    # encoding="utf-8" keeps non-ASCII card text from failing on platforms
    # whose default encoding is not UTF-8.
    with tempfile.NamedTemporaryFile(
        mode='w', suffix='.txt', delete=False, encoding='utf-8'
    ) as f:
        # Anki header directives: tab separator, HTML rendering, columns.
        f.write("#separator:tab\n")
        f.write("#html:true\n")
        f.write("#columns:Question\tAnswer\tTags\n")
        for card in flashcards.cards:
            # Literal newlines would break the TSV row; Anki renders <br>
            # because #html:true is set above.
            question = card.question.replace('\n', '<br>')
            answer = card.answer.replace('\n', '<br>')
            tags = f"difficulty_{card.difficulty} {flashcards.topic.replace(' ', '_')}"
            f.write(f"{question}\t{answer}\t{tags}\n")
    return f.name
async def async_process_pdf(pdf_path: str) -> FlashcardSet:
    """Generate a FlashcardSet from the PDF at ``pdf_path``.

    Thin async wrapper around ``flashcard.generate_flashcards_from_pdf``
    so the chat handler can await PDF processing directly.
    """
    return await generate_flashcards_from_pdf(pdf_path=pdf_path)
# Build the Gradio interface.
with gr.Blocks(title="PDF Flashcard Generator") as demo:
    gr.Markdown("""
    # 📚 PDF Flashcard Generator
    Upload a PDF document and get AI-generated flashcards to help you study!
    Powered by Google's Gemini AI
    """)
    # type="messages" is required: process_message returns role/content
    # dicts, which the default (tuple) Chatbot format cannot display.
    chatbot = gr.Chatbot(
        label="Flashcard Generation Chat",
        type="messages",
        bubble_full_width=False,
        show_copy_button=True,
        height=600,
    )
    chat_input = gr.MultimodalTextbox(
        label="Upload PDF or type a message",
        placeholder="Drop a PDF file here or type a message to modify flashcards...",
        # Extensions need a leading dot; the MIME type is also accepted.
        file_types=[".pdf", "application/pdf"],
        show_label=False,
        sources=["upload", "microphone"],
    )
    # Clear button for better UX.
    clear_button = gr.Button("Clear Chat")

    # .submit fires once when the user sends the message; .change would
    # re-run the (API-calling) handler on every keystroke.
    chat_input.submit(
        fn=process_message,
        inputs=[chat_input, chatbot],
        outputs=[chat_input, chatbot],
    )
    # Reset both the input box and the chat history.
    clear_button.click(
        lambda: (None, None),
        outputs=[chat_input, chatbot],
    )

if __name__ == "__main__":
    demo.launch(
        share=False,
        server_name="0.0.0.0",  # listen on all interfaces (container-friendly)
        server_port=7860,
        allowed_paths=["."],
    )