File size: 8,509 Bytes
5775448
9ec24d8
b7b20e2
 
d16f9ab
af77c21
 
5eea801
d16f9ab
 
13c672e
 
 
 
 
 
9aeba3f
 
 
 
 
 
 
a2871ab
 
 
 
56eebd4
a2871ab
 
 
 
 
 
 
 
 
d483f79
 
 
 
 
 
 
 
 
 
af77c21
4f1e7f3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b7b20e2
9ec24d8
b7b20e2
9ec24d8
 
b7b20e2
9ec24d8
7586e9b
d16f9ab
564d0c6
 
af77c21
b7b20e2
9ec24d8
b7b20e2
 
 
5eea801
b7b20e2
 
 
d16f9ab
b7b20e2
 
d16f9ab
b7b20e2
d16f9ab
b7b20e2
a64c251
d16f9ab
b7b20e2
 
d16f9ab
b7b20e2
d16f9ab
02f7269
b7b20e2
 
 
 
 
 
d16f9ab
 
 
 
 
 
 
1d3dd26
 
 
 
13c672e
d16f9ab
 
 
 
 
 
e25d497
 
1d3dd26
d16f9ab
13c672e
564d0c6
0c6da03
9bb87f2
d16f9ab
337650d
050076e
9bb87f2
e25d497
1d3dd26
7c9961e
0c6da03
 
 
13c672e
9bb87f2
1d3dd26
 
4f1e7f3
1d3dd26
e25d497
1d3dd26
4f1e7f3
1d3dd26
 
 
 
13c672e
 
1d3dd26
d16f9ab
 
 
 
 
6d00b6b
 
d16f9ab
 
9aeba3f
d16f9ab
 
 
 
9aeba3f
 
d16f9ab
9aeba3f
 
9f9c725
9aeba3f
6d00b6b
9aeba3f
6d00b6b
b7b20e2
a2871ab
b7b20e2
5eea801
b7b20e2
d16f9ab
 
 
6d00b6b
c8472ad
 
d16f9ab
 
 
 
b7b20e2
 
5eea801
b7b20e2
5750c07
0e643c9
 
d16f9ab
 
0e643c9
 
d16f9ab
 
 
0e643c9
d16f9ab
 
0e643c9
b324bf2
d16f9ab
a6a2ff2
0e643c9
 
 
87cc698
 
cfd7b4a
 
0e643c9
 
 
 
 
 
cfd7b4a
 
 
 
 
 
 
0e643c9
 
d16f9ab
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242

import json
import os
from datetime import datetime

import dspy
import google.generativeai as genai
import gradio as gr
import requests
import torch
from qdrant_client import QdrantClient
from sentence_transformers import SentenceTransformer
from transformers import pipeline

# Configure Gemini API.
# SECURITY: an API key was previously committed here in plain text. Read it
# from the GEMINI_API_KEY environment variable instead; the old literal is
# kept only as a backward-compatible fallback and should be revoked/rotated.
genai.configure(api_key=os.environ.get("GEMINI_API_KEY", "AIzaSyBO3-HG-WcITn58PdpK7mMyvFQitoH00qA"))

# === Output Guard ===
def output_guard(answer):
    """Return True when *answer* looks substantial (>= 20 chars after stripping).

    Empty, None, or very short answers are rejected so the caller can fall
    back to another answer source.
    """
    text = (answer or "").strip()
    if len(text) < 20:
        print("Output guard triggered: answer too short or empty.")
        return False
    # Additional quality checks could be chained here if needed.
    return True
import os
from datetime import datetime

# Safe path for Hugging Face Spaces (will reset on restart)
feedback_path = "feedback.json"

def store_feedback(question, answer, feedback, correct_answer):
    """Append one feedback record to the JSON-lines file at *feedback_path*.

    Best-effort: I/O errors are printed and swallowed so a feedback problem
    never crashes the UI.

    Args:
        question: the user's original question.
        answer: the answer the model produced.
        feedback: the user's rating (e.g. thumbs up/down).
        correct_answer: optional correction or comment from the user.
    """
    entry = {
        "question": question,
        "model_answer": answer,
        "feedback": feedback,
        "correct_answer": correct_answer,
        "timestamp": str(datetime.now()),
    }
    print("Attempting to store feedback:", entry)

    try:
        # Explicit UTF-8 so the file is portable regardless of the platform's
        # locale default encoding (the original relied on the default).
        with open(feedback_path, "a", encoding="utf-8") as f:
            f.write(json.dumps(entry) + "\n")
        print("โœ… Feedback saved at", feedback_path)
    except Exception as e:
        print("โŒ Error writing feedback:", e)




import re

def latex_to_plain_math(latex_expr):
    """Convert a small subset of LaTeX math into plain-text notation.

    Handles \\frac, \\sqrt, \\pm, \\cdot, squares/cubes, and strips leftover
    braces.  Not a full LaTeX parser: the non-greedy, brace-unaware regexes
    do not handle nested \\frac{...}{...} arguments — acceptable for the
    simple solutions stored in the knowledge base.
    """
    latex_expr = latex_expr.strip()
    latex_expr = re.sub(r"\\frac\{(.+?)\}\{(.+?)\}", r"(\1) / (\2)", latex_expr)
    latex_expr = re.sub(r"\\sqrt\{(.+?)\}", r"โˆš(\1)", latex_expr)
    latex_expr = latex_expr.replace("^2", "ยฒ").replace("^3", "ยณ")
    # NOTE: removed a no-op substitution here — re.sub(r"\^(\d)", r"^\1", ...)
    # replaced "^<digit>" with itself and changed nothing.
    latex_expr = latex_expr.replace("\\pm", "ยฑ")
    latex_expr = latex_expr.replace("\\cdot", "โ‹…")
    latex_expr = latex_expr.replace("{", "").replace("}", "")
    return latex_expr

# === Load Models ===
# Both models are fetched from the Hugging Face hub on first run, so the
# first start of this Space is slow and requires network access.
print("Loading zero-shot classifier...")
classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")

print("Loading embedding model...")
embedding_model = SentenceTransformer("intfloat/e5-large")


# Use a lighter model for testing
# NOTE(review): this re-imports `pipeline` (already imported at the top of the
# file) and pulls in AutoModelForSeq2SeqLM / AutoTokenizer that are never used
# below — presumably leftovers from an earlier local-generation setup; confirm
# before removing.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline


# === Qdrant Setup ===
# Embedded (on-disk) Qdrant instance stored under ./qdrant_data — no separate
# server process needed.  The "math_problems" collection is presumably
# populated by a separate ingestion script; confirm it exists before serving.
print("Connecting to Qdrant...")
qdrant_client = QdrantClient(path="qdrant_data")
collection_name = "math_problems"

# === Guard Function ===
def is_valid_math_question(text):
    """Zero-shot classify *text*; accept only a confident "math" verdict.

    Returns True when the top label is "math" with score above 0.7.
    """
    labels = ["math", "not math"]
    verdict = classifier(text, labels)
    print("Classifier result:", verdict)
    top_label = verdict["labels"][0]
    top_score = verdict["scores"][0]
    return top_label == "math" and top_score > 0.7

# === Retrieval ===
def retrieve_from_qdrant(query):
    """Embed *query* and return the payload(s) of the nearest stored problem.

    Returns an empty list when the collection yields no hits.
    """
    print("Retrieving context from Qdrant...")
    vector = embedding_model.encode(query).tolist()
    hits = qdrant_client.search(
        collection_name=collection_name,
        query_vector=vector,
        limit=1,
    )
    print("Retrieved hits:", hits)
    if not hits:
        return []
    return [hit.payload for hit in hits]

# === Web Search ===
def web_search_tavily(query):
    """Query the Tavily search API and return its synthesized answer string.

    Returns a fixed fallback message when the response has no "answer" field.
    """
    print("Calling Tavily...")
    # SECURITY: prefer the TAVILY_API_KEY environment variable; the literal
    # fallback keeps existing deployments working but should be rotated.
    api_key = os.environ.get("TAVILY_API_KEY", "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")
    response = requests.post(
        "https://api.tavily.com/search",
        json={"api_key": api_key, "query": query, "search_depth": "advanced"},
        timeout=30,  # the original call had no timeout and could hang forever
    )
    return response.json().get("answer", "No answer found from Tavily.")

# === DSPy Signature ===
class MathAnswer(dspy.Signature):
    """DSPy signature for math QA: question + retrieved context in, answer out."""
    # NOTE(review): this signature is declared but never passed to a dspy
    # module anywhere in this file — presumably kept for future use; confirm
    # before removing.
    question = dspy.InputField()
    retrieved_context = dspy.InputField()
    answer = dspy.OutputField()

# === DSPy Programs ===
# NOTE(review): duplicate of the import + configure() already done at the top
# of this file; the second configure() simply overwrites the first with the
# same key.  SECURITY: this API key is committed in plain text — rotate it.
import google.generativeai as genai

# Configure Gemini
genai.configure(api_key="AIzaSyBO3-HG-WcITn58PdpK7mMyvFQitoH00qA")  # Replace with your key

class MathRetrievalQA(dspy.Program):
    """Answer a math question from the local Qdrant knowledge base.

    forward() retrieves the nearest stored solution, converts its LaTeX to
    plain math notation, and asks Gemini for a textbook-style answer.
    Returns a dict with "answer" and "retrieved_context" keys; "answer" is
    empty when no context was found, letting the caller fall back to web
    search.
    """

    def forward(self, question):
        print("Inside MathRetrievalQA...")
        context_items = retrieve_from_qdrant(question)
        context = "\n".join([item["solution"] for item in context_items if "solution" in item])
        print("Context for generation:", context)

        # Bail out before doing any further work when retrieval found nothing
        # (the original converted and printed an empty string first).
        if not context:
            return {"answer": "", "retrieved_context": ""}

        f = latex_to_plain_math(context)
        print(f)

        prompt = f"""
You are a math textbook author. Write a clear, professional, and well-formatted solution for the following math problem, using proper LaTeX formatting in every step.

Format the following LaTeX-based math solution into a clean, human-readable explanation as found in textbooks. Use standard math symbols like ยฑ, โˆš, fractions with slashes (e.g. (a + b)/c), and superscripts with ^. Do not use LaTeX syntax or backslashes. Do not wrap equations in dollar signs. Present the steps clearly using numbered headings. Keep all fractions in plain text form.
Problem: {question}
Use the following context if needed:
{f}

Write only the formatted solution, as it would appear in a math textbook. please give me well formated as using stantard math symbols like +,=.- ,x,/.
"""

        try:
            model = genai.GenerativeModel('gemini-2.0-flash')  # or use 'gemini-1.5-flash'
            response = model.generate_content(prompt)
            formatted_answer = response.text
            print("Gemini Answer:", formatted_answer)
            return {"answer": formatted_answer, "retrieved_context": context}
        except Exception as e:
            # Best-effort: surface a friendly message instead of crashing the UI.
            print("Gemini generation error:", e)
            return {"answer": "โš ๏ธ Gemini failed to generate an answer.", "retrieved_context": context}

class WebFallbackQA(dspy.Program):
    """Fallback answerer: delegate the question to the Tavily web search API."""

    def forward(self, question):
        print("Fallback to Tavily...")
        return {
            "answer": web_search_tavily(question),
            "retrieved_context": "Tavily",
        }



class MathRouter(dspy.Program):
    """Top-level router: guard the input, try retrieval QA, fall back to web.

    forward() always returns a dict with "answer" and "retrieved_context"
    keys, matching the shape produced by the two QA programs.
    """

    def forward(self, question):
        print("Routing question:", question)
        # Input guard: refuse anything the classifier doesn't consider math.
        if not is_valid_math_question(question):
            return {"answer": "โŒ Only math questions are accepted. Please rephrase.", "retrieved_context": ""}

        result = MathRetrievalQA().forward(question)

        # Output guard (previously a dangling "Apply output guard here" TODO):
        # reject empty or suspiciously short answers and use web search instead.
        if result["answer"] and output_guard(result["answer"]):
            return result
        return WebFallbackQA().forward(question)

router = MathRouter()



# === Gradio Functions ===
def ask_question(question):
    """Route *question* through the agent and return values for the UI.

    Returns (answer, question, answer): the first fills the visible Markdown
    output; the last two feed the hidden fields used by the feedback form.
    """
    print("ask_question() called with:", question)
    result = router.forward(question)
    print("Result:", result)
    answer = result["answer"]
    return answer, question, answer
 


def submit_feedback(question, model_answer, feedback, correct_answer):
    """Persist one feedback entry and return a status message for the UI."""
    store_feedback(
        question=question,
        answer=model_answer,
        feedback=feedback,
        correct_answer=correct_answer,
    )
    return "โœ… Feedback received. Thank you!"

# === Gradio UI ===
with gr.Blocks() as demo:
    gr.Markdown("## ๐Ÿงฎ Math Agent")

    with gr.Tab("Ask a Math Question & Submit Feedback"):
        with gr.Row():
            question_input = gr.Textbox(label="Enter your math question", lines=2)
            submit_btn = gr.Button("Get Answer")

        gr.Markdown("### ๐Ÿง  Answer:")
        answer_output = gr.Markdown()

        # Hidden fields to hold question and answer for feedback inputs
        hidden_q = gr.Textbox(visible=False)
        hidden_a = gr.Textbox(visible=False)

        # Connect submit button to the ask_question function; it returns
        # (answer, question, answer) to fill the visible output and both
        # hidden fields in one round trip.
        submit_btn.click(fn=ask_question, inputs=[question_input], outputs=[answer_output, hidden_q, hidden_a])

        gr.Markdown("### ๐Ÿ“ Submit Feedback")
        fb_like = gr.Radio(["๐Ÿ‘", "๐Ÿ‘Ž"], label="Was the answer helpful?")
        fb_correct = gr.Textbox(label="Correct Answer (optional) or Comments")
        fb_submit_btn = gr.Button("Submit Feedback")
        fb_status = gr.Textbox(label="Status", interactive=False)
        feedback_file = gr.File(label="๐Ÿ“‚ Download Saved Feedback", interactive=False)
        
        # Feedback submit button uses hidden fields + feedback inputs
        fb_submit_btn.click(
            fn=submit_feedback,
            inputs=[hidden_q, hidden_a, fb_like, fb_correct],
            outputs=[fb_status]
        )
        
        # Second listener on the same button: refresh the downloadable
        # feedback file (returning the path makes Gradio serve the file).
        fb_submit_btn.click(
            fn=lambda: feedback_path,
            outputs=[feedback_file]
        )




# share=True opens a public tunnel; debug=True blocks and streams logs.
demo.launch(share=True, debug=True)