import re
import uuid

import gradio as gr
import tiktoken
from openai import OpenAI

client = OpenAI(
    base_url="https://a7g1ajqixo23revq.us-east-1.aws.endpoints.huggingface.cloud/v1/",
    api_key="hf_XXXXX"
)
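# NOTE: "hf_XXXXX" is a placeholder; supply a real Hugging Face token
# (ideally via an environment variable) before deploying.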

def format_math(text):
    text = re.sub(r"\[(.*?)\]", r"$$\1$$", text, flags=re.DOTALL)
    text = text.replace(r"\(", "$").replace(r"\)", "$")
    return text
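# Example: format_math(r"\(x^2\)") -> "$x^2$". Note that this helper is defined
# but not currently wired into the UI.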

# Conversations are kept per-session in gr.State (see conversations_state below)
# rather than in a module-level global dict, so users don't see each other's chats.

def generate_conversation_id():
    return str(uuid.uuid4())[:8]

# Tokenizer used only for an approximate client-side token count so the
# max-token budget can be enforced locally; any OpenAI encoding is close enough.
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
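# e.g. len(enc.encode("hello world")) == 2 under the cl100k_base encoding.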

def generate_response(user_message,
                      max_tokens,
                      temperature,
                      top_p,
                      history_state):
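    """Stream the assistant's reply from the remote endpoint.

    Yields (chatbot_history, state_history) pairs so the Gradio UI can render
    tokens incrementally while keeping the session history state in sync.
    """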
    if not user_message.strip():
        # Empty input: yield (not return) so this generator still emits one
        # no-op update for Gradio to consume.
        yield history_state, history_state
        return

    system_message = "Your role as an assistant..."
    messages = [{"role": "system", "content": system_message}]
    for m in history_state:
        messages.append({"role": m["role"], "content": m["content"]})
    messages.append({"role": "user", "content": user_message})

    try:
        response = client.chat.completions.create(
            model="tgi",
            messages=messages,
            max_tokens=int(max_tokens),        # server-side limit
            temperature=temperature,
            top_p=top_p,
            stream=True
        )
    except Exception as e:
        print(f"[ERROR] OpenAI API call failed: {e}")
        yield history_state + [
            {"role": "user", "content": user_message},
            {"role": "assistant", "content": "⚠️ Generation failed."}
        ], history_state
        return

    assistant_response = ""
    new_history = history_state + [
        {"role": "user", "content": user_message},
        {"role": "assistant", "content": ""}
    ]

    token_budget = int(max_tokens)
    tokens_seen = 0

    try:
        for chunk in response:
            if (not chunk.choices
                or not chunk.choices[0].delta
                or not chunk.choices[0].delta.content):
                continue

            token_text = chunk.choices[0].delta.content
            assistant_response += token_text
            # count how many tokens that piece is worth
            tokens_seen += len(enc.encode(token_text))

            new_history[-1]["content"] = assistant_response.strip()
            yield new_history, new_history

            if tokens_seen >= token_budget:
                break                            # stop the local loop
    except Exception as e:
        # Stream interrupted (client disconnect, endpoint error, ...): keep the
        # partial response instead of failing the whole turn.
        print(f"[WARN] Stream interrupted: {e}")

    yield new_history, new_history


example_messages = {
    "IIT-JEE 2024 Mathematics": "A student appears for a quiz consisting of only true-false type questions and answers all the questions. The student knows the answers of some questions and guesses the answers for the remaining questions. Whenever the student knows the answer of a question, he gives the correct answer. Assume that probability of the student giving the correct answer for a question, given that he has guessed it, is $\\frac{1}{2}$. Also assume that the probability of the answer for a question being guessed, given that the student's answer is correct, is $\\frac{1}{6}$. Then the probability that the student knows the answer of a randomly chosen question is?",
    "IIT-JEE 2025 Physics": "A person sitting inside an elevator performs a weighing experiment with an object of mass 50 kg. Suppose that the variation of the height $y$ (in m) of the elevator, from the ground, with time $t$ (in s) is given by $y = 8[1 + \\sin(2\\pi t/T)]$, where $T = 40\\pi$ s. Taking acceleration due to gravity, $g = 10$ m/s$^2$, the maximum variation of the object's weight (in N) as observed in the experiment is ?",
    "Goldman Sachs Interview Puzzle": "Four friends need to cross a dangerous bridge at night. Unfortunately, they have only one torch and the bridge is too dangerous to cross without one. The bridge is only strong enough to support two people at a time. Not all people take the same time to cross the bridge. Times for each person: 1 min, 2 mins, 7 mins and 10 mins. What is the shortest time needed for all four of them to cross the bridge?",
    "IIT-JEE 2025 Mathematics": "Let $S$ be the set of all seven-digit numbers that can be formed using the digits 0, 1 and 2. For example, 2210222 is in $S$, but 0210222 is NOT in $S$. Then the number of elements $x$ in $S$ such that at least one of the digits 0 and 1 appears exactly twice in $x$, is ?"
}

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # --------- session-scoped states ---------
    conversations_state = gr.State({})                  # per-session dict of conversations
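    # NOTE: this default id is generated once at app build time; Gradio deep-copies
    # State defaults per session, so every session starts with the same id until
    # "New Conversation" assigns a fresh one.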
    current_convo_id = gr.State(generate_conversation_id())
    history_state = gr.State([])

    # Global heading stays at top
    gr.HTML(
    """
    <div style="display: flex; align-items: center; gap: 16px; margin-bottom: 1em;">
        <div style="background-color: black; padding: 6px; border-radius: 8px;">
            <img src="https://framerusercontent.com/images/j0KjQQyrUfkFw4NwSaxQOLAoBU.png" alt="Fractal AI Logo" style="height: 48px;">
        </div>
        <h1 style="margin: 0;">Fathom R1 14B Chatbot</h1>
    </div>
    """
)

    with gr.Sidebar():
        gr.Markdown("## Conversations")
        conversation_selector = gr.Radio(choices=[], label="Select Conversation", interactive=True)
        new_convo_button = gr.Button("New Conversation βž•")

    with gr.Row():
        with gr.Column(scale=1):
            # Intro text
            gr.Markdown(
                """
                Welcome to the Fathom R1 14B Chatbot, developed by Fractal AI Research!

                Our model excels at reasoning tasks in mathematics and science. Because it has been optimised for problems that demand careful step-by-step thinking, it may overthink simple chat queries.

                To check out our GitHub repository, click [here](https://github.com/FractalAIResearchLabs/Fathom-R1).

                For details of the training recipe behind this model, see [here](https://huggingface.co/FractalAIResearch/Fathom-R1-14B).

                Try the example problems below, drawn from popular entrance examinations and interviews, or type in your own to see how the model breaks down and solves complex reasoning problems.

                NOTE: Once you close this demo window, all currently saved conversations will be lost.
                """
            )

            gr.Markdown("### Settings")
            max_tokens_slider = gr.Slider(minimum=6144, maximum=32768, step=1024, value=16384, label="Max Tokens")
            with gr.Accordion("Advanced Settings", open=True):
                temperature_slider = gr.Slider(minimum=0.1, maximum=2.0, value=0.6, label="Temperature")
                top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, label="Top-p")

            # Acknowledgment
            gr.Markdown(
                """
                We sincerely acknowledge [VIDraft](https://huggingface.co/VIDraft) for their Phi 4 Reasoning Plus [space](https://huggingface.co/spaces/VIDraft/phi-4-reasoning-plus), which served as the starting point for this demo.
                """
            )

        with gr.Column(scale=4):
            chatbot = gr.Chatbot(label="Chat", type="messages", height=520)
            with gr.Row():
                user_input = gr.Textbox(label="User Input", placeholder="Type your question here...", lines=3, scale=8)
                with gr.Column():
                    submit_button = gr.Button("Send", variant="primary", scale=1)
                    clear_button = gr.Button("Clear", scale=1)
            gr.Markdown("**Try these examples:**")
            with gr.Row():
                example1_button = gr.Button("IIT-JEE 2025 Mathematics")
                example2_button = gr.Button("IIT-JEE 2025 Physics")
                example3_button = gr.Button("Goldman Sachs Interview Puzzle")
                example4_button = gr.Button("IIT-JEE 2024 Mathematics")

    # ---------- helper functions now receive/return conversations ----------
    def update_conversation_list(conversations):
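        """Return the list of conversation titles for the sidebar radio."""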
        return [conversations[cid]["title"] for cid in conversations]

    def start_new_conversation(conversations):
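        """Create an empty conversation and make it the active selection."""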
        new_id = generate_conversation_id()
        conversations[new_id] = {"title": f"New Conversation {new_id}", "messages": []}
        return new_id, [], gr.update(choices=update_conversation_list(conversations), value=conversations[new_id]["title"]), conversations

    def load_conversation(selected_title, conversations, convo_id, history):
        """Switch to the conversation whose title was selected in the sidebar."""
        for cid, convo in conversations.items():
            if convo["title"] == selected_title:
                return cid, convo["messages"], convo["messages"]
        # Fallback when no title matches: keep the session's current id and
        # history. (Reading gr.State.value here would return the app-level
        # default, not this session's value, so both are passed in as inputs.)
        return convo_id, history, history

    def send_message(user_message, max_tokens, temperature, top_p, convo_id, history, conversations):
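        """Register/rename the conversation as needed, then stream the reply."""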
        if convo_id not in conversations:
            # First message of a session that was never registered: derive a
            # short title from the first five words of the user's message.
            title = " ".join(user_message.strip().split()[:5])
            conversations[convo_id] = {"title": title, "messages": history}
        if conversations[convo_id]["title"].startswith("New Conversation"):
            conversations[convo_id]["title"] = " ".join(user_message.strip().split()[:5])
        for updated_history, new_history in generate_response(user_message, max_tokens, temperature, top_p, history):
            conversations[convo_id]["messages"] = new_history
            yield updated_history, new_history, gr.update(choices=update_conversation_list(conversations), value=conversations[convo_id]["title"]), conversations

    submit_button.click(
        fn=send_message,
        inputs=[user_input, max_tokens_slider, temperature_slider, top_p_slider, current_convo_id, history_state, conversations_state],
        outputs=[chatbot, history_state, conversation_selector, conversations_state],
        concurrency_limit=16
    ).then(
        fn=lambda: gr.update(value=""),
        inputs=None,
        outputs=user_input
    )

    clear_button.click(
        fn=lambda: ([], []),
        inputs=None,
        outputs=[chatbot, history_state]
    )

    new_convo_button.click(
        fn=start_new_conversation,
        inputs=[conversations_state],
        outputs=[current_convo_id, history_state, conversation_selector, conversations_state]
    )

    conversation_selector.change(
        fn=load_conversation,
        inputs=[conversation_selector, conversations_state, current_convo_id, history_state],
        outputs=[current_convo_id, history_state, chatbot]
    )

    example1_button.click(fn=lambda: gr.update(value=example_messages["IIT-JEE 2025 Mathematics"]), inputs=None, outputs=user_input)
    example2_button.click(fn=lambda: gr.update(value=example_messages["IIT-JEE 2025 Physics"]), inputs=None, outputs=user_input)
    example3_button.click(fn=lambda: gr.update(value=example_messages["Goldman Sachs Interview Puzzle"]), inputs=None, outputs=user_input)
    example4_button.click(fn=lambda: gr.update(value=example_messages["IIT-JEE 2024 Mathematics"]), inputs=None, outputs=user_input)


if __name__ == "__main__":
    # Queue the app so streaming generator outputs and concurrent users are handled.
    demo.queue().launch(share=True, ssr_mode=False)