# NOTE: removed non-code extraction residue (file-size banner, git-blame
# hashes, and a line-number row) that would break the Python parser.
# -----------------------------------------------------------
# Fathom-R1 14B Chatbot – per-user conversations version
# -----------------------------------------------------------
import os
import re
import uuid
from threading import Thread

import gradio as gr
import spaces
import tiktoken
import torch
from openai import OpenAI
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
# ----------------------- OpenAI client ---------------------
# OpenAI-compatible client pointed at a Hugging Face TGI inference endpoint.
# SECURITY FIX: never hard-code API keys in source. The key is now read from
# the HF_TOKEN environment variable; the old placeholder remains as the
# fallback so the app still constructs the client without it.
client = OpenAI(
    base_url="https://a7g1ajqixo23revq.us-east-1.aws.endpoints.huggingface.cloud/v1/",
    api_key=os.environ.get("HF_TOKEN", "hf_XXXXX"),
)
# ------------------ helper / formatting --------------------
def format_math(text):
    """Rewrite bracket-style math delimiters as Markdown/LaTeX ones.

    ``[ ... ]`` spans (possibly multi-line) become display math
    (``$$ ... $$``); the literal two-character markers ``\\(`` and ``\\)``
    become inline-math dollar signs.
    """
    text = re.sub(r"\[(.*?)\]", r"$$\1$$", text, flags=re.DOTALL)
    for marker in (r"\(", r"\)"):
        text = text.replace(marker, "$")
    return text
def generate_conversation_id() -> str:
    """Return a short random conversation id (first 8 hex digits of a UUID4).

    ``uuid4().hex[:8]`` is identical to ``str(uuid4())[:8]`` because the
    first dash in the canonical form sits at index 8.
    """
    return uuid.uuid4().hex[:8]
# Tokenizer used only to approximate how many tokens have been streamed back
# (see generate_response). NOTE(review): this is gpt-3.5-turbo's encoding,
# while generation happens on a different TGI-served model, so the count is a
# proxy — confirm the drift is acceptable for the budget check.
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
# ------------------ generation -----------------------------
def generate_response(user_message,
                      max_tokens,
                      temperature,
                      top_p,
                      history_state):
    """Stream an assistant reply to ``user_message``.

    Yields ``(chatbot_history, state_history)`` pairs (the same list object
    twice) after each streamed chunk so the Gradio UI updates incrementally.

    Parameters:
        user_message:  raw text typed by the user.
        max_tokens:    client-side token budget for the reply (also forwarded
                       to the API as ``max_tokens``).
        temperature, top_p:  sampling parameters forwarded to the API.
        history_state: prior messages as ``{"role", "content"}`` dicts.
    """
    if not user_message.strip():
        # BUG FIX: this function is a generator (it contains `yield`), so the
        # original `return history_state, history_state` silently discarded
        # its value (it became StopIteration.value) and the UI never received
        # an update for blank input. Yield the unchanged history, then stop.
        yield history_state, history_state
        return

    system_message = "Your role as an assistant..."
    messages = [{"role": "system", "content": system_message}]
    for m in history_state:
        messages.append({"role": m["role"], "content": m["content"]})
    messages.append({"role": "user", "content": user_message})

    try:
        response = client.chat.completions.create(
            model="tgi",
            messages=messages,
            max_tokens=int(max_tokens),
            temperature=temperature,
            top_p=top_p,
            stream=True
        )
    except Exception as e:
        # Surface a visible failure bubble instead of crashing the callback.
        print(f"[ERROR] OpenAI API call failed: {e}")
        yield history_state + [
            {"role": "user", "content": user_message},
            {"role": "assistant", "content": "⚠️ Generation failed."}
        ], history_state
        return

    assistant_response = ""
    new_history = history_state + [
        {"role": "user", "content": user_message},
        {"role": "assistant", "content": ""}
    ]
    # Client-side early stop. NOTE(review): `enc` is a gpt-3.5-turbo
    # tokenizer, so this count only approximates the served model's tokens.
    token_budget = int(max_tokens)
    tokens_seen = 0
    try:
        for chunk in response:
            if (not chunk.choices
                    or not chunk.choices[0].delta
                    or not chunk.choices[0].delta.content):
                continue
            token_text = chunk.choices[0].delta.content
            assistant_response += token_text
            tokens_seen += len(enc.encode(token_text))
            new_history[-1]["content"] = assistant_response.strip()
            yield new_history, new_history
            if tokens_seen >= token_budget:
                break
    except Exception as e:
        # BUG FIX: was a bare `except Exception: pass`, which hid stream
        # failures entirely. Keep the best-effort partial reply (final yield
        # below) but log the cause.
        print(f"[ERROR] Streaming interrupted: {e}")
    yield new_history, new_history
# ------------------ example prompts ------------------------
# Canned example prompts, one per example button in the UI below.
# NOTE(review): the actual prompt texts were elided to "..." in this copy of
# the file — restore the full strings from the original source.
example_messages = {
"IIT-JEE 2024 Mathematics": "...",
"IIT-JEE 2025 Physics": "...",
"Goldman Sachs Interview Puzzle": "...",
"IIT-JEE 2025 Mathematics": "..."
}
# ===========================================================
# UI / Gradio
# ===========================================================
with gr.Blocks(theme=gr.themes.Soft()) as demo:
# -------- session-scoped states --------
# gr.State holds one value per browser session, so each visitor gets an
# independent conversations dict / history.
conversations_state = gr.State({}) # <- one dict PER USER
current_convo_id = gr.State(generate_conversation_id())
history_state = gr.State([])
# ---------------- layout ---------------
# Static header: logo + title.
gr.HTML("""
<div style="display:flex;align-items:center;gap:16px;margin-bottom:1em;">
<div style="background-color:black;padding:6px;border-radius:8px;">
<img src="https://framerusercontent.com/images/j0KjQQyrUfkFw4NwSaxQOLAoBU.png"
style="height:48px;">
</div>
<h1 style="margin:0;">Fathom R1 14B Chatbot</h1>
</div>
""")
# Sidebar: conversation switcher + "new conversation" button.
with gr.Sidebar():
gr.Markdown("## Conversations")
conversation_selector = gr.Radio(choices=[], label="Select Conversation", interactive=True)
new_convo_button = gr.Button("New Conversation ➕")
with gr.Row():
# Left column: intro text and sampling settings.
with gr.Column(scale=1):
gr.Markdown("""Welcome to the Fathom R1 14B Chatbot, developed by Fractal AI Research! ...""")
gr.Markdown("### Settings")
max_tokens_slider = gr.Slider(6144, 32768, step=1024, value=16384, label="Max Tokens")
with gr.Accordion("Advanced Settings", open=True):
temperature_slider = gr.Slider(0.1, 2.0, value=0.6, label="Temperature")
top_p_slider = gr.Slider(0.1, 1.0, value=0.95, label="Top-p")
gr.Markdown("""We sincerely acknowledge [VIDraft]...""")
# Right column: chat window, input box, and example-prompt buttons.
with gr.Column(scale=4):
chatbot = gr.Chatbot(label="Chat", type="messages", height=520)
with gr.Row():
user_input = gr.Textbox(label="User Input", placeholder="Type your question here...", lines=3, scale=8)
with gr.Column():
submit_button = gr.Button("Send", variant="primary", scale=1)
clear_button = gr.Button("Clear", scale=1)
gr.Markdown("**Try these examples:**")
with gr.Row():
example1_button = gr.Button("IIT-JEE 2025 Mathematics")
example2_button = gr.Button("IIT-JEE 2025 Physics")
example3_button = gr.Button("Goldman Sachs Interview Puzzle")
example4_button = gr.Button("IIT-JEE 2024 Mathematics")
def update_conversation_list(conversations):
    """Return the display titles of all conversations, in insertion order."""
    return [convo["title"] for convo in conversations.values()]
def start_new_conversation(conversations):
    """Register a fresh, empty conversation and make it the active one.

    Mutates the per-session ``conversations`` dict in place and returns the
    values for (current_convo_id, history_state, conversation_selector,
    conversations_state).
    """
    fresh_id = generate_conversation_id()
    conversations[fresh_id] = {"title": f"New Conversation {fresh_id}", "messages": []}
    selector_update = gr.update(
        choices=update_conversation_list(conversations),
        value=conversations[fresh_id]["title"],
    )
    return fresh_id, [], selector_update, conversations
def load_conversation(selected_title, conversations):
    """Switch the UI to the conversation whose title was selected.

    Returns values for (current_convo_id, history_state, chatbot): the
    matching conversation's id and its message list (used for both the state
    and the visible chat).
    """
    for cid, convo in conversations.items():
        if convo["title"] == selected_title:
            return cid, convo["messages"], convo["messages"]
    # BUG FIX: the old fallback returned `current_convo_id.value` /
    # `history_state.value`, but `.value` on a gr.State component is the
    # app-level *initial* default, not this session's current value — an
    # unmatched title would reset the UI to app-start state. Returning
    # gr.update() for every output leaves the session untouched instead.
    return gr.update(), gr.update(), gr.update()
def send_message(user_message, max_tokens, temperature, top_p,
                 convo_id, history, conversations):
    """Stream a reply while keeping the conversations dict and the sidebar
    selector in sync.

    Yields values for (chatbot, history_state, conversation_selector,
    conversations_state) after every streamed chunk.
    """
    # A conversation's display title is the first five words of the prompt
    # that started it.
    short_title = " ".join(user_message.strip().split()[:5])
    if convo_id not in conversations:
        conversations[convo_id] = {"title": short_title, "messages": history}
    if conversations[convo_id]["title"].startswith("New Conversation"):
        conversations[convo_id]["title"] = short_title
    stream = generate_response(user_message, max_tokens, temperature, top_p, history)
    for visible_history, stored_history in stream:
        conversations[convo_id]["messages"] = stored_history
        selector_update = gr.update(
            choices=update_conversation_list(conversations),
            value=conversations[convo_id]["title"],
        )
        yield visible_history, stored_history, selector_update, conversations
# ------------- UI bindings ----------------------
# Send: stream the reply, then clear the input box once streaming finishes.
submit_button.click(
fn=send_message,
inputs=[user_input, max_tokens_slider, temperature_slider, top_p_slider,
current_convo_id, history_state, conversations_state],
outputs=[chatbot, history_state, conversation_selector, conversations_state],
concurrency_limit=16
).then(
fn=lambda: gr.update(value=""),
inputs=None,
outputs=user_input
)
# Clear wipes only the visible chat + session history, not the saved
# conversations dict.
clear_button.click(
fn=lambda: ([], []),
inputs=None,
outputs=[chatbot, history_state]
)
new_convo_button.click(
fn=start_new_conversation,
inputs=[conversations_state],
outputs=[current_convo_id, history_state, conversation_selector, conversations_state]
)
conversation_selector.change(
fn=load_conversation,
inputs=[conversation_selector, conversations_state],
outputs=[current_convo_id, history_state, chatbot]
)
# example buttons (unchanged)
# Each button drops its canned prompt into the input box.
example1_button.click(lambda: gr.update(value=example_messages["IIT-JEE 2025 Mathematics"]),
None, user_input)
example2_button.click(lambda: gr.update(value=example_messages["IIT-JEE 2025 Physics"]),
None, user_input)
example3_button.click(lambda: gr.update(value=example_messages["Goldman Sachs Interview Puzzle"]),
None, user_input)
example4_button.click(lambda: gr.update(value=example_messages["IIT-JEE 2024 Mathematics"]),
None, user_input)
# If running as a Space, `share=True` can be removed.
if __name__ == "__main__":
demo.queue().launch(share=True, ssr_mode=False)