Update app.py
app.py
CHANGED
@@ -5,6 +5,7 @@ import os
 
 API_ENDPOINT = os.getenv("API_ENDPOINT", "none")
 API_TOKEN = os.getenv("API_TOKEN")
+MODEL_ID = os.getenv("MODEL_ID", "none")  # Default value if not set
 
 def get_ai_response(message, history):
     """Fetch AI response from the API using the modern messages format."""
@@ -20,7 +21,7 @@ def get_ai_response(message, history):
     messages.append({"role": "user", "content": message})
 
     payload = {
-        "model": …
+        "model": MODEL_ID,  # Use the environment variable here
         "messages": messages,
         "stream": False,
         "max_tokens": 10000,
@@ -89,10 +90,13 @@ summary { cursor: pointer; color: #70a9e6; }
 .reasoning-content { padding: 10px; margin-top: 5px; background-color: #404040; border-radius: 5px; }
 """
 
-with gr.Blocks(css=custom_css, title="chutesai/Llama-4-Maverick-17B-128E-Instruc…
+# Get model name for display (use the full model ID from environment variable)
+model_display_name = MODEL_ID
+
+with gr.Blocks(css=custom_css, title=model_display_name) as demo:
     with gr.Column():
-        gr.Markdown("## …
-        gr.Markdown("This is a demo of …
+        gr.Markdown(f"## {model_display_name}")
+        gr.Markdown(f"This is a demo of {model_display_name}")
         chatbot = gr.Chatbot(elem_id="chatbot", render_markdown=False, bubble_full_width=True)
         with gr.Row():
             message = gr.Textbox(placeholder="Type your message...", show_label=False, container=False)
@@ -171,4 +175,4 @@ with gr.Blocks(css=custom_css, title="chutesai/Llama-4-Maverick-17B-128E-Instruc…
     )
 
 demo.queue()
-demo.launch()
+demo.launch()
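For context, a minimal, self-contained sketch (an assumption, not code from this repository) of how the API_ENDPOINT, API_TOKEN, and MODEL_ID environment variables touched by this commit would typically drive an OpenAI-style chat-completions request. The actual request logic inside get_ai_response lies outside the hunks shown above, so the requests call, Authorization header, and timeout below are illustrative only.

# Sketch only: illustrates the payload shape from the diff with MODEL_ID
# supplied via the environment; not the app's real request code.
import os
import requests

API_ENDPOINT = os.getenv("API_ENDPOINT", "none")
API_TOKEN = os.getenv("API_TOKEN")
MODEL_ID = os.getenv("MODEL_ID", "none")  # same default as in the diff

payload = {
    "model": MODEL_ID,  # value now comes from the environment, not a hardcoded string
    "messages": [{"role": "user", "content": "Hello!"}],
    "stream": False,
    "max_tokens": 10000,
}

response = requests.post(
    API_ENDPOINT,
    headers={"Authorization": f"Bearer {API_TOKEN}"},  # assumed auth scheme
    json=payload,
    timeout=60,
)
print(response.json())

With this change, pointing the Space at a different model should only require setting MODEL_ID in the host environment; the payload and the UI title/headings pick it up automatically.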