amusktweewt committed on
Commit
58bd705
·
verified ·
1 Parent(s): 2471f2f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +111 -87
app.py CHANGED
@@ -17,62 +17,36 @@ models = [
17
  }
18
  ]
19
 
20
- # Build the custom HTML for a disabled-capable <select>.
21
- dropdown_options = ""
22
- for model in models:
23
- label = f"{model['name']}: {model['description']}"
24
- disabled_attr = "disabled" if not model["enabled"] else ""
25
- if not model["enabled"]:
26
- # Mark label visually so the user sees it's disabled
27
- label = f"{model['name']} (Disabled): {model['description']}"
28
- dropdown_options += f'<option value="{model["id"]}" {disabled_attr}>{label}</option>\n'
29
-
30
- # Updated CSS to follow system theme and enlarge chat area.
31
- custom_css = """
32
- <style>
33
- /* Style for the custom dropdown, using inherited colors */
34
- .custom-select {
35
- background-color: transparent; /* Inherit system background */
36
- color: inherit; /* Inherit system text color */
37
- border: 1px solid var(--border-color, #ccc);
38
- padding: 8px;
39
- border-radius: 4px;
40
- font-size: 1rem;
41
- width: 100%;
42
- box-sizing: border-box;
43
- margin-bottom: 1rem;
44
- }
45
- /* Increase the minimum height of the chat area */
46
- #chat_interface .chatbox {
47
- min-height: 500px;
48
- }
49
- </style>
50
- """
51
-
52
- dropdown_html = f"""
53
- {custom_css}
54
- <label for="model_select"><strong>Select Model:</strong></label>
55
- <select id="model_select" class="custom-select"
56
- onchange="document.getElementById('hidden_model').value = this.value;">
57
- {dropdown_options}
58
- </select>
59
- """
60
 
61
  def respond(message, history: list[tuple[str, str]], model_id, system_message, max_tokens, temperature, top_p):
62
  """
63
- Build a chat prompt and stream the response token-by-token from the model.
 
 
 
 
64
  """
 
65
  client = InferenceClient(model_id)
 
 
66
  messages = []
67
  if system_message:
68
  messages.append({"role": "system", "content": system_message})
 
69
  if history:
70
  for user_msg, bot_msg in history:
71
  messages.append({"role": "user", "content": user_msg})
72
  messages.append({"role": "assistant", "content": bot_msg})
 
73
  messages.append({"role": "user", "content": message})
74
  messages.append({"role": "assistant", "content": ""})
 
75
  response_text = ""
 
76
  for resp in client.chat_completion(
77
  messages,
78
  max_tokens=max_tokens,
@@ -84,51 +58,101 @@ def respond(message, history: list[tuple[str, str]], model_id, system_message, m
84
  response_text += token
85
  yield response_text
86
 
87
- # -- 3) BUILD THE UI IN A BLOCKS CONTEXT --
88
- with gr.Blocks() as demo:
89
- # Custom HTML dropdown for model selection.
90
- gr.HTML(value=dropdown_html)
91
-
92
- # Hidden textbox to store the current model ID.
93
- hidden_model = gr.Textbox(
94
- value=models[0]["id"], # Default to the first model
95
- visible=False,
96
- elem_id="hidden_model"
97
- )
98
-
99
- # ChatInterface with an element ID for styling.
100
- chat = gr.ChatInterface(
101
- respond,
102
- additional_inputs=[
103
- hidden_model,
104
- gr.Textbox(
105
- value="You are a friendly Chatbot.",
106
- label="System message"
107
- ),
108
- gr.Slider(
109
- minimum=1,
110
- maximum=2048,
111
- value=512,
112
- step=1,
113
- label="Max new tokens"
114
- ),
115
- gr.Slider(
116
- minimum=0.1,
117
- maximum=4.0,
118
- value=0.7,
119
- step=0.1,
120
- label="Temperature"
121
- ),
122
- gr.Slider(
123
- minimum=0.1,
124
- maximum=1.0,
125
- value=0.95,
126
- step=0.05,
127
- label="Top-p (nucleus sampling)"
128
- ),
129
- ],
130
- elem_id="chat_interface"
131
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
132
 
133
  if __name__ == "__main__":
134
- demo.launch()
 
17
  }
18
  ]
19
 
20
+ def get_selected_model_id(evt: gr.SelectData):
21
+ """Helper to extract the model ID from dropdown selection"""
22
+ return models[evt.index]["id"] if models[evt.index]["enabled"] else models[0]["id"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
  def respond(message, history: list[tuple[str, str]], model_id, system_message, max_tokens, temperature, top_p):
25
  """
26
+ Builds a chat prompt using a simple template:
27
+ - Optionally includes a system message.
28
+ - Iterates over conversation history (each exchange as a tuple of (user, assistant)).
29
+ - Adds the new user message and appends an empty assistant turn.
30
+ Then it streams the response from the model.
31
  """
32
+ # -- 2) Instantiate the InferenceClient using the chosen model --
33
  client = InferenceClient(model_id)
34
+
35
+ # Build the messages list.
36
  messages = []
37
  if system_message:
38
  messages.append({"role": "system", "content": system_message})
39
+
40
  if history:
41
  for user_msg, bot_msg in history:
42
  messages.append({"role": "user", "content": user_msg})
43
  messages.append({"role": "assistant", "content": bot_msg})
44
+
45
  messages.append({"role": "user", "content": message})
46
  messages.append({"role": "assistant", "content": ""})
47
+
48
  response_text = ""
49
+ # Stream the response token-by-token.
50
  for resp in client.chat_completion(
51
  messages,
52
  max_tokens=max_tokens,
 
58
  response_text += token
59
  yield response_text
60
 
61
+ # -- 3) BUILD THE UI WITH A PROPER GRADIO DROPDOWN --
62
+ with gr.Blocks(css="""
63
+ .container {
64
+ max-width: 900px !important;
65
+ margin-left: auto;
66
+ margin-right: auto;
67
+ }
68
+ #chatbot {
69
+ height: 600px !important;
70
+ }
71
+ .model-dropdown .gr-dropdown {
72
+ border-radius: 8px;
73
+ }
74
+ """) as demo:
75
+ with gr.Row():
76
+ with gr.Column(elem_classes="container"):
77
+ # Create proper Gradio Dropdown that will respect theme
78
+ model_choices = [f"{m['name']}: {m['description']}" for m in models]
79
+ model_dropdown = gr.Dropdown(
80
+ choices=model_choices,
81
+ value=model_choices[0],
82
+ label="Select Model",
83
+ elem_classes="model-dropdown",
84
+ scale=3
85
+ )
86
+
87
+ # Hidden textbox to store the current model ID (will be read by 'respond')
88
+ model_id = gr.Textbox(
89
+ value=models[0]["id"],
90
+ visible=False,
91
+ elem_id="hidden_model"
92
+ )
93
+
94
+ # Update the hidden model_id when dropdown changes
95
+ def update_model_id(evt):
96
+ selected_index = evt.index
97
+ if models[selected_index]["enabled"]:
98
+ return models[selected_index]["id"]
99
+ else:
100
+ # If disabled model selected, stay with default
101
+ return models[0]["id"]
102
+
103
+ model_dropdown.select(
104
+ update_model_id,
105
+ inputs=[],
106
+ outputs=[model_id]
107
+ )
108
+
109
+ # System message and parameter controls in a collapsible section
110
+ with gr.Accordion("Advanced Settings", open=False):
111
+ system_message = gr.Textbox(
112
+ value="You are a friendly Chatbot.",
113
+ label="System message"
114
+ )
115
+
116
+ with gr.Row():
117
+ with gr.Column(scale=1):
118
+ max_tokens = gr.Slider(
119
+ minimum=1,
120
+ maximum=2048,
121
+ value=512,
122
+ step=1,
123
+ label="Max new tokens"
124
+ )
125
+
126
+ with gr.Column(scale=1):
127
+ temperature = gr.Slider(
128
+ minimum=0.1,
129
+ maximum=4.0,
130
+ value=0.7,
131
+ step=0.1,
132
+ label="Temperature"
133
+ )
134
+
135
+ with gr.Column(scale=1):
136
+ top_p = gr.Slider(
137
+ minimum=0.1,
138
+ maximum=1.0,
139
+ value=0.95,
140
+ step=0.05,
141
+ label="Top-p (nucleus sampling)"
142
+ )
143
+
144
+ # The ChatInterface with a larger chat area and our parameters
145
+ chat = gr.ChatInterface(
146
+ respond,
147
+ additional_inputs=[
148
+ model_id,
149
+ system_message,
150
+ max_tokens,
151
+ temperature,
152
+ top_p,
153
+ ],
154
+ chatbot=gr.Chatbot(elem_id="chatbot", height=600)
155
+ )
156
 
157
  if __name__ == "__main__":
158
+ demo.launch()