Update app.py
app.py CHANGED
@@ -85,7 +85,6 @@ client_yi = InferenceClient("01-ai/Yi-1.5-34B-Chat")
 def respond(message, history):
     func_caller = []
 
-    # Ensure 'message' is a dictionary
     if isinstance(message, dict):
         user_prompt = message
         if "files" in message and message["files"]:
@@ -93,7 +92,7 @@ def respond(message, history):
             streamer = TextIteratorStreamer(None, skip_prompt=True, **{"skip_special_tokens": True})
             generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
 
-            thread = Thread(target=None.generate, kwargs=generation_kwargs)
+            thread = Thread(target=None.generate, kwargs=generation_kwargs)  # Replace None with actual model
             thread.start()
 
             buffer = ""
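The `None` placeholders here stand in for a tokenizer and a model that the diff never names. For reference, a minimal sketch of the threaded-streaming pattern this code is reaching for, with Qwen/Qwen2-0.5B-Instruct used purely as an example checkpoint:

# Minimal sketch of the placeholder pattern; the checkpoint is an assumption.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")

inputs = tokenizer("Hello!", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)

# generate() runs on a worker thread so the main thread can drain the
# streamer token by token while generation is still in progress.
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

buffer = ""
for new_text in streamer:
    buffer += new_text

Running generate() off the main thread is what lets the Gradio handler yield partial output as tokens arrive.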
@@ -115,7 +114,7 @@ def respond(message, history):
             message_text = message["text"]
             func_caller.append({"role": "user", "content": f'[SYSTEM]You are a helpful assistant. You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {message_text}'})
 
-            response = None.chat_completion(func_caller, max_tokens=200)
+            response = None.chat_completion(func_caller, max_tokens=200)  # Replace None with actual model
             response = str(response)
             try:
                 response = response[int(response.find("{")):int(response.rindex("</"))]
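Here `None` presumably refers to one of the InferenceClient objects created near the top of the file (the first hunk header shows client_yi). A hedged sketch of the call, assuming the huggingface_hub client; reusing client_yi is an assumption:

from huggingface_hub import InferenceClient

client_yi = InferenceClient("01-ai/Yi-1.5-34B-Chat")  # defined at line 85 of app.py

# func_caller holds chat messages; this one-element list is just an example
func_caller = [{"role": "user", "content": "What is the weather in Paris?"}]
response = client_yi.chat_completion(func_caller, max_tokens=200)
response = str(response)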
@@ -140,7 +139,7 @@ def respond(message, history):
                 messages += f"\nuser\n{str(msg[0])}"
                 messages += f"\nassistant\n{str(msg[1])}"
             messages+=f"\nuser\n{message_text}\nweb_result\n{web2}\nassistant\n"
-            stream = None.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
+            stream = None.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)  # Replace None with actual model
             output = ""
             for response in stream:
                 if not response.token.text == "":
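The same placeholder appears again at line 190. With a real InferenceClient the streaming loop looks roughly like this (the model name is an example, not necessarily the Space's actual choice):

from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")  # example checkpoint

def stream_reply(messages):
    stream = client.text_generation(
        messages, max_new_tokens=2000, do_sample=True,
        stream=True, details=True, return_full_text=False,
    )
    output = ""
    for response in stream:
        # with stream=True and details=True each item carries a .token field
        if response.token.text != "":
            output += response.token.text
            yield output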
@@ -153,7 +152,7 @@ def respond(message, history):
                 image = image_gen(f"{str(query)}")
                 yield gr.Image(image[1])
             except:
-                client_sd3 = None.InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
+                client_sd3 = None.InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")  # Replace None with actual model
                 seed = random.randint(0, 999999)
                 image = client_sd3.text_to_image(query, negative_prompt=f"{seed}")
                 yield gr.Image(image)
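Even with the new comment, None.InferenceClient(...) raises AttributeError at runtime; the intent is presumably the class from huggingface_hub. Note also that the original passes the random seed as negative_prompt, which expects descriptive text, so that looks like a separate bug. A sketch of the probable intent:

import random
from huggingface_hub import InferenceClient

client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
query = "a watercolor fox"  # stand-in for the query built earlier in respond()
seed = random.randint(0, 999999)  # kept from the original, though it is never used as a seed here
image = client_sd3.text_to_image(query, negative_prompt="blurry, low quality")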
@@ -170,11 +169,11 @@ def respond(message, history):
             image = Image.open(image).convert("RGB")
             prompt = f"user <image>\n{txt}assistant"
 
-            inputs = None(prompt, image, return_tensors="pt")
+            inputs = None(prompt, image, return_tensors="pt")  # Replace None with actual model
             streamer = TextIteratorStreamer(None, skip_prompt=True, **{"skip_special_tokens": True})
             generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
 
-            thread = Thread(target=None.generate, kwargs=generation_kwargs)
+            thread = Thread(target=None.generate, kwargs=generation_kwargs)  # Replace None with actual model
             thread.start()
 
             buffer = ""
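inputs = None(prompt, image, return_tensors="pt") is a processor call with the processor object elided. A sketch assuming a LLaVA-style checkpoint (the name is illustrative); keyword arguments are used because the positional order of text and images has shifted across transformers releases:

from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")  # example checkpoint
image = Image.open("photo.jpg").convert("RGB")
prompt = "user <image>\nDescribe this image.assistant"
inputs = processor(text=prompt, images=image, return_tensors="pt")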
@@ -187,7 +186,7 @@ def respond(message, history):
             messages += f"\nuser\n{str(msg[0])}"
             messages += f"\nassistant\n{str(msg[1])}"
         messages += f"\nuser\n{str(message)}"
-        stream = None.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
+        stream = None.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)  # Replace None with actual model
         output = ""
         for response in stream:
             if not response.token.text == "":
@@ -196,13 +195,12 @@ def respond(message, history):
     else:
         yield "Error: Message format is incorrect."
 
-
 # Interface Layout
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot(label="ChatGPT Style Chatbot", height=500)
 
     with gr.Row():
-        upload_button = gr.
+        upload_button = gr.File(label="Upload File", elem_id="upload-button")
         with gr.Column(scale=8):
             text_input = gr.Textbox(label="", placeholder="Type your message here...", lines=1)
         submit_button = gr.Button("Send")
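This last change completes the dangling upload_button = gr. with a concrete gr.File component. The diff stops before any event wiring; a hedged sketch of how the pieces might connect, where the .click() binding is pure assumption and respond is the generator defined earlier in app.py:

import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="ChatGPT Style Chatbot", height=500)

    with gr.Row():
        upload_button = gr.File(label="Upload File", elem_id="upload-button")
        with gr.Column(scale=8):
            text_input = gr.Textbox(label="", placeholder="Type your message here...", lines=1)
        submit_button = gr.Button("Send")

    # hypothetical wiring: stream respond() output into the chatbot
    submit_button.click(fn=respond, inputs=[text_input, chatbot], outputs=chatbot)

demo.launch()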