Nithish310 committed on
Commit
454a9bf
·
verified ·
1 Parent(s): fcb953f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -30
app.py CHANGED
@@ -40,7 +40,7 @@ def llava(message, history):
40
 
41
  gr.Info("Analyzing image")
42
  image = Image.open(image).convert("RGB")
43
- prompt = f"<|im_start|>user <image>\n{txt}<|im_start|>assistant"
44
 
45
  inputs = processor(prompt, image, return_tensors="pt")
46
  return inputs
@@ -116,43 +116,47 @@ def respond(message, history):
116
  {"type": "function", "function": {"name": "web_search", "description": "Search query on google",
117
  "parameters": {"type": "object", "properties": {
118
  "query": {"type": "string", "description": "web search query"}},
119
- "required": ["query"]}}},
120
  {"type": "function", "function": {"name": "general_query", "description": "Reply general query of USER",
121
  "parameters": {"type": "object", "properties": {
122
  "prompt": {"type": "string", "description": "A detailed prompt"}},
123
- "required": ["prompt"]}}},
124
  {"type": "function", "function": {"name": "image_generation", "description": "Generate image for user",
125
  "parameters": {"type": "object", "properties": {
126
  "query": {"type": "string",
127
  "description": "image generation prompt"}},
128
- "required": ["query"]}}},
129
  {"type": "function",
130
  "function": {"name": "image_qna", "description": "Answer question asked by user related to image",
131
  "parameters": {"type": "object",
132
  "properties": {"query": {"type": "string", "description": "Question by user"}},
133
- "required": ["query"]}}},
134
  ]
135
 
 
136
  for msg in history:
137
  func_caller.append({"role": "user", "content": f"{str(msg[0])}"})
138
  func_caller.append({"role": "assistant", "content": f"{str(msg[1])}"})
139
 
 
140
  message_text = message["text"]
141
  func_caller.append({"role": "user",
142
  "content": f'[SYSTEM]You are a helpful assistant. You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {message_text}'})
143
 
 
144
  response = client_gemma.chat_completion(func_caller, max_tokens=200)
145
  response = str(response)
146
  try:
147
  response = response[int(response.find("{")):int(response.rindex("</"))]
148
  except:
149
- response = response[int(response.find("{")):(int(response.rfind("}")) + 1)]
150
  response = response.replace("\\n", "")
151
  response = response.replace("\\'", "'")
152
  response = response.replace('\\"', '"')
153
  response = response.replace('\\', '')
154
  print(f"\n{response}")
155
 
 
156
  try:
157
  json_data = json.loads(str(response))
158
  if json_data["name"] == "web_search":
@@ -161,17 +165,13 @@ def respond(message, history):
161
  web_results = search(query)
162
  gr.Info("Extracting relevant Info")
163
  web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
164
- messages = f"<|im_start|>system\nYou are OpenCHAT mini a helpful assistant made by Nithish. You are provided with WEB results from which you can find informations to answer users query in Structured and More better way. You do not say Unnecesarry things Only say thing which is important and relevant. You also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions.<|im_end|>"
165
- for msg in history:
166
- messages += f"\n<|im_start|>user\n{str(msg[0])}<|im_end|>"
167
- messages += f"\n<|im_start|>assistant\n{str(msg[1])}<|im_end|>"
168
- messages += f"\n<|im_start|>user\n{message_text}<|im_end|>\n<|im_start|>web_result\n{web2}<|im_end|>\n<|im_start|>assistant\n"
169
  stream = client_mixtral.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True,
170
  details=True, return_full_text=False)
171
  output = ""
172
  for response in stream:
173
  if not response.token.text == "hello":
174
- output += response.token.text.replace("<|im_end|>", "")
175
  yield output
176
  elif json_data["name"] == "image_generation":
177
  query = json_data["arguments"]["query"]
@@ -179,7 +179,7 @@ def respond(message, history):
179
  yield "Generating Image, Please wait 10 sec..."
180
  try:
181
  client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
182
- seed = random.randint(0, 999999)
183
  negativeprompt = ""
184
  image = client_sd3.text_to_image(query, negative_prompt=f"{seed},{negativeprompt}")
185
  yield gr.Image(image)
@@ -199,33 +199,24 @@ def respond(message, history):
199
  buffer += new_text
200
  yield buffer
201
  else:
202
- messages = f"<|im_start|>system\nYou are OpenGPT a Expert AI Chat bot made by Nithish. You answers users query like professional AI. You are also Mastered in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user.<|im_end|>"
203
- for msg in history:
204
- messages += f"\n<|im_start|>user\n{str(msg[0])}<|im_end|>"
205
- messages += f"\n<|im_start|>assistant\n{str(msg[1])}<|im_end|>"
206
- messages += f"\n<|im_start|>user\n{message_text}<|im_end|>\n<|im_start|>assistant\n"
207
  stream = client_yi.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True,
208
  details=True, return_full_text=False)
209
  output = ""
210
  for response in stream:
211
- if not response.token.text == "<|endoftext|>":
212
- output += response.token.text.replace("<|im_end|>", "")
213
  yield output
214
  except:
215
- messages = f"<|start_header_id|>system\nYou are OpenGPT a helpful AI CHAT BOT made by Nithish. You answers users query like professional . You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user.<|end_header_id|>"
216
- for msg in history:
217
- messages += f"\n<|start_header_id|>user\n{str(msg[0])}<|end_header_id|>"
218
- messages += f"\n<|start_header_id|>assistant\n{str(msg[1])}<|end_header_id|>"
219
- messages += f"\n<|start_header_id|>user\n{message_text}<|end_header_id|>\n<|start_header_id|>assistant\n"
220
  stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True,
221
  details=True, return_full_text=False)
222
  output = ""
223
  for response in stream:
224
  if not response.token.text == "<|eot_id|>":
225
- output += response.token.text
226
  yield output
227
 
228
-
229
  demo = gr.ChatInterface(
230
  fn=respond,
231
  chatbot=gr.Chatbot(
@@ -236,11 +227,11 @@ demo = gr.ChatInterface(
236
  description="# OpenGPT 4o \n ### chat, generate images, perform web searches, and Q&A with images.",
237
  textbox=gr.MultimodalTextbox(),
238
  multimodal=True,
239
- concurrency_limit=200,
240
  cache_examples=False,
241
  theme="default",
242
  css=
243
- """.chat-container {
244
  border: 1px solid #ccc;
245
  border-radius: 5px;
246
  padding: 10px;
@@ -284,6 +275,6 @@ demo = gr.ChatInterface(
284
  content: 'Bot';
285
  font-weight: bold;
286
  }
287
- """,
288
  )
289
  demo.launch()
 
40
 
41
  gr.Info("Analyzing image")
42
  image = Image.open(image).convert("RGB")
43
+ prompt = f"<image>\n{txt}"
44
 
45
  inputs = processor(prompt, image, return_tensors="pt")
46
  return inputs
 
116
  {"type": "function", "function": {"name": "web_search", "description": "Search query on google",
117
  "parameters": {"type": "object", "properties": {
118
  "query": {"type": "string", "description": "web search query"}},
119
+ "required": ["query"]}},
120
  {"type": "function", "function": {"name": "general_query", "description": "Reply general query of USER",
121
  "parameters": {"type": "object", "properties": {
122
  "prompt": {"type": "string", "description": "A detailed prompt"}},
123
+ "required": ["prompt"]}},
124
  {"type": "function", "function": {"name": "image_generation", "description": "Generate image for user",
125
  "parameters": {"type": "object", "properties": {
126
  "query": {"type": "string",
127
  "description": "image generation prompt"}},
128
+ "required": ["query"]}},
129
  {"type": "function",
130
  "function": {"name": "image_qna", "description": "Answer question asked by user related to image",
131
  "parameters": {"type": "object",
132
  "properties": {"query": {"type": "string", "description": "Question by user"}},
133
+ "required": ["query"]}},
134
  ]
135
 
136
+
137
  for msg in history:
138
  func_caller.append({"role": "user", "content": f"{str(msg[0])}"})
139
  func_caller.append({"role": "assistant", "content": f"{str(msg[1])}"})
140
 
141
+
142
  message_text = message["text"]
143
  func_caller.append({"role": "user",
144
  "content": f'[SYSTEM]You are a helpful assistant. You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {message_text}'})
145
 
146
+
147
  response = client_gemma.chat_completion(func_caller, max_tokens=200)
148
  response = str(response)
149
  try:
150
  response = response[int(response.find("{")):int(response.rindex("</"))]
151
  except:
152
+ response = response[int(response.find("{")):(int(response.find("}")) + 1)]
153
  response = response.replace("\\n", "")
154
  response = response.replace("\\'", "'")
155
  response = response.replace('\\"', '"')
156
  response = response.replace('\\', '')
157
  print(f"\n{response}")
158
 
159
+
160
  try:
161
  json_data = json.loads(str(response))
162
  if json_data["name"] == "web_search":
 
165
  web_results = search(query)
166
  gr.Info("Extracting relevant Info")
167
  web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
168
+ messages = f"[SYSTEM]You are OpenCHAT mini a helpful assistant made by Nithish. You are provided with WEB results from which you can find informations to answer users query in Structured and More better way. You do not say Unnecesary things Only say thing which is important and relevant. You also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions. [USER]\n{message_text}[WEB_RESULT]\n{web2}[ASSISTANT]"
 
 
 
 
169
  stream = client_mixtral.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True,
170
  details=True, return_full_text=False)
171
  output = ""
172
  for response in stream:
173
  if not response.token.text == "hello":
174
+ output += response.token.text.replace("]", "")
175
  yield output
176
  elif json_data["name"] == "image_generation":
177
  query = json_data["arguments"]["query"]
 
179
  yield "Generating Image, Please wait 10 sec..."
180
  try:
181
  client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
182
+ seed = random.randint(0, 99999)
183
  negativeprompt = ""
184
  image = client_sd3.text_to_image(query, negative_prompt=f"{seed},{negativeprompt}")
185
  yield gr.Image(image)
 
199
  buffer += new_text
200
  yield buffer
201
  else:
202
+ messages = f"[SYSTEM]You are OpenGPT a Expert AI Chat bot made by Nithish. You answers users query like professional . You are also Mastered in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. [USER]\n{message_text}[ASSISTANT]"
 
 
 
 
203
  stream = client_yi.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True,
204
  details=True, return_full_text=False)
205
  output = ""
206
  for response in stream:
207
+ if not response.token.text == " ":
208
+ output += response.token.text
209
  yield output
210
  except:
211
+ messages = f"[SYSTEM]You are OpenGPT a helpful AI CHAT BOT made by Nithish. You answers users query like professional . You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user.[USER]\n{message_text}[ASSISTANT]"
 
 
 
 
212
  stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True,
213
  details=True, return_full_text=False)
214
  output = ""
215
  for response in stream:
216
  if not response.token.text == "<|eot_id|>":
217
+ output += response.token
218
  yield output
219
 
 
220
  demo = gr.ChatInterface(
221
  fn=respond,
222
  chatbot=gr.Chatbot(
 
227
  description="# OpenGPT 4o \n ### chat, generate images, perform web searches, and Q&A with images.",
228
  textbox=gr.MultimodalTextbox(),
229
  multimodal=True,
230
+ concurrency_limit=20,
231
  cache_examples=False,
232
  theme="default",
233
  css=
234
+ .chat-container {
235
  border: 1px solid #ccc;
236
  border-radius: 5px;
237
  padding: 10px;
 
275
  content: 'Bot';
276
  font-weight: bold;
277
  }
278
+ ,
279
  )
280
  demo.launch()