helloparthshah committed on
Commit a6407c2 · 1 Parent(s): e0964c2

Files are working!

Files changed (3)
  1. main.py +21 -9
  2. requirements.txt +1 -1
  3. src/manager/manager.py +14 -5
main.py CHANGED
@@ -25,15 +25,22 @@ css = """
 
 
 def run_model(message, history):
-    history.append({
-        "role": "user",
-        "content": message,
-    })
+    print(f"User: {message}")
+    print(f"History: {history}")
+    if 'text' in message:
+        history.append({
+            "role": "user",
+            "content": message['text']
+        })
+    if 'files' in message:
+        for file in message['files']:
+            history.append({
+                "role": "user",
+                "content": (file,)
+            })
+    print(f"History: {history}")
     yield "", history
     for messages in model_manager.run(history):
-        for message in messages:
-            if message.get("role") == "summary":
-                print(f"Summary: {message.get('content', '')}")
         yield "", messages
 
 
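With multimodal input enabled (see the ChatInterface hunk below), Gradio passes message to the chat function as a dict rather than a plain string, which is what the new 'text'/'files' checks handle. A minimal sketch of the shape, with illustrative values; the paths are hypothetical:

    # What gr.ChatInterface hands to fn when multimodal=True: the typed
    # text plus a list of paths for any uploaded files.
    message = {
        "text": "What does this chart show?",
        "files": ["/tmp/gradio/chart.png"],
    }

    # run_model appends one history entry per part; a one-element tuple is
    # how a type="messages" chatbot represents a file attachment.
    history = [
        {"role": "user", "content": message["text"]},
        {"role": "user", "content": (message["files"][0],)},
    ]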
 
 
@@ -72,7 +79,12 @@ with gr.Blocks(css=css, fill_width=True, fill_height=True) as demo:
         render_markdown=True,
         placeholder="Type your message here...",
     )
-    gr.ChatInterface(fn=run_model, type="messages", chatbot=chatbot,
-                     additional_outputs=[chatbot], save_history=True)
+    gr.ChatInterface(fn=run_model,
+                     type="messages",
+                     chatbot=chatbot,
+                     additional_outputs=[chatbot],
+                     save_history=True,
+                     editable=True,
+                     multimodal=True,)
 if __name__ == "__main__":
     demo.launch()
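The two new flags change the front end: multimodal=True swaps the input box for a multimodal textbox that accepts file uploads (which is why run_model now receives a dict), and editable=True lets users edit their earlier messages. A self-contained sketch of the same wiring, assuming a recent Gradio release that supports these ChatInterface options (echo is a hypothetical stand-in for run_model):

    import gradio as gr

    def echo(message, history):
        # message is {"text": ..., "files": [...]} because multimodal=True
        history.append({"role": "user", "content": message["text"]})
        yield "", history

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(type="messages")
        gr.ChatInterface(fn=echo, type="messages", chatbot=chatbot,
                         additional_outputs=[chatbot], save_history=True,
                         editable=True, multimodal=True)

    if __name__ == "__main__":
        demo.launch()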
requirements.txt CHANGED
@@ -51,7 +51,7 @@ protobuf==5.29.4
 pyarrow==20.0.0
 pyasn1==0.6.1
 pyasn1_modules==0.4.2
-pydantic==2.11.2
+pydantic==2.10.6
 pydantic_core==2.33.1
 pydub==0.25.1
 Pygments==2.19.1
src/manager/manager.py CHANGED
@@ -11,6 +11,7 @@ import gradio as gr
 from sentence_transformers import SentenceTransformer
 import torch
 from src.tools.default_tools.memory_manager import MemoryManager
+from pathlib import Path
 
 logger = logging.getLogger(__name__)
 handler = logging.StreamHandler(sys.stdout)
 
@@ -134,10 +135,16 @@ class GeminiManager:
             # Skip thinking messages (messages with metadata)
             if not (message.get("role") == "assistant" and "metadata" in message):
                 role = "model"
-                parts = [types.Part.from_text(text=message.get("content", ""))]
                 match message.get("role"):
                     case "user":
                         role = "user"
+                        if isinstance(message["content"], tuple):
+                            path = message["content"][0]
+                            file = self.client.files.upload(file=path)
+                            formatted_history.append(file)
+                            continue
+                        else:
+                            parts = [types.Part.from_text(text=message.get("content", ""))]
                     case "memories":
                         role = "user"
                         parts = [types.Part.from_text(text="Relevant memories: "+message.get("content", ""))]
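When a history entry's content is a tuple, i.e. a file attached through the multimodal textbox, the manager now uploads it through the google-genai Files API and appends the returned File object straight into the formatted history, which generate_content accepts alongside Content objects; plain-text messages keep the old Part path, and the shared parts assignment above the match moved into the individual cases so the file branch can skip it entirely (see the next hunk). A hedged sketch of the upload flow; the model name is an assumption, and the file= keyword matches current google-genai releases (older ones used path=):

    from google import genai

    client = genai.Client()  # API key read from the environment

    # A tuple entry like ("/tmp/gradio/chart.png",) carries only the path:
    uploaded = client.files.upload(file="/tmp/gradio/chart.png")

    # The returned File handle can be mixed directly into request contents:
    response = client.models.generate_content(
        model="gemini-2.0-flash",  # assumed model name, for illustration
        contents=["Describe this image.", uploaded],
    )
    print(response.text)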
 
@@ -153,6 +160,7 @@ class GeminiManager:
                         continue
                     case _:
                         role = "model"
+                        parts = [types.Part.from_text(text=message.get("content", ""))]
             formatted_history.append(types.Content(
                 role=role,
                 parts=parts
 
@@ -206,17 +214,19 @@ class GeminiManager:
 
     def invoke_manager(self, messages):
         chat_history = self.format_chat_history(messages)
+        print(f"Chat history: {chat_history}")
         logger.debug(f"Chat history: {chat_history}")
         try:
             response = suppress_output(self.generate_response)(chat_history)
         except Exception as e:
             messages.append({
                 "role": "assistant",
-                "content": f"Error generating response: {e}"
+                "content": f"Error generating response: {str(e)}",
+                "metadata": {"title": "Error generating response"}
             })
-            logger.error(f"Error generating response", e)
+            logger.error(f"Error generating response: {e}")
             yield messages
-            return
+            return messages
         logger.debug(f"Response: {response}")
 
         if (not response.text and not response.function_calls):
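The error path now follows the same convention as the manager's thinking messages: giving the assistant entry a metadata title makes a type="messages" chatbot render it as a collapsible, titled bubble, and because format_chat_history skips assistant messages that carry metadata, the error text is also kept out of the history sent to Gemini on the next turn. Note that return messages inside a generator behaves like a bare return; the value rides on StopIteration and the Gradio loop never sees it. The shape of the appended entry (error text illustrative):

    error_entry = {
        "role": "assistant",
        "content": "Error generating response: 429 RESOURCE_EXHAUSTED",
        "metadata": {"title": "Error generating response"},
    }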
 
@@ -250,5 +260,4 @@ class GeminiManager:
                 or (call.get("role") == "assistant" and call.get("metadata", {}).get("status") == "done")):
                 messages.append(call)
                 yield from self.invoke_manager(messages)
-                return
         yield messages
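Dropping the early return means the trailing yield messages also runs once a recursive invoke_manager pass has drained, so callers always receive one final, settled message list. The control flow in miniature:

    # Minimal, runnable illustration of removing a return after `yield from`:
    def gen(n):
        if n > 0:
            yield from gen(n - 1)
            # no early return any more; fall through to the final yield
        yield n

    print(list(gen(2)))  # [0, 1, 2]; with the early return it was [0]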