helloparthshah committed
Commit d648fe6 · Parent(s): 1674315

History and multiple conversations work!

Files changed (3):
  1. main.py (+60 −90)
  2. models/system3.prompt (+8 −0)
  3. src/manager.py (+29 −7)
main.py CHANGED
@@ -1,108 +1,78 @@
-from google.genai import types
-from src.manager import GeminiManager
-from src.tool_manager import ToolManager
 import gradio as gr
-import time
+
 import base64
+from src.manager import GeminiManager
+
+model_manager = GeminiManager(gemini_model="gemini-2.0-flash")
 
 _logo_bytes = open("HASHIRU_LOGO.png", "rb").read()
 _logo_b64 = base64.b64encode(_logo_bytes).decode()
 _header_html = f"""
 <div style="
     display: flex;
-    flex-direction: column;
+    flex-direction: row;
     align-items: center;
-    padding-right: 24px;
+    justify-content: flex-start;
+    width: 30%;
 ">
-    <img src="data:image/png;base64,{_logo_b64}" width="20" height="20" />
-    <span style="margin-top: 8px; font-size: 20px; font-weight: bold; color: white;">
+    <img src="data:image/png;base64,{_logo_b64}" width="40" class="logo"/>
+    <h1>
         HASHIRU AI
-    </span>
+    </h1>
 </div>
 """
+css = """
+.logo { margin-right: 20px; }
+"""
 
-if __name__ == "__main__":
-    model_manager = GeminiManager(gemini_model="gemini-2.0-flash")
-
-    def user_message(msg: str, history: list) -> tuple[str, list]:
-        """Adds user message to chat history"""
-        history.append(gr.ChatMessage(role="user", content=msg))
-        return "", history
-
-    def handle_undo(history, undo_data: gr.UndoData):
-        return history[:undo_data.index], history[undo_data.index]['content']
-
-    def handle_retry(history, retry_data: gr.RetryData):
-        new_history = history[:retry_data.index+1]
-        # yield new_history, gr.update(interactive=False,)
-        yield from model_manager.run(new_history)
-
-    def handle_edit(history, edit_data: gr.EditData):
-        new_history = history[:edit_data.index+1]
-        new_history[-1]['content'] = edit_data.value
-        # yield new_history, gr.update(interactive=False,)
-        yield from model_manager.run(new_history)
-
-    def update_model(model_name):
-        print(f"Model changed to: {model_name}")
-        pass
-
-    css = """
-    #title-row { background: #2c2c2c; border-radius: 8px; padding: 8px; }
-    """
-    with gr.Blocks(css=css, fill_width=True, fill_height=True) as demo:
-        local_storage = gr.BrowserState(["", ""])
-        with gr.Column(scale=1):
-            with gr.Row(scale=0):
-                gr.HTML(_header_html)
-                model_dropdown = gr.Dropdown(
-                    choices=[
-                        "HASHIRU",
-                        "Static-HASHIRU",
-                        "Cloud-Only HASHIRU",
-                        "Local-Only HASHIRU",
-                        "No-Economy HASHIRU",
-                    ],
-                    value="HASHIRU",
-                    # label="HASHIRU",
-                    interactive=True,
-                )
-
-            model_dropdown.change(fn=update_model, inputs=model_dropdown, outputs=[])
-        with gr.Row(scale=1):
-            with gr.Sidebar(position="left"):
-                buttons = []
-                for i in range(1, 6):
-                    button = gr.Button(f"Button {i}", elem_id=f"button-{i}")
-                    button.click(fn=lambda x=i: print(f"Button {x} clicked"), inputs=[], outputs=[])
-                    buttons.append(button)
-            with gr.Column(scale=1):
-                chatbot = gr.Chatbot(
-                    avatar_images=("HASHIRU_2.png", "HASHIRU.png"),
-                    type="messages",
-                    show_copy_button=True,
-                    editable="user",
-                    scale=1,
-                    render_markdown=True,
-                )
-                input_box = gr.Textbox(label="Chat Message", scale=0, interactive=True, submit_btn=True)
-
-                chatbot.undo(handle_undo, chatbot, [chatbot, input_box])
-                chatbot.retry(handle_retry, chatbot, [chatbot, input_box])
-                chatbot.edit(handle_edit, chatbot, [chatbot, input_box])
-
-                input_box.submit(
-                    user_message,  # Add user message to chat
-                    inputs=[input_box, chatbot],
-                    outputs=[input_box, chatbot],
-                    queue=False,
-                ).then(
-                    model_manager.ask_llm,  # Generate and stream response
-                    inputs=chatbot,
-                    outputs=[chatbot, input_box],
-                    queue=True,
-                    show_progress="full",
-                    trigger_mode="always_last"
-                )
-
+
+
+def run_model(message, history):
+    history.append({
+        "role": "user",
+        "content": message,
+    })
+    yield "", history
+    for messages in model_manager.run(history):
+        for message in messages:
+            if message.get("role") == "summary":
+                print(f"Summary: {message.get('content', '')}")
+        yield "", messages
+
+
+def update_model(model_name):
+    print(f"Model changed to: {model_name}")
+    pass
+
+
+with gr.Blocks(css=css, fill_width=True, fill_height=True) as demo:
+    with gr.Column(scale=1):
+        with gr.Row(scale=0):
+            gr.Markdown(_header_html)
+            model_dropdown = gr.Dropdown(
+                choices=[
+                    "HASHIRU",
+                    "Static-HASHIRU",
+                    "Cloud-Only HASHIRU",
+                    "Local-Only HASHIRU",
+                    "No-Economy HASHIRU",
+                ],
+                value="HASHIRU",
+                interactive=True,
+            )
+
+            model_dropdown.change(
+                fn=update_model, inputs=model_dropdown, outputs=[])
+        with gr.Row(scale=1):
+            chatbot = gr.Chatbot(
+                avatar_images=("HASHIRU_2.png", "HASHIRU.png"),
+                type="messages",
+                show_copy_button=True,
+                editable="user",
+                scale=1,
+                render_markdown=True,
+                placeholder="Type your message here...",
+            )
+            gr.ChatInterface(fn=run_model, type="messages", chatbot=chatbot,
+                             additional_outputs=[chatbot], save_history=True)
+if __name__ == "__main__":
     demo.launch()
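
The new `run_model` relies on Gradio's generator contract for `gr.ChatInterface` with `additional_outputs`: each `yield` supplies the textbox value first (always `""` here, to clear it) and the updated history second, which is routed into the `chatbot` component. A minimal sketch of that wiring, with a hypothetical `StubManager` standing in for `GeminiManager`:

```python
# Sketch of the streaming contract used by run_model; StubManager is a
# hypothetical stand-in for GeminiManager and is not part of this commit.
import gradio as gr


class StubManager:
    def run(self, history):
        """Yield the full message list after each model/tool step."""
        yield history + [{"role": "assistant", "content": "Thinking..."}]
        yield history + [{"role": "assistant", "content": "Done."}]


model_manager = StubManager()


def run_model(message, history):
    history.append({"role": "user", "content": message})
    yield "", history  # clear the textbox and echo the user turn immediately
    for messages in model_manager.run(history):
        yield "", messages  # stream each intermediate history into the chatbot


with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    gr.ChatInterface(fn=run_model, type="messages", chatbot=chatbot,
                     additional_outputs=[chatbot])

if __name__ == "__main__":
    demo.launch()
```

Because `save_history=True` persists whatever the chatbot last received, yielding the complete message list on every step keeps each saved conversation self-contained.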
models/system3.prompt CHANGED
@@ -33,6 +33,10 @@ There is a strict resource constraint (budget) you need to follow. You start wit
 If you're over this budget, you can no longer create new tools. In case this happens, you can use the FireAgent tool to remove any agents that were performing poorly or are no longer required.
 </Info>
 
+<Info>
+Once you're finished with a conversation, you can use the DigestConversation tool to clear the conversation history and create a summary of the interaction. This summary can be used to improve the system's performance and user experience in future interactions.
+</Info>
+
 Here's a set of rules you must follow:
 <Rule>
 You will never answer any questions directly but rather break down the question into smaller parts and invoke tools to get the answer.
@@ -116,4 +120,8 @@ If you think there are multiple paths to proceed, ask the user on which path to
 
 <Rule>
 When you go over the resource budget, you must carefully evaluate which agent is least likely to be useful going forward before firing it. Only fire an agent to create a new one when absolutely necessary.
 </Rule>
+
+<Rule>
+When using the DigestConversation tool, ensure that the summary is clear and concise, capturing the main points of the conversation without unnecessary details. Include any important information such as how errors were solved, the user's preferences, and any other relevant context that could help in future interactions.
+</Rule>
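
To make the DigestConversation flow concrete: the model ends a finished conversation by calling the tool with a single `summary` argument, which `src/manager.py` (below) converts into a `summary`-role message. The values here are made up for illustration:

```python
# Illustrative only: the arguments of a DigestConversation call and the
# digest message that src/manager.py builds from them.
function_call_args = {
    "summary": (
        "User wired up chat history and multiple conversations in the Gradio UI; "
        "a streaming bug was fixed by yielding the full message list each step."
    ),
}

digest_message = {
    "role": "summary",  # consumed by run_model, printed rather than rendered
    "content": function_call_args["summary"],
}
```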
src/manager.py CHANGED
@@ -40,13 +40,31 @@ class GeminiManager:
         self.messages = []
 
     def generate_response(self, messages):
+        tools = self.toolsLoader.getTools()
+        function = types.FunctionDeclaration(
+            name="DigestConversation",
+            description="Digest the conversation and store the summary provided.",
+            parameters=types.Schema(
+                type="object",
+                properties={
+                    # string that summarizes the conversation
+                    "summary": types.Schema(
+                        type="string",
+                        description="A summary of the conversation including all the important points.",
+                    ),
+                },
+                required=["summary"],
+            ),
+        )
+        toolType = types.Tool(function_declarations=[function])
+        tools.append(toolType)
         return self.client.models.generate_content(
             model=self.model_name,
             contents=messages,
             config=types.GenerateContentConfig(
                 system_instruction=self.system_prompt,
                 temperature=0.2,
-                tools=self.toolsLoader.getTools(),
+                tools=tools,
             ),
         )
 
@@ -56,6 +74,13 @@ class GeminiManager:
         toolResponse = None
         logger.info(
            f"Function Name: {function_call.name}, Arguments: {function_call.args}")
+        if function_call.name == "DigestConversation":
+            logger.info("Digesting conversation...")
+            summary = function_call.args["summary"]
+            return {
+                "role": "summary",
+                "content": f"{summary}",
+            }
         try:
             toolResponse = self.toolsLoader.runTool(
                 function_call.name, function_call.args)
@@ -116,9 +141,6 @@ class GeminiManager:
                 parts=parts
             ))
         return formatted_history
-
-    def ask_llm(self, messages):
-        yield from self.run(messages)
 
     def run(self, messages):
         chat_history = self.format_chat_history(messages)
@@ -132,7 +154,7 @@ class GeminiManager:
                 "content": f"Error generating response: {e}"
             })
             logger.error(f"Error generating response: {e}")
-            yield messages, gr.update(interactive=True)
+            yield messages
             return
         logger.debug(f"Response: {response}")
 
@@ -149,7 +171,7 @@ class GeminiManager:
                 "role": "assistant",
                 "content": response.text
             })
-        yield messages, gr.update(interactive=False,)
+        yield messages
 
         # Attach the function call response to the messages
         if response.candidates[0].content and response.candidates[0].content.parts:
@@ -165,4 +187,4 @@ class GeminiManager:
             messages.append(calls)
             yield from self.run(messages)
             return
-        yield messages, gr.update(interactive=True)
+        yield messages
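
One way to exercise the new digest path end to end is to drive `run` directly and watch for the summary turn. This is a hedged sketch, assuming `GeminiManager` can be constructed exactly as `main.py` does (API key, tool loader, and system prompt all in place):

```python
# Hedged smoke test: iterate GeminiManager.run without the UI and print the
# digest when it arrives. Assumes the same environment main.py runs in.
from src.manager import GeminiManager

manager = GeminiManager(gemini_model="gemini-2.0-flash")
history = [{"role": "user",
            "content": "We're done here; please digest this conversation."}]

for messages in manager.run(history):
    for turn in messages:
        # Dict turns carry a "role"; the digest arrives as role == "summary".
        if isinstance(turn, dict) and turn.get("role") == "summary":
            print("Digest:", turn.get("content", ""))
```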