Commit 434b328
Parent(s): fcb1a95
Updating to use webUI

Co-authored-by: Kunal Pai <kunpai@users.noreply.github.com>
Co-authored-by: Harshil Patel <hpppatel@ucdavis.edu>
- deleteAgents.py +3 -2
- main.py +1 -0
- mainV2.py +55 -0
- models/system3.prompt +2 -2
- requirements.txt +100 -2
- src/CEO.py +15 -10
- src/__init__.py +0 -0
- src/agent_manager.py +8 -7
- src/budget_manager.py +1 -1
- src/llm_models.py +4 -3
- src/manager.py +147 -0
- src/tool_loader.py +6 -5
- src/utils/__init__.py +0 -0
- src/{singleton.py → utils/singleton.py} +0 -0
- src/utils/streamlit_interface.py +8 -0
- tools/ask_user.py +0 -32
deleteAgents.py CHANGED
@@ -1,14 +1,15 @@
 import json
 import ollama
+from src.utils.streamlit_interface import output_assistant_response
 
 with open("./models/models.json", "r", encoding="utf8") as f:
     models = f.read()
 models = json.loads(models)
 for agent in models:
-
+    output_assistant_response(f"Deleting agent: {agent}")
     try:
         ollama.delete(agent)
     except Exception as e:
-
+        output_assistant_response(f"Error deleting agent {agent}: {e}")
 with open("./models/models.json", "w", encoding="utf8") as f:
     f.write(json.dumps({}, indent=4))
main.py CHANGED
@@ -2,6 +2,7 @@ from google.genai import types
 from src.CEO import GeminiManager
 from src.tool_loader import ToolLoader
 
+
 if __name__ == "__main__":
     # Define the tool metadata for orchestration.
     # Load the tools using the ToolLoader class.
mainV2.py ADDED
@@ -0,0 +1,55 @@
+from google.genai import types
+from src.manager import GeminiManager
+from src.tool_loader import ToolLoader
+import gradio as gr
+import time
+
+if __name__ == "__main__":
+    # Define the tool metadata for orchestration.
+    # Load the tools using the ToolLoader class.
+    tool_loader = ToolLoader()
+
+    model_manager = GeminiManager(toolsLoader=tool_loader, gemini_model="gemini-2.0-flash")
+
+    def respond(message, chat_history):
+        return model_manager.ask(message, chat_history)
+
+    def user_message(msg: str, history: list) -> tuple[str, list]:
+        """Adds user message to chat history"""
+        history.append(gr.ChatMessage(role="user", content=msg))
+        return "", history
+
+    with gr.Blocks() as demo:
+        chatbot = gr.Chatbot(type="messages")
+        input_box = gr.Textbox()
+        clear = gr.ClearButton([input_box, chatbot])
+
+        def respond(message, chat_history):
+
+            chat_history.append({
+                "role": "user",
+                "content": message
+            })
+            print("Chat history:", chat_history)
+            chat_history = model_manager.run(chat_history)
+            return "", chat_history
+
+        msg_store = gr.State("")
+
+        input_box.submit(
+            lambda msg: (msg, msg, ""),  # Store message and clear input
+            inputs=[input_box],
+            outputs=[msg_store, input_box, input_box],
+            queue=False
+        ).then(
+            user_message,  # Add user message to chat
+            inputs=[msg_store, chatbot],
+            outputs=[input_box, chatbot],
+            queue=False
+        ).then(
+            model_manager.run,  # Generate and stream response
+            inputs=[msg_store, chatbot],
+            outputs=chatbot
+        )
+
+    demo.launch()
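Note on the wiring above: `input_box.submit(...).then(...).then(...)` chains three callbacks, using a `gr.State` slot (`msg_store`) to stash the raw message so the textbox can be cleared immediately; the final step passes `[msg_store, chatbot]` to `model_manager.run`, matching its `run(self, message, messages)` signature in src/manager.py. (Both `respond` definitions are left unwired; only the `.submit` chain drives the UI.) A minimal self-contained sketch of the same chaining pattern, with an echo function standing in for the manager (names here are illustrative; assumes the gradio==5.27.0 pin from requirements.txt):

# Sketch only: echo() stands in for GeminiManager.run.
import gradio as gr

def echo(msg, history):
    # Append an assistant reply to the messages-format history.
    history.append({"role": "assistant", "content": f"echo: {msg}"})
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    box = gr.Textbox()
    store = gr.State("")

    box.submit(
        lambda m: (m, ""),                                  # stash message, clear textbox
        inputs=[box], outputs=[store, box], queue=False,
    ).then(
        lambda m, h: h + [{"role": "user", "content": m}],  # show the user turn
        inputs=[store, chatbot], outputs=[chatbot], queue=False,
    ).then(
        echo,                                               # generate the reply
        inputs=[store, chatbot], outputs=[chatbot],
    )

demo.launch()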
models/system3.prompt CHANGED
@@ -43,7 +43,7 @@ Never answer any questions yourself, instead use tools. Only exception to this r
 </Rule>
 
 <Rule>
-If you need more information to answer the question, ask the user for clarification or additional details
+If you need more information to answer the question, ask the user for clarification or additional details.
 </Rule>
 
 <Rule>
@@ -59,7 +59,7 @@ Once an Agent is created, use the AskAgent tool to ask the agent the question or
 </Rule>
 
 <Rule>
-If the agent is not able to answer the question,
+If the agent is not able to answer the question, ask the user to get more information or clarify the question.
 </Rule>
 
 <Rule>
requirements.txt CHANGED
@@ -1,7 +1,105 @@
+aiofiles==24.1.0
+altair==5.5.0
+annotated-types==0.7.0
+anyio==4.9.0
+appdirs==1.4.4
+arxiv==2.2.0
+attrs==25.3.0
+beautifulsoup4==4.13.3
+blinker==1.9.0
+cachetools==5.5.2
+certifi==2025.1.31
+charset-normalizer==3.4.1
+click==8.1.8
+colorama==0.4.6
+eval_type_backport==0.2.2
+fastapi==0.115.12
+feedparser==6.0.11
+ffmpy==0.5.0
+filelock==3.18.0
+frozendict==2.4.6
+fsspec==2025.3.2
+gitdb==4.0.12
+GitPython==3.1.44
+google-ai-generativelanguage==0.6.15
+google-api-core==2.24.2
+google-auth==2.38.0
+google-genai==1.9.0
+googleapis-common-protos==1.69.2
+googlesearch-python==1.3.0
+gradio==5.27.0
+gradio_client==1.9.0
+groovy==0.1.2
+grpcio==1.71.0
+grpcio-status==1.71.0
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.28.1
+huggingface-hub==0.30.2
+idna==3.10
+importlib_metadata==8.6.1
+Jinja2==3.1.6
+jsonschema==4.23.0
+jsonschema-specifications==2025.4.1
+markdown-it-py==3.0.0
+MarkupSafe==3.0.2
+mdurl==0.1.2
 mistralai==1.6.0
+multitasking==0.0.11
+narwhals==1.36.0
+numpy==2.2.5
 ollama==0.4.7
-
+orjson==3.10.16
+packaging==24.2
+pandas==2.2.3
+peewee==3.17.9
+pillow==11.2.1
+platformdirs==4.3.7
+proto-plus==1.26.1
 protobuf==5.29.4
+pyarrow==19.0.1
+pyasn1==0.6.1
+pyasn1_modules==0.4.2
 pydantic==2.11.2
+pydantic_core==2.33.1
+pydeck==0.9.1
+pydub==0.25.1
+pyee==11.1.1
+Pygments==2.19.1
+pyppeteer==2.0.0
+python-dateutil==2.9.0.post0
 python-dotenv==1.1.0
-
+python-multipart==0.0.20
+pytz==2025.2
+PyYAML==6.0.2
+referencing==0.36.2
+requests==2.32.3
+rich==14.0.0
+rpds-py==0.24.0
+rsa==4.9
+ruff==0.11.7
+safehttpx==0.1.6
+semantic-version==2.10.0
+sgmllib3k==1.0.0
+shellingham==1.5.4
+six==1.17.0
+smmap==5.0.2
+sniffio==1.3.1
+soupsieve==2.6
+starlette==0.46.2
+streamlit==1.44.1
+tenacity==9.1.2
+toml==0.10.2
+tomlkit==0.13.2
+tornado==6.4.2
+tqdm==4.67.1
+typer==0.15.2
+typing-inspection==0.4.0
+typing_extensions==4.13.1
+tzdata==2025.2
+urllib3==1.26.20
+uvicorn==0.34.2
+watchdog==6.0.0
+websockets==10.4
+yfinance==0.2.56
+zipp==3.21.0
src/CEO.py CHANGED
@@ -7,6 +7,8 @@ from src.tool_loader import ToolLoader
 from src.utils.suppress_outputs import suppress_output
 import logging
 
+from src.utils.streamlit_interface import get_user_message, output_assistant_response
+
 logger = logging.getLogger(__name__)
 handler = logging.StreamHandler(sys.stdout)
 handler.setLevel(logging.INFO)
@@ -22,6 +24,7 @@ class GeminiManager:
         self.model_name = gemini_model
         with open(system_prompt_file, 'r', encoding="utf8") as f:
             self.system_prompt = f.read()
+        self.messages = []
 
     def generate_response(self, messages):
         return self.client.models.generate_content(
@@ -75,21 +78,22 @@ class GeminiManager:
             response = suppress_output(self.generate_response)(messages)
         except Exception as e:
             logger.debug(f"Error generating response: {e}")
-            shouldRetry =
-            if shouldRetry.lower() == "y":
+            shouldRetry = get_user_message("An error occurred. Do you want to retry? (y/n): ")
+            if shouldRetry and shouldRetry.lower() == "y":
                 return self.run(messages)
             else:
-
+                output_assistant_response("Ending the conversation.")
                 return messages
 
         logger.debug(f"Response: {response}")
 
         if (not response.text and not response.function_calls):
-
+            output_assistant_response("No response from the model.")
 
         # Attach the llm response to the messages
         if response.text is not None:
-
+            output_assistant_response("CEO: " + response.text)
+            # print("CEO:", response.text)
             assistant_content = types.Content(
                 role='model' if self.model_name == "gemini-2.5-pro-exp-03-25" else 'assistant',
                 parts=[types.Part.from_text(text=response.text)],
@@ -103,11 +107,11 @@ class GeminiManager:
         # Invoke the function calls if any and attach the response to the messages
         if response.function_calls:
             messages.append(self.handle_tool_calls(response))
-            shouldContinue =
+            shouldContinue = get_user_message("Should I continue? (y/n): ")
             if shouldContinue.lower() == "y":
                 return self.run(messages)
             else:
-
+                output_assistant_response("Ending the conversation.")
                 return messages
         else:
             logger.debug("No tool calls found in the response.")
@@ -115,9 +119,10 @@ class GeminiManager:
         return self.start_conversation(messages)
 
     def start_conversation(self, messages=[]):
-        question =
-
-
+        question = get_user_message("User: ")
+        # question = input("User: ")
+        if question and ("exit" in question.lower() or "quit" in question.lower()):
+            output_assistant_response("Ending the conversation.")
             return messages
         user_content = types.Content(
             role='user',
src/__init__.py ADDED
File without changes
src/agent_manager.py CHANGED
@@ -3,7 +3,8 @@ from typing import Dict, Type, Any, Optional
 import os
 import json
 import ollama
-from src.singleton import singleton
+from src.utils.singleton import singleton
+from src.utils.streamlit_interface import output_assistant_response
 
 class Agent(ABC):
 
@@ -47,12 +48,12 @@ class OllamaAgent(Agent):
         )
 
     def ask_agent(self, prompt):
-
+        output_assistant_response(f"Asked Agent {self.agent_name} a question")
         agent_response = ollama.chat(
             model=self.agent_name,
             messages=[{"role": "user", "content": prompt}],
         )
-
+        output_assistant_response(f"Agent {self.agent_name} answered with {agent_response.message.content}")
         return agent_response.message.content
 
     def delete_agent(self):
@@ -122,7 +123,7 @@ class AgentManager():
             else:
                 return {}
         except Exception as e:
-
+            output_assistant_response(f"Error listing agents: {e}")
             return {}
 
     def delete_agent(self, agent_name: str) -> None:
@@ -140,7 +141,7 @@ class AgentManager():
             with open("./models/models.json", "w", encoding="utf8") as f:
                 f.write(json.dumps(models, indent=4))
         except Exception as e:
-
+            output_assistant_response(f"Error deleting agent: {e}")
 
 
     def _save_agent(self, agent_name: str, base_model: str, system_prompt: str,
@@ -176,7 +177,7 @@ class AgentManager():
                 f.write(json.dumps(models, indent=4))
 
         except Exception as e:
-
+            output_assistant_response(f"Error saving agent {agent_name}: {e}")
 
     def _get_agent_type(self, base_model)->str:
 
@@ -216,4 +217,4 @@ class AgentManager():
                 invoke_cost
             )
         except Exception as e:
-
+            output_assistant_response(f"Error loading agents: {e}")
src/budget_manager.py CHANGED
@@ -1,4 +1,4 @@
-from src.singleton import singleton
+from src.utils.singleton import singleton
 
 @singleton
 class BudgetManager():
src/llm_models.py CHANGED
@@ -5,6 +5,7 @@ from pathlib import Path
 from google import genai
 from google.genai import types
 from mistralai import Mistral
+from src.utils.streamlit_interface import output_assistant_response
 
 
 class AbstractModelManager(ABC):
@@ -39,7 +40,7 @@ class OllamaModelManager(AbstractModelManager):
             system = f.read()
 
         if not self.is_model_loaded(self.model_name):
-
+            output_assistant_response(f"Creating model {self.model_name}")
             ollama.create(
                 model=self.model_name,
                 from_=base_model,
@@ -60,10 +61,10 @@ class OllamaModelManager(AbstractModelManager):
 
     def delete(self):
         if self.is_model_loaded("C2Rust:latest"):
-
+            output_assistant_response(f"Deleting model {self.model_name}")
             ollama.delete("C2Rust:latest")
         else:
-
+            output_assistant_response(f"Model {self.model_name} not found, skipping deletion.")
 
 class GeminiModelManager(AbstractModelManager):
     def __init__(self, api_key):
src/manager.py ADDED
@@ -0,0 +1,147 @@
+from google import genai
+from google.genai import types
+from google.genai.types import *
+import os
+from dotenv import load_dotenv
+import sys
+from src.tool_loader import ToolLoader
+from src.utils.suppress_outputs import suppress_output
+import logging
+from gradio import ChatMessage
+
+from src.utils.streamlit_interface import get_user_message, output_assistant_response
+
+logger = logging.getLogger(__name__)
+handler = logging.StreamHandler(sys.stdout)
+# handler.setLevel(logging.DEBUG)
+logger.addHandler(handler)
+
+class GeminiManager:
+    def __init__(self, toolsLoader: ToolLoader, system_prompt_file="./models/system3.prompt", gemini_model="gemini-2.5-pro-exp-03-25"):
+        load_dotenv()
+        self.API_KEY = os.getenv("GEMINI_KEY")
+        self.client = genai.Client(api_key=self.API_KEY)
+        self.toolsLoader: ToolLoader = toolsLoader
+        self.toolsLoader.load_tools()
+        self.model_name = gemini_model
+        with open(system_prompt_file, 'r', encoding="utf8") as f:
+            self.system_prompt = f.read()
+        self.messages = []
+
+    def generate_response(self, messages):
+        return self.client.models.generate_content(
+            model=self.model_name,
+            contents=messages,
+            config=types.GenerateContentConfig(
+                system_instruction=self.system_prompt,
+                temperature=0.2,
+                tools=self.toolsLoader.getTools(),
+            ),
+        )
+
+    def handle_tool_calls(self, response):
+        parts = []
+        for function_call in response.function_calls:
+            toolResponse = None
+            logger.info(f"Function Name: {function_call.name}, Arguments: {function_call.args}")
+            try:
+                toolResponse = self.toolsLoader.runTool(function_call.name, function_call.args)
+            except Exception as e:
+                logger.warning(f"Error running tool: {e}")
+                toolResponse = {
+                    "status": "error",
+                    "message": f"Tool {function_call.name} failed to run.",
+                    "output": str(e),
+                }
+            logger.debug(f"Tool Response: {toolResponse}")
+            tool_content = types.Part.from_function_response(
+                name=function_call.name,
+                response={"result": toolResponse})
+            try:
+                self.toolsLoader.load_tools()
+            except Exception as e:
+                logger.info(f"Error loading tools: {e}. Deleting the tool.")
+                # delete the created tool
+                self.toolsLoader.delete_tool(toolResponse['output']['tool_name'], toolResponse['output']['tool_file_path'])
+                tool_content = types.Part.from_function_response(
+                    name=function_call.name,
+                    response={"result": f"{function_call.name} with {function_call.args} doesn't follow the required format, please read the other tool implementations for reference." + str(e)})
+            parts.append(tool_content)
+        return {
+            "role": "tool",
+            "content": repr(types.Content(
+                role='model' if self.model_name == "gemini-2.5-pro-exp-03-25" else 'tool',
+                parts=parts
+            ))
+        }
+
+    def format_chat_history(self, messages=[]):
+        formatted_history = []
+        for message in messages:
+            # Skip thinking messages (messages with metadata)
+            if not (message.get("role") == "assistant" and "metadata" in message):
+                role = "model"
+                parts = [types.Part.from_text(text=message.get("content", ""))]
+                match message.get("role"):
+                    case "user":
+                        role = "user"
+                    case "tool":
+                        role = "tool"
+                        formatted_history.append(eval(message.get("content", "")))
+                        continue
+                    case "function_call":
+                        role = "model"
+                        formatted_history.append(eval(message.get("content", "")))
+                        continue
+                    case _:
+                        role = "model"
+                formatted_history.append(types.Content(
+                    role=role,
+                    parts=parts
+                ))
+        return formatted_history
+
+    def run(self, message, messages):
+        chat_history = self.format_chat_history(messages)
+        logger.debug(f"Chat history: {chat_history}")
+        try:
+            response = suppress_output(self.generate_response)(chat_history)
+        except Exception as e:
+            logger.debug(f"Error generating response: {e}")
+            messages.append({
+                "role": "assistant",
+                "content": f"Error generating response: {e}"
+            })
+            logger.error(f"Error generating response: {e}")
+            return messages
+        logger.debug(f"Response: {response}")
+
+        if (not response.text and not response.function_calls):
+            messages.append({
+                "role": "assistant",
+                "content": "No response from the model.",
+                "metadata": {"title": "No response from the model."}
+            })
+
+        # Attach the llm response to the messages
+        if response.text is not None and response.text != "":
+            messages.append({
+                "role": "assistant",
+                "content": response.text
+            })
+
+        # Attach the function call response to the messages
+        if response.candidates[0].content and response.candidates[0].content.parts:
+            # messages.append(response.candidates[0].content)
+            messages.append({
+                "role": "function_call",
+                "content": repr(response.candidates[0].content),
+            })
+            pass
+
+        # Invoke the function calls if any and attach the response to the messages
+        if response.function_calls:
+            calls = self.handle_tool_calls(response)
+            messages.append(calls)
+            return self.run(message, messages)
+        return messages
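The heart of src/manager.py is `run`: it converts the Gradio messages-format history into `types.Content` objects, asks Gemini for a turn, appends any text and function-call parts back onto the history, executes requested tools via `handle_tool_calls`, and recurses until a turn produces no function calls. Tool and function_call messages are round-tripped through `repr`/`eval`, which keeps the Gradio history serializable but is fragile and unsafe on untrusted input. Schematically, the control flow is the standard tool-use loop; below is an iterative paraphrase (a sketch, not the code above — `generate` and `run_tool` stand in for the Gemini call and `ToolLoader.runTool`):

# Iterative paraphrase of GeminiManager.run's recursion.
def agent_loop(generate, run_tool, messages):
    while True:
        response = generate(messages)                  # one model turn
        if response.text:
            messages.append({"role": "assistant", "content": response.text})
        if not response.function_calls:
            return messages                            # no tools requested: done
        for call in response.function_calls:           # execute each tool call
            result = run_tool(call.name, call.args)
            messages.append({"role": "tool", "content": repr(result)})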
src/tool_loader.py CHANGED
@@ -7,10 +7,11 @@ from google.genai import types
 import sys
 
 from src.budget_manager import BudgetManager
-from src.singleton import singleton
+from src.utils.singleton import singleton
 from src.utils.suppress_outputs import suppress_output
 from tools.get_agents_tool import GetAgents
 from tools.tool_deletor import ToolDeletor
+from src.utils.streamlit_interface import output_assistant_response
 
 toolsImported = []
 
@@ -59,7 +60,7 @@ class ToolLoader:
         if tool.inputSchema["create_cost"] is not None:
             self.budget_manager.add_to_expense(tool.inputSchema["create_cost"])
 
-
+        output_assistant_response(f"Budget Remaining: {self.budget_manager.get_current_remaining_budget()}")
 
     def load_tools(self):
         newToolsImported = []
@@ -76,12 +77,12 @@ class ToolLoader:
         self.toolsImported = newToolsImported
 
     def runTool(self, toolName, query):
-
+        output_assistant_response(f"Budget Remaining: {self.budget_manager.get_current_remaining_budget()}")
        for tool in self.toolsImported:
             if tool.name == toolName:
                 self.update_budget(query, tool.inputSchema)
                 return tool.run(query)
-
+        output_assistant_response(f"Budget Remaining: {self.budget_manager.get_current_remaining_budget()}")
         return {
             "status": "error",
             "message": f"Tool {toolName} not found",
@@ -152,7 +153,7 @@ class ToolLoader:
             "output": None
         }
 
-toolLoader = ToolLoader()
+# toolLoader = ToolLoader()
 
 # Example usage
 # print(toolLoader.getTools())
src/utils/__init__.py ADDED
File without changes
src/{singleton.py → utils/singleton.py} RENAMED
File without changes
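The move of singleton.py under src/utils/ is content-free, so the decorator's body does not appear in this diff. For orientation, a typical `singleton` class decorator compatible with the `@singleton` usage in budget_manager.py looks like the following sketch (an assumption, not the repository's actual file):

# Assumed shape of src/utils/singleton.py; the commit moves the file unchanged.
def singleton(cls):
    """Return a wrapper that constructs cls once and reuses the instance."""
    instances = {}

    def get_instance(*args, **kwargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)   # first call constructs
        return instances[cls]                       # later calls reuse

    return get_instance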
src/utils/streamlit_interface.py ADDED
@@ -0,0 +1,8 @@
+from src.utils.singleton import singleton
+
+def get_user_message(message):
+    user_input = input(message)
+    return user_input
+
+def output_assistant_response(response):
+    print(response)
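Despite the filename, these helpers are plain console I/O (and the `singleton` import is unused here); their value is giving every caller a single seam to swap `input`/`print` for a web frontend later. A hypothetical web-backed replacement (not part of this commit) could keep the same signatures and route output through a queue that the UI drains:

# Hypothetical drop-in: assistant output goes to a queue a frontend can poll
# instead of stdout; names here are illustrative only.
import queue

_outbox: queue.Queue = queue.Queue()

def get_user_message(message):
    return input(message)          # a web UI would supply this from the client

def output_assistant_response(response):
    _outbox.put(str(response))     # frontend drains _outbox for display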
tools/ask_user.py DELETED
@@ -1,32 +0,0 @@
-
-__all__ = ['AskUser']
-
-
-class AskUser():
-    dependencies = []
-
-    inputSchema = {
-        "name": "AskUser",
-        "description": "Asks a question to the user and gets a response. Only use this when you need more clarification from the user on the question.",
-        "parameters": {
-            "type": "object",
-            "properties": {
-                "question": {
-                    "type": "string",
-                    "description": "The question to ask the user",
-                },
-            },
-            "required": ["question"],
-        }
-    }
-
-    def run(self, **kwargs):
-        print("Running Ask User tool")
-        question = kwargs.get("question")
-        output = input(f"{question}: ")
-        return {
-            "status": "success",
-            "message": "Ask User tool executed successfully",
-            "output": output,
-            "role": "user",
-        }