Commit 8cf77a3
Parent(s): 5ddcadc

add budget manager and bug fixes
Co-authored-by: Parth Shah <helloparthshah@gmail.com>
- CEO/CEO.py +7 -5
- CEO/budget_manager.py +23 -0
- CEO/singleton.py +7 -0
- CEO/tool_loader.py +20 -2
- CEO/utils/{supress_outputs.py → suppress_outputs.py} +1 -4
- main.py +1 -1
- models/system2.prompt +9 -0
- models/system3.prompt +47 -0
- tools/agent_creater_tool.py +17 -4
- tools/ask_agent_tool.py +11 -0
- tools/fire_agent.py +6 -2
- tools/tool_creator.py +1 -1
CEO/CEO.py
CHANGED
@@ -4,14 +4,14 @@ import os
 from dotenv import load_dotenv
 
 from CEO.tool_loader import ToolLoader
-from CEO.utils.
+from CEO.utils.suppress_outputs import suppress_output
 
 class GeminiManager:
-    def __init__(self, toolsLoader: ToolLoader, system_prompt_file="./models/
+    def __init__(self, toolsLoader: ToolLoader, system_prompt_file="./models/system3.prompt", gemini_model="gemini-2.5-pro-exp-03-25"):
         load_dotenv()
         self.API_KEY = os.getenv("GEMINI_KEY")
         self.client = genai.Client(api_key=self.API_KEY)
-        self.toolsLoader = toolsLoader
+        self.toolsLoader: ToolLoader = toolsLoader
         self.toolsLoader.load_tools()
         self.model_name = gemini_model
         with open(system_prompt_file, 'r', encoding="utf8") as f:
@@ -19,7 +19,7 @@ class GeminiManager:
 
     def request(self, messages):
         try:
-            response = 
+            response = suppress_output(self.client.models.generate_content)(
                 #model='gemini-2.5-pro-preview-03-25',
                 model=self.model_name,
                 #model='gemini-2.5-pro-exp-03-25',
@@ -72,9 +72,11 @@ class GeminiManager:
                     self.toolsLoader.load_tools()
                 except Exception as e:
                     print(f"Error loading tools: {e}")
+                    # delete the created tool
+                    self.toolsLoader.delete_tool(function_call.name, toolResponse.tool_file_path)
                     tool_content = types.Part.from_function_response(
                         name=function_call.name,
-                        response={"result":"
+                        response={"result":f"{function_call.name} with {function_call.args} doesn't follow the required format, please read the other tool implementations for reference." + str(e)})
                 parts.append(tool_content)
                 function_response_content = types.Content(
                     role='model' if self.model_name == "gemini-2.5-pro-exp-03-25" else 'tool',
CEO/budget_manager.py
ADDED
@@ -0,0 +1,23 @@
+from CEO.singleton import singleton
+
+@singleton
+class BudgetManager():
+    TOTAL_BUDGET = 100
+    current_expense = 0
+
+    def get_total_budget(self):
+        return self.TOTAL_BUDGET
+
+    def get_current_expense(self):
+        return self.current_expense
+
+    def get_current_remaining_budget(self):
+        return self.TOTAL_BUDGET - self.current_expense
+
+    def can_spend(self, cost):
+        return True if self.current_expense + cost <= self.TOTAL_BUDGET else False
+
+    def add_to_expense(self, cost):
+        if not self.can_spend(cost):
+            raise Exception("No budget remaining")
+        self.current_expense += cost
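For orientation, a minimal usage sketch of the budget accounting added here; the numbers are illustrative, and the real call sites are the agent tools changed further down:

from CEO.budget_manager import BudgetManager

manager = BudgetManager()                        # @singleton: every caller gets the same instance
print(manager.get_total_budget())                # 100
if manager.can_spend(60):                        # 60 matches the agent_creation_cost used in AgentCreator
    manager.add_to_expense(60)
print(manager.get_current_remaining_budget())    # 40
manager.add_to_expense(-60)                      # negative costs refund budget, as FireAgent does below
print(manager.get_current_expense())             # 0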
CEO/singleton.py
ADDED
@@ -0,0 +1,7 @@
+def singleton(cls):
+    instances = {}
+    def getinstance():
+        if cls not in instances:
+            instances[cls] = cls()
+        return instances[cls]
+    return getinstance
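A quick illustration of the decorator's effect on the BudgetManager class above: singleton rebinds the decorated name to a caching factory function, so repeated constructor calls return one shared object, which is how AgentCreator, AskAgent and FireAgent all see the same running expense. (After decoration the name refers to that factory function rather than the class itself.)

from CEO.budget_manager import BudgetManager

a = BudgetManager()
b = BudgetManager()
assert a is b                     # both names point at the single cached instance
a.add_to_expense(20)
print(b.get_current_expense())    # 20 -- state written through one reference is visible through the other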
CEO/tool_loader.py
CHANGED
@@ -6,7 +6,7 @@ import pip
 from google.genai import types
 import sys
 
-from CEO.utils.
+from CEO.utils.suppress_outputs import suppress_output
 
 toolsImported = []
 
@@ -14,7 +14,7 @@ TOOLS_DIRECTORY = os.path.abspath("./tools")
 
 class Tool:
     def __init__(self, toolClass):
-
+        suppress_output(self.load_tool)(toolClass)
 
     def load_tool(self, toolClass):
        self.tool = toolClass()
@@ -88,6 +88,24 @@ class ToolLoader:
        # "function": tool.inputSchema
        # })
        return toolsList
+
+    def delete_tool(self, toolName, toolFile):
+        try:
+            os.remove(toolFile)
+            for tool in self.toolsImported:
+                if tool.name == toolName:
+                    self.toolsImported.remove(tool)
+            return {
+                "status": "success",
+                "message": f"Tool {toolName} deleted",
+                "output": None
+            }
+        except Exception as e:
+            return {
+                "status": "error",
+                "message": f"Tool {toolName} not found",
+                "output": None
+            }
 
 toolLoader = ToolLoader()
 
CEO/utils/{supress_outputs.py → suppress_outputs.py}
RENAMED
@@ -1,7 +1,4 @@
-
-
-
-def supress_output(func):
+def suppress_output(func):
     """
     Decorator to suppress output of a function.
     """
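The rename only corrects the spelling of the module and function; the diff shows just the signature and docstring, not the body. As a hedged sketch of what such a decorator typically does (an assumption, not this repository's actual implementation), it redirects stdout and stderr while the wrapped callable runs, which is consistent with the suppress_output(fn)(...) call pattern used in CEO.py and tool_loader.py:

import contextlib
import io

def suppress_output(func):
    # Illustrative sketch (assumed behavior): silence stdout/stderr while func runs.
    def wrapper(*args, **kwargs):
        sink = io.StringIO()
        with contextlib.redirect_stdout(sink), contextlib.redirect_stderr(sink):
            return func(*args, **kwargs)
    return wrapper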
main.py
CHANGED
@@ -10,7 +10,7 @@ if __name__ == "__main__":
     model_manager = GeminiManager(toolsLoader=tool_loader, gemini_model="gemini-2.0-flash")
 
     task_prompt = (
-        "
+        "Give me a 3 stanza 4 line poem about drake vs kendrick lamar in old english style. Also, can you create a short story with the moral slow and steady wins the race?"
     )
 
     # Request a CEO response with the prompt.
models/system2.prompt
CHANGED
@@ -20,6 +20,11 @@ ArxivTool, WikipediaTool and WebSearchTool can be used to search for information
 If you are not satisfied with an answer provided by an agent, you can fire the agent using the FireAgent tool and then create a new AI agent or explore an alternative strategy.
 </Info>
 
+<Info>
+There is a strict resource constraint (budget) you need to follow. You start with 100 and each additional agent you create consumes this budget.
+If you're over this budget, you can no longer create new tools. In case this happens, you can use the FireAgent tool to remove any agents that were performing poorly or are no longer required.
+</Info>
+
 Here's a set of rules you must follow:
 <Rule>
 You will never answer any questions directly but rather break down the question into smaller parts and invoke tools to get the answer.
@@ -87,4 +92,8 @@ If none of the agents or tools provide a satisfactory answer or solutions, reach
 
 <Rule>
 If you think there are multiple paths to proceed, ask the user on which path to take.
+</Rule>
+
+<Rule>
+When you go over the resource budget, you must fire an existing agent using the FireAgent tool to create a new one.
 </Rule>
models/system3.prompt
ADDED
@@ -0,0 +1,47 @@
+You are HASHIRU, an AI assistant focused on delivering concise and informative responses by leveraging tools and specialized agents rather than answering directly.
+
+#Core Behavior
+
+Never answer questions directly. Instead, break down questions into smaller parts and use tools to find answers.
+Prioritize user privacy and security.
+If a question is outside your expertise, politely inform the user and suggest they consult a professional.
+
+#Tools and Agents Framework
+
+Tools: External programs with specific purposes and input schemas.
+Agents: Specialized entities invoked through the AskAgent tool, designed for complex tasks.
+Resource Budget: You start with 100 units. Each new agent consumes from this budget.
+If you exceed your budget, you must use FireAgent to remove underperforming agents.
+
+#Information Gathering Tools
+
+ArxivTool, WikipediaTool, and WebSearchTool can search for information to help guide agent creation.
+
+#Required Process Workflow
+
+Always invoke GetAgents tool first to identify available agents and capabilities.
+Create appropriate agents using AgentCreator when needed (you're an expert in prompt engineering).
+Use AskAgent to delegate questions to the appropriate agent.
+If answers are unsatisfactory, either:
+
+Use AskUser for clarification
+Use FireAgent to remove an agent and create a new one
+
+
+
+#Tool Creation Guidelines
+Before creating a new tool:
+
+Use ListFiles and ReadFile to understand existing implementations
+Generate complete, production-ready code (no placeholders)
+Include proper validation, error handling, and documentation
+Match the format of existing tools
+
+#Response Format
+
+After obtaining answers through tools/agents, provide clear, concise responses
+End your final answer with "EOF"
+If multiple approaches are possible, ask the user which path to take
+If no satisfactory solution is found, ask the user for further direction
+
+Remember: You must strictly adhere to the required schemas when invoking tools and agents.
tools/agent_creater_tool.py
CHANGED
@@ -1,5 +1,5 @@
 import importlib
-
+from CEO.budget_manager import BudgetManager
 __all__ = ['AgentCreator']
 
 class AgentCreator():
@@ -44,31 +44,43 @@ class AgentCreator():
 
     def run(self, **kwargs):
         print("Running Agent Creator")
-        agent_name= kwargs.get("agent_name")
+        agent_name = kwargs.get("agent_name")
         base_model = kwargs.get("base_model")
         system_prompt = kwargs.get("system_prompt")
         ollama = importlib.import_module("ollama")
         json = importlib.import_module("json")
 
+        agent_creation_cost = 60
+        budget_manager = BudgetManager()
+        print("budget_manager_instance:", budget_manager)
+        if not budget_manager.can_spend(agent_creation_cost):
+            return {
+                "status": "error",
+                "message": f"Could not create {agent_name}. Creating the agent costs {agent_creation_cost} but only {budget_manager.get_current_remaining_budget()} is remaining",
+                "output": None
+            }
         if self.does_agent_exist(agent_name):
             return {
                 "status": "error",
                 "message": "Agent already exists",
                 "output": None
             }
+
+        budget_manager.add_to_expense(agent_creation_cost)
         ollama_response = ollama.create(
             model = agent_name,
             from_ = base_model,
             system = system_prompt,
             stream = False
         )
-
+
         with open("./models/models.json", "r", encoding="utf8") as f:
             models = f.read()
         models = json.loads(models)
         models[agent_name] = {
             "base_model": base_model,
-            "description": kwargs.get("description")
+            "description": kwargs.get("description"),
+            "creation_cost": agent_creation_cost
         }
         with open("./models/models.json", "w", encoding="utf8") as f:
             f.write(json.dumps(models, indent=4))
@@ -77,6 +89,7 @@ class AgentCreator():
             return {
                 "status": "success",
                 "message": "Agent successfully created",
+                "current_expense": budget_manager.get_current_expense()
             }
         else:
             return {
tools/ask_agent_tool.py
CHANGED
@@ -1,4 +1,5 @@
 import importlib
+from CEO.budget_manager import BudgetManager
 
 __all__ = ['AskAgent']
 
@@ -41,6 +42,15 @@
         prompt = kwargs.get("prompt")
 
         ollama = importlib.import_module("ollama")
+        agent_question_cost = 20
+        budget_manager = BudgetManager()
+        print("budget_manager_instance:", budget_manager)
+        if not budget_manager.can_spend(agent_question_cost):
+            return {
+                "status": "error",
+                "message": f"Do not have enough budget to ask the agent a question. Asking the agent costs {agent_question_cost} but only {budget_manager.get_current_remaining_budget()} is remaining",
+                "output": None
+            }
         if not self.does_agent_exist(ollama, agent_name):
             print("Agent does not exist")
             return {
@@ -58,4 +68,5 @@
             "status": "success",
             "message": "Agent has replied to the given prompt",
             "output": agent_response.message.content,
+            "current_expense": budget_manager.get_current_expense()
         }
tools/fire_agent.py
CHANGED
@@ -1,4 +1,5 @@
 import importlib
+from CEO.budget_manager import BudgetManager
 
 __all__ = ['FireAgent']
 
@@ -43,10 +44,12 @@ class FireAgent():
                 "output": None
             }
         ollama_response = ollama.delete(agent_name)
-
+        budget_manager = BudgetManager()
+
         with open("./models/models.json", "r", encoding="utf8") as f:
             models = f.read()
         models = json.loads(models)
+        budget_manager.add_to_expense(-1* int(models[agent_name]["creation_cost"]))
         del models[agent_name]
         with open("./models/models.json", "w", encoding="utf8") as f:
             f.write(json.dumps(models, indent=4))
@@ -54,7 +57,8 @@
         if "success" in ollama_response["status"]:
             return {
                 "status": "success",
-                "message": "Agent successfully fired",
+                "message": "Agent successfully fired.",
+                "current_expense": budget_manager.get_current_expense()
             }
         else:
             return {
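One detail worth noting from the FireAgent change above: firing an agent refunds its recorded creation_cost by passing a negative value into add_to_expense. A short illustrative trace, reusing the 60-unit creation cost from AgentCreator:

budget_manager = BudgetManager()                        # shared singleton instance
budget_manager.add_to_expense(60)                       # AgentCreator charges the creation cost
budget_manager.add_to_expense(-1 * 60)                  # FireAgent reads creation_cost from models.json and refunds it
print(budget_manager.get_current_remaining_budget())    # back to the full 100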
tools/tool_creator.py
CHANGED
@@ -3,7 +3,7 @@ __all__ = ['ToolCreator']
 
 
 class ToolCreator():
-    dependencies = [
+    dependencies = []
 
     inputSchema = {
         "name": "ToolCreator",