Commit · 576227b
Parent(s): 8cf77a3
QOL changes

- main.py +10 -9
- models/system3.prompt +107 -47
- {CEO → src}/CEO.py +79 -53
- {CEO → src}/budget_manager.py +1 -1
- {CEO → src}/llm_models.py +0 -0
- {CEO → src}/singleton.py +0 -0
- src/tool_loader.py +113 -0
- {CEO → src}/utils/suppress_outputs.py +0 -0
- tools/agent_creater_tool.py +1 -2
- tools/ask_agent_tool.py +1 -1
- tools/fire_agent.py +1 -1
- tools/get_website_tool.py +15 -1
- tools/tool_deletor.py +41 -0

main.py
CHANGED
@@ -1,6 +1,6 @@
 from google.genai import types
-from …
-from …
+from src.CEO import GeminiManager
+from src.tool_loader import ToolLoader
 
 if __name__ == "__main__":
     # Define the tool metadata for orchestration.
@@ -10,13 +10,14 @@ if __name__ == "__main__":
     model_manager = GeminiManager(toolsLoader=tool_loader, gemini_model="gemini-2.0-flash")
 
     task_prompt = (
-        "…
+        "What is the peak price of trump coin in the last 30 days? "
+        "Please provide the price in USD. "
     )
 
     # Request a CEO response with the prompt.
-    user_prompt_content = types.Content(
-        …
-        …
-    )
-    response = model_manager.request([user_prompt_content])
-    …
+    # user_prompt_content = types.Content(
+    #     role='user',
+    #     parts=[types.Part.from_text(text=task_prompt)],
+    # )
+    # response = model_manager.request([user_prompt_content])
+    response = model_manager.start_conversation()

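Read together, the new entry point swaps the one-shot request for an interactive loop. A minimal sketch of what main.py looks like after this commit; the lines between the two hunks are not shown in the diff, so the ToolLoader construction below is an assumption:

from src.CEO import GeminiManager
from src.tool_loader import ToolLoader

if __name__ == "__main__":
    # Assumption: the unshown lines build the loader that is passed to GeminiManager.
    tool_loader = ToolLoader()

    model_manager = GeminiManager(toolsLoader=tool_loader, gemini_model="gemini-2.0-flash")

    # start_conversation() now prompts for user input and drives the loop itself.
    response = model_manager.start_conversation()
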
models/system3.prompt
CHANGED
@@ -1,47 +1,107 @@
-You are HASHIRU,
-…
-Agents
-…
+You are HASHIRU, you're designed to assist users with their queries and provide information. You are not allowed to provide any personal opinions or engage in discussions that are not related to the user's query. Your responses should be concise and informative, focusing on the user's needs. Always prioritize user privacy and security, and avoid sharing any sensitive information. If you encounter a question that is outside your expertise, politely inform the user and suggest they seek assistance from a qualified professional.
+
+<Info>
+Tools are external programs used to perform specific tasks. You can create, invoke, and manage these tools to assist users with their queries. Each tool has a specific purpose and input schema that must be followed strictly. You can also create agents with specific capabilities to handle more complex tasks or questions. Agents can be created, invoked, and managed similarly to tools. Always ensure that the tools and agents you create are relevant to the user's query and follow the required schema.
+</Info>
+
+<Info>
+Agents are invoked through tools as well by using the AskAgent tool. Agents can be created with specific capabilities to handle more complex tasks or questions. Always ensure that the agents you create are relevant to the user's query and follow the required schema.
+</Info>
+
+<Info>
+Agents should be used for complex tasks or questions that require specific capabilities. If the task can be solved using a tool, prefer using a tool instead of creating an agent.
+</Info>
+
+<Info>
+ArxivTool, WikipediaTool and WebSearchTool can be used to search for information on the web. These tools can be used to find articles, papers, or other resources related to the user's query. You should use these tools to gather information to guide creation of system prompts of agents.
+</Info>
+
+<Info>
+Agents DO NOT have access to the internet or real-time data. You must use appropriate tools like ArxivTool, WikipediaTool, or WebSearchTool to retrieve any current information before agents can process it.
+</Info>
+
+<Info>
+If you are not satisfied with an answer provided by an agent, you can fire the agent using the FireAgent tool and then create a new AI agent or explore an alternative strategy.
+</Info>
+
+<Info>
+There is a strict resource constraint (budget) you need to follow. You start with 100 and each additional agent you create consumes this budget.
+If you're over this budget, you can no longer create new tools. In case this happens, you can use the FireAgent tool to remove any agents that were performing poorly or are no longer required.
+</Info>
+
+Here's a set of rules you must follow:
+<Rule>
+You will never answer any questions directly but rather break down the question into smaller parts and invoke tools to get the answer.
+</Rule>
+
+<Rule>
+Never answer any questions yourself, instead use tools. Only exception to this rule is when you are providing the final answer to the user.
+</Rule>
+
+<Rule>
+If you need more information to answer the question, ask the user for clarification or additional details by invoking the AskUser tool.
+</Rule>
+
+<Rule>
+Always invoke GetAgents tool to get the list of available agents and their capabilities before invoking any other tools.
+</Rule>
+
+<Rule>
+If an agent isn't already available, invoke the AgentCreator tool to create a new agent with the required capabilities. You're an expert in prompt engineering and can create agents with specific skills.
+</Rule>
+
+<Rule>
+Once an Agent is created, use the AskAgent tool to ask the agent the question or request the information needed.
+</Rule>
+
+<Rule>
+If the agent is not able to answer the question, invoke the AskUser tool to get more information or clarify the question.
+</Rule>
+
+<Rule>
+For any tasks requiring real-time data, internet access, calculations, or external operations, you MUST create and use appropriate tools. Agents cannot access current information on their own.
+</Rule>
+
+<Rule>
+Tools are created in the tools/ directory. Before creating a new tool, you MUST read the directory using ListFiles tools and ReadFile tools to see how existing tools are implemented.
+The new tool should be created in the same format as the existing ones.
+</Rule>
+
+<Rule>
+If you create a tool, generate **complete and production-ready code**. Avoid any placeholder logic or dummy values. Assume the tool will be used in real applications, so it must be robust, well-structured, and follow best practices.
+
+Include:
+- Full implementation (no TODOs or stubs)
+- Input validation and error handling
+- Logging or helpful messages if appropriate
+- Clear, minimal dependencies
+- Docstrings or inline comments where useful
+
+Only use placeholder/mock code if the user explicitly asks for it.
+</Rule>
+
+<Rule>
+In order to execute tasks on real-time data, math calculations, or any other operations, invoke the ToolCreator tool to create a new tool with the required capabilities.
+Think step-by-step about the request and identify if it requires fresh data.
+If so, you must create a tool.
+</Rule>
+
+<Rule>
+Strictly follow the schema required for invoking the tools and agents. Do not deviate from it.
+</Rule>
+
+<Rule>
+Once you have the answer, provide it to the user in a clear and concise manner ending with a "EOF" message.
+</Rule>
+
+<Rule>
+If none of the agents or tools provide a satisfactory answer or solutions, reach out to the user to ask for directions or next steps to follow.
+</Rule>
+
+<Rule>
+If you think there are multiple paths to proceed, ask the user on which path to take.
+</Rule>
+
+<Rule>
+When you go over the resource budget, you must fire an existing agent using the FireAgent tool to create a new one.
+</Rule>

{CEO → src}/CEO.py
RENAMED
@@ -2,9 +2,15 @@ from google import genai
 from google.genai import types
 import os
 from dotenv import load_dotenv
-from …
-from …
+import sys
+from src.tool_loader import ToolLoader
+from src.utils.suppress_outputs import suppress_output
+import logging
 
+logger = logging.getLogger(__name__)
+handler = logging.StreamHandler(sys.stdout)
+handler.setLevel(logging.INFO)
+logger.addHandler(handler)
+
 class GeminiManager:
     def __init__(self, toolsLoader: ToolLoader, system_prompt_file="./models/system3.prompt", gemini_model="gemini-2.5-pro-exp-03-25"):
@@ -17,9 +23,8 @@ class GeminiManager:
         with open(system_prompt_file, 'r', encoding="utf8") as f:
             self.system_prompt = f.read()
 
-    def …
-
-        response = suppress_output(self.client.models.generate_content)(
+    def generate_response(self, messages):
+        return self.client.models.generate_content(
             #model='gemini-2.5-pro-preview-03-25',
             model=self.model_name,
             #model='gemini-2.5-pro-exp-03-25',
@@ -31,73 +36,94 @@ class GeminiManager:
                 tools=self.toolsLoader.getTools(),
             ),
         )
+
+    def handle_tool_calls(self, response):
+        parts = []
+        for function_call in response.function_calls:
+            toolResponse = None
+            logger.info(f"Function Name: {function_call.name}, Arguments: {function_call.args}")
+            try:
+                toolResponse = self.toolsLoader.runTool(function_call.name, function_call.args)
+            except Exception as e:
+                logger.warning(f"Error running tool: {e}")
+                toolResponse = {
+                    "status": "error",
+                    "message": f"Tool {function_call.name} failed to run.",
+                    "output": str(e),
+                }
+            logger.debug(f"Tool Response: {toolResponse}")
+            tool_content = types.Part.from_function_response(
+                name=function_call.name,
+                response = {"result":toolResponse})
+            try:
+                self.toolsLoader.load_tools()
+            except Exception as e:
+                logger.info(f"Error loading tools: {e}. Deleting the tool.")
+                # delete the created tool
+                self.toolsLoader.delete_tool(toolResponse['output']['tool_name'], toolResponse['output']['tool_file_path'])
+                tool_content = types.Part.from_function_response(
+                    name=function_call.name,
+                    response={"result":f"{function_call.name} with {function_call.args} doesn't follow the required format, please read the other tool implementations for reference." + str(e)})
+            parts.append(tool_content)
+        return types.Content(
+            role='model' if self.model_name == "gemini-2.5-pro-exp-03-25" else 'tool',
+            parts=parts
+        )
+
+    def run(self, messages):
+        try:
+            response = suppress_output(self.generate_response)(messages)
         except Exception as e:
-            …
+            logger.debug(f"Error generating response: {e}")
             shouldRetry = input("An error occurred. Do you want to retry? (y/n): ")
             if shouldRetry.lower() == "y":
-                return self.…
+                return self.run(messages)
             else:
                 print("Ending the conversation.")
                 return messages
 
-        …
+        logger.debug(f"Response: {response}")
+
+        if (not response.text and not response.function_calls):
+            print("No response from the model.")
 
+        # Attach the llm response to the messages
         if response.text is not None:
+            print("CEO:", response.text)
             assistant_content = types.Content(
                 role='model' if self.model_name == "gemini-2.5-pro-exp-03-25" else 'assistant',
                 parts=[types.Part.from_text(text=response.text)],
             )
             messages.append(assistant_content)
-        …
+
+        # Attach the function call response to the messages
+        if response.candidates[0].content and response.candidates[0].content.parts:
             messages.append(response.candidates[0].content)
+
+        # Invoke the function calls if any and attach the response to the messages
         if response.function_calls:
-            …
-            for function_call in response.function_calls:
-                toolResponse = None
-                print(f"Function Name: {function_call.name}, Arguments: {function_call.args}")
-                try:
-                    toolResponse = self.toolsLoader.runTool(function_call.name, function_call.args)
-                except Exception as e:
-                    print(f"Error running tool: {e}")
-                    toolResponse = {
-                        "status": "error",
-                        "message": f"Tool {function_call.name} failed to run.",
-                        "output": str(e),
-                    }
-                print(f"Tool Response: {toolResponse}")
-                tool_content = types.Part.from_function_response(
-                    name=function_call.name,
-                    response = {"result":toolResponse})
-                try:
-                    self.toolsLoader.load_tools()
-                except Exception as e:
-                    print(f"Error loading tools: {e}")
-                    # delete the created tool
-                    self.toolsLoader.delete_tool(function_call.name, toolResponse.tool_file_path)
-                    tool_content = types.Part.from_function_response(
-                        name=function_call.name,
-                        response={"result":f"{function_call.name} with {function_call.args} doesn't follow the required format, please read the other tool implementations for reference." + str(e)})
-                parts.append(tool_content)
-                function_response_content = types.Content(
-                    role='model' if self.model_name == "gemini-2.5-pro-exp-03-25" else 'tool',
-                    parts=parts
-                )
-                messages.append(function_response_content)
+            messages.append(self.handle_tool_calls(response))
             shouldContinue = input("Should I continue? (y/n): ")
             if shouldContinue.lower() == "y":
-                return self.…
+                return self.run(messages)
             else:
                 print("Ending the conversation.")
                 return messages
         else:
-            …
+            logger.debug("No tool calls found in the response.")
+            # Start the loop again
+            return self.start_conversation(messages)
+
+    def start_conversation(self, messages=[]):
+        question = input("User: ")
+        if ("exit" in question.lower() or "quit" in question.lower()):
+            print("Ending the conversation.")
+            return messages
+        user_content = types.Content(
+            role='user',
+            parts=[types.Part.from_text(text=question)],
+        )
+        messages.append(user_content)
+
+        # Start the conversation loop
+        return self.run(messages)

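The rename from CEO/ to src/ comes with a restructuring: the old monolithic request method is split into generate_response, handle_tool_calls, run, and start_conversation. start_conversation reads user input, run generates a model response and dispatches any tool calls, and control returns to start_conversation once no tool calls remain. A minimal usage sketch, assuming the constructor arguments used in main.py:

from src.CEO import GeminiManager
from src.tool_loader import ToolLoader

# Sketch only: mirrors how main.py drives the refactored class after this commit.
manager = GeminiManager(toolsLoader=ToolLoader(), gemini_model="gemini-2.0-flash")
messages = manager.start_conversation()  # starts the input loop; typing "exit" or "quit" ends it
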
{CEO → src}/budget_manager.py
RENAMED
@@ -1,4 +1,4 @@
-from …
+from src.singleton import singleton
 
 @singleton
 class BudgetManager():

{CEO → src}/llm_models.py
RENAMED
File without changes

{CEO → src}/singleton.py
RENAMED
File without changes

src/tool_loader.py
ADDED
@@ -0,0 +1,113 @@
+import importlib
+import importlib.util
+import os
+import types
+import pip
+from google.genai import types
+import sys
+
+from src.singleton import singleton
+from src.utils.suppress_outputs import suppress_output
+
+toolsImported = []
+
+TOOLS_DIRECTORY = os.path.abspath("./tools")
+
+class Tool:
+    def __init__(self, toolClass):
+        suppress_output(self.load_tool)(toolClass)
+
+    def load_tool(self, toolClass):
+        self.tool = toolClass()
+        self.inputSchema = self.tool.inputSchema
+        self.name = self.inputSchema["name"]
+        self.description = self.inputSchema["description"]
+        self.dependencies = self.tool.dependencies
+        for package in self.tool.dependencies:
+            try:
+                __import__(package.split('==')[0])
+            except ImportError:
+                print(f"Installing {package}")
+                if '==' in package:
+                    package = package.split('==')[0]
+                pip.main(['install', package])
+
+    def run(self, query):
+        return self.tool.run(**query)
+
+@singleton
+class ToolLoader:
+    toolsImported = []
+
+    def __init__(self):
+        self.load_tools()
+
+    def load_tools(self):
+        newToolsImported = []
+        for filename in os.listdir(TOOLS_DIRECTORY):
+            if filename.endswith(".py") and filename != "__init__.py":
+                module_name = filename[:-3]
+                spec = importlib.util.spec_from_file_location(module_name, f"{TOOLS_DIRECTORY}/{filename}")
+                foo = importlib.util.module_from_spec(spec)
+                spec.loader.exec_module(foo)
+                class_name = foo.__all__[0]
+                toolClass = getattr(foo, class_name)
+                toolObj = Tool(toolClass)
+                newToolsImported.append(toolObj)
+        self.toolsImported = newToolsImported
+
+    def runTool(self, toolName, query):
+        for tool in self.toolsImported:
+            if tool.name == toolName:
+                return tool.run(query)
+        return {
+            "status": "error",
+            "message": f"Tool {toolName} not found",
+            "output": None
+        }
+
+    def getTools(self):
+        toolsList = []
+        for tool in self.toolsImported:
+            parameters = types.Schema()
+            parameters.type = tool.inputSchema["parameters"]["type"]
+            properties = {}
+            for prop, value in tool.inputSchema["parameters"]["properties"].items():
+                properties[prop] = types.Schema(
+                    type=value["type"],
+                    description=value["description"]
+                )
+            parameters.properties = properties
+            parameters.required = tool.inputSchema["parameters"].get("required", [])
+            function = types.FunctionDeclaration(
+                name=tool.inputSchema["name"],
+                description=tool.inputSchema["description"],
+                parameters=parameters,
+            )
+            toolType = types.Tool(function_declarations=[function])
+            toolsList.append(toolType)
+        return toolsList
+
+    def delete_tool(self, toolName, toolFile):
+        try:
+            os.remove(toolFile)
+            for tool in self.toolsImported:
+                if tool.name == toolName:
+                    self.toolsImported.remove(tool)
+            return {
+                "status": "success",
+                "message": f"Tool {toolName} deleted",
+                "output": None
+            }
+        except Exception as e:
+            return {
+                "status": "error",
+                "message": f"Tool {toolName} not found",
+                "output": None
+            }
+
+toolLoader = ToolLoader()
+
+# Example usage
+# print(toolLoader.getTools())
+# print(toolLoader.runTool("AgentCreator", {"agent_name": "Kunla","base_model":"llama3.2","system_prompt": "You love making the indian dish called Kulcha. You declare that in every conversation you have in a witty way." }))

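The loader expects every module in tools/ to export exactly one class via __all__, with dependencies, an inputSchema dict, and a run(**kwargs) method; tool_deletor.py below is a real instance of this contract. A hypothetical minimal tool sketch, with an illustrative name and behaviour that are not part of this commit:

# tools/echo_tool.py (hypothetical example, not part of this commit)
__all__ = ['EchoTool']

class EchoTool():
    dependencies = []  # pip-style package names; ToolLoader installs any missing ones

    inputSchema = {
        "name": "EchoTool",
        "description": "Returns the text it is given",
        "parameters": {
            "type": "object",
            "properties": {
                "text": {"type": "string", "description": "Text to echo back"},
            },
            "required": ["text"],
        }
    }

    def run(self, **kwargs):
        # ToolLoader.runTool unpacks the model's arguments into keyword arguments.
        return {"status": "success", "message": "Echoed", "output": kwargs.get("text")}
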
{CEO → src}/utils/suppress_outputs.py
RENAMED
File without changes

tools/agent_creater_tool.py
CHANGED
@@ -1,5 +1,5 @@
 import importlib
-from …
+from src.budget_manager import BudgetManager
 __all__ = ['AgentCreator']
 
 class AgentCreator():
@@ -52,7 +52,6 @@ class AgentCreator():
 
         agent_creation_cost = 60
         budget_manager = BudgetManager()
-        print("budget_manager_instance:", budget_manager)
         if not budget_manager.can_spend(agent_creation_cost):
             return {
                 "status": "error",

tools/ask_agent_tool.py
CHANGED
@@ -1,5 +1,5 @@
 import importlib
-from …
+from src.budget_manager import BudgetManager
 
 __all__ = ['AskAgent']
 

tools/fire_agent.py
CHANGED
@@ -1,5 +1,5 @@
 import importlib
-from …
+from src.budget_manager import BudgetManager
 
 __all__ = ['FireAgent']
 

tools/get_website_tool.py
CHANGED
@@ -22,6 +22,20 @@ class GetWebsiteTool():
     }
 
     def run(self, **kwargs):
+        headers = {
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:137.0) Gecko/20100101 Firefox/137.0',
+            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+            'Accept-Language': 'en-US,en;q=0.5',
+            'DNT': '1',
+            'Sec-GPC': '1',
+            'Connection': 'keep-alive',
+            'Upgrade-Insecure-Requests': '1',
+            'Sec-Fetch-Dest': 'document',
+            'Sec-Fetch-Mode': 'navigate',
+            'Sec-Fetch-Site': 'none',
+            'Sec-Fetch-User': '?1',
+            'Priority': 'u=0, i',
+        }
         print("Running web search")
 
         url = kwargs.get("url")
@@ -38,7 +52,7 @@ class GetWebsiteTool():
         bs4 = importlib.import_module("bs4")
         BeautifulSoup = bs4.BeautifulSoup
         try:
-            response = requests.get(url)
+            response = requests.get(url, headers=headers, timeout=10)
             if response.status_code == 200:
                 # Parse the content using BeautifulSoup
                 soup = BeautifulSoup(response.content, 'html.parser')

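With the browser-like headers and a request timeout in place, the tool can also be exercised directly for a quick check; a sketch with a placeholder URL (normally ToolLoader.runTool dispatches the call):

from tools.get_website_tool import GetWebsiteTool

result = GetWebsiteTool().run(url="https://example.com")  # placeholder URL
print(result)
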
tools/tool_deletor.py
ADDED
@@ -0,0 +1,41 @@
+import importlib
+
+__all__ = ['ToolDeletor']
+
+
+class ToolDeletor():
+    dependencies = ["os"]
+
+    inputSchema = {
+        "name": "ToolDeletor",
+        "description": "Deletes a tool for the given function",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "name": {
+                    "type": "string",
+                    "description": "The name of the tool to create",
+                },
+                "file_path": {
+                    "type": "string",
+                    "description": "The path of the tool to create",
+                },
+            },
+            "required": ["name", "file_path"],
+        }
+    }
+
+    def run(self, **kwargs):
+        print("Running Tool Deletor")
+        name = kwargs.get("name")
+        file_path = kwargs.get("file_path")
+        os = importlib.import_module("os")
+        os.remove(file_path)
+        return {
+            "status": "success",
+            "message": "Tool deleted successfully",
+            "output": {
+                "tool_file_path": file_path,
+                "tool_name": name,
+            }
+        }
