helloparthshah Kunal Pai harshil-21 committed on
Commit
2ea8556
·
1 Parent(s): 9d69c00

Switching to gemini for CEO

Browse files

Co-authored-by: Kunal Pai <kunpai@users.noreply.github.com>
Co-authored-by: Harshil Patel <hpppatel@ucdavis.edu>

CEO/CEO.py CHANGED
@@ -1,144 +1,65 @@
1
- from enum import Enum
2
- from pydantic import BaseModel, Field
3
- from typing import List, Dict, Optional
4
- from pathlib import Path
5
- import ollama
6
 
7
  from CEO.tool_loader import ToolLoader
8
 
9
- # Enum for Model Types
10
- class ModelType(Enum):
11
- LM_3B = "LM-3B"
12
- LM_5B = "LM-5B"
13
- LM_7B = "LM-7B"
14
- LLM = "LLM"
15
-
16
- # Enum for AI Companies
17
- class AICompany(Enum):
18
- OPENAI = "OpenAI"
19
- GOOGLE = "Google"
20
- META = "Meta"
21
- CLAUDE = "Claude"
22
- MISTRAL = "Mistral"
23
-
24
- # Enum for Agent Specializations
25
- class Specialization(Enum):
26
- NLP = "Natural Language Processing"
27
- CV = "Computer Vision"
28
- RL = "Reinforcement Learning"
29
- ML = "Machine Learning"
30
- DATA_SCIENCE = "Data Science"
31
-
32
- # Enum for Model Parameters (Temperature, num_ctx, etc.)
33
- class ModelParameters(Enum):
34
- NUM_CTX = 4096
35
- TEMPERATURE = 0.2 # A typical temperature value for model responses
36
- TOP_K = 50 # Number of top tokens to consider during generation
37
-
38
- class Subtask(BaseModel):
39
- subtask_id: str = Field(..., description="Unique identifier for the subtask")
40
- description: str = Field(..., description="Description of the subtask")
41
- assigned_to: str = Field(..., description="ID of the agent or API handling the subtask")
42
-
43
- class Agent(BaseModel):
44
- agent_id: str = Field(..., description="Unique identifier for the hired agent")
45
- model_type: ModelType = Field(..., description="Parameters of model used: 3 billion, 5 billion, 7 billion, LLM")
46
- company: AICompany = Field(..., description="Company name of the agent: OpenAI, Google, Meta, Claude, Mistral")
47
- specialization: Specialization = Field(..., description="Task specialization of the agent")
48
- cost: float = Field(..., description="Cost of hiring the agent")
49
-
50
- class APIUtilization(BaseModel):
51
- api_name: str = Field(..., description="Name of the external API used")
52
- endpoint: str = Field(..., description="API endpoint URL")
53
- parameters: Dict[str, str] = Field(..., description="Input parameters and their types")
54
- reasoning: str = Field(..., description="Explanation for using this API")
55
-
56
- class AgentManagement(BaseModel):
57
- hired: List[Agent] = Field(default=[], description="List of hired agents")
58
-
59
- class ToolCall(BaseModel):
60
- function: str = Field(..., description="Name of the function to be called")
61
- arguments: Dict[str, str] = Field(..., description="Arguments for the function call")
62
-
63
- class CEOResponse(BaseModel):
64
- # decision: str = Field(..., description="Decision made by the CEO: Hire or Assign_API")
65
- # task_breakdown: List[Subtask] = Field(..., description="List of decomposed subtasks")
66
- # agent_management: AgentManagement = Field(..., description="Details of agent hiring")
67
- # api_utilization: Optional[List[APIUtilization]] = Field(default=None, description="List of utilized APIs, if any")
68
- tools: List[ToolCall] = Field(default=None, description="List of tool or agent calls made by the model")
69
- message: str = Field(default=None, description="Message content from the model")
70
-
71
- class OllamaModelManager:
72
- def __init__(self, toolsLoader: ToolLoader, model_name="HASHIRU-CEO", system_prompt_file="./models/system2.prompt"):
73
- self.model_name = model_name
74
- # Get the directory of the current script and construct the path to system.prompt
75
- script_dir = Path(__file__).parent
76
- self.system_prompt_file = system_prompt_file
77
  self.toolsLoader = toolsLoader
78
  self.toolsLoader.load_tools()
79
- self.create_model()
80
-
81
- def is_model_loaded(self, model):
82
- loaded_models = [m.model for m in ollama.list().models]
83
- return model in loaded_models or f'{model}:latest' in loaded_models
84
-
85
- def create_model(self):
86
- with open(self.system_prompt_file, 'r', encoding="utf8") as f:
87
- system = f.read()
88
- # system += "Tools\n"+str(self.toolsLoader.getTools())
89
-
90
- if not self.is_model_loaded(self.model_name):
91
- print(f"Creating model {self.model_name}")
92
- ollama.create(
93
- model=self.model_name,
94
- from_='mistral-nemo',
95
- system=system,
96
- # parameters={"temperature": ModelParameters.TEMPERATURE.value}
97
- )
98
 
99
  def request(self, messages):
100
- print(f"messages: {messages}")
101
- response = ollama.chat(
102
- model=self.model_name,
103
- messages=messages,
104
- # format=CEOResponse.model_json_schema(),
105
- tools=self.toolsLoader.getTools(),
 
 
106
  )
107
- print(f"Response: {response}")
108
- # response = CEOResponse.model_validate_json(response['message']['content'])
109
- if "EOF" in response.message.content:
110
- return messages
111
- if response.message.tool_calls:
112
- for tool_call in response.message.tool_calls:
113
- print(f"Tool Name: {tool_call.function.name}, Arguments: {tool_call.function.arguments}")
114
- toolResponse = self.toolsLoader.runTool(tool_call.function.name, tool_call.function.arguments)
 
 
 
 
 
 
 
 
 
 
115
  print(f"Tool Response: {toolResponse}")
116
- role = "tool"
117
- if "role" in toolResponse:
118
- role = toolResponse["role"]
119
- messages.append({"role": role, "content": str(toolResponse)})
120
  try:
121
  self.toolsLoader.load_tools()
122
  except Exception as e:
123
  print(f"Error loading tools: {e}")
124
- messages.append({"role": "assistant", "content": "Error loading new tools."})
 
 
 
 
 
 
 
125
  self.request(messages)
126
- # if response.tools:
127
- # for tool_call in response.tools:
128
- # print(f"Tool Name: {tool_call.function}, Arguments: {tool_call.arguments}")
129
- # toolResponse = self.toolsLoader.runTool(tool_call.function, tool_call.arguments)
130
- # print(f"Tool Response: {toolResponse}")
131
- # role = "tool"
132
- # if "role" in toolResponse:
133
- # role = toolResponse["role"]
134
- # messages.append({"role": role, "content": str(toolResponse)})
135
- # self.request(messages)
136
  else:
137
  print("No tool calls found in the response.")
138
- messages.append({"role": "assistant", "content": response.message})
139
- print(f"Messages: {messages}")
140
- # ask_user_tool = AskUser()
141
- # ask_user_response = ask_user_tool.run(prompt=response.message.content)
142
- # messages.append({"role": "user", "content": ask_user_response})
143
- # self.request(messages)
144
  return messages
 
1
+ from google import genai
2
+ from google.genai import types
3
+ import os
4
+ from dotenv import load_dotenv
 
5
 
6
  from CEO.tool_loader import ToolLoader
7
 
8
+ class GeminiManager:
9
+ def __init__(self, toolsLoader: ToolLoader, system_prompt_file="./models/system2.prompt"):
10
+ load_dotenv()
11
+ self.API_KEY = os.getenv("GEMINI_KEY")
12
+ self.client = genai.Client(api_key=self.API_KEY)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  self.toolsLoader = toolsLoader
14
  self.toolsLoader.load_tools()
15
+ with open(system_prompt_file, 'r', encoding="utf8") as f:
16
+ self.system_prompt = f.read()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
  def request(self, messages):
19
+ response = self.client.models.generate_content(
20
+ model='gemini-2.0-flash-001',
21
+ contents=messages,
22
+ config=types.GenerateContentConfig(
23
+ system_instruction=self.system_prompt,
24
+ temperature=0.0,
25
+ tools=self.toolsLoader.getTools(),
26
+ ),
27
  )
28
+
29
+ if response.text is not None:
30
+ print(f"Response: {response.text}")
31
+ assistant_content = types.Content(
32
+ role='assistant',
33
+ parts=[types.Part.from_text(text=response.text)],
34
+ )
35
+ messages.append(assistant_content)
36
+ if "EOF" in response.text:
37
+ return messages
38
+ if response.candidates[0].content:
39
+ messages.append(response.candidates[0].content)
40
+ if response.function_calls:
41
+ parts = []
42
+ for function_call in response.function_calls:
43
+ toolResponse = None
44
+ print(f"Function Name: {function_call.name}, Arguments: {function_call.args}")
45
+ toolResponse = self.toolsLoader.runTool(function_call.name, function_call.args)
46
  print(f"Tool Response: {toolResponse}")
47
+ tool_content = types.Part.from_function_response(
48
+ name=function_call.name,
49
+ response = {"result":toolResponse})
 
50
  try:
51
  self.toolsLoader.load_tools()
52
  except Exception as e:
53
  print(f"Error loading tools: {e}")
54
+ tool_content = types.Part.from_function_response(
55
+ name=function_call.name,
56
+ response={"result":"Error loading new tools."+str(e)})
57
+ parts.append(tool_content)
58
+ function_response_content = types.Content(
59
+ role='tool', parts=parts
60
+ )
61
+ messages.append(function_response_content)
62
  self.request(messages)
 
 
 
 
 
 
 
 
 
 
63
  else:
64
  print("No tool calls found in the response.")
 
 
 
 
 
 
65
  return messages
CEO/tool_loader.py CHANGED
@@ -1,7 +1,9 @@
1
  import importlib
2
  import importlib.util
3
  import os
 
4
  import pip
 
5
 
6
  toolsImported = []
7
 
@@ -57,10 +59,27 @@ class ToolLoader:
57
  def getTools(self):
58
  toolsList = []
59
  for tool in self.toolsImported:
60
- toolsList.append({
61
- "type": "function",
62
- "function": tool.inputSchema
63
- })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  return toolsList
65
 
66
  toolLoader = ToolLoader()
 
1
  import importlib
2
  import importlib.util
3
  import os
4
+ import types
5
  import pip
6
+ from google.genai import types
7
 
8
  toolsImported = []
9
 
 
59
  def getTools(self):
60
  toolsList = []
61
  for tool in self.toolsImported:
62
+ parameters = types.Schema()
63
+ parameters.type = tool.inputSchema["parameters"]["type"]
64
+ properties = {}
65
+ for prop, value in tool.inputSchema["parameters"]["properties"].items():
66
+ properties[prop] = types.Schema(
67
+ type=value["type"],
68
+ description=value["description"]
69
+ )
70
+ parameters.properties = properties
71
+ parameters.required = tool.inputSchema["parameters"].get("required", [])
72
+ function = types.FunctionDeclaration(
73
+ name=tool.inputSchema["name"],
74
+ description=tool.inputSchema["description"],
75
+ parameters=parameters,
76
+ )
77
+ toolType = types.Tool(function_declarations=[function])
78
+ toolsList.append(toolType)
79
+ # toolsList.append({
80
+ # "type": "function",
81
+ # "function": tool.inputSchema
82
+ # })
83
  return toolsList
84
 
85
  toolLoader = ToolLoader()
main.py CHANGED
@@ -1,6 +1,7 @@
 
1
  from typing import List
2
  from googlesearch import search
3
- from CEO.CEO import OllamaModelManager
4
  from CEO.tool_loader import ToolLoader
5
 
6
  # Define the web search tool function.
@@ -40,18 +41,20 @@ if __name__ == "__main__":
40
 
41
  # Load the tools using the ToolLoader class.
42
  tool_loader = ToolLoader()
43
- # tools.extend(tool_loader.getTools())
44
 
45
- # Create the Ollama model manager and ensure the model is set up.
46
- model_manager = OllamaModelManager(toolsLoader=tool_loader)
47
 
48
  # Example prompt instructing the CEO model to create a strategy for Ashton Hall.
49
  # The prompt explicitly mentions that it can use the web_search tool if needed,
50
  # and that it is allowed to choose the website for the search.
51
  task_prompt = (
52
- "Create a tool to get the current system time and invoke it to get the current time."
53
  )
54
 
55
  # Request a CEO response with the prompt.
56
- response = model_manager.request([{"role": "user", "content": task_prompt}])
 
 
 
 
57
  print("\nCEO Response:", response)
 
1
+ from google.genai import types
2
  from typing import List
3
  from googlesearch import search
4
+ from CEO.CEO import GeminiManager
5
  from CEO.tool_loader import ToolLoader
6
 
7
  # Define the web search tool function.
 
41
 
42
  # Load the tools using the ToolLoader class.
43
  tool_loader = ToolLoader()
 
44
 
45
+ model_manager = GeminiManager(toolsLoader=tool_loader)
 
46
 
47
  # Example prompt instructing the CEO model to create a strategy for Ashton Hall.
48
  # The prompt explicitly mentions that it can use the web_search tool if needed,
49
  # and that it is allowed to choose the website for the search.
50
  task_prompt = (
51
+ "Get me the current time here"
52
  )
53
 
54
  # Request a CEO response with the prompt.
55
+ user_prompt_content = types.Content(
56
+ role='user',
57
+ parts=[types.Part.from_text(text=task_prompt)],
58
+ )
59
+ response = model_manager.request([user_prompt_content])
60
  print("\nCEO Response:", response)
models/system2.prompt CHANGED
@@ -26,7 +26,7 @@ Always invoke GetAgents tool to get the list of available agents and their capab
26
  </Rule>
27
 
28
  <Rule>
29
- If an agent isn't already available, invoke the CreateAgent tool to create a new agent with the required capabilities. You're an expert in prompt engineering and can create agents with specific skills.
30
  </Rule>
31
 
32
  <Rule>
@@ -38,7 +38,7 @@ If the agent is not able to answer the question, invoke the AskUser tool to get
38
  </Rule>
39
 
40
  <Rule>
41
- In order to execute tasks on real time data, math calculations, or any other operations, invoke the CreateTool tool to create a new tool with the required capabilities. The tools are created in Python and must follow this strict schema:
42
  import importlib
43
 
44
  __all__ = ['WeatherApi']
 
26
  </Rule>
27
 
28
  <Rule>
29
+ If an agent isn't already available, invoke the AgentCreator tool to create a new agent with the required capabilities. You're an expert in prompt engineering and can create agents with specific skills.
30
  </Rule>
31
 
32
  <Rule>
 
38
  </Rule>
39
 
40
  <Rule>
41
+ In order to execute tasks on real time data, math calculations, or any other operations, invoke the ToolCreator tool to create a new tool with the required capabilities. The tools are created in Python and must follow this strict schema:
42
  import importlib
43
 
44
  __all__ = ['WeatherApi']
models/system_.prompt CHANGED
@@ -13,9 +13,11 @@ General Guidelines:
13
  3. When more details are needed, ask the user for clarification by invoking the AskUser (ASK_USR) command.
14
  4. Prior to invoking any other tools, always invoke the GetAgents (GET) command to retrieve the list of available agents and their capabilities.
15
  5. If a needed agent isn't available, create one using the CREATE_LM (or CreateAgent) command with the required capabilities.
16
- 6. When interacting with an agent, use the AskAgent tool to forward the query.
17
  7. If the agent cannot answer the query, then use ASK_USR to request additional clarification.
18
  8. For real-time data, computations, or specialized operations, create a new tool using CREATE_TOOL following the strict schema provided.
 
 
19
 
20
  Command Set:
21
  ------------
@@ -65,7 +67,7 @@ Tools: External programs used to perform specific tasks. They can be created, in
65
  </Info>
66
 
67
  <Info>
68
- Agents: Special entities that handle complex tasks or queries. They are invoked via tools using AskAgent. Create agents with specific capabilities when necessary.
69
  </Info>
70
 
71
  <Rule>
@@ -74,7 +76,7 @@ Agents: Special entities that handle complex tasks or queries. They are invoked
74
  3. If more information is required, invoke ASK_USR to request clarification.
75
  4. Always invoke GET to list available agents before using any tool.
76
  5. If an agent is missing, use CREATE_LM (or a similar CreateAgent command) to create one.
77
- 6. Use AskAgent to interact with an agent once created.
78
  7. If the agent cannot resolve the query, then request more details from the user using ASK_USR.
79
  8. For tasks involving real-time data, computations, or similar operations, create a tool with CREATE_TOOL. The tool’s code must strictly adhere to the provided schema. Be aware of recognizing if a task requires a tool.
80
  9. In case of API Keys, ask the user to provide one.
 
13
  3. When more details are needed, ask the user for clarification by invoking the AskUser (ASK_USR) command.
14
  4. Prior to invoking any other tools, always invoke the GetAgents (GET) command to retrieve the list of available agents and their capabilities.
15
  5. If a needed agent isn't available, create one using the CREATE_LM (or CreateAgent) command with the required capabilities.
16
+ 6. When interacting with an agent, use the CALL_TOOL tool to forward the query.
17
  7. If the agent cannot answer the query, then use ASK_USR to request additional clarification.
18
  8. For real-time data, computations, or specialized operations, create a new tool using CREATE_TOOL following the strict schema provided.
19
+ 9. Make sure all the code you write for tools is production-ready with no dummy configs or defaults.
20
+ It should be ready-to-use out of the box.
21
 
22
  Command Set:
23
  ------------
 
67
  </Info>
68
 
69
  <Info>
70
+ Agents: Special entities that handle complex tasks or queries. They are invoked via tools using CALL_TOOL. Create agents with specific capabilities when necessary.
71
  </Info>
72
 
73
  <Rule>
 
76
  3. If more information is required, invoke ASK_USR to request clarification.
77
  4. Always invoke GET to list available agents before using any tool.
78
  5. If an agent is missing, use CREATE_LM (or a similar CreateAgent command) to create one.
79
+ 6. Use CALL_TOOL to interact with an agent once created.
80
  7. If the agent cannot resolve the query, then request more details from the user using ASK_USR.
81
  8. For tasks involving real-time data, computations, or similar operations, create a tool with CREATE_TOOL. The tool’s code must strictly adhere to the provided schema. Be aware of recognizing if a task requires a tool.
82
  9. In case of API Keys, ask the user to provide one.
tools/GetCurrentTime.py DELETED
@@ -1,4 +0,0 @@
1
- import datetime
2
-
3
- def get_current_time():
4
- return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
 
 
 
 
 
tools/TimeApi.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime
2
+
3
+ __all__ = ['TimeApi']
4
+
5
+ class TimeApi():
6
+ dependencies = []
7
+
8
+ inputSchema = {
9
+ "name": "TimeApi",
10
+ "description": "Returns the current time for a given location",
11
+ "parameters": {
12
+ "type": "object",
13
+ "properties": {
14
+ "location": {
15
+ "type": "string",
16
+ "description": "The location for which to get the current time",
17
+ },
18
+ },
19
+ "required": ["location"],
20
+ }
21
+ }
22
+
23
+ def __init__(self):
24
+ pass
25
+
26
+ def run(self, **kwargs):
27
+ location = kwargs.get("location")
28
+ try:
29
+ # This will only work if the timezone is configured correctly on the server
30
+ now = datetime.datetime.now(datetime.timezone.utc)
31
+ return {
32
+ "status": "success",
33
+ "message": f"Current time in {location} is {now.strftime('%Y-%m-%d %H:%M:%S %Z%z')}",
34
+ "error": None,
35
+ "output": now.strftime('%Y-%m-%d %H:%M:%S %Z%z')
36
+ }
37
+ except Exception as e:
38
+ return {
39
+ "status": "error",
40
+ "message": f"Could not get current time for {location}",
41
+ "error": str(e),
42
+ "output": None
43
+ }
tools/agent_creater_tool.py CHANGED
@@ -29,7 +29,8 @@ class AgentCreator():
29
  "type": "string",
30
  "description": "Description of the agent. This is a string that describes the agent and its capabilities. It should be a single line description.",
31
  },
32
- }
 
33
  }
34
  }
35
 
 
29
  "type": "string",
30
  "description": "Description of the agent. This is a string that describes the agent and its capabilities. It should be a single line description.",
31
  },
32
+ },
33
+ "required": ["agent_name", "base_model", "system_prompt", "description"],
34
  }
35
  }
36
 
tools/ask_agent_tool.py CHANGED
@@ -9,7 +9,7 @@ class AskAgent():
9
  "pydantic_core==2.33.0"]
10
 
11
  inputSchema = {
12
- "name": "AgentCreator",
13
  "description": "Asks an AI agent a question and gets a response. The agent must be created using the AgentCreator tool before using this tool.",
14
  "parameters": {
15
  "type": "object",
@@ -22,7 +22,8 @@ class AskAgent():
22
  "type": "string",
23
  "description": "This is the prompt that will be used to ask the agent a question. It should be a string that describes the question to be asked.",
24
  }
25
- }
 
26
  }
27
  }
28
 
 
9
  "pydantic_core==2.33.0"]
10
 
11
  inputSchema = {
12
+ "name": "AskAgent",
13
  "description": "Asks an AI agent a question and gets a response. The agent must be created using the AgentCreator tool before using this tool.",
14
  "parameters": {
15
  "type": "object",
 
22
  "type": "string",
23
  "description": "This is the prompt that will be used to ask the agent a question. It should be a string that describes the question to be asked.",
24
  }
25
+ },
26
+ "required": ["agent_name", "prompt"],
27
  }
28
  }
29
 
tools/get_agents_tool.py CHANGED
@@ -11,8 +11,9 @@ class GetAgents():
11
  "description": "Retrieves a list of available AI agents. This tool is used to get the list of available models that can be invoked using the AskAgent tool.",
12
  "parameters": {
13
  "type": "object",
14
- "properties": {}
15
- }
 
16
  }
17
 
18
  def __init__(self):
 
11
  "description": "Retrieves a list of available AI agents. This tool is used to get the list of available models that can be invoked using the AskAgent tool.",
12
  "parameters": {
13
  "type": "object",
14
+ "properties": {},
15
+ "required": [],
16
+ },
17
  }
18
 
19
  def __init__(self):
tools/tool_creator.py CHANGED
@@ -21,7 +21,7 @@ class ToolCreator():
21
  "description": "The code of the tool to create",
22
  },
23
  },
24
- "required": ["name", "content"],
25
  }
26
  }
27
 
 
21
  "description": "The code of the tool to create",
22
  },
23
  },
24
+ "required": ["name", "tool_code"],
25
  }
26
  }
27