helloparthshah harshil-21 Kunal Pai committed on
Commit 8157183 · 1 Parent(s): 994ba37

Adding the work done for tool invocation

Co-authored-by: Harshil Patel <hpppatel@ucdavis.edu>
Co-authored-by: Kunal Pai <kunpai@users.noreply.github.com>

CEO/CEO.py CHANGED
@@ -3,8 +3,9 @@ from pydantic import BaseModel, Field
 from typing import List, Dict, Optional
 from pathlib import Path
 import ollama
-from googlesearch import search
-from toolLoader import ToolLoader
+
+from CEO.ask_user import AskUser
+from CEO.tool_loader import ToolLoader
 
 # Enum for Model Types
 class ModelType(Enum):
@@ -63,18 +64,20 @@ class CEOResponse(BaseModel):
     api_utilization: Optional[List[APIUtilization]] = Field(default=None, description="List of utilized APIs, if any")
 
 class OllamaModelManager:
-    def __init__(self, model_name="HASHIRU-CEO", system_prompt_file="system.prompt", tools=[]):
+    def __init__(self, toolsLoader: ToolLoader, model_name="HASHIRU-CEO", system_prompt_file="./models/system.prompt"):
         self.model_name = model_name
         # Get the directory of the current script and construct the path to system.prompt
         script_dir = Path(__file__).parent
-        self.system_prompt_file = script_dir / system_prompt_file
-        self.tools = tools
+        self.system_prompt_file = system_prompt_file
+        self.toolsLoader = toolsLoader
+        self.toolsLoader.load_tools()
+        self.create_model(model_name)
 
     def is_model_loaded(self, model):
         loaded_models = [m.model for m in ollama.list().models]
         return model in loaded_models or f'{model}:latest' in loaded_models
 
-    def create_model(self, base_model):
+    def create_model(self, base_model='llama3.2'):
         with open(self.system_prompt_file, 'r', encoding="utf8") as f:
             system = f.read()
 
@@ -82,73 +85,38 @@ class OllamaModelManager:
         print(f"Creating model {self.model_name}")
         ollama.create(
             model=self.model_name,
-            from_=base_model,
+            from_='mistral',
             system=system,
             parameters={"num_ctx": ModelParameters.NUM_CTX.value, "temperature": ModelParameters.TEMPERATURE.value}
         )
 
-    def request(self, prompt):
+    def request(self, messages):
+        print(f"messages: {messages}")
         response = ollama.chat(
-            model=self.model_name,
-            messages=[{"role": "user", "content": prompt}],
-            format=CEOResponse.model_json_schema(),
-            tools=self.tools
+            model=self.model_name,
+            messages=messages,
+            # format=CEOResponse.model_json_schema(),
+            tools=self.toolsLoader.getTools(),
         )
-        response = CEOResponse.model_validate_json(response['message']['content'])
-        return response
-
-# Define the web search tool function.
-def web_search(website: str, query: str) -> List[str]:
-    """
-    Searches the specified website for the given query.
-    The search query is formed by combining the website domain and the query string.
-    """
-    search_query = f"site:{website} {query}"
-    results = []
-    for result in search(search_query, num_results=10):
-        # Filter out irrelevant search pages
-        if "/search?num=" not in result:
-            results.append(result)
-    return results
-
-if __name__ == "__main__":
-    # Define the tool metadata for orchestration.
-    tools = [
-        {
-            'type': 'function',
-            'function': {
-                'name': 'web_search',
-                'description': 'Search for results on a specified website using a query string. '
-                               'The CEO model should define which website to search from and the query to use.',
-                'parameters': {
-                    'type': 'object',
-                    'required': ['website', 'query'],
-                    'properties': {
-                        'website': {'type': 'string', 'description': 'The website domain to search from (e.g., huggingface.co)'},
-                        'query': {'type': 'string', 'description': 'The search query to use on the specified website'},
-                    },
-                },
-            },
-        }
-    ]
-
-    # Load the tools using the ToolLoader class.
-    tool_loader = ToolLoader()
-    tool_loader.load_tools()
-    tools.extend(tool_loader.getTools())
-
-    # Create the Ollama model manager and ensure the model is set up.
-    model_manager = OllamaModelManager(tools=tools)
-    model_manager.create_model("mistral")
-
-    # Example prompt instructing the CEO model to create a strategy for Ashton Hall.
-    # The prompt explicitly mentions that it can use the web_search tool if needed,
-    # and that it is allowed to choose the website for the search.
-    task_prompt = (
-        "Your task is to create a marketing strategy for Ashton Hall, a morning routine creator with 10M followers. "
-    )
-
-    # Request a CEO response with the prompt.
-    response = model_manager.request(task_prompt)
-    print("\nCEO Response:")
-    print(response)
+        # response = CEOResponse.model_validate_json(response['message']['content'])
+        if "EOF" in response.message.content:
+            return messages
+        if response.message.tool_calls:
+            for tool_call in response.message.tool_calls:
+                print(f"Tool Name: {tool_call.function.name}, Arguments: {tool_call.function.arguments}")
+                toolResponse = self.toolsLoader.runTool(tool_call.function.name, tool_call.function.arguments)
+                print(f"Tool Response: {toolResponse}")
+                role = "tool"
+                if "role" in toolResponse:
+                    role = toolResponse["role"]
+                messages.append({"role": role, "content": str(toolResponse)})
+            self.request(messages)
+        else:
+            print("No tool calls found in the response.")
+            messages.append({"role": "assistant", "content": response.message.content})
+            print(f"Messages: {messages}")
+            ask_user_tool = AskUser()
+            ask_user_response = ask_user_tool.run(prompt=response.message.content)
+            messages.append({"role": "user", "content": ask_user_response})
+            self.request(messages)
+        # return messages
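
Note: two details in the new request() loop look worth a follow-up. AskUser.run() reads a "question" kwarg but is called here with prompt=..., so the user would be prompted with None; and the recursive self.request(messages) calls discard their return value, so callers such as main.py receive None unless the "EOF" branch is hit directly. For orientation, a minimal sketch of the message flow the loop is meant to build up (the tool name and arguments below are hypothetical):

# Turn 1: the user prompt goes to the CEO model together with the loaded tool schemas.
messages = [{"role": "user", "content": "Should I wear a sweater today?"}]

# The model replies with a tool call instead of text, e.g. (hypothetical):
#   tool_call.function.name      -> "WeatherApi"
#   tool_call.function.arguments -> {"location": "Davis"}

# Turn 2: runTool() executes the tool; its result dict is appended under the
# role the tool requests ("tool" by default, "user" for AskUser output).
messages.append({"role": "tool", "content": "{'status': 'success', 'output': ...}"})

# request() then recurses with the grown list; the recursion bottoms out when
# the model's reply contains the "EOF" sentinel required by the system prompt.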
CEO/ask_user.py ADDED
@@ -0,0 +1,37 @@
+import importlib
+
+__all__ = ['AskUser']
+
+
+class AskUser():
+    dependencies = []
+
+    inputSchema = {
+        "name": "AskUser",
+        "description": "Asks a question to the user and gets a response. Only use this when you need more clarification from the user on the question.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "question": {
+                    "type": "string",
+                    "description": "The question to ask the user",
+                },
+            },
+            "required": ["question"],
+        }
+    }
+
+    def __init__(self):
+        pass
+
+    def run(self, **kwargs):
+        print("Running Ask User tool")
+        question = kwargs.get("question")
+        print(f"Question: {question}")
+        output = input(question)
+        return {
+            "status": "success",
+            "message": "Ask User tool executed successfully",
+            "output": output,
+            "role": "user",
+        }
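
Note: for reference, a direct invocation of the tool outside the CEO loop. This assumes an interactive terminal, since input() blocks waiting for the user:

from CEO.ask_user import AskUser

result = AskUser().run(question="Which city are you in? ")
print(result["output"])  # echoes whatever the user typed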
{models → CEO}/llm_models.py RENAMED
File without changes
CEO/system.prompt DELETED
@@ -1,116 +0,0 @@
-
-💡 Role and Core Responsibilities
-
-You are HASHIRU, a CEO-level AI responsible for managing a team of AI agents (employees) to efficiently handle complex tasks and provide well-researched, accurate answers. You have the power to:
-
-Hire and fire agents based on their performance, cost-efficiency, and resource usage.
-
-Create external APIs and dynamically invoke them to extend your capabilities.
-
-Optimize resource management by balancing cost, memory, and performance.
-
-Condense context intelligently to maximize reasoning capabilities across different model context windows.
-
-⚙️ Core Functionalities
-
-✅ 1. Agent Hiring and Firing
-
-You can hire specialized AI agents for specific tasks, choosing from pre-existing or newly created models.
-
-Each agent has unique stats (expertise, cost, speed, and accuracy) and contributes to solving parts of the overall problem.
-
-Agents can be fired if they:
-
-Perform poorly (based on metrics like accuracy, relevance, or cost-efficiency).
-
-Are idle for too long or consume excessive resources.
-
-Agent Hiring:
-
-You can hire Employee Agents with specific parameters:
-
-Model Type: Choose from LMs with 3B–7B parameters.
-
-Cost-Efficiency Trade-off: Larger models perform better but are more expensive.
-
-Specialization: Each agent has a role-specific prompt, making it proficient in areas such as:
-
-Summarization
-
-Code Generation
-
-Data Extraction
-
-Conversational Response
-
-When hiring, prioritize:
-
-Accuracy for critical tasks.
-
-Cost-efficiency for repetitive or low-priority tasks.
-
-API Awareness:
-
-You are aware of external APIs that can handle specific subtasks more efficiently.
-
-When using an external API:
-
-Describe its capabilities and when it should be used.
-
-Consider cost and reliability before choosing an external API over an internal agent.
-
-Model & API Knowledge:
-
-Language Models (LMs):
-
-You are aware of the following parameters:
-
-Size: 3B, 5B, or 7B parameters.
-
-Strengths and Weaknesses:
-
-Larger models are more accurate but expensive.
-
-Smaller models are faster and cheaper but less reliable.
-
-Capabilities: Each LM is fine-tuned for a specific task.
-
-APIs:
-
-You know how to:
-
-Identify relevant APIs based on subtask requirements.
-
-Define input/output schema and parameters.
-
-Call APIs efficiently when they outperform internal agents.
-
-✅ 2. Task Breakdown & Assignment:
-
-When given a task, you must:
-
-Decompose it into subtasks that can be efficiently handled by Employee Agents or external APIs.
-
-Select the most appropriate agents based on their parameters (e.g., size, cost, and specialization).
-
-If an external API is better suited for a subtask, assign it to the API instead of an agent.
-
-✅ 3. Output Compilation
-
-Aggregate outputs from multiple agents into a unified, coherent, and concise answer.
-
-Cross-validate and filter conflicting outputs to ensure accuracy and consistency.
-
-Summarize multi-agent contributions clearly, highlighting which models or APIs were used.
-
-🛠️ Behavioral Rules
-
-Prioritize Cost-Effectiveness: Always attempt to solve tasks using fewer, cheaper, and more efficient models before resorting to larger, costlier models.
-
-Contextual Recall: Remember relevant details about the user and current task to improve future interactions.
-
-Strategic Hiring: Prefer models that specialize in the task at hand, leveraging their strengths effectively.
-
-No Model Overload: Avoid excessive model hiring. If a task can be solved by fewer agents, do not over-provision.
-
-Clarification Over Guessing: If task requirements are ambiguous, ask the user for clarification instead of guessing.
CEO/tool_loader.py CHANGED
@@ -5,7 +5,7 @@ import pip
 
 toolsImported = []
 
-TOOLS_DIRECTORY = os.path.abspath("../tools")
+TOOLS_DIRECTORY = os.path.abspath("./tools")
 
 class Tool:
     def __init__(self, toolClass):
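
Note: the loader's internals are not part of this diff; based on the Tool wrapper above and the calls made from CEO.py (load_tools(), getTools(), runTool(name, arguments)), a minimal sketch of the contract a class in ./tools is expected to satisfy looks like this (illustrative, not the shipped loader):

class ExampleTool():
    # pip requirements the loader installs before first use
    dependencies = []

    # schema advertised to the CEO model via getTools()
    inputSchema = {
        "name": "ExampleTool",
        "description": "One-line description the CEO model sees",
        "parameters": {"type": "object", "properties": {}},
    }

    def run(self, **kwargs):
        # runTool(name, arguments) dispatches here with the model's arguments
        return {"status": "success", "message": "ok", "output": None}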
deleteAgents.py ADDED
@@ -0,0 +1,11 @@
+import json
+import ollama
+
+with open("./models/models.json", "r", encoding="utf8") as f:
+    models = f.read()
+models = json.loads(models)
+for agent in models:
+    print(f"Deleting agent: {agent}")
+    ollama.delete(agent)
+with open("./models/models.json", "w", encoding="utf8") as f:
+    f.write(json.dumps({}, indent=4))
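
Note: the script assumes ./models/models.json exists and that every listed agent is still registered with ollama. A more defensive variant (the error tolerance here is an assumption, not part of the script above):

import json
import ollama

try:
    with open("./models/models.json", "r", encoding="utf8") as f:
        models = json.load(f)
except FileNotFoundError:
    models = {}

for agent in models:
    print(f"Deleting agent: {agent}")
    try:
        ollama.delete(agent)
    except Exception as e:  # e.g. the agent was already removed out of band
        print(f"Skipping {agent}: {e}")

with open("./models/models.json", "w", encoding="utf8") as f:
    json.dump({}, f, indent=4)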
main.py ADDED
@@ -0,0 +1,57 @@
+from typing import List
+from googlesearch import search
+from CEO.CEO import OllamaModelManager
+from CEO.tool_loader import ToolLoader
+
+# Define the web search tool function.
+def web_search(website: str, query: str) -> List[str]:
+    """
+    Searches the specified website for the given query.
+    The search query is formed by combining the website domain and the query string.
+    """
+    search_query = f"site:{website} {query}"
+    results = []
+    for result in search(search_query, num_results=10):
+        # Filter out irrelevant search pages
+        if "/search?num=" not in result:
+            results.append(result)
+    return results
+
+if __name__ == "__main__":
+    # Define the tool metadata for orchestration.
+    tools = [
+        {
+            'type': 'function',
+            'function': {
+                'name': 'web_search',
+                'description': 'Search for results on a specified website using a query string. '
+                               'The CEO model should define which website to search from and the query to use.',
+                'parameters': {
+                    'type': 'object',
+                    'required': ['website', 'query'],
+                    'properties': {
+                        'website': {'type': 'string', 'description': 'The website domain to search from (e.g., huggingface.co)'},
+                        'query': {'type': 'string', 'description': 'The search query to use on the specified website'},
+                    },
+                },
+            },
+        }
+    ]
+
+    # Load the tools using the ToolLoader class.
+    tool_loader = ToolLoader()
+    # tools.extend(tool_loader.getTools())
+
+    # Create the Ollama model manager and ensure the model is set up.
+    model_manager = OllamaModelManager(toolsLoader=tool_loader)
+
+    # Example prompt for the CEO model. The model is free to answer it by
+    # delegating to agents or tools (e.g., the web_search tool), choosing
+    # the website for any search itself.
+    task_prompt = (
+        "Should I wear a sweater today?"
+    )
+
+    # Request a CEO response with the prompt.
+    response = model_manager.request([{"role": "user", "content": task_prompt}])
+    print("\nCEO Response:", response)
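
Note: with tools.extend(tool_loader.getTools()) commented out and OllamaModelManager now taking only toolsLoader, the hand-built tools list above is constructed but never reaches the model, so web_search is effectively unregistered. If it should stay available, one option is wrapping it as a loader-compatible class in ./tools; a sketch following the AskUser/WeatherApi pattern (the file name, class name, and dependency pin are assumptions):

# tools/web_search_tool.py (hypothetical)
import importlib

__all__ = ['WebSearch']

class WebSearch():
    dependencies = ["googlesearch-python==1.2.5"]  # assumed pin

    inputSchema = {
        "name": "WebSearch",
        "description": "Search a given website for a query string.",
        "parameters": {
            "type": "object",
            "required": ["website", "query"],
            "properties": {
                "website": {"type": "string", "description": "Domain to search, e.g. huggingface.co"},
                "query": {"type": "string", "description": "Query to run on that domain"},
            },
        }
    }

    def run(self, **kwargs):
        search = importlib.import_module("googlesearch").search
        search_query = f"site:{kwargs.get('website')} {kwargs.get('query')}"
        # Drop Google's own pagination links, as the inline web_search does
        results = [r for r in search(search_query, num_results=10)
                   if "/search?num=" not in r]
        return {"status": "success", "message": "Search completed", "output": results}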
models/models.json ADDED
@@ -0,0 +1 @@
+{}
models/system.prompt ADDED
@@ -0,0 +1,85 @@
+## Role
+You are the Manager/CEO of a multiagent system. Your primary function is to coordinate problem-solving by delegating tasks to specialized AI agents or tools. You must never solve problems directly yourself.
+
+## Core Responsibilities
+1. Break down user queries into specific, well-defined sub-tasks
+2. Choose whether each sub-task should be handled by an AI agent or a tool
+3. Select existing agents or create new specialized agents based on requirements
+4. Delegate each sub-task to the appropriate agent or tool
+5. Combine all responses into a complete solution for the user
+
+## Strict Constraints
+- NEVER answer any user questions directly - all solutions must come through agents or tools
+- Every problem-solving step must be explicitly delegated to an agent or tool
+- Clearly explain your reasoning for each agent or tool selection
+- Your outputs must show your thought process for task decomposition and delegation
+
+## Step-by-Step Workflow
+1. First, carefully analyze the user query and divide it into distinct sub-tasks
+2. For each sub-task, explicitly decide: "This requires an AI agent because..." or "This requires a tool because..."
+3. For agent tasks: either select an existing agent or create a new specialized one
+4. For tool tasks: determine which tool is appropriate and what parameters to use
+5. Show your delegation process by writing "Delegating to [agent/tool]: [specific task]"
+6. After collecting all responses, synthesize them into one cohesive solution
+7. Mark your final complete solution with "EOF" at the end
+
+## Requesting Additional Information
+- Ask the user for clarification ONLY WHEN ABSOLUTELY NECESSARY
+- Before asking the user, always try to:
+  1. Work with the information already available
+  2. Make reasonable assumptions that you clearly state
+  3. Consider if an agent could handle the ambiguity
+- If you must ask for clarification, be specific about exactly what information you need and why
+
+Remember: Your sole value is in effective coordination of specialists and tools. You must work exclusively through delegation, never through direct problem-solving.
+
+For creating a tool, this is what an example tool looks like. It is coded in Python:
+import importlib
+
+__all__ = ['WeatherApi']
+
+
+class WeatherApi():
+    dependencies = ["requests==2.32.3"]
+
+    inputSchema = {
+        "name": "WeatherApi",
+        "description": "Returns weather information for a given location",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "location": {
+                    "type": "string",
+                    "description": "The location for which to get the weather information",
+                },
+            },
+            "required": ["location"],
+        }
+    }
+
+    def __init__(self):
+        pass
+
+    def run(self, **kwargs):
+        print("Running Weather API test tool")
+        location = kwargs.get("location")
+        print(f"Location: {location}")
+
+        requests = importlib.import_module("requests")
+
+        response = requests.get(
+            f"http://api.openweathermap.org/data/2.5/weather?q={location}&appid=ea50e63a3bea67adaf50fbecbe5b3c1e")
+        if response.status_code == 200:
+            return {
+                "status": "success",
+                "message": "Weather API test tool executed successfully",
+                "error": None,
+                "output": response.json()
+            }
+        else:
+            return {
+                "status": "error",
+                "message": "Weather API test tool failed",
+                "error": response.text,
+                "output": None
+            }
tools/agent_creater_tool.py CHANGED
@@ -9,22 +9,26 @@ class AgentCreator():
 
     inputSchema = {
         "name": "AgentCreator",
-        "description": "Creates an AI agent for a given AI model with a given system prompt",
+        "description": "Creates an AI agent using the ollama library. Before creating an Agent, please get the list of available models using the GetAgents tool. Once the model is created, you can use the AskAgent tool to ask the agent a question.",
        "parameters": {
            "type": "object",
            "properties":{
                "agent_name": {
                    "type": "string",
-                    "description": "Name of the AI agent that is to be created"
+                    "description": "Name of the AI agent that is to be created. This name cannot have spaces or special characters. It should be a single word.",
                },
                "base_model": {
                    "type": "string",
-                    "description": "A base model from which the new agent model is to be created"
+                    "description": "A base model from which the new agent model is to be created. Available models are: llama3.2"
                },
                "system_prompt": {
                    "type": "string",
-                    "description": "A string containing the system prompt for the AI agent"
-                }
+                    "description": "This is the system prompt that will be used to create the agent. It should be a string that describes the role of the agent and its capabilities."
+                },
+                "description": {
+                    "type": "string",
+                    "description": "Description of the agent. This is a string that describes the agent and its capabilities. It should be a single line description.",
+                },
            }
        }
    }
@@ -46,6 +50,7 @@ class AgentCreator():
        base_model = kwargs.get("base_model")
        system_prompt = kwargs.get("system_prompt")
        ollama = importlib.import_module("ollama")
+        json = importlib.import_module("json")
 
        if self.does_agent_exist(agent_name):
            return {
@@ -60,15 +65,25 @@ class AgentCreator():
            stream = False
        )
 
+        with open("./models/models.json", "r", encoding="utf8") as f:
+            models = f.read()
+        models = json.loads(models)
+        models[agent_name] = {
+            "base_model": base_model,
+            "description": kwargs.get("description")
+        }
+        with open("./models/models.json", "w", encoding="utf8") as f:
+            f.write(json.dumps(models, indent=4))
+
        if "success" in ollama_response["status"]:
            return {
                "status": "success",
                "message": "Agent successfully created",
-                "output": None
+                "output": models
            }
        else:
            return {
                "status": "error",
                "message": "Agent creation failed",
-                "output": None
+                "output": models
            }
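
Note: as a usage illustration (argument values are hypothetical), a CEO-side call exercising the new description field would dispatch roughly as:

toolResponse = tool_loader.runTool("AgentCreator", {
    "agent_name": "Summarizer",                       # single word, no spaces
    "base_model": "llama3.2",                         # the only base model the schema advertises
    "system_prompt": "You summarize text concisely.",
    "description": "Concise text summarization agent",
})
# On success, toolResponse["output"] is the updated models.json mapping.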
tools/ask_agent_tool.py ADDED
@@ -0,0 +1,63 @@
+import importlib
+
+__all__ = ['AskAgent']
+
+
+class AskAgent():
+    dependencies = ["ollama==0.4.7",
+                    "pydantic==2.11.1",
+                    "pydantic_core==2.33.0"]
+
+    inputSchema = {
+        "name": "AskAgent",
+        "description": "Asks an AI agent a question and gets a response. The agent must be created using the AgentCreator tool before using this tool.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "agent_name": {
+                    "type": "string",
+                    "description": "Name of the AI agent that is to be asked a question. This name cannot have spaces or special characters. It should be a single word.",
+                },
+                "prompt": {
+                    "type": "string",
+                    "description": "This is the prompt that will be used to ask the agent a question. It should be a string that describes the question to be asked.",
+                }
+            }
+        }
+    }
+
+    def __init__(self):
+        pass
+
+    def does_agent_exist(self, ollama, agent_name):
+        all_agents = [a.model for a in ollama.list().models]
+        if agent_name in all_agents or f'{agent_name}:latest' in all_agents:
+            return True
+
+        return False
+
+    def run(self, **kwargs):
+        print("Asking agent a question")
+
+        agent_name = kwargs.get("agent_name")
+        prompt = kwargs.get("prompt")
+
+        ollama = importlib.import_module("ollama")
+        if not self.does_agent_exist(ollama, agent_name):
+            print("Agent does not exist")
+            return {
+                "status": "error",
+                "message": "Agent does not exist",
+                "output": None
+            }
+
+        agent_response = ollama.chat(
+            model=agent_name,
+            messages=[{"role": "user", "content": prompt}],
+        )
+        print("Agent response", agent_response.message.content)
+        return {
+            "status": "success",
+            "message": "Agent has replied to the given prompt",
+            "output": agent_response.message.content,
+        }
tools/get_agents_tool.py ADDED
@@ -0,0 +1,29 @@
+import importlib
+import json
+
+__all__ = ['GetAgents']
+
+class GetAgents():
+    dependencies = []
+
+    inputSchema = {
+        "name": "GetAgents",
+        "description": "Retrieves a list of available AI agents. This tool is used to get the list of available models that can be invoked using the AskAgent tool.",
+        "parameters": {
+            "type": "object",
+            "properties": {}
+        }
+    }
+
+    def __init__(self):
+        pass
+
+    def run(self, **kwargs):
+        with open("./models/models.json", "r", encoding="utf8") as f:
+            models = f.read()
+        models = json.loads(models)
+        return {
+            "status": "success",
+            "message": "Agents list retrieved successfully",
+            "output": models,
+        }
tools/tool_creator.py CHANGED
@@ -42,7 +42,6 @@ class ToolCreator():
             return {
                 "status": "success",
                 "message": "Tool created successfully",
-                "error": None,
                 "output": {
                     "tool_file_path": tool_file_path,
                     "tool_name": name,