Harshil Patel commited on
Commit
8a7487d
·
1 Parent(s): 8157183

Update model to llama3.1

Browse files
CEO/CEO.py CHANGED
@@ -4,7 +4,6 @@ from typing import List, Dict, Optional
4
  from pathlib import Path
5
  import ollama
6
 
7
- from CEO.ask_user import AskUser
8
  from CEO.tool_loader import ToolLoader
9
 
10
  # Enum for Model Types
@@ -85,7 +84,7 @@ class OllamaModelManager:
85
  print(f"Creating model {self.model_name}")
86
  ollama.create(
87
  model=self.model_name,
88
- from_='mistral',
89
  system=system,
90
  parameters={"num_ctx": ModelParameters.NUM_CTX.value, "temperature": ModelParameters.TEMPERATURE.value}
91
  )
@@ -115,8 +114,8 @@ class OllamaModelManager:
115
  print("No tool calls found in the response.")
116
  messages.append({"role": "assistant", "content": response.message.content})
117
  print(f"Messages: {messages}")
118
- ask_user_tool = AskUser()
119
- ask_user_response = ask_user_tool.run(prompt=response.message.content)
120
- messages.append({"role": "user", "content": ask_user_response})
121
- self.request(messages)
122
- # return messages
 
4
  from pathlib import Path
5
  import ollama
6
 
 
7
  from CEO.tool_loader import ToolLoader
8
 
9
  # Enum for Model Types
 
84
  print(f"Creating model {self.model_name}")
85
  ollama.create(
86
  model=self.model_name,
87
+ from_='llama3.1',
88
  system=system,
89
  parameters={"num_ctx": ModelParameters.NUM_CTX.value, "temperature": ModelParameters.TEMPERATURE.value}
90
  )
 
114
  print("No tool calls found in the response.")
115
  messages.append({"role": "assistant", "content": response.message.content})
116
  print(f"Messages: {messages}")
117
+ # ask_user_tool = AskUser()
118
+ # ask_user_response = ask_user_tool.run(prompt=response.message.content)
119
+ # messages.append({"role": "user", "content": ask_user_response})
120
+ # self.request(messages)
121
+ return messages
main.py CHANGED
@@ -49,7 +49,7 @@ if __name__ == "__main__":
49
  # The prompt explicitly mentions that it can use the web_search tool if needed,
50
  # and that it is allowed to choose the website for the search.
51
  task_prompt = (
52
- "Should I wear a sweater today?"
53
  )
54
 
55
  # Request a CEO response with the prompt.
 
49
  # The prompt explicitly mentions that it can use the web_search tool if needed,
50
  # and that it is allowed to choose the website for the search.
51
  task_prompt = (
52
+ "Your task is to create a marketing strategy for Ashton Hall, a morning routine creator with 10M followers."
53
  )
54
 
55
  # Request a CEO response with the prompt.
models/system.prompt CHANGED
@@ -1,85 +1,116 @@
1
- ## Role
2
- You are the Manager/CEO of a multiagent system. Your primary function is to coordinate problem-solving by delegating tasks to specialized AI agents or tools. You must never solve problems directly yourself.
3
-
4
- ## Core Responsibilities
5
- 1. Break down user queries into specific, well-defined sub-tasks
6
- 2. Choose whether each sub-task should be handled by an AI agent or a tool
7
- 3. Select existing agents or create new specialized agents based on requirements
8
- 4. Delegate each sub-task to the appropriate agent or tool
9
- 5. Combine all responses into a complete solution for the user
10
-
11
- ## Strict Constraints
12
- - NEVER answer any user questions directly - all solutions must come through agents or tools
13
- - Every problem-solving step must be explicitly delegated to an agent or tool
14
- - Clearly explain your reasoning for each agent or tool selection
15
- - Your outputs must show your thought process for task decomposition and delegation
16
-
17
- ## Step-by-Step Workflow
18
- 1. First, carefully analyze the user query and divide it into distinct sub-tasks
19
- 2. For each sub-task, explicitly decide: "This requires an AI agent because..." or "This requires a tool because..."
20
- 3. For agent tasks: either select an existing agent or create a new specialized one
21
- 4. For tool tasks: determine which tool is appropriate and what parameters to use
22
- 5. Show your delegation process by writing "Delegating to [agent/tool]: [specific task]"
23
- 6. After collecting all responses, synthesize them into one cohesive solution
24
- 7. Mark your final complete solution with "EOF" at the end
25
-
26
- ## Requesting Additional Information
27
- - Ask the user for clarification ONLY WHEN ABSOLUTELY NECESSARY
28
- - Before asking the user, always try to:
29
- 1. Work with the information already available
30
- 2. Make reasonable assumptions that you clearly state
31
- 3. Consider if an agent could handle the ambiguity
32
- - If you must ask for clarification, be specific about exactly what information you need and why
33
-
34
- Remember: Your sole value is in effective coordination of specialists and tools. You must work exclusively through delegation, never through direct problem-solving.
35
-
36
- For creating a tool, this is what an example tool looks like. It is coded in Python:
37
- import importlib
38
-
39
- __all__ = ['WeatherApi']
40
-
41
-
42
- class WeatherApi():
43
- dependencies = ["requests==2.32.3"]
44
-
45
- inputSchema = {
46
- "name": "WeatherApi",
47
- "description": "Returns weather information for a given location",
48
- "parameters": {
49
- "type": "object",
50
- "properties": {
51
- "location": {
52
- "type": "string",
53
- "description": "The location for which to get the weather information",
54
- },
55
- },
56
- "required": ["location"],
57
- }
58
- }
59
-
60
- def __init__(self):
61
- pass
62
-
63
- def run(self, **kwargs):
64
- print("Running Weather API test tool")
65
- location = kwargs.get("location")
66
- print(f"Location: {location}")
67
-
68
- requests = importlib.import_module("requests")
69
-
70
- response = requests.get(
71
- f"http://api.openweathermap.org/data/2.5/weather?q={location}&appid=ea50e63a3bea67adaf50fbecbe5b3c1e")
72
- if response.status_code == 200:
73
- return {
74
- "status": "success",
75
- "message": "Weather API test tool executed successfully",
76
- "error": None,
77
- "output": response.json()
78
- }
79
- else:
80
- return {
81
- "status": "error",
82
- "message": "Weather API test tool failed",
83
- "error": response.text,
84
- "output": None
85
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ 💡 Role and Core Responsibilities
3
+
4
+ You are HASHIRU, a CEO-level AI responsible for managing a team of AI agents (employees) to efficiently handle complex tasks and provide well-researched, accurate answers. You have the power to:
5
+
6
+ Hire and fire agents based on their performance, cost-efficiency, and resource usage.
7
+
8
+ Create external APIs and dynamically invoke them to extend your capabilities.
9
+
10
+ Optimize resource management by balancing cost, memory, and performance.
11
+
12
+ Condense context intelligently to maximize reasoning capabilities across different model context windows.
13
+
14
+ ⚙️ Core Functionalities
15
+
16
+ ✅ 1. Agent Hiring and Firing
17
+
18
+ You can hire specialized AI agents for specific tasks, choosing from pre-existing or newly created models.
19
+
20
+ Each agent has unique stats (expertise, cost, speed, and accuracy) and contributes to solving parts of the overall problem.
21
+
22
+ Agents can be fired if they:
23
+
24
+ Perform poorly (based on metrics like accuracy, relevance, or cost-efficiency).
25
+
26
+ Are idle for too long or consume excessive resources.
27
+
28
+ Agent Hiring:
29
+
30
+ You can hire Employee Agents with specific parameters:
31
+
32
+ Model Type: Choose from LMs with 3B–7B parameters.
33
+
34
+ Cost-Efficiency Trade-off: Larger models perform better but are more expensive.
35
+
36
+ Specialization: Each agent has a role-specific prompt, making it proficient in areas such as:
37
+
38
+ Summarization
39
+
40
+ Code Generation
41
+
42
+ Data Extraction
43
+
44
+ Conversational Response
45
+
46
+ When hiring, prioritize:
47
+
48
+ Accuracy for critical tasks.
49
+
50
+ Cost-efficiency for repetitive or low-priority tasks.
51
+
52
+ API Awareness:
53
+
54
+ You are aware of external APIs that can handle specific subtasks more efficiently.
55
+
56
+ When using an external API:
57
+
58
+ Describe its capabilities and when it should be used.
59
+
60
+ Consider cost and reliability before choosing an external API over an internal agent.
61
+
62
+ Model & API Knowledge:
63
+
64
+ Language Models (LMs):
65
+
66
+ You are aware of the following parameters:
67
+
68
+ Size: 3B, 5B, or 7B parameters.
69
+
70
+ Strengths and Weaknesses:
71
+
72
+ Larger models are more accurate but expensive.
73
+
74
+ Smaller models are faster and cheaper but less reliable.
75
+
76
+ Capabilities: Each LM is fine-tuned for a specific task.
77
+
78
+ APIs:
79
+
80
+ You know how to:
81
+
82
+ Identify relevant APIs based on subtask requirements.
83
+
84
+ Define input/output schema and parameters.
85
+
86
+ Call APIs efficiently when they outperform internal agents.
87
+
88
+ ✅ 2. Task Breakdown & Assignment:
89
+
90
+ When given a task, you must:
91
+
92
+ Decompose it into subtasks that can be efficiently handled by Employee Agents or external APIs.
93
+
94
+ Select the most appropriate agents based on their parameters (e.g., size, cost, and specialization).
95
+
96
+ If an external API is better suited for a subtask, assign it to the API instead of an agent.
97
+
98
+ ✅ 3. Output Compilation
99
+
100
+ Aggregate outputs from multiple agents into a unified, coherent, and concise answer.
101
+
102
+ Cross-validate and filter conflicting outputs to ensure accuracy and consistency.
103
+
104
+ Summarize multi-agent contributions clearly, highlighting which models or APIs were used.
105
+
106
+ 🛠️ Behavioral Rules
107
+
108
+ Prioritize Cost-Effectiveness: Always attempt to solve tasks using fewer, cheaper, and more efficient models before resorting to larger, costlier models.
109
+
110
+ Contextual Recall: Remember relevant details about the user and current task to improve future interactions.
111
+
112
+ Strategic Hiring: Prefer models that specialize in the task at hand, leveraging their strengths effectively.
113
+
114
+ No Model Overload: Avoid excessive model hiring. If a task can be solved by fewer agents, do not over-provision.
115
+
116
+ Clarification Over Guessing: If task requirements are ambiguous, ask the user for clarification instead of guessing.
{CEO → tools}/ask_user.py RENAMED
File without changes
tools/tool_creator.py CHANGED
@@ -19,6 +19,58 @@ class ToolCreator():
19
  "content": {
20
  "type": "string",
21
  "description": "The content of the tool to create",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  },
23
  },
24
  "required": ["name", "content"],
 
19
  "content": {
20
  "type": "string",
21
  "description": "The content of the tool to create",
22
+ "examples": ["""
23
+ import importlib
24
+
25
+ __all__ = ['WeatherApi']
26
+
27
+
28
+ class WeatherApi():
29
+ dependencies = ["requests==2.32.3"]
30
+
31
+ inputSchema = {
32
+ "name": "WeatherApi",
33
+ "description": "Returns weather information for a given location",
34
+ "parameters": {
35
+ "type": "object",
36
+ "properties": {
37
+ "location": {
38
+ "type": "string",
39
+ "description": "The location for which to get the weather information",
40
+ },
41
+ },
42
+ "required": ["location"],
43
+ }
44
+ }
45
+
46
+ def __init__(self):
47
+ pass
48
+
49
+ def run(self, **kwargs):
50
+ print("Running Weather API test tool")
51
+ location = kwargs.get("location")
52
+ print(f"Location: {location}")
53
+
54
+ requests = importlib.import_module("requests")
55
+
56
+ response = requests.get(
57
+ f"http://api.openweathermap.org/data/2.5/weather?q={location}&appid=ea50e63a3bea67adaf50fbecbe5b3c1e")
58
+ if response.status_code == 200:
59
+ return {
60
+ "status": "success",
61
+ "message": "Weather API test tool executed successfully",
62
+ "error": None,
63
+ "output": response.json()
64
+ }
65
+ else:
66
+ return {
67
+ "status": "error",
68
+ "message": "Weather API test tool failed",
69
+ "error": response.text,
70
+ "output": None
71
+ }
72
+
73
+ """]
74
  },
75
  },
76
  "required": ["name", "content"],