helloparthshah committed on
Commit
25fe98a
·
1 Parent(s): a6407c2

Updated costs based on resource usage and actual API costs (created by HASHIRU AI)

Browse files
src/manager/manager.py CHANGED
@@ -140,8 +140,13 @@ class GeminiManager:
140
  role = "user"
141
  if isinstance(message["content"], tuple):
142
  path = message["content"][0]
143
- file = self.client.files.upload(file=path)
144
- formatted_history.append(file)
 
 
 
 
 
145
  continue
146
  else:
147
  parts = [types.Part.from_text(text=message.get("content", ""))]
@@ -197,19 +202,22 @@ class GeminiManager:
197
  return results
198
 
199
  def run(self, messages):
200
- if self.use_memory:
201
- memories = self.get_k_memories(messages[-1]['content'], k=5, threshold=0.1)
202
- if len(memories) > 0:
203
- messages.append({
204
- "role": "memories",
205
- "content": f"{memories}",
206
- })
207
- messages.append({
208
- "role": "assistant",
209
- "content": f"Memories: {memories}",
210
- "metadata": {"title": "Memories"}
211
- })
212
- yield messages
 
 
 
213
  yield from self.invoke_manager(messages)
214
 
215
  def invoke_manager(self, messages):
 
140
  role = "user"
141
  if isinstance(message["content"], tuple):
142
  path = message["content"][0]
143
+ try:
144
+ file = self.client.files.upload(file=path)
145
+ formatted_history.append(file)
146
+ except Exception as e:
147
+ logger.error(f"Error uploading file: {e}")
148
+ formatted_history.append(
149
+ types.Part.from_text(text="Error uploading file: "+str(e)))
150
  continue
151
  else:
152
  parts = [types.Part.from_text(text=message.get("content", ""))]
 
202
  return results
203
 
204
  def run(self, messages):
205
+ try:
206
+ if self.use_memory:
207
+ memories = self.get_k_memories(messages[-1]['content'], k=5, threshold=0.1)
208
+ if len(memories) > 0:
209
+ messages.append({
210
+ "role": "memories",
211
+ "content": f"{memories}",
212
+ })
213
+ messages.append({
214
+ "role": "assistant",
215
+ "content": f"Memories: {memories}",
216
+ "metadata": {"title": "Memories"}
217
+ })
218
+ yield messages
219
+ except Exception as e:
220
+ pass
221
  yield from self.invoke_manager(messages)
222
 
223
  def invoke_manager(self, messages):
src/tools/default_tools/agent_cost_manager.py CHANGED
@@ -16,59 +16,59 @@ class AgentCostManager():
16
 
17
  costs = {
18
  "llama3.2": {
19
- "description": "3 Billion parameter model",
20
- "create_resource_cost": 10,
21
- "invoke_resource_cost": 10,
22
  },
23
  "mistral": {
24
  "description": "7 Billion parameter model",
25
- "create_resource_cost": 20,
26
- "invoke_resource_cost": 50,
27
  },
28
  "deepseek-r1": {
29
  "description": "7 Billion reasoning model",
30
- "create_resource_cost": 20,
31
- "invoke_resource_cost": 50,
32
  },
33
  "gemini-2.5-flash-preview-04-17": {
34
  "description": "Adaptive thinking, cost efficiency",
35
- "create_expense_cost": 20,
36
- "invoke_expense_cost": 50
37
  },
38
  "gemini-2.5-pro-preview-03-25": {
39
  "description": "Enhanced thinking and reasoning, multimodal understanding, advanced coding, and more",
40
- "create_expense_cost": 20,
41
- "invoke_expense_cost": 50
42
  },
43
  "gemini-2.0-flash": {
44
  "description": "Next generation features, speed, thinking, realtime streaming, and multimodal generation",
45
- "create_expense_cost": 20,
46
- "invoke_expense_cost": 50
47
  },
48
  "gemini-2.0-flash-lite": {
49
  "description": "Cost efficiency and low latency",
50
- "create_expense_cost": 20,
51
- "invoke_expense_cost": 50
52
  },
53
  "gemini-1.5-flash": {
54
  "description": "Fast and versatile performance across a diverse variety of tasks",
55
- "create_expense_cost": 20,
56
- "invoke_expense_cost": 50
57
  },
58
  "gemini-1.5-flash-8b": {
59
  "description": "High volume and lower intelligence tasks",
60
- "create_expense_cost": 20,
61
- "invoke_expense_cost": 50
62
  },
63
  "gemini-1.5-pro": {
64
  "description": "Complex reasoning tasks requiring more intelligence",
65
- "create_expense_cost": 20,
66
- "invoke_expense_cost": 50
67
  },
68
  "gemini-2.0-flash-live-001": {
69
  "description": "Low-latency bidirectional voice and video interactions",
70
- "create_expense_cost": 20,
71
- "invoke_expense_cost": 50
72
  }
73
  }
74
 
 
16
 
17
    # Per-model pricing table (updated from measured resource usage and actual
    # API costs). Two disjoint schemes:
    #   - *_resource_cost: abstract local-resource units for self-hosted models
    #     (charged on agent creation and on each invocation).
    #   - *_expense_cost: monetary cost for hosted Gemini API models
    #     (presumably USD; per-invoke values look like per-1K-token rates —
    #     TODO confirm units against the billing docs).
    costs = {
        "llama3.2": {
            "description": "1 Billion parameter model",
            "create_resource_cost": 14,
            "invoke_resource_cost": 15,
        },
        "mistral": {
            "description": "7 Billion parameter model",
            "create_resource_cost": 75,
            "invoke_resource_cost": 40,
        },
        "deepseek-r1": {
            "description": "7 Billion reasoning model",
            "create_resource_cost": 28,
            "invoke_resource_cost": 35,
        },
        "gemini-2.5-flash-preview-04-17": {
            "description": "Adaptive thinking, cost efficiency",
            "create_expense_cost": 0.005,
            "invoke_expense_cost": 0.00017
        },
        "gemini-2.5-pro-preview-03-25": {
            "description": "Enhanced thinking and reasoning, multimodal understanding, advanced coding, and more",
            "create_expense_cost": 0.005,
            "invoke_expense_cost": 0.0001275
        },
        "gemini-2.0-flash": {
            "description": "Next generation features, speed, thinking, realtime streaming, and multimodal generation",
            "create_expense_cost": 0.005,
            "invoke_expense_cost": 0.00017
        },
        "gemini-2.0-flash-lite": {
            "description": "Cost efficiency and low latency",
            "create_expense_cost": 0.005,
            "invoke_expense_cost": 0.00017
        },
        "gemini-1.5-flash": {
            "description": "Fast and versatile performance across a diverse variety of tasks",
            "create_expense_cost": 0.005,
            "invoke_expense_cost": 0.00017
        },
        "gemini-1.5-flash-8b": {
            "description": "High volume and lower intelligence tasks",
            "create_expense_cost": 0.005,
            "invoke_expense_cost": 0.00017
        },
        "gemini-1.5-pro": {
            "description": "Complex reasoning tasks requiring more intelligence",
            "create_expense_cost": 0.005,
            "invoke_expense_cost": 0.0001275
        },
        "gemini-2.0-flash-live-001": {
            "description": "Low-latency bidirectional voice and video interactions",
            "create_expense_cost": 0.005,
            "invoke_expense_cost": 0.000635
        }
    }
74
 
src/tools/user_tools/get_website_tool.py CHANGED
@@ -1,4 +1,7 @@
1
  import importlib
 
 
 
2
 
3
  __all__ = ['GetWebsiteTool']
4
 
@@ -8,7 +11,7 @@ class GetWebsiteTool():
8
 
9
  inputSchema = {
10
  "name": "GetWebsiteTool",
11
- "description": "Returns the content of a website based on a query string.",
12
  "parameters": {
13
  "type": "object",
14
  "properties": {
@@ -21,6 +24,57 @@ class GetWebsiteTool():
21
  }
22
  }
23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  def run(self, **kwargs):
25
  headers = {
26
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:137.0) Gecko/20100101 Firefox/137.0',
@@ -46,7 +100,7 @@ class GetWebsiteTool():
46
  "message": "Missing required parameters: 'url'",
47
  "output": None
48
  }
49
-
50
  output = None
51
  requests = importlib.import_module("requests")
52
  bs4 = importlib.import_module("bs4")
@@ -57,17 +111,16 @@ class GetWebsiteTool():
57
  # Parse the content using BeautifulSoup
58
  soup = BeautifulSoup(response.content, 'html.parser')
59
  # Extract text from the parsed HTML
60
- output = soup.get_text()
 
 
 
61
  else:
62
  return {
63
  "status": "error",
64
  "message": f"Failed to fetch content from {url}. Status code: {response.status_code}",
65
  "output": None
66
  }
67
-
68
- # truncate the results to avoid excessive output
69
- if len(output) > 1000:
70
- output = output[:1000] + "... (truncated)"
71
 
72
  return {
73
  "status": "success",
 
1
  import importlib
2
+ from collections import defaultdict
3
+ import re
4
+ import time
5
 
6
  __all__ = ['GetWebsiteTool']
7
 
 
11
 
12
  inputSchema = {
13
  "name": "GetWebsiteTool",
14
+ "description": "Returns a summary of the content of a website based on a query string.",
15
  "parameters": {
16
  "type": "object",
17
  "properties": {
 
24
  }
25
  }
26
 
27
+ def summarize_text(self, text):
28
+ # Clean the text more thoroughly
29
+ text = re.sub(r'\[[0-9]*\]', ' ', text)
30
+ text = re.sub(r'\s+', ' ', text)
31
+ text = re.sub(r'[^a-zA-Z0-9.\s]', '', text) # Remove special characters except periods
32
+
33
+ # Tokenize into sentences
34
+ sentences = re.split(r'(?<=[.!?])\s+', text)
35
+ sentences = [s.strip() for s in sentences if s]
36
+
37
+ # Calculate word frequencies
38
+ word_frequencies = defaultdict(int)
39
+ for sentence in sentences:
40
+ words = sentence.lower().split()
41
+ for word in words:
42
+ word_frequencies[word] += 1
43
+
44
+ # Normalize word frequencies
45
+ total_words = sum(word_frequencies.values())
46
+ if total_words > 0:
47
+ for word in word_frequencies:
48
+ word_frequencies[word] /= total_words
49
+
50
+ # Calculate sentence scores based on word frequencies, sentence length, and coherence
51
+ sentence_scores = {}
52
+ for i, sentence in enumerate(sentences):
53
+ score = 0
54
+ words = sentence.lower().split()
55
+ for word in words:
56
+ score += word_frequencies[word]
57
+
58
+ # Consider sentence length
59
+ sentence_length_factor = 1 - abs(len(words) - 15) / 15 # Prefer sentences around 15 words
60
+ score += sentence_length_factor * 0.1
61
+
62
+ # Add a coherence score
63
+ if i > 0 and sentences[i-1] in sentence_scores:
64
+ previous_sentence_words = sentences[i-1].lower().split()
65
+ common_words = set(words) & set(previous_sentence_words)
66
+ coherence_score = len(common_words) / len(words)
67
+ score += coherence_score * 0.1
68
+
69
+ sentence_scores[sentence] = score
70
+
71
+ # Get the top 3 sentences with the highest scores
72
+ ranked_sentences = sorted(sentence_scores, key=sentence_scores.get, reverse=True)[:3]
73
+
74
+ # Generate the summary
75
+ summary = ". ".join(ranked_sentences) + "."
76
+ return summary
77
+
78
  def run(self, **kwargs):
79
  headers = {
80
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:137.0) Gecko/20100101 Firefox/137.0',
 
100
  "message": "Missing required parameters: 'url'",
101
  "output": None
102
  }
103
+
104
  output = None
105
  requests = importlib.import_module("requests")
106
  bs4 = importlib.import_module("bs4")
 
111
  # Parse the content using BeautifulSoup
112
  soup = BeautifulSoup(response.content, 'html.parser')
113
  # Extract text from the parsed HTML
114
+ text = soup.get_text()
115
+
116
+ # Summarize the text
117
+ output = self.summarize_text(text)
118
  else:
119
  return {
120
  "status": "error",
121
  "message": f"Failed to fetch content from {url}. Status code: {response.status_code}",
122
  "output": None
123
  }
 
 
 
 
124
 
125
  return {
126
  "status": "success",