Commit d5411e4 · mchinea committed · Parent(s): 413ad0f

update tools

Files changed:
- agent_smolagent.py (+19 -8)
- requirements.txt (+3 -1)
- tools_smolagent.py (+43 -9)
agent_smolagent.py CHANGED

@@ -14,7 +14,15 @@ from smolagents.default_tools import (DuckDuckGoSearchTool,
                                       SpeechToTextTool,
                                       PythonInterpreterTool)
 #from final_answer import FinalAnswerTool, check_reasoning, ensure_formatting
-
+from tools_smolagent import (read_file,
+                             extract_text_from_image,
+                             analyze_csv_file,
+                             analyze_excel_file,
+                             youtube_transcribe,
+                             transcribe_audio,
+                             wikipedia_search)
+
+#(use_vision_model, youtube_frames_to_images,
 # read_file,
 # extract_text_from_image, analyze_csv_file,
 # analyze_excel_file, youtube_transcribe,

@@ -50,17 +58,20 @@ def build_agent():
     return CodeAgent(
         model=model_desp,
         tools=[#FinalAnswerTool(),
-
+               DuckDuckGoSearchTool(),
                VisitWebpageTool(max_output_length=500000),
-               WikipediaSearchTool(extract_format='HTML'),
+               #WikipediaSearchTool(extract_format='HTML'),
                SpeechToTextTool(),
+               read_file,
+               extract_text_from_image,
+               analyze_csv_file,
+               analyze_excel_file,
+               youtube_transcribe,
+               transcribe_audio,
+               wikipedia_search
                #youtube_transcribe,
                # use_vision_model,
                # youtube_frames_to_images,
-               # review_youtube_video,
-               #read_file,
-               #extract_text_from_image,
-               #analyze_csv_file, analyze_excel_file,
                #transcribe_audio,
                ],
         managed_agents=[],

@@ -195,7 +206,7 @@ if __name__ == "__main__":
     '''
     task = {
         "task_id": "8e867cd7-cff9-4e6c-867a-ff5ddc2550be",
-        "question":
+        "question": question1,
         "Level": "1",
         "file_name": "",
     }
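For orientation, here is a minimal usage sketch (not part of the commit) of how the rebuilt agent would be exercised with the sample task from the __main__ block; the question1 value below is a stand-in, since its real definition sits outside this diff:

# Hypothetical usage sketch for the updated build_agent().
# `question1` is a placeholder; the actual question text lives elsewhere.
from agent_smolagent import build_agent

agent = build_agent()
question1 = "placeholder question for a Level-1 task"
task = {
    "task_id": "8e867cd7-cff9-4e6c-867a-ff5ddc2550be",
    "question": question1,
    "Level": "1",
    "file_name": "",
}
print(agent.run(task["question"]))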
requirements.txt CHANGED

@@ -13,10 +13,12 @@ pydub
 #tavily-python
 #wikipedia
 pytesseract
-
+openai-whisper
 
 wikipedia-api
 smolagents
 smolagents[transformers]
 smolagents[openai]
 duckduckgo_search
+yt-dlp
+wikipedia
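The three new entries back the audio and Wikipedia tools added in this commit. A quick sanity-check sketch, assuming the requirements were installed, that the packages resolve (note the install name openai-whisper maps to the module name whisper):

# Sketch: confirm the dependencies added in this commit import cleanly.
import whisper     # installed as openai-whisper
import yt_dlp      # used by the YouTube tools
import wikipedia   # used by wikipedia_search
print("whisper, yt_dlp and wikipedia imported OK")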
tools_smolagent.py CHANGED

@@ -2,16 +2,20 @@ import tempfile
 import requests
 import os
 
-from time import sleep
-from urllib.parse import urlparse
+#from time import sleep
+from dotenv import load_dotenv
+#from urllib.parse import urlparse
 from typing import Optional, List
 import yt_dlp
-
+import wikipedia
+
+from smolagents import tool
+
+#from google.genai import types
 
 from PIL import Image
-from google.genai import types
-from google import genai
-from dotenv import load_dotenv
+#from google import genai
+#from dotenv import load_dotenv
 #from model_provider import create_react_model, create_vision_model
 #import imageio
 

@@ -112,7 +116,7 @@ def youtube_frames_to_images(url: str, sample_interval_frames: int = 24) -> List
 
     reader.close()
     return images
-
+
 @tool
 def review_youtube_video(url: str, question: str) -> str:
     """

@@ -138,7 +142,7 @@ def review_youtube_video(url: str, question: str) -> str:
         return response.text
     except Exception as e:
         return f"Error asking {model} about video: {str(e)}"
-
+'''
 
 @tool
 def read_file(filepath: str ) -> str:

@@ -312,4 +316,34 @@ def transcribe_audio(audio_file_path: str) -> str:
     # Load model
     model = whisper.load_model(model_size)
     result = model.transcribe(audio_file_path)
-    return result['text']
+    return result['text']
+
+
+@tool
+def wikipedia_search(query: str) -> dict:
+    """
+    Search Wikipedia for a given query and return the first 10 results with summaries.
+
+    Args:
+        query: The search term or topic.
+    Returns:
+        A dictionary with a 'wiki_results' key containing formatted Wikipedia summaries.
+    """
+    wikipedia.set_lang("en")
+    try:
+        results = wikipedia.search(query, results=10)
+        summaries = []
+        for title in results:
+            try:
+                summary = wikipedia.summary(title, sentences=2)
+                summaries.append(f"## {title}\n{summary}")
+            except wikipedia.exceptions.DisambiguationError as e:
+                summaries.append(f"## {title}\nDisambiguation required. Example options: {e.options[:3]}")
+            except wikipedia.exceptions.PageError:
+                summaries.append(f"## {title}\nPage not found.")
+
+        formatted = "\n\n---\n\n".join(summaries)
+        return {"wiki_results": formatted}
+
+    except Exception as e:
+        return {"wiki_results": f"Error during Wikipedia search: {str(e)}"}
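As a rough usage sketch (not from the commit), the new wikipedia_search tool can also be called directly, since smolagents @tool objects remain callable outside an agent run; the query below is illustrative:

# Sketch: invoke the new tool directly to inspect its output shape.
from tools_smolagent import wikipedia_search

out = wikipedia_search("Alan Turing")  # illustrative query
# Expect '## <title>\n<summary>' blocks joined by '---' separators.
print(out["wiki_results"][:300])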