mchinea committed · Commit 818fde4 · Parent(s): 438a309
update

Files changed:
- requirements.txt +1 -1
- tools_smolagent.py +315 -0
requirements.txt CHANGED

@@ -12,7 +12,7 @@ Pillow
 pydub
 #tavily-python
 #wikipedia
-
+pytesseract
 #openai-whisper
 
 wikipedia-api
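Note: pytesseract is only the Python binding used by the new extract_text_from_image tool in tools_smolagent.py; the Tesseract OCR engine itself must also be present on the host. On a Hugging Face Space that is typically handled with an apt packages.txt, which is not part of this commit — a sketch, assuming the Debian package name:

# packages.txt (hypothetical, not included in this commit)
tesseract-ocr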
tools_smolagent.py
ADDED
@@ -0,0 +1,315 @@
import tempfile
import requests
import os

from time import sleep
from urllib.parse import urlparse
from typing import Optional, List
import yt_dlp
from google.genai import types

from PIL import Image
from smolagents import CodeAgent, tool, OpenAIServerModel, LiteLLMModel
from google import genai
from dotenv import load_dotenv
#from model_provider import create_react_model, create_vision_model
#import imageio

load_dotenv(override=True)
'''
@tool
def use_vision_model(question: str, images: List[Image.Image]) -> str:
    """
    Use a Vision Model to answer a question about a set of images.
    Always use this tool to ask questions about a set of images you have been provided.
    This function uses an image-to-text AI model.
    You can ask a question about a list of one image or a list of multiple images.
    So, if you have multiple images that you want to ask the same question of, pass the entire list of images to the model.
    Ensure your prompt is specific enough to retrieve the exact information you are looking for.

    Args:
        question: The question to ask about the images. Type: str
        images: The list of images to ask the question about. Type: List[PIL.Image.Image]
    """
    image_model = create_vision_model()

    content = [
        {
            "type": "text",
            "text": question
        }
    ]
    print(f"Asking model a question about {len(images)} images")
    for image in images:
        content.append({
            "type": "image",
            "image": image  # Directly the PIL Image, no wrapping
        })

    messages = [
        {
            "role": "user",
            "content": content
        }
    ]

    output = image_model(messages).content
    print(f'Model returned: {output}')
    return output


@tool
def youtube_frames_to_images(url: str, sample_interval_frames: int = 24) -> List[Image.Image]:
    """
    Reviews a YouTube video and returns a List of PIL Images (List[PIL.Image.Image]), which can then be reviewed by a vision model.
    Only use this tool if you have been given a YouTube video that you need to analyze.
    This will generate a list of images, and you can use the use_vision_model tool to analyze those images.
    Args:
        url: The YouTube URL
        sample_interval_frames: The sampling interval (default is 24 frames)
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        # Download the video locally
        ydl_opts = {
            'format': 'bestvideo[height<=1080]+bestaudio/best[height<=1080]/best',
            'outtmpl': os.path.join(tmpdir, 'video.%(ext)s'),
            'quiet': True,
            'noplaylist': True,
            'merge_output_format': 'mp4',
            'force_ipv4': True,  # Avoid IPv6 issues
        }
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            info = ydl.extract_info(url, download=True)

        # Find the downloaded file
        video_path = None
        for file in os.listdir(tmpdir):
            if file.endswith('.mp4'):
                video_path = os.path.join(tmpdir, file)
                break

        if not video_path:
            raise RuntimeError("Failed to download video as mp4")

        # Use `imageio.get_reader()` (rather than `imopen()`) for frame-by-frame iteration
        reader = imageio.get_reader(video_path)
        # metadata = reader.get_meta_data()
        # fps = metadata.get('fps')

        # if fps is None:
        #     reader.close()
        #     raise RuntimeError("Unable to determine FPS from video metadata")

        # frame_interval = int(fps * sample_interval_frames)
        frame_interval = sample_interval_frames  # Use the provided interval directly
        images: List[Image.Image] = []

        # Iterate over frames, keeping one every frame_interval frames
        for idx, frame in enumerate(reader):
            print(f"Processing frame {idx}")
            if idx % frame_interval == 0:
                images.append(Image.fromarray(frame))

        reader.close()
        return images
'''
@tool
def review_youtube_video(url: str, question: str) -> str:
    """
    Reviews a YouTube video and answers a specific question about that video.
    Args:
        url (str): the URL to the YouTube video. Should be like this format: https://www.youtube.com/watch?v=9hE5-98ZeCg
        question (str): The question you are asking about the video
    """
    model = 'gemini-2.0-flash-lite'  # defined before the try so the error message below can always reference it
    try:
        client = genai.Client(api_key=os.getenv('GEMINI_KEY'))
        response = client.models.generate_content(
            model=model,
            contents=types.Content(
                parts=[
                    types.Part(
                        file_data=types.FileData(file_uri=url)
                    ),
                    types.Part(text=question)
                ]
            )
        )
        return response.text
    except Exception as e:
        return f"Error asking {model} about video: {str(e)}"


@tool
def read_file(filepath: str) -> str:
    """
    Used to read the content of a file. Returns the content as a string.
    Will only work for text-based files, such as .txt files or code files.
    Do not use for audio or visual files.

    Args:
        filepath (str): The path to the file to be read.
    Returns:
        str: Content of the file as a string.

    Raises:
        IOError: If there is an error opening or reading from the file.
    """
    try:
        with open(filepath, 'r', encoding='utf-8') as file:
            content = file.read()
            print(content)
            return content
    except FileNotFoundError:
        print(f"File not found: {filepath}")
    except IOError as e:
        print(f"Error reading file: {str(e)}")


@tool
def extract_text_from_image(image_path: str) -> str:
    """
    Extract text from an image using pytesseract (if available).

    Args:
        image_path: Path to the image file

    Returns:
        Extracted text or error message
    """
    try:
        # Try to import pytesseract
        import pytesseract
        from PIL import Image

        # Open the image
        image = Image.open(image_path)

        # Extract text
        text = pytesseract.image_to_string(image)

        return f"Extracted text from image:\n\n{text}"
    except ImportError:
        return "Error: pytesseract is not installed. Please install it with 'pip install pytesseract' and ensure Tesseract OCR is installed on your system."
    except Exception as e:
        return f"Error extracting text from image: {str(e)}"

@tool
def analyze_csv_file(file_path: str, query: str) -> str:
    """
    Analyze a CSV file using pandas and answer a question about it.
    To use this file you need to have saved it in a location and pass that location to the function.
    The download_file_from_url tool will save it by name to tempfile.gettempdir()

    Args:
        file_path: Path to the CSV file
        query: Question about the data

    Returns:
        Analysis result or error message
    """
    try:
        import pandas as pd

        # Read the CSV file
        df = pd.read_csv(file_path)

        # Run various analyses based on the query
        result = f"CSV file loaded with {len(df)} rows and {len(df.columns)} columns.\n"
        result += f"Columns: {', '.join(df.columns)}\n\n"

        # Add summary statistics
        result += "Summary statistics:\n"
        result += str(df.describe())

        return result
    except ImportError:
        return "Error: pandas is not installed. Please install it with 'pip install pandas'."
    except Exception as e:
        return f"Error analyzing CSV file: {str(e)}"

@tool
def analyze_excel_file(file_path: str, query: str) -> str:
    """
    Analyze an Excel file using pandas and answer a question about it.
    To use this file you need to have saved it in a location and pass that location to the function.
    The download_file_from_url tool will save it by name to tempfile.gettempdir()

    Args:
        file_path: Path to the Excel file
        query: Question about the data

    Returns:
        Analysis result or error message
    """
    try:
        import pandas as pd

        # Read the Excel file
        df = pd.read_excel(file_path)

        # Run various analyses based on the query
        result = f"Excel file loaded with {len(df)} rows and {len(df.columns)} columns.\n"
        result += f"Columns: {', '.join(df.columns)}\n\n"

        # Add summary statistics
        result += "Summary statistics:\n"
        result += str(df.describe())

        return result
    except ImportError:
        return "Error: pandas and openpyxl are not installed. Please install them with 'pip install pandas openpyxl'."
    except Exception as e:
        return f"Error analyzing Excel file: {str(e)}"

import whisper

@tool
def youtube_transcribe(url: str) -> str:
    """
    Transcribes a YouTube video. Use when you need to process the audio from a YouTube video into text.
    Args:
        url: URL of the YouTube video
    """
    model_size: str = "base"
    # Load model
    model = whisper.load_model(model_size)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Download audio
        ydl_opts = {
            'format': 'bestaudio/best',
            'outtmpl': os.path.join(tmpdir, 'audio.%(ext)s'),
            'quiet': True,
            'noplaylist': True,
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'wav',
                'preferredquality': '192',
            }],
            'force_ipv4': True,
        }
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            info = ydl.extract_info(url, download=True)

        audio_path = next((os.path.join(tmpdir, f) for f in os.listdir(tmpdir) if f.endswith('.wav')), None)
        if not audio_path:
            raise RuntimeError("Failed to find audio")

        # Transcribe
        result = model.transcribe(audio_path)
        return result['text']

@tool
def transcribe_audio(audio_file_path: str) -> str:
    """
    Transcribes an audio file. Use when you need to process audio data.
    DO NOT use this tool for YouTube videos; use the youtube_transcribe tool to process audio data from YouTube.
    Use this tool when you have an audio file in .mp3, .wav, .aac, .ogg, .flac, .m4a, .alac or .wma
    Args:
        audio_file_path: Filepath to the audio file (str)
    """
    model_size: str = "small"
    # Load model
    model = whisper.load_model(model_size)
    result = model.transcribe(audio_file_path)
    return result['text']
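The file only defines @tool functions; nothing in this commit wires them into an agent. A minimal sketch of how they could be handed to a smolagents CodeAgent follows — the model id, API-key handling, and the run() prompt are assumptions, not part of this commit:

import os

from smolagents import CodeAgent, LiteLLMModel

from tools_smolagent import (
    review_youtube_video,
    read_file,
    extract_text_from_image,
    analyze_csv_file,
    analyze_excel_file,
    youtube_transcribe,
    transcribe_audio,
)

# Any LiteLLM-supported model id would do; Gemini is used here only because the
# tools above already read GEMINI_KEY from the environment (an assumption).
model = LiteLLMModel(model_id="gemini/gemini-2.0-flash-lite", api_key=os.getenv("GEMINI_KEY"))

agent = CodeAgent(
    tools=[
        review_youtube_video,
        read_file,
        extract_text_from_image,
        analyze_csv_file,
        analyze_excel_file,
        youtube_transcribe,
        transcribe_audio,
    ],
    model=model,
)

# Example run (hypothetical file path).
print(agent.run("Extract the text from the image at /tmp/example.png"))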