Update app.py
app.py
CHANGED
@@ -19,31 +19,30 @@ except ImportError:
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# --- Basic Agent Definition ---
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
class BasicAgent:
    """
-
-
-    Based on Unit 2 course content:
-    - Uses CodeAgent from smolagents (writes Python code to solve problems)
-    - Has access to DuckDuckGoSearchTool for web search
-    - Follows ReAct pattern automatically within CodeAgent
-
-    How ReAct works in smolagents:
-    1. CodeAgent receives a question
-    2. INTERNAL LOOP (hidden from us):
-       - Think: LLM reasons about what to do next
-       - Act: LLM writes Python code (may call tools like DuckDuckGoSearchTool)
-       - Observe: LLM sees the code execution results
-       - Repeat: If not satisfied, continues the loop (up to max_steps)
-    3. Returns final answer when LLM decides it's complete
-
-    The while loop is built into CodeAgent - we just call agent.run(question)!
    """

    def __init__(self):
-        print("

        # Check GPU availability
        print(f"🔍 GPU Check:")
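The docstring removed above summarizes how the ReAct loop is built into smolagents' CodeAgent. As a rough, self-contained sketch of the usage it describes (the model id matches the diff; max_steps and the sample question are purely illustrative):

from smolagents import CodeAgent, DuckDuckGoSearchTool, TransformersModel

# Load a local Transformers model; device placement is left to the library.
model = TransformersModel(model_id="google/gemma-3-4b-it")

# CodeAgent owns the Think / Act (write and run Python) / Observe loop internally.
agent = CodeAgent(
    tools=[DuckDuckGoSearchTool()],  # web search available to generated code
    model=model,
    max_steps=6,                     # illustrative cap on the internal loop
)

# A single call drives the whole loop and returns the final answer.
answer = agent.run("In what year was the Eiffel Tower completed?")
print(answer)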
@@ -54,80 +53,95 @@ class BasicAgent:
            print(f"   - Device name: {torch.cuda.get_device_name()}")
            print(f"   - Device memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
        else:
-            print("   - No CUDA devices found
-
        if SMOLAGENTS_AVAILABLE:
            try:
-                # Initialize the model
                self.model = TransformersModel(
                    model_id="google/gemma-3-4b-it",
-                    torch_dtype=torch.
-                    device_map="auto"
-                    trust_remote_code=True,
                )

-                # Create CodeAgent with DuckDuckGoSearchTool
                self.agent = CodeAgent(
-                    tools=[
                    model=self.model,
                    additional_authorized_imports=[
                        'math', 'statistics', 're',  # Basic computation
                        'requests', 'json',          # Web requests and JSON
-                        'pandas', 'numpy',           # Data analysis
                        'zipfile', 'os',             # File processing
                        'datetime', 'time'           # Date/time operations
                    ]
                )
-
-                self.
-                print("✅ Smolagents CodeAgent initialized
-
            except Exception as e:
                print(f"⚠️ Error initializing smolagents: {e}")
-
        else:
-            self.
-
-    def __call__(self, question: str) -> str:
-        """
-        Main agent execution - just pass the question to CodeAgent.
-        The CodeAgent will Think-Act-Observe automatically and format the answer properly.
-        """
-        print(f"Agent processing: {question[:80]}...")
-
-        if not self.available:
-            return self._fallback_response(question)
-
-        try:
-            # Let the CodeAgent handle everything with ReAct pattern
-            # We'll include formatting instructions in our prompt
-            formatted_question = f"""Please answer this question with just the direct answer needed (no prefixes like "The answer is"):

-

-
-
-
-
-
-            - Write Python code for calculations or data processing"""

-

-
-
-            return final_answer

        except Exception as e:
-
-            return "
-
-    def
-        """
-
-
-
-


def run_and_submit_all( profile: gr.OAuthProfile | None):
@@ -19,31 +19,30 @@ except ImportError:
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

+CURRENT_PROMPT = """You are a helpful assistant that can search the web and execute Python code to answer questions.
+
+Question: {question}
+
+To answer this question:
+1. If you need current information or facts you're unsure about, use the search tool first
+2. Write Python code to solve the problem
+3. Make sure your final answer is clear and direct
+
+Available tools:
+- web_search(query): Search the web for information
+- All standard Python libraries
+
+Please provide a complete solution that ends with the final answer."""
+
# --- Basic Agent Definition ---
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
class BasicAgent:
    """
+    Basic agent using smolagents CodeAgent with DuckDuckGoSearchTool.
    """

    def __init__(self):
+        print("BasicAgent initialized.")

        # Check GPU availability
        print(f"🔍 GPU Check:")
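The new CURRENT_PROMPT is a plain str.format template; the _run_smolagents method added below fills its {question} placeholder before handing the text to the agent. A small sketch of that substitution (the question string here is purely hypothetical):

# Hypothetical question, used only to show how the placeholder is filled.
question = "How many r's are in 'strawberry'?"
formatted_question = CURRENT_PROMPT.format(question=question)

# formatted_question is now the full instruction block with the question
# spliced in; in the diff it is passed straight to self.agent.run(...).
print(formatted_question)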
@@ -54,80 +53,95 @@ class BasicAgent:
            print(f"   - Device name: {torch.cuda.get_device_name()}")
            print(f"   - Device memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
        else:
+            print("   - No CUDA devices found, will use CPU")
+
        if SMOLAGENTS_AVAILABLE:
            try:
+                # Initialize the model
+                print("🤖 Initializing TransformersModel...")
                self.model = TransformersModel(
                    model_id="google/gemma-3-4b-it",
+                    torch_dtype=torch.bfloat16,
+                    device_map="auto"
                )
+
+                # Verify where model actually loaded
+                if hasattr(self.model, 'device'):
+                    print(f"✅ Model loaded on device: {self.model.device}")
+                elif hasattr(self.model, 'model') and hasattr(self.model.model, 'device'):
+                    print(f"✅ Model loaded on device: {self.model.model.device}")
+                else:
+                    print("✅ Model loaded (device info not directly accessible)")
+
+                # Initialize the search tool
+                self.search_tool = DuckDuckGoSearchTool()

+                # Create CodeAgent with DuckDuckGoSearchTool and additional imports
                self.agent = CodeAgent(
+                    tools=[self.search_tool],
                    model=self.model,
                    additional_authorized_imports=[
                        'math', 'statistics', 're',  # Basic computation
                        'requests', 'json',          # Web requests and JSON
+                        'pandas', 'numpy',           # Data analysis
                        'zipfile', 'os',             # File processing
                        'datetime', 'time'           # Date/time operations
                    ]
                )
+
+                self.tools_available = True
+                print("✅ Smolagents CodeAgent initialized with DuckDuckGoSearchTool")
+
            except Exception as e:
                print(f"⚠️ Error initializing smolagents: {e}")
+                import traceback
+                traceback.print_exc()
+                self.tools_available = False
        else:
+            self.tools_available = False

+        if not self.tools_available:
+            print("⚠️ Using fallback implementation without smolagents")

+    def _run_smolagents(self, question):
+        """Run question through smolagents CodeAgent with enhanced prompting."""
+        try:
+            # Use the global CURRENT_PROMPT variable
+            formatted_question = CURRENT_PROMPT.format(question=question)

+            print(f"🔍 Processing question: {question}")
+            print(f"🔧 Available tools: {[tool.__class__.__name__ for tool in self.agent.tools]}")

+            # Run the agent
+            result = self.agent.run(formatted_question)

+            print(f"Raw result: {result}")
+
+            # Clean up the result (remove any remaining prefixes)
+            if isinstance(result, str):
+                result = result.strip()
+                # Remove common prefixes
+                prefixes_to_remove = ["The answer is ", "Answer: ", "Final answer: "]
+                for prefix in prefixes_to_remove:
+                    if result.startswith(prefix):
+                        result = result[len(prefix):].strip()
+
+            return result
+
        except Exception as e:
+            import traceback
+            return f"Agent error: {e}\n{traceback.format_exc()}"
+
+    def _fallback_implementation(self, question):
+        """Fallback when smolagents is not available."""
+        return f"Smolagents not available. Question received: {question}"
+
+    def __call__(self, question):
+        """Process a question using the smolagents CodeAgent or fallback."""
+        if self.tools_available:
+            return self._run_smolagents(question)
+        else:
+            return self._fallback_implementation(question)


def run_and_submit_all( profile: gr.OAuthProfile | None):
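Taken together, the new methods give BasicAgent a single callable interface that run_and_submit_all can keep using unchanged. A rough usage sketch (the question text is hypothetical; whether the smolagents path or the fallback runs depends on tools_available at init time):

agent = BasicAgent()

# __call__ dispatches to the smolagents CodeAgent when initialization succeeded,
# otherwise to the plain-text fallback implementation.
answer = agent("What is 17 squared minus 3?")
print(answer)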