Reset
- .env.example +0 -4
- .gitignore +0 -11
- .vscode/tasks.json +0 -22
- agents/__init__.py +0 -5
- agents/data_agent/__init__.py +0 -3
- agents/data_agent/agent.py +0 -36
- agents/media_agent/__init__.py +0 -3
- agents/media_agent/agent.py +0 -36
- agents/web_agent/__init__.py +0 -3
- agents/web_agent/agent.py +0 -36
- app.py +31 -83
- conf/config.yaml +0 -83
- final_model/README.md +0 -202
- final_model/adapter_config.json +0 -39
- final_model/adapter_model.safetensors +0 -3
- final_model/added_tokens.json +0 -3
- final_model/merges.txt +0 -0
- final_model/special_tokens_map.json +0 -6
- final_model/tokenizer.json +0 -0
- final_model/tokenizer_config.json +0 -166
- final_model/training_args.bin +0 -3
- final_model/vocab.json +0 -0
- main.py +0 -318
- main_v2.py +0 -155
- model_factory.py +0 -39
- notebooks/SmolVLM2_Video_FT.ipynb +0 -0
- notebooks/bonus-unit1.ipynb +0 -0
- notebooks/unsloth_SmolLM2-135M-Instruct-bnb-4bit_xingyaoww_code-act.ipynb +0 -0
- prompts/code_agent.yaml +0 -325
- prompts/code_agent_modified.yaml +0 -281
- requirements.txt +2 -38
- serve.py +0 -635
- serve_test.py +0 -104
- test_questions.py +0 -39
- tools/__init__.py +0 -0
- tools/smart_search/__init__.py +0 -0
- tools/smart_search/tool.py +0 -236
- train.py +0 -419
.env.example DELETED
@@ -1,4 +0,0 @@
-# Required API configuration
-API_BASE=https://api.anthropic.com
-API_KEY=sk-replace-with-your-api-key
-MODEL_ID=anthropic/claude-3-7-sonnet-20250219

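For reference, the deleted code below (`model_factory.py`, `main.py`) consumed these variables through python-dotenv. A minimal sketch of that loading pattern, mirroring the repo's own usage:

```python
import os

from dotenv import find_dotenv, load_dotenv

# Pull API_BASE / API_KEY / MODEL_ID from a local .env file.
load_dotenv(find_dotenv())

API_BASE = os.getenv("API_BASE")
API_KEY = os.getenv("API_KEY")
MODEL_ID = os.getenv("MODEL_ID")

if not all([API_BASE, API_KEY, MODEL_ID]):
    raise ValueError("Missing required environment variables: API_BASE, API_KEY, MODEL_ID")
```
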
.gitignore DELETED
@@ -1,11 +0,0 @@
-.env
-logs
-lora_model
-memory_snapshot.pickle
-models
-outputs
-__pycache__
-.pytest_cache
-unsloth_compiled_cache
-.venv
-wandb

.vscode/tasks.json DELETED
@@ -1,22 +0,0 @@
-{
-    // See https://go.microsoft.com/fwlink/?LinkId=733558
-    // for the documentation about the tasks.json format
-    "version": "2.0.0",
-    "tasks": [
-        {
-            "label": "prerun",
-            "type": "shell",
-            "command": "uv run python -m phoenix.server.main serve"
-        },
-        {
-            "label": "run",
-            "type": "shell",
-            "command": "uv run python -m main_v2"
-        },
-        {
-            "label": "test",
-            "type": "shell",
-            "command": "uv run python -m unittest test_questions.py"
-        }
-    ]
-}

agents/__init__.py DELETED
@@ -1,5 +0,0 @@
-from .data_agent import create_data_agent
-from .media_agent import create_media_agent
-from .web_agent import create_web_agent
-
-__all__ = ["create_web_agent", "create_data_agent", "create_media_agent"]

agents/data_agent/__init__.py DELETED
@@ -1,3 +0,0 @@
-from .agent import create_data_agent
-
-__all__ = ["create_data_agent"]

agents/data_agent/agent.py DELETED
@@ -1,36 +0,0 @@
-import importlib
-
-import yaml
-from smolagents import CodeAgent
-
-from tools import parse_csv, perform_calculation
-
-
-def create_data_agent(model):
-    """
-    Create a specialized agent for data analysis tasks.
-
-    Args:
-        model: The model to use for the agent
-
-    Returns:
-        Configured CodeAgent for data analysis
-    """
-    # Load default prompts
-    prompt_templates = yaml.safe_load(
-        importlib.resources.files("smolagents.prompts")
-        .joinpath("code_agent.yaml")
-        .read_text()
-    )
-
-    data_agent = CodeAgent(
-        tools=[parse_csv, perform_calculation],
-        model=model,
-        name="data_agent",
-        description="Specialized agent for data analysis. Use this agent to analyze data, perform calculations, and extract insights from structured data.",
-        add_base_tools=True,
-        additional_authorized_imports=["pandas", "numpy", "math", "csv", "io"],
-        prompt_templates=prompt_templates,
-    )
-
-    return data_agent

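For reference, a minimal usage sketch for the factory removed above. The model wiring mirrors the repo's `main.py` (below); the task string is a hypothetical example:

```python
import os

from smolagents import LiteLLMModel

from agents import create_data_agent  # package also deleted in this commit

model = LiteLLMModel(
    api_base=os.getenv("API_BASE"),
    api_key=os.getenv("API_KEY"),
    model_id=os.getenv("MODEL_ID"),
)
data_agent = create_data_agent(model)

# CodeAgent.run executes the Thought/Code/Observation loop and returns the final answer.
answer = data_agent.run("Compute the mean of the 'price' column in data.csv")
print(answer)
```
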
agents/media_agent/__init__.py DELETED
@@ -1,3 +0,0 @@
-from .agent import create_media_agent
-
-__all__ = ["create_media_agent"]

agents/media_agent/agent.py DELETED
@@ -1,36 +0,0 @@
-import importlib
-
-import yaml
-from smolagents import CodeAgent
-
-from tools import analyze_image, read_pdf
-
-
-def create_media_agent(model):
-    """
-    Create a specialized agent for handling media (images, PDFs).
-
-    Args:
-        model: The model to use for the agent
-
-    Returns:
-        Configured CodeAgent for media handling
-    """
-    # Load default prompts
-    prompt_templates = yaml.safe_load(
-        importlib.resources.files("smolagents.prompts")
-        .joinpath("code_agent.yaml")
-        .read_text()
-    )
-
-    media_agent = CodeAgent(
-        tools=[analyze_image, read_pdf],
-        model=model,
-        name="media_agent",
-        description="Specialized agent for handling media files like images and PDFs. Use this agent to analyze images and extract text from PDF documents.",
-        add_base_tools=True,
-        additional_authorized_imports=["PIL", "io", "requests"],
-        prompt_templates=prompt_templates,
-    )
-
-    return media_agent

agents/web_agent/__init__.py DELETED
@@ -1,3 +0,0 @@
-from .agent import create_web_agent
-
-__all__ = ["create_web_agent"]

agents/web_agent/agent.py DELETED
@@ -1,36 +0,0 @@
-import importlib
-
-import yaml
-from smolagents import CodeAgent
-
-from tools import browse_webpage, extract_dates, find_in_page, web_search
-
-
-def create_web_agent(model):
-    """
-    Create a specialized agent for web browsing tasks.
-
-    Args:
-        model: The model to use for the agent
-
-    Returns:
-        Configured CodeAgent for web browsing
-    """
-    # Load default prompts
-    prompt_templates = yaml.safe_load(
-        importlib.resources.files("smolagents.prompts")
-        .joinpath("code_agent.yaml")
-        .read_text()
-    )
-
-    web_agent = CodeAgent(
-        tools=[web_search, browse_webpage, find_in_page, extract_dates],
-        model=model,
-        name="web_agent",
-        description="Specialized agent for web browsing and searching. Use this agent to find information online, browse websites, and extract information from web pages.",
-        add_base_tools=True,
-        additional_authorized_imports=["requests", "bs4", "re", "json"],
-        prompt_templates=prompt_templates,
-    )
-
-    return web_agent

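All three factory modules above load smolagents' bundled default CodeAgent prompts with the same `importlib.resources.files(...)` chain, but only do `import importlib`. Whether that resolves `importlib.resources` depends on what else has been imported, since importing a package does not automatically bind its submodules. A standalone version of the shared pattern, with the explicit submodule import:

```python
import importlib.resources  # explicit, rather than relying on a bare `import importlib`

import yaml

# smolagents ships its default CodeAgent prompt templates as package data.
prompt_templates = yaml.safe_load(
    importlib.resources.files("smolagents.prompts")
    .joinpath("code_agent.yaml")
    .read_text()
)
print(list(prompt_templates.keys()))
```
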
app.py CHANGED
@@ -1,65 +1,34 @@
-import inspect
 import os
-import time
-
 import gradio as gr
-import pandas as pd
 import requests
-
-
-from main_v2 import main
-
-# global question_counter
-# question_counter = 0
+import inspect
+import pandas as pd
 
 # (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
-
 # --- Basic Agent Definition ---
 # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
 class BasicAgent:
     def __init__(self):
         print("BasicAgent initialized.")
-
     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
-
-        # question_counter += 1
-
-        # if question_counter > 1:
-        #     return "This is a default answer."
-
-        # fixed_answer = "This is a default answer."
-        # print(f"Agent returning fixed answer: {fixed_answer}")
-        # return fixed_answer
-
-        # TODO: Set a wait time between each question to avoid rate limiting
-        # print("Sleeping for 60 seconds...")
-        # time.sleep(60)
-
-        final_answer = str(main(task=question))
-
-        print("--------------------------------")
-        print(f"Question: {question}")
-        print(f"Final Answer: {final_answer}")
-        print("--------------------------------")
-
-        return final_answer
-
-
-def run_and_submit_all(profile: gr.OAuthProfile | None):
+        fixed_answer = "This is a default answer."
+        print(f"Agent returning fixed answer: {fixed_answer}")
+        return fixed_answer
+
+def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
     Fetches all questions, runs the BasicAgent on them, submits all answers,
     and displays the results.
     """
     # --- Determine HF Space Runtime URL and Repo URL ---
-    space_id = os.getenv("SPACE_ID")
+    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
 
     if profile:
-        username = f"{profile.username}"
+        username= f"{profile.username}"
         print(f"User logged in: {username}")
     else:
         print("User not logged in.")
@@ -86,16 +55,16 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
         response.raise_for_status()
         questions_data = response.json()
         if not questions_data:
-            print("Fetched questions list is empty.")
-            return "Fetched questions list is empty or invalid format.", None
+            print("Fetched questions list is empty.")
+            return "Fetched questions list is empty or invalid format.", None
         print(f"Fetched {len(questions_data)} questions.")
     except requests.exceptions.RequestException as e:
         print(f"Error fetching questions: {e}")
         return f"Error fetching questions: {e}", None
     except requests.exceptions.JSONDecodeError as e:
-        print(f"Error decoding JSON response from questions endpoint: {e}")
-        print(f"Response text: {response.text[:500]}")
-        return f"Error decoding server response for questions: {e}", None
+        print(f"Error decoding JSON response from questions endpoint: {e}")
+        print(f"Response text: {response.text[:500]}")
+        return f"Error decoding server response for questions: {e}", None
     except Exception as e:
         print(f"An unexpected error occurred fetching questions: {e}")
         return f"An unexpected error occurred fetching questions: {e}", None
@@ -112,36 +81,18 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
             continue
         try:
             submitted_answer = agent(question_text)
-            answers_payload.append(
-                {"task_id": task_id, "submitted_answer": submitted_answer}
-            )
-            results_log.append(
-                {
-                    "Task ID": task_id,
-                    "Question": question_text,
-                    "Submitted Answer": submitted_answer,
-                }
-            )
+            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
+            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
         except Exception as e:
-            print(f"Error running agent on task {task_id}: {e}")
-            results_log.append(
-                {
-                    "Task ID": task_id,
-                    "Question": question_text,
-                    "Submitted Answer": f"AGENT ERROR: {e}",
-                }
-            )
+            print(f"Error running agent on task {task_id}: {e}")
+            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
 
     if not answers_payload:
         print("Agent did not produce any answers to submit.")
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-    # 4. Prepare Submission
-    submission_data = {
-        "username": username.strip(),
-        "agent_code": agent_code,
-        "answers": answers_payload,
-    }
+    # 4. Prepare Submission
+    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
     print(status_update)
 
@@ -211,19 +162,20 @@ with gr.Blocks() as demo:
 
     run_button = gr.Button("Run Evaluation & Submit All Answers")
 
-    status_output = gr.Textbox(
-        label="Run Status / Submission Result", lines=5, interactive=False
-    )
+    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
     # Removed max_rows=10 from DataFrame constructor
     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
 
-    run_button.click(
-        fn=run_and_submit_all, outputs=[status_output, results_table]
-    )
+    run_button.click(
+        fn=run_and_submit_all,
+        outputs=[status_output, results_table]
+    )
 
 if __name__ == "__main__":
-    print("\n" + "-" * 30 + " App Starting " + "-" * 30)
+    print("\n" + "-"*30 + " App Starting " + "-"*30)
     # Check for SPACE_HOST and SPACE_ID at startup for information
     space_host_startup = os.getenv("SPACE_HOST")
-    space_id_startup = os.getenv("SPACE_ID")
+    space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
 
     if space_host_startup:
         print(f"✅ SPACE_HOST found: {space_host_startup}")
@@ -231,18 +183,14 @@ if __name__ == "__main__":
     else:
         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
 
-    if space_id_startup:
+    if space_id_startup: # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-        print(
-            f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main"
-        )
+        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
     else:
-        print(
-            "ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined."
-        )
+        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
 
-    print("-" * (60 + len(" App Starting ")) + "\n")
+    print("-"*(60 + len(" App Starting ")) + "\n")
 
     print("Launching Gradio Interface for Basic Agent Evaluation...")
-    demo.launch(debug=True, share=False)
+    demo.launch(debug=True, share=False)

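One detail worth flagging in both versions of `run_and_submit_all`: it declares a `gr.OAuthProfile | None` parameter, yet `run_button.click(...)` wires no `inputs`. Gradio supports this: when a handler parameter is type-hinted as `gr.OAuthProfile`, the framework injects the logged-in user's profile (or `None`) automatically, provided OAuth is enabled for the Space. A minimal illustrative sketch of the pattern, not this app's exact code:

```python
import gradio as gr

def whoami(profile: gr.OAuthProfile | None) -> str:
    # Gradio fills `profile` from the session based on the type hint alone.
    return f"Logged in as {profile.username}" if profile else "Not logged in."

with gr.Blocks() as demo:
    gr.LoginButton()                 # Hugging Face OAuth login widget
    status = gr.Textbox(label="Status")
    gr.Button("Check").click(fn=whoami, outputs=status)  # no `inputs` needed

demo.launch()
```
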
conf/config.yaml DELETED
@@ -1,83 +0,0 @@
-defaults:
-  - _self_
-
-# Model configuration
-model:
-  name: "unsloth/SmolLM2-135M-Instruct-bnb-4bit"
-  # name: "HuggingFaceTB/SmolLM2-135M-Instruct"
-  max_seq_length: 2048  # Auto supports RoPE Scaling internally
-  provider: "openai"
-  dtype: null  # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
-  load_in_4bit: true  # Use 4bit quantization to reduce memory usage
-
-# PEFT configuration
-peft:
-  r: 64
-  lora_alpha: 128
-  lora_dropout: 0.05
-  bias: "none"
-  use_gradient_checkpointing: "unsloth"
-  random_state: 3407
-  use_rslora: true
-  loftq_config: null
-  target_modules:
-    - "q_proj"
-    - "k_proj"
-    - "v_proj"
-    - "o_proj"
-    - "gate_proj"
-    - "up_proj"
-    - "down_proj"
-
-# Dataset configuration
-dataset:
-  validation_split: 0.1  # 10% of data for validation
-  seed: 3407  # Random seed for dataset splitting
-
-# Training configuration
-training:
-  args:
-    per_device_train_batch_size: 2
-    per_device_eval_batch_size: 2
-    gradient_accumulation_steps: 16
-    warmup_steps: 100
-    max_steps: 120
-    learning_rate: 5e-5
-    logging_steps: 1
-    save_strategy: "steps"
-    save_steps: 30
-    eval_strategy: "steps"
-    eval_steps: 30
-    save_total_limit: 2
-    optim: "adamw_8bit"
-    weight_decay: 0.01
-    lr_scheduler_type: "cosine_with_restarts"
-    seed: 3407
-    output_dir: "outputs"
-    gradient_checkpointing: true
-    load_best_model_at_end: true
-    metric_for_best_model: "eval_loss"
-    greater_is_better: false
-
-  sft:
-    dataset_num_proc: 2
-    packing: false
-    data_collator:
-      mlm: false
-      pad_to_multiple_of: 8
-
-# Output configuration
-output:
-  dir: "final_model"
-
-# Training control
-train: false
-
-# Testing configuration
-test: true  # Whether to run testing after training
-test_dataset:
-  name: "gaia-benchmark/GAIA"
-  config: "2023_level1"  # Use level 1 questions for testing
-  split: "test"  # Use test split for testing
-  max_samples: 3  # Number of samples to test on
-  max_length: 2048  # Maximum sequence length for testing

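The `defaults: - _self_` key at the top marks this as a Hydra config, presumably consumed by the (also deleted) `train.py`. A minimal sketch of how such a `conf/config.yaml` is typically loaded, assuming hydra-core; the function name is illustrative:

```python
import hydra
from omegaconf import DictConfig

@hydra.main(config_path="conf", config_name="config", version_base=None)
def run(cfg: DictConfig) -> None:
    # Nested YAML keys become attribute-style lookups.
    print(cfg.model.name)                    # unsloth/SmolLM2-135M-Instruct-bnb-4bit
    print(cfg.peft.r, cfg.peft.lora_alpha)   # 64 128

if __name__ == "__main__":
    run()
```
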
final_model/README.md DELETED
@@ -1,202 +0,0 @@
----
-base_model: unsloth/SmolLM2-135M-Instruct-bnb-4bit
-library_name: peft
----
-
-# Model Card for Model ID
-
-<!-- Provide a quick summary of what the model is/does. -->
-
-
-
-## Model Details
-
-### Model Description
-
-<!-- Provide a longer summary of what this model is. -->
-
-
-
-- **Developed by:** [More Information Needed]
-- **Funded by [optional]:** [More Information Needed]
-- **Shared by [optional]:** [More Information Needed]
-- **Model type:** [More Information Needed]
-- **Language(s) (NLP):** [More Information Needed]
-- **License:** [More Information Needed]
-- **Finetuned from model [optional]:** [More Information Needed]
-
-### Model Sources [optional]
-
-<!-- Provide the basic links for the model. -->
-
-- **Repository:** [More Information Needed]
-- **Paper [optional]:** [More Information Needed]
-- **Demo [optional]:** [More Information Needed]
-
-## Uses
-
-<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
-
-### Direct Use
-
-<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
-
-[More Information Needed]
-
-### Downstream Use [optional]
-
-<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
-
-[More Information Needed]
-
-### Out-of-Scope Use
-
-<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
-
-[More Information Needed]
-
-## Bias, Risks, and Limitations
-
-<!-- This section is meant to convey both technical and sociotechnical limitations. -->
-
-[More Information Needed]
-
-### Recommendations
-
-<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
-
-Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
-
-## How to Get Started with the Model
-
-Use the code below to get started with the model.
-
-[More Information Needed]
-
-## Training Details
-
-### Training Data
-
-<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
-
-[More Information Needed]
-
-### Training Procedure
-
-<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
-
-#### Preprocessing [optional]
-
-[More Information Needed]
-
-
-#### Training Hyperparameters
-
-- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
-
-#### Speeds, Sizes, Times [optional]
-
-<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
-
-[More Information Needed]
-
-## Evaluation
-
-<!-- This section describes the evaluation protocols and provides the results. -->
-
-### Testing Data, Factors & Metrics
-
-#### Testing Data
-
-<!-- This should link to a Dataset Card if possible. -->
-
-[More Information Needed]
-
-#### Factors
-
-<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
-
-[More Information Needed]
-
-#### Metrics
-
-<!-- These are the evaluation metrics being used, ideally with a description of why. -->
-
-[More Information Needed]
-
-### Results
-
-[More Information Needed]
-
-#### Summary
-
-
-
-## Model Examination [optional]
-
-<!-- Relevant interpretability work for the model goes here -->
-
-[More Information Needed]
-
-## Environmental Impact
-
-<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
-
-Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
-
-- **Hardware Type:** [More Information Needed]
-- **Hours used:** [More Information Needed]
-- **Cloud Provider:** [More Information Needed]
-- **Compute Region:** [More Information Needed]
-- **Carbon Emitted:** [More Information Needed]
-
-## Technical Specifications [optional]
-
-### Model Architecture and Objective
-
-[More Information Needed]
-
-### Compute Infrastructure
-
-[More Information Needed]
-
-#### Hardware
-
-[More Information Needed]
-
-#### Software
-
-[More Information Needed]
-
-## Citation [optional]
-
-<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
-
-**BibTeX:**
-
-[More Information Needed]
-
-**APA:**
-
-[More Information Needed]
-
-## Glossary [optional]
-
-<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
-
-[More Information Needed]
-
-## More Information [optional]
-
-[More Information Needed]
-
-## Model Card Authors [optional]
-
-[More Information Needed]
-
-## Model Card Contact
-
-[More Information Needed]
-### Framework versions
-
-- PEFT 0.15.2

final_model/adapter_config.json DELETED
@@ -1,39 +0,0 @@
-{
-  "alpha_pattern": {},
-  "auto_mapping": null,
-  "base_model_name_or_path": "unsloth/SmolLM2-135M-Instruct-bnb-4bit",
-  "bias": "none",
-  "corda_config": null,
-  "eva_config": null,
-  "exclude_modules": null,
-  "fan_in_fan_out": false,
-  "inference_mode": true,
-  "init_lora_weights": true,
-  "layer_replication": null,
-  "layers_pattern": null,
-  "layers_to_transform": null,
-  "loftq_config": {},
-  "lora_alpha": 128,
-  "lora_bias": false,
-  "lora_dropout": 0.05,
-  "megatron_config": null,
-  "megatron_core": "megatron.core",
-  "modules_to_save": null,
-  "peft_type": "LORA",
-  "r": 64,
-  "rank_pattern": {},
-  "revision": null,
-  "target_modules": [
-    "gate_proj",
-    "k_proj",
-    "down_proj",
-    "o_proj",
-    "up_proj",
-    "v_proj",
-    "q_proj"
-  ],
-  "task_type": "CAUSAL_LM",
-  "trainable_token_indices": null,
-  "use_dora": false,
-  "use_rslora": true
-}

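The config marks `final_model/` as a LoRA adapter (r=64, alpha=128, rsLoRA) over the 4-bit SmolLM2 base. A sketch of loading it with transformers + peft; device placement and quantization kwargs are omitted assumptions, and the bnb-4bit base requires bitsandbytes:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("unsloth/SmolLM2-135M-Instruct-bnb-4bit")
tokenizer = AutoTokenizer.from_pretrained("final_model")   # adapter dir also holds tokenizer files
model = PeftModel.from_pretrained(base, "final_model")     # applies the LoRA weights

# Optionally fold the adapter into the base weights for adapter-free inference.
model = model.merge_and_unload()
```
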
final_model/adapter_model.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8d98bcb921ed12503c0bd357aad0fc6be4cbbd93d8e1cca6f533df793d828de4
-size 78207176

final_model/added_tokens.json DELETED
@@ -1,3 +0,0 @@
-{
-  "<|PAD_TOKEN|>": 49152
-}

final_model/merges.txt DELETED
The diff for this file is too large to render.

final_model/special_tokens_map.json DELETED
@@ -1,6 +0,0 @@
-{
-  "bos_token": "<|im_start|>",
-  "eos_token": "<|im_end|>",
-  "pad_token": "<|PAD_TOKEN|>",
-  "unk_token": "�"
-}

final_model/tokenizer.json DELETED
The diff for this file is too large to render.

final_model/tokenizer_config.json DELETED
@@ -1,166 +0,0 @@
-{
-  "add_prefix_space": false,
-  "added_tokens_decoder": {
-    "0": {
-      "content": "<|endoftext|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "1": {
-      "content": "<|im_start|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "2": {
-      "content": "<|im_end|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "3": {
-      "content": "<repo_name>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "4": {
-      "content": "<reponame>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "5": {
-      "content": "<file_sep>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "6": {
-      "content": "<filename>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "7": {
-      "content": "<gh_stars>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "8": {
-      "content": "<issue_start>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "9": {
-      "content": "<issue_comment>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "10": {
-      "content": "<issue_closed>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "11": {
-      "content": "<jupyter_start>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "12": {
-      "content": "<jupyter_text>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "13": {
-      "content": "<jupyter_code>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "14": {
-      "content": "<jupyter_output>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "15": {
-      "content": "<jupyter_script>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "16": {
-      "content": "<empty_output>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "24211": {
-      "content": "�",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "49152": {
-      "content": "<|PAD_TOKEN|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    }
-  },
-  "bos_token": "<|im_start|>",
-  "chat_template": "{% if 'role' in messages[0] %}{% for message in messages %}{% if message['role'] == 'user' %}{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|im_start|>assistant\n' + message['content'] + '<|im_end|>\n' }}{% else %}{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}{% else %}{% for message in messages %}{% if message['from'] == 'human' %}{{'<|im_start|>user\n' + message['value'] + '<|im_end|>\n'}}{% elif message['from'] == 'gpt' %}{{'<|im_start|>assistant\n' + message['value'] + '<|im_end|>\n' }}{% else %}{{ '<|im_start|>system\n' + message['value'] + '<|im_end|>\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}{% endif %}",
-  "clean_up_tokenization_spaces": false,
-  "eos_token": "<|im_end|>",
-  "extra_special_tokens": {},
-  "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "<|PAD_TOKEN|>",
-  "tokenizer_class": "GPT2Tokenizer",
-  "unk_token": "�"
-}

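The `chat_template` above accepts both `role`/`content` and ShareGPT-style `from`/`value` messages and wraps turns in ChatML markers. Exercising it through transformers:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("final_model")

messages = [{"role": "user", "content": "What is the capital of France?"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>user
# What is the capital of France?<|im_end|>
# <|im_start|>assistant
```
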
final_model/training_args.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:25a38695ea7436f8b5f07e385ec08f740c1162981287c799c90727893bda974e
-size 6033

final_model/vocab.json DELETED
The diff for this file is too large to render.

main.py DELETED
@@ -1,318 +0,0 @@
-import asyncio
-import importlib
-import logging
-import os
-import time
-import uuid  # for generating thread IDs for checkpointer
-from typing import AsyncIterator, Optional, TypedDict
-
-import litellm
-import yaml
-from dotenv import find_dotenv, load_dotenv
-from langgraph.checkpoint.memory import MemorySaver
-from langgraph.graph import END, START, StateGraph
-from openinference.instrumentation.smolagents import SmolagentsInstrumentor
-from opentelemetry.sdk.trace.export import BatchSpanProcessor
-from phoenix.otel import register
-from smolagents import CodeAgent, LiteLLMModel
-from smolagents.memory import ActionStep, FinalAnswerStep
-from smolagents.monitoring import LogLevel
-from utils import extract_final_answer
-
-from agents import create_data_analysis_agent, create_media_agent, create_web_agent
-from prompts import MANAGER_SYSTEM_PROMPT
-from tools import perform_calculation, web_search
-
-litellm._turn_on_debug()
-
-# Configure OpenTelemetry with BatchSpanProcessor
-register()
-tracer_provider = register()
-tracer_provider.add_span_processor(BatchSpanProcessor())
-SmolagentsInstrumentor().instrument(tracer_provider=tracer_provider)
-
-# Configure logging
-logging.basicConfig(
-    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-)
-logger = logging.getLogger(__name__)
-
-# Load environment variables
-load_dotenv(find_dotenv())
-
-# Get required environment variables with validation
-API_BASE = os.getenv("API_BASE")
-API_KEY = os.getenv("API_KEY")
-MODEL_ID = os.getenv("MODEL_ID")
-
-if not all([API_BASE, API_KEY, MODEL_ID]):
-    raise ValueError(
-        "Missing required environment variables: API_BASE, API_KEY, MODEL_ID"
-    )
-
-
-# Define the state types for our graph
-class AgentState(TypedDict):
-    task: str
-    current_step: Optional[dict]  # Store serializable dict instead of ActionStep
-    error: Optional[str]
-    answer_text: Optional[str]
-
-
-# Initialize model with error handling
-try:
-    model = LiteLLMModel(
-        api_base=API_BASE,
-        api_key=API_KEY,
-        model_id=MODEL_ID,
-    )
-except Exception as e:
-    logger.error(f"Failed to initialize model: {str(e)}")
-    raise
-
-web_agent = create_web_agent(model)
-data_agent = create_data_analysis_agent(model)
-media_agent = create_media_agent(model)
-
-tools = [
-    # DuckDuckGoSearchTool(max_results=3),
-    # VisitWebpageTool(max_output_length=1000),
-    web_search,
-    perform_calculation,
-]
-
-# Initialize agent with error handling
-try:
-    prompt_templates = yaml.safe_load(
-        importlib.resources.files("smolagents.prompts")
-        .joinpath("code_agent.yaml")
-        .read_text()
-    )
-    # prompt_templates["system_prompt"] = MANAGER_SYSTEM_PROMPT
-
-    agent = CodeAgent(
-        add_base_tools=True,
-        additional_authorized_imports=[
-            "json",
-            "pandas",
-            "numpy",
-            "re",
-        ],
-        # max_steps=10,
-        managed_agents=[web_agent, data_agent, media_agent],
-        model=model,
-        prompt_templates=prompt_templates,
-        tools=tools,
-        step_callbacks=None,
-        verbosity_level=LogLevel.ERROR,
-    )
-    agent.logger.console.width = 66
-
-    agent.visualize()
-
-    tools = agent.tools
-    print(f"Tools: {tools}")
-
-except Exception as e:
-    logger.error(f"Failed to initialize agent: {str(e)}")
-    raise
-
-
-async def process_step(state: AgentState) -> AgentState:
-    """Process a single step of the agent's execution."""
-    try:
-        # Clear previous step results before running agent.run
-        state["current_step"] = None
-        state["answer_text"] = None
-        state["error"] = None
-
-        steps = agent.run(
-            task=state["task"],
-            additional_args=None,
-            images=None,
-            # max_steps=1,  # Process one step at a time
-            stream=True,
-            reset=False,  # Maintain agent's internal state across process_step calls
-        )
-
-        for step in steps:
-            if isinstance(step, ActionStep):
-                # Convert ActionStep to serializable dict using the correct attributes
-                state["current_step"] = {
-                    "step_number": step.step_number,
-                    "model_output": step.model_output,
-                    "observations": step.observations,
-                    "tool_calls": [
-                        {"name": tc.name, "arguments": tc.arguments}
-                        for tc in (step.tool_calls or [])
-                    ],
-                    "action_output": step.action_output,
-                }
-                logger.info(f"Processed action step {step.step_number}")
-
-                logger.info(f"Step {step.step_number} details: {step}")
-                logger.info(f"Sleeping for 60 seconds...")
-                time.sleep(60)
-
-            elif isinstance(step, FinalAnswerStep):
-                state["answer_text"] = step.final_answer
-                logger.info("Processed final answer")
-                logger.debug(f"Final answer details: {step}")
-                logger.info(f"Extracted answer text: {state['answer_text']}")
-                # Return immediately when we get a final answer
-                return state
-        # If loop finishes without FinalAnswerStep, return current state
-        return state
-    except Exception as e:
-        state["error"] = str(e)
-        logger.error(f"Error during agent execution step: {str(e)}")
-        return state
-
-
-def should_continue(state: AgentState) -> bool:
-    """Determine if the agent should continue processing steps."""
-    # Continue if we don't have an answer_text and no error
-    continue_execution = state.get("answer_text") is None and state.get("error") is None
-    logger.debug(
-        f"Checking should_continue: answer_text={state.get('answer_text') is not None}, error={state.get('error') is not None} -> Continue={continue_execution}"
-    )
-    return continue_execution
-
-
-# Build the LangGraph graph once with persistence
-memory = MemorySaver()
-builder = StateGraph(AgentState)
-builder.add_node("process_step", process_step)
-builder.add_edge(START, "process_step")
-builder.add_conditional_edges(
-    "process_step", should_continue, {True: "process_step", False: END}
-)
-graph = builder.compile(checkpointer=memory)
-
-
-async def stream_execution(task: str, thread_id: str) -> AsyncIterator[AgentState]:
-    """Stream the execution of the agent."""
-    if not task:
-        raise ValueError("Task cannot be empty")
-
-    logger.info(f"Initializing agent execution for task: {task}")
-
-    # Initialize the state
-    initial_state: AgentState = {
-        "task": task,
-        "current_step": None,
-        "error": None,
-        "answer_text": None,
-    }
-
-    # Pass thread_id via the config dict so the checkpointer can persist state
-    async for state in graph.astream(
-        initial_state, {"configurable": {"thread_id": thread_id}}
-    ):
-        yield state
-        # Propagate error immediately if it occurs without an answer
-        if state.get("error") and not state.get("answer_text"):
-            logger.error(f"Propagating error from stream: {state['error']}")
-            raise Exception(state["error"])
-
-
-async def run_with_streaming(task: str, thread_id: str) -> dict:
-    """Run the agent with streaming output and return the results."""
-    last_state = None
-    steps = []
-    error = None
-    final_answer_text = None
-
-    try:
-        logger.info(f"Starting execution run for task: {task}")
-        async for state in stream_execution(task, thread_id):
-            last_state = state
-
-            if current_step := state.get("current_step"):
-                if not steps or steps[-1]["step_number"] != current_step["step_number"]:
-                    steps.append(current_step)
-                    # Keep print here for direct user feedback during streaming
-                    print(f"\nStep {current_step['step_number']}:")
-                    print(f"Model Output: {current_step['model_output']}")
-                    print(f"Observations: {current_step['observations']}")
-                    if current_step.get("tool_calls"):
-                        print("Tool Calls:")
-                        for tc in current_step["tool_calls"]:
-                            print(f"  - {tc['name']}: {tc['arguments']}")
-                    if current_step.get("action_output"):
-                        print(f"Action Output: {current_step['action_output']}")
-
-        # After the stream is finished, process the last state
-        logger.info("Stream finished.")
-        if last_state:
-            # LangGraph streams dicts where keys are node names, values are state dicts
-            node_name = list(last_state.keys())[0]
-            actual_state = last_state.get(node_name)
-            if actual_state:
-                final_answer_text = actual_state.get("answer_text")
-                error = actual_state.get("error")
-                logger.info(
-                    f"Final answer text extracted from last state: {final_answer_text}"
-                )
-                logger.info(f"Error extracted from last state: {error}")
-                # Ensure steps list is consistent with the final state if needed
-                last_step_in_state = actual_state.get("current_step")
-                if last_step_in_state and (
-                    not steps
-                    or steps[-1]["step_number"] != last_step_in_state["step_number"]
-                ):
-                    logger.debug("Adding last step from final state to steps list.")
-                    steps.append(last_step_in_state)
-            else:
-                logger.warning(
-                    "Could not find actual state dictionary within last_state."
-                )
-
-        return {"steps": steps, "final_answer": final_answer_text, "error": error}
-
-    except Exception as e:
-        import traceback
-
-        logger.error(
-            f"Exception during run_with_streaming: {str(e)}\n{traceback.format_exc()}"
-        )
-        # Attempt to return based on the last known state even if exception occurred outside stream
-        final_answer_text = None
-        error_msg = str(e)
-        if last_state:
-            node_name = list(last_state.keys())[0]
-            actual_state = last_state.get(node_name)
-            if actual_state:
-                final_answer_text = actual_state.get("answer_text")
-
-        return {"steps": steps, "final_answer": final_answer_text, "error": error_msg}
-
-
-def main(task: str, thread_id: str = str(uuid.uuid4())):
-    # Enhance the question with minimal instructions
-    enhanced_question = f"""
-    GAIA Question: {task}
-
-    Please solve this multi-step reasoning problem by:
-    1. Breaking it down into logical steps
-    2. Using specialized agents when needed
-    3. Providing the final answer in the exact format requested
-    """
-
-    logger.info(
-        f"Starting agent run from __main__ for task: '{task}' with thread_id: {thread_id}"
-    )
-    result = asyncio.run(run_with_streaming(enhanced_question, thread_id))
-    logger.info("Agent run finished.")
-
-    logger.info(f"Result: {result}")
-    return extract_final_answer(result)
-
-
-if __name__ == "__main__":
-    # Example Usage
-    task_to_run = "What is the capital of France?"
-    thread_id = str(uuid.uuid4())  # Generate a unique thread ID for this run
-
-    final_answer = main(task_to_run, thread_id)
-    print(f"Final Answer: {final_answer}")

main_v2.py DELETED
@@ -1,155 +0,0 @@
-import logging
-import os
-
-import requests
-import yaml
-from dotenv import find_dotenv, load_dotenv
-from litellm._logging import _disable_debugging
-from openinference.instrumentation.smolagents import SmolagentsInstrumentor
-from phoenix.otel import register
-
-# from smolagents import CodeAgent, LiteLLMModel, LiteLLMRouterModel
-from smolagents import CodeAgent, LiteLLMModel
-from smolagents.monitoring import LogLevel
-
-from model_factory import ModelFactory
-from tools.smart_search.tool import SmartSearchTool
-
-_disable_debugging()
-
-# Configure OpenTelemetry with Phoenix
-register()
-SmolagentsInstrumentor().instrument()
-
-logging.basicConfig(
-    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-)
-logger = logging.getLogger(__name__)
-
-# load_dotenv(find_dotenv())
-
-# API_BASE = os.getenv("API_BASE")
-# API_KEY = os.getenv("API_KEY")
-# MODEL_ID = os.getenv("MODEL_ID")
-
-# Create model using the factory
-model = ModelFactory.create_model()
-
-# data_agent = create_data_agent(model)
-# media_agent = create_media_agent(model)
-# web_agent = create_web_agent(model)
-
-# search_agent = ToolCallingAgent(
-#     tools=[DuckDuckGoSearchTool(), VisitWebpageTool()],
-#     model=model,
-#     name="search_agent",
-#     description="This is an agent that can do web search.",
-# )
-
-prompt_templates = yaml.safe_load(open("prompts/code_agent_modified.yaml", "r"))
-
-agent = CodeAgent(
-    # add_base_tools=True,
-    # additional_authorized_imports=[
-    #     "json",
-    #     "pandas",
-    #     "numpy",
-    #     "re",
-    #     # "requests"
-    #     # "urllib.request",
-    # ],
-    # max_steps=10,
-    # managed_agents=[web_agent, data_agent, media_agent],
-    # managed_agents=[search_agent],
-    model=model,
-    prompt_templates=prompt_templates,
-    tools=[
-        SmartSearchTool(),
-        # VisitWebpageTool(max_output_length=1024),
-    ],
-    step_callbacks=None,
-    verbosity_level=LogLevel.ERROR,
-)
-
-agent.visualize()
-
-
-def main(task: str):
-    # Format the task to request both succinct and verbose answers
-    formatted_task = f"""Please provide two answers to the following question:
-
-1. A succinct answer that follows these rules:
-   - Contains ONLY the answer, nothing else
-   - Does not repeat the question
-   - Does not include explanations, reasoning, or context
-   - Does not include source attribution or references
-   - Does not use phrases like "The answer is" or "I found that"
-   - Does not include formatting, bullet points, or line breaks
-   - If the answer is a number, return only the number
-   - If the answer requires multiple items, separate them with commas
-   - If the answer requires ordering, maintain the specified order
-   - Uses the most direct and succinct form possible
-
-2. A verbose answer that includes:
-   - The complete answer with all relevant details
-   - Explanations and reasoning
-   - Context and background information
-   - Source attribution where appropriate
-
-Question: {task}
-
-Please format your response as a JSON object with two keys:
-- "succinct_answer": The concise answer following the rules above
-- "verbose_answer": The detailed explanation with context"""
-
-    result = agent.run(
-        additional_args=None,
-        images=None,
-        max_steps=3,
-        reset=True,
-        stream=False,
-        task=formatted_task,
-    )
-
-    # Parse the result into a dictionary
-    try:
-        import json
-
-        # Find the JSON object in the response
-        json_str = result[result.find("{") : result.rfind("}") + 1]
-        parsed_result = json.loads(json_str)
-    except (ValueError, AttributeError) as e:
-        logger.error(f"Error parsing result: {e}")
-        # If parsing fails, return the raw result
-        return result
-
-    logger.info(f"Result: {parsed_result}")
-    return parsed_result["succinct_answer"]
-
-
-if __name__ == "__main__":
-    DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-
-    api_url = DEFAULT_API_URL
-    questions_url = f"{api_url}/questions"
-    submit_url = f"{api_url}/submit"
-
-    response = requests.get(questions_url, timeout=15)
-    response.raise_for_status()
-    questions_data = response.json()
-
-    for question_data in questions_data[:1]:
-        file_name = question_data["file_name"]
-        level = question_data["Level"]
-        question = question_data["question"]
-        task_id = question_data["task_id"]
-
-        logger.info(f"Question: {question}")
-        # logger.info(f"Level: {level}")
-        if file_name:
-            logger.info(f"File Name: {file_name}")
-        # logger.info(f"Task ID: {task_id}")
-
-        final_answer = main(question)
-        logger.info(f"Final Answer: {final_answer}")
-        logger.info("--------------------------------")

model_factory.py
DELETED
@@ -1,39 +0,0 @@
-import os
-
-from dotenv import find_dotenv, load_dotenv
-from smolagents import LiteLLMModel, TransformersModel
-
-
-class ModelFactory:
-    @staticmethod
-    def create_model():
-        """
-        Creates and returns a model instance configured from environment variables.
-
-        Returns:
-            TransformersModel: a configured local model instance
-        """
-        # Load environment variables
-        load_dotenv(find_dotenv())
-
-        # Get configuration from environment variables
-        api_base = os.getenv("API_BASE")
-        api_key = os.getenv("API_KEY")
-        model_id = os.getenv("MODEL_ID")
-
-        # Create and return the model
-        # return LiteLLMModel(
-        #     api_base=api_base,
-        #     api_key=api_key,
-        #     model_id=model_id,
-        # )
-
-        return TransformersModel(
-            # max_new_tokens=5000,
-            max_new_tokens=256,
-            model_id="HuggingFaceTB/SmolLM2-135M-Instruct",
-            # model_id="HuggingFaceTB/SmolLM2-360M-Instruct",
-            # model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct",
-            # model_id="HuggingFaceTB/SmolVLM2-256M-Video-Instruct",
-            # model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
-        )
notebooks/SmolVLM2_Video_FT.ipynb
DELETED
The diff for this file is too large to render. See raw diff.

notebooks/bonus-unit1.ipynb
DELETED
The diff for this file is too large to render. See raw diff.

notebooks/unsloth_SmolLM2-135M-Instruct-bnb-4bit_xingyaoww_code-act.ipynb
DELETED
The diff for this file is too large to render. See raw diff.
prompts/code_agent.yaml
DELETED
@@ -1,325 +0,0 @@
-system_prompt: |-
-  You are an expert assistant who can solve any task using code blobs. You will be given a task to solve as best you can.
-  To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code.
-  To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences.
-
-  At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use.
-  Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with the '<end_code>' sequence.
-  During each intermediate step, you can use 'print()' to save whatever important information you will then need.
-  These print outputs will then appear in the 'Observation:' field, which will be available as input for the next step.
-  In the end you have to return a final answer using the `final_answer` tool.
-
-  Here are a few examples using notional tools:
-  ---
-  Task: "Generate an image of the oldest person in this document."
-
-  Thought: I will proceed step by step and use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer.
-  Code:
-  ```py
-  answer = document_qa(document=document, question="Who is the oldest person mentioned?")
-  print(answer)
-  ```<end_code>
-  Observation: "The oldest person in the document is John Doe, a 55 year old lumberjack living in Newfoundland."
-
-  Thought: I will now generate an image showcasing the oldest person.
-  Code:
-  ```py
-  image = image_generator("A portrait of John Doe, a 55-year-old man living in Canada.")
-  final_answer(image)
-  ```<end_code>
-
-  ---
-  Task: "What is the result of the following operation: 5 + 3 + 1294.678?"
-
-  Thought: I will use python code to compute the result of the operation and then return the final answer using the `final_answer` tool.
-  Code:
-  ```py
-  result = 5 + 3 + 1294.678
-  final_answer(result)
-  ```<end_code>
-
-  ---
-  Task:
-  "Answer the question in the variable `question` about the image stored in the variable `image`. The question is in French.
-  You have been provided with these additional arguments, that you can access using the keys as variables in your python code:
-  {'question': 'Quel est l'animal sur l'image?', 'image': 'path/to/image.jpg'}"
-
-  Thought: I will use the following tools: `translator` to translate the question into English and then `image_qa` to answer the question on the input image.
-  Code:
-  ```py
-  translated_question = translator(question=question, src_lang="French", tgt_lang="English")
-  print(f"The translated question is {translated_question}.")
-  answer = image_qa(image=image, question=translated_question)
-  final_answer(f"The answer is {answer}")
-  ```<end_code>
-
-  ---
-  Task:
-  In a 1979 interview, Stanislaus Ulam discusses with Martin Sherwin about other great physicists of his time, including Oppenheimer.
-  What does he say was the consequence of Einstein learning too much math on his creativity, in one word?
-
-  Thought: I need to find and read the 1979 interview of Stanislaus Ulam with Martin Sherwin.
-  Code:
-  ```py
-  pages = search(query="1979 interview Stanislaus Ulam Martin Sherwin physicists Einstein")
-  print(pages)
-  ```<end_code>
-  Observation:
-  No result found for query "1979 interview Stanislaus Ulam Martin Sherwin physicists Einstein".
-
-  Thought: The query was maybe too restrictive and did not find any results. Let's try again with a broader query.
-  Code:
-  ```py
-  pages = search(query="1979 interview Stanislaus Ulam")
-  print(pages)
-  ```<end_code>
-  Observation:
-  Found 6 pages:
-  [Stanislaus Ulam 1979 interview](https://ahf.nuclearmuseum.org/voices/oral-histories/stanislaus-ulams-interview-1979/)
-
-  [Ulam discusses Manhattan Project](https://ahf.nuclearmuseum.org/manhattan-project/ulam-manhattan-project/)
-
-  (truncated)
-
-  Thought: I will read the first 2 pages to know more.
-  Code:
-  ```py
-  for url in ["https://ahf.nuclearmuseum.org/voices/oral-histories/stanislaus-ulams-interview-1979/", "https://ahf.nuclearmuseum.org/manhattan-project/ulam-manhattan-project/"]:
-      whole_page = visit_webpage(url)
-      print(whole_page)
-      print("\n" + "="*80 + "\n")  # Print separator between pages
-  ```<end_code>
-  Observation:
-  Manhattan Project Locations:
-  Los Alamos, NM
-  Stanislaus Ulam was a Polish-American mathematician. He worked on the Manhattan Project at Los Alamos and later helped design the hydrogen bomb. In this interview, he discusses his work at
-  (truncated)
-
-  Thought: I now have the final answer: from the webpages visited, Stanislaus Ulam says of Einstein: "He learned too much mathematics and sort of diminished, it seems to me personally, it seems to me his purely physics creativity." Let's answer in one word.
-  Code:
-  ```py
-  final_answer("diminished")
-  ```<end_code>
-
-  ---
-  Task: "Which city has the highest population: Guangzhou or Shanghai?"
-
-  Thought: I need to get the populations for both cities and compare them: I will use the tool `search` to get the population of both cities.
-  Code:
-  ```py
-  for city in ["Guangzhou", "Shanghai"]:
-      print(f"Population {city}:", search(f"{city} population"))
-  ```<end_code>
-  Observation:
-  Population Guangzhou: ['Guangzhou has a population of 15 million inhabitants as of 2021.']
-  Population Shanghai: '26 million (2019)'
-
-  Thought: Now I know that Shanghai has the highest population.
-  Code:
-  ```py
-  final_answer("Shanghai")
-  ```<end_code>
-
-  ---
-  Task: "What is the current age of the pope, raised to the power 0.36?"
-
-  Thought: I will use the tool `wiki` to get the age of the pope, and confirm that with a web search.
-  Code:
-  ```py
-  pope_age_wiki = wiki(query="current pope age")
-  print("Pope age as per wikipedia:", pope_age_wiki)
-  pope_age_search = web_search(query="current pope age")
-  print("Pope age as per google search:", pope_age_search)
-  ```<end_code>
-  Observation:
-  Pope age: "The pope Francis is currently 88 years old."
-
-  Thought: I know that the pope is 88 years old. Let's compute the result using python code.
-  Code:
-  ```py
-  pope_current_age = 88 ** 0.36
-  final_answer(pope_current_age)
-  ```<end_code>
-
-  The above examples were using notional tools that might not exist for you. On top of performing computations in the Python code snippets that you create, you only have access to these tools, behaving like regular python functions:
-  ```python
-  {%- for tool in tools.values() %}
-  def {{ tool.name }}({% for arg_name, arg_info in tool.inputs.items() %}{{ arg_name }}: {{ arg_info.type }}{% if not loop.last %}, {% endif %}{% endfor %}) -> {{tool.output_type}}:
-      """{{ tool.description }}
-
-      Args:
-      {%- for arg_name, arg_info in tool.inputs.items() %}
-          {{ arg_name }}: {{ arg_info.description }}
-      {%- endfor %}
-      """
-  {% endfor %}
-  ```
-
-  {%- if managed_agents and managed_agents.values() | list %}
-  You can also give tasks to team members.
-  Calling a team member works the same as calling a tool: simply, the only argument you can give in the call is 'task'.
-  Given that this team member is a real human, you should be very verbose in your task; it should be a long string providing information as detailed as necessary.
-  Here is a list of the team members that you can call:
-  ```python
-  {%- for agent in managed_agents.values() %}
-  def {{ agent.name }}("Your query goes here.") -> str:
-      """{{ agent.description }}"""
-  {% endfor %}
-  ```
-  {%- endif %}
-
-  Here are the rules you should always follow to solve your task:
-  1. Always provide a 'Thought:' sequence, and a 'Code:\n```py' sequence ending with '```<end_code>' sequence, else you will fail.
-  2. Use only variables that you have defined!
-  3. Always use the right arguments for the tools. DO NOT pass the arguments as a dict as in 'answer = wiki({'query': "What is the place where James Bond lives?"})', but use the arguments directly as in 'answer = wiki(query="What is the place where James Bond lives?")'.
-  4. Take care to not chain too many sequential tool calls in the same code block, especially when the output format is unpredictable. For instance, a call to search has an unpredictable return format, so do not have another tool call that depends on its output in the same block: rather output results with print() to use them in the next block.
-  5. Call a tool only when needed, and never re-do a tool call that you previously did with the exact same parameters.
-  6. Don't name any new variable with the same name as a tool: for instance don't name a variable 'final_answer'.
-  7. Never create any notional variables in your code, as having these in your logs will derail you from the true variables.
-  8. You can use imports in your code, but only from the following list of modules: {{authorized_imports}}
-  9. The state persists between code executions: so if in one step you've created variables or imported modules, these will all persist.
-  10. Don't give up! You're in charge of solving the task, not providing directions to solve it.
-
-  Now Begin!
-planning:
-  initial_plan: |-
-    You are a world expert at analyzing a situation to derive facts, and planning accordingly towards solving a task.
-    Below I will present you a task. You will need to 1. build a survey of facts known or needed to solve the task, then 2. make a plan of action to solve the task.
-
-    ## 1. Facts survey
-    You will build a comprehensive preparatory survey of which facts we have at our disposal and which ones we still need.
-    These "facts" will typically be specific names, dates, values, etc. Your answer should use the below headings:
-    ### 1.1. Facts given in the task
-    List here the specific facts given in the task that could help you (there might be nothing here).
-
-    ### 1.2. Facts to look up
-    List here any facts that we may need to look up.
-    Also list where to find each of these, for instance a website, a file... - maybe the task contains some sources that you should re-use here.
-
-    ### 1.3. Facts to derive
-    List here anything that we want to derive from the above by logical reasoning, for instance computation or simulation.
-
-    Don't make any assumptions. For each item, provide a thorough reasoning. Do not add anything else on top of the three headings above.
-
-    ## 2. Plan
-    Then for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts.
-    This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer.
-    Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS.
-    After writing the final step of the plan, write the '\n<end_plan>' tag and stop there.
-
-    You can leverage these tools, behaving like regular python functions:
-    ```python
-    {%- for tool in tools.values() %}
-    def {{ tool.name }}({% for arg_name, arg_info in tool.inputs.items() %}{{ arg_name }}: {{ arg_info.type }}{% if not loop.last %}, {% endif %}{% endfor %}) -> {{tool.output_type}}:
-        """{{ tool.description }}
-
-        Args:
-        {%- for arg_name, arg_info in tool.inputs.items() %}
-            {{ arg_name }}: {{ arg_info.description }}
-        {%- endfor %}
-        """
-    {% endfor %}
-    ```
-
-    {%- if managed_agents and managed_agents.values() | list %}
-    You can also give tasks to team members.
-    Calling a team member works the same as calling a tool: simply, the only argument you can give in the call is 'task'.
-    Given that this team member is a real human, you should be very verbose in your task; it should be a long string providing information as detailed as necessary.
-    Here is a list of the team members that you can call:
-    ```python
-    {%- for agent in managed_agents.values() %}
-    def {{ agent.name }}("Your query goes here.") -> str:
-        """{{ agent.description }}"""
-    {% endfor %}
-    ```
-    {%- endif %}
-
-    ---
-    Now begin! Here is your task:
-    ```
-    {{task}}
-    ```
-    First in part 1, write the facts survey, then in part 2, write your plan.
-  update_plan_pre_messages: |-
-    You are a world expert at analyzing a situation, and planning accordingly towards solving a task.
-    You have been given the following task:
-    ```
-    {{task}}
-    ```
-
-    Below you will find a history of attempts made to solve this task.
-    You will first have to produce a survey of known and unknown facts, then propose a step-by-step high-level plan to solve the task.
-    If the previous tries so far have met some success, your updated plan can build on these results.
-    If you are stalled, you can make a completely new plan starting from scratch.
-
-    Find the task and history below:
-  update_plan_post_messages: |-
-    Now write your updated facts below, taking into account the above history:
-    ## 1. Updated facts survey
-    ### 1.1. Facts given in the task
-    ### 1.2. Facts that we have learned
-    ### 1.3. Facts still to look up
-    ### 1.4. Facts still to derive
-
-    Then write a step-by-step high-level plan to solve the task above.
-    ## 2. Plan
-    ### 2.1. ...
-    Etc.
-    This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer.
-    Beware that you have {remaining_steps} steps remaining.
-    Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS.
-    After writing the final step of the plan, write the '\n<end_plan>' tag and stop there.
-
-    You can leverage these tools, behaving like regular python functions:
-    ```python
-    {%- for tool in tools.values() %}
-    def {{ tool.name }}({% for arg_name, arg_info in tool.inputs.items() %}{{ arg_name }}: {{ arg_info.type }}{% if not loop.last %}, {% endif %}{% endfor %}) -> {{tool.output_type}}:
-        """{{ tool.description }}
-
-        Args:
-        {%- for arg_name, arg_info in tool.inputs.items() %}
-            {{ arg_name }}: {{ arg_info.description }}
-        {%- endfor %}"""
-    {% endfor %}
-    ```
-
-    {%- if managed_agents and managed_agents.values() | list %}
-    You can also give tasks to team members.
-    Calling a team member works the same as calling a tool: simply, the only argument you can give in the call is 'task'.
-    Given that this team member is a real human, you should be very verbose in your task; it should be a long string providing information as detailed as necessary.
-    Here is a list of the team members that you can call:
-    ```python
-    {%- for agent in managed_agents.values() %}
-    def {{ agent.name }}("Your query goes here.") -> str:
-        """{{ agent.description }}"""
-    {% endfor %}
-    ```
-    {%- endif %}
-
-    Now write your updated facts survey below, then your new plan.
-managed_agent:
-  task: |-
-    You're a helpful agent named '{{name}}'.
-    You have been submitted this task by your manager.
-    ---
-    Task:
-    {{task}}
-    ---
-    You're helping your manager solve a wider task: so make sure to not provide a one-line answer, but give as much information as possible to give them a clear understanding of the answer.
-
-    Your final_answer WILL HAVE to contain these parts:
-    ### 1. Task outcome (short version):
-    ### 2. Task outcome (extremely detailed version):
-    ### 3. Additional context (if relevant):
-
-    Put all these in your final_answer tool; everything that you do not pass as an argument to final_answer will be lost.
-    And even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback.
-  report: |-
-    Here is the final answer from your managed agent '{{name}}':
-    {{final_answer}}
-final_answer:
-  pre_messages: |-
-    An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:
-  post_messages: |-
-    Based on the above, please provide an answer to the following user task:
-    {{task}}
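
This file is a plain smolagents prompt-template YAML: the Jinja placeholders such as {{task}}, {{tool.name}}, and {{authorized_imports}} are filled in at runtime. A sketch of how such a file is typically swapped in for the library defaults, assuming a smolagents version whose CodeAgent accepts a prompt_templates mapping:

```python
import yaml
from smolagents import CodeAgent

from model_factory import ModelFactory

# Load the custom templates instead of smolagents' bundled defaults.
with open("prompts/code_agent.yaml") as f:
    prompt_templates = yaml.safe_load(f)

agent = CodeAgent(
    tools=[],
    model=ModelFactory.create_model(),
    prompt_templates=prompt_templates,
)
```
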
prompts/code_agent_modified.yaml
DELETED
@@ -1,281 +0,0 @@
-system_prompt: |-
-  You are an expert assistant who can solve any task using code blobs. You will be given a task to solve as best you can.
-  To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code.
-  To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences.
-
-  At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use.
-  Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with the '<end_code>' sequence.
-  During each intermediate step, you can use 'print()' to save whatever important information you will then need.
-  These print outputs will then appear in the 'Observation:' field, which will be available as input for the next step.
-  In the end you have to return a final answer using the `final_answer` tool.
-
-  Here are a few examples using the available tools:
-  ---
-  Task: "What is the current age of the pope?"
-
-  Thought: I will use the smart_search tool to find information about the pope's age. This tool will automatically check both web search and Wikipedia if available.
-  Code:
-  ```py
-  result = smart_search(query="current pope age")
-  print("Search result:", result)
-  ```<end_code>
-  Observation:
-  Wikipedia result:
-  Pope Francis is currently 88 years old.
-
-  Web search result:
-  Pope Francis, born 17 December 1936, is currently 88 years old.
-
-  Thought: I can now provide the final answer based on the reliable information found.
-  Code:
-  ```py
-  final_answer("88 years old")
-  ```<end_code>
-
-  ---
-  Task: "Which city has the highest population: Guangzhou or Shanghai?"
-
-  Thought: I will use smart_search to find the population information for both cities.
-  Code:
-  ```py
-  for city in ["Guangzhou", "Shanghai"]:
-      print(f"Population {city}:", smart_search(f"{city} population"))
-  ```<end_code>
-  Observation:
-  Population Guangzhou:
-  Web search result:
-  Guangzhou has a population of 15 million inhabitants as of 2021.
-
-  Population Shanghai:
-  Web search result:
-  Shanghai has a population of 26 million as of 2019.
-
-  Thought: Now I know that Shanghai has the highest population.
-  Code:
-  ```py
-  final_answer("Shanghai")
-  ```<end_code>
-
-  ---
-  Task: "What is the capital of France?"
-
-  Thought: I will use smart_search to find information about France's capital. This tool will automatically check both web search and Wikipedia if available.
-  Code:
-  ```py
-  result = smart_search(query="capital of France")
-  print("Search result:", result)
-  ```<end_code>
-  Observation:
-  Wikipedia result:
-  Paris is the capital and most populous city of France.
-
-  Web search result:
-  Paris is the capital city of France, located in the north-central part of the country.
-
-  Thought: I can now provide the final answer based on the reliable information found.
-  Code:
-  ```py
-  final_answer("Paris")
-  ```<end_code>
-
-  ---
-  Task: "What is the average lifespan of a domestic cat?"
-
-  Thought: I will use smart_search to find information about cat lifespans.
-  Code:
-  ```py
-  result = smart_search(query="average lifespan of domestic cat")
-  print("Search result:", result)
-  ```<end_code>
-  Observation:
-  Web search result:
-  The average lifespan of a domestic cat is 12-15 years, though some can live into their 20s with proper care.
-
-  Thought: I can now provide the final answer based on the search results.
-  Code:
-  ```py
-  final_answer("12-15 years")
-  ```<end_code>
-
-  You have access to these tools, behaving like regular python functions:
-  ```python
-  {%- for tool in tools.values() %}
-  def {{ tool.name }}({% for arg_name, arg_info in tool.inputs.items() %}{{ arg_name }}: {{ arg_info.type }}{% if not loop.last %}, {% endif %}{% endfor %}) -> {{tool.output_type}}:
-      """{{ tool.description }}
-
-      Args:
-      {%- for arg_name, arg_info in tool.inputs.items() %}
-          {{ arg_name }}: {{ arg_info.description }}
-      {%- endfor %}
-      """
-  {% endfor %}
-  ```
-
-  {%- if managed_agents and managed_agents.values() | list %}
-  You can also give tasks to team members.
-  Calling a team member works the same as calling a tool: simply, the only argument you can give in the call is 'task'.
-  Given that this team member is a real human, you should be very verbose in your task; it should be a long string providing information as detailed as necessary.
-  Here is a list of the team members that you can call:
-  ```python
-  {%- for agent in managed_agents.values() %}
-  def {{ agent.name }}("Your query goes here.") -> str:
-      """{{ agent.description }}"""
-  {% endfor %}
-  ```
-  {%- endif %}
-
-  Here are the rules you should always follow to solve your task:
-  1. Always provide a 'Thought:' sequence, and a 'Code:\n```py' sequence ending with '```<end_code>' sequence, else you will fail.
-  2. Use only variables that you have defined!
-  3. Always use the right arguments for the tools. DO NOT pass the arguments as a dict as in 'answer = wiki({'query': "What is the place where James Bond lives?"})', but use the arguments directly as in 'answer = wiki(query="What is the place where James Bond lives?")'.
-  4. Take care to not chain too many sequential tool calls in the same code block, especially when the output format is unpredictable. For instance, a call to search has an unpredictable return format, so do not have another tool call that depends on its output in the same block: rather output results with print() to use them in the next block.
-  5. Call a tool only when needed, and never re-do a tool call that you previously did with the exact same parameters.
-  6. Don't name any new variable with the same name as a tool: for instance don't name a variable 'final_answer'.
-  7. Never create any notional variables in your code, as having these in your logs will derail you from the true variables.
-  8. You can use imports in your code, but only from the following list of modules: {{authorized_imports}}
-  9. The state persists between code executions: so if in one step you've created variables or imported modules, these will all persist.
-  10. Don't give up! You're in charge of solving the task, not providing directions to solve it.
-
-  Now Begin!
-planning:
-  initial_plan: |-
-    You are a world expert at analyzing a situation to derive facts, and planning accordingly towards solving a task.
-    Below I will present you a task. You will need to 1. build a survey of facts known or needed to solve the task, then 2. make a plan of action to solve the task.
-
-    ## 1. Facts survey
-    You will build a comprehensive preparatory survey of which facts we have at our disposal and which ones we still need.
-    These "facts" will typically be specific names, dates, values, etc. Your answer should use the below headings:
-    ### 1.1. Facts given in the task
-    List here the specific facts given in the task that could help you (there might be nothing here).
-
-    ### 1.2. Facts to look up
-    List here any facts that we may need to look up.
-    Also list where to find each of these, for instance a website, a file... - maybe the task contains some sources that you should re-use here.
-
-    ### 1.3. Facts to derive
-    List here anything that we want to derive from the above by logical reasoning, for instance computation or simulation.
-
-    Don't make any assumptions. For each item, provide a thorough reasoning. Do not add anything else on top of the three headings above.
-
-    ## 2. Plan
-    Then for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts.
-    This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer.
-    Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS.
-    After writing the final step of the plan, write the '\n<end_plan>' tag and stop there.
-
-    You can leverage these tools, behaving like regular python functions:
-    ```python
-    {%- for tool in tools.values() %}
-    def {{ tool.name }}({% for arg_name, arg_info in tool.inputs.items() %}{{ arg_name }}: {{ arg_info.type }}{% if not loop.last %}, {% endif %}{% endfor %}) -> {{tool.output_type}}:
-        """{{ tool.description }}
-
-        Args:
-        {%- for arg_name, arg_info in tool.inputs.items() %}
-            {{ arg_name }}: {{ arg_info.description }}
-        {%- endfor %}
-        """
-    {% endfor %}
-    ```
-
-    {%- if managed_agents and managed_agents.values() | list %}
-    You can also give tasks to team members.
-    Calling a team member works the same as calling a tool: simply, the only argument you can give in the call is 'task'.
-    Given that this team member is a real human, you should be very verbose in your task; it should be a long string providing information as detailed as necessary.
-    Here is a list of the team members that you can call:
-    ```python
-    {%- for agent in managed_agents.values() %}
-    def {{ agent.name }}("Your query goes here.") -> str:
-        """{{ agent.description }}"""
-    {% endfor %}
-    ```
-    {%- endif %}
-
-    ---
-    Now begin! Here is your task:
-    ```
-    {{task}}
-    ```
-    First in part 1, write the facts survey, then in part 2, write your plan.
-  update_plan_pre_messages: |-
-    You are a world expert at analyzing a situation, and planning accordingly towards solving a task.
-    You have been given the following task:
-    ```
-    {{task}}
-    ```
-
-    Below you will find a history of attempts made to solve this task.
-    You will first have to produce a survey of known and unknown facts, then propose a step-by-step high-level plan to solve the task.
-    If the previous tries so far have met some success, your updated plan can build on these results.
-    If you are stalled, you can make a completely new plan starting from scratch.
-
-    Find the task and history below:
-  update_plan_post_messages: |-
-    Now write your updated facts below, taking into account the above history:
-    ## 1. Updated facts survey
-    ### 1.1. Facts given in the task
-    ### 1.2. Facts that we have learned
-    ### 1.3. Facts still to look up
-    ### 1.4. Facts still to derive
-
-    Then write a step-by-step high-level plan to solve the task above.
-    ## 2. Plan
-    ### 2.1. ...
-    Etc.
-    This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer.
-    Beware that you have {remaining_steps} steps remaining.
-    Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS.
-    After writing the final step of the plan, write the '\n<end_plan>' tag and stop there.
-
-    You can leverage these tools, behaving like regular python functions:
-    ```python
-    {%- for tool in tools.values() %}
-    def {{ tool.name }}({% for arg_name, arg_info in tool.inputs.items() %}{{ arg_name }}: {{ arg_info.type }}{% if not loop.last %}, {% endif %}{% endfor %}) -> {{tool.output_type}}:
-        """{{ tool.description }}
-
-        Args:
-        {%- for arg_name, arg_info in tool.inputs.items() %}
-            {{ arg_name }}: {{ arg_info.description }}
-        {%- endfor %}"""
-    {% endfor %}
-    ```
-
-    {%- if managed_agents and managed_agents.values() | list %}
-    You can also give tasks to team members.
-    Calling a team member works the same as calling a tool: simply, the only argument you can give in the call is 'task'.
-    Given that this team member is a real human, you should be very verbose in your task; it should be a long string providing information as detailed as necessary.
-    Here is a list of the team members that you can call:
-    ```python
-    {%- for agent in managed_agents.values() %}
-    def {{ agent.name }}("Your query goes here.") -> str:
-        """{{ agent.description }}"""
-    {% endfor %}
-    ```
-    {%- endif %}
-
-    Now write your updated facts survey below, then your new plan.
-managed_agent:
-  task: |-
-    You're a helpful agent named '{{name}}'.
-    You have been submitted this task by your manager.
-    ---
-    Task:
-    {{task}}
-    ---
-    You're helping your manager solve a wider task: so make sure to not provide a one-line answer, but give as much information as possible to give them a clear understanding of the answer.
-
-    Your final_answer WILL HAVE to contain these parts:
-    ### 1. Task outcome (short version):
-    ### 2. Task outcome (extremely detailed version):
-    ### 3. Additional context (if relevant):
-
-    Put all these in your final_answer tool; everything that you do not pass as an argument to final_answer will be lost.
-    And even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback.
-  report: |-
-    Here is the final answer from your managed agent '{{name}}':
-    {{final_answer}}
-final_answer:
-  pre_messages: |-
-    An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:
-  post_messages: |-
-    Based on the above, please provide an answer to the following user task:
-    {{task}}
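
The only substantive difference from code_agent.yaml is that every few-shot example is rewritten around a single smart_search tool, which lived in the also-deleted tools/smart_search/tool.py (its diff is not shown here). For orientation, a rough sketch of the shape such a tool takes in smolagents; the DuckDuckGo-backed body is an assumption, not the deleted implementation:

```python
from duckduckgo_search import DDGS
from smolagents import tool


@tool
def smart_search(query: str) -> str:
    """Search the web and return a short digest of the top results.

    Args:
        query: The search query to run.
    """
    # Illustrative backend only; the deleted tool also merged in
    # Wikipedia results when available.
    results = DDGS().text(query, max_results=3)
    return "\n\n".join(
        f"[{r['title']}]({r['href']})\n{r['body']}" for r in results
    )
```
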
requirements.txt
CHANGED
@@ -1,38 +1,2 @@
-
-
-bitsandbytes>=0.45.5
-duckduckgo-search>=8.0.1
-gradio[oauth]>=5.26.0
-hf-xet>=1.0.5
-hydra-core>=1.3.2
-ipywidgets>=8.1.6
-isort>=6.0.1
-jupyter>=1.1.1
-kagglehub>=0.3.12
-langchain>=0.1.0
-langchain-community>=0.0.10
-langgraph>=0.3.34
-litellm>=1.10.0
-llama-index>=0.12.33
-llama-index-embeddings-huggingface>=0.5.3
-llama-index-readers-wikipedia>=0.3.0
-markdown>=3.8
-mlcroissant>=1.0.17
-num2words>=0.5.14
-numpy>=2.2.5
-pandas>=2.0.0
-peft>=0.15.2
-pytest>=8.3.5
-pytest-cov>=6.1.1
-python-dotenv>=1.0.0
-requests>=2.32.3
-sentence-transformers>=4.1.0
-smolagents[litellm,telemetry,vllm]>=1.14.0
-sse-starlette>=2.3.4
-tensorboardX>=2.6.2.2
-trl>=0.17.0
-typing-extensions>=4.5.0
-unsloth>=2025.4.5
-wandb>=0.19.10
-wikipedia>=1.4.0
-wikipedia-api>=0.8.1
+gradio
+requests
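
After this reset the Space runs on just the UI layer and plain HTTP. A minimal sketch of an app that needs only these two dependencies (the endpoint is the scoring URL already used in main_v2.py above; the app structure itself is illustrative, not the actual app.py):

```python
import gradio as gr
import requests

API_URL = "https://agents-course-unit4-scoring.hf.space"


def fetch_questions() -> str:
    """Fetch the evaluation questions and render them as plain text."""
    response = requests.get(f"{API_URL}/questions", timeout=15)
    response.raise_for_status()
    return "\n\n".join(q["question"] for q in response.json())


demo = gr.Interface(
    fn=fetch_questions,
    inputs=None,
    outputs=gr.Textbox(label="Questions"),
    title="GAIA questions",
)

if __name__ == "__main__":
    demo.launch()
```
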
serve.py
DELETED
@@ -1,635 +0,0 @@
-import asyncio
-import logging
-import os
-import time
-from pprint import pprint
-from threading import Thread
-from typing import Any, Dict, List
-
-# isort: off
-from unsloth import (
-    FastLanguageModel,
-    FastModel,
-    FastVisionModel,
-    is_bfloat16_supported,
-)  # noqa: E402
-from unsloth.chat_templates import get_chat_template  # noqa: E402
-
-# isort: on
-
-import asyncio
-import json
-import threading
-import uuid
-from datetime import datetime
-from typing import Dict, List, Optional
-
-from datasets import (
-    Dataset,
-    DatasetDict,
-    IterableDataset,
-    IterableDatasetDict,
-    load_dataset,
-)
-from fastapi import FastAPI, HTTPException, Request
-from openai.types.chat.chat_completion import ChatCompletion
-from openai.types.chat.chat_completion import Choice as ChatCompletionChoice
-from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
-from openai.types.chat.chat_completion_chunk import Choice as ChatCompletionChunkChoice
-from openai.types.chat.chat_completion_chunk import ChoiceDelta
-from openai.types.chat.chat_completion_message import ChatCompletionMessage
-from openai.types.chat.completion_create_params import CompletionCreateParams
-from openai.types.fine_tuning import FineTuningJob
-from peft import PeftModel
-from pydantic import TypeAdapter
-from ray import serve
-from smolagents import CodeAgent, LiteLLMModel, Model, TransformersModel, VLLMModel
-from smolagents.monitoring import LogLevel
-from sse_starlette import EventSourceResponse
-from starlette.responses import JSONResponse
-from transformers import (
-    AutoModelForCausalLM,
-    AutoTokenizer,
-    DataCollatorForLanguageModeling,
-    Trainer,
-    TrainingArguments,
-)
-from transformers.generation.streamers import AsyncTextIteratorStreamer
-from transformers.image_utils import load_image
-from trl import SFTTrainer
-
-dtype = (
-    None  # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
-)
-load_in_4bit = True  # Use 4bit quantization to reduce memory usage. Can be False.
-max_seq_length = 2048  # Supports RoPE scaling internally, so choose any!
-# max_seq_length = 4096  # Choose any! We auto support RoPE Scaling internally!
-
-
-logger = logging.getLogger("ray.serve")
-
-os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
-
-app = FastAPI()
-
-# middlewares = [
-#     middleware
-#     for middleware in ConnexionMiddleware.default_middlewares
-#     if middleware is not SecurityMiddleware
-# ]
-
-# connexion_app = AsyncApp(import_name=__name__, middlewares=middlewares)
-
-# connexion_app.add_api(
-#     # "api/openai/v1/openapi/openapi.yaml",
-#     "api/v1/openapi/openapi.yaml",
-#     # base_path="/openai/v1",
-#     base_path="/v1",
-#     pythonic_params=True,
-#     resolver_error=501,
-# )
-
-# # fastapi_app.mount("/api", ConnexionMiddleware(app=connexion_app, import_name=__name__))
-# # app.mount("/api", ConnexionMiddleware(app=connexion_app, import_name=__name__))
-# app.mount(
-#     "/",
-#     ConnexionMiddleware(
-#         app=connexion_app,
-#         import_name=__name__,
-#         # middlewares=middlewares,
-#     ),
-# )
-
-
-@serve.deployment(
-    autoscaling_config={
-        # https://docs.ray.io/en/latest/serve/advanced-guides/advanced-autoscaling.html#required-define-upper-and-lower-autoscaling-limits
-        "max_replicas": 1,
-        "min_replicas": 1,  # TODO: set to 0
-        "target_ongoing_requests": 2,  # https://docs.ray.io/en/latest/serve/advanced-guides/advanced-autoscaling.html#target-ongoing-requests-default-2
-    },
-    max_ongoing_requests=5,  # https://docs.ray.io/en/latest/serve/advanced-guides/advanced-autoscaling.html#max-ongoing-requests-default-5
-    ray_actor_options={"num_gpus": 1},
-)
-@serve.ingress(app)
-class ModelDeployment:
-    def __init__(
-        self,
-        model_name: str,
-    ):
-        self.model_name = model_name
-        self.fine_tuning_jobs: Dict[str, FineTuningJob] = {}
-        self.training_threads: Dict[str, threading.Thread] = {}
-
-        # Load base model and processor
-        self.model, self.processor = FastModel.from_pretrained(
-            load_in_4bit=load_in_4bit,
-            max_seq_length=max_seq_length,
-            model_name=self.model_name,
-        )
-
-        # Configure LoRA for fine-tuning
-        self.model = FastModel.get_peft_model(
-            self.model,
-            r=16,  # LoRA rank
-            target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
-            lora_alpha=32,
-            lora_dropout=0.05,
-            bias="none",
-            use_gradient_checkpointing=True,
-            random_state=42,
-            use_rslora=False,
-        )
-
-        FastModel.for_inference(self.model)  # Enable native 2x faster inference
-
-    def reconfigure(self, config: Dict[str, Any]):
-        print("=== reconfigure ===")
-        print("config:")
-        print(config)
-        # https://docs.ray.io/en/latest/serve/production-guide/config.html#dynamically-change-parameters-without-restarting-replicas-user-config
-
-    def _run_training(self, job_id: str, training_file: str, model_name: str):
-        """Run the training process in a separate thread."""
-        try:
-            # Update job status to queued
-            self.fine_tuning_jobs[job_id].status = "queued"
-
-            # Simulate file validation
-            time.sleep(2)
-
-            # Update job status to running
-            self.fine_tuning_jobs[job_id].status = "running"
-            self.fine_tuning_jobs[job_id].started_at = int(datetime.now().timestamp())
-
-            # Load and prepare dataset
-            dataset = load_dataset("json", data_files=training_file)
-
-            # Configure chat template
-            tokenizer = get_chat_template(
-                self.processor,
-                chat_template="chatml",
-                mapping={
-                    "role": "from",
-                    "content": "value",
-                    "user": "human",
-                    "assistant": "gpt",
-                },
-                map_eos_token=True,
-            )
-
-            # Format dataset
-            def formatting_prompts_func(examples):
-                convos = examples["conversations"]
-                texts = [
-                    tokenizer.apply_chat_template(
-                        convo, tokenize=False, add_generation_prompt=False
-                    )
-                    for convo in convos
-                ]
-                return {"text": texts}
-
-            dataset = dataset.map(formatting_prompts_func, batched=True)
-
-            # Configure training arguments
-            training_args = TrainingArguments(
-                output_dir=f"models/{job_id}",
-                num_train_epochs=3,
-                per_device_train_batch_size=4,
-                gradient_accumulation_steps=4,
-                learning_rate=2e-4,
-                fp16=True,
-                logging_steps=10,
-                save_strategy="epoch",
-                optim="adamw_torch",
-                warmup_ratio=0.1,
-                lr_scheduler_type="cosine",
-                weight_decay=0.01,
-            )
-
-            # Create data collator
-            data_collator = DataCollatorForLanguageModeling(
-                tokenizer=tokenizer,
-                mlm=False,
-            )
-
-            # Create trainer
-            trainer = SFTTrainer(
-                model=self.model,
-                tokenizer=tokenizer,
-                train_dataset=dataset["train"],
-                args=training_args,
-                data_collator=data_collator,
-                max_seq_length=max_seq_length,
-                packing=False,
-            )
-
-            # Train
-            trainer.train()
-
-            # Save model and adapter
-            output_dir = f"models/{job_id}"
-            os.makedirs(output_dir, exist_ok=True)
-
-            # Save the base model config and tokenizer
-            self.model.config.save_pretrained(output_dir)
-            tokenizer.save_pretrained(output_dir)
-
-            # Save the adapter weights
-            self.model.save_pretrained(output_dir)
-
-            # Save the merged model in 16-bit format
-            try:
-                # First try to merge and save in 16-bit
-                self.model.save_pretrained_merged(
-                    output_dir,
-                    tokenizer,
-                    save_method="merged_16bit",
-                )
-            except Exception as merge_error:
-                print(f"Failed to merge weights: {str(merge_error)}")
-                # If merging fails, just save the adapter weights
-                self.model.save_pretrained(output_dir)
-
-            # Update job status to succeeded
-            self.fine_tuning_jobs[job_id].status = "succeeded"
-            self.fine_tuning_jobs[job_id].finished_at = int(datetime.now().timestamp())
-            self.fine_tuning_jobs[job_id].trained_tokens = (
-                trainer.state.global_step * training_args.per_device_train_batch_size
-            )
-
-            # Add result files
-            result_files = [
-                f"{output_dir}/config.json",
-                f"{output_dir}/tokenizer.json",
-                f"{output_dir}/adapter_config.json",
-                f"{output_dir}/adapter_model.bin",
-            ]
-
-            # Add merged model files if they exist
-            if os.path.exists(f"{output_dir}/pytorch_model.bin"):
-                result_files.append(f"{output_dir}/pytorch_model.bin")
-
-            self.fine_tuning_jobs[job_id].result_files = result_files
-
-        except Exception as e:
-            # Update job status to failed
-            self.fine_tuning_jobs[job_id].status = "failed"
-            self.fine_tuning_jobs[job_id].finished_at = int(datetime.now().timestamp())
-            self.fine_tuning_jobs[job_id].error = str(e)
-            print(f"Training failed: {str(e)}")
-            import traceback
-
-            print(traceback.format_exc())
-
-    @app.post("/v1/fine_tuning/jobs")
-    async def create_fine_tuning_job(self, body: dict):
-        """Create a fine-tuning job."""
-        try:
-            # Validate required fields
-            if "training_file" not in body:
-                raise HTTPException(status_code=400, detail="training_file is required")
-            if "model" not in body:
-                raise HTTPException(status_code=400, detail="model is required")
-
-            # Generate job ID
-            job_id = f"ftjob-{uuid.uuid4().hex[:8]}"
-
-            # Create job object
-            job = FineTuningJob(
-                id=job_id,
-                object="fine_tuning.job",
-                created_at=int(datetime.now().timestamp()),
-                finished_at=None,
-                model=body["model"],
-                fine_tuned_model=None,
-                organization_id="org-123",
-                status="validating_files",  # Start with validating_files
-                hyperparameters=body.get("hyperparameters", {}),
-                training_file=body["training_file"],
-                trained_tokens=None,
-                error=None,
-                result_files=[],  # Required field
-                seed=42,  # Required field
-            )
-
-            # Store job
-            self.fine_tuning_jobs[job_id] = job
-
-            # Start training in background thread
-            thread = threading.Thread(
-                target=self._run_training,
-                args=(job_id, body["training_file"], body["model"]),
-            )
-            thread.start()
-            self.training_threads[job_id] = thread
-
-            return job.model_dump()
-
-        except Exception as e:
-            raise HTTPException(status_code=500, detail=str(e))
-
-    @app.get("/v1/fine_tuning/jobs")
-    async def list_fine_tuning_jobs(self):
-        """List all fine-tuning jobs."""
-        return {
-            "object": "list",
-            "data": [job.model_dump() for job in self.fine_tuning_jobs.values()],
-        }
-
-    @app.get("/v1/fine_tuning/jobs/{job_id}")
-    async def get_fine_tuning_job(self, job_id: str):
-        """Get a specific fine-tuning job."""
-        if job_id not in self.fine_tuning_jobs:
-            raise HTTPException(status_code=404, detail="Job not found")
-        return self.fine_tuning_jobs[job_id].model_dump()
-
-    @app.post("/v1/fine_tuning/jobs/{job_id}/cancel")
-    async def cancel_fine_tuning_job(self, job_id: str):
-        """Cancel a fine-tuning job."""
-        if job_id not in self.fine_tuning_jobs:
-            raise HTTPException(status_code=404, detail="Job not found")
-
-        job = self.fine_tuning_jobs[job_id]
-        if job.status not in ["created", "running"]:
-            raise HTTPException(status_code=400, detail="Job cannot be cancelled")
-
-        job.status = "cancelled"
-        job.finished_at = int(datetime.now().timestamp())
-
-        return job.model_dump()
-
-    @app.post("/v1/chat/completions")
-    async def create_chat_completion(self, body: dict, raw_request: Request):
-        """Creates a model response for the given chat conversation. Learn more in the [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision), and [audio](/docs/guides/audio) guides. Parameter support can differ depending on the model used to generate the response, particularly for newer reasoning models. Parameters that are only supported for reasoning models are noted below. For the current state of unsupported parameters in reasoning models, [refer to the reasoning guide](/docs/guides/reasoning).
-
-        # noqa: E501
-
-        :param create_chat_completion_request:
-        :type create_chat_completion_request: dict | bytes
-
-        :rtype: Union[CreateChatCompletionResponse, Tuple[CreateChatCompletionResponse, int], Tuple[CreateChatCompletionResponse, int, Dict[str, str]]
-        """
-        print("=== create_chat_completion ===")
-
-        print("body:")
-        pprint(body)
-
-        ta = TypeAdapter(CompletionCreateParams)
-
-        print("ta.validate_python...")
-        pprint(ta.validate_python(body))
-
-        max_new_tokens = body.get("max_completion_tokens", body.get("max_tokens"))
-        messages = body.get("messages")
-        model_name = body.get("model")
-        stream = body.get("stream", False)
-        temperature = body.get("temperature")
-        tools = body.get("tools")
-
-        images = []
-
-        for message in messages:
-            for content in message["content"]:
-                if "type" in content and content["type"] == "image_url":
-                    image_url = content["image_url"]["url"]
-                    image = load_image(image_url)
-                    images.append(image)
-
-                    content["type"] = "image"
-                    del content["image_url"]
-                elif isinstance(content, dict) and "text" in content:
-                    # Convert content to string if it's a dict with text
-                    message["content"] = content["text"]
-                elif isinstance(content, list):
-                    # Join list items with newlines if content is a list
-                    message["content"] = "\n".join(content)
-
-        images = images if images else None
-
-        if model_name != self.model_name:
-            # adapter_path = model_name
-            # self.model.load_adapter(adapter_path)
-
-            return JSONResponse(content={"error": "Model not found"}, status_code=404)
-
-        prompt = self.processor.apply_chat_template(
-            add_generation_prompt=True,
-            conversation=messages,
-            # documents=documents,
-            tools=tools,
-            tokenize=False,  # Return string instead of token IDs
-        )
-
-        print("prompt:")
-        print(prompt)
-
-        if images:
-            inputs = self.processor(text=prompt, images=images, return_tensors="pt")
-        else:
-            inputs = self.processor(text=prompt, return_tensors="pt")
-
-        inputs = inputs.to(self.model.device)
-        input_ids = inputs.input_ids
-
-        class GeneratorThread(Thread):
-            """Thread to generate completions in the background."""
-
-            def __init__(self, model, **generation_kwargs):
-                super().__init__()
-
-                self.chat_completion = None
-                self.generation_kwargs = generation_kwargs
-                self.model = model
-
-            def run(self):
-                import torch
-                import torch._dynamo.config
-
-                try:
-                    try:
-                        self.generated_ids = self.model.generate(
-                            **self.generation_kwargs
-                        )
-
-                    except torch._dynamo.exc.BackendCompilerFailed as e:
-                        print(e)
-                        print("Disabling dynamo...")
-
-                        torch._dynamo.config.disable = True
-
-                        self.generated_ids = self.model.generate(
-                            **self.generation_kwargs
-                        )
-
-                except Exception as e:
-                    print(e)
-                    print("Warning: Exception in GeneratorThread")
-                    self.generated_ids = []
-
-            def join(self, timeout=None):
-                super().join()
-
-                return self.generated_ids
-
-        decode_kwargs = dict(skip_special_tokens=True)
-
-        streamer = (
-            AsyncTextIteratorStreamer(
-                self.processor,
-                skip_prompt=True,
-                **decode_kwargs,
-            )
-            if stream
-            else None
-        )
-
-        generation_kwargs = dict(
-            **inputs,
-            max_new_tokens=max_new_tokens,
-            streamer=streamer,
-            temperature=temperature,
-            use_cache=True,
-        )
-
-        thread = GeneratorThread(self.model, **generation_kwargs)
-        thread.start()
-
-        if stream:
-
-            async def event_publisher():
-                i = 0
-
-                try:
-                    async for new_text in streamer:
-                        print("new_text:")
-                        print(new_text)
-
-                        choices: List[ChatCompletionChunkChoice] = [
-                            ChatCompletionChunkChoice(
-                                _request_id=None,
-                                delta=ChoiceDelta(
-                                    _request_id=None,
-                                    content=new_text,
-                                    function_call=None,
-                                    refusal=None,
-                                    role="assistant",
-                                    tool_calls=None,
-                                ),
-                                finish_reason=None,
-                                index=0,
-                                logprobs=None,
-                            )
-                        ]
-
-                        chat_completion_chunk = ChatCompletionChunk(
-                            _request_id=None,
-                            choices=choices,
-                            created=int(time.time()),
-                            id=str(i),
-                            model=model_name,
-                            object="chat.completion.chunk",
-                            service_tier=None,
-                            system_fingerprint=None,
-                            usage=None,
-                        )
-
-                        yield chat_completion_chunk.model_dump_json()
-
-                        i += 1
-
-                except asyncio.CancelledError as e:
-                    print("Disconnected from client (via refresh/close)")
-                    raise e
-
-                except Exception as e:
-                    print(f"Exception: {e}")
-                    raise e
-
-            return EventSourceResponse(event_publisher())
-
-        generated_ids = thread.join()
-        input_length = input_ids.shape[1]
-
-        batch_decoded_outputs = self.processor.batch_decode(
-            generated_ids[:, input_length:],
-            skip_special_tokens=True,
-        )
-
-        choices: List[ChatCompletionChoice] = []
-
-        for i, response in enumerate(batch_decoded_outputs):
-            print("response:")
-            print(response)
-
-            # try:
-            #     response = json.loads(response)
-
-            #     finish_reason: str = response.get("finish_reason")
-            #     tool_calls_json = response.get("tool_calls")
-            #     tool_calls: List[ToolCall] = []
-
-            #     for tool_call_json in tool_calls_json:
-            #         tool_call = ToolCall(
-            #             function=FunctionToolCallArguments(
-            #                 arguments=tool_call_json.get("arguments"),
-            #                 name=tool_call_json.get("name"),
-            #             ),
-            #             id=tool_call_json.get("id"),
-            #             type="function",
-            #         )
-
-            #         tool_calls.append(tool_call)
-
-            #     message: ChatMessage = ChatMessage(
-            #         role="assistant",
-            #         tool_calls=tool_calls,
-            #     )
-
-            # except json.JSONDecodeError:
-            #     finish_reason: str = "stop"
-            #     message: ChatMessage = ChatMessage(
-            #         role="assistant",
-            #         content=response,
|
594 |
-
# )
|
595 |
-
|
596 |
-
message = ChatCompletionMessage(
|
597 |
-
audio=None,
|
598 |
-
content=response,
|
599 |
-
refusal=None,
|
600 |
-
role="assistant",
|
601 |
-
tool_calls=None,
|
602 |
-
)
|
603 |
-
|
604 |
-
choices.append(
|
605 |
-
ChatCompletionChoice(
|
606 |
-
index=i,
|
607 |
-
finish_reason="stop",
|
608 |
-
logprobs=None,
|
609 |
-
message=message,
|
610 |
-
)
|
611 |
-
)
|
612 |
-
|
613 |
-
chat_completion = ChatCompletion(
|
614 |
-
choices=choices,
|
615 |
-
created=int(time.time()),
|
616 |
-
id="1",
|
617 |
-
model=model_name,
|
618 |
-
object="chat.completion",
|
619 |
-
service_tier=None,
|
620 |
-
system_fingerprint=None,
|
621 |
-
usage=None,
|
622 |
-
)
|
623 |
-
|
624 |
-
return chat_completion.model_dump(mode="json")
|
625 |
-
|
626 |
-
|
627 |
-
def build_app(cli_args: Dict[str, str]) -> serve.Application:
|
628 |
-
"""Builds the Serve app based on CLI arguments."""
|
629 |
-
return ModelDeployment.options().bind(
|
630 |
-
cli_args.get("model_name"),
|
631 |
-
)
|
632 |
-
|
633 |
-
|
634 |
-
# uv run serve run serve:build_app model_name="HuggingFaceTB/SmolVLM-Instruct"
|
635 |
-
# uv run serve run serve:build_app model_name="unsloth/SmolLM2-135M-Instruct-bnb-4bit"
serve_test.py
DELETED
@@ -1,104 +0,0 @@
import json
import os
import time

from openai import OpenAI

# Initialize the OpenAI client with the local server
client = OpenAI(
    base_url="http://localhost:8000/v1",
    api_key="not-needed",  # API key is not needed for local server
)


def test_chat_completion():
    try:
        print("Sending chat completion request...")
        response = client.chat.completions.create(
            model="unsloth/SmolLM2-135M-Instruct-bnb-4bit",
            messages=[{"role": "user", "content": "Hello"}],
            temperature=0.7,
            max_tokens=50,
        )

        # Print the response
        print("\nResponse:")
        print(response.choices[0].message.content)

        # Print full response object for debugging
        print("\nFull response object:")
        print(json.dumps(response.model_dump(), indent=2))

    except Exception as e:
        print(f"Error occurred: {str(e)}")
        import traceback

        print("\nFull traceback:")
        print(traceback.format_exc())


def test_fine_tuning():
    try:
        # Create a sample training file
        training_data = {
            "conversations": [
                {
                    "from": "human",
                    "value": "What is the capital of France?",
                },
                {
                    "from": "gpt",
                    "value": "The capital of France is Paris.",
                },
            ]
        }

        training_file = "training_data.json"
        with open(training_file, "w") as f:
            json.dump(training_data, f)

        print("\nCreating fine-tuning job...")
        job = client.fine_tuning.jobs.create(
            training_file=training_file,
            model="unsloth/SmolLM2-135M-Instruct-bnb-4bit",
        )
        print(f"Created job: {job.id}")

        # Wait for job to start
        print("\nWaiting for job to start...")
        time.sleep(2)

        # List jobs
        print("\nListing fine-tuning jobs...")
        jobs = client.fine_tuning.jobs.list()
        print(f"Found {len(jobs.data)} jobs")

        # Get job status
        print("\nGetting job status...")
        job = client.fine_tuning.jobs.retrieve(job.id)
        print(f"Job status: {job.status}")

        # Wait for job to complete or fail
        print("\nWaiting for job to complete...")
        while job.status in ["created", "running"]:
            time.sleep(5)
            job = client.fine_tuning.jobs.retrieve(job.id)
            print(f"Job status: {job.status}")

        # Clean up
        os.remove(training_file)

    except Exception as e:
        print(f"Error occurred: {str(e)}")
        import traceback

        print("\nFull traceback:")
        print(traceback.format_exc())


if __name__ == "__main__":
    print("Testing chat completions endpoint...")
    test_chat_completion()

    print("\nTesting fine-tuning endpoints...")
    test_fine_tuning()
test_questions.py
DELETED
@@ -1,39 +0,0 @@
import unittest

import requests

from main_v2 import main


class TestQuestions(unittest.TestCase):
    def setUp(self):
        self.api_url = "https://agents-course-unit4-scoring.hf.space"
        self.questions_url = f"{self.api_url}/questions"

        # Get questions from the API
        response = requests.get(self.questions_url, timeout=15)
        response.raise_for_status()
        self.questions = response.json()

        # Expected answers for each question
        self.expected_answers = {
            "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia.": "3",
            # Add more expected answers as you verify them
        }

    def test_questions(self):
        """Test each question and verify the succinct answer matches the expected value."""
        for question_data in self.questions:
            question = question_data["question"]
            if question in self.expected_answers:
                expected_answer = self.expected_answers[question]
                actual_answer = main(question)
                self.assertEqual(
                    actual_answer,
                    expected_answer,
                    f"Question: {question}\nExpected: {expected_answer}\nGot: {actual_answer}",
                )


if __name__ == "__main__":
    unittest.main()
tools/__init__.py
DELETED
File without changes

tools/smart_search/__init__.py
DELETED
File without changes
tools/smart_search/tool.py
DELETED
@@ -1,236 +0,0 @@
import logging
import re
from typing import Optional

import requests
from smolagents import Tool
from smolagents.default_tools import DuckDuckGoSearchTool

logger = logging.getLogger(__name__)


class SmartSearchTool(Tool):
    name = "smart_search"
    description = """A smart search tool that searches Wikipedia for information."""
    inputs = {
        "query": {
            "type": "string",
            "description": "The search query to find information",
        }
    }
    output_type = "string"

    def __init__(self):
        super().__init__()
        self.web_search_tool = DuckDuckGoSearchTool(max_results=1)
        self.api_url = "https://en.wikipedia.org/w/api.php"
        self.headers = {
            "User-Agent": "SmartSearchTool/1.0 (https://github.com/yourusername/yourproject; your@email.com)"
        }

    def get_wikipedia_page(self, title: str) -> Optional[str]:
        """Get the raw wiki markup of a Wikipedia page."""
        try:
            params = {
                "action": "query",
                "prop": "revisions",
                "rvprop": "content",
                "rvslots": "main",
                "format": "json",
                "titles": title,
                "redirects": 1,
            }
            response = requests.get(self.api_url, params=params, headers=self.headers)
            response.raise_for_status()
            data = response.json()

            # Extract page content
            pages = data.get("query", {}).get("pages", {})
            for page_id, page_data in pages.items():
                if "revisions" in page_data:
                    return page_data["revisions"][0]["slots"]["main"]["*"]
            return None
        except Exception as e:
            logger.error(f"Error getting Wikipedia page: {e}")
            return None

    def clean_wiki_content(self, content: str) -> str:
        """Clean Wikipedia content by removing markup and formatting."""
        # Remove citations
        content = re.sub(r"\[\d+\]", "", content)
        # Remove edit links
        content = re.sub(r"\[edit\]", "", content)
        # Remove file links
        content = re.sub(r"\[\[File:.*?\]\]", "", content)
        # Convert links to just text
        content = re.sub(r"\[\[(?:[^|\]]*\|)?([^\]]+)\]\]", r"\1", content)
        # Remove HTML comments
        content = re.sub(r"<!--.*?-->", "", content, flags=re.DOTALL)
        # Remove templates
        content = re.sub(r"\{\{.*?\}\}", "", content)
        # Remove small tags
        content = re.sub(r"<small>.*?</small>", "", content)
        # Normalize whitespace
        content = re.sub(r"\n\s*\n", "\n\n", content)
        return content.strip()

    def format_wiki_table(self, table_content: str) -> str:
        """Format a Wikipedia table into readable text."""
        # Split into rows
        rows = table_content.strip().split("\n")
        formatted_rows = []
        current_row = []

        for row in rows:
            # Skip empty rows and table structure markers
            if not row.strip() or row.startswith("|-") or row.startswith("|+"):
                if current_row:
                    formatted_rows.append("\t".join(current_row))
                    current_row = []
                continue

            # Extract cells
            cells = []
            # Split the row into cells using | or ! as separators
            cell_parts = re.split(r"[|!]", row)
            for cell in cell_parts[1:]:  # Skip the first empty part
                # Clean up the cell content
                cell = cell.strip()
                # Remove any remaining markup
                cell = re.sub(r"<.*?>", "", cell)  # Remove HTML tags
                cell = re.sub(r"\[\[.*?\|(.*?)\]\]", r"\1", cell)  # Convert links
                cell = re.sub(r"\[\[(.*?)\]\]", r"\1", cell)  # Convert simple links
                cell = re.sub(r"\{\{.*?\}\}", "", cell)  # Remove templates
                cell = re.sub(r"<small>.*?</small>", "", cell)  # Remove small tags
                cell = re.sub(r'rowspan="\d+"', "", cell)  # Remove rowspan
                cell = re.sub(r'colspan="\d+"', "", cell)  # Remove colspan
                cell = re.sub(r'class=".*?"', "", cell)  # Remove class attributes
                cell = re.sub(r'style=".*?"', "", cell)  # Remove style attributes
                cell = re.sub(r'align=".*?"', "", cell)  # Remove align attributes
                cell = re.sub(r'width=".*?"', "", cell)  # Remove width attributes
                cell = re.sub(r'bgcolor=".*?"', "", cell)  # Remove bgcolor attributes
                cell = re.sub(r'valign=".*?"', "", cell)  # Remove valign attributes
                cell = re.sub(r'border=".*?"', "", cell)  # Remove border attributes
                cell = re.sub(r'cellpadding=".*?"', "", cell)  # Remove cellpadding attributes
                cell = re.sub(r'cellspacing=".*?"', "", cell)  # Remove cellspacing attributes
                cell = re.sub(r"<ref.*?</ref>", "", cell)  # Remove references
                cell = re.sub(r"<ref.*?/>", "", cell)  # Remove empty references
                cell = re.sub(r"<br\s*/?>", " ", cell)  # Replace line breaks with spaces
                cell = re.sub(r"\s+", " ", cell)  # Normalize whitespace
                cells.append(cell)

            if cells:
                current_row.extend(cells)

        if current_row:
            formatted_rows.append("\t".join(current_row))

        if formatted_rows:
            return "\n".join(formatted_rows)
        return ""

    def extract_wikipedia_title(self, search_result: str) -> Optional[str]:
        """Extract Wikipedia page title from search result."""
        # Look for Wikipedia links in the format [Title - Wikipedia](url)
        wiki_match = re.search(
            r"\[([^\]]+)\s*-\s*Wikipedia\]\(https://en\.wikipedia\.org/wiki/[^)]+\)",
            search_result,
        )
        if wiki_match:
            return wiki_match.group(1).strip()
        return None

    def forward(self, query: str) -> str:
        logger.info(f"Starting smart search for query: {query}")

        # First do a web search to find the Wikipedia page
        search_result = self.web_search_tool.forward(query)
        logger.info(f"Web search results: {search_result[:100]}...")

        # Extract Wikipedia page title from search results
        wiki_title = self.extract_wikipedia_title(search_result)
        if not wiki_title:
            return f"Could not find Wikipedia page in search results for '{query}'."

        # Get Wikipedia page content
        page_content = self.get_wikipedia_page(wiki_title)
        if not page_content:
            return f"Could not find Wikipedia page for '{wiki_title}'."

        # Format tables and content
        formatted_content = []
        current_section = []
        in_table = False
        table_content = []

        for line in page_content.split("\n"):
            if line.startswith("{|"):
                in_table = True
                table_content = [line]
            elif line.startswith("|}"):
                in_table = False
                table_content.append(line)
                formatted_table = self.format_wiki_table("\n".join(table_content))
                if formatted_table:
                    current_section.append(formatted_table)
            elif in_table:
                table_content.append(line)
            else:
                if line.strip():
                    current_section.append(line)
                elif current_section:
                    formatted_content.append("\n".join(current_section))
                    current_section = []

        if current_section:
            formatted_content.append("\n".join(current_section))

        # Clean and return the formatted content
        cleaned_content = self.clean_wiki_content("\n\n".join(formatted_content))
        return f"Wikipedia content for '{wiki_title}':\n\n{cleaned_content}"


def main(query: str) -> str:
    """
    Test function to run the SmartSearchTool directly.

    Args:
        query: The search query to test

    Returns:
        The search results
    """
    # Configure logging
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )

    # Create and run the tool
    tool = SmartSearchTool()
    result = tool.forward(query)

    # Print the result
    print("\nSearch Results:")
    print("-" * 80)
    print(result)
    print("-" * 80)

    return result


if __name__ == "__main__":
    import sys

    if len(sys.argv) > 1:
        query = " ".join(sys.argv[1:])
        main(query)
    else:
        print("Usage: python tool.py <search query>")
        print("Example: python tool.py 'Mercedes Sosa discography'")
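To make the regex pipeline in clean_wiki_content concrete, here is a small sketch on a hypothetical markup string (it assumes smolagents and the DuckDuckGo search dependency are installed, since the constructor builds a DuckDuckGoSearchTool):

# Hypothetical illustration of the markup-cleaning pass
from tools.smart_search.tool import SmartSearchTool

tool = SmartSearchTool()
raw = "The [[capital]] of [[France|the country]] is Paris.{{citation needed}} [edit]"
print(tool.clean_wiki_content(raw))
# Expected: The capital of the country is Paris.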
train.py
DELETED
@@ -1,419 +0,0 @@
#!/usr/bin/env python3
"""
Fine-tuning script for SmolLM2-135M model using Unsloth.

This script demonstrates how to:
1. Install and configure Unsloth
2. Prepare and format training data
3. Configure and run the training process
4. Save and evaluate the model

To run this script:
1. Install dependencies: pip install -r requirements.txt
2. Run: python train.py
"""

import logging
import os
from datetime import datetime
from pathlib import Path
from typing import Union

import hydra
from omegaconf import DictConfig, OmegaConf

# isort: off
from unsloth import FastLanguageModel, FastModel, is_bfloat16_supported  # noqa: E402
from unsloth.chat_templates import get_chat_template  # noqa: E402

# isort: on

import torch
from datasets import (
    Dataset,
    DatasetDict,
    IterableDataset,
    IterableDatasetDict,
    load_dataset,
)
from peft import PeftModel
from smolagents import CodeAgent, LiteLLMModel, Model, TransformersModel, VLLMModel
from smolagents.monitoring import LogLevel
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)
from trl import SFTTrainer

from tools.smart_search.tool import SmartSearchTool


# Setup logging
def setup_logging():
    """Configure logging for the training process."""
    # Create logs directory if it doesn't exist
    log_dir = Path("logs")
    log_dir.mkdir(exist_ok=True)

    # Create a unique log file name with timestamp
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    log_file = log_dir / f"training_{timestamp}.log"

    # Configure logging
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        handlers=[logging.FileHandler(log_file), logging.StreamHandler()],
    )

    logger = logging.getLogger(__name__)
    logger.info(f"Logging initialized. Log file: {log_file}")
    return logger


logger = setup_logging()


def install_dependencies():
    """Install required dependencies."""
    logger.info("Installing dependencies...")
    try:
        os.system(
            'pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"'
        )
        os.system("pip install --no-deps xformers trl peft accelerate bitsandbytes")
        logger.info("Dependencies installed successfully")
    except Exception as e:
        logger.error(f"Error installing dependencies: {e}")
        raise


def load_model(cfg: DictConfig) -> tuple[FastLanguageModel, AutoTokenizer]:
    """Load and configure the model."""
    logger.info("Loading model and tokenizer...")
    try:
        model, tokenizer = FastModel.from_pretrained(
            model_name=cfg.model.name,
            max_seq_length=cfg.model.max_seq_length,
            dtype=cfg.model.dtype,
            load_in_4bit=cfg.model.load_in_4bit,
        )
        logger.info("Base model loaded successfully")

        # Configure LoRA
        model = FastModel.get_peft_model(
            model,
            r=cfg.peft.r,
            target_modules=cfg.peft.target_modules,
            lora_alpha=cfg.peft.lora_alpha,
            lora_dropout=cfg.peft.lora_dropout,
            bias=cfg.peft.bias,
            use_gradient_checkpointing=cfg.peft.use_gradient_checkpointing,
            random_state=cfg.peft.random_state,
            use_rslora=cfg.peft.use_rslora,
            loftq_config=cfg.peft.loftq_config,
        )
        logger.info("LoRA configuration applied successfully")

        return model, tokenizer
    except Exception as e:
        logger.error(f"Error loading model: {e}")
        raise


def load_and_format_dataset(
    tokenizer: AutoTokenizer,
    cfg: DictConfig,
) -> tuple[
    Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset], AutoTokenizer
]:
    """Load and format the training dataset."""
    logger.info("Loading and formatting dataset...")
    try:
        # Load the code-act dataset
        dataset = load_dataset("xingyaoww/code-act", split="codeact")
        logger.info(f"Dataset loaded successfully. Size: {len(dataset)} examples")

        # Split into train and validation sets
        dataset = dataset.train_test_split(
            test_size=cfg.dataset.validation_split, seed=cfg.dataset.seed
        )
        logger.info(
            f"Dataset split into train ({len(dataset['train'])} examples) and validation ({len(dataset['test'])} examples) sets"
        )

        # Configure chat template
        tokenizer = get_chat_template(
            tokenizer,
            chat_template="chatml",  # Supports zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old, unsloth
            mapping={
                "role": "from",
                "content": "value",
                "user": "human",
                "assistant": "gpt",
            },  # ShareGPT style
            map_eos_token=True,  # Maps <|im_end|> to </s> instead
        )
        logger.info("Chat template configured successfully")

        def formatting_prompts_func(examples):
            convos = examples["conversations"]
            texts = [
                tokenizer.apply_chat_template(
                    convo, tokenize=False, add_generation_prompt=False
                )
                for convo in convos
            ]
            return {"text": texts}

        # Apply formatting to both train and validation sets
        dataset = DatasetDict(
            {
                "train": dataset["train"].map(formatting_prompts_func, batched=True),
                "validation": dataset["test"].map(
                    formatting_prompts_func, batched=True
                ),
            }
        )
        logger.info("Dataset formatting completed successfully")

        return dataset, tokenizer
    except Exception as e:
        logger.error(f"Error loading/formatting dataset: {e}")
        raise


def create_trainer(
    model: FastLanguageModel,
    tokenizer: AutoTokenizer,
    dataset: Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset],
    cfg: DictConfig,
) -> Trainer:
    """Create and configure the SFTTrainer."""
    logger.info("Creating trainer...")
    try:
        # Create TrainingArguments from config
        training_args_dict = OmegaConf.to_container(cfg.training.args, resolve=True)
        # Add dynamic precision settings
        training_args_dict.update(
            {
                "fp16": not is_bfloat16_supported(),
                "bf16": is_bfloat16_supported(),
            }
        )
        training_args = TrainingArguments(**training_args_dict)

        # Create data collator from config
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=tokenizer,
            **cfg.training.sft.data_collator,
        )

        # Create SFT config without data_collator to avoid duplication
        sft_config = OmegaConf.to_container(cfg.training.sft, resolve=True)
        sft_config.pop("data_collator", None)  # Remove data_collator from config

        trainer = SFTTrainer(
            model=model,
            tokenizer=tokenizer,
            train_dataset=dataset["train"],
            eval_dataset=dataset["validation"],
            args=training_args,
            data_collator=data_collator,
            **sft_config,
        )
        logger.info("Trainer created successfully")
        return trainer
    except Exception as e:
        logger.error(f"Error creating trainer: {e}")
        raise


@hydra.main(version_base=None, config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
    """Main training function."""
    try:
        logger.info("Starting training process...")
        logger.info(f"Configuration:\n{OmegaConf.to_yaml(cfg)}")

        # Install dependencies
        # install_dependencies()

        # Train if requested
        if cfg.train:
            # Load model and tokenizer
            model, tokenizer = load_model(cfg)

            # Load and prepare dataset
            dataset, tokenizer = load_and_format_dataset(tokenizer, cfg)

            # Create trainer
            trainer: Trainer = create_trainer(model, tokenizer, dataset, cfg)

            logger.info("Starting training...")
            trainer.train()

            # Save model
            logger.info(f"Saving final model to {cfg.output.dir}...")
            trainer.save_model(cfg.output.dir)

            # Save model in VLLM format
            logger.info("Saving model in VLLM format...")
            model.save_pretrained_merged(
                cfg.output.dir, tokenizer, save_method="merged_16bit"
            )

            # Print final metrics
            final_metrics = trainer.state.log_history[-1]
            logger.info("\nTraining completed!")
            logger.info(f"Final training loss: {final_metrics.get('loss', 'N/A')}")
            logger.info(
                f"Final validation loss: {final_metrics.get('eval_loss', 'N/A')}"
            )
        else:
            logger.info("Training skipped as train=False")

        # Test if requested
        if cfg.test:
            logger.info("\nStarting testing...")
            try:
                # Enable memory history tracking
                torch.cuda.memory._record_memory_history()

                # Set memory allocation configuration
                os.environ["PYTORCH_CUDA_ALLOC_CONF"] = (
                    "expandable_segments:True,max_split_size_mb:128"
                )

                # Load test dataset
                test_dataset = load_dataset(
                    cfg.test_dataset.name,
                    cfg.test_dataset.config,
                    split=cfg.test_dataset.split,
                    trust_remote_code=True,
                )
                logger.info(f"Loaded test dataset with {len(test_dataset)} examples")
                logger.info(f"Dataset features: {test_dataset.features}")

                # Clear CUDA cache before loading model
                torch.cuda.empty_cache()

                # Initialize model
                model: Model = LiteLLMModel(
                    api_base="http://localhost:8000/v1",
                    api_key="not-needed",
                    model_id=f"{cfg.model.provider}/{cfg.model.name}",
                    # model_id=cfg.model.name,
                    # model_id=cfg.output.dir,
                )

                # model: Model = TransformersModel(
                #     model_id=cfg.model.name,
                #     # model_id=cfg.output.dir,
                # )

                # model: Model = VLLMModel(
                #     model_id=cfg.model.name,
                #     # model_id=cfg.output.dir,
                # )

                # Create CodeAgent with SmartSearchTool
                agent = CodeAgent(
                    model=model,
                    tools=[SmartSearchTool()],
                    verbosity_level=LogLevel.ERROR,
                )

                # Format task to get succinct answer
                def format_task(question):
                    return f"""Please provide two answers to the following question:

1. A succinct answer that follows these rules:
- Contains ONLY the answer, nothing else
- Does not repeat the question
- Does not include explanations, reasoning, or context
- Does not include source attribution or references
- Does not use phrases like "The answer is" or "I found that"
- Does not include formatting, bullet points, or line breaks
- If the answer is a number, return only the number
- If the answer requires multiple items, separate them with commas
- If the answer requires ordering, maintain the specified order
- Uses the most direct and succinct form possible

2. A verbose answer that includes:
- The complete answer with all relevant details
- Explanations and reasoning
- Context and background information
- Source attribution where appropriate

Question: {question}

Please format your response as a JSON object with two keys:
- "succinct_answer": The concise answer following the rules above
- "verbose_answer": The detailed explanation with context"""

                # Run inference on test samples
                logger.info("Running inference on test samples...")
                for i, example in enumerate(test_dataset):
                    try:
                        # Clear CUDA cache before each sample
                        torch.cuda.empty_cache()

                        # Format the task
                        task = format_task(example["Question"])

                        # Run the agent
                        result = agent.run(
                            task=task,
                            max_steps=3,
                            reset=True,
                            stream=False,
                        )

                        # Parse the result
                        import json

                        json_str = result[result.find("{") : result.rfind("}") + 1]
                        parsed_result = json.loads(json_str)
                        answer = parsed_result["succinct_answer"]

                        logger.info(f"\nTest Sample {i+1}:")
                        logger.info(f"Question: {example['Question']}")
                        logger.info(f"Model Response: {answer}")
                        logger.info("-" * 80)

                        # Log memory usage after each sample
                        logger.info(f"Memory usage after sample {i+1}:")
                        logger.info(
                            f"Allocated: {torch.cuda.memory_allocated() / 1024**2:.2f} MB"
                        )
                        logger.info(
                            f"Reserved: {torch.cuda.memory_reserved() / 1024**2:.2f} MB"
                        )

                    except Exception as e:
                        logger.error(f"Error processing test sample {i+1}: {str(e)}")
                        continue

                # Dump memory snapshot for analysis
                torch.cuda.memory._dump_snapshot("memory_snapshot.pickle")
                logger.info("Memory snapshot saved to memory_snapshot.pickle")

            except Exception as e:
                logger.error(f"Error during testing: {e}")
                raise

    except Exception as e:
        logger.error(f"Error in main training process: {e}")
        raise


if __name__ == "__main__":
    main()

# uv run python train.py
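Since save_pretrained_merged writes a merged 16-bit Hugging Face checkpoint to cfg.output.dir, the result can afterwards be loaded with stock transformers. A minimal inference sketch, where "outputs" stands in for the resolved cfg.output.dir (an assumption; the actual value comes from the Hydra config in conf/):

# Hypothetical post-training smoke test for the merged checkpoint
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("outputs")
model = AutoModelForCausalLM.from_pretrained("outputs", device_map="auto")

# Build a single-turn chat prompt and generate a short completion
input_ids = tokenizer.apply_chat_template(
    [{"role": "user", "content": "What is the capital of France?"}],
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

output_ids = model.generate(input_ids, max_new_tokens=32)
print(tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True))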