wishwakankanamg committed on
Commit 4e2b09a · verified · 1 Parent(s): 6a7aabe

Update app.py

Files changed (1)
  1. app.py +17 -225
app.py CHANGED
@@ -1,201 +1,3 @@
-# import os
-# import gradio as gr
-# import requests
-# import inspect
-# import pandas as pd
-
-# # (Keep Constants as is)
-# # --- Constants ---
-# DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-
-# # --- Basic Agent Definition ---
-# # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
-# class BasicAgent:
-#     def __init__(self):
-#         print("BasicAgent initialized.")
-#     def __call__(self, question: str) -> str:
-#         print(f"Agent received question (first 50 chars): {question[:50]}...")
-#         fixed_answer = "This is a default answer."
-#         print(f"Agent returning fixed answer: {fixed_answer}")
-#         return fixed_answer
-
-# def run_and_submit_all( profile: gr.OAuthProfile | None):
-#     """
-#     Fetches all questions, runs the BasicAgent on them, submits all answers,
-#     and displays the results.
-#     """
-#     # --- Determine HF Space Runtime URL and Repo URL ---
-#     space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
-
-#     if profile:
-#         username= f"{profile.username}"
-#         print(f"User logged in: {username}")
-#     else:
-#         print("User not logged in.")
-#         return "Please Login to Hugging Face with the button.", None
-
-#     api_url = DEFAULT_API_URL
-#     questions_url = f"{api_url}/questions"
-#     submit_url = f"{api_url}/submit"
-
-#     # 1. Instantiate Agent ( modify this part to create your agent)
-#     try:
-#         agent = BasicAgent()
-#     except Exception as e:
-#         print(f"Error instantiating agent: {e}")
-#         return f"Error initializing agent: {e}", None
-#     # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
-#     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
-#     print(agent_code)
-
-#     # 2. Fetch Questions
-#     print(f"Fetching questions from: {questions_url}")
-#     try:
-#         response = requests.get(questions_url, timeout=15)
-#         response.raise_for_status()
-#         questions_data = response.json()
-#         if not questions_data:
-#             print("Fetched questions list is empty.")
-#             return "Fetched questions list is empty or invalid format.", None
-#         print(f"Fetched {len(questions_data)} questions.")
-#     except requests.exceptions.RequestException as e:
-#         print(f"Error fetching questions: {e}")
-#         return f"Error fetching questions: {e}", None
-#     except requests.exceptions.JSONDecodeError as e:
-#         print(f"Error decoding JSON response from questions endpoint: {e}")
-#         print(f"Response text: {response.text[:500]}")
-#         return f"Error decoding server response for questions: {e}", None
-#     except Exception as e:
-#         print(f"An unexpected error occurred fetching questions: {e}")
-#         return f"An unexpected error occurred fetching questions: {e}", None
-
-#     # 3. Run your Agent
-#     results_log = []
-#     answers_payload = []
-#     print(f"Running agent on {len(questions_data)} questions...")
-#     for item in questions_data:
-#         task_id = item.get("task_id")
-#         question_text = item.get("question")
-#         if not task_id or question_text is None:
-#             print(f"Skipping item with missing task_id or question: {item}")
-#             continue
-#         try:
-#             submitted_answer = agent(question_text)
-#             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
-#             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
-#         except Exception as e:
-#             print(f"Error running agent on task {task_id}: {e}")
-#             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
-
-#     if not answers_payload:
-#         print("Agent did not produce any answers to submit.")
-#         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
-
-#     # 4. Prepare Submission
-#     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
-#     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
-#     print(status_update)
-
-#     # 5. Submit
-#     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
-#     try:
-#         response = requests.post(submit_url, json=submission_data, timeout=60)
-#         response.raise_for_status()
-#         result_data = response.json()
-#         final_status = (
-#             f"Submission Successful!\n"
-#             f"User: {result_data.get('username')}\n"
-#             f"Overall Score: {result_data.get('score', 'N/A')}% "
-#             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
-#             f"Message: {result_data.get('message', 'No message received.')}"
-#         )
-#         print("Submission successful.")
-#         results_df = pd.DataFrame(results_log)
-#         return final_status, results_df
-#     except requests.exceptions.HTTPError as e:
-#         error_detail = f"Server responded with status {e.response.status_code}."
-#         try:
-#             error_json = e.response.json()
-#             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
-#         except requests.exceptions.JSONDecodeError:
-#             error_detail += f" Response: {e.response.text[:500]}"
-#         status_message = f"Submission Failed: {error_detail}"
-#         print(status_message)
-#         results_df = pd.DataFrame(results_log)
-#         return status_message, results_df
-#     except requests.exceptions.Timeout:
-#         status_message = "Submission Failed: The request timed out."
-#         print(status_message)
-#         results_df = pd.DataFrame(results_log)
-#         return status_message, results_df
-#     except requests.exceptions.RequestException as e:
-#         status_message = f"Submission Failed: Network error - {e}"
-#         print(status_message)
-#         results_df = pd.DataFrame(results_log)
-#         return status_message, results_df
-#     except Exception as e:
-#         status_message = f"An unexpected error occurred during submission: {e}"
-#         print(status_message)
-#         results_df = pd.DataFrame(results_log)
-#         return status_message, results_df
-
-
-# # --- Build Gradio Interface using Blocks ---
-# with gr.Blocks() as demo:
-#     gr.Markdown("# Basic Agent Evaluation Runner")
-#     gr.Markdown(
-#         """
-#         **Instructions:**
-
-#         1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
-#         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
-#         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
-
-#         ---
-#         **Disclaimers:**
-#         Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
-#         This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
-#         """
-#     )
-
-#     gr.LoginButton()
-
-#     run_button = gr.Button("Run Evaluation & Submit All Answers")
-
-#     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
-#     # Removed max_rows=10 from DataFrame constructor
-#     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
-
-#     run_button.click(
-#         fn=run_and_submit_all,
-#         outputs=[status_output, results_table]
-#     )
-
-# if __name__ == "__main__":
-#     print("\n" + "-"*30 + " App Starting " + "-"*30)
-#     # Check for SPACE_HOST and SPACE_ID at startup for information
-#     space_host_startup = os.getenv("SPACE_HOST")
-#     space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
-
-#     if space_host_startup:
-#         print(f"✅ SPACE_HOST found: {space_host_startup}")
-#         print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
-#     else:
-#         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
-
-#     if space_id_startup: # Print repo URLs if SPACE_ID is found
-#         print(f"✅ SPACE_ID found: {space_id_startup}")
-#         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-#         print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
-#     else:
-#         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
-
-#     print("-"*(60 + len(" App Starting ")) + "\n")
-
-#     print("Launching Gradio Interface for Basic Agent Evaluation...")
-#     demo.launch(debug=True, share=False)
-
-
 import os
 import gradio as gr
 import requests
@@ -210,7 +12,9 @@ from smolagents import (
     OpenAIServerModel,
     DuckDuckGoSearchTool,
     FinalAnswerTool,
-    PythonInterpreterTool
+    PythonInterpreterTool,
+    InferenceClientModel,
+    HfApiModel
 )
 from huggingface_hub import login, InferenceClient
 
@@ -219,17 +23,6 @@ from huggingface_hub import login, InferenceClient
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
 # --- Basic Agent Definition ---
-# ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
-from smolagents import CodeAgent, HfApiModel
-import os
-from smolagents import HfApiModel
-
-class CustomModel(Model):
-    def generate(messages, stop_sequences=["Task"]):
-        response = client.chat_completion(messages, stop=stop_sequences, max_tokens=1024)
-        answer = response.choices[0].message
-        return answer
-
 
 
 # --- Agent Definition ---
@@ -238,25 +31,24 @@ class BasicAgent:
         print("Initializing BasicAgent with tools...")
 
         # Load OpenAI token from environment
-        openai_token = os.getenv("OPENAI_API_KEY")
+        openai_token = os.getenv("HF_TOKEN")
         if not openai_token:
-            raise ValueError("Missing OpenAI API token!")
+            raise ValueError("Missing API token!")
 
         # Initialize model and tools
 
-        model_id = "meta-llama/Llama-3.2-1B-Instruct"
-
-
-        client = InferenceClient(model=model_id)
-
-        custom_model = CustomModel(client)
-
-
-        model = OpenAIServerModel(
-            #api_base="openai",
-            api_key=openai_token,
-            model_id="gpt-4.1"
+        model = InferenceClientModel(
+            model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
+            provider="together",
+            token=openai_token,
+            max_tokens=5000,
         )
+
+        # model = OpenAIServerModel(
+        #     #api_base="openai",
+        #     api_key=openai_token,
+        #     model_id="gpt-4.1"
+        # )
         search_tool = DuckDuckGoSearchTool()
         final_answer_tool = FinalAnswerTool()
         reverse_tool = ReverseTextTool()
@@ -271,7 +63,7 @@ class BasicAgent:
 
         # Build the agent
         self.agent = CodeAgent(
-            model=custom_model,
+            model=model,
             prompt_templates=prompt_templates,
             tools=[search_tool, reverse_tool, table_tool, veg_tool, python_tool, exfood_tool], #final_answer_tool
             add_base_tools=True,
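
For reference, a minimal standalone sketch of the model wiring this commit switches to, assuming smolagents is installed and HF_TOKEN is set in the environment; the custom tools defined elsewhere in app.py (ReverseTextTool and friends) are left out here:

import os

from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel

# Same token check as in BasicAgent.__init__ above
token = os.getenv("HF_TOKEN")
if not token:
    raise ValueError("Missing API token!")

# Serverless chat model routed through the "together" inference provider
model = InferenceClientModel(
    model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    provider="together",
    token=token,
    max_tokens=5000,
)

# CodeAgent with only the web-search tool; add_base_tools=True also pulls in
# the built-in tools (Python interpreter, final_answer, ...)
agent = CodeAgent(
    model=model,
    tools=[DuckDuckGoSearchTool()],
    add_base_tools=True,
)

if __name__ == "__main__":
    print(agent.run("What is the capital of France?"))

Passing provider="together" routes generation through the Together inference provider rather than the default Hugging Face Inference endpoint.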