magnustragardh committed
Commit ceba91c · 1 Parent(s): 81917a3

Basic Llamaindex implementation.

Files changed (3):
  1. app.py +68 -22
  2. requirements.txt +6 -1
  3. system_prompt.txt +41 -0
app.py CHANGED
@@ -1,25 +1,75 @@
 import os
+from pathlib import Path
 import gradio as gr
 import requests
-import inspect
-import pandas as pd
+#from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
+from llama_index.llms.gemini import Gemini
+from llama_index.core.agent.workflow import ReActAgent
+from llama_index.core.tools import FunctionTool
+from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
+from llama_index.tools.wikipedia import WikipediaToolSpec
+from dotenv import load_dotenv
+
+try:
+    import mlflow
+    mlflow.set_experiment("final_handson")
+    mlflow.llama_index.autolog()
+except ImportError:
+    pass
+
+
+load_dotenv()
+# See https://discord.com/channels/879548962464493619/1360558713274109982:
+#For anyone who has run out of free tier, you can switch to a Gemini model in Google Colab:
+#go to https://aistudio.google.com/apikey and create a new API key
+#insert it into Google Colab using 'Secrets' tab on the left pane
+#make sure to grant access to the notebook!
+#in the notebook, use:
+#from google.colab import userdata
+#model = LiteLLMModel(model_id="gemini/gemini-2.0-flash-lite", api_key=userdata.get('GEMINI_API_KEY'))
+#agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=model)
 
-# (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+SYSTEM_PROMPT = (Path(__file__).parent / 'system_prompt.txt').read_text()
+GOOGLE_API_KEY = os.environ['GOOGLE_API_KEY']
 
 # --- Basic Agent Definition ---
-# ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
 class BasicAgent:
     def __init__(self):
+        search_tool = FunctionTool.from_defaults(DuckDuckGoSearchToolSpec().duckduckgo_full_search)
+        wikipedia_load_tool = FunctionTool.from_defaults(WikipediaToolSpec().load_data)
+        wikipedia_search_tool = FunctionTool.from_defaults(WikipediaToolSpec().search_data)
+        self._tools = [search_tool, wikipedia_load_tool, wikipedia_search_tool]
+
+        self._llm = Gemini(api_key=GOOGLE_API_KEY, model="models/gemini-2.0-flash-lite")
+        self._agent = ReActAgent(tools=self._tools, llm=self._llm)
+        # Modify the react prompt.
+        self._agent.update_prompts({"react_header": SYSTEM_PROMPT})
         print("BasicAgent initialized.")
-    def __call__(self, question: str) -> str:
+
+    async def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
-        fixed_answer = "This is a default answer."
-        print(f"Agent returning fixed answer: {fixed_answer}")
-        return fixed_answer
+        agent_output = await self._agent.run(user_msg=question)
+        print(f"Agent returning answer: {agent_output}")
+        response_parts = str(agent_output).split('FINAL ANSWER: ')
+        if len(response_parts) > 1:
+            response = response_parts[-1]
+        else:
+            response = str(agent_output)
+        return response.strip()
+
+
+def fetch_questions(api_url: str = DEFAULT_API_URL):
+    questions_url = f"{api_url}/questions"
+    print(f"Fetching questions from: {questions_url}")
+    response = requests.get(questions_url, timeout=15)
+    response.raise_for_status()
+    questions_data = response.json()
+    return questions_data
+
 
-def run_and_submit_all( profile: gr.OAuthProfile | None):
+async def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
     Fetches all questions, runs the BasicAgent on them, submits all answers,
     and displays the results.
@@ -35,10 +85,9 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         return "Please Login to Hugging Face with the button.", None
 
     api_url = DEFAULT_API_URL
-    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"
 
-    # 1. Instantiate Agent ( modify this part to create your agent)
+    # 1. Instantiate Agent
    try:
        agent = BasicAgent()
    except Exception as e:
@@ -49,22 +98,19 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
    print(agent_code)
 
    # 2. Fetch Questions
-    print(f"Fetching questions from: {questions_url}")
    try:
-        response = requests.get(questions_url, timeout=15)
-        response.raise_for_status()
-        questions_data = response.json()
+        questions_data = fetch_questions()
        if not questions_data:
-             print("Fetched questions list is empty.")
-             return "Fetched questions list is empty or invalid format.", None
+            print("Fetched questions list is empty.")
+            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
+    except requests.exceptions.JSONDecodeError as e:
+        print(f"Error decoding JSON response from questions endpoint: {e}")
+        # print(f"Response text: {response.text[:500]}")
+        return f"Error decoding server response for questions: {e}", None
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
-    except requests.exceptions.JSONDecodeError as e:
-        print(f"Error decoding JSON response from questions endpoint: {e}")
-        print(f"Response text: {response.text[:500]}")
-        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None
@@ -80,7 +126,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
-            submitted_answer = agent(question_text)
+            submitted_answer = await agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
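Note: `BasicAgent.__call__` is now a coroutine, so callers outside Gradio's async context must await it from an event loop. A minimal local smoke test could look like the sketch below (not part of the commit; it assumes GOOGLE_API_KEY is set and the requirements are installed, and the sample question is made up):

```
# Sketch only (not in this commit): run the new async agent once from a script.
import asyncio

from app import BasicAgent  # importing app.py also runs its module-level setup

async def main() -> None:
    agent = BasicAgent()
    # Hypothetical question, just to exercise the ReAct loop and answer parsing.
    answer = await agent("What is the capital of France?")
    print(answer)  # __call__ returns the text after "FINAL ANSWER: ", stripped

if __name__ == "__main__":
    asyncio.run(main())
```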
requirements.txt CHANGED
@@ -1,2 +1,7 @@
+dotenv
 gradio
-requests
+requests
+llama-index
+llama-index-llms-gemini
+llama-index-tools-duckduckgo
+llama-index-tools-wikipedia
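A quick sanity check that the new dependencies resolve to importable modules (a sketch, not part of the commit; it assumes the `dotenv` entry provides the `dotenv` module that app.py imports):

```
# Sketch only: verify the packages from requirements.txt can be imported.
import importlib

for name in (
    "dotenv",                        # dotenv entry (python-dotenv compatible API)
    "gradio",
    "requests",
    "llama_index",                   # llama-index
    "llama_index.llms.gemini",       # llama-index-llms-gemini
    "llama_index.tools.duckduckgo",  # llama-index-tools-duckduckgo
    "llama_index.tools.wikipedia",   # llama-index-tools-wikipedia
):
    importlib.import_module(name)
    print(f"ok: {name}")
```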
system_prompt.txt ADDED
@@ -0,0 +1,41 @@
+You are a general AI assistant. I will ask you a question.
+
+## Tools
+
+You have access to a wide variety of tools. You are responsible for using the tools in any sequence you deem appropriate to complete the task at hand.
+This may require breaking the task into subtasks and using different tools to complete each subtask.
+
+You have access to the following tools:
+{tool_desc}
+
+
+## Output Format
+
+Please answer in the same language as the question and use the following format:
+
+```
+Thought: The current language of the user is: (user's language). I need to use a tool to help me answer the question.
+Action: tool name (one of {tool_names}) if using a tool.
+Action Input: the input to the tool, in a JSON format representing the kwargs (e.g. {{"input": "hello world", "num_beams": 5}})
+```
+
+Please ALWAYS start with a Thought.
+
+NEVER surround your response with markdown code markers. You may use code markers within your response if you need to.
+
+Please use a valid JSON format for the Action Input. Do NOT do this {{'input': 'hello world', 'num_beams': 5}}.
+
+If this format is used, the tool will respond in the following format:
+
+```
+Observation: tool response
+```
+
+You should keep repeating the above format till you have enough information to answer the question without using any more tools. At that point, you MUST respond with the following template:
+
+FINAL ANSWER: [YOUR FINAL ANSWER].
+
+YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use commas to write your number and don't use units such as $ or percent signs unless specified otherwise. If you are asked for a string, don't use articles or abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending on whether each element in the list is a number or a string.
+## Current Conversation
+
+Below is the current conversation consisting of interleaving human and assistant messages.
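The `{tool_desc}` and `{tool_names}` placeholders (and the doubled `{{...}}` braces) follow str.format conventions, matching how a ReAct header is expanded with the registered tools. A sketch of the expansion (not part of the commit; the tool descriptions are made up):

```
# Sketch only: expand the header template the way a str.format-based formatter would.
from pathlib import Path

template = Path("system_prompt.txt").read_text()
header = template.format(
    tool_desc="duckduckgo_full_search(query: str) -> str: full-text web search",  # hypothetical
    tool_names="duckduckgo_full_search, load_data, search_data",
)
print(header)  # the doubled {{...}} braces render as literal JSON braces
```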