import os
import gradio as gr
import requests
import pandas as pd

# --- New Imports for LangChain Agent ---
from langchain_groq import ChatGroq
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.prompts import ChatPromptTemplate
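
# Dependencies (typically pinned in the Space's requirements.txt):
#   gradio, requests, pandas, langchain, langchain-groq, langchain-community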

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"


# --- Agent Definition ---
# This new agent uses LangChain to orchestrate an LLM with tools.
class LangChainAgent:
    def __init__(self, groq_api_key, tavily_api_key):
        """
        Initializes the agent with an LLM and a set of tools.
        """
        print("Initializing LangChainAgent...")

        # 1. Initialize the LLM
        # We use ChatGroq, the LangChain integration for Groq's API.
        self.llm = ChatGroq(
            model_name="llama3-70b-8192",
            groq_api_key=groq_api_key,
            temperature=0.0
        )

        # 2. Define the tools the agent can use
        # For now, we'll just give it a web search tool.
        self.tools = [
            TavilySearchResults(max_results=3, tavily_api_key=tavily_api_key)
        ]

        # 3. Create the Agent Prompt
        # This tells the agent how to behave and how to use the tools.
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", "You are a helpful assistant. You have access to a web search tool. Respond with the final answer to the user's question."),
                ("placeholder", "{chat_history}"),
                ("human", "{input}"),
                ("placeholder", "{agent_scratchpad}"),
            ]
        )
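        # Note: "{agent_scratchpad}" is where LangChain injects the agent's intermediate
        # tool calls and tool results on each turn; "{chat_history}" is an optional
        # placeholder that stays empty here because each question is answered independently.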

        # 4. Create the Agent itself
        agent = create_tool_calling_agent(self.llm, self.tools, prompt)

        # 5. Create the Agent Executor
        # This is the runtime that will actually execute the agent's logic.
        self.agent_executor = AgentExecutor(
            agent=agent,
            tools=self.tools,
            verbose=True  # Set to True to see the agent's thought process
        )
        print("LangChainAgent initialized.")

    def __call__(self, question: str) -> str:
        """
        This method is called to answer a question.
        It invokes the agent executor.
        """
        print(f"LangChainAgent received question (first 50 chars): {question[:50]}...")

        # We need to handle the case where the agent makes a mistake
        try:
            response = self.agent_executor.invoke({"input": question})
            answer = response.get("output", "No answer found.")
        except Exception as e:
            print(f"An error occurred in the agent executor: {e}")
            answer = f"Agent failed with an error: {e}"

        print(f"LangChainAgent generated answer: {answer}")
        return answer
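

# Example usage (illustrative sketch, not part of the evaluation flow): with GROQ_API_KEY
# and TAVILY_API_KEY set in the environment, the agent can be exercised on its own, e.g.
#   test_agent = LangChainAgent(os.getenv("GROQ_API_KEY"), os.getenv("TAVILY_API_KEY"))
#   print(test_agent("Which country hosted the 2016 Summer Olympics?"))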


def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches questions, runs the LangChainAgent on them, submits the answers,
    and displays the results.
    """
    # --- Authentication and Setup ---
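    # SPACE_ID is set automatically by Hugging Face when this app runs inside a Space;
    # it is used below to build a public link to the Space's code for the submission payload.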
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent (using the new LangChainAgent)
    try:
        groq_api_key = os.getenv("GROQ_API_KEY")
        tavily_api_key = os.getenv("TAVILY_API_KEY")
        if not groq_api_key or not tavily_api_key:
            raise ValueError("API Keys (GROQ_API_KEY, TAVILY_API_KEY) not found in secrets.")
        agent = LangChainAgent(groq_api_key=groq_api_key, tavily_api_key=tavily_api_key)
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(f"Agent code link: {agent_code}")

    # 2. Fetch Questions (same as before)
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=20)
        response.raise_for_status()
        questions_data = response.json()
    except Exception as e:
        return f"Error fetching questions: {e}", None

    # 3. Run your Agent (same as before)
    results_log = []
    answers_payload = []
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue
        submitted_answer = agent(question_text)
        answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
        results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})

    # 4. Prepare Submission (same as before)
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}

    # 5. Submit (same as before)
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        results_df = pd.DataFrame(results_log)
        return status_message, results_df


# --- Build Gradio Interface (Mostly the same) ---
with gr.Blocks() as demo:
    gr.Markdown("# LangChain Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1. Make sure you have set `GROQ_API_KEY` and `TAVILY_API_KEY` in your Space's secrets.
        2. Log in below. This is required for submission.
        3. Click 'Run Evaluation' to start the agent. You can see its thought process in the application logs!
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
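
    # Gradio passes the logged-in user's gr.OAuthProfile (or None) to run_and_submit_all
    # automatically based on its parameter type hint, so no explicit `inputs` are listed here.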
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )


if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)

    # Startup checks for secrets
    if not os.getenv("GROQ_API_KEY"):
        print("⚠️ WARNING: GROQ_API_KEY secret not set.")
    else:
        print("✅ GROQ_API_KEY secret is set.")

    if not os.getenv("TAVILY_API_KEY"):
        print("⚠️ WARNING: TAVILY_API_KEY secret not set.")
    else:
        print("✅ TAVILY_API_KEY secret is set.")

    print("-"*(60 + len(" App Starting ")) + "\n")
    demo.launch(debug=True, share=False)