import os
from typing import Dict, List

import gradio as gr
from smolagents import InferenceClientModel, LiteLLMModel, MCPClient, ToolCallingAgent

SYSTEM_PROMPT = """You are a helpful Formula 1 assistant and strategist. You have access to various F1 data and tools to help answer questions about races, drivers, teams, and more.

Be concise and accurate in your responses. If you don't know something, use the available tools to find the information.

In addition, you will be asked to act as a live race engineer and strategist during a Formula 1 race, making crucial calls during the event."""
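
# Note: the agent is driven with a single prompt string (see agent_chat below),
# so this system prompt is prepended to the flattened chat transcript on each turn.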


def format_messages(history: List[Dict[str, str]], message: str) -> List[Dict[str, str]]:
    """Format the conversation history and new message for the agent.

    With gr.ChatInterface(type="messages"), history arrives as a list of
    {"role": ..., "content": ...} dicts rather than [user, assistant] pairs.
    """
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]

    for msg in history:
        messages.append({"role": msg["role"], "content": msg["content"]})

    messages.append({"role": "user", "content": message})
    return messages
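
# Illustrative shape of the result (hypothetical inputs):
#   format_messages([{"role": "user", "content": "Hi"},
#                    {"role": "assistant", "content": "Hello!"}], "Who leads?")
#   -> [{"role": "system", "content": SYSTEM_PROMPT},
#       {"role": "user", "content": "Hi"},
#       {"role": "assistant", "content": "Hello!"},
#       {"role": "user", "content": "Who leads?"}]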


def agent_chat(message: str, history: List[Dict[str, str]]) -> str:
    """Handle a chat turn from Gradio, passing the full conversation to the agent."""
    formatted_messages = format_messages(history, message)

    # Flatten everything after the system message into a plain-text transcript,
    # since agent.run() expects a single prompt string.
    chat_history = "\n".join(
        f"{msg['role'].capitalize()}: {msg['content']}"
        for msg in formatted_messages[1:]
    )

    full_prompt = f"{SYSTEM_PROMPT}\n\n{chat_history}"

    # `agent` is created in the __main__ block below, before the UI launches.
    return str(agent.run(full_prompt))


if __name__ == "__main__":
    only_list_tools = False
    local_model = True

    mcp_client = None  # so the finally block is safe even if the connection fails
    try:
        # Connect to the remote F1 MCP server over SSE and fetch its tools.
        mcp_client = MCPClient(
            {"url": "https://agents-mcp-hackathon-f1-mcp-server.hf.space/gradio_api/mcp/sse",
             "transport": "sse"}
        )
        tools = mcp_client.get_tools()

        print("### MCP tools ###")
        print("\n".join(f"Tool {i}: {t.name}: {t.description}" for i, t in enumerate(tools, start=1)))

        if only_list_tools:
            # The finally block below handles the disconnect.
            raise SystemExit(0)
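
        # Pick the model backend: a local Ollama server (assumes Ollama is
        # running on its default port with the model already pulled, e.g.
        # `ollama pull qwen3:8b`) or DeepSeek-R1 hosted on Nebius (requires
        # NEBIUS_API_KEY in the environment).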
        if local_model:
            model = LiteLLMModel(
                model_id="ollama_chat/qwen3:8b",
                api_base="http://127.0.0.1:11434",
                num_ctx=32768,
            )
        else:
            model = InferenceClientModel(
                model_id="deepseek-ai/DeepSeek-R1",
                provider="nebius",
                api_key=os.getenv("NEBIUS_API_KEY"),
            )

        agent = ToolCallingAgent(tools=[*tools], model=model)
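
        # Optional smoke test before launching the UI (illustrative; assumes
        # the MCP server exposes a suitable standings tool):
        # print(agent.run("What are the driver standings for the 2024 season?"))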

        chat_interface = gr.ChatInterface(
            fn=agent_chat,
            type="messages",
            examples=[
                "What are the driver standings for the 2024 Formula 1 season?",
                "What is the calendar for the 2024 Formula 1 season?",
            ],
            title="🏎️ Formula 1 Assistant",
            description="A simple agent that uses MCP tools to answer questions about Formula 1.",
        )

        chat_interface.launch()

    finally:
        # Disconnect only if the client was actually created.
        if mcp_client is not None:
            mcp_client.disconnect()