# f1-mcp-server / mcp_client.py
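"""Gradio chat client for the F1 MCP server.

Connects to the server's MCP endpoint over SSE, loads its tools into a
smolagents ToolCallingAgent, and serves a simple chat UI for Formula 1
questions. See the usage note at the bottom of the file.
"""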
import os
import sys

import gradio as gr
from typing import List, Dict
from smolagents import InferenceClientModel, LiteLLMModel, ToolCallingAgent, MCPClient

SYSTEM_PROMPT = """You are a helpful Formula 1 assistant and strategist. You have access to various F1 data and tools to help answer questions about races, drivers, teams, and more.
Be concise and accurate in your responses. If you don't know something, use the available tools to find the information.
In addition, you will be asked to act as a live race engineer strategist during a Formula 1 race, making crucial calls during the event."""


def format_messages(history: List[Dict[str, str]], message: str) -> List[Dict[str, str]]:
    """Format the conversation history and new message for the agent."""
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    # Add prior turns; with type="messages", Gradio passes history as a
    # list of {"role": ..., "content": ...} dicts rather than [user, bot] pairs
    for msg in history:
        messages.append({"role": msg["role"], "content": msg["content"]})
    # Add the new message
    messages.append({"role": "user", "content": message})
    return messages


def agent_chat(message: str, history: List[Dict[str, str]]) -> str:
    """Handle chat messages with conversation history."""
    # Format messages with system prompt and history
    formatted_messages = format_messages(history, message)
    # Flatten the conversation into a single string with role indicators
    chat_history = "\n".join(
        f"{msg['role'].capitalize()}: {msg['content']}"
        for msg in formatted_messages[1:]  # Skip the system prompt here
    )
    # Prepend the system prompt
    full_prompt = f"{SYSTEM_PROMPT}\n\n{chat_history}"
    # Get the agent's response
    return str(agent.run(full_prompt))


if __name__ == "__main__":
    only_list_tools = False  # Set to True to only list the MCP tools (useful for debugging)
    local_model = True  # Requires a local Ollama server; set to False to use a hosted model
    mcp_client = None  # Keeps the finally block safe if the connection fails
    try:
        # Gradio exposes a Space's MCP endpoint at /gradio_api/mcp/sse
        mcp_client = MCPClient({
            "url": "https://agents-mcp-hackathon-f1-mcp-server.hf.space/gradio_api/mcp/sse",
            "transport": "sse",
        })
        tools = mcp_client.get_tools()
        print("### MCP tools ###")
        print("\n".join(f"Tool {i + 1}: {t.name}: {t.description}" for i, t in enumerate(tools)))
        if only_list_tools:
            sys.exit(0)  # The finally block handles disconnecting
        # Define the model
        if local_model:
            model = LiteLLMModel(
                model_id="ollama_chat/qwen3:8b",
                api_base="http://127.0.0.1:11434",  # Default Ollama server
                num_ctx=32768,
            )
        else:
            model = InferenceClientModel(
                model_id="deepseek-ai/DeepSeek-R1",
                provider="nebius",
                api_key=os.getenv("NEBIUS_API_KEY"),
            )
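        # With either backend, the agent routes tool calls to the MCP server's tools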
        agent = ToolCallingAgent(tools=[*tools], model=model)
        chat_interface = gr.ChatInterface(
            fn=agent_chat,
            type="messages",
            examples=[
                "What are the driver standings for the 2024 Formula 1 season?",
                "What is the calendar for the 2024 Formula 1 season?"
            ],
            title="🏎️ Formula 1 Assistant",
            description="This is a simple agent that uses MCP tools to answer questions about Formula 1."
        )
        chat_interface.launch()
    finally:
        if mcp_client is not None:
            mcp_client.disconnect()
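
# Usage note (a sketch; assumes a local Ollama server with the qwen3:8b model
# pulled when local_model is True):
#   ollama pull qwen3:8b
#   python mcp_client.py
# Set local_model = False and export NEBIUS_API_KEY to use hosted DeepSeek-R1 instead.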