import os
import datetime
import gradio as gr
import openf1_tools
from smolagents import InferenceClientModel, LiteLLMModel, ToolCallingAgent, MCPClient
from dotenv import load_dotenv

load_dotenv()

# `time` holds the real current moment; hard-code a timestamp instead (as below) to
# make the agent believe it is at a specific point in the past (e.g. mid-race)
time = datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0).isoformat()
spanish_gp_race_plus45m = "2025-06-01T13:45:00Z" # 2025 Spanish GP race start +45 minutes

SYSTEM_PROMPT = f"""You are a helpful Formula 1 assistant and strategist. You have access to various F1 data and tools to help answer questions about races, drivers, teams, and more. Be concise and accurate in your responses. You must use the available tools to find the information.
In addition, you will be asked to act as a live race engineer and strategist during a Formula 1 race, making crucial calls during the event.
For Formula 1-related tasks, start by calling get_api_endpoints() to see all available endpoints of the OpenF1 API.
Then retrieve information about a specific endpoint with get_endpoint_info(endpoint) to make sure it does what you expect.
If you are unsure what a filter does, get its description using get_filter_info(filter_name).
Lastly, combine the endpoint and filters into a request and call send_request() to send it to the OpenF1 API.

Current UTC time (ISO 8601): {spanish_gp_race_plus45m}"""
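
# Illustrative tool chain implied by the prompt above (the argument values are
# examples only; the MCP server's tool schemas are authoritative):
#   get_api_endpoints()              -> list the available OpenF1 endpoints
#   get_endpoint_info("laps")        -> what an endpoint returns
#   get_filter_info("driver_number") -> what a filter does
#   send_request(...)                -> fetch the data from the OpenF1 API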


def agent_chat(message: str, history: list):
    message = f"{SYSTEM_PROMPT}\n\nTask: {message}"
    return agent.run(message, max_steps=80)
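# `agent` is created in the __main__ block below before the interface launches.
# Example call (hypothetical task string): agent_chat("Who leads the race right now?", [])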


if __name__ == "__main__":

    list_tools = False # Set to True to only list tools (used for debugging)
    local_model = False # If you have Ollama installed, set this to True
    openf1_tool_only = True
    provider = "nebius" # "nebius" (mistral) or "sambanova" (deepseek)
    
    mcp_client = None  # so the finally block is safe if the connection fails
    try:

        # Connect to my MCP server hosted on HF Spaces
        mcp_client = MCPClient(
            {"url": "https://agents-mcp-hackathon-f1-mcp-server.hf.space/gradio_api/mcp/sse", "transport": "sse"})
        tools = mcp_client.get_tools()
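        # `tools` is a list of tool objects, one per tool exposed by the MCP server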

        # Filter tools to only use the OpenF1 library
        if openf1_tool_only:
            openf1_fn_names = [f"f1_mcp_server_{fn}" for fn in dir(openf1_tools)
                               if callable(getattr(openf1_tools, fn)) and not fn.startswith("_")]
            # urlopen is an import in openf1_tools, not an MCP tool, so drop it
            if "f1_mcp_server_urlopen" in openf1_fn_names:
                openf1_fn_names.remove("f1_mcp_server_urlopen")
            tools = [t for t in tools if t.name in openf1_fn_names]
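            # The surviving tools have names like "f1_mcp_server_get_api_endpoints"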

        if list_tools:
            print("### MCP tools ###")
            print("\n".join(f"Tool {i+1}: {t.name}: {t.description}" for i, t in enumerate(tools)))
            raise SystemExit(0)  # mcp_client is disconnected in the finally block

        # Define model
        if local_model:
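            # Assumes a local Ollama server is already running and the model has
            # been pulled beforehand (e.g. `ollama pull qwen3:1.7b`)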
            model = LiteLLMModel(
                model_id="ollama_chat/qwen3:1.7b",
                api_base="http://127.0.0.1:11434", # Default ollama server
                num_ctx=32768,
            )
        else:

            # Get model ID 
            model_id_env_mapping = {
                "nebius": "deepseek-ai/DeepSeek-R1-0528",
                "sambanova": "deepseek-ai/DeepSeek-R1-0528"
            }
            model_id = model_id_env_mapping[provider]

            # Get API key from environment variable
            provider_env_mapping = {
                "nebius": "NEBIUS_API_KEY",
                "sambanova": "SAMBANOVA_API_KEY"
            }
            api_key = os.getenv(provider_env_mapping[provider])

            model = InferenceClientModel(
                model_id=model_id,
                provider=provider,
                api_key=api_key,
                temperature=0
            )
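            # temperature=0 above keeps tool-calling as deterministic as possible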

        agent = ToolCallingAgent(model=model, tools=[*tools])
        # invoked through agent.run("This is the task I want you to do.")

        # Launch chat interface
        chat_interface = gr.ChatInterface(
            fn=agent_chat,
            type="messages",
            examples=[
                "What is the calendar for the 2024 Formula 1 season?",
                "Who won the Monaco 2024 GP"
            ],
            title="🏎️ Formula 1 Assistant",
            description="This is a simple agent that uses MCP tools to answer questions about Formula 1."
        )

        chat_interface.launch()

    finally:
        if mcp_client is not None:
            mcp_client.disconnect()