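"""Gradio chat app: a Formula 1 assistant agent backed by MCP tools.

Connects to a hosted F1 MCP server over SSE, hands its tools to a smolagents
ToolCallingAgent (a local Ollama model or a hosted DeepSeek-R1 endpoint), and
serves a simple chat UI.
"""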
import os
import datetime
import gradio as gr
import openf1_tools
from smolagents import InferenceClientModel, LiteLLMModel, ToolCallingAgent, MCPClient

# Can manually be set to a fixed timestamp to make the agent think it is in the past
time = datetime.datetime.now().astimezone().isoformat()
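# e.g. pin it to a past moment to replay a race weekend (hypothetical value):
# time = "2023-05-28T13:00:00+00:00"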

SYSTEM_PROMPT = """You are a helpful Formula 1 assistant and strategist. You have access to various F1 data and tools to help answer questions about races, drivers, teams, and more. 
Be concise and accurate in your responses. If you don't know something, use the available tools to find the information.
In addition, you will be asked to act as a live race engineer strategist during a Formula 1 race, making crucial calls during the event.

Current time (ISO 8601): {time}"""
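# Note: the {time} placeholder above is filled in agent_chat via str.format, not by an f-string.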


def agent_chat(message: str, history: list):

    # Manually compose the prompt: system prompt (with the current time filled in),
    # then the chat history, then the current user message
    system_prompt = SYSTEM_PROMPT.format(time=time)
    history_text = "\n".join(f"{x['role']}: {x['content']}" for x in history)
    prompt = f"{system_prompt}\n{history_text}\nTask: {message}"
    return agent.run(prompt, max_steps=5)
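
# For example, with history = [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]
# and message = "Who is leading?", the prompt sent to the agent looks like:
#   <SYSTEM_PROMPT with the current time filled in>
#   user: Hi
#   assistant: Hello!
#   Task: Who is leading?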


if __name__ == "__main__":

    list_tools = True  # Set to True to only list the available MCP tools and exit (useful for debugging)
    local_model = True  # Set to True to use a local Ollama model; requires Ollama to be installed and running
    openf1_tool_only = True  # Restrict the agent to the OpenF1 tools exposed by the MCP server
    
    mcp_client = None  # So the finally block can tell whether a connection was ever opened
    try:

        # Connect to the F1 MCP server over SSE and fetch its tool definitions
        mcp_client = MCPClient(
            {"url": "https://agents-mcp-hackathon-f1-mcp-server.hf.space/gradio_api/mcp/sse", "transport": "sse"})
        tools = mcp_client.get_tools()

        if openf1_tool_only:
            # The MCP server prefixes its tool names with "f1_mcp_server_"; mirror that for every callable in openf1_tools
            openf1_fn_names = [f"f1_mcp_server_{fn}" for fn in dir(openf1_tools) if callable(getattr(openf1_tools, fn))]
            openf1_fn_names.remove("f1_mcp_server_urlopen")  # urlopen is an import in openf1_tools, not an F1 tool
            tools = [t for t in tools if t.name in openf1_fn_names]

        if list_tools:
            print("### MCP tools ###")
            print("\n".join(f"Tool {i + 1}: {t.name}: {t.description}" for i, t in enumerate(tools)))
            exit(0)  # The finally block below closes the MCP connection


        # Define model
        if local_model:
            model = LiteLLMModel(
                model_id="ollama_chat/qwen3:1.7b",
                api_base="http://127.0.0.1:11434",  # Default local Ollama server
                num_ctx=32768,
            )
        else:
            model = InferenceClientModel(
                model_id="deepseek-ai/DeepSeek-R1",
                provider="nebius",
                api_key=os.getenv("NEBIUS_API_KEY")  # Requires NEBIUS_API_KEY to be set in the environment
            )

        agent = ToolCallingAgent(model=model, tools=[*tools])  # Agent that plans and executes MCP tool calls


        chat_interface = gr.ChatInterface(
            fn=agent_chat,
            type="messages",
            examples=[
                "What are the driver standings for the 2024 Formula 1 season?",
                "What is the calendar for the 2024 Formula 1 season?"
            ],
            title="🏎️ Formula 1 Assistant",
            description="This is a simple agent that uses MCP tools to answer questions about Formula 1."
        )

        chat_interface.launch()  # Pass share=True here for a temporary public link

    finally:
        # Always close the MCP connection, but only if it was successfully opened
        if mcp_client is not None:
            mcp_client.disconnect()