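"""Gradio chat app for a Formula 1 assistant.

Connects to a remote F1 MCP server over SSE, exposes the server's tools to a
smolagents ToolCallingAgent, and serves the agent through a Gradio
ChatInterface. The model backend is either a local Ollama model or a hosted
inference model (the latter requires the NEBIUS_API_KEY environment variable).
"""
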
import os
import gradio as gr
from typing import List, Dict
from smolagents import InferenceClientModel, LiteLLMModel, ToolCallingAgent, MCPClient

SYSTEM_PROMPT = """You are a helpful Formula 1 assistant and strategist. You have access to various F1 data and tools to help answer questions about races, drivers, teams, and more. 
Be concise and accurate in your responses. If you don't know something, use the available tools to find the information.
In addition, you will be asked to act as a live race engineer strategist during a Formula 1 race, making crucial calls during the event."""

def format_messages(history: List[Dict[str, str]], message: str) -> List[Dict[str, str]]:
    """Format the conversation history and new message for the agent."""
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]

    # With gr.ChatInterface(type="messages"), the history arrives as a list of
    # {"role": ..., "content": ...} dicts, so each entry can be appended as-is.
    for msg in history:
        messages.append({"role": msg["role"], "content": msg["content"]})

    # Add the new message
    messages.append({"role": "user", "content": message})
    return messages
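# For example, format_messages([{"role": "user", "content": "Hi"},
#                               {"role": "assistant", "content": "Hello!"}],
#                              "Who won at Monza in 2024?")
# returns the system prompt followed by both history turns and the new
# user message, each as a {"role": ..., "content": ...} dict.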

def agent_chat(message: str, history: List[Dict[str, str]]) -> str:
    """Handle chat messages with conversation history."""

    # Format messages with system prompt and history
    formatted_messages = format_messages(history, message)
    
    # ToolCallingAgent.run() takes a single task string and does not keep chat
    # state between calls, so flatten the history into one prompt with role
    # indicators.
    chat_history = "\n".join(
        f"{msg['role'].capitalize()}: {msg['content']}" 
        for msg in formatted_messages[1:]  # Skip system prompt in the chat history
    )
    
    # Include system prompt at the beginning
    full_prompt = f"{SYSTEM_PROMPT}\n\n{chat_history}"
    
    # Get agent response
    response = str(agent.run(full_prompt))
    return response


if __name__ == "__main__":

    only_list_tools = False # Set to True to only list tools (used for debugging)
    local_model = True # If you have Ollama installed, set this to True
    
    mcp_client = None  # defined before the try block so the finally clause is safe
    try:
        mcp_client = MCPClient(
            {"url": "https://agents-mcp-hackathon-f1-mcp-server.hf.space/gradio_api/mcp/sse", "transport": "sse"})
        tools = mcp_client.get_tools()

        print("### MCP tools ### ")
        print("\n".join(f"Tool {1+i}: {t.name}: {t.description}" for i,t in enumerate(tools)))

        if only_list_tools:
            exit(0)  # the finally block below disconnects the client


        # Define model
        if local_model:
            model = LiteLLMModel(
                model_id="ollama_chat/qwen3:8b",
                api_base="http://127.0.0.1:11434", # Default ollama server
                num_ctx=32768,
            )
        else:
            model = InferenceClientModel(
                model_id="deepseek-ai/DeepSeek-R1",
                provider="nebius",
                api_key=os.getenv("NEBIUS_API_KEY")
            )

        agent = ToolCallingAgent(tools=[*tools], model=model)


        chat_interface = gr.ChatInterface(
            fn=agent_chat,
            type="messages",
            examples=[
                "What are the driver standings for the 2024 Formula 1 season?",
                "What is the calendar for the 2024 Formula 1 season?"
            ],
            title="🏎️ Formula 1 Assistant",
            description="This is a simple agent that uses MCP tools to answer questions about Formula 1."
        )

        chat_interface.launch()

    finally:
        if mcp_client is not None:
            mcp_client.disconnect()
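# Example local run (hypothetical file name app.py; assumes Ollama is serving
# qwen3:8b on its default port):
#   ollama pull qwen3:8b
#   python app.py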