arre99 committed on
Commit 42f3a9e · 1 Parent(s): 070bf41

changed the model to Qwen3-1.7b and made it include its previous history

Files changed (2)
  1. mcp_client.py +10 -36
  2. todo.txt +1 -2
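
For context, the substantive change in this commit is that agent_chat no longer builds a structured message list; it flattens the Gradio chat history straight into a single prompt string before handing it to the agent. A minimal, self-contained sketch of that pattern is below; SYSTEM_PROMPT is shortened, run_agent is a stand-in for agent.run(prompt, max_steps=5), and it assumes Gradio's "messages"-style history (a list of role/content dicts), which is what the committed code indexes.

SYSTEM_PROMPT = "You are a helpful Formula 1 assistant and strategist."  # shortened placeholder

def run_agent(prompt: str) -> str:
    # Stand-in for agent.run(prompt, max_steps=5); the real agent can also call MCP tools.
    return f"(agent response to: {prompt[-60:]})"

def agent_chat(message: str, history: list) -> str:
    # history is a list of {"role": ..., "content": ...} dicts (Gradio "messages" format)
    past_turns = "\n".join(f"{turn['role']}: {turn['content']}" for turn in history)
    prompt = f"{SYSTEM_PROMPT}\n{past_turns}\nTask: {message}"
    return run_agent(prompt)

if __name__ == "__main__":
    demo_history = [
        {"role": "user", "content": "Who won the 2021 Abu Dhabi Grand Prix?"},
        {"role": "assistant", "content": "Max Verstappen."},
    ]
    print(agent_chat("And who finished second?", demo_history))
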
mcp_client.py CHANGED
@@ -7,42 +7,17 @@ SYSTEM_PROMPT = """You are a helpful Formula 1 assistant and strategist. You hav
 Be concise and accurate in your responses. If you don't know something, use the available tools to find the information.
 In addition, you will be asked to act as a live race engineer strategist during a Formula 1 race, making crucial calls during the event."""

-def format_messages(history: List[List[str]], message: str) -> List[Dict[str, str]]:
-    """Format the conversation history and new message for the agent."""
-    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
-
-    # Add conversation history
-    for user_msg, bot_msg in history:
-        messages.append({"role": "user", "content": user_msg})
-        messages.append({"role": "assistant", "content": bot_msg})
-
-    # Add the new message
-    messages.append({"role": "user", "content": message})
-    return messages

-def agent_chat(message: str, history: List[List[str]]) -> str:
-    """Handle chat messages with conversation history."""
+def agent_chat(message: str, history: list):

-    # Format messages with system prompt and history
-    formatted_messages = format_messages(history, message)
-
-    # Convert messages to a single string with role indicators
-    chat_history = "\n".join(
-        f"{msg['role'].capitalize()}: {msg['content']}"
-        for msg in formatted_messages[1:]  # Skip system prompt in the chat history
-    )
-
-    # Include system prompt at the beginning
-    full_prompt = f"{SYSTEM_PROMPT}\n\n{chat_history}"
-
-    # Get agent response
-    response = str(agent.run(full_prompt))
-    return response
+    # Manually compose messages: system prompt, then history, then current user message
+    message = f"{SYSTEM_PROMPT}\n{"\n".join([f"{x['role']}: {x['content']}" for x in history])}\nTask: {message}"
+    return agent.run(message, max_steps=5)


 if __name__ == "__main__":

-    only_list_tools = False # Set to True to only list tools (used for debugging)
+    list_tools = False # Set to True to only list tools (used for debugging)
     local_model = True # If you have Ollama installed, set this to True

     try:
@@ -51,10 +26,9 @@ if __name__ == "__main__":
             {"url": "https://agents-mcp-hackathon-f1-mcp-server.hf.space/gradio_api/mcp/sse", "transport": "sse"})
         tools = mcp_client.get_tools()

-        print("### MCP tools ### ")
-        print("\n".join(f"Tool {1+i}: {t.name}: {t.description}" for i,t in enumerate(tools)))
-
-        if only_list_tools:
+        if list_tools:
+            print("### MCP tools ### ")
+            print("\n".join(f"Tool {1+i}: {t.name}: {t.description}" for i,t in enumerate(tools)))
             mcp_client.disconnect()
             exit(0)

@@ -62,7 +36,7 @@
         # Define model
         if local_model:
             model = LiteLLMModel(
-                model_id="ollama_chat/qwen3:8b",
+                model_id="ollama_chat/qwen3:1.7b",
                 api_base="http://127.0.0.1:11434", # Default ollama server
                 num_ctx=32768,
             )
@@ -73,7 +47,7 @@
                 api_key=os.getenv("NEBIUS_API_KEY")
             )

-        agent = ToolCallingAgent(tools=[*tools], model=model)
+        agent = ToolCallingAgent(model=model, tools=[*tools])


         chat_interface = gr.ChatInterface(
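
The diff cuts off at the gr.ChatInterface( call, so the wiring of agent_chat into the UI is not shown here. A plausible continuation is sketched below; the keyword arguments (fn, type="messages", title) and the stub agent_chat are illustrative assumptions, not lines from this commit.

import gradio as gr

def agent_chat(message: str, history: list) -> str:
    # Stand-in for the agent_chat defined in mcp_client.py above.
    return f"echo: {message}"

chat_interface = gr.ChatInterface(
    fn=agent_chat,
    type="messages",  # assumed: delivers history as role/content dicts, which agent_chat indexes
    title="F1 MCP Assistant",  # placeholder title
)

if __name__ == "__main__":
    chat_interface.launch()
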
todo.txt CHANGED
@@ -2,5 +2,4 @@
 * Solution is to have a static json file that maps drivers for each season/year
 - Same applies for constructor championship standings but instead Constructor dropdown
 * Similar solution to above
-- Add driver comparison
-- simplify the mcp_client.py to single call.
+- Add driver comparison