Added intermediate responses and response streaming via a queue. Improved dialog logging.
- client/interface.py +15 -22
- client/prompts.py +4 -14
- rss_client.py +43 -11
client/interface.py
CHANGED
@@ -2,6 +2,7 @@
 
 import json
 import logging
+import queue
 from anthropic.types import text_block
 from gradio.components.chatbot import ChatMessage
 
@@ -15,6 +16,7 @@ dialog = gradio_funcs.get_dialog_logger(clear = True)
 
 async def agent_input(
     bridge: AnthropicBridge,
+    output_queue: queue.Queue,
     chat_history: list
 ) -> list:
 
@@ -46,6 +48,7 @@ async def agent_input(
     else:
         intermediate_reply = f'I Will check the {website} RSS feed for you'
 
+    output_queue.put(intermediate_reply)
     dialog.info('LLM: %s', intermediate_reply)
     dialog.info('LLM: called %s on %s', tool_name, website)
 
@@ -63,6 +66,9 @@ async def agent_input(
         'content': prompt
     }]
 
+    dialog.info('System: re-prompting LLM with return from %s call', tool_name)
+    dialog.info('New prompt: %s ...', prompt[:150])
+
     logger.info('Re-prompting input %s', input_message)
     result = await bridge.process_query(
         prompts.GET_FEED_SYSTEM_PROMPT,
@@ -71,22 +77,12 @@ async def agent_input(
 
         try:
 
-            final_reply = result['llm_response'].content[0].text
+            reply = result['llm_response'].content[0].text
 
         except (IndexError, AttributeError):
-            final_reply = 'No final reply from model'
-
-        logger.info('LLM final reply: %s', final_reply)
+            reply = 'No final reply from model'
 
-        chat_history.append({
-            "role": "assistant",
-            "content": intermediate_reply
-        })
-
-        chat_history.append({
-            "role": "assistant",
-            "content": final_reply
-        })
+        logger.info('LLM final reply: %s', reply)
 
     else:
         try:
@@ -97,12 +93,9 @@ async def agent_input(
 
         logger.info('Direct, no-tool reply: %s', reply)
 
-        chat_history.append({
-            "role": "assistant",
-            "content": reply
-        })
-
-    return chat_history
+        dialog.info('LLM: %s ...', reply[:100])
+        output_queue.put(reply)
+        output_queue.put('bot-finished')
 
 
 def format_chat_history(history) -> list[dict]:
@@ -114,9 +107,9 @@ def format_chat_history(history) -> list[dict]:
         if isinstance(chat_message, ChatMessage):
            role, content = chat_message.role, chat_message.content
        else:
-            role, content = chat_message.get(
+            role, content = chat_message.get('role'), chat_message.get('content')
 
-        if role in [
-            messages.append({
+        if role in ['user', 'assistant', 'system']:
+            messages.append({'role': role, 'content': content})
 
     return messages
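The changes above make agent_input the producer half of a producer/consumer pair: every reply, intermediate or final, is put on a shared queue.Queue, and the literal string 'bot-finished' acts as a sentinel telling the consumer that nothing more is coming. A minimal, self-contained sketch of that pattern (names here are illustrative, not the project's own):

    import queue
    import threading

    def producer(out: queue.Queue) -> None:
        # Stands in for agent_input: push replies, then the sentinel
        out.put('I will check the feed for you')   # intermediate reply
        out.put('Here is what I found ...')        # final reply
        out.put('bot-finished')                    # sentinel: stream is done

    replies: queue.Queue = queue.Queue()
    threading.Thread(target=producer, args=(replies,)).start()

    while True:
        reply = replies.get()   # blocks until the producer puts something
        if reply == 'bot-finished':
            break
        print(reply)

queue.Queue is thread-safe, so the producer can run in a worker thread or event loop while the UI side drains messages as they arrive.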
client/prompts.py
CHANGED
@@ -5,19 +5,10 @@ from string import Template
 
 DEFAULT_SYSTEM_PROMPT = 'You are a helpful tool-using assistant.'
 
-GET_FEED_SYSTEM_PROMPT = '''
-You are a helpful assistant. Your job is to facilitate interactions between
-Human users and LLM agents.
-'''
+GET_FEED_SYSTEM_PROMPT = '''You are a helpful assistant. Your job is to facilitate interactions between Human users and LLM agents.'''
 
 GET_FEED_PROMPT = Template(
-    '''
-    Below is an exchange between a user and an agent. The user has asked
-    the agent to get new content from the $website RSS feed. In order to
-    complete the request, the agent has called a function which returned
-    the RSS feed content from $website in JSON format. Your job is to
-    complete the exchange by using the returned JSON RSS feed data to write
-    a human readable reply to the user.
+    '''Below is an exchange between a user and an agent. The user has asked the agent to get new content from the $website RSS feed. In order to complete the request, the agent has called a function which returned the RSS feed content from $website in JSON format. Your job is to complete the exchange by using the returned JSON RSS feed data to write a human readable reply to the user.
 
 user: $user_query
 
@@ -27,6 +18,5 @@ function call: get_feed_content($website)
 
 function return: $articles
 
-assistant:
-'''
-)
+assistant:'''
+)
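Collapsing the triple-quoted prompts onto single lines removes the stray indentation and newlines that were previously sent to the model; the string.Template placeholders ($website, $user_query, $articles) still work exactly as before. A small substitution sketch with made-up values, using a cut-down stand-in for GET_FEED_PROMPT:

    from string import Template

    # Cut-down stand-in for GET_FEED_PROMPT; the values below are invented
    template = Template('user: $user_query\n\nfunction return: $articles\n\nassistant:')

    prompt = template.substitute(
        user_query='Anything new on the feed?',
        articles='[{"title": "Example item"}]'
    )
    print(prompt)

substitute() raises KeyError if a placeholder is missing, which surfaces template bugs early; safe_substitute() would leave unknown placeholders in place instead.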
rss_client.py
CHANGED
@@ -1,7 +1,11 @@
 '''RSS MCP server demonstration client app.'''
 
 import os
+import asyncio
 import logging
+import time
+import queue
+from typing import Tuple
 from pathlib import Path
 from logging.handlers import RotatingFileHandler
 import gradio as gr
@@ -48,23 +52,51 @@ BRIDGE = AnthropicBridge(
 
 logger.info('Started Anthropic API bridge')
 
+# Queue to return responses to user
+OUTPUT_QUEUE = queue.Queue()
+logger.info('Created response queue')
+
-async def send_message(message: str, chat_history: list) -> str:
-    '''Submits user message to agent.
+def user_message(message: str, history: list) -> Tuple[str, list]:
+    '''Adds user message to conversation and returns for immediate posting.
 
     Args:
         message: the new message from the user as a string
         chat_history: list containing conversation history where each element is
             a dictionary with keys 'role' and 'content'
 
+    Returns
+        New chat history with user's message added.
+    '''
+
+    return '', history + [{'role': 'user', 'content': message}]
+
+
+def send_message(chat_history: list):
+    '''Submits chat history to agent, streams reply, one character at a time.
+
+    Args:
+        chat_history: list containing conversation history where each element is
+            a dictionary with keys 'role' and 'content'
+
     Returns
         New chat history with model's response to user added.
     '''
 
-    …
+    asyncio.run(interface.agent_input(BRIDGE, OUTPUT_QUEUE, chat_history))
+
+    while True:
+        response = OUTPUT_QUEUE.get()
+
+        if response == 'bot-finished':
+            break
+
+        chat_history.append({'role': 'assistant', 'content': ''})
+
+        for character in response:
+            chat_history[-1]['content'] += character
+            time.sleep(0.005)
 
+            yield chat_history
 
 
 with gr.Blocks(title='MCP RSS client') as demo:
@@ -82,8 +114,8 @@ with gr.Blocks(title='MCP RSS client') as demo:
     )
 
     # Dialog log output
-    dialog_output = gr.Textbox(label='Internal dialog', lines=10, max_lines=
-    timer = gr.Timer(
+    dialog_output = gr.Textbox(label='Internal dialog', lines=10, max_lines=100)
+    timer = gr.Timer(0.5, active=True)
 
     timer.tick( # pylint: disable=no-member
         lambda: gradio_funcs.update_dialog(), # pylint: disable=unnecessary-lambda
@@ -106,9 +138,9 @@ with gr.Blocks(title='MCP RSS client') as demo:
     )
 
     msg.submit( # pylint: disable=no-member
-        …
+        user_message, [msg, chatbot], [msg, chatbot], queue=False
+    ).then(
+        send_message, chatbot, chatbot
     )
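Here send_message is the consumer half: it kicks off agent_input with asyncio.run, then drains OUTPUT_QUEUE until the 'bot-finished' sentinel, yielding the growing history after each character so Gradio re-renders the Chatbot as a typewriter-style stream. The msg.submit(...).then(...) chain posts the user's message instantly (queue=False) before the slower streaming step runs. A stripped-down, self-contained sketch of the same wiring, assuming a recent Gradio with messages-format chatbots (this is not the app's full code):

    import time
    import gradio as gr

    def add_user_message(message: str, history: list):
        # Post the user's message immediately and clear the textbox
        return '', history + [{'role': 'user', 'content': message}]

    def stream_reply(history: list):
        # Generator handler: every yield re-renders the chatbot
        history.append({'role': 'assistant', 'content': ''})
        for character in 'Hello! This reply arrives one character at a time.':
            history[-1]['content'] += character
            time.sleep(0.005)
            yield history

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(type='messages')
        msg = gr.Textbox()
        msg.submit(
            add_user_message, [msg, chatbot], [msg, chatbot], queue=False
        ).then(
            stream_reply, chatbot, chatbot
        )

    demo.launch()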