File size: 3,530 Bytes
0ae4701 390e1b0 f97da2b 0ae4701 f97da2b 0ae4701 7cc32e5 0ae4701 f97da2b 390e1b0 f97da2b 0ae4701 7cc32e5 9b866fc 7cc32e5 0ae4701 7cc32e5 0ae4701 9b866fc f97da2b 0ae4701 9b866fc 0ae4701 f97da2b 390e1b0 7cc32e5 f97da2b 390e1b0 9a98462 390e1b0 7cc32e5 f97da2b 390e1b0 f97da2b 390e1b0 f97da2b 390e1b0 f97da2b 0ae4701 7cc32e5 0ae4701 9b866fc 390e1b0 0ae4701 390e1b0 0ae4701 390e1b0 0ae4701 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 |
'''Functions for controlling chat flow between Gradio and Anthropic/MCP'''
import json
import logging
import queue
from anthropic.types import text_block
from gradio.components.chatbot import ChatMessage
from client import prompts
from client.anthropic_bridge import AnthropicBridge
import client.gradio_functions as gradio_funcs
# Module-level logger dedicated to the user/LLM dialog transcript.
# NOTE(review): clear=True presumably truncates any previous dialog log at
# startup — confirm against gradio_functions.get_dialog_logger.
dialog = gradio_funcs.get_dialog_logger(clear = True)
async def agent_input(
    bridge: AnthropicBridge,
    output_queue: queue.Queue,
    chat_history: list
) -> None:
    '''Handles model interactions.

    Runs one turn of the conversation: sends the formatted chat history to
    the LLM via the bridge, and if the model called the `get_feed` tool,
    re-prompts the model with the tool's result to produce a final answer.

    Args:
        bridge: AnthropicBridge used for all LLM round-trips.
        output_queue: queue that streams replies back to the Gradio UI;
            receives intermediate text, the final reply, and a
            'bot-finished' sentinel.
        chat_history: Gradio-style chat history; the last entry's
            'content' is treated as the current user query.

    Returns:
        None — all output is delivered through output_queue.
    '''
    logger = logging.getLogger(__name__ + '.agent_input')
    # Fallback reply if no branch below assigns one (e.g. an unrecognized
    # tool was called).
    reply = 'No reply from LLM'
    user_query = chat_history[-1]['content']
    dialog.info('User: %s', user_query)
    input_messages = format_chat_history(chat_history)
    # First round-trip: let the model answer directly or request a tool call.
    result = await bridge.process_query(
        prompts.DEFAULT_SYSTEM_PROMPT,
        input_messages
    )
    logger.debug(result)
    if result['tool_result']:
        tool_call = result['tool_call']
        tool_name = tool_call['name']
        if tool_name == 'get_feed':
            tool_parameters = tool_call['parameters']
            website = tool_parameters['website']
            # The model's message accompanying the tool call; fall back to a
            # canned acknowledgement when the first block is not plain text.
            response_content = result['llm_response'].content[0]
            if isinstance(response_content, text_block.TextBlock):
                intermediate_reply = response_content.text
            else:
                intermediate_reply = f'I Will check the {website} RSS feed for you'
            # Stream the acknowledgement to the UI before the slower
            # second round-trip.
            output_queue.put(intermediate_reply)
            dialog.info('LLM: %s', intermediate_reply)
            dialog.info('LLM: called %s on %s', tool_name, website)
            # NOTE(review): assumes tool_result.content is a JSON string
            # with a 'text' key — confirm against the MCP server's output.
            articles = json.loads(result['tool_result'].content)['text']
            # Build the follow-up prompt embedding the fetched articles.
            prompt = prompts.GET_FEED_PROMPT.substitute(
                website=website,
                user_query=user_query,
                intermediate_reply=intermediate_reply,
                articles=articles
            )
            input_message =[{
                'role': 'user',
                'content': prompt
            }]
            dialog.info('System: re-prompting LLM with return from %s call', tool_name)
            dialog.info('New prompt: %s ...', prompt[:75])
            logger.info('Re-prompting input %s', input_message)
            # Second round-trip: summarize the tool output for the user.
            result = await bridge.process_query(
                prompts.GET_FEED_SYSTEM_PROMPT,
                input_message
            )
            try:
                reply = result['llm_response'].content[0].text
            except (IndexError, AttributeError):
                # Empty content list or non-text block — keep a safe default.
                reply = 'No final reply from model'
            logger.info('LLM final reply: %s', reply)
    else:
        # No tool was called: take the model's direct answer.
        try:
            reply = result['llm_response'].content[0].text
        except AttributeError:
            reply = 'Bad reply - could not parse'
        logger.info('Direct, no-tool reply: %s', reply)
    dialog.info('LLM: %s ...', reply[:75])
    # Deliver the final reply, then signal the UI that this turn is done.
    output_queue.put(reply)
    output_queue.put('bot-finished')
def format_chat_history(history) -> list[dict]:
    '''Formats gradio chat history for submission to anthropic.

    Accepts a mix of gradio ChatMessage objects and plain dicts, keeping
    only entries whose role is user, assistant, or system.
    '''
    formatted = []
    for entry in history:
        if isinstance(entry, ChatMessage):
            role = entry.role
            content = entry.content
        else:
            role = entry.get('role')
            content = entry.get('content')
        # Drop tool/metadata entries anthropic does not accept.
        if role in ('user', 'assistant', 'system'):
            formatted.append({'role': role, 'content': content})
    return formatted
|