'''Functions for controlling chat flow between Gradio and Anthropic/MCP'''

import logging
import queue
from gradio.components.chatbot import ChatMessage

from client import prompts
from client.anthropic_bridge import AnthropicBridge
import client.gradio_functions as gradio_funcs
import client.tool_workflows as tool_funcs

# Create dialog logger
dialog = gradio_funcs.get_dialog_logger(clear=True)


async def agent_input(
        bridge: AnthropicBridge,
        output_queue: queue.Queue,
        chat_history: list
) -> None:

    '''Handles one model interaction: sends the formatted chat history to the
    LLM and streams the reply (or tool-loop output) back via output_queue.'''

    logger = logging.getLogger(__name__ + '.agent_input')
    reply = 'No reply from LLM'

    # Latest user turn plus the assistant's previous reply (if any), both
    # passed to the tool loop for context
    user_query = chat_history[-1]['content']

    if len(chat_history) > 1:
        prior_reply = chat_history[-2]['content']

    else:
        prior_reply = ''

    dialog.info('User: %s', user_query)

    # Normalize the Gradio history into Anthropic-style message dicts
    input_messages = format_chat_history(chat_history)
    result = await bridge.process_query(
        prompts.DEFAULT_SYSTEM_PROMPT,
        input_messages
    )

    if result['tool_result']:
        logger.info('LLM called tool, entering tool loop.')
        await tool_funcs.tool_loop(
            user_query,
            prior_reply,
            result,
            bridge,
            output_queue,
            dialog
        )

    else:
        logger.info('LLM replied directly.')

        try:
            reply = result['llm_response'].content[0].text

        # Guard against responses with no parsable text content
        except (AttributeError, IndexError):
            reply = 'Bad reply - could not parse'

        logger.info('Reply: %s', reply)
        output_queue.put(reply)

    # Sentinel telling the consumer that this turn is complete
    output_queue.put('bot-finished')


def format_chat_history(history: list) -> list[dict]:
    '''Formats Gradio chat history for submission to Anthropic.'''

    messages = []

    for chat_message in history:
        if isinstance(chat_message, ChatMessage):
            role, content = chat_message.role, chat_message.content
        else:
            role, content = chat_message.get('role'), chat_message.get('content')

        if role in ['user', 'assistant', 'system']:
            messages.append({'role': role, 'content': content})

    return messages
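

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): one way a
# Gradio streaming handler could drive agent_input(). The handler name
# `respond`, the bridge being passed straight to the handler, and the
# accumulate-into-one-message strategy are assumptions; only the queue
# handoff and the 'bot-finished' sentinel come from agent_input() above.
# ---------------------------------------------------------------------------

import asyncio  # used only by the sketch below


async def respond(bridge: AnthropicBridge, chat_history: list):
    '''Hypothetical Gradio callback: runs one agent turn and streams the
    queued output to the chatbot until the sentinel arrives.'''

    output_queue: queue.Queue = queue.Queue()

    # Run the agent turn concurrently so the queue can be drained as it fills
    task = asyncio.create_task(agent_input(bridge, output_queue, chat_history))

    # Grow a single assistant message so user/assistant turns keep alternating
    chat_history.append({'role': 'assistant', 'content': ''})

    while True:
        # queue.Queue.get() blocks, so hand it off to a worker thread
        chunk = await asyncio.to_thread(output_queue.get)

        if chunk == 'bot-finished':
            break

        chat_history[-1]['content'] += chunk
        yield chat_history

    await task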