gperdrizet committed
Commit e3f6e50 · verified · Parent: 1c35e11

Added reprompting loop workflow for new tools.

Files changed (1):
  client/tool_workflows.py +215 -0
client/tool_workflows.py ADDED
@@ -0,0 +1,215 @@
+ '''Functions to handle re-prompting and final reply generation
+ downstream of LLM tool calls.'''
+
+ import json
+ import logging
+ import queue
+
+ from anthropic.types import text_block
+
+ from client import prompts
+ from client.anthropic_bridge import AnthropicBridge
+
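+ # Canned intermediate replies, used when the model's tool-call response
+ # does not include a TextBlock to show the user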
+ INTERMEDIATE_REPLY_HINTS = {
+     'context_search': 'Let me find some additional context before I generate a final answer.',
+     'find_article': 'I will find the title of that article.',
+     'get_summary': 'I will summarize that article.',
+     'get_link': 'I will get the link to that article.'
+ }
+
+
+ async def tool_loop(
+         user_query: str,
+         prior_reply: str,
+         result: dict,
+         bridge: AnthropicBridge,
+         output_queue: queue.Queue,
+         dialog: logging.Logger
+ ) -> None:
+
+     '''Re-prompts the LLM in a loop until it generates a final reply from tool output.
+
+     Args:
+         user_query: the original user input that provoked the tool call
+         prior_reply: the model's last reply before the tool call
+         result: the complete model reply containing the tool call
+         bridge: AnthropicBridge class instance
+         output_queue: queue used to send results back to the Gradio UI
+         dialog: logger instance to record intermediate responses and internal dialog
+     '''
+
+     tool_call = result['tool_call']
+     tool_name = tool_call['name']
+
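+     # get_feed has a dedicated prompt template and needs only one re-prompt;
+     # every other tool goes through the generic re-prompting loop below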
+     if tool_name == 'get_feed':
+         reply = await get_feed_call(
+             user_query,
+             result,
+             bridge,
+             dialog
+         )
+
+         output_queue.put(reply)
+
+     else:
+         tool_parameters = tool_call['parameters']
+         response_content = result['llm_response'].content[0]
+
+         if isinstance(response_content, text_block.TextBlock):
+             intermediate_reply = response_content.text
+         else:
+             intermediate_reply = INTERMEDIATE_REPLY_HINTS[tool_name]
+
+         dialog.info('LLM intermediate reply: %s', intermediate_reply)
+         dialog.info('MCP: called %s', tool_name)
+
+         tool_result = json.loads(result['tool_result'].content)['text']
+
+         prompt = prompts.OTHER_TOOL_PROMPT.substitute(
+             user_query=user_query,
+             prior_reply=prior_reply,
+             intermediate_reply=intermediate_reply,
+             tool_name=tool_name,
+             tool_parameters=tool_parameters,
+             tool_result=tool_result
+         )
+
+         dialog.info('System: re-prompting LLM with return from %s call', tool_name)
+
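+         # Keep re-prompting until other_call() returns a final reply instead
+         # of another extended prompt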
+         while True:
+
+             reply = await other_call(
+                 prompt,
+                 bridge,
+                 dialog
+             )
+
+             if 'final_reply' in reply:
+                 final_reply = reply['final_reply']
+                 dialog.info('LLM final reply: %s ...', final_reply[:50])
+                 output_queue.put(final_reply)
+                 break
+
+             else:
+                 prompt = reply['new_prompt']
+
+
+ async def get_feed_call(
+         user_query: str,
+         result: dict,
+         bridge: AnthropicBridge,
+         dialog: logging.Logger
+ ) -> str:
+
+     '''Re-prompts the LLM after a call to get_feed().
+
+     Args:
+         user_query: the original user input that provoked the tool call
+         result: the complete model reply containing the tool call
+         bridge: AnthropicBridge class instance
+         dialog: logger instance to record intermediate responses and internal dialog
+
+     Returns:
+         The model's final reply text.
+     '''
+
+     tool_call = result['tool_call']
+     tool_name = tool_call['name']
+     tool_parameters = tool_call['parameters']
+     website = tool_parameters['website']
+     response_content = result['llm_response'].content[0]
+
+     if isinstance(response_content, text_block.TextBlock):
+         intermediate_reply = response_content.text
+     else:
+         intermediate_reply = f'I will check the {website} RSS feed for you.'
+
+     dialog.info('LLM intermediate reply: %s', intermediate_reply)
+     dialog.info('MCP: called %s on %s', tool_name, website)
+
+     articles = json.loads(result['tool_result'].content)['text']
+
+     prompt = prompts.GET_FEED_PROMPT.substitute(
+         website=website,
+         user_query=user_query,
+         intermediate_reply=intermediate_reply,
+         articles=articles
+     )
+
+     input_message = [{
+         'role': 'user',
+         'content': prompt
+     }]
+
+     dialog.info('System: re-prompting LLM with return from %s call', tool_name)
+
+     result = await bridge.process_query(
+         prompts.REPROMPTING_SYSTEM_PROMPT,
+         input_message
+     )
+
+     try:
+         reply = result['llm_response'].content[0].text
+
+     except (IndexError, AttributeError):
+         reply = 'No final reply from model'
+
+     dialog.info('LLM final reply: %s ...', reply[:50])
+
+     return reply
+
+
+ async def other_call(
+         prompt: str,
+         bridge: AnthropicBridge,
+         dialog: logging.Logger
+ ) -> dict:
+
+     '''Re-prompts the LLM with the output of any tool other than get_feed().
+
+     Args:
+         prompt: the re-prompting text to send to the LLM
+         bridge: AnthropicBridge class instance
+         dialog: logger instance to record intermediate responses and internal dialog
+
+     Returns:
+         A dict containing either 'final_reply' with the model's answer or
+         'new_prompt' with an extended prompt for the next loop iteration.
+     '''
+
+     input_message = [{
+         'role': 'user',
+         'content': prompt
+     }]
+
+     result = await bridge.process_query(
+         prompts.REPROMPTING_SYSTEM_PROMPT,
+         input_message
+     )
+
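+     # If the model responded with another tool call, fold that call and its
+     # result into the running prompt for the next pass of the loop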
+     if result['tool_result']:
+
+         tool_call = result['tool_call']
+         tool_name = tool_call['name']
+         tool_parameters = tool_call['parameters']
+         response_content = result['llm_response'].content[0]
+
+         if isinstance(response_content, text_block.TextBlock):
+             intermediate_reply = response_content.text
+         else:
+             intermediate_reply = INTERMEDIATE_REPLY_HINTS[tool_name]
+
+         dialog.info('LLM intermediate reply: %s', intermediate_reply)
+         dialog.info('MCP: called %s', tool_name)
+
+         tool_result = json.loads(result['tool_result'].content)['text']
+
+         prompt += f'agent: {intermediate_reply}\n'
+         prompt += f'function call: {tool_name}("{tool_parameters}")\n'
+         prompt += f'function return: {tool_result}\n'
+
+         dialog.info('System: re-prompting LLM with return from %s call', tool_name)
+
+         return {'new_prompt': prompt}
+
+     else:
+
+         reply = result['llm_response'].content[0].text
+         return {'final_reply': reply}
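
For orientation, a minimal sketch of how these workflows might be driven from the chat layer. It assumes, as the code above implies, that AnthropicBridge.process_query() returns a dict with 'tool_call', 'tool_result' and 'llm_response' keys; the run_agent() driver, the placeholder system prompt, and the queue/logger wiring here are hypothetical and not part of this commit.

import logging
import queue

from client.anthropic_bridge import AnthropicBridge
from client.tool_workflows import tool_loop

async def run_agent(user_query: str, bridge: AnthropicBridge) -> str:
    '''Hypothetical driver: sends one query and resolves any tool call.'''

    dialog = logging.getLogger('dialog')
    output_queue: queue.Queue = queue.Queue()

    result = await bridge.process_query(
        'You are a helpful assistant.',  # placeholder system prompt
        [{'role': 'user', 'content': user_query}]
    )

    if result['tool_result']:
        # The model called a tool: tool_loop() re-prompts until it gets a
        # final reply, which it places on the output queue
        await tool_loop(user_query, '', result, bridge, output_queue, dialog)
        return output_queue.get()

    # No tool call: the first response is already the final reply
    return result['llm_response'].content[0].text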