Update app.py
app.py CHANGED
@@ -117,23 +117,19 @@ If the user is engaging in discussion, try to steer them towards getting in touc
         system_prompt += f"With this context, please chat with the user, always staying in character as {self.name}."
         return system_prompt
 
-    # def chat(self, message, history):
-    #     messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]
-    #     done = False
-    #     while not done:
-    #         response = self.openai.chat.completions.create(model="meta-llama/llama-3.3-8b-instruct:free", messages=messages, tools=tools)
-    #         if response.choices[0].finish_reason=="tool_calls":
-    #             message = response.choices[0].message
-    #             tool_calls = message.tool_calls
-    #             results = self.handle_tool_call(tool_calls)
-    #             messages.append(message)
-    #             messages.extend(results)
-    #         else:
-    #             done = True
-    #     return response.choices[0].message.content
     def chat(self, message, history):
         messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]
-
+        done = False
+        while not done:
+            response = self.openai.chat.completions.create(model="meta-llama/llama-3.3-8b-instruct:free", messages=messages, tools=tools)
+            if response.choices[0].finish_reason=="tool_calls":
+                message = response.choices[0].message
+                tool_calls = message.tool_calls
+                results = self.handle_tool_call(tool_calls)
+                messages.append(message)
+                messages.extend(results)
+            else:
+                done = True
         return response.choices[0].message.content
 
 
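Note on the restored loop: it passes a `tools` list to the completion call and delegates execution to `self.handle_tool_call`, neither of which appears in this hunk. The sketch below only illustrates the contract the loop assumes, a JSON-schema tool definition plus a helper that returns tool-role messages suitable for `messages.extend(results)`. The `record_user_details` name, its parameters, and the registry lookup are assumptions for illustration, not taken from this commit.

import json

# Illustrative tool; name, signature, and behaviour are assumptions, not part of this commit.
def record_user_details(email, name="unknown", notes="none"):
    # A real implementation would persist or forward the contact details.
    return {"recorded": "ok", "email": email, "name": name, "notes": notes}

# JSON-schema description of the tool, in the shape the `tools=` argument expects.
tools = [{
    "type": "function",
    "function": {
        "name": "record_user_details",
        "description": "Record that a user wants to get in touch, together with their email address.",
        "parameters": {
            "type": "object",
            "properties": {
                "email": {"type": "string"},
                "name": {"type": "string"},
                "notes": {"type": "string"},
            },
            "required": ["email"],
        },
    },
}]

TOOL_FUNCTIONS = {"record_user_details": record_user_details}

def handle_tool_call(self, tool_calls):
    # Would sit on the same class as chat(), hence the self parameter in this sketch.
    # Runs each requested tool and returns tool-role messages for the loop to append.
    results = []
    for tool_call in tool_calls:
        name = tool_call.function.name
        arguments = json.loads(tool_call.function.arguments)  # the model sends arguments as a JSON string
        tool = TOOL_FUNCTIONS.get(name)
        result = tool(**arguments) if tool else {"error": f"unknown tool: {name}"}
        results.append({
            "role": "tool",
            "content": json.dumps(result),
            "tool_call_id": tool_call.id,
        })
    return results

The tool-role message shape (role, content, tool_call_id) follows the OpenAI chat-completions tool-calling convention that the OpenAI-compatible client used in this file expects.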