transformation of history added
app.py
CHANGED
@@ -10,8 +10,17 @@ model = AutoModelForCausalLM.from_pretrained("IlyaGusev/saiga_llama3_8b", torch_
 model = model
 
 
+def transform_history(history):
+    transformed_history = []
+    for qa_pair in history:
+        transformed_history.append({"role": "user", "content": qa_pair[0]})
+        transformed_history.append({"role": "assistant", "content": qa_pair[1]})
+    return transformed_history
+
+
 def predict(message, history):
     # print(history) [[question1, answer1], [question2, answer2]...]
+    history = transform_history(history)
     history_transformer_format = history + [{"role": "user", "content": message},
                                             {"role": "assistant", "content": ""}]
 
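For context: Gradio's ChatInterface hands predict its history as a list of [user_message, bot_reply] pairs (as the comment in the hunk above notes), while the chat template downstream expects a flat list of {"role", "content"} dicts, which is exactly what the new transform_history produces. A minimal, self-contained sketch of that behavior; the sample history values below are made up:

    def transform_history(history):
        # Flatten [[q1, a1], [q2, a2], ...] into alternating role/content dicts.
        transformed_history = []
        for qa_pair in history:
            transformed_history.append({"role": "user", "content": qa_pair[0]})
            transformed_history.append({"role": "assistant", "content": qa_pair[1]})
        return transformed_history

    # Hypothetical sample history in Gradio's pair format.
    history = [["Hi!", "Hello, how can I help?"],
               ["What is 2 + 2?", "4"]]

    print(transform_history(history))
    # [{'role': 'user', 'content': 'Hi!'},
    #  {'role': 'assistant', 'content': 'Hello, how can I help?'},
    #  {'role': 'user', 'content': 'What is 2 + 2?'},
    #  {'role': 'assistant', 'content': '4'}]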
@@ -32,8 +41,8 @@ def predict(message, history):
 
     partial_message = ""
     for new_token in streamer:
-        print(new_token)
-        if new_token != '<':
+        # print(new_token)
+        if new_token != 'assistant':
             partial_message += new_token
             yield partial_message
 
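The second hunk comments out the debug print and changes the stream filter from '<' to 'assistant': with a Llama-3-style chat template, the literal token "assistant" from the generation header can leak into the stream, so it is dropped. The commit shows only fragments of app.py, so what follows is a hedged sketch of how such a loop is typically wired with transformers' TextIteratorStreamer; the prompt construction and generation parameters are assumptions, not taken from this commit:

    from threading import Thread
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    # Assumed setup; only fragments of the real app.py appear in this diff.
    tokenizer = AutoTokenizer.from_pretrained("IlyaGusev/saiga_llama3_8b")
    model = AutoModelForCausalLM.from_pretrained("IlyaGusev/saiga_llama3_8b")

    def transform_history(history):
        # Same pair-to-dict flattening as in the commit above.
        transformed = []
        for question, answer in history:
            transformed.append({"role": "user", "content": question})
            transformed.append({"role": "assistant", "content": answer})
        return transformed

    def predict(message, history):
        messages = transform_history(history) + [{"role": "user", "content": message}]

        # Render the conversation with the model's chat template.
        input_ids = tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, return_tensors="pt"
        )

        # TextIteratorStreamer yields decoded text while generate() runs,
        # so generation goes into a background thread.
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True,
                                        skip_special_tokens=True)
        Thread(target=model.generate,
               kwargs=dict(input_ids=input_ids, streamer=streamer,
                           max_new_tokens=256)).start()

        partial_message = ""
        for new_token in streamer:
            if new_token != "assistant":  # drop the stray role-header token, as in the commit
                partial_message += new_token
                yield partial_message

Filtering the literal string "assistant" is a blunt heuristic; with skip_special_tokens enabled as above, the filter is often unnecessary, but it is kept here to mirror the commit.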