Commit 7740cf7 · Parent(s): 81050d1 · "test xíu" (a little test)
app.py CHANGED: the incomplete module-level draft is replaced by a respond() chat handler wired into gr.ChatInterface, with system-message, max-new-tokens, temperature, and top-p controls.
@@ -1,40 +1,41 @@
-# app_phogpt4b_chat.py
 import gradio as gr
-# Load model directly
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
+# Load PhoGPT-4B-Chat model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained("vinai/PhoGPT-4B-Chat", trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained("vinai/PhoGPT-4B-Chat", trust_remote_code=True)
 
-# Combine the chat history + the new message into the prompt
-prompt = ""
+def respond(message, history, system_message, max_tokens, temperature, top_p):
+    messages = f"{system_message}\n"
     for user_msg, bot_msg in history:
-        …
+        if user_msg:
+            messages += f"User: {user_msg}\n"
+        if bot_msg:
+            messages += f"Bot: {bot_msg}\n"
+    messages += f"User: {message}\nBot:"
 
-    outputs = …
-        max_new_tokens=…
-        temperature=…
-        top_p=…
-        do_sample=True
+    inputs = tokenizer(messages, return_tensors="pt")
+    outputs = model.generate(
+        **inputs,
+        max_new_tokens=max_tokens,
+        temperature=temperature,
+        top_p=top_p,
+        do_sample=True,
+        pad_token_id=tokenizer.eos_token_id
     )
-
-    # Extract the Bot's reply
-    answer = generated.replace(prompt, "").strip()
+    full_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    response = full_output.replace(messages, "").strip()
+    yield response
 
-    # Update the history and return
-    history.append((message, answer))
-    return history
-
-# 3️⃣ Gradio interface
 demo = gr.ChatInterface(
-    …
+    respond,
+    additional_inputs=[
+        gr.Textbox(value="Bạn là một chatbot người Việt thân thiện.", label="System message"),
+        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+    ],
 )
 
 if __name__ == "__main__":
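
The hunk stops at the __main__ guard (new line 41), so the body of the guard sits outside the diff; presumably it just calls demo.launch(). One fragile spot inside respond() is worth flagging: full_output.replace(messages, "") removes the prompt only if decoding reproduces it byte-for-byte, which a tokenizer round-trip does not guarantee. A common alternative is to decode only the tokens generated after the prompt, as in the sketch below; extract_reply is a hypothetical helper, not part of the commit.

def extract_reply(inputs, outputs, tokenizer):
    # Decode only the newly generated tokens instead of string-replacing
    # the prompt text out of the full decoded output.
    prompt_len = inputs["input_ids"].shape[-1]
    return tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()

Inside respond(), the last three lines of the function would then collapse to a single `yield extract_reply(inputs, outputs, tokenizer)`.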
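Because respond is a generator, gr.ChatInterface treats each yield as a streamed chunk; here it yields once, so the whole reply appears in a single step. A quick way to sanity-check the handler without launching the UI is to drive it directly. The snippet below is a hypothetical smoke test (the example messages are illustrative, not from the commit) and assumes the tuple-style (user, bot) history that the handler's loop unpacks; newer Gradio versions may pass messages-style dicts instead, in which case the loop would need adapting.

# Hypothetical smoke test; run in the same module so respond() is in scope.
history = [("Xin chào!", "Chào bạn, mình có thể giúp gì?")]  # (user, bot) tuples
for chunk in respond(
    "Bạn hãy giới thiệu về PhoGPT.",              # message
    history,
    "Bạn là một chatbot người Việt thân thiện.",  # system message ("You are a friendly Vietnamese chatbot.")
    64,                                           # max_tokens
    0.7,                                          # temperature
    0.95,                                         # top_p
):
    print(chunk)  # respond yields once, so this prints the complete reply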