# app.py
import gradio as gr
import openai

# The API key is read from the OPENAI_API_KEY environment variable; never hardcode it in source.
client = openai.OpenAI()
def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
    # Rebuild the conversation: system prompt first, then each (user, assistant)
    # pair from the chat history, then the new user message.
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})
    print("Messages: ", messages)

    # Call the OpenAI Responses API with the assembled conversation.
    response = client.responses.create(
        model="gpt-4.1-nano", 
        input=messages,
        temperature=temperature,
        top_p=top_p,
        max_output_tokens=max_tokens
    )
    # ChatInterface accepts generator callbacks; here the full reply text is
    # yielded in a single step rather than streamed token by token.
    yield response.output_text

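# ChatInterface wires the chat box to respond(); each control in additional_inputs
# is passed as an extra positional argument after (message, history), in the order
# listed: system_message, max_tokens, temperature, top_p.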
demo = gr.ChatInterface(
    respond,  # chat callback that produces each reply
    additional_inputs=[
        gr.Textbox("Bạn là một chatbot tiếng Việt thân thiện.", label="System message"),  # default prompt: "You are a friendly Vietnamese chatbot."
        gr.Slider(1, 2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
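    # launch() starts a local Gradio server, by default at http://127.0.0.1:7860.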
    demo.launch()