File size: 1,149 Bytes
1915306
 
 
e13b239
1915306
8b5f3bd
9558488
8b5f3bd
4a65c44
8b5f3bd
1915306
8b5f3bd
e8b0ec8
 
 
 
 
1915306
 
 
 
 
 
 
 
 
 
 
 
 
 
8b5f3bd
1915306
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
import gradio as gr
from huggingface_hub import InferenceClient

# Remote inference endpoint for the Zephyr-7B chat model (HF Inference API);
# all completions below go over the network through this client.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def respond(message, history: list[tuple[str, str]]):
    """Stream an assistant reply for *message*, constrained to Bisaya.

    Args:
        message: The latest user message.
        history: Prior turns as (user_text, assistant_text) pairs, either of
            which may be empty/None and is then skipped.

    Yields:
        The accumulated response text after each streamed token, so the UI
        can render progressively.
    """
    system_message = "Answer only in Bisaya language. If user enters english reject it in bisaya language. Don't prompt any english words unless there's no direct translation."
    max_tokens = 4096
    temperature = 0.6
    top_p = 0.95

    # Rebuild the full conversation: system prompt, then alternating turns.
    messages = [{"role": "system", "content": system_message}]
    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})
    messages.append({"role": "user", "content": message})

    response = ""
    # NOTE: loop variable renamed from `message` — the original shadowed the
    # function parameter of the same name.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # delta.content is None on role-only / finish chunks; the original
        # crashed with TypeError on `response += None`.
        if token is not None:
            response += token
        yield response

# Chat UI wired to the streaming generator above; ChatInterface passes
# (message, history) and renders each yielded partial response.
demo = gr.ChatInterface(respond)

# Launch the web server only when run as a script (HF Spaces imports the
# module and launches `demo` itself).
if __name__ == "__main__":
    demo.launch()