import gradio as gr
from huggingface_hub import InferenceClient

# Inference API client for the 4-bit (bnb) quantized Llama 3.1 8B Instruct checkpoint.
client = InferenceClient("unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit")

def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Start from the system prompt, then replay the previous turns of the conversation.
    messages = [{"role": "system", "content": system_message}]

    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})

    # Append the new user message last.
    messages.append({"role": "user", "content": message})

    response = ""
    # Stream the completion, accumulating tokens so the UI updates incrementally.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # some stream chunks carry no content
            response += token
        yield response


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="You are a friendly chatbot. Respond only in the Bisaya language. No English translation.",
            label="System message",
            visible=False,
        ),
        gr.Slider(
            minimum=1,
            maximum=2048,
            value=512,
            step=1,
            label="Max new tokens",
            visible=False,
        ),
        gr.Slider(
            minimum=0.1,
            maximum=4.0,
            value=0.7,
            step=0.1,
            label="Temperature",
            visible=False,
        ),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
            visible=False,
        ),
    ],
)

if __name__ == "__main__":
    demo.launch()