import os

import gradio as gr
from huggingface_hub import InferenceClient

MODEL_ID = "unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF"

token = os.environ.get("HF_TOKEN")
client = InferenceClient(model=MODEL_ID, token=token)
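# Note (assumption, not stated in the original code): HF_TOKEN is expected to be
# set as a Space secret. If it is unset, the client typically falls back to
# unauthenticated requests, which are subject to stricter rate limits.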
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Build the chat-style prompt manually
    prompt = f"{system_message}\n\n"
    for user_msg, bot_msg in history:
        if user_msg:
            prompt += f"User: {user_msg}\n"
        if bot_msg:
            prompt += f"Assistant: {bot_msg}\n"
    prompt += f"User: {message}\nAssistant:"
    # Call the plain text-generation endpoint, without streaming
    response = client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )

    # The response comes back as a plain string; yielding it once keeps the
    # function a generator, which gr.ChatInterface also accepts
    yield response
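
# A minimal streaming sketch (not part of the original app): text_generation
# also accepts stream=True, in which case it yields incremental string chunks.
# The function name respond_stream is hypothetical; pass it to gr.ChatInterface
# in place of respond to stream tokens into the UI.
def respond_stream(message, history, system_message, max_tokens, temperature, top_p):
    prompt = f"{system_message}\n\n"
    for user_msg, bot_msg in history:
        if user_msg:
            prompt += f"User: {user_msg}\n"
        if bot_msg:
            prompt += f"Assistant: {bot_msg}\n"
    prompt += f"User: {message}\nAssistant:"

    partial = ""
    for chunk in client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        partial += chunk
        yield partial  # Gradio re-renders the message with each yielded value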
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
if __name__ == "__main__":
    demo.launch()