"""Gradio chat demo serving DeepSeek-R1-Distill-Llama-8B on a Hugging Face ZeroGPU Space."""
import gradio as gr
import spaces
from transformers import pipeline

# Initialize model once at startup
model = pipeline(
    "text-generation",
    model="unsloth/DeepSeek-R1-Distill-Llama-8B",
    torch_dtype="auto",
    device_map="auto"
)

# Request ZeroGPU hardware for this call, allowing up to 120 seconds of GPU time
@spaces.GPU(duration=120)
def chat_response(message, history):
    # Format conversation history for model input
    messages = []
    for human, assistant in history:
        messages.extend([
            {"role": "user", "content": human},
            {"role": "assistant", "content": assistant}
        ])
    messages.append({"role": "user", "content": message})
    
    # Generate response
    response = model(
        messages,
        max_new_tokens=256,
        temperature=0.7,
        do_sample=True
    )
    
    # The pipeline echoes the full message list; the last entry is the newly generated assistant turn
    return response[0]['generated_text'][-1]["content"]

# Create chat interface
demo = gr.ChatInterface(
    chat_response,
    chatbot=gr.Chatbot(height=500),
    textbox=gr.Textbox(placeholder="Ask me anything...", container=False, scale=7),
    title="DeepSeek-Llama-8B Chat Demo",
    examples=[["Explain quantum computing simply"], ["Write a Python function for Fibonacci sequence"]]
)
demo.launch()
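
# To run this file outside of Spaces (a minimal sketch, assuming the imports above
# are the only dependencies, plus accelerate for device_map="auto"):
#   pip install gradio spaces transformers torch accelerate
#   python app.py
# The spaces.GPU decorator is a no-op when no ZeroGPU hardware is available.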