import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "deepseek-ai/deepseek-llm-7b-chat"

# Load the model in half precision; device_map="auto" lets accelerate
# place the weights on whatever GPU/CPU devices are available.
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, device_map="auto", torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

def chat(prompt):
    # Wrap the raw prompt in the model's chat template so the chat-tuned
    # model sees the conversation format it was trained on.
    text = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        tokenize=False,
        add_generation_prompt=True,
    )
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True)
    # Decode only the newly generated tokens, not the echoed prompt.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

gr.Interface(fn=chat, inputs="text", outputs="text").launch()