Balaramkm committed
Commit 2ccf180 · verified · 1 Parent(s): 223d1da

Update app.py

Files changed (1): app.py (+10, -5)
app.py CHANGED

@@ -1,5 +1,5 @@
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
 model_id = "deepseek-ai/deepseek-coder-1.3b-instruct"
@@ -7,15 +7,20 @@ model_id = "deepseek-ai/deepseek-coder-1.3b-instruct"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
-    device_map="auto",
     torch_dtype=torch.float16,
+    device_map="auto",
     trust_remote_code=True
 )
 
 def chat(prompt):
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True)
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return response
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-gr.Interface(fn=chat, inputs="text", outputs="text", title="DeepSeek Coder 6.7B Chatbot").launch()
+gr.Interface(
+    fn=chat,
+    inputs="text",
+    outputs="text",
+    title="DeepSeek Coder 1.3B",
+    description="Free coding assistant running on Hugging Face CPU"
+).launch()
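
As a side note, device_map="auto" relies on the accelerate package being installed (transformers raises an error at from_pretrained time otherwise), and on a CPU-only Space it simply places the whole model on CPU. A quick way to sanity-check the updated chat() function is to call it directly before launching the interface; the following is a minimal sketch, assuming the model/tokenizer setup above has already run, and the sample prompt is a hypothetical example, not part of this commit:

# Minimal smoke test for chat(): call it once and print the decoded output.
# Assumes app.py's model/tokenizer setup has already executed.
# The prompt below is a hypothetical example chosen for illustration.
sample_prompt = "Write a Python function that reverses a string."
print(chat(sample_prompt))

Because generate() is called with do_sample=True, repeated calls with the same prompt will produce different completions from run to run.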