LymphSteamer committed
Commit 0d51f33 · verified · 1 Parent(s): 285cd49

Update app.py

Files changed (1)
  app.py  +24 -1
app.py CHANGED
@@ -1,7 +1,30 @@
  import gradio as gr
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ def haiku_generate():
+     model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     model = AutoModelForCausalLM.from_pretrained(
+         model_id,
+         device_map="auto",  # requires the accelerate package
+         low_cpu_mem_usage=True
+     )
+     model.eval()
+
+     prompt = "俳句だけを生成して"  # "Generate only a haiku"
+     inputs = tokenizer(prompt, return_tensors="pt")
+     if not torch.cuda.is_available():
+         inputs = {k: v.to("cpu") for k, v in inputs.items()}
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=25,
+             do_sample=True,
+             temperature=0.7,
+             top_p=0.9
+         )
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
  def greet(name):
      return "Hello " + name + "!!"
 
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+ demo = gr.Interface(fn=haiku_generate, inputs=None, outputs="text")
  demo.launch()
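As committed, `haiku_generate()` downloads and loads TinyLlama on every button press. A minimal sketch of loading the model once at startup instead (an optional refactor, not part of commit 0d51f33) might look like:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Sketch only: load the model a single time so each Gradio request
# runs generation only, instead of re-downloading/re-loading TinyLlama.
MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
TOKENIZER = AutoTokenizer.from_pretrained(MODEL_ID)
MODEL = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map="auto",        # requires the accelerate package
    low_cpu_mem_usage=True,
)
MODEL.eval()

def haiku_generate():
    # Same prompt as the commit: "Generate only a haiku"
    inputs = TOKENIZER("俳句だけを生成して", return_tensors="pt").to(MODEL.device)
    with torch.no_grad():
        outputs = MODEL.generate(
            **inputs,
            max_new_tokens=25,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
        )
    return TOKENIZER.decode(outputs[0], skip_special_tokens=True)
```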