anugrahap committed
Commit 1362ad3
1 Parent(s): b171ab1

update pipeline

Files changed (1):
  app.py  +2 -10
app.py CHANGED
@@ -7,16 +7,8 @@ model_name = 'anugrahap/gpt2-indo-textgen'
 HF_TOKEN = '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'
 hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "output-gpt2-indo-textgen")
 
-# define the tokenization method
-tokenizer = AutoTokenizer.from_pretrained(model_name,
-                                          model_max_length=1e30,
-                                          padding_side='right',
-                                          return_tensors='pt')
-
-# add the EOS token as PAD token to avoid warnings
-model = AutoModelForCausalLM.from_pretrained(model_name, pad_token_id=tokenizer.eos_token_id)
-
-generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
+#using text generation pipeline
+generator = pipeline('text-generation', model=model_name)
 
 # create the decoder parameter to generate the text
 def single_generation(text,min_length,max_length,temperature,top_k,top_p,num_beams,repetition_penalty,do_sample):
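For reference, a minimal sketch of what the relevant part of app.py looks like after this commit: pipeline() loads the tokenizer and model for the given model name itself, so the explicit AutoTokenizer/AutoModelForCausalLM setup removed above is no longer needed. Only the generator setup and the single_generation signature appear in the diff; the function body shown here (forwarding the decoding parameters to the pipeline) and the surrounding Gradio wiring are assumptions, not the commit's actual code.

from transformers import pipeline

model_name = 'anugrahap/gpt2-indo-textgen'

# pipeline() resolves the tokenizer and model from model_name internally,
# replacing the manual AutoTokenizer/AutoModelForCausalLM setup.
generator = pipeline('text-generation', model=model_name)

# create the decoder parameter to generate the text
def single_generation(text, min_length, max_length, temperature, top_k, top_p,
                      num_beams, repetition_penalty, do_sample):
    # Assumed body: forward the decoding parameters to model.generate()
    # through the pipeline call; only the signature is visible in the diff.
    output = generator(text,
                       min_length=int(min_length),
                       max_length=int(max_length),
                       temperature=float(temperature),
                       top_k=int(top_k),
                       top_p=float(top_p),
                       num_beams=int(num_beams),
                       repetition_penalty=float(repetition_penalty),
                       do_sample=bool(do_sample))
    # The pipeline returns a list of dicts with a 'generated_text' field.
    return output[0]['generated_text']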