anugrahap committed on
Commit 8a3751a · 1 Parent(s): 1df0250

Final app for version 2 with Flagging added

Files changed (1)
  app.py  +8 -8
app.py CHANGED
@@ -7,16 +7,16 @@ model_name = 'anugrahap/gpt2-indo-textgen'
 HF_TOKEN = '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'
 hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "output-gpt2-indo-textgen")
 
-# # define the tokenization method
-# tokenizer = AutoTokenizer.from_pretrained(model_name,
-#                                           model_max_length=1e30,
-#                                           padding_side='right',
-#                                           return_tensors='pt')
+# define the tokenization method
+tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                          model_max_length=1e30,
+                                          padding_side='right',
+                                          return_tensors='pt')
 
-# # add the EOS token as PAD token to avoid warnings
-# model = AutoModelForCausalLM.from_pretrained(model_name, pad_token_id=tokenizer.eos_token_id)
+# add the EOS token as PAD token to avoid warnings
+model = AutoModelForCausalLM.from_pretrained(model_name, pad_token_id=tokenizer.eos_token_id)
 
-generator = pipeline('text-generation', model=model_name)
+generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
 
 # create the decoder parameter to generate the text
 def single_generation(text,min_length,max_length,temperature,top_k,top_p,num_beams,repetition_penalty,do_sample):
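
For context, the hunk ends at the `single_generation` signature; its body is not part of this commit, but it presumably forwards the decoder parameters to the `generator` pipeline built above. A minimal sketch, assuming a body that is not shown in the diff:

# Sketch only (assumed, not from the commit): forward the UI decoder
# parameters to the text-generation pipeline defined above in app.py.
def single_generation(text, min_length, max_length, temperature, top_k, top_p,
                      num_beams, repetition_penalty, do_sample):
    result = generator(text,
                       min_length=int(min_length),
                       max_length=int(max_length),
                       temperature=temperature,
                       top_k=int(top_k),
                       top_p=top_p,
                       num_beams=int(num_beams),
                       repetition_penalty=repetition_penalty,
                       do_sample=do_sample)
    # the pipeline returns a list of dicts with a 'generated_text' key
    return result[0]['generated_text']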
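The commit message also mentions flagging: in Gradio, a `HuggingFaceDatasetSaver` such as `hf_writer` is typically attached to the interface as its flagging callback so that flagged samples are pushed to the named dataset. A hedged sketch of that wiring; only `single_generation` and `hf_writer` come from app.py, the input components and their ranges are assumptions:

# Sketch only: typical wiring of hf_writer as a Gradio flagging callback.
# Component choices, ranges, and defaults are illustrative assumptions.
demo = gr.Interface(
    fn=single_generation,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(1, 100, value=10, step=1, label="min_length"),
        gr.Slider(10, 200, value=50, step=1, label="max_length"),
        gr.Slider(0.1, 2.0, value=1.0, label="temperature"),
        gr.Slider(0, 100, value=50, step=1, label="top_k"),
        gr.Slider(0.0, 1.0, value=0.95, label="top_p"),
        gr.Slider(1, 10, value=1, step=1, label="num_beams"),
        gr.Slider(1.0, 2.0, value=1.0, label="repetition_penalty"),
        gr.Checkbox(value=True, label="do_sample"),
    ],
    outputs=gr.Textbox(label="Generated text"),
    allow_flagging="manual",
    flagging_callback=hf_writer,  # flagged rows go to output-gpt2-indo-textgen
)
demo.launch()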