sharmavaruncs committed
Commit 571080e · 1 Parent(s): 5dc48c7

added values for temp,topp,topk

Files changed (1): app.py (+13, -7)
app.py CHANGED
@@ -348,7 +348,7 @@ def predict(audio_array,multiModal_model,key,tokenizer,text):
 
     return (label_mapping[maxVal]).capitalize()
 
-def GenerateText(emo,gpt_tokenizer,gpt_model,t_val):
+def GenerateText(emo,gpt_tokenizer,gpt_model,t_val,t_k,t_p):
     """
     Generate text based on a given emotion using a GPT-2 model.
 
@@ -374,8 +374,8 @@ def GenerateText(emo,gpt_tokenizer,gpt_model,t_val):
     gpt_model.to(device)
 
     # Generate multiple text samples based on the prompt
-    sample_outputs = gpt_model.generate(generated, do_sample=True, top_k=50,
-                                        max_length=30, top_p=0.95, temperature=t_val, num_return_sequences=10)#,no_repeat_ngram_size=1)
+    sample_outputs = gpt_model.generate(generated, do_sample=True, top_k=t_k,
+                                        max_length=30, top_p=t_p, temperature=t_val, num_return_sequences=10)#,no_repeat_ngram_size=1)
 
     # Extract and split the generated text into words
     outputs = set([gpt_tokenizer.decode(sample_output, skip_special_tokens=True).split(':')[-1] for sample_output in sample_outputs])
@@ -435,19 +435,25 @@ def process_file(ser_model,tokenizer,gpt_model,gpt_tokenizer):
     # Store the value of emo in the session state
     st.session_state.emo = emo
     if st.button(button_label1):
-        with st.spinner(st.markdown("<p style='font-size: 16px; font-weight: bold;'>Generating tips (it may take upto 3-4 mins depending upon the Network speed). Please wait...</p>", unsafe_allow_html=True)):
+        with st.spinner(st.markdown("<p style='font-size: 16px; font-weight: bold;'>Generating tips (it may take upto 2-3 mins). Please wait...</p>", unsafe_allow_html=True)):
             # Retrieve prompt from the emotion
             emo = st.session_state.emo
             # Call the function for GENAI
-            GenerateText(emo,gpt_tokenizer,gpt_model,1.1)
+            temp=0.9
+            top_k=50
+            top_p=0.8
+            GenerateText(emo,gpt_tokenizer,gpt_model,temp,top_k,top_p)
 
 
     if st.button(button_label2):
-        with st.spinner(st.markdown("<p style='font-size: 16px; font-weight: bold;'>Generating tips (it may take upto 3-4 mins depending upon the Network speed). Please wait...</p>", unsafe_allow_html=True)):
+        with st.spinner(st.markdown("<p style='font-size: 16px; font-weight: bold;'>Generating tips (it may take upto 2-3 mins). Please wait...</p>", unsafe_allow_html=True)):
             # Retrieve prompt from the emotion
             emo = st.session_state.emo
+            temp=0.2
+            top_k=90
+            top_p=0.95
             # Call the function for GENAI
-            GenerateText(emo,gpt_tokenizer,gpt_model,0.3)
+            GenerateText(emo,gpt_tokenizer,gpt_model,temp,top_k,top_p)
 
 def main():
     """