Commit 5dc48c7 · Parent(s): ec0da2b · added 2ndbutton

app.py CHANGED
@@ -348,7 +348,7 @@ def predict(audio_array,multiModal_model,key,tokenizer,text):
 
     return (label_mapping[maxVal]).capitalize()
 
-def GenerateText(emo,gpt_tokenizer,gpt_model):
+def GenerateText(emo,gpt_tokenizer,gpt_model,t_val):
     """
     Generate text based on a given emotion using a GPT-2 model.
 
@@ -375,7 +375,7 @@ def GenerateText(emo,gpt_tokenizer,gpt_model):
 
     # Generate multiple text samples based on the prompt
     sample_outputs = gpt_model.generate(generated, do_sample=True, top_k=50,
-                                        max_length=30, top_p=0.95, temperature=
+                                        max_length=30, top_p=0.95, temperature=t_val, num_return_sequences=10)#,no_repeat_ngram_size=1)
 
     # Extract and split the generated text into words
     outputs = set([gpt_tokenizer.decode(sample_output, skip_special_tokens=True).split(':')[-1] for sample_output in sample_outputs])
@@ -403,7 +403,8 @@ def process_file(ser_model,tokenizer,gpt_model,gpt_tokenizer):
         None
     """
     emo = ""
-
+    button_label1 = "Show Helpful Tips (More Creative)"
+    button_label2 = "Show Helpful Tips (More Balanced)"
    uploaded_file = st.file_uploader("Upload your file! It should be .wav", type=["wav"])
 
    if uploaded_file is not None:
@@ -433,12 +434,20 @@ def process_file(ser_model,tokenizer,gpt_model,gpt_tokenizer):
 
        # Store the value of emo in the session state
        st.session_state.emo = emo
-       if st.button(
+       if st.button(button_label1):
           with st.spinner(st.markdown("<p style='font-size: 16px; font-weight: bold;'>Generating tips (it may take upto 3-4 mins depending upon the Network speed). Please wait...</p>", unsafe_allow_html=True)):
              # Retrieve prompt from the emotion
              emo = st.session_state.emo
              # Call the function for GENAI
-             GenerateText(emo,gpt_tokenizer,gpt_model)
+             GenerateText(emo,gpt_tokenizer,gpt_model,1.1)
+
+
+       if st.button(button_label2):
+          with st.spinner(st.markdown("<p style='font-size: 16px; font-weight: bold;'>Generating tips (it may take upto 3-4 mins depending upon the Network speed). Please wait...</p>", unsafe_allow_html=True)):
+             # Retrieve prompt from the emotion
+             emo = st.session_state.emo
+             # Call the function for GENAI
+             GenerateText(emo,gpt_tokenizer,gpt_model,0.3)
 
 def main():
     """
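
For context, the sketch below illustrates the pattern this commit introduces: the sampling temperature is passed into the generation helper as a parameter (t_val) instead of being fixed inside the call to gpt_model.generate. This is a minimal, self-contained approximation, not the code from app.py; the function name generate_tips, the prompt format, and the pad_token_id argument are assumptions added for illustration.

from transformers import GPT2LMHeadModel, GPT2Tokenizer

def generate_tips(emotion, gpt_tokenizer, gpt_model, t_val):
    # Build a short prompt from the detected emotion (assumed format).
    prompt = f"{emotion}:"
    generated = gpt_tokenizer.encode(prompt, return_tensors="pt")

    # Sample several continuations; t_val controls how adventurous the
    # sampling is (higher = more varied, lower = more conservative).
    sample_outputs = gpt_model.generate(
        generated,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        max_length=30,
        temperature=t_val,
        num_return_sequences=10,
        pad_token_id=gpt_tokenizer.eos_token_id,
    )

    # Keep only the text after the "emotion:" prefix, de-duplicated.
    return set(
        gpt_tokenizer.decode(s, skip_special_tokens=True).split(":")[-1]
        for s in sample_outputs
    )

# Example usage (assumed model/tokenizer setup):
#   gpt_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
#   gpt_model = GPT2LMHeadModel.from_pretrained("gpt2")
#   generate_tips("Happy", gpt_tokenizer, gpt_model, t_val=1.1)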
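
And a condensed sketch of the two-button wiring added to process_file: each button triggers generation with a different temperature, 1.1 for the "More Creative" tips and 0.3 for the "More Balanced" ones. It reuses the hypothetical generate_tips helper above and simplifies the spinner markup; only the button labels and temperature values are taken from the diff.

import streamlit as st

def show_tip_buttons(gpt_tokenizer, gpt_model):
    # Emotion detected earlier in the app and cached in session state.
    emo = st.session_state.get("emo", "")

    if st.button("Show Helpful Tips (More Creative)"):
        with st.spinner("Generating tips. Please wait..."):
            # Higher temperature -> more varied suggestions.
            st.write(generate_tips(emo, gpt_tokenizer, gpt_model, 1.1))

    if st.button("Show Helpful Tips (More Balanced)"):
        with st.spinner("Generating tips. Please wait..."):
            # Lower temperature -> suggestions closer to the most likely text.
            st.write(generate_tips(emo, gpt_tokenizer, gpt_model, 0.3))

A lower temperature sharpens the next-token distribution, so 0.3 produces safer, more repetitive tips, while 1.1 flattens it and yields more varied output.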