sharmavaruncs committed on
Commit
fc991f2
·
1 Parent(s): 4d5780b

added suppression of spinner message

Browse files
Files changed (1) hide show
  1. app.py +5 -1
app.py CHANGED
@@ -148,6 +148,7 @@ class MultimodalModel(nn.Module):
148
  logits = self.classifier(concat_output)
149
  return logits
150
 
 
151
  def speechtoText(wavfile):
152
  return speech_model.transcribe(wavfile)['text']
153
 
@@ -218,7 +219,7 @@ def load_model():
218
  #tokenizer_gpt = AutoTokenizer.from_pretrained("netgvarun2005/GPTVirtualTherapistTokenizer", pad_token='<|pad|>',bos_token='<|startoftext|>',eos_token='<|endoftext|>')
219
  tokenizer_gpt = AutoTokenizer.from_pretrained("netgvarun2005/GPTTherapistDeepSpeedTokenizer", pad_token='<|pad|>',bos_token='<|startoftext|>',eos_token='<|endoftext|>')
220
  #model_gpt = AutoModelForCausalLM.from_pretrained("netgvarun2005/GPTVirtualTherapist")
221
- model_gpt = AutoModelForCausalLM.from_pretrained("netgvarun2005/GPTTherapistDeepSpeedModel").to(device)
222
 
223
  return multiModel,tokenizer,model_gpt,tokenizer_gpt
224
 
@@ -296,6 +297,9 @@ def GenerateText(emo,gpt_tokenizer,gpt_model):
296
 
297
  generated = gpt_tokenizer(prompt, return_tensors="pt").input_ids
298
 
 
 
 
299
  sample_outputs = gpt_model.generate(generated, do_sample=True, top_k=50,
300
  max_length=30, top_p=0.95, temperature=1.1, num_return_sequences=10)#,no_repeat_ngram_size=1)
301
 
 
148
  logits = self.classifier(concat_output)
149
  return logits
150
 
151
+ @st.cache(show_spinner=False)
152
  def speechtoText(wavfile):
153
  return speech_model.transcribe(wavfile)['text']
154
 
 
219
  #tokenizer_gpt = AutoTokenizer.from_pretrained("netgvarun2005/GPTVirtualTherapistTokenizer", pad_token='<|pad|>',bos_token='<|startoftext|>',eos_token='<|endoftext|>')
220
  tokenizer_gpt = AutoTokenizer.from_pretrained("netgvarun2005/GPTTherapistDeepSpeedTokenizer", pad_token='<|pad|>',bos_token='<|startoftext|>',eos_token='<|endoftext|>')
221
  #model_gpt = AutoModelForCausalLM.from_pretrained("netgvarun2005/GPTVirtualTherapist")
222
+ model_gpt = AutoModelForCausalLM.from_pretrained("netgvarun2005/GPTTherapistDeepSpeedModel")
223
 
224
  return multiModel,tokenizer,model_gpt,tokenizer_gpt
225
 
 
297
 
298
  generated = gpt_tokenizer(prompt, return_tensors="pt").input_ids
299
 
300
+ generated = generated.to(device)
301
+ gpt_model.to(device)
302
+
303
  sample_outputs = gpt_model.generate(generated, do_sample=True, top_k=50,
304
  max_length=30, top_p=0.95, temperature=1.1, num_return_sequences=10)#,no_repeat_ngram_size=1)
305