Spaces:
Sleeping
Sleeping
Commit
·
4de9cc5
1
Parent(s):
1e1a787
removed hardcoded tokenizer directories
Browse files
app.py
CHANGED
@@ -52,14 +52,8 @@ speech_model = whisper.load_model("base")
|
|
52 |
num_labels=7
|
53 |
label_mapping = ['angry', 'calm', 'disgust', 'fearful', 'happy', 'sad', 'surprised']
|
54 |
|
55 |
-
# Create the path to the file in the parent directory
|
56 |
-
tokenizer1_dir = "./EmotionDetector/Models/Tokenizer/"
|
57 |
-
|
58 |
# Define your model name from the Hugging Face model hub
|
59 |
model_weights_path = "https://huggingface.co/netgvarun2005/MultiModalBertHubert/resolve/main/MultiModal_model_state_dict.pth"
|
60 |
-
# GenAI model
|
61 |
-
tokenizer2_dir = "./GenAI/Tokenizer/"
|
62 |
-
|
63 |
|
64 |
# Emo Detector
|
65 |
model_id = "facebook/hubert-base-ls960"
|
@@ -198,10 +192,10 @@ def load_model():
|
|
198 |
multiModel.load_state_dict(torch.hub.load_state_dict_from_url(model_weights_path, map_location=device), strict=False)
|
199 |
|
200 |
# multiModel.load_state_dict(torch.load(file_path + "/MultiModal_model_state_dict.pth",map_location=device),strict=False)
|
201 |
-
tokenizer = AutoTokenizer.from_pretrained(
|
202 |
|
203 |
# GenAI
|
204 |
-
tokenizer_gpt = AutoTokenizer.from_pretrained(
|
205 |
model_gpt = AutoModelForCausalLM.from_pretrained("netgvarun2005/GPTVirtualTherapist")
|
206 |
|
207 |
return multiModel,tokenizer,model_gpt,tokenizer_gpt
|
|
|
52 |
num_labels=7
|
53 |
label_mapping = ['angry', 'calm', 'disgust', 'fearful', 'happy', 'sad', 'surprised']
|
54 |
|
|
|
|
|
|
|
55 |
# Define your model name from the Hugging Face model hub
|
56 |
model_weights_path = "https://huggingface.co/netgvarun2005/MultiModalBertHubert/resolve/main/MultiModal_model_state_dict.pth"
|
|
|
|
|
|
|
57 |
|
58 |
# Emo Detector
|
59 |
model_id = "facebook/hubert-base-ls960"
|
|
|
192 |
multiModel.load_state_dict(torch.hub.load_state_dict_from_url(model_weights_path, map_location=device), strict=False)
|
193 |
|
194 |
# multiModel.load_state_dict(torch.load(file_path + "/MultiModal_model_state_dict.pth",map_location=device),strict=False)
|
195 |
+
tokenizer = AutoTokenizer.from_pretrained("netgvarun2005/MultiModalBertHubertTokenizer")
|
196 |
|
197 |
# GenAI
|
198 |
+
tokenizer_gpt = AutoTokenizer.from_pretrained("netgvarun2005/GPTVirtualTherapistTokenizer", pad_token='<|pad|>',bos_token='<|startoftext|>',eos_token='<|endoftext|>')
|
199 |
model_gpt = AutoModelForCausalLM.from_pretrained("netgvarun2005/GPTVirtualTherapist")
|
200 |
|
201 |
return multiModel,tokenizer,model_gpt,tokenizer_gpt
|