Commit 4bb9090 · Parent(s): 550afd7
corrected local paths
app.py CHANGED
```diff
@@ -56,20 +56,17 @@ label_mapping = ['angry', 'calm', 'disgust', 'fearful', 'happy', 'sad', 'surpris
 current_dir = os.path.dirname(os.path.abspath(__file__))
 
 # Create the path to the file in the parent directory
-
-#file_path = os.path.join(parent_dir, "MultiModal/MultiModal_model_state_dict.pth")
+tokenizer1_dir = os.path.abspath(os.path.join(current_dir, "/EmotionDetector/Models/Tokenizer/"))
 
 # Define your model name from the Hugging Face model hub
 model_weights_path = "https://huggingface.co/netgvarun2005/MultiModalBertHubert/resolve/main/MultiModal_model_state_dict.pth"
 # GenAI model
-
+tokenizer2_dir = os.path.abspath(os.path.join(current_dir, "/GenAI/Tokenizer/"))
 
 
 # Emo Detector
 model_id = "facebook/hubert-base-ls960"
 bert_model_name = "bert-base-uncased"
-tokenizerDir = os.path.join(parent_dir, 'Tokenizer\\')
-
 
 def config():
     # Loading Image using PIL
@@ -204,10 +201,10 @@ def load_model():
     multiModel.load_state_dict(torch.hub.load_state_dict_from_url(model_weights_path, map_location=device), strict=False)
 
     # multiModel.load_state_dict(torch.load(file_path + "/MultiModal_model_state_dict.pth",map_location=device),strict=False)
-    tokenizer = AutoTokenizer.from_pretrained(
+    tokenizer = AutoTokenizer.from_pretrained(tokenizer1_dir)
 
     # GenAI
-    tokenizer_gpt = AutoTokenizer.from_pretrained(
+    tokenizer_gpt = AutoTokenizer.from_pretrained(tokenizer2_dir, pad_token='<|pad|>',bos_token='<|startoftext|>',eos_token='<|endoftext|>')
     model_gpt = AutoModelForCausalLM.from_pretrained("netgvarun2005/GPTVirtualTherapist")
 
     return multiModel,tokenizer,model_gpt,tokenizer_gpt
```
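One detail worth flagging about the new path lines: `os.path.join` discards every earlier component as soon as it meets an absolute one, so a second argument that starts with `/` makes the result independent of `current_dir`. The sketch below is a minimal, hypothetical illustration of that behavior and of a relative spelling that stays anchored to app.py's directory; the directory names come from the diff, the example filesystem layout is an assumption.

```python
import os

# Assumption: app.py lives at /home/user/app/app.py (a typical Space layout).
current_dir = os.path.dirname(os.path.abspath(__file__))

# With a leading slash, os.path.join drops current_dir entirely: the second
# component is already absolute, so abspath resolves from the filesystem root.
abs_style = os.path.abspath(os.path.join(current_dir, "/EmotionDetector/Models/Tokenizer/"))
# -> "/EmotionDetector/Models/Tokenizer" on POSIX, regardless of current_dir

# Without the leading slash, the path stays relative to app.py's directory.
rel_style = os.path.abspath(os.path.join(current_dir, "EmotionDetector/Models/Tokenizer"))
# -> "/home/user/app/EmotionDetector/Models/Tokenizer" in the assumed layout
```

If the tokenizer folders actually sit next to app.py in the repository, the relative spelling is the one that keeps the lookup anchored there.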
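For reference, here is a minimal sketch of how the pieces touched by this commit fit together in `load_model()`: tokenizers read from the local directories defined at module level, the fine-tuned state dict fetched from the Hub URL, and the GPT model loaded by repo id. The imports, `device` setup, and the handling of the multimodal model's construction are assumptions based on the identifiers visible in the diff, not the Space's actual code.

```python
import os
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Module-level paths as set up by this commit (leading-slash caveat noted above).
current_dir = os.path.dirname(os.path.abspath(__file__))
tokenizer1_dir = os.path.abspath(os.path.join(current_dir, "/EmotionDetector/Models/Tokenizer/"))
tokenizer2_dir = os.path.abspath(os.path.join(current_dir, "/GenAI/Tokenizer/"))
model_weights_path = "https://huggingface.co/netgvarun2005/MultiModalBertHubert/resolve/main/MultiModal_model_state_dict.pth"


def load_model():
    # Fine-tuned BERT+HuBERT weights, downloaded and cached from the Hub URL.
    state_dict = torch.hub.load_state_dict_from_url(model_weights_path, map_location=device)
    # In app.py this is applied to the multimodal model, whose construction is
    # outside the diff:  multiModel.load_state_dict(state_dict, strict=False)

    # Emotion-detector tokenizer, read from the local directory defined above.
    tokenizer = AutoTokenizer.from_pretrained(tokenizer1_dir)

    # GenAI tokenizer with the special tokens the GPT model expects, plus the model itself.
    tokenizer_gpt = AutoTokenizer.from_pretrained(
        tokenizer2_dir,
        pad_token="<|pad|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
    )
    model_gpt = AutoModelForCausalLM.from_pretrained("netgvarun2005/GPTVirtualTherapist")

    return state_dict, tokenizer, model_gpt, tokenizer_gpt
```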