sharmavaruncs committed on
Commit 24bbf89 · 1 Parent(s): ccd2e4a

corrected paths

Files changed (1): app.py (+9 -4)
app.py CHANGED
@@ -4,10 +4,8 @@ import matplotlib.pyplot as plt
 import os
 import librosa
 import time
-import IPython.display as ipd
 from matplotlib import cm
 import soundfile as sf
-from IPython.display import clear_output
 import sounddevice as sd
 import torch
 import torch.nn as nn
@@ -59,7 +57,10 @@ current_dir = os.path.dirname(os.path.abspath(__file__))
 
 # Create the path to the file in the parent directory
 parent_dir = os.path.abspath(os.path.join(current_dir, "../EmotionDetector/Models/"))
-file_path = os.path.join(parent_dir, "MultiModal/MultiModal_model_state_dict.pth")
+#file_path = os.path.join(parent_dir, "MultiModal/MultiModal_model_state_dict.pth")
+
+# Define your model name from the Hugging Face model hub
+model_weights_path = "https://huggingface.co/netgvarun2005/MultiModalBertHubert/blob/main/MultiModal_model_state_dict.pth"
 
 # GenAI model
 parent_dir2 = os.path.abspath(os.path.join(current_dir, "../GenAI/"))
@@ -199,7 +200,11 @@ def preprocessWavFile(wavfile):
 def load_model():
     # Load the model
     multiModel = MultimodalModel(bert_model_name, num_labels)
-    multiModel.load_state_dict(torch.load(file_path + "/MultiModal_model_state_dict.pth",map_location=device),strict=False)
+
+    # Load the model weights directly from Hugging Face Spaces
+    multiModel.load_state_dict(torch.hub.load_state_dict_from_url(model_weights_path, map_location=device), strict=False)
+
+    # multiModel.load_state_dict(torch.load(file_path + "/MultiModal_model_state_dict.pth",map_location=device),strict=False)
     tokenizer = AutoTokenizer.from_pretrained(tokenizerDir)
 
     # GenAI
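For reference, a hedged sketch of an equivalent way to fetch the same checkpoint: torch.hub.load_state_dict_from_url expects a direct link to the file, which on the Hugging Face Hub is the /resolve/main/ form of the URL rather than /blob/main/, whereas huggingface_hub.hf_hub_download resolves and caches the file from just the repo id and filename. The repo id and filename below are read off model_weights_path in the diff; MultimodalModel, bert_model_name, num_labels, and device are assumed to be defined in app.py as shown above, and load_multimodal_weights is a hypothetical helper, not part of this commit.

# Sketch only: an alternative to the committed torch.hub.load_state_dict_from_url call,
# using the repo id and filename taken from the model_weights_path URL above.
import torch
from huggingface_hub import hf_hub_download

def load_multimodal_weights(model, device):
    # Download (and cache) the checkpoint from the Hub; hf_hub_download returns the
    # local path of the resolved file, so no /blob/ vs /resolve/ URL handling is needed.
    checkpoint_path = hf_hub_download(
        repo_id="netgvarun2005/MultiModalBertHubert",
        filename="MultiModal_model_state_dict.pth",
    )
    # Load the state dict onto the target device and apply it non-strictly,
    # mirroring the strict=False behaviour used in the commit.
    state_dict = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(state_dict, strict=False)
    return model

# Hypothetical usage, mirroring load_model() in app.py:
# multiModel = MultimodalModel(bert_model_name, num_labels)
# multiModel = load_multimodal_weights(multiModel, device)

If the committed call is kept instead, pointing model_weights_path at the /resolve/main/ form of the URL gives load_state_dict_from_url the raw checkpoint bytes rather than the Hub's HTML page.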