import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import librosa
import time
import IPython.display as ipd
from matplotlib import cm
import soundfile as sf
from IPython.display import clear_output
import sounddevice as sd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
from PIL import Image
import torch.nn.functional as F
import streamlit as st
import tempfile
import noisereduce as nr
import altair as alt
import pyaudio
import wave
import whisper
from transformers import (
    HubertForSequenceClassification,
    Wav2Vec2FeatureExtractor,
    AutoModel,
    AutoTokenizer,
    AutoModelForCausalLM,
)

emo2promptMapping = {
    'Angry': 'ANGRY',
    'Calm': 'CALM',
    'Disgust': 'DISGUSTED',
    'Fearful': 'FEARFUL',
    'Happy': 'HAPPY',
    'Sad': 'SAD',
    'Surprised': 'SURPRISED'
}

# Check if GPU (cuda) is available
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

# Load speech-to-text model
speech_model = whisper.load_model("base")

# Define label-related info
num_labels = 7
label_mapping = ['angry', 'calm', 'disgust', 'fearful', 'happy', 'sad', 'surprised']

# Get the path of the current directory
current_dir = os.path.dirname(os.path.abspath(__file__))

# Create the path to the emotion-detector model files in the parent directory
parent_dir = os.path.abspath(os.path.join(current_dir, "../EmotionDetector/Models/"))
file_path = os.path.join(parent_dir, "MultiModal/MultiModal_model_state_dict.pth")

# GenAI model
parent_dir2 = os.path.abspath(os.path.join(current_dir, "../GenAI/"))

# Emo Detector
model_id = "facebook/hubert-base-ls960"
bert_model_name = "bert-base-uncased"
tokenizerDir = os.path.join(parent_dir, "Tokenizer")


def config():
    # Load the page icon using PIL
    im = Image.open('./icon.png')

    # Set the page configuration with the title and icon
    st.set_page_config(page_title="Virtual Therapist", page_icon=im)

    # Add custom CSS styles
    st.markdown("""
    """, unsafe_allow_html=True)

    # Render the sidebar recorder title
    st.sidebar.title("Sound Recorder")

    # Define a custom style for the title
    title_style = """
    """

    # Display the title with the custom style
    st.markdown(title_style, unsafe_allow_html=True)
    st.markdown("# WELCOME! HOW ARE YOU FEELING? PLEASE RECORD AN AUDIO!", unsafe_allow_html=True)
    st.markdown("# BASED ON YOUR EMOTIONAL STATE, I WILL SUGGEST SOME TIPS!", unsafe_allow_html=True)
    return


class MultimodalModel(nn.Module):
    '''
    Custom PyTorch model that takes both the audio features and the text embeddings as input,
    and concatenates the mean-pooled last hidden states of the Hubert and BERT encoders.
    '''

    def __init__(self, bert_model_name, num_labels):
        super().__init__()
        self.hubert = HubertForSequenceClassification.from_pretrained(
            "netgvarun2005/HubertStandaloneEmoDetector", num_labels=num_labels
        ).hubert
        self.bert = AutoModel.from_pretrained(bert_model_name)
        self.classifier = nn.Linear(self.hubert.config.hidden_size + self.bert.config.hidden_size, num_labels)

    def forward(self, input_values, text):
        hubert_output = self.hubert(input_values).last_hidden_state
        bert_output = self.bert(text).last_hidden_state

        # Apply mean pooling along the sequence dimension
        hubert_output = hubert_output.mean(dim=1)
        bert_output = bert_output.mean(dim=1)

        concat_output = torch.cat((hubert_output, bert_output), dim=-1)
        logits = self.classifier(concat_output)
        return logits


def speechtoText(wavfile):
    return speech_model.transcribe(wavfile)['text']


def resampleaudio(wavfile):
    audio, sr = librosa.load(wavfile, sr=None)

    # Set the desired target sample rate
    target_sample_rate = 16000

    # Resample the audio to the target sample rate
    resampled_audio = librosa.resample(audio, orig_sr=sr, target_sr=target_sample_rate)

    sf.write(wavfile, resampled_audio, target_sample_rate)
    return wavfile


def noiseReduction(wavfile):
    audio, sr = librosa.load(wavfile, sr=None)

    # Set parameters for noise reduction
    n_fft = 2048      # FFT window size
    hop_length = 512  # Hop length for STFT

    # Perform noise reduction
    reduced_noise = nr.reduce_noise(y=audio, sr=sr, n_fft=n_fft, hop_length=hop_length)

    # Save the denoised audio to a new WAV file
    sf.write(wavfile, reduced_noise, sr)
    return wavfile


def removeSilence(wavfile):
    # Load the audio file
    audio, sr = librosa.load(wavfile, sr=None)

    # Split the audio file based on silence
    clips = librosa.effects.split(audio, top_db=40)

    # Combine the non-silent audio clips
    non_silent_audio = []
    for start, end in clips:
        non_silent_audio.extend(audio[start:end])

    # Save the audio without silence to a new WAV file
    sf.write(wavfile, non_silent_audio, sr)
    return wavfile


def preprocessWavFile(wavfile):
    resampledwavfile = resampleaudio(wavfile)
    denoised_file = noiseReduction(resampledwavfile)
    return removeSilence(denoised_file)


@st.cache(allow_output_mutation=True)
def load_model():
    # Load the multimodal emotion-detection model
    multiModel = MultimodalModel(bert_model_name, num_labels)
    multiModel.load_state_dict(torch.load(file_path, map_location=device), strict=False)
    tokenizer = AutoTokenizer.from_pretrained(tokenizerDir)

    # GenAI
    tokenizer_gpt = AutoTokenizer.from_pretrained(
        os.path.join(parent_dir2, "Tokenizer"),
        pad_token='<|pad|>', bos_token='<|startoftext|>', eos_token='<|endoftext|>'
    )
    model_gpt = AutoModelForCausalLM.from_pretrained("netgvarun2005/GPTVirtualTherapist")

    return multiModel, tokenizer, model_gpt, tokenizer_gpt


def predict(audio_array, multiModal_model, key, tokenizer, text):
    input_text = tokenizer(text, return_tensors="pt", truncation=True, padding=True)

    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_id)
    input_audio = feature_extractor(
        raw_speech=audio_array,
        sampling_rate=16000,
        padding=True,
        return_tensors="pt"
    )

    logits = multiModal_model(input_audio["input_values"], input_text["input_ids"])
    probabilities = F.softmax(logits, dim=1)
    _, predicted = torch.max(probabilities, 1)

    class_prob = probabilities.tolist()[0]
    class_prob = [round(value, 2) for value in class_prob]
    maxVal = np.argmax(class_prob)

    # Display a message if no emotion label could be inferred
    if label_mapping[predicted] == "":
        st.write("Inference impossible, a problem occurred with your audio or your parameters, we apologize :(")

    return (label_mapping[maxVal]).capitalize()


def record_audio(output_file, duration=5):
    sd.wait()  # Wait for any pending audio I/O to finish
    time.sleep(0.4)
    st.sidebar.markdown("Recording...", unsafe_allow_html=True)

    chunk = 1024
    sample_format = pyaudio.paInt16
    channels = 2
    fs = 44100

    p = pyaudio.PyAudio()
    stream = p.open(format=sample_format,
                    channels=channels,
                    rate=fs,
                    frames_per_buffer=chunk,
                    input=True)

    frames = []
    for _ in range(int(fs / chunk * duration)):
        data = stream.read(chunk)
        frames.append(data)

    stream.stop_stream()
    stream.close()
    p.terminate()

    # Save the recorded frames as a WAV file
    wf = wave.open(output_file, 'wb')
    wf.setnchannels(channels)
    wf.setsampwidth(p.get_sample_size(sample_format))
    wf.setframerate(fs)
    wf.writeframes(b''.join(frames))
    wf.close()

    time.sleep(0.5)
    st.sidebar.markdown("Recording finished!", unsafe_allow_html=True)
    time.sleep(0.5)
", unsafe_allow_html=True) time.sleep(0.5) def GenerateText(emo,gpt_tokenizer,gpt_model): prompt = f'Generating transcriptions! Please wait...
", unsafe_allow_html=True) transcription = speechtoText(output_wav_file) emo = predict(audio_array,ser_model,2,tokenizer,transcription) # Display the transcription in a textbox st.sidebar.text_area("Transcription", transcription, height=25) txt = f"You seem to be {(emo2promptMapping[emo]).capitalize()}!\n Click on 'Show Helpful Tips' button to proceed further." st.markdown(f"