Vageesh1 committed on
Commit 79050de · 1 Parent(s): 3fea08c

Update app.py

Files changed (1)
  1. app.py +58 -58
app.py CHANGED
@@ -62,65 +62,65 @@ def ui():
         template=template
     )
 
-    llm_chain = LLMChain(
-        llm = ChatOpenAI(temperature=0.0,model_name='gpt-3.5-turbo'),
-        prompt=prompt,
-        verbose=True,
-        memory=ConversationBufferWindowMemory(k=2)
-    )
-
-    if 'history' not in st.session_state:
-        st.session_state['history'] = []
-
-    if 'generated' not in st.session_state:
-        st.session_state['generated'] = ["Hello ! Ask me anything about " + " 🤗"]
-
-    if 'past' not in st.session_state:
-        st.session_state['past'] = ["Hey ! 👋"]
-
-    if user_api_key is not None and user_api_key.strip() != "":
-        eleven_labs_api_key = st.sidebar.text_input(
-            label="#### Your Eleven Labs API key 👇",
-            placeholder="Paste your Eleven Labs API key",
-            type="password")
-
-        set_api_key(user_api_key)
-
-    #container for the chat history
-    response_container = st.container()
-    #container for the user's text input
-    container = st.container()
-
-    with container:
-        with st.form(key='my_form', clear_on_submit=True):
-            audio_file = audiorecorder("Click to record", "Recording...")
-            wav_file = open("./output_audio.mp3", "wb")
-            wav_file.write(audio.tobytes())
-            submit_button = st.form_submit_button(label='Send')
-            if submit_button and audio_file:
-                output_file_path = "./output_audio.mp3"
-                # save_uploaded_file_as_mp3(audio_file,output_file_path )
-                hindi_input_audio,sample_rate=torchaudio.load(output_file_path)
-                #applying the audio recognition
-                hindi_transcription=parse_transcription('./output_audio.mp3')
-                st.success(f"Audio file saved as {output_file_path}")
-                #convert hindi to english
-                english_input=hindi_to_english(hindi_transcription)
-                #feeding the input to the LLM
-                english_output = conversational_chat(english_input)
-                #converting english to hindi
-                hin_output=translate_english_to_hindi(english_output)
-                #getting the hindi_tts
-                hindi_output_audio=hindi_tts(hin_output)
-
-                st.session_state['past'].append(hindi_input_audio)
-                st.session_state['generated'].append(hindi_output_audio)
-
-    if st.session_state['generated']:
-        with response_container:
-            for i in range(len(st.session_state['generated'])):
-                st.audio(st.session_state["past"][i],format='audio/wav')
-                st.audio(st.session_state["generated"][i],format='audio/wav')
+    llm_chain = LLMChain(
+        llm = ChatOpenAI(temperature=0.0,model_name='gpt-3.5-turbo'),
+        prompt=prompt,
+        verbose=True,
+        memory=ConversationBufferWindowMemory(k=2)
+    )
+
+    if 'history' not in st.session_state:
+        st.session_state['history'] = []
+
+    if 'generated' not in st.session_state:
+        st.session_state['generated'] = ["Hello ! Ask me anything about " + " 🤗"]
+
+    if 'past' not in st.session_state:
+        st.session_state['past'] = ["Hey ! 👋"]
+
+    if user_api_key is not None and user_api_key.strip() != "":
+        eleven_labs_api_key = st.sidebar.text_input(
+            label="#### Your Eleven Labs API key 👇",
+            placeholder="Paste your Eleven Labs API key",
+            type="password")
+
+        set_api_key(user_api_key)
+
+    #container for the chat history
+    response_container = st.container()
+    #container for the user's text input
+    container = st.container()
+
+    with container:
+        with st.form(key='my_form', clear_on_submit=True):
+            audio_file = audiorecorder("Click to record", "Recording...")
+            wav_file = open("./output_audio.mp3", "wb")
+            wav_file.write(audio.tobytes())
+            submit_button = st.form_submit_button(label='Send')
+            if submit_button and audio_file:
+                output_file_path = "./output_audio.mp3"
+                # save_uploaded_file_as_mp3(audio_file,output_file_path )
+                hindi_input_audio,sample_rate=torchaudio.load(output_file_path)
+                #applying the audio recognition
+                hindi_transcription=parse_transcription('./output_audio.mp3')
+                st.success(f"Audio file saved as {output_file_path}")
+                #convert hindi to english
+                english_input=hindi_to_english(hindi_transcription)
+                #feeding the input to the LLM
+                english_output = conversational_chat(english_input)
+                #converting english to hindi
+                hin_output=translate_english_to_hindi(english_output)
+                #getting the hindi_tts
+                hindi_output_audio=hindi_tts(hin_output)
+
+                st.session_state['past'].append(hindi_input_audio)
+                st.session_state['generated'].append(hindi_output_audio)
+
+    if st.session_state['generated']:
+        with response_container:
+            for i in range(len(st.session_state['generated'])):
+                st.audio(st.session_state["past"][i],format='audio/wav')
+                st.audio(st.session_state["generated"][i],format='audio/wav')
 
 if __name__ == '__main__':
     ui()
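
For orientation, the block touched by this commit implements one turn of a Hindi voice chat: record audio in a Streamlit form, save it to ./output_audio.mp3, transcribe the Hindi speech, translate it to English, feed it to the LLM chain, translate the reply back to Hindi, and synthesize Hindi audio for playback. The sketch below is a minimal restatement of that flow, not the app's actual code: it assumes the stage helpers defined elsewhere in app.py (parse_transcription, hindi_to_english, conversational_chat, translate_english_to_hindi, hindi_tts) are passed in as plain callables.

from typing import Callable, Tuple

def voice_chat_turn(
    audio_path: str,
    transcribe: Callable[[str], str],   # assumed: parse_transcription (Hindi speech -> Hindi text)
    to_english: Callable[[str], str],   # assumed: hindi_to_english
    chat: Callable[[str], str],         # assumed: conversational_chat (wraps the LLMChain)
    to_hindi: Callable[[str], str],     # assumed: translate_english_to_hindi
    tts: Callable[[str], bytes],        # assumed: hindi_tts (Hindi text -> audio bytes)
) -> Tuple[str, bytes]:
    # Hindi audio file -> Hindi text
    hindi_text = transcribe(audio_path)
    # Hindi text -> English prompt for the chain
    english_in = to_english(hindi_text)
    # English prompt -> English reply
    english_out = chat(english_in)
    # English reply -> Hindi text, then Hindi text -> Hindi audio
    hindi_out = to_hindi(english_out)
    return hindi_out, tts(hindi_out)

In the diff, the resulting Hindi audio is appended to st.session_state['generated'] and replayed with st.audio, which corresponds to the second element returned here.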