Futuresony committed
Commit 0f1e860 · verified · 1 Parent(s): 447d428

Create app.py

Files changed (1):
  1. app.py +35 -0
app.py ADDED
@@ -0,0 +1,35 @@
+ import gradio as gr
+ import librosa
+ from asr import transcribe, ASR_EXAMPLES, ASR_NOTE
+ from lid import identify  # Import language identification model
+
+ # Function to detect language and transcribe automatically
+ def auto_detect_and_transcribe(audio):
+     detected_lang = identify(audio)  # Identify language from audio
+     if detected_lang in ["swh", "eng"]:  # Ensure it's either Swahili or English
+         return f"[Detected Language: {detected_lang.upper()}]\n\n" + transcribe(audio)
+     return "Error: Unsupported language detected."
+
+ # Speech-to-Text Interface with Auto Language Detection
+ mms_transcribe = gr.Interface(
+     fn=auto_detect_and_transcribe,
+     inputs=gr.Audio(),
+     outputs="text",
+     examples=ASR_EXAMPLES,
+     title="Speech-to-Text (Automatic Language Detection)",
+     description="Upload or record audio, and the model will detect if it is Swahili or English before transcribing.",
+     article=ASR_NOTE,
+     allow_flagging="never",
+ )
+
+ # Main Gradio App
+ with gr.Blocks() as demo:
+     gr.Markdown("<p align='center' style='font-size: 20px;'>MMS Speech-to-Text</p>")
+     gr.HTML("<center>Convert speech to text while automatically detecting Swahili or English.</center>")
+
+     mms_transcribe.render()
+
+ if __name__ == "__main__":
+     demo.queue()
+     demo.launch()
+
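Note: app.py imports from two local modules that are not part of this commit: asr (providing transcribe, ASR_EXAMPLES, and ASR_NOTE) and lid (providing identify). The following is a minimal sketch of the interface those modules would need to expose for this app.py to run. The checkpoint names (facebook/mms-1b-all for ASR, facebook/mms-lid-126 for language identification), the 16 kHz resampling, and the _to_waveform helper are illustrative assumptions, not taken from this repository, and per-language ASR adapter loading is omitted.

# asr_lid_sketch.py -- hypothetical stand-in for the asr/lid modules imported by app.py.
# Checkpoint names and audio handling below are assumptions; the real modules may differ.
import numpy as np
import librosa
from transformers import pipeline

# Assumed MMS checkpoints: multilingual ASR and spoken language identification.
# Note: facebook/mms-1b-all loads its English adapter by default; per-language
# adapter switching (e.g. for Swahili) is omitted from this sketch.
_asr = pipeline("automatic-speech-recognition", model="facebook/mms-1b-all")
_lid = pipeline("audio-classification", model="facebook/mms-lid-126")

ASR_EXAMPLES = []  # e.g. [["samples/swahili.wav"], ["samples/english.wav"]]
ASR_NOTE = "Transcription quality depends on recording clarity and background noise."


def _to_waveform(audio):
    # gr.Audio() can hand the function either a file path or a
    # (sample_rate, ndarray) tuple; normalize both to 16 kHz float32 mono.
    if isinstance(audio, tuple):
        sr, data = audio
        data = data.astype(np.float32)
        if data.ndim > 1:                    # stereo -> mono
            data = data.mean(axis=1)
        if np.abs(data).max() > 1.0:         # int16 PCM -> [-1.0, 1.0]
            data = data / 32768.0
        return librosa.resample(data, orig_sr=sr, target_sr=16_000)
    waveform, _ = librosa.load(audio, sr=16_000, mono=True)
    return waveform


def identify(audio):
    # Return the ISO 639-3 code of the top-scoring language (e.g. "swh", "eng").
    return _lid(_to_waveform(audio))[0]["label"]


def transcribe(audio):
    # Return the text produced by the ASR pipeline for the given audio.
    return _asr(_to_waveform(audio))["text"]

With modules along these lines on the Python path, running python app.py would launch the demo locally, assuming gradio, librosa, transformers, and torch are installed.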