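"""Gradio demo: speech-to-text with automatic Swahili/English detection.

Language identification and transcription are provided by the `lid` and `asR`
modules imported below; this file only wires them into a Gradio interface.
"""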
import gradio as gr
from asr import transcribe, ASR_EXAMPLES, ASR_NOTE
from lid import identify  # Import Language Identification model

# Function to detect language and transcribe speech
def auto_transcribe(audio):
    # Detect the spoken language. `identify` is expected to return a label whose
    # prefix is an ISO 639-3 code (e.g. "swh" for Swahili, "eng" for English).
    detected_lang = identify(audio)
    
    # Debug: Print detected language
    print(f"Detected Language: {detected_lang}")  

    # Normalize the detected label to one of the two supported languages; reject anything else
    if detected_lang.startswith("swh"):
        detected_lang = "swh"
    elif detected_lang.startswith("eng"):
        detected_lang = "eng"
    else:
        return f"Error: Detected language '{detected_lang}' is not supported."

    # Transcribe using detected language
    return transcribe(audio, lang=detected_lang)

# Speech-to-Text Interface with Auto Language Detection
mms_transcribe = gr.Interface(
    fn=auto_transcribe,
    inputs=gr.Audio(),
    outputs="text",
    examples=ASR_EXAMPLES,
    title="Speech-to-Text (Auto Language Detection)",
    description="Automatically detects whether speech is in Swahili or English and transcribes it.",
    article=ASR_NOTE,
    allow_flagging="never",
)

# Main Gradio App
with gr.Blocks() as demo:
    gr.Markdown("<p align='center' style='font-size: 20px;'>MMS Speech-to-Text</p>")
    gr.HTML("<center>Automatically detects and transcribes Swahili or English speech.</center>")

    mms_transcribe.render()

if __name__ == "__main__":
    demo.queue()
    demo.launch()
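
# ---------------------------------------------------------------------------
# Note on the assumed `lid.identify` contract: the app above expects it to
# return a label that starts with an ISO 639-3 code ("swh" or "eng"), which
# `transcribe(audio, lang=...)` then accepts. A minimal, hypothetical sketch
# of such an `identify` built on the Hugging Face Transformers
# audio-classification pipeline (the model choice and filepath input are
# assumptions, not the actual `lid` implementation):
#
#     from transformers import pipeline
#
#     _lid = pipeline("audio-classification", model="facebook/mms-lid-126")
#
#     def identify(audio_path):
#         # Top prediction's label is an ISO 639-3 code, e.g. "swh" or "eng".
#         return _lid(audio_path)[0]["label"]
# ---------------------------------------------------------------------------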