# Project_1/app.py
import gradio as gr
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
from huggingface_hub import InferenceClient
from ttsmms import download, TTS
from langdetect import detect
import os
import wave
import numpy as np
# === Step 1: Load ASR Model ===
asr_model_name = "Futuresony/Future-sw_ASR-24-02-2025"
processor = Wav2Vec2Processor.from_pretrained(asr_model_name)
asr_model = Wav2Vec2ForCTC.from_pretrained(asr_model_name)
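# Dropout is disabled for inference; the forward pass itself is wrapped in
# torch.no_grad() inside transcribe() below.
asr_model.eval()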
# === Step 2: Load Text Generation Model ===
client = InferenceClient("unsloth/gemma-3-1b-it")
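# Note: InferenceClient sends requests to the hosted Hugging Face Inference
# API, so the app needs network access at runtime; gated or rate-limited
# models may also require a token (e.g. via the HF_TOKEN environment variable).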
def format_prompt(user_input):
    return f"{user_input}"
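
# gemma-3-1b-it is instruction-tuned, so a chat-style prompt would likely
# behave better than a raw string. A minimal sketch using Gemma's turn
# markers (an assumption, not the author's tested format):
#
# def format_prompt(user_input):
#     return f"<start_of_turn>user\n{user_input}<end_of_turn>\n<start_of_turn>model\n"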
# === Step 3: Load TTS Models ===
swahili_dir = download("swh", "./data/swahili")
english_dir = download("eng", "./data/english")
swahili_tts = TTS(swahili_dir)
english_tts = TTS(english_dir)
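# download() should be a no-op once the checkpoints already exist under
# ./data, so restarts skip the fetch (assumption based on ttsmms's caching).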
# === Step 4: Generate silent fallback audio ===
def create_silent_wav(filename="./error.wav", duration_sec=1.0, sample_rate=16000):
    if not os.path.exists(filename):
        silence = np.zeros(int(sample_rate * duration_sec), dtype=np.int16)
        with wave.open(filename, "wb") as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)
            wf.setframerate(sample_rate)
            wf.writeframes(silence.tobytes())

create_silent_wav()  # Call once at startup
# === Step 5: Transcription Function ===
def transcribe(audio_file):
    try:
        speech_array, sample_rate = torchaudio.load(audio_file)
        # Down-mix multi-channel recordings to mono before feature extraction
        if speech_array.shape[0] > 1:
            speech_array = speech_array.mean(dim=0, keepdim=True)
        # Resample only when the input is not already at the model's 16 kHz rate
        if sample_rate != 16000:
            resampler = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)
            speech_array = resampler(speech_array)
        speech_array = speech_array.squeeze().numpy()
        input_values = processor(speech_array, sampling_rate=16000, return_tensors="pt").input_values
        with torch.no_grad():
            logits = asr_model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        transcription = processor.batch_decode(predicted_ids)[0]
        return transcription
    except Exception as e:
        print("ASR Error:", e)
        return "[ASR Failed]"
# === Step 6: Text Generation Function ===
def generate_text(prompt):
    try:
        formatted_prompt = format_prompt(prompt)
        response = client.text_generation(
            formatted_prompt, max_new_tokens=250, temperature=0.7, top_p=0.95
        )
        return response.strip()
    except Exception as e:
        print("Text Generation Error:", e)
        return "[Text Generation Failed]"
# === Step 7: Text-to-Speech Function ===
def text_to_speech(text):
    wav_path = "./output.wav"
    try:
        # detect() can raise on empty or ambiguous input, so keep it inside the try
        lang = detect(text)
        if lang == "sw":
            swahili_tts.synthesis(text, wav_path=wav_path)
        else:
            english_tts.synthesis(text, wav_path=wav_path)
        return wav_path
    except Exception as e:
        print("TTS Error:", e)
        return "./error.wav"  # Fall back to the silent clip created at startup
# === Step 8: Combined Logic ===
def process_audio(audio):
    transcription = transcribe(audio)
    generated_text = generate_text(transcription)
    speech = text_to_speech(generated_text)
    print(f"[DEBUG] Transcription: {transcription}")
    print(f"[DEBUG] Generated Text: {generated_text}")
    print(f"[DEBUG] TTS Output Path: {speech} (type={type(speech)})")
    return transcription, generated_text, speech
# === Step 9: Gradio Interface ===
with gr.Blocks() as demo:
    gr.Markdown("<p align='center' style='font-size: 20px;'>End-to-End ASR → Text Generation → TTS</p>")
    gr.HTML("<center>Upload or record audio. The model will transcribe, generate a response, and read it out.</center>")
    audio_input = gr.Audio(label="🎙️ Input Audio", type="filepath")
    text_output = gr.Textbox(label="📝 Transcription")
    generated_text_output = gr.Textbox(label="🤖 Generated Text")
    audio_output = gr.Audio(label="🔊 Output Speech")
    submit_btn = gr.Button("Submit")
    submit_btn.click(
        fn=process_audio,
        inputs=audio_input,
        outputs=[text_output, generated_text_output, audio_output]
    )
if __name__ == "__main__":
    demo.launch()