import gradio as gr
from transformers import pipeline
import soundfile as sf
from huggingface_hub import InferenceClient
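# Assumed dependencies (not pinned in the original): pip install gradio transformers soundfile huggingface_hub torch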
# Initialize Facebook MMS ASR model
asr_model = pipeline("automatic-speech-recognition", model="facebook/mms-1b-all")
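# Note: mms-1b-all covers 1,000+ languages; per the MMS docs a target language can be
# selected at load time, e.g. (illustrative, not part of the original app):
#   pipeline("automatic-speech-recognition", model="facebook/mms-1b-all",
#            model_kwargs={"target_lang": "fra", "ignore_mismatched_sizes": True})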
# Initialize Facebook MMS TTS model
# (MMS-TTS ships one checkpoint per language; "facebook/mms-tts" alone is not a
# loadable pipeline model id, so the English checkpoint is used here)
tts_model = pipeline("text-to-speech", model="facebook/mms-tts-eng")
# Initialize the Chat Model (Gemma-2-9B or Futuresony.gguf)
chat_client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf") # Change if needed
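# Note: InferenceClient sends requests to a model served via the HF Inference API or
# an Inference Endpoint; this assumes the repo above is actually deployed there
# (a raw .gguf checkpoint is not served automatically).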
def asr_chat_tts(audio):
    """
    1. Convert speech to text (ASR)
    2. Process the text through the chat model (LLM)
    3. Convert the response to speech (TTS)
    """
    # Step 1: Transcribe speech using Facebook MMS ASR
    transcription = asr_model(audio)["text"]
    # Step 2: Process text through the chat model
    messages = [{"role": "system", "content": "You are a helpful AI assistant."}]
    messages.append({"role": "user", "content": transcription})
    response = ""
    for msg in chat_client.chat_completion(messages, max_tokens=512, stream=True):
        token = msg.choices[0].delta.content or ""  # delta.content can be None on some chunks
        response += token
    # Step 3: Convert response to speech using Facebook MMS TTS
    speech = tts_model(response)
    output_file = "generated_speech.wav"
    # The TTS pipeline returns audio shaped (channels, samples); squeeze to mono for soundfile
    sf.write(output_file, speech["audio"].squeeze(), samplerate=speech["sampling_rate"])
    return transcription, response, output_file
# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("<h2 style='text-align: center;'>ASR → Chatbot → TTS</h2>")
    with gr.Row():
        # Gradio 3.x API; on Gradio 4+ use sources=["microphone"] instead of source=
        audio_input = gr.Audio(source="microphone", type="filepath", label="🎤 Speak Here")
        text_transcription = gr.Textbox(label="📝 Transcription", interactive=False)
        text_response = gr.Textbox(label="🤖 Chatbot Response", interactive=False)
        audio_output = gr.Audio(label="🔊 Generated Speech")
    submit_button = gr.Button("Process Speech 🚀")
    submit_button.click(fn=asr_chat_tts, inputs=[audio_input], outputs=[text_transcription, text_response, audio_output])
# Run the App
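# (demo.launch(share=True) would additionally expose a temporary public link)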
if __name__ == "__main__":
    demo.launch()