Futuresony committed on
Commit 984af4c · verified · 1 Parent(s): ccd1fb9

Update app.py

Files changed (1):
  1. app.py +63 -44

app.py CHANGED
@@ -1,51 +1,70 @@
  import gradio as gr
- import torch
- import torchaudio
- from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
- from huggingface_hub import InferenceClient

- # Load ASR model
- asr_model_name = "Futuresony/Future-sw_ASR-24-02-2025"
- processor = Wav2Vec2Processor.from_pretrained(asr_model_name)
- asr_model = Wav2Vec2ForCTC.from_pretrained(asr_model_name)

- # Load text generation client
- client = InferenceClient("unsloth/gemma-3-1b-it")

- # Function: Transcribe audio
- def transcribe(audio_file):
-     waveform, sample_rate = torchaudio.load(audio_file)
-     resampler = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)
-     waveform = resampler(waveform).squeeze().numpy()
-     inputs = processor(waveform, sampling_rate=16000, return_tensors="pt")
-     with torch.no_grad():
-         logits = asr_model(inputs.input_values).logits
-     predicted_ids = torch.argmax(logits, dim=-1)
-     transcription = processor.batch_decode(predicted_ids)[0]
-     return transcription

- # Function: Generate response based on transcription
- def generate_text(prompt):
-     response = client.text_generation(prompt, max_new_tokens=150, temperature=0.7)
-     return response.strip()
-
- # Gradio interface
- def asr_and_generate(audio):
-     if not audio:
-         return "No audio provided.", ""
-     transcription = transcribe(audio)
-     generated = generate_text(transcription)
-     return transcription, generated

- demo = gr.Interface(
-     fn=asr_and_generate,
-     inputs=gr.Audio(label="Upload or Record Audio", type="filepath"),
-     outputs=[
-         gr.Textbox(label="Transcription"),
-         gr.Textbox(label="AI Response")
-     ],
-     title="ASR to Text Generation",
-     description="Upload audio. The model will transcribe speech to text and generate a response using a fine-tuned text model."
- )

- demo.launch()
  import gradio as gr
+ import edge_tts
+ import asyncio
+ import tempfile
+ import os

+ # Get all available voices
+ async def get_voices():
+     voices = await edge_tts.list_voices()
+     return {f"{v['ShortName']} - {v['Locale']} ({v['Gender']})": v['ShortName'] for v in voices}

+ # Text-to-speech function
+ async def text_to_speech(text, voice, rate, pitch):
+     if not text.strip():
+         return None, gr.Warning("Please enter text to convert.")
+     if not voice:
+         return None, gr.Warning("Please select a voice.")
+
+     voice_short_name = voice.split(" - ")[0]
+     rate_str = f"{rate:+d}%"
+     pitch_str = f"{pitch:+d}Hz"
+     communicate = edge_tts.Communicate(text, voice_short_name, rate=rate_str, pitch=pitch_str)
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
+         tmp_path = tmp_file.name
+         await communicate.save(tmp_path)
+     return tmp_path, None

+ # Gradio interface function
+ def tts_interface(text, voice, rate, pitch):
+     audio, warning = asyncio.run(text_to_speech(text, voice, rate, pitch))
+     return audio, warning

+ # Create Gradio application
+ import gradio as gr

+ async def create_demo():
+     voices = await get_voices()
+
+     description = """
+     Convert text to speech using Microsoft Edge TTS. Adjust speech rate and pitch: 0 is default, positive values increase, negative values decrease.
+
+
+
+
+     """
+
+     demo = gr.Interface(
+         fn=tts_interface,
+         inputs=[
+             gr.Textbox(label="Input Text", lines=5),
+             gr.Dropdown(choices=[""] + list(voices.keys()), label="Select Voice", value=""),
+             gr.Slider(minimum=-50, maximum=50, value=0, label="Speech Rate Adjustment (%)", step=1),
+             gr.Slider(minimum=-20, maximum=20, value=0, label="Pitch Adjustment (Hz)", step=1)
+         ],
+         outputs=[
+             gr.Audio(label="Generated Audio", type="filepath"),
+             gr.Markdown(label="Warning", visible=False)
+         ],
+         title="Edge TTS Text-to-Speech",
+         description=description,
+
+         analytics_enabled=False,
+         allow_flagging="manual"
+     )
+     return demo

+ # Run the application
+ if __name__ == "__main__":
+     demo = asyncio.run(create_demo())
+     demo.launch()
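
Usage note (illustrative, not part of the commit): the new app.py wraps edge-tts in a Gradio UI, but the core synthesis call it relies on can be exercised on its own. A minimal sketch, assuming the edge-tts package is installed; the voice name "en-US-AriaNeural" and the output path "demo.mp3" are example choices, and any ShortName returned by edge_tts.list_voices() should work:

import asyncio
import edge_tts

async def synthesize_demo():
    # Same call pattern as text_to_speech() in the new app.py:
    # rate and pitch use the "+N%" / "+NHz" string format expected by edge-tts.
    communicate = edge_tts.Communicate(
        "Hello from Edge TTS.", "en-US-AriaNeural", rate="+0%", pitch="+0Hz"
    )
    # Write the synthesized speech to an MP3 file, as the Gradio app does with a temp file.
    await communicate.save("demo.mp3")

if __name__ == "__main__":
    asyncio.run(synthesize_demo())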