Commit 2ae17a3
1 Parent(s): a1640a9

Prompting changes
app.py
CHANGED
@@ -8,20 +8,25 @@ import numpy as np
 
 base_url = "https://api.sandbox.deepgram.com/nlu"
 token_str = os.environ['DG_TOKEN']
-def tts_fn(text,
+def tts_fn(text, prompt_audio, pitch_steps, inference_steps, inference_temperature):
     texts = [text]
+    prompt_audio = np.reshape(prompt_audio[1], (1, 1, -1)).astype(np.float32, order='C') / 32768.0
     response = requests.post(
         f'{base_url}',
-        files=[('texts', ('texts', json.dumps(texts), 'application/json'))],
-        params={'synthesize': 'true', '
+        files=[('texts', ('texts', json.dumps(texts), 'application/json')), ('prompt_audio', ('prompt_audio', json.dumps(prompt_audio.tolist()), 'application/json'))],
+        params={'synthesize': 'true', 'pitch_steps': int(pitch_steps), 'soundstorm_steps': inference_steps, 'temperature': inference_temperature},
         headers={
             'Authorization': f'Token {token_str}'
         },
     ).json()
-
-
+    try:
+        sample_rate = int(response['results'][0]['sample_rate'])
+        audio = (np.array(response['results'][0]['audio']).transpose() * 32767).astype(np.int16)
+    except Exception:
+        print(response)
     return (sample_rate, audio)
 
+demo_files = ['demo_files/man.wav', 'demo_files/woman.wav', 'demo_files/man_2.wav', 'demo_files/woman_2.wav', 'demo_files/meditation.wav']
 
 app = gr.Blocks()
 
@@ -30,14 +35,18 @@ with app:
     with gr.Row():
         with gr.Column():
             pangram = "The beige hue on the waters of the loch impressed all, including the French queen, before she heard that symphony again, just as young Arthur wanted."
-            cherry = "
+            cherry = "Your request has been processed and the audio is ready for playback."
             textbox = gr.TextArea(label="Text", placeholder="Type a sentence here", value=cherry)
-
+            prompt_audio = gr.Audio(label="Prompt Audio (first 3 seconds of selection)", source='upload')
+            examples = gr.Examples(label='Sample Speakers', examples=demo_files, inputs=prompt_audio)
+            # speed = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Speed")
             pitch_steps = gr.Slider(minimum=-24, maximum=24, value=0, step=1, label="Pitch Steps: 12 to an octave")
-            variability = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Variability")
+            # variability = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Variability")
+            inference_steps = gr.Slider(minimum=1, maximum=32, value=1, step=1, label="Inference Steps: quality vs latency tradeoff. Results are sometimes unstable for values >1.")
+            inference_temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.9, step=0.05, label="Temperature: fidelity vs variability tradeoff")
 
         with gr.Column():
             audio_output = gr.Audio(label="Output Audio", elem_id='tts-audio')
             btn = gr.Button("Generate")
-            btn.click(tts_fn, inputs=[textbox,
-app.launch()
+            btn.click(tts_fn, inputs=[textbox, prompt_audio, pitch_steps, inference_steps, inference_temperature], outputs=[audio_output])
+app.launch(share=True)
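
For context, here is a minimal standalone sketch of the request that the updated tts_fn builds, useful for exercising the sandbox endpoint outside Gradio. The URL, the 'texts' and 'prompt_audio' multipart fields, the query parameters, and the results[0]['sample_rate'] / results[0]['audio'] response shape are all taken from the diff above; everything else (the synthesize() helper name, the scipy-based WAV I/O, the output path, and the assumption that the prompt file is mono 16-bit PCM) is illustrative only. Gradio's gr.Audio hands tts_fn a (sample_rate, int16 array) tuple, which is why the Space indexes prompt_audio[1] and scales by 1/32768; the sketch applies the same scaling to a WAV read from disk.

# Hypothetical standalone client mirroring app.py's tts_fn; the field names and
# response parsing come from the diff above, the WAV handling is an assumption.
import json
import os

import numpy as np
import requests
from scipy.io import wavfile

base_url = "https://api.sandbox.deepgram.com/nlu"
token_str = os.environ['DG_TOKEN']

def synthesize(text, prompt_wav_path, pitch_steps=0, inference_steps=1, temperature=0.9):
    # Load the prompt clip and normalize it the same way the Space does:
    # int16 samples reshaped to (1, 1, n) and scaled into [-1, 1).
    _, samples = wavfile.read(prompt_wav_path)  # assumes mono 16-bit PCM
    prompt = np.reshape(samples, (1, 1, -1)).astype(np.float32, order='C') / 32768.0

    response = requests.post(
        base_url,
        files=[
            ('texts', ('texts', json.dumps([text]), 'application/json')),
            ('prompt_audio', ('prompt_audio', json.dumps(prompt.tolist()), 'application/json')),
        ],
        params={
            'synthesize': 'true',
            'pitch_steps': int(pitch_steps),
            'soundstorm_steps': inference_steps,
            'temperature': temperature,
        },
        headers={'Authorization': f'Token {token_str}'},
    ).json()

    # app.py wraps this in try/except and prints the raw response on failure;
    # here we parse directly and let errors surface.
    result = response['results'][0]
    sample_rate = int(result['sample_rate'])
    audio = (np.array(result['audio']).transpose() * 32767).astype(np.int16)
    return sample_rate, audio

if __name__ == '__main__':
    rate, audio = synthesize(
        "Your request has been processed and the audio is ready for playback.",
        'demo_files/man.wav',
    )
    wavfile.write('output.wav', rate, audio)  # illustrative output path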