Commit: upd4
- __pycache__/app.cpython-312.pyc +0 -0
- app.py +29 -27
__pycache__/app.cpython-312.pyc
CHANGED
Binary files a/__pycache__/app.cpython-312.pyc and b/__pycache__/app.cpython-312.pyc differ
app.py
CHANGED
@@ -1,46 +1,48 @@
-from fastapi import FastAPI
-
+from fastapi import FastAPI, WebSocket, WebSocketDisconnect
 import outetts
-import os
+import io
 
 # Initialize the interface
 interface = outetts.Interface(
     config=outetts.ModelConfig.auto_config(
         model=outetts.Models.VERSION_1_0_SIZE_1B,
-        # For llama.cpp backend
         backend=outetts.Backend.LLAMACPP,
         quantization=outetts.LlamaCppQuantization.FP16
-        # For transformers backend
-        #backend=outetts.Backend.HF,
     )
 )
 
 # Load the default speaker profile
 speaker = interface.load_default_speaker("EN-FEMALE-1-NEUTRAL")
 
-# Or create your own speaker profiles in seconds and reuse them instantly
-# speaker = interface.create_speaker("path/to/audio.wav")
-# interface.save_speaker(speaker, "speaker.json")
-# speaker = interface.load_speaker("speaker.json")
-
-# Generate speech
-output = interface.generate(
-    config=outetts.GenerationConfig(
-        text="Hello, how are you doing?",
-        generation_type=outetts.GenerationType.CHUNKED,
-        speaker=speaker,
-        sampler_config=outetts.SamplerConfig(
-            temperature=0.4
-        ),
-    )
-)
-
-# Save to file
-output_path = os.path.join(os.getcwd(),"output.wav")
-output.save(output_path)
-
 app = FastAPI()
 
 @app.get("/")
 def greet_json():
     return {"Hello": "World!"}
+
+@app.websocket("/ws/tts")
+async def websocket_tts(websocket: WebSocket):
+    await websocket.accept()
+    try:
+        while True:
+            # Receive a text chunk from the client
+            data = await websocket.receive_text()
+            # Generate audio from the received text
+            output = interface.generate(
+                config=outetts.GenerationConfig(
+                    text=data,
+                    generation_type=outetts.GenerationType.CHUNKED,
+                    speaker=speaker,
+                    sampler_config=outetts.SamplerConfig(
+                        temperature=0.4
+                    ),
+                )
+            )
+            # Write the generated audio into an in-memory buffer
+            audio_buffer = io.BytesIO()
+            output.save(audio_buffer)
+            audio_bytes = audio_buffer.getvalue()
+            # Send the audio data back to the client as bytes
+            await websocket.send_bytes(audio_bytes)
+    except WebSocketDisconnect:
+        pass