# app.py ──────────────────────────────────────────────────────────────
import os, json, torch
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM, LogitsProcessor
from snac import SNAC
# 0) Login + device ----------------------------------------------------
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN:
    login(HF_TOKEN)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.backends.cuda.enable_flash_sdp(False)  # work around a PyTorch 2.2 flash-SDP bug
# 1) Constants ---------------------------------------------------------
REPO         = "SebastianBodza/Kartoffel_Orpheus-3B_german_natural-v0.1"
CHUNK_TOKENS = 50
START_TOKEN  = 128259
NEW_BLOCK    = 128257
EOS_TOKEN    = 128258
AUDIO_BASE   = 128266
AUDIO_SPAN   = 4096 * 7          # 28,672 audio codes (7 positions x 4096 codes each)
AUDIO_IDS    = torch.arange(AUDIO_BASE, AUDIO_BASE + AUDIO_SPAN)
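# Each audio token id encodes both its position inside a 7-code SNAC frame
# and the raw code value: id = AUDIO_BASE + position * 4096 + code. A quick
# worked example (illustrative, derived from the offsets used in
# decode_block below):
#   128266                    -> position 0, code 0
#   128266 + 1 * 4096 + 5     -> position 1, code 5
#   128266 + 6 * 4096         -> position 6, code 0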
# 2) Logit mask (NEW_BLOCK + audio; EOS only after the first block) ----
class AudioMask(LogitsProcessor):
    def __init__(self, audio_ids: torch.Tensor):
        super().__init__()
        self.allow = torch.cat([
            torch.tensor([NEW_BLOCK], device=audio_ids.device),
            audio_ids
        ])
        self.eos = torch.tensor([EOS_TOKEN], device=audio_ids.device)
        self.sent_blocks = 0  # EOS is only unmasked once at least one block was sent

    def __call__(self, input_ids, scores):
        allow = torch.cat([self.allow, self.eos]) if self.sent_blocks else self.allow
        mask = torch.full_like(scores, float("-inf"))
        mask[:, allow] = 0
        return scores + mask
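# Illustration (hypothetical shapes, not executed at import time): given
# logits of shape (1, vocab_size), the processor leaves only NEW_BLOCK and
# the 28,672 audio ids finite, so sampling can never emit text tokens;
# once sent_blocks > 0, EOS_TOKEN becomes reachable as well:
#   scores = torch.zeros(1, AUDIO_BASE + AUDIO_SPAN)   # on the mask's device
#   masked = masker(None, scores)   # masked[0, NEW_BLOCK] == 0,
#                                   # everything outside `allow` is -inf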
# 3) FastAPI skeleton --------------------------------------------------
app = FastAPI()

@app.get("/")          # simple health check
def hello():
    return {"status": "ok"}
@app.on_event("startup")   # load everything once at startup
def load_models():
    global tok, model, snac, masker
    print("⏳ Loading models …", flush=True)
    tok = AutoTokenizer.from_pretrained(REPO)
    snac = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").to(device)
    model = AutoModelForCausalLM.from_pretrained(
        REPO,
        device_map={"": 0} if device == "cuda" else None,
        torch_dtype=torch.bfloat16 if device == "cuda" else None,
        low_cpu_mem_usage=True,
    )
    model.config.pad_token_id = model.config.eos_token_id
    masker = AudioMask(AUDIO_IDS.to(device))
    print("✅ Models loaded", flush=True)
# 4) Helpers ------------------------------------------------------------
def build_prompt(text: str, voice: str):
    prompt_ids = tok(f"{voice}: {text}", return_tensors="pt").input_ids.to(device)
    ids = torch.cat([torch.tensor([[START_TOKEN]], device=device),
                     prompt_ids,
                     torch.tensor([[128009, 128260]], device=device)], 1)  # trailing special tokens the model expects
    attn = torch.ones_like(ids)
    return ids, attn
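# Schematically (illustrative), build_prompt returns the id sequence
#   [START_TOKEN, <tokens of "voice: text">, 128009, 128260]
# together with an all-ones attention mask of the same shape.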
def decode_block(block7: list[int]) -> bytes:
    # De-interleave the 7 codes into SNAC's three coarse-to-fine layers and
    # strip each position's 4096-sized offset (see the id layout above).
    l1 = [block7[0] - 0 * 4096]                               # layer 1: 1 code
    l2 = [block7[1] - 1 * 4096, block7[4] - 4 * 4096]         # layer 2: 2 codes
    l3 = [block7[2] - 2 * 4096, block7[3] - 3 * 4096,
          block7[5] - 5 * 4096, block7[6] - 6 * 4096]         # layer 3: 4 codes
    with torch.no_grad():
        codes = [torch.tensor(x, device=device).unsqueeze(0)
                 for x in (l1, l2, l3)]
        audio = snac.decode(codes).squeeze().detach().cpu().numpy()
    return (audio * 32767).astype("int16").tobytes()          # 16-bit PCM, mono, 24 kHz
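# A note on the output format (derived from the code above, not an extra
# spec): snac_24khz decodes mono audio at 24 kHz, and the int16 scaling
# means each chunk sent over the websocket is raw native-endian 16-bit PCM
# (little-endian on typical hosts) that clients can simply concatenate.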
# 5) WebSocket endpoint ------------------------------------------------
@app.websocket("/ws/tts")          # route path assumed; adjust to your client
async def tts(ws: WebSocket):
    await ws.accept()
    try:
        req = json.loads(await ws.receive_text())
        text = req.get("text", "")
        voice = req.get("voice", "Jakob")
        ids, attn = build_prompt(text, voice)
        masker.sent_blocks = 0          # reset the EOS gate for this connection
        offset_len = ids.size(1)        # tokens already accounted for in the cache
        past = None
        last_tok = None
        buf = []
        while True:
            # cache_position is only needed from the second pass onward,
            # once a single new token is fed on top of the KV cache
            next_cache_pos = torch.tensor([offset_len], device=device) if past is not None else None
            gen = model.generate(
                input_ids = ids if past is None else torch.tensor([[last_tok]], device=device),
                attention_mask = attn if past is None else None,
                past_key_values = past,
                cache_position = next_cache_pos,
                max_new_tokens = CHUNK_TOKENS,
                logits_processor=[masker],
                do_sample=True, temperature=0.7, top_p=0.95,
                use_cache=True, return_dict_in_generate=True,
            )
            # newly generated tokens, i.e. everything past the previous end
            new_tokens = gen.sequences[0, offset_len:].tolist()
            if not new_tokens:
                break
            offset_len += len(new_tokens)        # the cache has grown by this much
            past = gen.past_key_values           # reuse the cache next round
            last_tok = new_tokens[-1]
            for t in new_tokens:
                if t == EOS_TOKEN:
                    raise StopIteration          # finished; caught below
                if t == NEW_BLOCK:
                    buf.clear()
                    continue
                buf.append(t - AUDIO_BASE)
                if len(buf) == 7:
                    await ws.send_bytes(decode_block(buf))
                    buf.clear()
                    masker.sent_blocks = 1       # EOS is allowed from now on
    except (StopIteration, WebSocketDisconnect):
        pass
    except Exception as e:
        print("❌ WS error:", e, flush=True)
        import traceback
        traceback.print_exc()
        if ws.client_state.name != "DISCONNECTED":
            await ws.close(code=1011)
    finally:
        if ws.client_state.name != "DISCONNECTED":
            try:
                await ws.close()
            except RuntimeError:
                pass
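# Minimal client sketch (illustrative; assumes the "/ws/tts" route above and
# the third-party `websockets` package, both of which are assumptions rather
# than part of this app):
#
#   import asyncio, json, websockets
#
#   async def demo():
#       async with websockets.connect("ws://localhost:7860/ws/tts") as ws:
#           await ws.send(json.dumps({"text": "Hallo Welt", "voice": "Jakob"}))
#           with open("out.pcm", "wb") as f:      # raw 24 kHz mono int16 PCM
#               while True:
#                   try:
#                       f.write(await ws.recv())
#                   except websockets.exceptions.ConnectionClosed:
#                       break
#
#   asyncio.run(demo())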
# 6) Dev start ----------------------------------------------------------
if __name__ == "__main__":
    import uvicorn
    uvicorn.run("app:app", host="0.0.0.0", port=7860, log_level="info")