Commit 7ac8db1 · Parent: c1e9a88 · "update docs"

app.py CHANGED

@@ -116,200 +116,248 @@ _patch_t5x_for_gpu_coords()

def create_documentation_interface():
    """Create a Gradio interface for documentation and transparency"""
    with gr.Blocks(title="MagentaRT Research API", theme=gr.themes.Soft()) as interface:
        gr.Markdown(
            r"""
            # 🎵 MagentaRT Live Music Generation Research API

            **Research-only implementation for iOS/web app development**

            This API uses Google's [MagentaRT](https://github.com/magenta/magenta-realtime) to generate
            continuous music either as **bar-aligned chunks over HTTP** or as **low-latency realtime chunks via WebSocket**.
            """
        )

        with gr.Tabs():
            # ------------------------------------------------------------------
            # About & current status
            # ------------------------------------------------------------------
            with gr.Tab("📖 About & Status"):
                gr.Markdown(
                    r"""
                    ## What this is
                    We're exploring AI‑assisted loop‑based music creation that can run on GPUs (not just TPUs) and stream to apps in realtime.

                    ### Implemented backends
                    - **HTTP (bar‑aligned):** `/generate`, `/jam/start`, `/jam/next`, `/jam/stop`, `/jam/update`, etc.
                    - **WebSocket (realtime):** `ws://…/ws/jam` with `mode="rt"` (Colab‑style continuous chunks). New in this build.

                    ## What we learned (GPU notes)
                    - **L40S 48GB:** comfortably **faster than realtime** → we added a `pace: "realtime"` switch so the server doesn't outrun playback.
                    - **L4 24GB:** **consistently just under realtime**; even with pre‑roll buffering, TF32/JAX tunings, reduced chunk size, and the **base** checkpoint, we still see eventual under‑runs.
                    - **Implication:** for production‑quality realtime, aim for ~**40GB VRAM** per user/session (e.g., **A100 40GB**, or MIG slices ≈ **35–40GB** on newer parts). Smaller GPUs can demo, but sustained realtime is not reliable.

                    ## Model / audio specs
                    - **Model:** MagentaRT (T5X; decoder RVQ depth = 16)
                    - **Audio:** 48 kHz stereo, 2.0 s chunks by default, 40 ms crossfade
                    - **Context:** 10 s rolling context window
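
                    To make those numbers concrete, here is a small back‑of‑the‑envelope sketch (plain Python; it only uses the 25 codec‑frames‑per‑second rate implied by `chunk_seconds = frames / 25.0` in the WebSocket metadata):

                    ```python
                    # Derived sizes from the specs above (illustrative, not part of the API).
                    SAMPLE_RATE = 48_000      # Hz
                    FRAME_RATE = 25           # codec frames per second
                    CHUNK_FRAMES = 50         # default max_decode_frames

                    chunk_seconds = CHUNK_FRAMES / FRAME_RATE          # 2.0 s
                    chunk_samples = int(chunk_seconds * SAMPLE_RATE)   # 96_000 samples per channel
                    crossfade_samples = int(0.040 * SAMPLE_RATE)       # 1_920 samples (40 ms)
                    context_frames = 10 * FRAME_RATE                   # 250 frames of rolling context
                    print(chunk_seconds, chunk_samples, crossfade_samples, context_frames)
                    ```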
                    """
                )

            # ------------------------------------------------------------------
            # HTTP API
            # ------------------------------------------------------------------
            with gr.Tab("🔧 API (HTTP)"):
                gr.Markdown(
                    r"""
                    ### Single Generation
                    ```bash
                    curl -X POST \
                      "$HOST/generate" \
                      -F "loop_audio=@drum_loop.wav" \
                      -F "bpm=120" \
                      -F "bars=8" \
                      -F "styles=acid house,techno" \
                      -F "guidance_weight=5.0" \
                      -F "temperature=1.1"
                    ```

                    ### Continuous Jamming (bar‑aligned, HTTP)
                    ```bash
                    # 1) Start a session and keep its id
                    SESSION=$(curl -s -X POST "$HOST/jam/start" \
                      -F "loop_audio=@loop.wav" \
                      -F "bpm=120" \
                      -F "bars_per_chunk=8" | jq -r .session_id)   # response: {"session_id":"…"}

                    # 2) Pull the next chunk (repeat)
                    curl "$HOST/jam/next?session_id=$SESSION"

                    # 3) Stop
                    curl -X POST "$HOST/jam/stop" \
                      -H "Content-Type: application/json" \
                      -d '{"session_id":"'$SESSION'"}'
                    ```

                    ### Common parameters
                    - **bpm** *(int)* – beats per minute
                    - **bars / bars_per_chunk** *(int)* – musical length
                    - **styles** *(str)* – comma‑separated text prompts (mixed internally)
                    - **guidance_weight** *(float)* – style adherence (CFG weight)
                    - **temperature / topk** – sampling controls
                    - **intro_bars_to_drop** *(int, `/generate` only)* – generate extra intro bars, then trim them
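
                    The same `/generate` call from Python, as a minimal sketch using the `requests` library (the host URL and timeout are placeholders; the form fields match the curl example above):

                    ```python
                    import requests  # pip install requests

                    HOST = "https://YOUR_SPACE"  # placeholder

                    with open("drum_loop.wav", "rb") as f:
                        resp = requests.post(
                            f"{HOST}/generate",
                            files={"loop_audio": f},
                            data={
                                "bpm": 120,
                                "bars": 8,
                                "styles": "acid house,techno",
                                "guidance_weight": 5.0,
                                "temperature": 1.1,
                            },
                            timeout=300,
                        )
                    resp.raise_for_status()
                    # Whether the body is raw WAV bytes or JSON depends on the server;
                    # check the content type before writing it to disk.
                    print(resp.status_code, resp.headers.get("content-type"))
                    ```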
                    """
                )

            # ------------------------------------------------------------------
            # WebSocket API: realtime ("rt" mode)
            # ------------------------------------------------------------------
            with gr.Tab("🧩 API (WebSocket • rt mode)"):
                gr.Markdown(
                    r"""
                    Connect to `wss://…/ws/jam` and send a **JSON control stream**. In `rt` mode the server emits ~2 s WAV chunks (or binary frames) continuously.

                    ### Start (client → server)
                    ```jsonc
                    {
                      "type": "start",
                      "mode": "rt",
                      "binary_audio": false,        // true → raw WAV bytes + separate chunk_meta
                      "params": {
                        "styles": "heavy metal",    // or "jazz, hiphop"
                        "style_weights": "1.0,1.0", // optional, auto‑normalized
                        "temperature": 1.1,
                        "topk": 40,
                        "guidance_weight": 1.1,
                        "pace": "realtime",         // "realtime" | "asap" (default)
                        "max_decode_frames": 50     // 50 ≈ 2.0 s; try 36–45 on smaller GPUs
                      }
                    }
                    ```

                    ### Server events (server → client)
                    - `{"type":"started","mode":"rt"}` – handshake
                    - `{"type":"chunk","audio_base64":"…","metadata":{…}}` – base64 WAV
                      - `metadata.sample_rate` *(int)* – usually 48000
                      - `metadata.chunk_frames` *(int)* – e.g., 50
                      - `metadata.chunk_seconds` *(float)* – frames / 25.0
                      - `metadata.crossfade_seconds` *(float)* – typically 0.04
                    - `{"type":"chunk_meta","metadata":{…}}` – sent **after** a binary frame when `binary_audio=true`
                    - `{"type":"status",…}`, `{"type":"error",…}`, `{"type":"stopped"}`

                    ### Update (client → server)
                    ```jsonc
                    {
                      "type": "update",
                      "styles": "jazz, hiphop",
                      "style_weights": "1.0,0.8",
                      "temperature": 1.2,
                      "topk": 64,
                      "guidance_weight": 1.0,
                      "pace": "realtime",        // optional live flip
                      "max_decode_frames": 40    // optional; <= 50
                    }
                    ```

                    ### Stop / ping
                    ```json
                    {"type":"stop"}
                    {"type":"ping"}
                    ```

                    ### Browser quick‑start (schedules chunks seamlessly with a 25–40 ms crossfade)
                    ```html
                    <script>
                    const XFADE = 0.025; // 25 ms
                    let ctx, gain, ws, nextTime = 0;

                    async function start(){
                      ctx = new (window.AudioContext || window.webkitAudioContext)();
                      gain = ctx.createGain(); gain.connect(ctx.destination);
                      ws = new WebSocket("wss://YOUR_SPACE/ws/jam");
                      ws.onopen = () => ws.send(JSON.stringify({
                        type: "start", mode: "rt", binary_audio: false,
                        params: { styles: "warmup", temperature: 1.1, topk: 40, guidance_weight: 1.1, pace: "realtime" }
                      }));
                      ws.onmessage = async ev => {
                        const msg = JSON.parse(ev.data);
                        if (msg.type === "chunk" && msg.audio_base64){
                          // base64 → ArrayBuffer → AudioBuffer
                          const bin = atob(msg.audio_base64); const buf = new Uint8Array(bin.length);
                          for (let i = 0; i < bin.length; i++) buf[i] = bin.charCodeAt(i);
                          const audio = await ctx.decodeAudioData(buf.buffer);
                          const src = ctx.createBufferSource(); const g = ctx.createGain();
                          src.buffer = audio; src.connect(g); g.connect(gain);
                          // Schedule each chunk with a short overlap-add crossfade.
                          if (nextTime < ctx.currentTime + 0.05) nextTime = ctx.currentTime + 0.12;
                          const startAt = nextTime, dur = audio.duration;
                          nextTime = startAt + Math.max(0, dur - XFADE);
                          g.gain.setValueAtTime(0, startAt);
                          g.gain.linearRampToValueAtTime(1, startAt + XFADE);
                          g.gain.setValueAtTime(1, startAt + Math.max(0, dur - XFADE));
                          g.gain.linearRampToValueAtTime(0, startAt + dur);
                          src.start(startAt);
                        }
                      };
                    }
                    </script>
                    ```

                    ### Python client (async)
                    ```python
                    import asyncio, base64, io, json
                    import soundfile as sf
                    import websockets

                    async def run(url):
                        async with websockets.connect(url) as ws:
                            await ws.send(json.dumps({
                                "type": "start", "mode": "rt", "binary_audio": False,
                                "params": {"styles": "warmup", "temperature": 1.1, "topk": 40,
                                           "guidance_weight": 1.1, "pace": "realtime"},
                            }))
                            while True:
                                msg = json.loads(await ws.recv())
                                if msg.get("type") == "chunk":
                                    wav = base64.b64decode(msg["audio_base64"])  # bytes of a WAV file
                                    x, sr = sf.read(io.BytesIO(wav), dtype="float32")
                                    print("chunk", x.shape, sr)
                                elif msg.get("type") in ("stopped", "error"):
                                    break

                    asyncio.run(run("wss://YOUR_SPACE/ws/jam"))
                    ```
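
                    ### Binary mode variant (sketch)
                    If you start with `binary_audio: true`, audio arrives as raw WAV bytes and the matching metadata follows in a `chunk_meta` message (see the event list above). A minimal variant of the client, with the same placeholder URL:

                    ```python
                    import asyncio, io, json
                    import soundfile as sf
                    import websockets

                    async def run_binary(url):
                        async with websockets.connect(url) as ws:
                            await ws.send(json.dumps({
                                "type": "start", "mode": "rt", "binary_audio": True,
                                "params": {"styles": "warmup", "pace": "realtime"},
                            }))
                            while True:
                                msg = await ws.recv()
                                if isinstance(msg, bytes):               # raw WAV chunk
                                    x, sr = sf.read(io.BytesIO(msg), dtype="float32")
                                    print("binary chunk", x.shape, sr)
                                else:
                                    evt = json.loads(msg)
                                    if evt.get("type") == "chunk_meta":  # metadata for the previous binary frame
                                        print("meta", evt["metadata"])
                                    elif evt.get("type") in ("stopped", "error"):
                                        break

                    asyncio.run(run_binary("wss://YOUR_SPACE/ws/jam"))
                    ```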
                    """
                )

            # ------------------------------------------------------------------
            # Performance & hardware guidance
            # ------------------------------------------------------------------
            with gr.Tab("📊 Performance & Hardware"):
                gr.Markdown(
                    r"""
                    ### Current observations
                    - **L40S 48GB** → faster than realtime. Use `pace:"realtime"` to avoid client over‑buffering.
                    - **L4 24GB** → slightly **below** realtime even with pre‑roll buffering, TF32/autotune, smaller chunks (`max_decode_frames`), and the **base** checkpoint.

                    ### Practical guidance
                    - For consistent realtime, target **~40GB VRAM per active stream** (e.g., **A100 40GB**, or MIG slices ≈ **35–40GB** on newer GPUs).
                    - Keep client‑side **overlap‑add** (25–40 ms) for seamless chunk joins.
                    - Prefer **`pace:"realtime"`** once playback begins; use **ASAP** only to build a short pre‑roll if needed (see the sketch below).
                    - Optional knob: **`max_decode_frames`** (default **50** ≈ 2.0 s). Reducing it to **36–45** can lower per‑chunk latency/VRAM, but doesn't increase frames‑per‑second throughput.
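
                    A minimal sketch of that pre‑roll pattern over the WebSocket API (the buffer target and URL are illustrative; it counts buffered seconds from `metadata.chunk_seconds`, then flips `pace` with an `update` message):

                    ```python
                    import asyncio, json
                    import websockets

                    PREROLL_SECONDS = 4.0  # illustrative target; tune per GPU

                    async def preroll_then_realtime(url):
                        async with websockets.connect(url) as ws:
                            # Start in "asap" so the server can run ahead and build a buffer.
                            await ws.send(json.dumps({
                                "type": "start", "mode": "rt", "binary_audio": False,
                                "params": {"styles": "warmup", "pace": "asap"},
                            }))
                            buffered, switched = 0.0, False
                            async for raw in ws:
                                msg = json.loads(raw)
                                if msg.get("type") == "chunk":
                                    buffered += msg["metadata"].get("chunk_seconds", 2.0)
                                    # ...decode and schedule the audio here...
                                    if not switched and buffered >= PREROLL_SECONDS:
                                        # Enough pre-roll: ask the server to pace to realtime.
                                        await ws.send(json.dumps({"type": "update", "pace": "realtime"}))
                                        switched = True
                                elif msg.get("type") in ("stopped", "error"):
                                    break

                    asyncio.run(preroll_then_realtime("wss://YOUR_SPACE/ws/jam"))
                    ```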

                    ### Concurrency
                    This research build is designed for **one active jam per GPU**. Concurrency would require GPU partitioning (MIG) or horizontal scaling with a session scheduler.
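
                    For reference, a hypothetical sketch of how such a one‑jam‑per‑GPU gate can be expressed on top of the app's `jam_registry` dict (the helper names below are illustrative and not part of the API):

                    ```python
                    import uuid

                    def try_start_jam(make_worker):
                        # Return a new session id, or None if a jam is already active.
                        if jam_registry:                           # one active jam per GPU in this build
                            return None
                        session_id = str(uuid.uuid4())
                        jam_registry[session_id] = make_worker()   # make_worker() builds a JamWorker
                        return session_id

                    def stop_jam(session_id):
                        worker = jam_registry.pop(session_id, None)
                        # ...stop/join the worker here if one was running...
                    ```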
                    """
                )

            # ------------------------------------------------------------------
            # Changelog & legal
            # ------------------------------------------------------------------
            with gr.Tab("🗒️ Changelog & Legal"):
                gr.Markdown(
                    r"""
                    ### Recent changes
                    - New **WebSocket realtime** route: `/ws/jam` (`mode:"rt"`)
                    - Added server pacing flag: `pace: "realtime" | "asap"`
                    - Exposed `max_decode_frames` for shorter chunks on smaller GPUs
                    - Client test page now does proper **overlap‑add** crossfade between chunks

                    ### Licensing
                    This project uses MagentaRT under:
                    - **Code:** Apache 2.0
                    - **Model weights:** CC‑BY 4.0

                    Please review the MagentaRT repo for full terms.
                    """
                )

        gr.Markdown(
            r"""
            ---
            **🔬 Research Project** | **📱 iOS/Web Development** | **🎵 Powered by MagentaRT**
            """
        )

    return interface


jam_registry: dict[str, JamWorker] = {}