Update musicgen_app.py
musicgen_app.py (+3 -2)
CHANGED
@@ -115,7 +115,8 @@ def _do_predictions(texts, melodies, duration, progress=False, **gen_kwargs):
     newDuration = generateTime(texts[0], duration)
     MODEL.set_generation_params(duration=duration, **gen_kwargs)
     print("new batch", len(newTexts), newTexts[0], [None if m is None else (m[0], m[1].shape) for m in melodies])
-    print("foo do predictions dir")
+    print("foo do predictions dir newTexts0 below:")
+    print(newTexts[0])
     be = time.time()
     processed_melodies = []
     target_sr = 32000
@@ -198,7 +199,7 @@ def predict_full(model, decoder, text, extra, melody, duration, topk, topp, temp
     if INTERRUPTING:
         raise gr.Error("Interrupted.")
     MODEL.set_custom_progress_callback(_progress)
-
+    print("predict full extra: "+str(extra))
     videos, wavs = _do_predictions(
         [text, extra], [melody], duration, progress=True,
         top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef)
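For reference, a minimal standalone sketch of the console output the newly added prints produce. The newTexts and extra values below are hypothetical placeholders; in the running app they come from generateTime() and the Gradio inputs, not from this snippet.

    # Standalone sketch of the debug prints added in this commit.
    # newTexts and extra are hypothetical stand-ins for the values supplied
    # by generateTime() and the Gradio text fields at runtime.
    newTexts = ["lofi hip hop beat, mellow piano, 90 bpm"]  # hypothetical prompt
    extra = "extend with a soft drum outro"                 # hypothetical extra text

    print("foo do predictions dir newTexts0 below:")
    print(newTexts[0])
    print("predict full extra: " + str(extra))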