"""
Copyright (c) Meta Platforms, Inc. and affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import os
from tempfile import NamedTemporaryFile

import gradio as gr
import torch
from scipy.io.wavfile import write

from audiocraft.data.audio import audio_write
from audiocraft.models import MusicGen
# Lazily-loaded MusicGen instance, reused across requests.
MODEL = None

def split_process(audio, chosen_out_track):
    """Split the uploaded audio into stems with Demucs and return the chosen one."""
    os.makedirs("out", exist_ok=True)
    # Gradio delivers audio as a (sample_rate, numpy_array) tuple.
    write('test.wav', audio[0], audio[1])
    # Run Demucs source separation (mdx_extra_q model) on CPU.
    os.system("python3 -m demucs.separate -n mdx_extra_q -d cpu test.wav -o out")
    if chosen_out_track == "vocals":
        return "./out/mdx_extra_q/test/vocals.wav"
    elif chosen_out_track == "bass":
        return "./out/mdx_extra_q/test/bass.wav"
    elif chosen_out_track == "drums":
        return "./out/mdx_extra_q/test/drums.wav"
    elif chosen_out_track == "other":
        return "./out/mdx_extra_q/test/other.wav"
    elif chosen_out_track == "all-in":
        # "all-in" keeps the original, unseparated mix.
        return "test.wav"

def load_model(version):
    print("Loading model", version)
    return MusicGen.get_pretrained(version)
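# The checkpoint names accepted here are the ones offered in the UI below:
# "melody", "medium", "small", "large"; weights are downloaded on first use
# and cached afterwards.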

def predict(music_prompt, melody, duration, model):
    global MODEL
    text = music_prompt
    # (Re)load the model only when a different checkpoint is requested.
    if MODEL is None or MODEL.name != model:
        MODEL = load_model(model)
    if duration > MODEL.lm.cfg.dataset.segment_duration:
        raise gr.Error("MusicGen currently supports durations of up to 30 seconds!")
    MODEL.set_generation_params(
        use_sampling=True,
        top_k=250,
        top_p=0,
        temperature=1.0,
        cfg_coef=3.0,
        duration=duration,
    )
    if melody:
        # Convert the (sample_rate, numpy_array) tuple from Gradio into a
        # [batch, channels, samples] float tensor on the model's device.
        sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t().unsqueeze(0)
        if melody.dim() == 2:
            melody = melody[None]
        # Trim the conditioning melody to the model's maximum segment length.
        melody = melody[..., :int(sr * MODEL.lm.cfg.dataset.segment_duration)]
        output = MODEL.generate_with_chroma(
            descriptions=[text],
            melody_wavs=melody,
            melody_sample_rate=sr,
            progress=False
        )
    else:
        output = MODEL.generate(descriptions=[text], progress=False)
    output = output.detach().cpu().float()[0]
    with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
        # Loudness-normalize and write the result; the temp file path is
        # returned to the Gradio Audio output.
        audio_write(file.name, output, MODEL.sample_rate, strategy="loudness", add_suffix=False)
    return file.name
css="""
#col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
            # Split Audio to MusicGen
            Upload an audio file, split it into stems with Demucs, choose a stem as the conditioning sound for MusicGen, and get a remix!
            <br/>
            <a href="https://huggingface.co/spaces/fffiloni/SplitTrack2MusicGen?duplicate=true" style="display: inline-block;margin-top: .5em;margin-right: .25em;" target="_blank">
            <img style="margin-bottom: 0em;display: inline;margin-top: -.25em;" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
            Duplicate the Space for longer sequences, more control, and no queue.
            """
        )
        with gr.Column():
            uploaded_sound = gr.Audio(type="numpy", label="Input", source="upload")
            chosen_track = gr.Radio(["vocals", "bass", "drums", "other", "all-in"], label="Track", info="Which track from your audio do you want to mash up?", value="vocals")
            load_sound_btn = gr.Button('Load your sound')
        with gr.Row():
            music_prompt = gr.Textbox(label="Musical Prompt", info="Describe what kind of music you wish for", interactive=True)
            melody = gr.Audio(source="upload", type="numpy", label="Track Condition (from previous step)", interactive=False)
        with gr.Row():
            model = gr.Radio(["melody", "medium", "small", "large"], label="Model", value="melody", interactive=True)
        with gr.Row():
            duration = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Generated Music Duration", interactive=True)
        with gr.Row():
            submit = gr.Button("Submit")
        output = gr.Audio(label="Generated Music")

    load_sound_btn.click(split_process, inputs=[uploaded_sound, chosen_track], outputs=[melody])
    submit.click(predict, inputs=[music_prompt, melody, duration, model], outputs=[output])

demo.queue(max_size=32).launch()
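
# A minimal sketch of driving the two steps without the UI (illustrative only:
# "input.wav" and the prompt are hypothetical, and this would have to run
# before demo.launch(), which blocks the process):
#
#   from scipy.io.wavfile import read
#   sr, data = read("input.wav")
#   stem_path = split_process((sr, data), "drums")   # path to the separated stem
#   wav_path = predict("80s synthwave remix", (sr, data), 10, "melody")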