import gradio as gr
import torch
from transformers import AutoProcessor, MusicgenForConditionalGeneration

# Load the MusicGen text-to-audio model and its processor.
processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")

# The audio encoder defines the sampling rate of the generated waveform.
sampling_rate = model.config.audio_encoder.sampling_rate

def generate(description):
    # Tokenize the text prompt and generate audio tokens.
    inputs = processor(text=description, padding=True, return_tensors="pt")
    with torch.no_grad():
        audio_values = model.generate(**inputs, max_new_tokens=256)
    # Gradio's Audio component expects a (sample_rate, numpy_array) tuple.
    return sampling_rate, audio_values[0, 0].cpu().numpy()

demo = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(label="Enter Text to Convert to Audio"),
    outputs=gr.Audio(label="Generated Audio"),
)

demo.launch()