import gradio as gr  # was missing: `gr` was used below but never imported
from transformers import AutoProcessor, MusicgenForConditionalGeneration

# Load the processor and model once at import time so every request reuses
# the same weights instead of reloading them per call.
processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")


def generate(description):
    """Generate a short music clip from a free-text description.

    Args:
        description: Text prompt describing the desired audio
            (e.g. "lo-fi hip hop beat with rain sounds").

    Returns:
        A ``(sampling_rate, samples)`` tuple — the format ``gr.Audio``
        expects: sample rate in Hz and a 1-D numpy float array.
    """
    inputs = processor(text=[description], padding=True, return_tensors="pt")
    # Original bug: the function returned the *tokenized inputs* and indexed
    # a nonexistent 'audio' key instead of actually running the model.
    audio_values = model.generate(**inputs, max_new_tokens=256)
    # audio_values has shape (batch, channels, samples); take the first clip.
    sampling_rate = model.config.audio_encoder.sampling_rate
    return sampling_rate, audio_values[0, 0].cpu().numpy()


demo = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(label="Enter Text to Convert to Audio"),
    outputs=gr.Audio(label="Generated Audio"),
    # live=True (removed) would re-run generation on every keystroke;
    # MusicGen takes seconds per call, so trigger only on Submit.
)

demo.launch()