diff --git "a/AdFQT4oBgHgl3EQfMTYr/content/tmp_files/load_file.txt" "b/AdFQT4oBgHgl3EQfMTYr/content/tmp_files/load_file.txt" new file mode 100644--- /dev/null +++ "b/AdFQT4oBgHgl3EQfMTYr/content/tmp_files/load_file.txt" @@ -0,0 +1,754 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf,len=753 +page_content='A R C H I S O U N D : A U D I O G E N E R AT I O N W I T H D I F F U S I O N flavio schneider Master’s Thesis Supervised by Zhijing Jin, Prof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content=' Bernhard Schölkopf ETH Zurich January 2023 A B S T R A C T The recent surge in popularity of diffusion models for image gener- ation has brought new attention to the potential of these models in other areas of media generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content=' One area that has yet to be fully ex- plored is the application of diffusion models to audio generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content=' Au- dio generation requires an understanding of multiple aspects, such as the temporal dimension, long term structure, multiple layers of overlapping sounds, and the nuances that only trained listeners can detect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content=' In this work, we investigate the potential of diffusion models for audio generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content=' We propose a set of models to tackle multiple aspects, including a new method for text-conditional latent audio dif- fusion with stacked 1D U-Nets, that can generate multiple minutes of music from a textual description.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content=' For each model, we make an effort to maintain reasonable inference speed, targeting real-time on a single consumer GPU.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content=' In addition to trained models, we provide a collection of open source libraries with the hope of simplifying future work in the field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content=' Samples can be found at bit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content='ly/audio-diffusion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content=' iii C O N T E N T S 1 introduction 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content='1 Audio Generation 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content='2 Challenges 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AdFQT4oBgHgl3EQfMTYr/content/2301.13267v1.pdf'} +page_content='3 Existing Methods 2 1.' 
1.4 Research Questions
1.5 Contributions
1.5.1 Models
1.5.2 Libraries
1.6 Structure of the Thesis
2 audio representation
2.1 Desirable Properties
2.1.1 Compressibility
2.1.2 Decodability
2.1.3 Diffuseability
2.2 Waveform
2.3 Spectrograms
2.3.1 STFT
2.3.2 MEL
3 existing diffusion methods
3.1 DDPM-Diffusion
3.1.1 Noising (0 → t)
3.1.2 Denoising (t − 1 ← t)
3.1.3 Training Objective
3.1.4 Sampling
3.1.5 Limitations
3.2 DDIM
3.3 V-Diffusion
3.3.1 Noising (0 → σt)
3.3.2 Denoising (σt−1 ← σt)
3.3.3 Training Objective
3.3.4 Sampling (σ0 = 0 ← · · · ← σt−1 ← σt = 1)
4 architectures
4.1 Our a-unet Library
4.1.1 Background of U-Net
4.1.2 U-Net Block
4.1.3 Items
4.1.4 Plugins
4.2 Our audio-encoders-pytorch Library
5 models
5.1 Overview
5.2 Diffusion Unconditional Generator
5.2.1 Motivation
5.2.2 Method
5.2.3 Diffusion Method
5.2.4 Transforms
5.2.5 Usage
5.2.6 Evaluation
5.3 Text-conditional Diffusion
5.3.1 Motivation
5.3.2 Method
5.3.3 Evaluation
5.4 Diffusion Auto-Encoders with Latent Diffusion
5.4.1 Motivation
5.4.2 Method
5.4.3 Evaluation
5.5 Diffusion Upsampler
5.5.1 Motivation
5.5.2 Method
5.5.3 Evaluation
5.6 Diffusion Vocoder
5.6.1 Motivation
5.6.2 Method
5.6.3 Evaluation
5.7 Training Info
5.7.1 Data
5.7.2 Training
6 future work
7 conclusion
bibliography

1 introduction

Music is an art of time at the intersection of fine-grained perception and symbolic pattern recognition. In this work, we investigate the use of modern deep learning diffusion models to generate music, or more broadly audio, in order to gain a deeper understanding of this intersection.

1.1 audio generation

Audio generation refers to the process of automatically synthesizing novel waveforms using deep learning models. Audio generation has been commonly approached in two different ways: symbolically or at the waveform level. Symbolically generating audio involves creating a representation of the audio using symbols, such as MIDI data, which can then be converted into an audio waveform. This method is often easier to work with, but it can be difficult to capture all the nuanced details of a sound using symbols. Waveform-based audio generation, on the other hand, involves generating the raw audio waveform directly. This method is more complex, due to the sheer number of values that have to be generated per second, but it allows for a more precise and detailed representation of sound that includes all of its intricacies. Furthermore, audio generation can be unconditional or conditional. Unconditional models are trained only on audio data and are able to generate new samples without any additional input.
Conditional models, on the other hand, are trained on pairs of audio data and some kind of conditioning information, such as a text description, genre label, lyrics, speaker id, or some other description of the audio. At inference time, this conditioning information can be used to guide the generation of novel audio samples that match the desired characteristics. In this thesis, we will explore methods of conditional and unconditional waveform-level generation.

1.2 challenges

Multiple tradeoffs have to be considered when generating audio at the waveform level. To generate a single second of high-quality 48kHz stereo audio, 96000 values must be generated, which is comparable in size to a medium-resolution image. If the goal is to generate an entire song (hundreds of seconds) while maintaining high quality and a reasonable generation speed, the task becomes much more challenging. A common approach to generating long audio sequences is to do so in chunks; however, if the context length, i.e. the amount of audio that the model can consider at any given time, is not sufficient, the resulting structure may not be consistent over multiple seconds or minutes of generation.
A longer context may allow for more consistent coarse structure, but may also lead to lower overall quality of detail, or vice versa.

1.3 existing methods

In this section, we review some of the most well-known or influential waveform-based methods that have been developed to date. One of the pioneering waveform-level generation models is WaveNet (2016 [8]), a fully convolutional architecture that exploits dilated convolutions with various dilation factors in order to capture a large context. It is able to synthesize a few seconds of both speech and classical piano music at 16kHz. Jukebox (2020 [2]) uses multiple quantized autoencoders to discretize sounds at 3 different resolutions, followed by a cascade of transformer upsampler models to generate the quantized representations autoregressively. Jukebox is able to generate 44kHz music conditioned on lyrics, artists, and genres. The stack of transformers trades off generation speed for structure and quality. AudioLM (2022 [1]) uses a (residual) quantized autoencoder to compress the waveform into discrete tokens, together with a semantic encoder; a cascade of transformer decoders (semantic, coarse, fine) is then used to generate 16kHz audio continuations top-down from the semantic representation. Musika (2022) trains a set of 1D convolutional autoencoders to compress log-magnitude spectrograms, and a vocoder to reconstruct both phase and magnitude from the compressed representation using a 2D GAN discriminator; trained on sequential chunks of audio, it exploits this process autoregressively to generate longer sequences of 44kHz audio. This method has a limited context length, but is very efficient given the 1D structure of convolutions. Riffusion¹ (2022) fine-tunes the Stable Diffusion model [12] on 5s chunks of mel-spectrograms at 44kHz, and uses style transfer to generate multiple coherent concatenated images while conditioning on a textual description of the song. This method has a limited 5s context length, and trades off speed given the large 2D architecture, but works surprisingly well considering that the original model is trained on images, not audio.

1.4 research questions

Diffusion models have recently demonstrated exceptional capabilities in the field of image generation [11, 12], leading to an explosion of incredible AI-generated art². Iteratively removing small amounts of
noise from pure noise allows diffusion models to hallucinate novel samples that share common attributes with the data in the training set. Compared to GANs, diffusion models in the image domain don't suffer from training instability, scale well with parameter count, and have good mode coverage. As long as the training data can be progressively corrupted from a clean to a fully covered state, diffusion models have the potential to be applied to multiple domains to generate novel samples. This opens up a wide range of possibilities beyond image generation, including video and audio generation. In this thesis, we explore the potential of diffusion models for audio generation. We will explore whether diffusion models can be used on audio as effectively as on images. The aim is to generate high-quality 48kHz stereo audio as efficiently as possible and to control the generation in different ways, with a focus on text-conditional audio generation.

1 https://www.riffusion.com/about
2 https://www.midjourney.com/showcase/

1.5 contributions

1.5.1 Models

We introduce the following models, some of which are/will be accessible in the archisound library:

- Long: a latent diffusion model for text-conditional music generation that is capable of generating audio with an extended context of multiple minutes at 48kHz, targeting context length and structure (∼857M parameters).
- Crisp: a text-conditional audio generation diffusion model with a context of tens of seconds at 48kHz, targeting simplicity and high-quality waveforms (∼419M parameters).

- Upsampler: a diffusion model to upsample music from 3kHz to 48kHz (∼238M parameters).

- Vocoder: a diffusion model to reconstruct 48kHz waveforms from 80-channel mel-spectrograms of variable input length (∼178M parameters).

1.5.2 Libraries

Moreover, we open-source the following libraries, on which the previous models are based:

- archisound³, our library including trained models ready to use. This repository doesn't contain any modelling code, but acts as a wrapper and documentation for our models hosted on Huggingface⁴.

- audio-diffusion-pytorch⁵ (ADP), the main library including the proposed audio diffusion models. This library has both a-unet and audio-encoders-pytorch as dependencies. At the time of writing, this library has 550+ stars on GitHub and has been downloaded more than 50000 times on pip.

- a-unet⁶, a highly customizable library to build U-Net architectures in any dimension, extensible with multiple blocks and plugins. This library can be used for any type of grid data: 1D, 2D, 3D.

- audio-encoders-pytorch⁷ (AEP), a set of encoders and autoencoders for 1D data.

3 https://github.com/archinetai/archisound
4 https://huggingface.co/archinetai
5 https://github.com/archinetai/audio-diffusion-pytorch
6 https://github.com/archinetai/a-unet
7 https://github.com/archinetai/audio-encoders-pytorch

Some additional libraries we open-source that are not documented in this thesis, but might nevertheless be interesting to the reader, include: cqt-pytorch⁸ for invertible CQT spectrograms using NSGT, and bitcodes-pytorch⁹, a method for vector quantization into binary codes.

8 https://github.com/archinetai/cqt-pytorch
9 https://github.com/archinetai/bitcodes-pytorch

1.6 structure of the thesis

In Chapter 2, we present the various audio representations and provide a set of tradeoffs that must be considered when selecting an appropriate representation. In Chapter 3, we describe the general principles of diffusion and then delve into the specific diffusion methods that we have tested. In Chapter 4, we examine our custom architectures, including the U-Net and autoencoder, and provide detailed descriptions of each component and how they can be easily integrated into our library. In Chapter 5, we propose a range of diffusion models that combine the diffusion methods from Chapter 3 with our custom architectures from Chapter 4. Finally, in Chapters 6 and 7, we discuss potential future work and present our conclusions.

2 audio representation

In the following section, we introduce the different types of audio representation that we can choose from and compare their tradeoffs. Before that, we'll have a look at the desirable properties that should be considered.
2.1 desirable properties

2.1.1 Compressibility

We define compressibility as the approximate number of values per second needed for high-quality audio compared to the original waveform, and how many of them can be easily removed without a significant loss in fidelity, e.g. by applying a convolution-only autoencoder to the representation.

2.1.1.1 Perceptibility

Perceptibility describes how close the representation is to human hearing. This is important since, if we compress a representation that carries a lot of information we cannot perceive in the first place, we lose a lot of useful capacity. More specifically, humans hear sound in the range of frequencies from 20Hz to 20kHz, on a logarithmic scale, which means that the perceived frequency resolution decreases as we approach 20kHz.

2.1.2 Decodability

Decodability refers to how simple and fast it is to decode the given representation back to the waveform domain so that it can be played back.

2.1.3 Diffuseability

Diffuseability is the set of properties that are important in order for a diffusion model to be applicable.
In particular, (1) the values should be approximately in the range [−1, 1], (2) the signal should ideally have some inductive biases that can be exploited by the network (primarily 1D or 2D convolutional blocks), (3) the representation should be time-shift invariant if we are doing inpainting or autoregressive generation, i.e. it should look the same at different time steps for the same sound, and (4) the representation should not have too many values in the time dimension.

2.2 waveform

The waveform is the basic representation of audio, which we can diffuse directly: a tensor of shape [c, t], where c is the number of channels and t is the number of points used to represent the audio. If t = 48000 and we are representing audio at 48 kHz, then the tensor will contain 1 second of audio; if we want to generate multiple seconds of stereo audio, the number of values grows quickly (48000 × 2 per second). Waveforms can be processed with standard 1D convolutional networks and require no decoding step. The disadvantages are that long sequences are slow to diffuse, and that waveforms are a poor match for perceptibility: high frequencies vary rapidly over time, hence if we apply a standard L2 loss function on waveforms, high frequencies will be harder to reconstruct than low frequencies.

2.3 spectrograms

2.3.1 STFT

The STFT (short-time Fourier transform) converts audio into a complex-valued tensor of shape [c, f, t], where c is the number of channels, f is the number of frequencies, and t is the number of time frames; each column represents a small chunk of the original waveform, and the frequency axis is linearly scaled. We can consider the STFT as a 1D convolution with fixed filters composed of cosine waves for the real part and sine waves for the imaginary part, where a window function w is used to isolate the chunk of signal being transformed; in practice, the STFT can be computed efficiently using the FFT. The complex values can be converted into magnitude and phase, defined as mag(X(f, t)) := |X(f, t)| and phase(X(f, t)) := ∠X(f, t), a representation that can be mapped back to real and imaginary parts, and hence back to the waveform.

[Figure 2: Magnitude spectrogram and phase of a single channel STFT.]

Converting the transform into magnitude and phase disentangles the easy-to-compress from the hard-to-compress parts: magnitude spectrograms can be easily compressed up to 32× with very small loss in quality, while phase is very hard to compress, since a lot of randomness is still present in the details of the grid, and hence the inductive biases of spatial locality that convolutions exploit do not easily apply. Common practice is to discard the phase altogether and to train an additional model (called a vocoder) to predict the phase, or directly the waveform, from the magnitude information.

2.3.2 MEL

Mel spectrograms rescale the frequency axis of the magnitude spectrogram so that frequencies are aligned on a logarithmic scale, which is closer to human perception than a linearly scaled spectrogram, making it a very common choice in audio applications. The disadvantage of the mel scale is that it is not directly invertible: recovering the waveform from a mel-scaled magnitude spectrogram requires optimization-based inversion and phase reconstruction, or a vocoder model.

[Figure 3: MEL-scale spectrogram.]
3 existing diffusion methods

Diffusion models, first proposed in [3, 17], are most commonly implemented with a U-Net [7, 13] that is repeatedly called during inference for each denoising step. Since the same network is called multiple times during sampling, the weights are shared, making it a recurrent model. Since the data can be progressively corrupted from a clean to a fully covered state, we can use this trick to jump to any intermediate noise level and denoise a single step, backpropagating only once during training. From the perspective of recurrent models, (forward) diffusion allows us to recover the memory at an intermediate state (which we can see as the corrupted datapoint) without the need to backpropagate through the entire chain. This is a useful technique for efficiently generating intermediate states, and has the advantage that it can be highly parallelized during training. Compared to recurrent models, the memory state is predefined by the (noise) corruption process and not fully learned. Diffusion exploits very similar principles to autoregressive transformer models [19], namely a highly parallelizable training process and repeated network calls with weight sharing during sampling. Compared to other generative models like GANs, diffusion models are easier to train and don't suffer from instability problems arising from having to coordinate a generator and a discriminator.
Diffusion models are a category of powerful generative models first introduced in [17] (2015) and later popularized in [3] (2020), thanks to the impressive results obtained in image generation on CIFAR-10. In this section, we will examine different diffusion methods. First, the seminal DDPM [3] method, which involves training the diffusion process with a finite number of denoising steps. Following that, DDIM [18] introduces a few changes that generalize DDPM to an arbitrary number of steps. Then we will introduce V-diffusion from [16], a continuous diffusion method that aims to improve the mixing of the signal-to-noise ratio over DDIM. For DDPM and V-diffusion, we will highlight the most important operations, namely: (1) noising the original datapoint (signal) to a desired noise level, (2) denoising a single step with the use of our (trained) network, (3) the training objective used, and (4) a sampling technique that repeatedly applies (2).

3.1 ddpm-diffusion

DDPM [3] is one of the seminal works in diffusion models. The method starts by assuming that $x_0^{(0)}, \dots, x_0^{(D)}$ is a dataset of $D$ i.i.d.
points sampled from an unknown distribution $q(x_0)$ (the subscript indicates the noise level, up to a maximum of $T$).

[Figure: Diffusion training.]
[Figure: Diffusion inference.]

3.1.1 Noising (0 → t)

A single noising step is defined as

$$q(x_t \mid x_{t-1}) := \mathcal{N}\big(x_t \mid \mu_t = \sqrt{1-\beta_t}\, x_{t-1},\ \Sigma_t = \beta_t I\big)$$

In words, if we want to increase the noise level of our datapoint $x_{t-1}$ by one step to level $t$, we sample a normal distribution with mean and covariance dependent on the previous point and on some hyperparameters $\beta_1, \dots, \beta_T$, called the variance schedule, which control the increase in noise level from one step to the next. By the previous assumptions, we can derive $q(x_t \mid x_0)$, i.e. a way to directly jump from our clean datapoint $x_0$ to noise level $t$. This procedure is called the forward diffusion process, and the resulting distribution is formulated as:

$$q(x_t \mid x_0) := \mathcal{N}\big(x_t \mid \mu_t := \sqrt{\bar\beta_t}\, x_0,\ \Sigma_t := (1-\bar\beta_t) I\big) \tag{4}$$

where $\bar\beta_t := \prod_{i=1}^{t}(1-\beta_i)$ depends on all the $\beta$s selected in $q(x_t \mid x_{t-1})$. Using the reparameterization trick, it can be shown that this is also a normal distribution, from which we can easily sample $x_t$, the noisy version of $x_0$, as:

$$x_t = \sqrt{\bar\beta_t}\, x_0 + \sqrt{1-\bar\beta_t}\, \epsilon_t, \qquad \epsilon_t \sim \mathcal{N}(0, I) \tag{5}$$
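As a concrete illustration of Equations (4) and (5), here is a minimal PyTorch sketch of the forward jump; the linear variance schedule and the tensor shapes are assumptions for illustration, not the settings used by our models:

```python
import torch

T = 1000
beta = torch.linspace(1e-4, 0.02, T)         # assumed linear variance schedule beta_1..beta_T
beta_bar = torch.cumprod(1.0 - beta, dim=0)  # beta_bar_t = prod_{i<=t} (1 - beta_i)

def noise_to_level(x0, t):
    """Sample x_t ~ q(x_t | x_0) directly via Eq. (5), with t indexed 0..T-1 here."""
    eps = torch.randn_like(x0)               # eps ~ N(0, I)
    x_t = beta_bar[t].sqrt() * x0 + (1.0 - beta_bar[t]).sqrt() * eps
    return x_t, eps

x0 = torch.randn(1, 2, 48_000)               # stand-in batch: one second of stereo audio
x_t, eps = noise_to_level(x0, t=500)         # jump straight to an intermediate noise level
```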
3.1.2 Denoising (t - 1 ← t)

The reverse process distribution q(x_{t-1} | x_t) is also a normal distribution. However, it cannot be directly estimated, as it depends on the entire dataset. Instead, we train a neural network with parameters θ as an approximation:

p_θ(x_{t-1} | x_t) := N(x_{t-1} | μ_θ(x_t), Σ_θ(x_t))    (6)

If our model is trained properly then, similarly to the forward process, we can carry out a single denoising step by sampling the normal distribution using the learned mean and variance.

3.1.3 Training Objective

To train our model, we need a handle on the true mean and covariance of the reverse process q(x_{t-1} | x_t). As we have seen before, this is not directly tractable; however, if we include additional information about either x_0 (the true data point) or ε_t (the noise used to get x_t from x_0 in the forward process), we can compute a different but tractable auxiliary distribution. In the case where x_0 is given, the distribution is:

q(x_{t-1} | x_t, x_0) = N(x_{t-1} | μ̃(x_t, x_0), Σ̃(x_t, x_0))    (7)

with mean μ̃(x_t, x_0) := (√(1-β_t) (1 - ¯β_{t-1}) / (1 - ¯β_t)) x_t + (√(¯β_{t-1}) β_t / (1 - ¯β_t)) x_0 and covariance Σ̃(x_t, x_0) := ((1 - ¯β_{t-1}) / (1 - ¯β_t)) β_t I, as shown in [3]. To train our network, we then minimize the divergence between this tractable distribution and the distribution estimated with our model:
L_t := D_KL[ q(x_{t-1} | x_t, x_0) || p_θ(x_{t-1} | x_t) ]    (8)
    = E_{x_0}[ 1 / (2 ||Σ_θ(x_t)||²₂) · ||μ̃(x_t, x_0) - μ_θ(x_t)||²₂ ]    (9)

This amounts to a simple L2 loss between the auxiliary mean and the mean estimated by the model, with an extra scaling factor that depends on the covariance; in [3] the covariance is fixed to Σ_θ(x_t) = β_t I. A more rigorous argument using variational inference can be applied to show that this is a lower bound of the negative log-likelihood of the data distribution. More concretely, our model f_θ outputs an estimated mean given the noisy datapoint and the noise level as input, μ_θ(x_t) = f_θ(x_t, t), which we can then use to sample the next x_{t-1} from a normal distribution. If instead we assume ε_t is given, we can follow a similar procedure to get the loss L_t:

L_t := D_KL[ q(x_{t-1} | x_t, ε_t) || p_θ(x_{t-1} | x_t) ]    (10)
    = E[ β_t² / (2 (1-β_t) (1 - ¯β_t) ||Σ_θ(x_t)||²₂) · ||ε_t - ε_θ(x_t)||²₂ ]    (11)

In this case our model estimates the noise instead of the mean of the datapoint x_t, i.e. ε_θ(x_t) = f_θ(x_t, t); however, we can still recover the mean as μ̃ = (1/√(1-β_t)) (x_t - (β_t / √(1 - ¯β_t)) ε_t). Empirically, it has been shown in [3] that the objective can be simplified further by ignoring the scaling factor:

L_t = E_{ε_t}[ ||ε_t - ε_θ(x_t)||²₂ ]    (12)

The final objective function used to train the model is then computed with random noise levels t sampled from a uniform distribution:

L := E_{t∼U[1,T]}[ L_t ]    (13)
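A minimal sketch of this simplified objective, reusing the betas and beta_bar schedule from the noising sketch above; the model is assumed to take (x_t, t) and return the noise estimate:

import torch
import torch.nn.functional as F

def ddpm_loss(model, x0: torch.Tensor) -> torch.Tensor:
    # Simplified objective, equations (12)-(13): the network predicts the noise
    t = torch.randint(0, T, (x0.shape[0],))             # uniform random noise levels
    eps = torch.randn_like(x0)
    bb = beta_bar[t].view(-1, *([1] * (x0.dim() - 1)))  # broadcast over channels/length
    x_t = bb.sqrt() * x0 + (1.0 - bb).sqrt() * eps
    return F.mse_loss(model(x_t, t), eps)               # ||eps - eps_theta(x_t)||^2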
3.1.4 Sampling

Sampling in DDPM is very straightforward: we start with x_T ∼ N(0, I) and recursively call the model T times, using at each step the estimated mean μ_θ(x_t) (or noise ε_θ(x_t)) of the corresponding normal distribution to get each subsequent sample: x_{T-1} ∼ p_θ(· | x_T), ..., x_1 ∼ p_θ(· | x_2), x_0 ∼ p_θ(· | x_1), where x_0 will be our generated output data point. Note that this is a stochastic sampling process, since at each step additional noise is added from sampling the normal distribution.
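The full procedure in code, under the same assumptions as the previous sketches:

@torch.no_grad()
def ddpm_sample(model, shape) -> torch.Tensor:
    # Ancestral sampling: T stochastic denoising steps starting from pure noise
    x = torch.randn(shape)  # x_T ~ N(0, I)
    for t in reversed(range(T)):
        eps_hat = model(x, torch.full((shape[0],), t))
        # Mean recovered from the noise estimate (section 3.1.3)
        mean = (x - betas[t] / (1.0 - beta_bar[t]).sqrt() * eps_hat) / (1.0 - betas[t]).sqrt()
        noise = torch.randn_like(x) if t > 0 else torch.zeros_like(x)
        x = mean + betas[t].sqrt() * noise  # fixed covariance beta_t * I
    return x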
3.1.5 Limitations

This method requires on the order of hundreds of sampling steps to get good-quality samples. Compared to the more modern methods that follow, the number of steps T is a fixed hyperparameter both during training and sampling, limiting its flexibility.

3.2 ddim

DDIM [18] is another seminal work for diffusion models. By introducing a few changes to DDPM, the number of sampling steps used during inference can be changed dynamically while maintaining the same training procedure. This allows sampling 10x to 100x faster, and trading speed for quality at will. A direct implication of having a variable number of steps during sampling is that we can train with very large T, or even infinitely large T, leading to a continuous diffusion process. The idea of DDIM is that if we know both x_0 and x_t, we can use q(x_{t-1} | x_t, x_0) to sample x_{t-1}. There are two possibilities: either train our network to predict x_0 directly (i.e. no sampling), or train our network to predict the noise ε_t (as done in DDPM), which combined with x_t can be used to infer x_0. A key observation is that this alternative method doesn't change the training objective, as the objective only depends on the marginals q(x_t | x_0) of the diffusion process. Importantly, we can also use a different forward process to recover the next step, for example use q(x_{t-2} | x_t, x_0) to jump directly to x_{t-2} instead of x_{t-1}, essentially skipping a sampling step and speeding up the process. If we make the time-step continuous, we can jump to any intermediate step in [0, t). Even more interestingly, this continuous sampling procedure can be viewed through the lens of ordinary differential equations, allowing us to use a variety of existing samplers, from the basic Euler method to more advanced ODE samplers.

3.3 v-diffusion

V-diffusion, or v-objective diffusion [16], is a diffusion method inspired by DDIM, trained with a continuous value σ_t ∈ [0, 1]. This is the method we found to work best on a variety of audio tasks. In v-diffusion, if σ_t = 0 then x_σt represents a data point x from the data distribution, and if σ_t = 1 it will be Gaussian noise ε. In DDIM we can choose to use the model either to predict x_0 or to predict ε_t; here, instead, a velocity value v_σt is estimated, from which both x_0 and ε_σt can be inferred.

3.3.1 Noising (0 → σ_t)

Figure 6: V-diffusion semicircle.

The noising process uses a weighting on a circle:

x_σt = α_σt x_0 + β_σt ε    (14)

where α_σt := cos(φ_t), β_σt := sin(φ_t), and φ_t := (π/2) σ_t. When σ_t = 0 we have x_σt = x_0, i.e. no noise is added; if instead σ_t = 1, then x_σt = x_1 = ε, i.e. only noise ε ∼ N(0, I). Intuitively, the weighting on a circle makes sure that as we move σ_t linearly from 0 to 1, the noising process slowly removes information from x_0. By sampling a random σ_t ∈ [0, 1], we are more likely to pick a value that resembles x_0 instead of pure noise ε, meaning that the model will more often see data with smaller amounts of noise. Empirically, this has been shown to be beneficial over standard DDIM diffusion.
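A minimal sketch of the semicircle noising of equation (14); the helper name is ours, and the velocity it returns anticipates the training target derived in section 3.3.3:

import math
import torch

def v_noise_to_level(x0: torch.Tensor, sigma: float):
    # Noise x_0 to the continuous level sigma in [0, 1] on the semicircle, equation (14)
    phi = 0.5 * math.pi * sigma
    alpha, beta = math.cos(phi), math.sin(phi)
    eps = torch.randn_like(x0)
    x_sigma = alpha * x0 + beta * eps
    v = alpha * eps - beta * x0  # true velocity, used as training target in 3.3.3
    return x_sigma, v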
3.3.2 Denoising (σ_{t-1} ← σ_t)

To denoise from noise level σ_t to noise level σ_{t-1}, we can use our velocity-estimating model v̂_σt = f_θ(x_σt, σ_t). Note that the velocity is defined as the derivative v_σt := ∂x_σt/∂σ_t, i.e. how much the datapoint changes with a small change in the noise level σ_t (see Figure 6).
As mentioned before, using an estimate of v_σt we can obtain both x_0 and ε_σt, which in turn can be used to estimate x_{σ_{t-1}} in DDIM style:

v̂_σt = f_θ(x_σt, σ_t)    (15)
x̂_0 = α_σt x_σt - β_σt v̂_σt    (16)
ε̂_σt = β_σt x_σt + α_σt v̂_σt    (17)
x̂_{σ_{t-1}} = α_{σ_{t-1}} x̂_0 + β_{σ_{t-1}} ε̂_σt    (18)

The first three lines show how to recover the clean datapoint x_0 and the noise ε_σt from the velocity estimate, and the last line remixes the noise with the estimated datapoint at the lower noise level σ_{t-1}. These equations can be obtained formally by using trigonometric properties on the definition of velocity (as shown in the appendix of [16]), and understood intuitively by rearranging vectors on the semicircle.

3.3.3 Training Objective

By taking the derivative of the noising formulation, we can compute the true velocity v_σt = α_σt ε - β_σt x_0. The training objective is then:

L = E_{σ_t∼U[0,1]}[ ||v̂_σt - v_σt||²₂ ]    (19)
  = E_{σ_t∼U[0,1]}[ ||f_θ(x_σt, σ_t) - (α_σt ε - β_σt x_0)||²₂ ]    (20)

where ε ∼ N(0, I) and x_σt is computed according to the noising formulation.

3.3.4 Sampling (σ_0 = 0 ← · · · ← σ_{t-1} ← σ_t = 1)

To obtain a new data point x̂_0, some starting random noise ε ∼ N(0, I) is sampled, and the denoising procedure previously demonstrated is iteratively applied over a linear σ-schedule.
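Putting the four equations together, a minimal sampler sketch (the function and model names are ours; the linear sigma schedule follows the description above):

import math
import torch

@torch.no_grad()
def v_sample(model, shape, num_steps: int = 50) -> torch.Tensor:
    # DDIM-style v-diffusion sampling over a linear sigma schedule, equations (15)-(18)
    x = torch.randn(shape)  # pure noise, sigma = 1
    sigmas = torch.linspace(1.0, 0.0, num_steps + 1)
    for sigma, sigma_next in zip(sigmas[:-1], sigmas[1:]):
        phi, phi_next = 0.5 * math.pi * sigma, 0.5 * math.pi * sigma_next
        v_hat = model(x, torch.full((shape[0],), float(sigma)))           # (15)
        x0_hat = torch.cos(phi) * x - torch.sin(phi) * v_hat              # (16)
        eps_hat = torch.sin(phi) * x + torch.cos(phi) * v_hat             # (17)
        x = torch.cos(phi_next) * x0_hat + torch.sin(phi_next) * eps_hat  # (18)
    return x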
4 architectures

4.1 our a-unet library

4.1.1 Background of U-Nets

U-Nets are a type of convolutional architecture originally developed for image segmentation. U-Nets consist of an encoder network and a decoder network connected by skip connections, allowing the model to learn and preserve fine details at multiple resolutions. The original architecture used 2D convolutions to exploit the spatial structure of images; in our case we adapt it to 1D convolutions in order to process waveforms. Modern versions incorporate numerous enhancements and improvements, including new skip connections, convolutional blocks, and attention mechanisms.

4.1.2 Blocks

Figure 7: A-UNet block: core components of the architecture.

In order to build a generic U-Net block, it is necessary to identify the core components shared by the many variants of U-Nets (illustrated in Figure 7). The goal of a-unet is to provide the right level of abstraction, so that the basic template can be altered and experimentation and iteration remain simple. These components include a downsampling block that simultaneously reduces the resolution and the number of channels of the input (typically implemented with a single convolution), a stack of customizable processing items (see subsection 4.1.3 for details), an inner block that may contain another instance of the block recursively, a second stack of processing items that typically mirrors the first stack, an upsampling block that reverses the effects of the downsampling (typically implemented with a single transposed convolution), and a skip block that merges the skip connection using some operation.

Furthermore, we select three possible types of conditioning contexts that can be injected in the processing items, namely: a feature-vector based conditioning, typically used with diffusion to provide the noise level; an embedding based conditioning that injects multiple embedding vectors as context, typically used for text/CLIP-embedding based conditioning; and lastly a channel-based conditioning used to inject entire stacks of channels in the block. Depending on the task, we might need a different combination of conditioning methods. All described characteristics can be defined and customized using the following block:

from a_unet.apex import Block

block = Block(
    dim=1,
    in_channels=2,
    channels=4,
    factor=2,
    items=[...],
    # Optional
    items_up=[...],
    downsample_t=...,
    upsample_t=...,
    skip_t=...,
    inner_block=...,
)
This is a building block for a U-Net, where we can customize the number of input/output channels (in_channels), the number of channels post-downsampling (channels), and the downsampling factor (factor). The items list contains the different processing items, which are mirrored after the inner block unless items_up is provided. Optionally, we can change the type of skip connection, downsampling, and upsampling operations. The inner_block can be another instance of Block, used to recursively nest multiple blocks.

Since a U-Net is usually composed of multiple nested blocks, where the number of in_channels of the inner block must match the number of channels of the outer block, we provide XUNet as a glue class and XBlock as a template class for Block, to make this process more convenient and automated.
4.1.3 Items

The items list can contain any combination of the provided items, namely: a ResnetItem (R) for convolutional processing; a ModulationItem (M) to apply modulation of the channels using a provided feature vector, e.g. the noise level; an AttentionItem (A) for self-attention between region vectors; a CrossAttentionItem (C) for cross-attention between region vectors and a provided set of embedding vectors; a FeedForwardItem (F) for MLP-like processing of region vectors; and an InjectChannelsItem (I) for injecting a provided set of channels into a specific block. Common parameters, such as the skip connection type skip_t, can be provided once to the XUNet and are automatically forwarded to all blocks. For example, we can build a small U-Net with the item combination from Figure 8, or any other combination, as follows:

from a_unet.apex import (
    XUNet, XBlock,
    ResnetItem as R, ModulationItem as M,
    AttentionItem as A, CrossAttentionItem as C,
)

unet = XUNet(
    dim=1,
    in_channels=2,
    blocks=[
        XBlock(channels=16, factor=2, items=[R, M, A]),
        XBlock(channels=32, factor=2, items=[R, M, A, C]),
    ],
)

Figure 8: Example U-Net item combination.

Additional customized items can be easily included without altering the template code, making experimentation very simple.

4.1.4 Plugins

Plugins are used to augment the U-Net model with additional functionality. It's often the case that we have to wrap the U-Net model with some pre- and post-transformation, or that we have to alter or augment the inputs provided to the U-Net.
In order to maintain a modular structure, plugins can be used to directly modify the U-Net type without having to change the model code.

4.1.4.1 Time Conditioning Plugin

The time conditioning plugin is used to convert a floating point value to a conditioning feature vector; this is useful during diffusion to provide the current noise level, or timestep. To obtain the time feature vector from a floating point value, a learned weight is multiplied by the time information to get a frequency vector, which is then processed by a pair of sin and cos functions to get Fourier features. The Fourier features are then transformed into a learned feature vector of the desired size by a stack of MLPs. This function can be easily added to the base U-Net as:

UNetWithTime = TimeConditioningPlugin(UNet)

This extends the U-Net with an additional time parameter, which can be one or more floating point values for each batch element.
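A minimal sketch of this idea (the module and dimension names are ours, not the library's):

import torch
from torch import nn

class TimeFeatures(nn.Module):
    # Map a scalar noise level/timestep to a feature vector via Fourier features + MLP
    def __init__(self, dim: int = 256, num_freqs: int = 64):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(num_freqs))  # learned frequencies
        self.mlp = nn.Sequential(
            nn.Linear(2 * num_freqs, dim), nn.GELU(), nn.Linear(dim, dim),
        )

    def forward(self, t: torch.Tensor) -> torch.Tensor:  # t: [batch]
        freqs = 2 * torch.pi * t[:, None] * self.weight[None, :]  # [batch, num_freqs]
        fourier = torch.cat([freqs.sin(), freqs.cos()], dim=-1)   # Fourier features
        return self.mlp(fourier)                                  # [batch, dim]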
4.1.4.2 Embedding Classifier-Free Guidance Plugin

Classifier-free guidance is a method proposed in [4]. We provide a ClassifierFreeGuidancePlugin used to increase the conditioning strength of the provided embedding. During training, the embedding is masked with a fixed (learned) embedding with a small probability, in order to ensure that the network is able to generate realistic output without access to any conditioning information. During inference, the network is called twice: once with the conditioning embedding to get ŷ_e, and once with the fixed embedding used as mask to get ŷ_m. A scaling factor embedding_scale (λ) is then used to guide the network to produce an output that gives more or less importance to the conditioning embedding compared to the masked embedding:

ŷ = ŷ_m + (ŷ_e - ŷ_m) · λ    (22)

This plugin can be easily used by augmenting the U-Net as:

UNetCFG = ClassifierFreeGuidancePlugin(
    net_t=UNet,
    embedding_max_length=64,
)

Later, the new UNetCFG model can be called with the additional parameter embedding_mask_proba, to probabilistically mask a batch of embeddings during training (e.g. a value of 0.1 will mask 10% of the embeddings with a fixed embedding of length embedding_max_length), or with an embedding_scale parameter during inference, to call the U-Net twice with and without masking and apply the scaling factor. In both cases, the embedding parameter must be provided as well.

4.1.4.3 Text Conditioning

The text conditioning plugin augments the U-Net embedding conditioning information with a learned text embedding from a frozen pretrained language model. By default, the T5-base transformer model from [10] is used if no embedder is provided.

UNetWithText = TextConditioningPlugin(
    net_t=UNet,
    embedder=T5Embedder(),
)

This adds an additional text field to the U-Net forward method that automatically extends the embedding with text embeddings from the pretrained language model.
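Since each plugin wraps a U-Net type, they can in principle be composed; one possible nesting, under the assumption that each plugin accepts an already-wrapped type, is:

UNetFull = TextConditioningPlugin(
    net_t=ClassifierFreeGuidancePlugin(
        net_t=TimeConditioningPlugin(UNet),
        embedding_max_length=64,
    ),
    embedder=T5Embedder(),
)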
4.2 our audio-encoders-pytorch library

The autoencoder component has a similar structure to the U-Net, with a few changes: (1) no skip connections are used, to make it a proper autoencoder; (2) no attention blocks are used, to keep it generic to any input sequence length; and (3) no conditioning blocks are applied. We open-source the autoencoder library audio-encoders-pytorch (AEP) as a separate library from a-unet. AEP includes both encoders and decoders, and a set of bottlenecks that can be used to normalize the latent space, namely: (1) a variational bottleneck in the style of VAEs [5], (2) a simple tanh bottleneck, and (3) a quantizer bottleneck, similar to the one proposed by VQ-VAEs [9]. Furthermore, we propose two encoders that encode spectrograms channelwise into a 1D latent, namely ME1d (a magnitude-spectrogram-only encoder) and MelE1d (a mel-spectrogram encoder), both compatible with the different bottlenecks.
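As an illustration, a mel-spectrogram encoder with a tanh bottleneck might be assembled along these lines (the class names come from the library, but the constructor arguments shown here are assumptions, not the exact signature):

import torch
from audio_encoders_pytorch import MelE1d, TanhBottleneck

encoder = MelE1d(                 # mel spectrogram -> channelwise 1D latent
    in_channels=2,
    channels=512,
    out_channels=32,
    bottleneck=TanhBottleneck(),  # squashes the latent into [-1, 1]
)
# latent = encoder(torch.randn(1, 2, 2**18))  # [batch, out_channels, reduced_length]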
5 models

5.1 overview

In this section we describe various diffusion models and their underlying structures. We investigate diffusion models that serve different purposes and functions, including upsampling and autoencoding. Although these models may have distinct applications, they are ultimately utilized with the goal of audio generation. All of the different models are implemented using variations and combinations of the previously described architectures (i.e. U-Nets and autoencoders). The models proposed are implemented in the audio-diffusion-pytorch (ADP) library.

5.2 diffusion unconditional generator

The diffusion generator is the simplest model we propose to synthesize unconditional audio, and is implemented with a single 1D U-Net.

5.2.1 Motivation

The unconditional diffusion model is a good starting point to test the overall quality of the particular architecture and diffusion method used. It doesn't include any type of conditioning, making the dataset and training procedure very simple, and at the same time it can give a good idea of the generation quality.

5.2.2 Method

The diffusion generator takes a raw high-quality stereo audio source from the datasets as input, which is then corrupted to a random noise level based on the chosen diffusion method. Using a U-Net, the generator then predicts the output, which may be the denoised input or a value that is used to compute the denoised input, depending on the type of diffusion method employed. The noise level (usually called time or σ) is provided as conditioning to the network through an encoded feature vector, to tell the network how much noise must be removed from the provided input. For the diffusion generator, neither the embedding conditioning nor the cross-attention blocks are used.

5.2.3 Diffusion Method

We evaluated the performance of the proposed model with different diffusion methods. Out of the box, the model demonstrated good results with v-diffusion, even with high dynamic-range music, and around 50 sampling steps with a basic sampler during inference are enough to generate reasonable samples, trading off sampling speed and sample quality.
Figure 9: Diffusion model training.

Figure 10: Diffusion model inference. During inference, a random vector with the same shape as a training audio sample is drawn, and the U-Net is iteratively invoked with varying noise levels to generate a new plausible sample from the data distribution.

5.2.4 Transforms

Independently of the diffusion method used, this model without any addition struggles to generate more than a few seconds of sound. If the raw waveform is provided to the network, the initial convolutional blocks of the U-Net have to process huge samples: even a single second of high-quality 48kHz audio requires 48000 values to be processed by the first convolutional block. This can be a speed issue if the audio is not downsampled quickly enough in the U-Net, as the inefficiency will compound over the number of sampling steps of the diffusion process. In addition, if attention blocks are used, we have to downsample enough to make sure that the number of timesteps is in the range of 1024 or 2048 values; exceeding that will slow down self-attention drastically, due to its O(n²) computational complexity for sequence length n. Hence, a lot of downsampling is required with long audio samples if we want to satisfy these criteria.
To mitigate the challenges mentioned earlier, we investigate the use of various methods and audio transforms to convert the raw audio source into a representation that reduces the temporal dimension in exchange for additional channels.

5.2.4.1 Patching

The first transform is patching, proposed originally for the image domain in [6]. We adapt patching to the 1D domain, where the idea is to group sequential time steps into chunks that are then transposed to channels. Given a patch size p, the length t is reduced to t/p while the number of channels increases to c · p; at the end of the U-Net processing, the channels are unchunked back to the full length. We found patching to give drastic speedups, almost at a factor of p for p = 2, 4, 8, 16, 32, ..., allowing us to train models with much longer audio sources. However, even if the audio generation quality almost matches the non-patched version, audible aliasing is present with all factors. This drawback is likely due to the repeated unchunking process, which has a repeating structure, creating a high-frequency sine wave in the signal. Furthermore, we found that patching with p ⩾ 64 started to degrade quality, probably due to a capacity constraint in the channel dimension. We can think of patching as a deterministic auto-encoding process with a downsampling factor of p.
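The core of the 1D patching transform is a reshape; a minimal sketch using einops (our own illustration, not the library code):

import torch
from einops import rearrange

def patch(x: torch.Tensor, p: int) -> torch.Tensor:
    # [batch, c, t] -> [batch, c*p, t/p]: trade temporal length for channels
    return rearrange(x, "b c (l p) -> b (c p) l", p=p)

def unpatch(x: torch.Tensor, p: int) -> torch.Tensor:
    # Inverse transform, applied after the U-Net
    return rearrange(x, "b (c p) l -> b c (l p)", p=p)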
5.2.4.2 STFT

The second transform is the previously introduced STFT. We use the common setting of 1024 for the number of FFT bins and the window length, with a hop size of 256. By wrapping the U-Net between the STFT and the iSTFT, the transform downsamples the length of the audio by a large factor while correspondingly increasing the channel count. The STFT is implemented with the Fast Fourier Transform, hence it's efficient to apply. No normalization is required on the spectrogram, since the diffusion loss will still be applied on the reconstructed wave. This method gives great speedups thanks to the large downsampling but, similarly to patching, suffers from degradation in quality compared to the raw wave representation. Perceptible noise is present in the generations, both when transforming to magnitude+phase and when using real+imaginary parts.
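Wrapping the network between STFT and iSTFT can be sketched as follows (a simplified real+imaginary packing for a mono waveform; the 1024/256 setting matches the text, everything else is illustrative):

import torch

def stft_wrap(net, x: torch.Tensor) -> torch.Tensor:
    # Run `net` on a spectrogram representation of waveform x: [batch, time]
    win = torch.hann_window(1024)
    spec = torch.stft(x, n_fft=1024, hop_length=256, window=win, return_complex=True)
    chans = torch.cat([spec.real, spec.imag], dim=1)  # frequencies become channels
    out = net(chans)                                  # U-Net over [batch, 2*freqs, frames]
    f = out.shape[1] // 2
    spec_out = torch.complex(out[:, :f], out[:, f:])
    return torch.istft(spec_out, n_fft=1024, hop_length=256, window=win)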
5.2.4.3 Learned Transform

Lastly, we propose a learned transform, with a single convolutional block at the start of the U-Net and a single transposed-convolutional block at its end. The transform consists in using a large kernel size and a stride of 64. This downsamples the original signal in a single step, trading off small amounts of speed compared to the deterministic patching or the FFT-implemented STFT. However, since it's a convolutional method, we can choose the number of channels and increase it to a larger value than used during patching (e.g. 128, with double the kernel size and stride), giving the model more capacity to be resilient to artifacts. At the same time, we can use ideas from the STFT and have large overlapping windows with learned kernels instead of fixed sine/cosine waves (e.g. kernel size 128, stride 64, 64 channels, with padding to preserve the dimension), which can help to overcome aliasing. We found this to be the best quality/speed tradeoff method for pre-transforming audio.

5.2.5 Usage

The proposed diffusion generation model is constructed by first adding the LTPlugin to the default U-Net UNetV0. This plugin wraps the U-Net with the previously described learned transform. After that, we provide the U-Net type to the DiffusionModel class, which is responsible for constructing the U-Net, the diffusion training method (by default V-diffusion), and the diffusion sampler (by default DDIM).

import torch
from audio_diffusion_pytorch import DiffusionModel, UNetV0, LTPlugin, VDiffusion, VSampler

UNet = LTPlugin(
    UNetV0,
    num_filters=128,
    window_length=64,
    stride=64,
)

model = DiffusionModel(
    net_t=UNet,
    in_channels=2,  # stereo audio
    channels=[256, 256, 512, 512, 1024, 1024],
    factors=[1, 2, 2, 2, 2, 2],
    items=[2, 2, 2, 2, 4, 4],
    attentions=[0, 0, 0, 0, 1, 1],
    attention_features=64,
    attention_heads=12,
    diffusion_t=VDiffusion,
    sampler_t=VSampler,
)

This model can be easily used to get the diffusion loss for training (which automatically applies the entire diffusion process) or to sample a new element provided the starting noise.

# Training
x = torch.randn(1, 2, 2**21)  # [batch, channels, length]
loss = model(x)

# Sampling
noise = torch.randn(1, 2, 2**21)
sample = model.sample(noise, num_steps=50)
5.2.6 Evaluation

We found that it is important for quality to have a single non-downsampled block at the start, to process the transformed audio at full resolution. Furthermore, attention blocks are crucial for the temporal consistency of the generated audio, but can only be applied after the original waveform is downsampled to a length of around 1024-2048. For example, if the original audio has length 2^19 (i.e. ~11s at 48kHz), we downsample by 64 = 2^6 in the learned transform, and by 2^3 in the 4 blocks before the first attention block, hence the context length of the first attention block will be in the desired range of 2^10 = 1024. This model can generate high quality audio over tens of seconds, possibly more depending on the speed requirements. In general, a larger set of initial convolutional/resnet blocks (closer to the waveform) will result in better audio quality, at the cost of generation speed. We found that the architecture is able to generalize to longer samples than it was trained on, if attention blocks are used. The samples maintain good long-context awareness even when doubling the training length or more. Note that this increases the attention context size and hence needs to be accounted for before training.
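The context-length arithmetic can be checked directly; the values below mirror the configuration of Section 5.2.5 (variable names are ours):

length = 2**19                        # input waveform length, ~11s at 48kHz
transform_factor = 64                 # 2**6 from the learned transform
pre_attention_factors = [1, 2, 2, 2]  # U-Net factors before the first attention block

context = length // transform_factor
for factor in pre_attention_factors:
    context //= factor
print(context)  # 1024 = 2**10, inside the desired 1024-2048 range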
5.3 text-conditional diffusion

5.3.1 Motivation

We used text as a means of conditioning for several reasons. In Imagen [15] it has been shown that pretrained and frozen language models can be successfully applied to condition the diffusion process to generate images matching a textual description, and that increasing the size of the language model results in improved text-image matching. This hints at the fact that a similar method might also work for audio, while making the interface more generic and easy to use. We found text conditioning to work well to match audio with the textual description, especially when using the genre of the song or more generic words that are found in titles. We also tried text-to-speech, but found that the model is only able to mumble a few words; this is a common problem in TTS that is usually solved with the help of specialized alignment methods.

5.3.2 Method

We use a frozen T5 transformer encoder to encode the textual representation into an embedding, which is used to condition the diffusion model via cross-attention blocks. For music generation, we train on metadata, including the title, artist, album, and genre of the song, as well as which chunk of the song the model is trained on and how many total chunks the song is made of (e.g. 1 of 4). To make the conditioning more robust, we shuffle the list of metadata and drop each element randomly; 50% of the time we join the list with spaces, and the other 50% of the time we use commas. To increase the strength of the text conditioning, we apply classifier-free guidance [4]: during training, the text embedding is dropped with a fixed probability in favor of a learned embedding.
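At sampling time, the two predictions are combined with the standard formulation from [4]: denoting by ŷ_text the network output conditioned on the text embedding, by ŷ_fixed the output obtained with the learned fixed embedding, and by s the guidance scale (exposed as embedding_scale below), the guided output is

ŷ = ŷ_fixed + s · (ŷ_text − ŷ_fixed),

so that s = 1 recovers plain conditional sampling, while larger values strengthen the adherence to the text.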
Conditioning with T5 and CFG can be easily added to the model with the following additional arguments:

model = DiffusionModel(
    # ... same arguments as before ...
    use_text_conditioning=True,
    use_embedding_cfg=True,
    embedding_max_length=64,
    embedding_features=768,
    cross_attentions=[0, 0, 0, 0, 1, 1]
)

# Training
x = torch.randn(1, 2, 2**21)
loss = model(x, text=['a text'], embedding_mask_proba=0.1)

# Sampling
noise = torch.randn(1, 2, 2**21)
sample = model.sample(noise, text=['a text'], embedding_scale=5.0, num_steps=50)

5.4 diffusion auto-encoders with latent diffusion

5.4.1 Motivation

Patching, STFT, and learned transforms can be used to reduce the input length during the diffusion process. Those approaches are advantageous if we want to train a single model end-to-end; however, this is suboptimal, since the waveform is expanded to its original full-length shape multiple times during sampling, slowing down the process. A more appropriate way is to first encode the waveform, then run the diffusion loop in the compressed representation, never expanding it to the full waveform until the end of the loop. This is the idea proposed in latent diffusion [12], where a variational autoencoder is first used to compress images by a few factors to a smaller latent space, and diffusion is later applied to that latent. By compressing the audio before applying diffusion, we can drastically speed up the diffusion sampling procedure, making an important case for an efficient and good quality autoencoder.
5.4.2 Method

There are different ways to implement the autoencoder; however, an important property is that we must be able to apply the diffusion process to its latent space, hence some form of normalization is required to make sure the values are in the range [−1, 1]. Furthermore, the autoencoder should compress as much as possible without a significant loss in quality: the smaller the latent, the faster the inner diffusion model will be to process and generate. We experimented with different autoencoders, and found that directly compressing the waveform can only provide around 2x-4x compression without a significant loss in quality. On the other hand, as discussed in the representation section, compressing magnitude or mel spectrograms can provide much higher compression rates. The downside is that the spectrogram requires a model (vocoder) to reconstruct the original waveform, even from a non-compressed state. In this work, we propose to use a magnitude diffusion autoencoder: an encoder (ME1d) first encodes the waveform into a magnitude spectrogram, which is then encoded into a latent compressed 64x compared to the original waveform; a diffusion model later reconstructs the waveform conditioned on the latent, acting both as a deterministic compressing encoder and as a diffusion vocoder at the same time. To make sure the latent space is normalized, we use a tanh function on the bottleneck. Since the decoding/vocoding process is a diffusion model, the waveform can be quickly reconstructed from the latent by using a small step count; if instead a more accurate reconstruction is desired, a higher step count is required.
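A sketch of how such a diffusion autoencoder can be assembled with our libraries follows. The interface mirrors the one shown in Section 5.2.5; it is shown here with the mel-spectrogram encoder variant from audio-encoders-pytorch (the model described above uses the magnitude encoder ME1d from the same library), and the argument values are illustrative rather than the trained configuration:

import torch
from audio_diffusion_pytorch import DiffusionAE, UNetV0, VDiffusion, VSampler
from audio_encoders_pytorch import MelE1d, TanhBottleneck

autoencoder = DiffusionAE(
    encoder=MelE1d(  # spectrogram encoder with a tanh-normalized latent
        in_channels=2,
        channels=512,
        multipliers=[1, 1],
        factors=[2],
        num_blocks=[12],
        out_channels=32,
        mel_channels=80,
        mel_sample_rate=48000,
        mel_normalize_log=True,
        bottleneck=TanhBottleneck(),
    ),
    inject_depth=6,  # U-Net depth at which the latent is injected
    net_t=UNetV0,
    in_channels=2,
    channels=[8, 32, 64, 128, 256, 512, 512, 1024, 1024],
    factors=[1, 4, 4, 4, 2, 2, 2, 2, 2],
    items=[1, 2, 2, 2, 2, 2, 2, 4, 4],
    diffusion_t=VDiffusion,
    sampler_t=VSampler,
)

x = torch.randn(1, 2, 2**18)   # [batch, channels, length]
loss = autoencoder(x)          # trains the diffusion decoder

latent = autoencoder.encode(x)                     # compressed, normalized latent
sample = autoencoder.decode(latent, num_steps=10)  # few steps: fast; more steps: accurate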
The resulting model achieves real-time generation speed on a single GPU, supports a large context length, and has good text-audio binding.

5.4.3 Latent Generation

To get the final model, we apply a cascaded diffusion generator to produce the latent with text conditioning, in the style of latent diffusion, and use the diffusion decoder to expand the representation back to a waveform. Since the diffusion autoencoder is generic to any waveform length and the representation is highly compressed, we remove attention blocks and use a convolutional-only architecture for the decoder.

[Figure: Diffusion autoencoder training.]
[Figure: Diffusion autoencoder inference.]
[Figure: Two-stage diffusion generator with diffusion decoder.]

5.5 diffusion upsampler

5.5.1 Motivation

Upsamplers can be used for different purposes: (1) to generate low sample rate audio with a primary model and later upsample it with a second upsampler model, or (2) to increase the sample rate of existing audio.

5.5.2 Method

Diffusion upsamplers can be seen as a special case of diffusion autoencoders, where the encoding function is fixed to be the downsampling operation. From the perspective of spectrograms, downsampling a waveform corresponds to setting to zero the top half of the grid (or image), starting at some frequency. The model we propose, however, works directly on waveforms. During training, the provided waveform is downsampled (e.g. from 48kHz to 3kHz); during inference, we interpolate the low sample rate channels to match the 48kHz length, so that the input matches the output high sample rate channels, and append them as additional context to the input channels of the diffusion model, using the sampling process to reconstruct the missing content.

5.5.3 Evaluation

Depending on the complexity of the dataset, diffusion upsamplers can get very good results when upsampling anywhere between 16x and more. We found upsamplers to excel on speech data, as it is likely an easier task than music. Similarly to the other models, increasing the size (channel count or layers) of the initial convolutional blocks in the U-Net improves the reconstruction, especially if upsampling from very low sample rates; attention blocks and larger context lengths can also help.

[Figure: Diffusion upsampler training.]
[Figure: Diffusion upsampler inference.]
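In the library, this is exposed as an upsampler wrapper around the same U-Net. A minimal sketch follows (illustrative configuration, assuming the audio-diffusion-pytorch interface shown earlier):

import torch
from audio_diffusion_pytorch import DiffusionUpsampler, UNetV0, VDiffusion, VSampler

upsampler = DiffusionUpsampler(
    net_t=UNetV0,
    upsample_factor=16,  # e.g. 3kHz -> 48kHz
    in_channels=2,
    channels=[8, 32, 64, 128, 256, 512, 512, 1024, 1024],
    factors=[1, 4, 4, 4, 2, 2, 2, 2, 2],
    items=[1, 2, 2, 2, 2, 2, 2, 4, 4],
    diffusion_t=VDiffusion,
    sampler_t=VSampler,
)

# Training: the downsampling of the high sample rate input is applied internally
x = torch.randn(1, 2, 2**18)  # [batch, channels, length]
loss = upsampler(x)

# Inference: turn low sample rate audio into high sample rate audio
downsampled = torch.randn(1, 2, 2**18 // 16)
sample = upsampler.sample(downsampled, num_steps=10)  # output shape [1, 2, 2**18]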
5.6 diffusion vocoder

5.6.1 Motivation

Spectrograms compress away information that humans cannot perceive, making them an ideal representation for audio generation. However, properly turning a spectrogram back into a playable audio waveform is not trivial: classical methods tend to produce artifacts, making the case for the commonly used deep-learning based vocoders, which can produce very good results. Still, methods for high-quality 48kHz music vocoding are lacking. In the following section, we propose a simple adaptation that allows turning our U-Net architecture, with almost no change, into a high-quality music vocoder.

[Figure: Diffusion vocoder training.]
[Figure: Diffusion vocoder inference.]

5.6.2 Method

The diffusion vocoder is trained by first converting the waveform to a mel spectrogram, which is then flattened with a 1D transposed convolution back to the waveform shape. Similarly to the upsampler, we stack the additional channels on the input channels of the U-Net. In order to flatten the spectrogram, we have to match the configuration of the STFT used to obtain the spectrogram with the configuration of the 1D transposed convolution.
The key insight is that the STFT operation can be viewed as a 1D convolution of sine and cosine waves with large kernel sizes (the window size), which is then merged in place using the absolute value, and later mel-scaled. The mel-scaling does not alter the temporal positioning, only the frequency content (or channels) of the spectrogram. Hence, if we set kernel sizes equal to the STFT window length, strides equal to the STFT hop length, and proper padding, the transposed convolution will focus on the same context region of the waveform used to obtain the spectrogram. Similarly, we set the input channels of the transposed convolution to match the number of channels used for the mel spectrogram, and the output channels to 1; stereo audio is decoded by batching. We used a window-length/kernel-size of 1024 and a hop-length/stride of 256, and, similarly to popular vocoders, 80 mel-spectrogram channels. With this configuration, the spectrogram has a default 3.2x compression factor over the initial waveform.
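A minimal sketch of the flattening step (our own illustrative code, not the library module): the transposed convolution mirrors the STFT configuration, so each spectrogram frame is mapped back onto the waveform region it was computed from.

import torch
import torch.nn as nn

mel_channels, window_length, hop_length = 80, 1024, 256

to_waveform = nn.ConvTranspose1d(
    in_channels=mel_channels,   # one input channel per mel bin
    out_channels=1,             # mono; stereo is decoded by batching
    kernel_size=window_length,  # matches the STFT window length
    stride=hop_length,          # matches the STFT hop length
    padding=(window_length - hop_length) // 2,
)

spectrogram = torch.randn(1, mel_channels, 1024)  # [batch, mels, frames]
wave = to_waveform(spectrogram)                   # [1, 1, 1024 * 256]
# Spectrogram vs. waveform size: 256 hop / 80 channels = 3.2x compression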
5.6.3 Evaluation

This model can produce high quality waveforms; as with the other models, a good reconstruction of high frequencies requires more convolutional blocks towards the start of the U-Net. Moreover, we hypothesize that increasing the number of mel channels might increase quality for two reasons: first, the mel spectrogram would compress less information out of the initial waveform, and second, the transposed convolution would have more channels to flatten the spectrogram and hence more capacity.

5.7 training info

5.7.1 Data

We trained all of our models on a 2500h mix of audio at 48kHz. In the text-based model, we used metadata such as title, genre, album, and artist as conditioning information. For the autoencoder, upsampler, and vocoder, we trained on random crops of length 2^18 (~5.5s at 48kHz). For the long-context text-conditional audio generation model, we trained on fixed crops of length 2^21 (~44s at 48kHz), using the crop index as additional conditioning information.

5.7.2 Training

We trained all of our models with AdamW, using a learning rate of 10^-4, β1 = 0.95, β2 = 0.999, ε = 10^-6, and a weight decay of 10^-3. For all models, we used an exponential moving average with β = 0.995 and power of 0.7. We trained all models for around 1M steps with a batch size of 32; this takes approximately 1 week on a single A100 GPU for the largest, text-conditional model.
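For reference, this optimization setup corresponds to the following sketch; model and train_loader are placeholders, and we assume an EMA helper such as the ema-pytorch package, which exposes beta and power arguments:

import torch
from ema_pytorch import EMA  # assumed EMA helper with beta/power arguments

optimizer = torch.optim.AdamW(
    model.parameters(),  # 'model' is any of the diffusion models above
    lr=1e-4,
    betas=(0.95, 0.999),
    eps=1e-6,
    weight_decay=1e-3,
)
ema = EMA(model, beta=0.995, power=0.7)

for x in train_loader:   # batches of shape [32, 2, length]
    loss = model(x)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    ema.update()         # track the moving average of the weights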
6 future work

While our models can achieve good generation quality on short, few-second segments, or good structure with longer segments, training an efficient model with both high quality and long context remains an open problem. A few promising future modelling approaches that need more experimentation include: (1) train diffusion models using perceptual losses on the waveforms instead of L2, which might help to decrease the initial size of the U-Net, as we wouldn't have to process non-perceivable sounds; (2) stack multiple upsamplers to generate a song top-down, from low sample rates to high sample rates; (3) improve the quality of the diffusion autoencoder by using mel spectrograms instead of magnitude spectrograms as input; (4) explore other types of conditioning which are not text-based, which might be useful to navigate regions of the audio latent space that are hard to describe in words; DreamBooth-like models [14] could be used to assign symbols to sounds; (5) compress mel spectrograms to a quantized representation with diffusion autoencoders to allow for high compression ratios, and later train an autoregressive transformer on top of that.

Other, simpler improvements on the current models include: (1) increase the training data from 2.5k hours to 60k-100k hours; (2) use more sophisticated diffusion samplers to get higher quality for the same number of sampling steps; (3) for text-based models, use larger pretrained language models to obtain embeddings, which has been shown to be very important for quality in [15].
7 conclusion

Generating high-quality audio efficiently is a challenging task, as it involves the generation of numerous values to accurately represent the sound waves, especially when aiming for high-fidelity stereo sound at a sample rate of 48kHz. In this work, we proposed different methods and models to generate high quality audio from a textual description: from models targeting long-context audio with an emphasis on structure, to short-context models with an emphasis on quality, to other useful models such as the diffusion upsampler and vocoder. We introduced a new method that utilizes text-conditional diffusion models based on 1D U-Nets, allowing for the generation of multiple minutes of 48kHz audio on a single consumer GPU. Furthermore, we have provided a collection of open-source libraries to streamline future research, including potential improvements in audio autoencoders and diffusion models.

bibliography

[1] Zalán Borsos, Raphaël Marinier, Damien Vincent, Eugene Kharitonov, Olivier Pietquin, Matt Sharifi, Olivier Teboul, David Grangier, Marco Tagliasacchi, and Neil Zeghidour. AudioLM: a Language Modeling Approach to Audio Generation. 2022. eprint: arXiv:2209.03143.

[2] Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, and Ilya Sutskever. Jukebox: A Generative Model for Music. 2020. eprint: arXiv:2005.00341.

[3] Jonathan Ho, Ajay Jain, and Pieter Abbeel. "Denoising diffusion probabilistic models." In: Advances in Neural Information Processing Systems 33 (Dec. 2020), pp. 6840-6851.

[4] Jonathan Ho and Tim Salimans. Classifier-Free Diffusion Guidance. 2022. eprint: arXiv:2207.12598.
[5] Diederik P. Kingma and Max Welling. Auto-Encoding Variational Bayes. 2013. eprint: arXiv:1312.6114.

[6] Troy Luhman and Eric Luhman. Improving Diffusion Model Efficiency Through Patching. 2022. eprint: arXiv:2207.04316.

[7] Ozan Oktay et al. Attention U-Net: Learning Where to Look for the Pancreas. 2018. eprint: arXiv:1804.03999.

[8] Aaron van den Oord, Sander Dieleman, Heiga Zen, Karen Simonyan, Oriol Vinyals, Alex Graves, Nal Kalchbrenner, Andrew Senior, and Koray Kavukcuoglu. WaveNet: A Generative Model for Raw Audio. 2016. eprint: arXiv:1609.03499.
[9] Aaron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural Discrete Representation Learning. 2017. eprint: arXiv:1711.00937.

[10] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. 2019. eprint: arXiv:1910.10683.

[11] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical Text-Conditional Image Generation with CLIP Latents. 2022. eprint: arXiv:2204.06125.

[12] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-Resolution Image Synthesis with Latent Diffusion Models. 2021. eprint: arXiv:2112.10752.
[13] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional Networks for Biomedical Image Segmentation. 2015. eprint: arXiv:1505.04597.

[14] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. DreamBooth: Fine Tuning Text-to-Image Diffusion Models for Subject-Driven Generation. 2022. eprint: arXiv:2208.12242.

[15] Chitwan Saharia et al. Photorealistic Text-to-Image Diffusion Models with Deep Language Understanding. 2022. eprint: arXiv:2205.11487.

[16] Tim Salimans and Jonathan Ho. Progressive Distillation for Fast Sampling of Diffusion Models. 2022. eprint: arXiv:2202.00512.
[17] Jascha Sohl-Dickstein, Eric A. Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep Unsupervised Learning using Nonequilibrium Thermodynamics. 2015. eprint: arXiv:1503.03585.

[18] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising Diffusion Implicit Models. 2020. eprint: arXiv:2010.02502.

[19] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention Is All You Need. 2017. eprint: arXiv:1706.03762.