diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..52de427f0b1803f87bbdc82121692af2c38898dc 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +Amphion/imgs/vc/NoroVC.png filter=lfs diff=lfs merge=lfs -text +Amphion/imgs/vocoder/gan/MSSBCQTD.png filter=lfs diff=lfs merge=lfs -text +Amphion/models/codec/facodec/modules/JDC/bst.t7 filter=lfs diff=lfs merge=lfs -text +Amphion/models/tts/maskgct/g2p/sources/chinese_lexicon.txt filter=lfs diff=lfs merge=lfs -text +Amphion/models/tts/maskgct/wav/prompt.wav filter=lfs diff=lfs merge=lfs -text +Amphion/visualization/SingVisio/System_Introduction_of_SingVisio_V2.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/Amphion/.github/CODE_OF_CONDUCT.md b/Amphion/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..f079435304691a6fcddfaf7d7a9bd9d128188f52 --- /dev/null +++ b/Amphion/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,132 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. 
+Examples of representing our community include using an official email address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. 
+ +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/Amphion/.github/CONTRIBUTING.md b/Amphion/.github/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..0bae7e159fd619af03d86c0e8a78d23920851d55 --- /dev/null +++ b/Amphion/.github/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Welcome to the Amphion Community! + +We greatly appreciate your interest in contributing to Amphion. Your involvement plays a pivotal role in our collective growth, and we are dedicated to nurturing a cooperative and inclusive space for all contributors. To ensure a respectful and productive atmosphere, all contributors must adhere to the Amphion [Code of Conduct](CODE_OF_CONDUCT.md). + +## Contributions + +All kinds of contributions are welcome, including but not limited to: +- **Issue Reporting**: Report bugs or suggest features through GitHub Issues. +- **Bug Fixes**: Identify and rectify software issues to boost functionality. +- **Developing New Features**: Bring innovation and impactful enhancements to Amphion. +- **Implementing New Checkpoints**: Introduce checkpoints to optimize workflows. + +- **Recipe Contributions**: Share your unique and practical coding solutions. +- **Diverse Contributions**: Your participation isn't limited! Contribute to documentation, community support, and more. + +## How to Contribute +1. **Fork the Repository**: Start by forking the Amphion repository on GitHub. +2. **Clone Your Fork**: Localize your fork on your development machine. +3. **Create a Branch**: Initiate a new branch for your changes. +4. **Test Your Changes**: Ensure compatibility and non-disruption of your updates. +5. **Commit Your Changes**: Make small, focused commits with clear descriptions. +6. **Update Your Fork**: Upload your modifications to your GitHub fork. +7. **Open a Pull Request**: Suggest a pull request from your fork to the main Amphion repository with our [Pull Request Template](pull_request_template.md). +8. **Participate in Code Reviews**: Collaborate with reviewers and address their feedback. + +## Coding Standards +- **License Headers**: Each new code file should include license headers. +- **Style Consistency**: Align with the project's existing coding style. +- **Code Quality**: Aim for clarity, maintainability, and efficiency. +- **Clear Commenting**: Describe the purpose and usage of each function and other crucial code segments. +- **Code Formatting**: + - Install 'black' formatter: `pip install black`. + - Format files: `black file.py`. + - Format directories: `black directory/`. + +## Contributor Agreement +By contributing to Amphion, you agree to abide by our Code of Conduct, and the Developer Certificate of Origin, Version 1.1: + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +## Need Help? +For any queries or support, feel free to open an issue for community discussions and help. diff --git a/Amphion/.github/ISSUE_TEMPLATE/bug_report.md b/Amphion/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000000000000000000000000000000000..be96e34a27bc2206097e6c589c58b237cc81324a --- /dev/null +++ b/Amphion/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,32 @@ +--- +name: Bug report +about: Create a report to help us improve Amphion. +title: "[BUG]: " +labels: 'bug' +assignees: '' + +--- + +## Describe the bug +(A clear and concise description of what the bug is.) + +## How To Reproduce +Steps to reproduce the behavior: +1. Config/File changes: ... +2. Run command: ... +3. See error: ... + +## Expected behavior +(A clear and concise description of what you expected to happen.) + +## Screenshots +(If applicable, add screenshots to help explain your problem.) + +## Environment Information + - Operating System: [e.g. Ubuntu 20.04.5 LTS] + - Python Version: [e.g. Python 3.9.15] + - Driver & CUDA Version: [e.g. Driver 470.103.01 & CUDA 11.4] + - Error Messages and Logs: [If applicable, provide any error messages or relevant log outputs] + +## Additional context +(Add any other context about the problem here.) diff --git a/Amphion/.github/ISSUE_TEMPLATE/docs_feedback.md b/Amphion/.github/ISSUE_TEMPLATE/docs_feedback.md new file mode 100644 index 0000000000000000000000000000000000000000..a9f85e802b17ef33366abc19b732b94be8eeac71 --- /dev/null +++ b/Amphion/.github/ISSUE_TEMPLATE/docs_feedback.md @@ -0,0 +1,17 @@ +--- +name: Docs feedback +about: Improve documentation about Amphion. +title: "[Docs]: " +labels: 'documentation' +assignees: '' + +--- + +## Documentation Reference +(Path/Link to the documentation file) + +## Feedback on documentation +(Your suggestions to the documentation. e.g., accuracy, complex explanations, structural organization, practical examples, technical reliability, and consistency) + +## Additional context +(Add any other context or screenshots about the documentation here.) 
diff --git a/Amphion/.github/ISSUE_TEMPLATE/feature_request.md b/Amphion/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000000000000000000000000000000000..e1e07bb7b261394976b432950051913b99027eb0 --- /dev/null +++ b/Amphion/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for Amphion. +title: "[Feature]: " +labels: 'enhancement' +assignees: '' + +--- + +## Is your feature request related to a problem? Please describe. +(A clear and concise description of what the problem is.) + +## Describe the solution you'd like +(A clear and concise description of what you want to happen.) + +## Describe alternatives you've considered +(A clear and concise description of any alternative solutions or features you've considered.) + +## Additional context +(Add any other context or screenshots about the feature request here.) diff --git a/Amphion/.github/ISSUE_TEMPLATE/help_wanted.md b/Amphion/.github/ISSUE_TEMPLATE/help_wanted.md new file mode 100644 index 0000000000000000000000000000000000000000..621b6925ff88d1fee842a3fd1affd2db9d7f689e --- /dev/null +++ b/Amphion/.github/ISSUE_TEMPLATE/help_wanted.md @@ -0,0 +1,32 @@ +--- +name: Help wanted +about: Want help from Amphion team. +title: "[Help]: " +labels: 'help wanted' +assignees: '' + +--- + +## Problem Overview +(Briefly and clearly describe the issue you're facing and seeking help with.) + +## Steps Taken +(Detail your attempts to resolve the issue, including any relevant steps or processes.) +1. Config/File changes: ... +2. Run command: ... +3. See errors: ... + +## Expected Outcome +(A clear and concise description of what you expected to happen.) + +## Screenshots +(If applicable, add screenshots to help explain your problem.) + +## Environment Information + - Operating System: [e.g. Ubuntu 20.04.5 LTS] + - Python Version: [e.g. Python 3.9.15] + - Driver & CUDA Version: [e.g. Driver 470.103.01 & CUDA 11.4] + - Error Messages and Logs: [If applicable, provide any error messages or relevant log outputs] + +## Additional context +(Add any other context about the problem here.) diff --git a/Amphion/.github/pull_request_template.md b/Amphion/.github/pull_request_template.md new file mode 100644 index 0000000000000000000000000000000000000000..25894bb3d98408b19912ef773d0910ef6f082f28 --- /dev/null +++ b/Amphion/.github/pull_request_template.md @@ -0,0 +1,32 @@ + +## ✨ Description + +[Please describe the background, purpose, changes made, and how to test this PR] + +## 🚧 Related Issues + +[List the issue numbers related to this PR] + +## 👨💻 Changes Proposed + +- [ ] change1 +- [ ] ... + +## 🧑🤝🧑 Who Can Review? + +[Please use the '@' symbol to mention any community member who is free to review the PR once the tests have passed. Feel free to tag members or contributors who might be interested in your PR.] + +## 🛠 TODO + +- [ ] task1 +- [ ] ... 
+ +## ✅ Checklist + +- [ ] Code has been reviewed +- [ ] Code complies with the project's code standards and best practices +- [ ] Code has passed all tests +- [ ] Code does not affect the normal use of existing features +- [ ] Code has been commented properly +- [ ] Documentation has been updated (if applicable) +- [ ] Demo/checkpoint has been attached (if applicable) diff --git a/Amphion/.github/workflows/check_format.yml b/Amphion/.github/workflows/check_format.yml new file mode 100644 index 0000000000000000000000000000000000000000..4bd7600432976933b8ed71c53d0a35abcf83fdfd --- /dev/null +++ b/Amphion/.github/workflows/check_format.yml @@ -0,0 +1,12 @@ +name: Check Format + +on: [push, pull_request] + +jobs: + CheckCodeFormat: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: psf/black@stable + with: + options: "--check --diff --color" diff --git a/Amphion/.gitignore b/Amphion/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b288ee0203d4a98a92c7c9fc350878bbc713f758 --- /dev/null +++ b/Amphion/.gitignore @@ -0,0 +1,64 @@ +# Mac OS files +.DS_Store + +# IDEs +.idea +.vs +.vscode +.cache +pyrightconfig.json + +# GitHub files +.github + +# Byte-compiled / optimized / DLL / cached files +__pycache__/ +*.py[cod] +*$py.class +*.pyc +.temp +*.c +*.so +*.o + +# Developing mode +_*.sh +_*.json +*.lst +yard* +*.out +evaluation/evalset_selection +mfa +egs/svc/*wavmark +egs/svc/custom +egs/svc/*/dev* +egs/svc/dev_exp_config.json +egs/svc/dev +bins/svc/demo* +bins/svc/preprocess_custom.py +data +ckpts + +# Data and ckpt +*.pkl +*.pt +*.npy +*.npz +*.tar.gz +*.ckpt +*.wav +*.flac +pretrained/wenet/*conformer_exp +pretrained/bigvgan/args.json +!egs/tts/VALLE/prompt_examples/*.wav + +# Runtime data dirs +processed_data +data +model_ckpt +logs +*.lst +source_audio +result +conversion_results +get_available_gpu.py \ No newline at end of file diff --git a/Amphion/Dockerfile b/Amphion/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..d61b7890b8a9ed143019d63fbf1d8d3de21e06b4 --- /dev/null +++ b/Amphion/Dockerfile @@ -0,0 +1,64 @@ +# Copyright (c) 2023 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +# Other version: https://hub.docker.com/r/nvidia/cuda/tags +FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu18.04 + +ARG DEBIAN_FRONTEND=noninteractive +ARG PYTORCH='2.0.0' +ARG CUDA='cu118' +ARG SHELL='/bin/bash' +ARG MINICONDA='Miniconda3-py39_23.3.1-0-Linux-x86_64.sh' + +ENV LANG=en_US.UTF-8 PYTHONIOENCODING=utf-8 PYTHONDONTWRITEBYTECODE=1 CUDA_HOME=/usr/local/cuda CONDA_HOME=/opt/conda SHELL=${SHELL} +ENV PATH=$CONDA_HOME/bin:$CUDA_HOME/bin:$PATH \ + LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH \ + LIBRARY_PATH=$CUDA_HOME/lib64:$LIBRARY_PATH \ + CONDA_PREFIX=$CONDA_HOME \ + NCCL_HOME=$CUDA_HOME + +# Install ubuntu packages +RUN sed -i 's/archive.ubuntu.com/mirrors.cloud.tencent.com/g' /etc/apt/sources.list \ + && sed -i 's/security.ubuntu.com/mirrors.cloud.tencent.com/g' /etc/apt/sources.list \ + && rm /etc/apt/sources.list.d/cuda.list \ + && apt-get update \ + && apt-get -y install \ + python3-pip ffmpeg git less wget libsm6 libxext6 libxrender-dev \ + build-essential cmake pkg-config libx11-dev libatlas-base-dev \ + libgtk-3-dev libboost-python-dev vim libgl1-mesa-glx \ + libaio-dev software-properties-common tmux \ + espeak-ng + +# Install miniconda with python 3.9 +USER root +# COPY Miniconda3-py39_23.3.1-0-Linux-x86_64.sh /root/anaconda.sh +RUN wget -t 0 -c -O /tmp/anaconda.sh https://repo.anaconda.com/miniconda/${MINICONDA} \ + && mv /tmp/anaconda.sh /root/anaconda.sh \ + && ${SHELL} /root/anaconda.sh -b -p $CONDA_HOME \ + && rm /root/anaconda.sh + +RUN conda create -y --name amphion python=3.9.15 + +WORKDIR /app +COPY env.sh env.sh +RUN chmod +x ./env.sh + +RUN ["conda", "run", "-n", "amphion", "-vvv", "--no-capture-output", "./env.sh"] + +RUN conda init \ + && echo "\nconda activate amphion\n" >> ~/.bashrc + +CMD ["/bin/bash"] + +# *** Build *** +# docker build -t realamphion/amphion . + +# *** Run *** +# cd Amphion +# docker run --runtime=nvidia --gpus all -it -v .:/app -v /mnt:/mnt_host realamphion/amphion + +# *** Push and release *** +# docker login +# docker push realamphion/amphion diff --git a/Amphion/LICENSE b/Amphion/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..56a1593e357b8f3002751884a2c353fcf421c04b --- /dev/null +++ b/Amphion/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Amphion + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/Amphion/README.md b/Amphion/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ec753f83ec194f19f3f70c1404e9239f545d64a1 --- /dev/null +++ b/Amphion/README.md @@ -0,0 +1,192 @@ +# Amphion: An Open-Source Audio, Music, and Speech Generation Toolkit + +
+
+ for p in group["params"]:
+ state = self.state[p]
+ ...
+
+ you can do:
+
+ with self.batched_params(group["params"]) as batches:
+ for p, state, p_names in batches:
+ ...
+
+
+ Args:
+ group: a parameter group, which is a list of parameters; should be
+ one of self.param_groups.
+ group_params_names: the name of each parameter in the group,
+ given as a List[str].
+ """
+ batches = defaultdict(
+ list
+ ) # `batches` maps from tuple (dtype_as_str,*shape) to list of nn.Parameter
+ batches_names = defaultdict(
+ list
+ ) # `batches_names` maps from tuple (dtype_as_str,*shape) to list of str
+
+ assert len(param_group) == len(group_params_names)
+ for p, named_p in zip(param_group, group_params_names):
+ key = (str(p.dtype), *p.shape)
+ batches[key].append(p)
+ batches_names[key].append(named_p)
+
+ batches_names_keys = list(batches_names.keys())
+ sorted_idx = sorted(
+ range(len(batches_names)), key=lambda i: batches_names_keys[i]
+ )
+ batches_names = [batches_names[batches_names_keys[idx]] for idx in sorted_idx]
+ batches = [batches[batches_names_keys[idx]] for idx in sorted_idx]
+
+ stacked_params_dict = dict()
+
+ # turn batches into a list, in deterministic order.
+ # tuples will contain tuples of (stacked_param, state, stacked_params_names),
+ # one for each batch in `batches`.
+ tuples = []
+
+ for batch, batch_names in zip(batches, batches_names):
+ p = batch[0]
+ # we arbitrarily store the state in the
+ # state corresponding to the 1st parameter in the
+ # batch. The Optimizer base class will take care of saving/loading state.
+ state = self.state[p]
+ p_stacked = torch.stack(batch)
+ grad = torch.stack(
+ [torch.zeros_like(p) if p.grad is None else p.grad for p in batch]
+ )
+ p_stacked.grad = grad
+ key = (str(p.dtype), *p.shape)
+ stacked_params_dict[key] = p_stacked
+ tuples.append((p_stacked, state, batch_names))
+
+ yield tuples
+
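+ # After the `with` block, the optimizer has updated the stacked copies,
+ # not the real parameters, so copy each slice of every stacked tensor
+ # back into the corresponding original parameter.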
+ for (stacked_params, _state, _names), batch in zip(tuples, batches):
+ for i, p in enumerate(batch):
+ p.copy_(stacked_params[i])
+
+
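+# Illustrative note (not from the original source): batching groups parameters
+# by (dtype, shape). E.g. for two float32 weight matrices of shape (4, 4) and
+# their two bias vectors of shape (4,), `batched_params` yields one stacked
+# "fake" parameter of shape (2, 4, 4) and another of shape (2, 4), so each
+# update kernel runs once per shape instead of once per tensor.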
+class ScaledAdam(BatchedOptimizer):
+ """
+ Implements 'Scaled Adam', a variant of Adam where we scale each parameter's update
+ proportional to the norm of that parameter; and also learn the scale of the parameter,
+ in log space, subject to upper and lower limits (as if we had factored each parameter as
+ param = underlying_param * log_scale.exp())
+
+
+ Args:
+ params: The parameters or param_groups to optimize (like other Optimizer subclasses)
+ lr: The learning rate. We will typically use a learning rate schedule that starts
+ at 0.03 and decreases over time, i.e. much higher than other common
+ optimizers.
+ clipping_scale: (e.g. 2.0)
+ A scale for gradient-clipping: if specified, the normalized gradients
+ over the whole model will be clipped to have 2-norm equal to
+ `clipping_scale` times the median 2-norm over the most recent period
+ of `clipping_update_period` minibatches. By "normalized gradients",
+ we mean after multiplying by the rms parameter value for this tensor
+ [for non-scalars]; this is appropriate because our update is scaled
+ by this quantity.
+ betas: beta1,beta2 are momentum constants for regular momentum, and moving sum-sq grad.
+ Must satisfy 0 < beta1 <= beta2 < 1.
+ scalar_lr_scale: A scaling factor on the learning rate, that we use to update the
+ scale of each parameter tensor and scalar parameters of the model.
+ If each parameter were decomposed
+ as p * p_scale.exp(), where (p**2).mean().sqrt() == 1.0, scalar_lr_scale
+ would be the scaling factor on the learning rate of p_scale.
+ eps: A general-purpose epsilon to prevent division by zero
+ param_min_rms: Minimum root-mean-square value of parameter tensor, for purposes of
+ learning the scale on the parameters (we'll constrain the rms of each non-scalar
+ parameter tensor to be >= this value)
+ param_max_rms: Maximum root-mean-square value of parameter tensor, for purposes of
+ learning the scale on the parameters (we'll constrain the rms of each non-scalar
+ parameter tensor to be <= this value)
+ scalar_max: Maximum absolute value for scalar parameters (applicable if your
+ model has any parameters with numel() == 1).
+ size_update_period: The periodicity, in steps, with which we update the size (scale)
+ of the parameter tensor. This is provided to save a little time
+ in the update.
+ clipping_update_period: if clipping_scale is specified, this is the period,
+ in minibatches, with which the clipping threshold is re-estimated
+ from the median gradient norm.
+ """
+
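+ # A minimal usage sketch (illustrative; the toy model and shapes below
+ # are assumptions, not from the original source):
+ #
+ #     model = torch.nn.Linear(4, 4)
+ #     names = [[name for name, _ in model.named_parameters()]]
+ #     optimizer = ScaledAdam(
+ #         model.parameters(), lr=3e-02, clipping_scale=2.0,
+ #         parameters_names=names,
+ #     )
+ #     loss = model(torch.randn(8, 4)).pow(2).sum()
+ #     loss.backward()
+ #     optimizer.step()
+ #     optimizer.zero_grad()
+ #
+ # parameters_names must be a List[List[str]] with one inner list per
+ # parameter group; here a single group covers all of the model's parameters.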
+ def __init__(
+ self,
+ params,
+ lr=3e-02,
+ clipping_scale=None,
+ betas=(0.9, 0.98),
+ scalar_lr_scale=0.1,
+ eps=1.0e-08,
+ param_min_rms=1.0e-05,
+ param_max_rms=3.0,
+ scalar_max=10.0,
+ size_update_period=4,
+ clipping_update_period=100,
+ parameters_names=None,
+ show_dominant_parameters=True,
+ ):
+ assert parameters_names is not None, (
+ "Please prepare parameters_names, "
+ "which is a List[List[str]]. Each List[str] is for a group "
+ "and each str is for a parameter"
+ )
+ defaults = dict(
+ lr=lr,
+ clipping_scale=clipping_scale,
+ betas=betas,
+ scalar_lr_scale=scalar_lr_scale,
+ eps=eps,
+ param_min_rms=param_min_rms,
+ param_max_rms=param_max_rms,
+ scalar_max=scalar_max,
+ size_update_period=size_update_period,
+ clipping_update_period=clipping_update_period,
+ )
+
+ super(ScaledAdam, self).__init__(params, defaults)
+ assert len(self.param_groups) == len(parameters_names)
+ self.parameters_names = parameters_names
+ self.show_dominant_parameters = show_dominant_parameters
+
+ def __setstate__(self, state):
+ super(ScaledAdam, self).__setstate__(state)
+
+ @torch.no_grad()
+ def step(self, closure=None):
+ """Performs a single optimization step.
+
+ Arguments:
+ closure (callable, optional): A closure that reevaluates the model
+ and returns the loss.
+ """
+ loss = None
+ if closure is not None:
+ with torch.enable_grad():
+ loss = closure()
+
+
+ for group, group_params_names in zip(self.param_groups, self.parameters_names):
+ with self.batched_params(group["params"], group_params_names) as batches:
+ # batches is a list of tuples (stacked_param, state, param_names).
+ # stacked_param is like a regular parameter, and will have a .grad, but
+ # the 1st dim corresponds to a stacking dim, it is not a real dim.
+
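+ # On the very first step the state dict is still empty (it is filled
+ # by _init_state below), so no clipping statistics exist yet.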
+ if len(batches[0][1]) == 0:
+ clipping_scale = 1
+ else:
+ clipping_scale = self._get_clipping_scale(group, batches)
+
+ for p, state, _ in batches:
+ # Perform optimization step.
+ # grad is not going to be None, we handled that when creating the batches.
+ grad = p.grad
+ if grad.is_sparse:
+ raise RuntimeError(
+ "ScaledAdam optimizer does not support sparse gradients"
+ )
+ # State initialization
+ if len(state) == 0:
+ self._init_state(group, p, state)
+
+ self._step_one_batch(group, p, state, clipping_scale)
+
+ return loss
+
+ def _init_state(self, group: dict, p: Tensor, state: dict):
+ """
+ Initializes state dict for parameter 'p'. Assumes that dim 0 of tensor p
+ is actually the batch dimension, corresponding to batched-together
+ parameters of a given shape.
+
+
+ Args:
+ group: Dict to look up configuration values.
+ p: The parameter that we are initializing the state for
+ state: Dict from string to whatever state we are initializing
+ """
+ size_update_period = group["size_update_period"]
+
+ state["step"] = 0
+
+ kwargs = {"device": p.device, "dtype": p.dtype}
+
+ # 'delta' implements conventional momentum. There are
+ # several different kinds of update going on, so rather than
+ # compute "exp_avg" like in Adam, we store and decay a
+ # parameter-change "delta", which combines all forms of
+ # update. this is equivalent to how it's done in Adam,
+ # except for the first few steps.
+ state["delta"] = torch.zeros_like(p, memory_format=torch.preserve_format)
+
+ batch_size = p.shape[0]
+ numel = p.numel() // batch_size # per-parameter numel; dim 0 is the batch dim
+
+ if numel > 1:
+ # "param_rms" just periodically records the scalar root-mean-square value of
+ # the parameter tensor.
+ # it has a shape like (batch_size, 1, 1, 1, 1)
+ param_rms = (p**2).mean(dim=list(range(1, p.ndim)), keepdim=True).sqrt()
+ state["param_rms"] = param_rms
+
+ state["scale_exp_avg_sq"] = torch.zeros_like(param_rms)
+ state["scale_grads"] = torch.zeros(
+ size_update_period, *param_rms.shape, **kwargs
+ )
+
+ # exp_avg_sq is the weighted sum of squared gradients, as in Adam.
+ state["exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)
+
+ def _get_clipping_scale(
+ self, group: dict, tuples: List[Tuple[Tensor, dict, List[str]]]
+ ) -> float:
+ """
+ Returns a scalar factor <= 1.0 that dictates gradient clipping, i.e. we will scale the gradients
+ by this amount before applying the rest of the update.
+
+ Args:
+ group: the parameter group, an item in self.param_groups
+ tuples: a list of tuples of (param, state, param_names)
+ where param is a batched set of parameters,
+ with a .grad (1st dim is batch dim)
+ and state is the state-dict where optimization parameters are kept.
+ param_names is a List[str], where each str is the name of a parameter
+ in the batched set of parameters "param".
+ """
+ assert len(tuples) >= 1
+ clipping_scale = group["clipping_scale"]
+ (first_p, first_state, _) = tuples[0]
+ step = first_state["step"]
+ if clipping_scale is None or step == 0:
+ # no clipping. return early on step == 0 because the other
+ # parameters' state won't have been initialized yet.
+ return 1.0
+ clipping_update_period = group["clipping_update_period"]
+
+ tot_sumsq = torch.tensor(0.0, device=first_p.device)
+ for p, state, param_names in tuples:
+ grad = p.grad
+ if grad.is_sparse:
+ raise RuntimeError(
+ "ScaledAdam optimizer does not support sparse gradients"
+ )
+ if p.numel() == p.shape[0]: # a batch of scalars
+ tot_sumsq += (grad**2).sum() # sum() to change shape [1] to []
+ else:
+ tot_sumsq += ((grad * state["param_rms"]) ** 2).sum()
+
+ tot_norm = tot_sumsq.sqrt()
+ if "model_norms" not in first_state:
+ first_state["model_norms"] = torch.zeros(
+ clipping_update_period, device=p.device
+ )
+ first_state["model_norms"][step % clipping_update_period] = tot_norm
+
+ if step % clipping_update_period == 0:
+ # Print some stats.
+ # We don't reach here if step == 0 because we would have returned
+ # above.
+ sorted_norms = first_state["model_norms"].sort()[0].to("cpu")
+ quartiles = []
+ for n in range(0, 5):
+ index = min(
+ clipping_update_period - 1,
+ (clipping_update_period // 4) * n,
+ )
+ quartiles.append(sorted_norms[index].item())
+
+ median = quartiles[2]
+ threshold = clipping_scale * median
+ first_state["model_norm_threshold"] = threshold
+ percent_clipped = (
+ first_state["num_clipped"] * 100.0 / clipping_update_period
+ if "num_clipped" in first_state
+ else 0.0
+ )
+ first_state["num_clipped"] = 0
+ quartiles = " ".join(["%.3e" % x for x in quartiles])
+ logging.info(
+ f"Clipping_scale={clipping_scale}, grad-norm quartiles {quartiles}, "
+ f"threshold={threshold:.3e}, percent-clipped={percent_clipped:.1f}"
+ )
+
+ if step < clipping_update_period:
+ return 1.0 # We have not yet estimated a norm to clip to.
+ else:
+ try:
+ model_norm_threshold = first_state["model_norm_threshold"]
+ except KeyError:
+ logging.info(
+ "Warning: model_norm_threshold not in state: possibly "
+ "you changed the config when restarting, adding the clipping_scale option?"
+ )
+ return 1.0
+ ans = min(1.0, (model_norm_threshold / (tot_norm + 1.0e-20)).item())
+ if ans < 1.0:
+ first_state["num_clipped"] += 1
+ if ans < 0.1:
+ logging.warning(
+ f"Scaling gradients by {ans}, model_norm_threshold={model_norm_threshold}"
+ )
+ if self.show_dominant_parameters:
+ assert p.shape[0] == len(param_names)
+ self._show_gradient_dominating_parameter(tuples, tot_sumsq)
+ return ans
+
+ def _show_gradient_dominating_parameter(
+ self, tuples: List[Tuple[Tensor, dict, List[str]]], tot_sumsq: Tensor
+ ):
+ """
+ Show information about the parameter which dominates tot_sumsq.
+
+ Args:
+ tuples: a list of tuples of (param, state, param_names)
+ where param is a batched set of parameters,
+ with a .grad (1st dim is batch dim)
+ and state is the state-dict where optimization parameters are kept.
+ param_names is a List[str], where each str is the name of a parameter
+ in the batched set of parameters "param".
+ tot_sumsq: sumsq of all parameters. Though it could be calculated
+ from tuples, we still pass it in to save some time.
+ """
+ all_sumsq_orig = {}
+ for p, state, batch_param_names in tuples:
+ # p is a stacked batch parameters.
+ batch_grad = p.grad
+ if p.numel() == p.shape[0]: # a batch of scalars
+ batch_sumsq_orig = batch_grad**2
+ # Dummy values used by the following `zip` statement.
+ batch_rms_orig = torch.ones(p.shape[0])
+ else:
+ batch_rms_orig = state["param_rms"]
+ batch_sumsq_orig = ((batch_grad * batch_rms_orig) ** 2).sum(
+ dim=list(range(1, batch_grad.ndim))
+ )
+
+ for name, sumsq_orig, rms, grad in zip(
+ batch_param_names, batch_sumsq_orig, batch_rms_orig, batch_grad
+ ):
+ proportion_orig = sumsq_orig / tot_sumsq
+ all_sumsq_orig[name] = (proportion_orig, sumsq_orig, rms, grad)
+
+ assert torch.isclose(
+ sum([value[0] for value in all_sumsq_orig.values()]).cpu(),
+ torch.tensor(1.0),
+ )
+ sorted_by_proportion = {
+ k: v
+ for k, v in sorted(
+ all_sumsq_orig.items(),
+ key=lambda item: item[1][0],
+ reverse=True,
+ )
+ }
+ dominant_param_name = next(iter(sorted_by_proportion))
+ (
+ dominant_proportion,
+ dominant_sumsq,
+ dominant_rms,
+ dominant_grad,
+ ) = sorted_by_proportion[dominant_param_name]
+ logging.info(
+ f"Parameter Dominanting tot_sumsq {dominant_param_name}"
+ f" with proportion {dominant_proportion:.2f},"
+ f" where dominant_sumsq=(grad_sumsq*orig_rms_sq)"
+ f"={dominant_sumsq:.3e},"
+ f" grad_sumsq = {(dominant_grad**2).sum():.3e},"
+ f" orig_rms_sq={(dominant_rms**2).item():.3e}"
+ )
+
+ def _step_one_batch(
+ self, group: dict, p: Tensor, state: dict, clipping_scale: float
+ ):
+ """
+ Do the step for one parameter, which is actually going to be a batch of
+ `real` parameters, with dim 0 as the batch dim.
+ Args:
+ group: dict to look up configuration values
+ p: parameter to update (actually multiple parameters stacked together
+ as a batch)
+ state: state-dict for p, to look up the optimizer state
+ clipping_scale: scale factor on the gradients, computed by
+ _get_clipping_scale; 1.0 means no clipping
+ """
+ lr = group["lr"]
+ size_update_period = group["size_update_period"]
+ beta1 = group["betas"][0]
+
+ grad = p.grad
+ if clipping_scale != 1.0:
+ grad = grad * clipping_scale
+ step = state["step"]
+ delta = state["delta"]
+
+ delta.mul_(beta1)
+ batch_size = p.shape[0]
+ numel = p.numel() // batch_size
+ if numel > 1:
+ # Update the size/scale of p, and set param_rms
+ scale_grads = state["scale_grads"]
+ scale_grads[step % size_update_period] = (p * grad).sum(
+ dim=list(range(1, p.ndim)), keepdim=True
+ )
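+ # Why (p * grad).sum() is the gradient w.r.t. the log-scale: writing
+ # p = underlying_param * exp(c) with c the log-scale, we have dp/dc = p,
+ # so the chain rule gives dloss/dc = (grad * p).sum().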
+ if step % size_update_period == size_update_period - 1:
+ param_rms = state["param_rms"] # shape: (batch_size, 1, 1, ..)
+ param_rms.copy_(
+ (p**2).mean(dim=list(range(1, p.ndim)), keepdim=True).sqrt()
+ )
+ if step > 0:
+ # self._size_update() learns the overall scale on the
+ # parameter, by shrinking or expanding it.
+ self._size_update(group, scale_grads, p, state)
+
+ if numel == 1:
+ # For parameters with 1 element we just use regular Adam.
+ # Updates delta.
+ self._step_scalar(group, p, state)
+ else:
+ self._step(group, p, state)
+
+ state["step"] = step + 1
+
+ def _size_update(
+ self, group: dict, scale_grads: Tensor, p: Tensor, state: dict
+ ) -> None:
+ """
+ Called only when each member of the batch has numel > 1; this updates
+ the scale of the parameter.
+ If we imagine: p = underlying_param * scale.exp(), and we are doing
+ gradient descent on underlying param and on scale, this function does the update
+ on `scale`.
+
+ Args:
+ group: dict to look up configuration values
+ scale_grads: a tensor of shape (size_update_period, batch_size, 1, 1,...) containing
+ grads w.r.t. the scales.
+ p: The parameter to update
+ state: The state-dict of p
+ """
+
+ param_rms = state["param_rms"]
+ beta1, beta2 = group["betas"]
+ size_lr = group["lr"] * group["scalar_lr_scale"]
+ param_min_rms = group["param_min_rms"]
+ param_max_rms = group["param_max_rms"]
+ eps = group["eps"]
+ step = state["step"]
+ batch_size = p.shape[0]
+
+ size_update_period = scale_grads.shape[0]
+ # correct beta2 for the size update period: we will have
+ # faster decay at this level.
+ beta2_corr = beta2**size_update_period
+
+ scale_exp_avg_sq = state["scale_exp_avg_sq"] # shape: (batch_size, 1, 1, ..)
+ scale_exp_avg_sq.mul_(beta2_corr).add_(
+ (scale_grads**2).mean(dim=0), # mean over dim `size_update_period`
+ alpha=1 - beta2_corr,
+ ) # shape is (batch_size, 1, 1, ...)
+
+ # The 1st time we reach here is when size_step == 1.
+ size_step = (step + 1) // size_update_period
+ bias_correction2 = 1 - beta2_corr**size_step
+ # we don't bother with bias_correction1; this will help prevent divergence
+ # at the start of training.
+
+ denom = scale_exp_avg_sq.sqrt() + eps
+
+ scale_step = -size_lr * (bias_correction2**0.5) * scale_grads.sum(dim=0) / denom
+
+ is_too_small = param_rms < param_min_rms
+ is_too_large = param_rms > param_max_rms
+
+ # when the param gets too small, just don't shrink it any further.
+ scale_step.masked_fill_(is_too_small, 0.0)
+ # when it gets too large, stop it from getting any larger.
+ scale_step.masked_fill_(is_too_large, -size_lr * size_update_period)
+ delta = state["delta"]
+ # the factor of (1-beta1) relates to momentum.
+ delta.add_(p * scale_step, alpha=(1 - beta1))
+
+ def _step(self, group: dict, p: Tensor, state: dict):
+ """
+ This function does the core update of self.step(), in the case where the members of
+ the batch have more than 1 element.
+
+ Args:
+ group: A dict which will be used to look up configuration values
+ p: The parameter to be updated; its .grad supplies the gradient
+ state: The state-dict corresponding to parameter p
+
+ This function modifies p.
+ """
+ grad = p.grad
+ lr = group["lr"]
+ beta1, beta2 = group["betas"]
+ eps = group["eps"]
+ param_min_rms = group["param_min_rms"]
+ step = state["step"]
+
+ exp_avg_sq = state["exp_avg_sq"]
+ exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
+
+ this_step = state["step"] - (state["zero_step"] if "zero_step" in state else 0)
+ bias_correction2 = 1 - beta2 ** (this_step + 1)
+ if bias_correction2 < 0.99:
+ # note: not in-place.
+ exp_avg_sq = exp_avg_sq * (1.0 / bias_correction2)
+
+ denom = exp_avg_sq.sqrt()
+ denom += eps
+ grad = grad / denom
+
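+ # This is the "scaled" part of ScaledAdam: the step size of each tensor
+ # is proportional to its current rms value (clamped below by
+ # param_min_rms), so larger parameters take proportionally larger steps.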
+ alpha = -lr * (1 - beta1) * state["param_rms"].clamp(min=param_min_rms)
+
+ delta = state["delta"]
+ delta.add_(grad * alpha)
+ p.add_(delta)
+
+ def _step_scalar(self, group: dict, p: Tensor, state: dict):
+ """
+ A simplified form of the core update for scalar tensors, where we cannot get a good
+ estimate of the parameter rms.
+ """
+ beta1, beta2 = group["betas"]
+ scalar_max = group["scalar_max"]
+ eps = group["eps"]
+ lr = group["lr"] * group["scalar_lr_scale"]
+ grad = p.grad
+
+ exp_avg_sq = state["exp_avg_sq"] # shape: (batch_size,)
+ exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
+
+ # bias_correction2 is like in Adam. Don't bother with bias_correction1;
+ # slower update at the start will help stability anyway.
+ bias_correction2 = 1 - beta2 ** (state["step"] + 1)
+ denom = (exp_avg_sq / bias_correction2).sqrt() + eps
+
+ delta = state["delta"]
+ delta.add_(grad / denom, alpha=-lr * (1 - beta1))
+ p.clamp_(min=-scalar_max, max=scalar_max)
+ p.add_(delta)
diff --git a/Amphion/preprocessors/Emilia/README.md b/Amphion/preprocessors/Emilia/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..81c437bbdab1e803ebfdadbbbb232214c5fa2d09
--- /dev/null
+++ b/Amphion/preprocessors/Emilia/README.md
@@ -0,0 +1,230 @@
+# Emilia: An Extensive, Multilingual, and Diverse Speech Dataset for Large-Scale Speech Generation
+[Paper (arXiv)](https://arxiv.org/abs/2407.05361) [Dataset (Hugging Face)](https://huggingface.co/datasets/amphion/Emilia-Dataset) [Dataset (OpenDataLab)](https://opendatalab.com/Amphion/Emilia) [Code (GitHub)](https://github.com/open-mmlab/Amphion/tree/main/preprocessors/Emilia) [Demo Page](https://emilia-dataset.github.io/Emilia-Demo-Page/)
+
+This is the official repository 👑 for the **Emilia** dataset and the source code for the **Emilia-Pipe** speech data preprocessing pipeline.
+
diff --git a/Amphion/visualization/SingVisio/webpage/resources/d3-scale-chromatic.v1.min.js b/Amphion/visualization/SingVisio/webpage/resources/d3-scale-chromatic.v1.min.js
new file mode 100644
index 0000000000000000000000000000000000000000..3298de90cb6cbaf2dcfef437f2bcd3da42e7131a
--- /dev/null
+++ b/Amphion/visualization/SingVisio/webpage/resources/d3-scale-chromatic.v1.min.js
@@ -0,0 +1,2 @@
+// https://d3js.org/d3-scale-chromatic/ v1.5.0 Copyright 2019 Mike Bostock
+!function(f,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("d3-interpolate"),require("d3-color")):"function"==typeof define&&define.amd?define(["exports","d3-interpolate","d3-color"],e):e((f=f||self).d3=f.d3||{},f.d3,f.d3)}(this,function(f,e,d){"use strict";function a(f){for(var e=f.length/6|0,d=new Array(e),a=0;an?1:t>=n?0:NaN}function F(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function I(t,n){return t.style.getPropertyValue(n)||F(t).getComputedStyle(t,null).getPropertyValue(n)}function Y(t){return t.trim().split(/^|\s+/)}function B(t){return t.classList||new H(t)}function H(t){this._node=t,this._names=Y(t.getAttribute("class")||"")}function j(t,n){for(var e=B(t),r=-1,i=n.length;++r>8&15|n>>4&240,n>>4&15|240&n,(15&n)<<4|15&n,1)):(n=df.exec(t))?At(parseInt(n[1],16)):(n=vf.exec(t))?new Rt(n[1],n[2],n[3],1):(n=gf.exec(t))?new Rt(255*n[1]/100,255*n[2]/100,255*n[3]/100,1):(n=_f.exec(t))?Ct(n[1],n[2],n[3],n[4]):(n=yf.exec(t))?Ct(255*n[1]/100,255*n[2]/100,255*n[3]/100,n[4]):(n=mf.exec(t))?Lt(n[1],n[2]/100,n[3]/100,1):(n=xf.exec(t))?Lt(n[1],n[2]/100,n[3]/100,n[4]):bf.hasOwnProperty(t)?At(bf[t]):"transparent"===t?new Rt(NaN,NaN,NaN,0):null}function At(t){return new Rt(t>>16&255,t>>8&255,255&t,1)}function Ct(t,n,e,r){return r<=0&&(t=n=e=NaN),new Rt(t,n,e,r)}function zt(t){return t instanceof St||(t=Et(t)),t?(t=t.rgb(),new Rt(t.r,t.g,t.b,t.opacity)):new Rt}function Pt(t,n,e,r){return 1===arguments.length?zt(t):new Rt(t,n,e,null==r?1:r)}function Rt(t,n,e,r){this.r=+t,this.g=+n,this.b=+e,this.opacity=+r}function Lt(t,n,e,r){return r<=0?t=n=e=NaN:e<=0||e>=1?t=n=NaN:n<=0&&(t=NaN),new Dt(t,n,e,r)}function qt(t,n,e,r){return 1===arguments.length?function(t){if(t instanceof Dt)return new Dt(t.h,t.s,t.l,t.opacity);if(t instanceof St||(t=Et(t)),!t)return new Dt;if(t instanceof Dt)return t;var n=(t=t.rgb()).r/255,e=t.g/255,r=t.b/255,i=Math.min(n,e,r),o=Math.max(n,e,r),u=NaN,a=o-i,c=(o+i)/2;return a?(u=n===o?(e-r)/a+6*(e0&&(a=s-_),k<0?v=g-y:k>0&&(l=h-y),T=th,F.attr("cursor",uh.selection),o());break;default:return}$n()},!0).on("keyup.brush",function(){switch(t.event.keyCode){case 16:L&&(x=b=L=!1,o());break;case 18:T===eh&&(N<0?p=d:N>0&&(a=s),k<0?v=g:k>0&&(l=h),T=nh,o());break;case 32:T===th&&(t.event.altKey?(N&&(p=d-_*N,a=s+_*N),k&&(v=g-y*k,l=h+y*k),T=eh):(N<0?p=d:N>0&&(a=s),k<0?v=g:k>0&&(l=h),T=nh),F.attr("cursor",uh[M]),o());break;default:return}$n()},!0).on("mousemove.brush",e,!0).on("mouseup.brush",u,!0);_t(t.event.view)}Vn(),Pn(w),r.call(w),U.start()}}function a(){var t=this.__brush||{selection:null};return t.extent=s.apply(this,arguments),t.dim=n,t}var c,s=Gn,f=Zn,l=N(e,"start","brush","end"),h=6;return e.move=function(t,e){t.selection?t.on("start.brush",function(){i(this,arguments).beforestart().start()}).on("interrupt.brush end.brush",function(){i(this,arguments).end()}).tween("brush",function(){function t(t){u.selection=1===t&&Jn(s)?null:f(t),r.call(o),a.brush()}var o=this,u=o.__brush,a=i(o,arguments),c=u.selection,s=n.input("function"==typeof e?e.apply(this,arguments):e,u.extent),f=fn(c,s);return c&&s?t:t(1)}):t.each(function(){var t=arguments,o=this.__brush,u=n.input("function"==typeof e?e.apply(this,t):e,o.extent),a=i(this,t).beforestart();Pn(this),o.selection=null==u||Jn(u)?null:u,r.call(this),a.start().brush().end()})},o.prototype={beforestart:function(){return 1==++this.active&&(this.state.emitter=this,this.starting=!0),this},start:function(){return 
this.starting&&(this.starting=!1,this.emit("start")),this},brush:function(){return this.emit("brush"),this},end:function(){return 0==--this.active&&(delete this.state.emitter,this.emit("end")),this},emit:function(t){it(new function(t,n,e){this.target=t,this.type=n,this.selection=e}(e,t,n.output(this.state.selection)),l.apply,l,[t,this.that,this.args])}},e.extent=function(t){return arguments.length?(s="function"==typeof t?t:Xn([[+t[0][0],+t[0][1]],[+t[1][0],+t[1][1]]]),e):s},e.filter=function(t){return arguments.length?(f="function"==typeof t?t:Xn(!!t),e):f},e.handleSize=function(t){return arguments.length?(h=+t,e):h},e.on=function(){var t=l.on.apply(l,arguments);return t===l?e:t},e}function te(t){return function(){return t}}function ne(){this._x0=this._y0=this._x1=this._y1=null,this._=""}function ee(){return new ne}function re(t){return t.source}function ie(t){return t.target}function oe(t){return t.radius}function ue(t){return t.startAngle}function ae(t){return t.endAngle}function ce(){}function se(t,n){var e=new ce;if(t instanceof ce)t.each(function(t,n){e.set(n,t)});else if(Array.isArray(t)){var r,i=-1,o=t.length;if(null==n)for(;++i0){if(u>f)return;u>s&&(s=u)}if(u=i-a,l||!(u<0)){if(u/=l,l<0){if(u>f)return;u>s&&(s=u)}else if(l>0){if(u0)){if(u/=h,h<0){if(u0){if(u>f)return;u>s&&(s=u)}if(u=o-c,h||!(u<0)){if(u/=h,h<0){if(u>f)return;u>s&&(s=u)}else if(h>0){if(u0&&(t[0]=a+s*l,t[1]=c+s*h),f<1&&(n[0]=a+f*l,n[1]=c+f*h),!0}}}}}(c,s,t,n,e,r)?a&&(w.lineStart(),w.point(o,u),b=!1):(m||(w.lineStart(),w.point(c[0],c[1])),w.point(s[0],s[1]),a||w.lineEnd(),b=!1)}_=o,y=u,m=a}var f,l,h,p,d,v,_,y,m,x,b,w=u,M=Br(),T={point:c,lineStart:function(){T.point=s,l&&l.push(h=[]),x=!0,m=!1,_=y=NaN},lineEnd:function(){f&&(s(p,d),v&&m&&M.rejoin(),f.push(M.result())),T.point=c,m&&w.lineEnd()},polygonStart:function(){w=M,f=[],l=[],b=!0},polygonEnd:function(){var n=function(){for(var n=0,e=0,i=l.length;er&&(h-o)*(r-u)>(p-u)*(t-o)&&++n:p<=r&&(h-o)*(r-u)<(p-u)*(t-o)&&--n;return n}(),e=b&&n,i=(f=g(f)).length;(e||i)&&(u.polygonStart(),e&&(u.lineStart(),o(null,null,1,u),u.lineEnd()),i&&Xr(f,a,n,o,u),u.polygonEnd()),w=u,f=l=h=null}};return T}}function Kr(){pd.point=pd.lineEnd=Ge}function ti(t,n){Qp=t*=Cp,Jp=Fp(n*=Cp),Kp=Lp(n),pd.point=ni}function ni(t,n){t*=Cp;var e=Fp(n*=Cp),r=Lp(n),i=zp(t-Qp),o=Lp(i),u=r*Fp(i),a=Kp*e-Jp*r*o,c=Jp*e+Kp*r*o;hd.add(Rp(Yp(u*u+a*a),c)),Qp=t,Jp=e,Kp=r}function ei(t){return hd.reset(),tr(t,pd),+hd}function ri(t,n){return dd[0]=t,dd[1]=n,ei(vd)}function ii(t,n){return!(!t||!_d.hasOwnProperty(t.type))&&_d[t.type](t,n)}function oi(t,n){return 0===ri(t,n)}function ui(t,n){var e=ri(t[0],t[1]);return ri(t[0],n)+ri(n,t[1])<=e+Mp}function ai(t,n){return!!$r(t.map(ci),si(n))}function ci(t){return(t=t.map(si)).pop(),t}function si(t){return[t[0]*Cp,t[1]*Cp]}function fi(t,n,e){var r=f(t,n-Mp,e).concat(n);return function(t){return r.map(function(n){return[t,n]})}}function li(t,n,e){var r=f(t,n-Mp,e).concat(n);return function(t){return r.map(function(n){return[n,t]})}}function hi(){function t(){return{type:"MultiLineString",coordinates:n()}}function n(){return f(qp(o/_)*_,i,_).map(p).concat(f(qp(s/y)*y,c,y).map(d)).concat(f(qp(r/v)*v,e,v).filter(function(t){return zp(t%_)>Mp}).map(l)).concat(f(qp(a/g)*g,u,g).filter(function(t){return zp(t%y)>Mp}).map(h))}var e,r,i,o,u,a,c,s,l,h,p,d,v=10,g=v,_=90,y=360,m=2.5;return t.lines=function(){return 
n().map(function(t){return{type:"LineString",coordinates:t}})},t.outline=function(){return{type:"Polygon",coordinates:[p(o).concat(d(c).slice(1),p(i).reverse().slice(1),d(s).reverse().slice(1))]}},t.extent=function(n){return arguments.length?t.extentMajor(n).extentMinor(n):t.extentMinor()},t.extentMajor=function(n){return arguments.length?(o=+n[0][0],i=+n[1][0],s=+n[0][1],c=+n[1][1],o>i&&(n=o,o=i,i=n),s>c&&(n=s,s=c,c=n),t.precision(m)):[[o,s],[i,c]]},t.extentMinor=function(n){return arguments.length?(r=+n[0][0],e=+n[1][0],a=+n[0][1],u=+n[1][1],r>e&&(n=r,r=e,e=n),a>u&&(n=a,a=u,u=n),t.precision(m)):[[r,a],[e,u]]},t.step=function(n){return arguments.length?t.stepMajor(n).stepMinor(n):t.stepMinor()},t.stepMajor=function(n){return arguments.length?(_=+n[0],y=+n[1],t):[_,y]},t.stepMinor=function(n){return arguments.length?(v=+n[0],g=+n[1],t):[v,g]},t.precision=function(n){return arguments.length?(m=+n,l=fi(a,u,90),h=li(r,e,m),p=fi(s,c,90),d=li(o,i,m),t):m},t.extentMajor([[-180,-90+Mp],[180,90-Mp]]).extentMinor([[-180,-80-Mp],[180,80+Mp]])}function pi(t){return t}function di(){xd.point=vi}function vi(t,n){xd.point=gi,td=ed=t,nd=rd=n}function gi(t,n){md.add(rd*t-ed*n),ed=t,rd=n}function _i(){gi(td,nd)}function yi(t,n){kd+=t,Sd+=n,++Ed}function mi(){qd.point=xi}function xi(t,n){qd.point=bi,yi(ud=t,ad=n)}function bi(t,n){var e=t-ud,r=n-ad,i=Yp(e*e+r*r);Ad+=i*(ud+t)/2,Cd+=i*(ad+n)/2,zd+=i,yi(ud=t,ad=n)}function wi(){qd.point=yi}function Mi(){qd.point=Ni}function Ti(){ki(id,od)}function Ni(t,n){qd.point=ki,yi(id=ud=t,od=ad=n)}function ki(t,n){var e=t-ud,r=n-ad,i=Yp(e*e+r*r);Ad+=i*(ud+t)/2,Cd+=i*(ad+n)/2,zd+=i,Pd+=(i=ad*t-ud*n)*(ud+t),Rd+=i*(ad+n),Ld+=3*i,yi(ud=t,ad=n)}function Si(t){this._context=t}function Ei(t,n){Bd.point=Ai,Ud=Fd=t,Od=Id=n}function Ai(t,n){Fd-=t,Id-=n,Yd.add(Yp(Fd*Fd+Id*Id)),Fd=t,Id=n}function Ci(){this._string=[]}function zi(t){return"m0,"+t+"a"+t+","+t+" 0 1,1 0,"+-2*t+"a"+t+","+t+" 0 1,1 0,"+2*t+"z"}function Pi(t){return function(n){var e=new Ri;for(var r in t)e[r]=t[r];return e.stream=n,e}}function Ri(){}function Li(t,n,e){var r=t.clipExtent&&t.clipExtent();return t.scale(150).translate([0,0]),null!=r&&t.clipExtent(null),tr(e,t.stream(Nd)),n(Nd.result()),null!=r&&t.clipExtent(r),t}function qi(t,n,e){return Li(t,function(e){var r=n[1][0]-n[0][0],i=n[1][1]-n[0][1],o=Math.min(r/(e[1][0]-e[0][0]),i/(e[1][1]-e[0][1])),u=+n[0][0]+(r-o*(e[1][0]+e[0][0]))/2,a=+n[0][1]+(i-o*(e[1][1]+e[0][1]))/2;t.scale(150*o).translate([u,a])},e)}function Di(t,n,e){return qi(t,[[0,0],n],e)}function Ui(t,n,e){return Li(t,function(e){var r=+n,i=r/(e[1][0]-e[0][0]),o=(r-i*(e[1][0]+e[0][0]))/2,u=-i*e[0][1];t.scale(150*i).translate([o,u])},e)}function Oi(t,n,e){return Li(t,function(e){var r=+n,i=r/(e[1][1]-e[0][1]),o=-i*e[0][0],u=(r-i*(e[1][1]+e[0][1]))/2;t.scale(150*i).translate([o,u])},e)}function Fi(t,n){return+n?function(t,n){function e(r,i,o,u,a,c,s,f,l,h,p,d,v,g){var _=s-r,y=f-i,m=_*_+y*y;if(m>4*n&&v--){var x=u+h,b=a+p,w=c+d,M=Yp(x*x+b*b+w*w),T=We(w/=M),N=zp(zp(w)-1)1&&Vo(t[e[r-2]],t[e[r-1]],t[i])<=0;)--r;e[r++]=i}return e.slice(0,r)}function Zo(t){this._size=t,this._call=this._error=null,this._tasks=[],this._data=[],this._waiting=this._active=this._ended=this._start=0}function Go(t){if(!t._start)try{(function(t){for(;t._start=t._waiting&&t._active53)return null;"w"in u||(u.w=1),"Z"in 
u?(i=(o=(i=Lu(qu(u.y))).getUTCDay())>4||0===o?og.ceil(i):og(i),i=eg.offset(i,7*(u.V-1)),u.y=i.getUTCFullYear(),u.m=i.getUTCMonth(),u.d=i.getUTCDate()+(u.w+6)%7):(i=(o=(i=n(qu(u.y))).getDay())>4||0===o?qv.ceil(i):qv(i),i=Pv.offset(i,7*(u.V-1)),u.y=i.getFullYear(),u.m=i.getMonth(),u.d=i.getDate()+(u.w+6)%7)}else("W"in u||"U"in u)&&("w"in u||(u.w="u"in u?u.u%7:"W"in u?1:0),o="Z"in u?Lu(qu(u.y)).getUTCDay():n(qu(u.y)).getDay(),u.m=0,u.d="W"in u?(u.w+6)%7+7*u.W-(o+5)%7:u.w+7*u.U-(o+6)%7);return"Z"in u?(u.H+=u.Z/100|0,u.M+=u.Z%100,Lu(u)):n(u)}}function r(t,n,e,r){for(var i,o,u=0,a=n.length,c=e.length;u=c)return-1;if(37===(i=n.charCodeAt(u++))){if(i=n.charAt(u++),!(o=T[i in Mg?n.charAt(u++):i])||(r=o(t,e,r))<0)return-1}else if(i!=e.charCodeAt(r++))return-1}return r}var i=t.dateTime,o=t.date,u=t.time,a=t.periods,c=t.days,s=t.shortDays,f=t.months,l=t.shortMonths,h=Fu(a),p=Iu(a),d=Fu(c),v=Iu(c),g=Fu(s),_=Iu(s),y=Fu(f),m=Iu(f),x=Fu(l),b=Iu(l),w={a:function(t){return s[t.getDay()]},A:function(t){return c[t.getDay()]},b:function(t){return l[t.getMonth()]},B:function(t){return f[t.getMonth()]},c:null,d:ua,e:ua,f:la,H:aa,I:ca,j:sa,L:fa,m:ha,M:pa,p:function(t){return a[+(t.getHours()>=12)]},Q:Ya,s:Ba,S:da,u:va,U:ga,V:_a,w:ya,W:ma,x:null,X:null,y:xa,Y:ba,Z:wa,"%":Ia},M={a:function(t){return s[t.getUTCDay()]},A:function(t){return c[t.getUTCDay()]},b:function(t){return l[t.getUTCMonth()]},B:function(t){return f[t.getUTCMonth()]},c:null,d:Ma,e:Ma,f:Ea,H:Ta,I:Na,j:ka,L:Sa,m:Aa,M:Ca,p:function(t){return a[+(t.getUTCHours()>=12)]},Q:Ya,s:Ba,S:za,u:Pa,U:Ra,V:La,w:qa,W:Da,x:null,X:null,y:Ua,Y:Oa,Z:Fa,"%":Ia},T={a:function(t,n,e){var r=g.exec(n.slice(e));return r?(t.w=_[r[0].toLowerCase()],e+r[0].length):-1},A:function(t,n,e){var r=d.exec(n.slice(e));return r?(t.w=v[r[0].toLowerCase()],e+r[0].length):-1},b:function(t,n,e){var r=x.exec(n.slice(e));return r?(t.m=b[r[0].toLowerCase()],e+r[0].length):-1},B:function(t,n,e){var r=y.exec(n.slice(e));return r?(t.m=m[r[0].toLowerCase()],e+r[0].length):-1},c:function(t,n,e){return r(t,i,n,e)},d:Gu,e:Gu,f:ea,H:Ju,I:Ju,j:Qu,L:na,m:Zu,M:Ku,p:function(t,n,e){var r=h.exec(n.slice(e));return r?(t.p=p[r[0].toLowerCase()],e+r[0].length):-1},Q:ia,s:oa,S:ta,u:Bu,U:Hu,V:ju,w:Yu,W:Xu,x:function(t,n,e){return r(t,o,n,e)},X:function(t,n,e){return r(t,u,n,e)},y:$u,Y:Vu,Z:Wu,"%":ra};return w.x=n(o,w),w.X=n(u,w),w.c=n(i,w),M.x=n(o,M),M.X=n(u,M),M.c=n(i,M),{format:function(t){var e=n(t+="",w);return e.toString=function(){return t},e},parse:function(t){var n=e(t+="",Ru);return n.toString=function(){return t},n},utcFormat:function(t){var e=n(t+="",M);return e.toString=function(){return t},e},utcParse:function(t){var n=e(t,Lu);return n.toString=function(){return t},n}}}function Uu(t,n,e){var r=t<0?"-":"",i=(r?-t:t)+"",o=i.length;return r+(o