rakhlin committed
Commit c91d9f1 · 1 Parent(s): 7b311df

Upload 16 files

models/tts_models--en--ljspeech--tacotron2-DDC/config.json ADDED
@@ -0,0 +1,173 @@
+ {
+ "model": "Tacotron2",
+ "run_name": "ljspeech-ddc",
+ "run_description": "Tacotron2 with DDC. It uses only characters, and the 2nd decoder is pruned for efficient inference.",
+ // AUDIO PARAMETERS
+ "audio":{
+ // stft parameters
+ "fft_size": 1024, // number of stft frequency levels. Size of the linear spectrogram frame.
+ "win_length": 1024, // stft window length in samples.
+ "hop_length": 256, // stft hop length in samples.
+ "frame_length_ms": null, // stft window length in ms. If null, 'win_length' is used.
+ "frame_shift_ms": null, // stft hop length in ms. If null, 'hop_length' is used.
+
+ // Audio processing parameters
+ "sample_rate": 22050, // DATASET-RELATED: wav sample rate.
+ "preemphasis": 0.0, // pre-emphasis to reduce spec noise and make it more structured. If 0.0, pre-emphasis is disabled.
+ "ref_level_db": 20, // reference level dB; theoretically, 20 dB is the sound of air.
+ "log_func": "np.log",
+
+ // Silence trimming
+ "do_trim_silence": true, // enable trimming of silence in audio as you load it. LJSpeech (true), TWEB (false), Nancy (true)
+ "trim_db": 60, // threshold for trimming silence. Set this according to your dataset.
+
+ // Griffin-Lim
+ "power": 1.5, // value to sharpen wav signals after the GL algorithm.
+ "griffin_lim_iters": 60, // number of Griffin-Lim iterations. 30-60 is a good range. The larger the value, the slower the generation.
+
+ // MelSpectrogram parameters
+ "num_mels": 80, // size of the mel spec frame.
+ "mel_fmin": 0.0, // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for your dataset!
+ "mel_fmax": 8000.0, // maximum freq level for mel-spec. Tune for your dataset!
+ "spec_gain": 1,
+
+ // Normalization parameters
+ "signal_norm": false, // normalize spec values. Mean-variance normalization if 'stats_path' is defined, otherwise range normalization defined by the other params.
+ "min_level_db": -100, // lower bound for normalization
+ "symmetric_norm": true, // move normalization to range [-1, 1]
+ "max_norm": 4.0, // scale normalization to range [-max_norm, max_norm] or [0, max_norm]
+ "clip_norm": true, // clip normalized values into the range.
+ "stats_path": null // DO NOT USE WITH MULTI-SPEAKER MODEL. Scaler stats file computed by 'compute_statistics.py'. If it is defined, mean-std based normalization is used and the other normalization params are ignored.
+ },
+
+ // VOCABULARY PARAMETERS
+ "characters":{
+ "pad": "",
+ "eos": "",
+ "bos": "",
+ "characters": "_-!'(),.:;? ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
+ "punctuations":"",
+ "phonemes":""
+ },
+
+ // DISTRIBUTED TRAINING
+ "distributed":{
+ "backend": "nccl",
+ "url": "tcp://localhost:54321"
+ },
+
+ "reinit_layers": [], // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.
+
+ // TRAINING
+ "batch_size": 32, // Batch size for training. Values lower than 32 might make attention hard to learn. It is overwritten by 'gradual_training'.
+ "eval_batch_size": 16,
+ "r": 1, // Number of decoder frames to predict per iteration. Set the initial value if gradual training is enabled.
+ "gradual_training": null, // set gradual training steps [first_step, r, batch_size]. If null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceed.
+ "mixed_precision": false, // level of optimization with NVIDIA's apex feature for automatic mixed FP16/FP32 precision (AMP). NOTE: currently only O1 is supported; use "O1" to activate.
+
+ // LOSS SETTINGS
+ "loss_masking": true, // enable / disable loss masking against the sequence padding.
+ "decoder_loss_alpha": 1, // original decoder loss weight. If > 0, it is enabled
+ "postnet_loss_alpha": 1, // original postnet loss weight. If > 0, it is enabled
+ "postnet_diff_spec_alpha": 0, // differential spectral loss weight. If > 0, it is enabled
+ "decoder_diff_spec_alpha": 0, // differential spectral loss weight. If > 0, it is enabled
+ "decoder_ssim_alpha": 0, // decoder SSIM loss weight. If > 0, it is enabled
+ "postnet_ssim_alpha": 0, // postnet SSIM loss weight. If > 0, it is enabled
+ "ga_alpha": 5.0, // weight for guided attention loss. If > 0, guided attention is enabled.
+ "stopnet_pos_weight": 15.0, // positive class weight for stopnet loss, since there are far more negative samples than positive samples.
+
+
+ // VALIDATION
+ "run_eval": true,
+ "test_delay_epochs": 10, // Until attention is aligned, testing only wastes computation time.
+ "test_sentences_file": null, // set a file to load sentences to be used for testing. If null, the default English sentences are used.
+
+ // OPTIMIZER
+ "noam_schedule": false, // use Noam warmup and lr schedule.
+ "grad_clip": 1.0, // upper limit for gradient clipping.
+ "epochs": 1000, // total number of epochs to train.
+ "lr": 0.00001, // Initial learning rate. If Noam decay is active, the maximum learning rate.
+ "wd": 0.000001, // Weight decay weight.
+ "warmup_steps": 4000, // Noam decay steps to increase the learning rate from 0 to "lr"
+ "seq_len_norm": false, // Normalize each sample's loss by its length to alleviate imbalanced datasets. Use it if your dataset is small or has a skewed distribution of sequence lengths.
+
+ // TACOTRON PRENET
+ "memory_size": -1, // ONLY TACOTRON - size of the memory queue used for storing the last decoder predictions for auto-regression. If < 0, the memory queue is disabled and the decoder only uses the last prediction frame.
+ "prenet_type": "original", // "original" or "bn".
+ "prenet_dropout": true, // enable/disable dropout at prenet.
+ "prenet_dropout_at_inference": true,
+
+ // TACOTRON ATTENTION
+ "attention_type": "original", // 'original', 'graves', 'dynamic_convolution'
+ "attention_heads": 4, // number of attention heads (only for 'graves')
+ "attention_norm": "softmax", // softmax or sigmoid.
+ "windowing": false, // Enables attention windowing. Used only in eval mode.
+ "use_forward_attn": false, // whether to use forward attention. In general, it aligns faster.
+ "forward_attn_mask": false, // Additional masking forcing monotonicity, only in eval mode.
+ "transition_agent": false, // enable/disable the transition agent of forward attention.
+ "location_attn": true, // enable/disable location-sensitive attention. It is enabled for TACOTRON by default.
+ "bidirectional_decoder": false, // use https://arxiv.org/abs/1907.09006. Use it if attention does not work well with your dataset.
+ "double_decoder_consistency": false, // use DDC as explained here: https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency-draft/
+ "ddc_r": 7, // reduction rate for the coarse decoder.
+
+ // STOPNET
+ "stopnet": true, // Train stopnet to predict the end of synthesis.
+ "separate_stopnet": false, // Train stopnet separately if 'stopnet==true'. It prevents the stopnet loss from influencing the rest of the model. It yields a better model, but it trains SLOWER.
+
+ // TENSORBOARD and LOGGING
+ "print_step": 25, // Number of steps between logging training on the console.
+ "tb_plot_step": 100, // Number of steps between plotting TB training figures.
+ "print_eval": false, // If true, print intermediate loss values during evaluation.
+ "save_step": 1000, // Number of training steps between saving training stats and checkpoints.
+ "checkpoint": true, // If true, save checkpoints every "save_step"
+ "keep_all_best": false, // If true, keep all best_models after keep_after steps
+ "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
+ "tb_model_param_stats": false, // If true, plot param stats per layer on TensorBoard. Might be memory consuming, but good for debugging.
+
+ // DATA LOADING
+ "text_cleaner": "english_cleaners",
+ "enable_eos_bos_chars": false, // enable/disable beginning-of-sentence and end-of-sentence chars.
+ "num_loader_workers": 4, // number of training data loader processes. Don't set it too big. 4-8 are good values.
+ "num_val_loader_workers": 4, // number of evaluation data loader processes.
+ "batch_group_size": 4, // Number of batches to shuffle after bucketing.
+ "min_seq_len": 6, // DATASET-RELATED: minimum text length to use in training
+ "max_seq_len": 153, // DATASET-RELATED: maximum text length
+ "compute_input_seq_cache": false, // if true, text sequences are computed before starting training. If phonemes are enabled, they are also computed at this stage.
+ "use_noise_augment": false,
+
+ // PATHS
+ "output_path": "/content/gdrive/MyDrive/Trainings/LJSpeech/",
+
+ // PHONEMES
+ "phoneme_cache_path": "/home/erogol/Models/phoneme_cache/", // phoneme computation is slow; therefore, results are cached in the given folder.
+ "use_phonemes": false, // use phonemes instead of raw characters. It is suggested for better pronunciation.
+ "phoneme_language": "en-us", // depending on your target language, pick one from https://github.com/bootphon/phonemizer#languages
+
+ // MULTI-SPEAKER and GST
+ "use_speaker_embedding": false, // use speaker embedding to enable multi-speaker learning.
+ "use_gst": false, // use global style tokens
+ "use_external_speaker_embedding_file": false, // if true, forces the model to use an external embedding per sample instead of nn.embeddings; that is, it supports external embeddings such as those used at https://arxiv.org/abs/1806.04558
+ "external_speaker_embedding_file": "../../speakers-vctk-en.json", // if not null and use_external_speaker_embedding_file is true, it is used to load a specific embedding file; these embeddings are used instead of nn.embeddings (see https://arxiv.org/abs/1806.04558)
+ "gst": { // gst parameters, used if gst is enabled
+ "gst_style_input": null, // Condition the style input either on a
+ // -> wave file [path to wave] or
+ // -> dictionary using the style tokens {'token1': 'value', 'token2': 'value'}, e.g. {"0": 0.15, "1": 0.15, "5": -0.15},
+ // with the dictionary being len(dict) <= len(gst_style_tokens).
+ "gst_embedding_dim": 512,
+ "gst_num_heads": 4,
+ "gst_style_tokens": 10,
+ "gst_use_speaker_embedding": false
+ },
+
+ // DATASETS
+ "datasets": // List of datasets. They are all merged, and they get different speaker_ids.
+ [
+ {
+ "name": "ljspeech",
+ "path": "/content/LJSpeech-1.1/",
+ "meta_file_train": "metadata.csv", // for VCTK: if a list, the speaker ids in the list are ignored for training; useful for testing cloning with new speakers
+ "meta_file_val": null
+ }
+ ]
+ }
+
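
For reference, a minimal synthesis sketch using the two files in this folder (the config above plus the model_file.pth below), assuming the Coqui TTS Python API (pip install TTS); the output path is a hypothetical placeholder:

    # Sketch: load the vendored Tacotron2-DDC checkpoint and config directly.
    # Without an explicit vocoder, Coqui TTS falls back to Griffin-Lim for waveforms.
    from TTS.api import TTS

    tts = TTS(
        model_path="models/tts_models--en--ljspeech--tacotron2-DDC/model_file.pth",
        config_path="models/tts_models--en--ljspeech--tacotron2-DDC/config.json",
    )
    tts.tts_to_file(text="Hello from Tacotron2 with DDC.", file_path="out.wav")  # placeholder output path
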
models/tts_models--en--ljspeech--tacotron2-DDC/model_file.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d121d9068896e5e1427ffc6dd069f1785baf1ed0d63d5a3d6855c47aed340ad
+ size 112672337
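
The .pth entries in this commit are Git LFS pointer files rather than the weights themselves: each records the pointer spec version, the SHA-256 digest of the stored blob (oid), and its size in bytes. A minimal sketch for checking a downloaded blob against its pointer, using only the Python standard library; the local path mirrors the repo layout:

    import hashlib

    def verify_lfs_blob(path: str, expected_sha256: str, expected_size: int) -> bool:
        """Compare a file on disk against the oid/size recorded in its LFS pointer."""
        digest = hashlib.sha256()
        size = 0
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
                digest.update(chunk)
                size += len(chunk)
        return digest.hexdigest() == expected_sha256 and size == expected_size

    # oid and size copied from the pointer above
    print(verify_lfs_blob(
        "models/tts_models--en--ljspeech--tacotron2-DDC/model_file.pth",
        "9d121d9068896e5e1427ffc6dd069f1785baf1ed0d63d5a3d6855c47aed340ad",
        112672337,
    ))
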
models/tts_models--multilingual--multi-dataset--your_tts/config.json ADDED
@@ -0,0 +1,401 @@
+ {
+ "output_path": "../checkpoints/VITS-multilingual/VITS_fixes/new/new-SE/use_noise_aument_false/xlarge-ZS-PT-VCTK/pt-en+LibriTTS-fr/speaker_encoder_as_loss_9_alpha/mixed-p-false-bug-SDP-fixed/",
+ "logger_uri": null,
+ "run_name": "vits_tts-portuguese",
+ "project_name": null,
+ "run_description": "",
+ "print_step": 25,
+ "plot_step": 100,
+ "model_param_stats": false,
+ "wandb_entity": null,
+ "dashboard_logger": "tensorboard",
+ "log_model_step": 10000,
+ "save_step": 10000,
+ "save_n_checkpoints": 5,
+ "save_checkpoints": true,
+ "save_all_best": false,
+ "save_best_after": 10000,
+ "target_loss": null,
+ "print_eval": true,
+ "test_delay_epochs": -1,
+ "run_eval": true,
+ "run_eval_steps": null,
+ "distributed_backend": "nccl",
+ "distributed_url": "tcp://localhost:54321",
+ "mixed_precision": false,
+ "epochs": 1000,
+ "batch_size": 52,
+ "eval_batch_size": 52,
+ "grad_clip": [
+ 5.0,
+ 5.0
+ ],
+ "scheduler_after_epoch": true,
+ "lr": 0.001,
+ "optimizer": "AdamW",
+ "optimizer_params": {
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "weight_decay": 0.01
+ },
+ "lr_scheduler": "",
+ "lr_scheduler_params": null,
+ "use_grad_scaler": false,
+ "cudnn_enable": true,
+ "cudnn_deterministic": false,
+ "cudnn_benchmark": true,
+ "training_seed": 54321,
+ "model": "vits",
+ "num_loader_workers": 4,
+ "num_eval_loader_workers": 4,
+ "use_noise_augment": false,
+ "audio": {
+ "fft_size": 1024,
+ "sample_rate": 16000,
+ "win_length": 1024,
+ "hop_length": 256,
+ "num_mels": 80,
+ "mel_fmin": 0,
+ "mel_fmax": null
+ },
+ "use_phonemes": false,
+ "phonemizer": null,
+ "phoneme_language": "pt-br",
+ "compute_input_seq_cache": false,
+ "text_cleaner": "multilingual_cleaners",
+ "enable_eos_bos_chars": false,
+ "test_sentences_file": "",
+ "phoneme_cache_path": null,
+ "characters": {
+ "characters_class": "TTS.tts.models.vits.VitsCharacters",
+ "vocab_dict": null,
+ "pad": "_",
+ "eos": "&",
+ "bos": "*",
+ "blank": null,
+ "characters": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\u00af\u00b7\u00df\u00e0\u00e1\u00e2\u00e3\u00e4\u00e6\u00e7\u00e8\u00e9\u00ea\u00eb\u00ec\u00ed\u00ee\u00ef\u00f1\u00f2\u00f3\u00f4\u00f5\u00f6\u00f9\u00fa\u00fb\u00fc\u00ff\u0101\u0105\u0107\u0113\u0119\u011b\u012b\u0131\u0142\u0144\u014d\u0151\u0153\u015b\u016b\u0171\u017a\u017c\u01ce\u01d0\u01d2\u01d4\u0430\u0431\u0432\u0433\u0434\u0435\u0436\u0437\u0438\u0439\u043a\u043b\u043c\u043d\u043e\u043f\u0440\u0441\u0442\u0443\u0444\u0445\u0446\u0447\u0448\u0449\u044a\u044b\u044c\u044d\u044e\u044f\u0451\u0454\u0456\u0457\u0491\u2013!'(),-.:;? ",
+ "punctuations": "!'(),-.:;? ",
+ "phonemes": "",
+ "is_unique": true,
+ "is_sorted": true
+ },
+ "add_blank": true,
+ "batch_group_size": 0,
+ "loss_masking": null,
+ "min_audio_len": 1,
+ "max_audio_len": Infinity,
+ "min_text_len": 1,
+ "max_text_len": Infinity,
+ "compute_f0": false,
+ "compute_energy": false,
+ "compute_linear_spec": true,
+ "precompute_num_workers": 0,
+ "start_by_longest": false,
+ "shuffle": false,
+ "drop_last": false,
+ "datasets": [
+ {
+ "formatter": "",
+ "dataset_name": "",
+ "path": "../../datasets/VCTK-Corpus-removed-silence_16Khz/",
+ "meta_file_train": null,
+ "ignored_speakers": null,
+ "language": "en",
+ "phonemizer": "",
+ "meta_file_val": null,
+ "meta_file_attn_mask": ""
+ },
+ {
+ "formatter": "",
+ "dataset_name": "",
+ "path": "../../datasets/LibriTTS/LibriTTS/dataset-preprocessed-clean-100-and-360/dataset-22k/",
+ "meta_file_train": "metadata_all.csv",
+ "ignored_speakers": null,
+ "language": "en",
+ "phonemizer": "",
+ "meta_file_val": "dev-clean_500.csv",
+ "meta_file_attn_mask": ""
+ },
+ {
+ "formatter": "",
+ "dataset_name": "",
+ "path": "../../datasets/TTS-Portuguese-Corpus_16khz/",
+ "meta_file_train": "train_TTS-Portuguese_Corpus_metadata.csv",
+ "ignored_speakers": null,
+ "language": "pt-br",
+ "phonemizer": "",
+ "meta_file_val": "eval_TTS-Portuguese_Corpus_metadata.csv",
+ "meta_file_attn_mask": ""
+ },
+ {
+ "formatter": "",
+ "dataset_name": "",
+ "path": "../../datasets/M-AILABS/fr_FR",
+ "meta_file_train": "",
+ "ignored_speakers": null,
+ "language": "fr-fr",
+ "phonemizer": "",
+ "meta_file_val": null,
+ "meta_file_attn_mask": null
+ }
+ ],
+ "test_sentences": [
+ [
+ "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.",
+ "VCTK_p225",
+ null,
+ "en"
+ ],
+ [
+ "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.",
+ "ED",
+ null,
+ "en"
+ ],
+ [
+ "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.",
+ "bernard",
+ null,
+ "en"
+ ],
+ [
+ "This cake is great. It's so delicious and moist.",
+ "VCTK_p234",
+ null,
+ "en"
+ ],
+ [
+ "This cake is great. It's so delicious and moist.",
+ "ED",
+ null,
+ "en"
+ ],
+ [
+ "This cake is great. It's so delicious and moist.",
+ "ezwa",
+ null,
+ "en"
+ ],
+ [
+ "Hoje \u00e9 fundamental encontrar a raz\u00e3o da exist\u00eancia humana.",
+ "ED",
+ null,
+ "pt-br"
+ ],
+ [
+ "Hoje \u00e9 fundamental encontrar a raz\u00e3o da exist\u00eancia humana.",
+ "VCTK_p238",
+ null,
+ "pt-br"
+ ],
+ [
+ "Hoje \u00e9 fundamental encontrar a raz\u00e3o da exist\u00eancia humana.",
+ "gilles_g_le_blanc",
+ null,
+ "pt-br"
+ ],
+ [
+ "Em muitas cidades a popula\u00e7\u00e3o est\u00e1 diminuindo.",
+ "ED",
+ null,
+ "pt-br"
+ ],
+ [
+ "Em muitas cidades a popula\u00e7\u00e3o est\u00e1 diminuindo.",
+ "VCTK_p245",
+ null,
+ "pt-br"
+ ],
+ [
+ "Em muitas cidades a popula\u00e7\u00e3o est\u00e1 diminuindo.",
+ "nadine_eckert_boulet",
+ null,
+ "pt-br"
+ ],
+ [
+ "Il m'a fallu beaucoup de temps pour d\u00e9velopper une voix, et maintenant que je l'ai, je ne vais pas me taire.",
+ "VCTK_p245",
+ null,
+ "fr-fr"
+ ],
+ [
+ "Il m'a fallu beaucoup de temps pour d\u00e9velopper une voix, et maintenant que je l'ai, je ne vais pas me taire.",
+ "ED",
+ null,
+ "fr-fr"
+ ],
+ [
+ "Il m'a fallu beaucoup de temps pour d\u00e9velopper une voix, et maintenant que je l'ai, je ne vais pas me taire.",
+ "ezwa",
+ null,
+ "fr-fr"
+ ],
+ [
+ "Il m'a fallu beaucoup de temps pour d\u00e9velopper une voix, et maintenant que je l'ai, je ne vais pas me taire.",
+ "bernard",
+ null,
+ "fr-fr"
+ ],
+ [
+ "Il m'a fallu beaucoup de temps pour d\u00e9velopper une voix, et maintenant que je l'ai, je ne vais pas me taire.",
+ "gilles_g_le_blanc",
+ null,
+ "fr-fr"
+ ],
+ [
+ "Il m'a fallu beaucoup de temps pour d\u00e9velopper une voix, et maintenant que je l'ai, je ne vais pas me taire.",
+ "nadine_eckert_boulet",
+ null,
+ "fr-fr"
+ ],
+ [
+ "Il m'a fallu beaucoup de temps pour d\u00e9velopper une voix, et maintenant que je l'ai, je ne vais pas me taire.",
+ "zeckou",
+ null,
+ "fr-fr"
+ ]
+ ],
+ "eval_split_max_size": null,
+ "eval_split_size": 0.01,
+ "use_speaker_weighted_sampler": false,
+ "speaker_weighted_sampler_alpha": 1.0,
+ "use_language_weighted_sampler": true,
+ "language_weighted_sampler_alpha": 1.0,
+ "use_length_weighted_sampler": false,
+ "length_weighted_sampler_alpha": 1.0,
+ "model_args": {
+ "num_chars": 165,
+ "out_channels": 513,
+ "spec_segment_size": 62,
+ "hidden_channels": 192,
+ "hidden_channels_ffn_text_encoder": 768,
+ "num_heads_text_encoder": 2,
+ "num_layers_text_encoder": 10,
+ "kernel_size_text_encoder": 3,
+ "dropout_p_text_encoder": 0.1,
+ "dropout_p_duration_predictor": 0.5,
+ "kernel_size_posterior_encoder": 5,
+ "dilation_rate_posterior_encoder": 1,
+ "num_layers_posterior_encoder": 16,
+ "kernel_size_flow": 5,
+ "dilation_rate_flow": 1,
+ "num_layers_flow": 4,
+ "resblock_type_decoder": "2",
+ "resblock_kernel_sizes_decoder": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes_decoder": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates_decoder": [
+ 8,
+ 8,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel_decoder": 512,
+ "upsample_kernel_sizes_decoder": [
+ 16,
+ 16,
+ 4,
+ 4
+ ],
+ "periods_multi_period_discriminator": [
+ 2,
+ 3,
+ 5,
+ 7,
+ 11
+ ],
+ "use_sdp": true,
+ "noise_scale": 1.0,
+ "inference_noise_scale": 0.3,
+ "length_scale": 1.5,
+ "noise_scale_dp": 0.6,
+ "inference_noise_scale_dp": 0.3,
+ "max_inference_len": null,
+ "init_discriminator": true,
+ "use_spectral_norm_disriminator": false,
+ "use_speaker_embedding": false,
+ "num_speakers": 1244,
+ "speakers_file": null,
+ "d_vector_file": [
+ "C:/Users/Torch/AppData/Local\\tts\\tts_models--multilingual--multi-dataset--your_tts\\speakers.json"
+ ],
+ "speaker_embedding_channels": 512,
+ "use_d_vector_file": true,
+ "d_vector_dim": 512,
+ "detach_dp_input": true,
+ "use_language_embedding": true,
+ "embedded_language_dim": 4,
+ "num_languages": 3,
+ "language_ids_file": null,
+ "use_speaker_encoder_as_loss": true,
+ "speaker_encoder_config_path": "C:/Users/Torch/AppData/Local\\tts\\tts_models--multilingual--multi-dataset--your_tts\\config_se.json",
+ "speaker_encoder_model_path": "C:/Users/Torch/AppData/Local\\tts\\tts_models--multilingual--multi-dataset--your_tts\\model_se.pth",
+ "condition_dp_on_speaker": true,
+ "freeze_encoder": false,
+ "freeze_DP": false,
+ "freeze_PE": false,
+ "freeze_flow_decoder": false,
+ "freeze_waveform_decoder": false,
+ "encoder_sample_rate": null,
+ "interpolate_z": true,
+ "reinit_DP": false,
+ "reinit_text_encoder": false
+ },
+ "lr_gen": 0.0002,
+ "lr_disc": 0.0002,
+ "lr_scheduler_gen": "ExponentialLR",
+ "lr_scheduler_gen_params": {
+ "gamma": 0.999875,
+ "last_epoch": -1
+ },
+ "lr_scheduler_disc": "ExponentialLR",
+ "lr_scheduler_disc_params": {
+ "gamma": 0.999875,
+ "last_epoch": -1
+ },
+ "kl_loss_alpha": 1.0,
+ "disc_loss_alpha": 1.0,
+ "gen_loss_alpha": 1.0,
+ "feat_loss_alpha": 1.0,
+ "mel_loss_alpha": 45.0,
+ "dur_loss_alpha": 1.0,
+ "speaker_encoder_loss_alpha": 9.0,
+ "return_wav": true,
+ "use_weighted_sampler": false,
+ "weighted_sampler_attrs": {},
+ "weighted_sampler_multipliers": {},
+ "r": 1,
+ "num_speakers": 0,
+ "use_speaker_embedding": false,
+ "speakers_file": null,
+ "speaker_embedding_channels": 256,
+ "language_ids_file": null,
+ "use_language_embedding": false,
+ "use_d_vector_file": true,
+ "d_vector_file": [
+ "C:/Users/Torch/AppData/Local\\tts\\tts_models--multilingual--multi-dataset--your_tts\\speakers.json"
+ ],
+ "d_vector_dim": 512
+ }
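
A minimal multilingual synthesis sketch for this model, assuming the Coqui TTS Python API; the speaker and language names below come from this config's test_sentences and from language_ids.json, and the output path is a placeholder:

    from TTS.api import TTS

    # Sketch: fetch the published YourTTS model by name (downloaded on first use).
    tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts")
    tts.tts_to_file(
        text="It took me quite a long time to develop a voice.",
        speaker="VCTK_p225",          # a speaker name from speakers.json / test_sentences
        language="en",                # "en", "fr-fr", or "pt-br" per language_ids.json
        file_path="yourtts_out.wav",  # placeholder output path
    )
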
models/tts_models--multilingual--multi-dataset--your_tts/config_se.json ADDED
@@ -0,0 +1,121 @@
+ {
+ "model": "speaker_encoder",
+ "run_name": "speaker_encoder",
+ "run_description": "resnet speaker encoder trained with commonvoice all languages dev and train, Voxceleb 1 dev and Voxceleb 2 dev",
+ "epochs": 100000,
+ "batch_size": null,
+ "eval_batch_size": null,
+ "mixed_precision": false,
+ "run_eval": true,
+ "test_delay_epochs": 0,
+ "print_eval": false,
+ "print_step": 50,
+ "tb_plot_step": 100,
+ "tb_model_param_stats": false,
+ "save_step": 1000,
+ "checkpoint": true,
+ "keep_all_best": false,
+ "keep_after": 10000,
+ "num_loader_workers": 8,
+ "num_val_loader_workers": 0,
+ "use_noise_augment": false,
+ "output_path": "../checkpoints/speaker_encoder/language_balanced/normalized/angleproto-4-samples-by-speakers/",
+ "distributed_backend": "nccl",
+ "distributed_url": "tcp://localhost:54321",
+ "audio": {
+ "fft_size": 512,
+ "win_length": 400,
+ "hop_length": 160,
+ "frame_shift_ms": null,
+ "frame_length_ms": null,
+ "stft_pad_mode": "reflect",
+ "sample_rate": 16000,
+ "resample": false,
+ "preemphasis": 0.97,
+ "ref_level_db": 20,
+ "do_sound_norm": false,
+ "do_trim_silence": false,
+ "trim_db": 60,
+ "power": 1.5,
+ "griffin_lim_iters": 60,
+ "num_mels": 64,
+ "mel_fmin": 0.0,
+ "mel_fmax": 8000.0,
+ "spec_gain": 20,
+ "signal_norm": false,
+ "min_level_db": -100,
+ "symmetric_norm": false,
+ "max_norm": 4.0,
+ "clip_norm": false,
+ "stats_path": null,
+ "do_rms_norm": true,
+ "db_level": -27.0
+ },
+ "datasets": [
+ {
+ "name": "voxceleb2",
+ "path": "/workspace/scratch/ecasanova/datasets/VoxCeleb/vox2_dev_aac/",
+ "meta_file_train": null,
+ "ununsed_speakers": null,
+ "meta_file_val": null,
+ "meta_file_attn_mask": "",
+ "language": "voxceleb"
+ }
+ ],
+ "model_params": {
+ "model_name": "resnet",
+ "input_dim": 64,
+ "use_torch_spec": true,
+ "log_input": true,
+ "proj_dim": 512
+ },
+ "audio_augmentation": {
+ "p": 0.5,
+ "rir": {
+ "rir_path": "/workspace/store/ecasanova/ComParE/RIRS_NOISES/simulated_rirs/",
+ "conv_mode": "full"
+ },
+ "additive": {
+ "sounds_path": "/workspace/store/ecasanova/ComParE/musan/",
+ "speech": {
+ "min_snr_in_db": 13,
+ "max_snr_in_db": 20,
+ "min_num_noises": 1,
+ "max_num_noises": 1
+ },
+ "noise": {
+ "min_snr_in_db": 0,
+ "max_snr_in_db": 15,
+ "min_num_noises": 1,
+ "max_num_noises": 1
+ },
+ "music": {
+ "min_snr_in_db": 5,
+ "max_snr_in_db": 15,
+ "min_num_noises": 1,
+ "max_num_noises": 1
+ }
+ },
+ "gaussian": {
+ "p": 0.0,
+ "min_amplitude": 0.0,
+ "max_amplitude": 1e-05
+ }
+ },
+ "storage": {
+ "sample_from_storage_p": 0.5,
+ "storage_size": 40
+ },
+ "max_train_step": 1000000,
+ "loss": "angleproto",
+ "grad_clip": 3.0,
+ "lr": 0.0001,
+ "lr_decay": false,
+ "warmup_steps": 4000,
+ "wd": 1e-06,
+ "steps_plot_stats": 100,
+ "num_speakers_in_batch": 100,
+ "num_utters_per_speaker": 4,
+ "skip_speakers": true,
+ "voice_len": 2.0
+ }
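
A minimal sketch of computing a speaker d-vector with this encoder, assuming a Coqui TTS version whose SpeakerManager accepts encoder paths and exposes compute_embedding_from_clip; the reference wav path is a hypothetical placeholder:

    from TTS.tts.utils.speakers import SpeakerManager

    se_dir = "models/tts_models--multilingual--multi-dataset--your_tts"
    manager = SpeakerManager(
        encoder_model_path=f"{se_dir}/model_se.pth",
        encoder_config_path=f"{se_dir}/config_se.json",
    )
    # 512-dim embedding ("proj_dim" above) computed from a 16 kHz reference clip
    embedding = manager.compute_embedding_from_clip("reference_speaker.wav")  # hypothetical path
    print(len(embedding))  # expected: 512
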
models/tts_models--multilingual--multi-dataset--your_tts/language_ids.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "en": 0,
+ "fr-fr": 1,
+ "pt-br": 2
+ }
models/tts_models--multilingual--multi-dataset--your_tts/model_file.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:017bfd8907c80bb5857d65d0223f0e4e4b9d699ef52e2a853d9cc7eb7e308cf0
+ size 379957289
models/tts_models--multilingual--multi-dataset--your_tts/model_se.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f96efb20cbeeefd81fd8336d7f0155bf8902f82f9474e58ccb19d9e12345172
+ size 44610930
models/tts_models--multilingual--multi-dataset--your_tts/speakers.json ADDED
The diff for this file is too large to render. See raw diff
 
models/voice_conversion_models--multilingual--vctk--freevc24/._config.json ADDED
Binary file (386 Bytes).
 
models/voice_conversion_models--multilingual--vctk--freevc24/._model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fa468ed77a9726751b4d321242e069c77dbcd8ecb2e30a212dc0f38f69b852a
+ size 230
models/voice_conversion_models--multilingual--vctk--freevc24/._voice_conversion_models--multilingual--vctk--freevc24 ADDED
Binary file (330 Bytes).
 
models/voice_conversion_models--multilingual--vctk--freevc24/__MACOSX/._voice_conversion_models--multilingual--vctk--freevc24 ADDED
Binary file (330 Bytes).
 
models/voice_conversion_models--multilingual--vctk--freevc24/__MACOSX/voice_conversion_models--multilingual--vctk--freevc24/._config.json ADDED
Binary file (386 Bytes).
 
models/voice_conversion_models--multilingual--vctk--freevc24/__MACOSX/voice_conversion_models--multilingual--vctk--freevc24/._model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fa468ed77a9726751b4d321242e069c77dbcd8ecb2e30a212dc0f38f69b852a
+ size 230
models/voice_conversion_models--multilingual--vctk--freevc24/config.json ADDED
@@ -0,0 +1,204 @@
+ {
+ "output_path": "output",
+ "logger_uri": null,
+ "run_name": "run",
+ "project_name": null,
+ "run_description": "\ud83d\udc38Coqui trainer run.",
+ "print_step": 25,
+ "plot_step": 100,
+ "model_param_stats": false,
+ "wandb_entity": null,
+ "dashboard_logger": "tensorboard",
+ "log_model_step": null,
+ "save_step": 10000,
+ "save_n_checkpoints": 5,
+ "save_checkpoints": true,
+ "save_all_best": false,
+ "save_best_after": 10000,
+ "target_loss": null,
+ "print_eval": false,
+ "test_delay_epochs": 0,
+ "run_eval": true,
+ "run_eval_steps": null,
+ "distributed_backend": "nccl",
+ "distributed_url": "tcp://localhost:54321",
+ "mixed_precision": false,
+ "epochs": 1000,
+ "batch_size": 32,
+ "eval_batch_size": 16,
+ "grad_clip": [
+ 1000,
+ 1000
+ ],
+ "scheduler_after_epoch": true,
+ "lr": 0.001,
+ "optimizer": "AdamW",
+ "optimizer_params": {
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "weight_decay": 0.01
+ },
+ "lr_scheduler": null,
+ "lr_scheduler_params": {},
+ "use_grad_scaler": false,
+ "cudnn_enable": true,
+ "cudnn_deterministic": false,
+ "cudnn_benchmark": false,
+ "training_seed": 54321,
+ "model": "freevc",
+ "num_loader_workers": 0,
+ "num_eval_loader_workers": 0,
+ "use_noise_augment": false,
+ "audio": {
+ "max_wav_value": 32768.0,
+ "input_sample_rate": 16000,
+ "output_sample_rate": 24000,
+ "filter_length": 1280,
+ "hop_length": 320,
+ "win_length": 1280,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": null
+ },
+ "batch_group_size": 0,
+ "loss_masking": null,
+ "min_audio_len": 1,
+ "max_audio_len": Infinity,
+ "min_text_len": 1,
+ "max_text_len": Infinity,
+ "compute_f0": false,
+ "compute_energy": false,
+ "compute_linear_spec": true,
+ "precompute_num_workers": 0,
+ "start_by_longest": false,
+ "shuffle": false,
+ "drop_last": false,
+ "datasets": [
+ {
+ "formatter": "",
+ "dataset_name": "",
+ "path": "",
+ "meta_file_train": "",
+ "ignored_speakers": null,
+ "language": "",
+ "phonemizer": "",
+ "meta_file_val": "",
+ "meta_file_attn_mask": ""
+ }
+ ],
+ "test_sentences": [
+ [
+ "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent."
+ ],
+ [
+ "Be a voice, not an echo."
+ ],
+ [
+ "I'm sorry Dave. I'm afraid I can't do that."
+ ],
+ [
+ "This cake is great. It's so delicious and moist."
+ ],
+ [
+ "Prior to November 22, 1963."
+ ]
+ ],
+ "eval_split_max_size": null,
+ "eval_split_size": 0.01,
+ "use_speaker_weighted_sampler": false,
+ "speaker_weighted_sampler_alpha": 1.0,
+ "use_language_weighted_sampler": false,
+ "language_weighted_sampler_alpha": 1.0,
+ "use_length_weighted_sampler": false,
+ "length_weighted_sampler_alpha": 1.0,
+ "model_args": {
+ "spec_channels": 641,
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 10,
+ 6,
+ 4,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 256,
+ "ssl_dim": 1024,
+ "use_spk": true,
+ "num_spks": 0,
+ "segment_size": 8960
+ },
+ "lr_gen": 0.0002,
+ "lr_disc": 0.0002,
+ "lr_scheduler_gen": "ExponentialLR",
+ "lr_scheduler_gen_params": {
+ "gamma": 0.999875,
+ "last_epoch": -1
+ },
+ "lr_scheduler_disc": "ExponentialLR",
+ "lr_scheduler_disc_params": {
+ "gamma": 0.999875,
+ "last_epoch": -1
+ },
+ "kl_loss_alpha": 1.0,
+ "disc_loss_alpha": 1.0,
+ "gen_loss_alpha": 1.0,
+ "feat_loss_alpha": 1.0,
+ "mel_loss_alpha": 45.0,
+ "dur_loss_alpha": 1.0,
+ "speaker_encoder_loss_alpha": 1.0,
+ "return_wav": true,
+ "use_weighted_sampler": false,
+ "weighted_sampler_attrs": {},
+ "weighted_sampler_multipliers": {},
+ "r": 1,
+ "add_blank": true,
+ "num_speakers": 0,
+ "use_speaker_embedding": false,
+ "speakers_file": null,
+ "speaker_embedding_channels": 256,
+ "language_ids_file": null,
+ "use_language_embedding": false,
+ "use_d_vector_file": false,
+ "d_vector_file": null,
+ "d_vector_dim": null
+ }
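
A minimal voice-conversion sketch for this model, assuming the Coqui TTS Python API; both input wav paths are hypothetical placeholders:

    from TTS.api import TTS

    vc = TTS(model_name="voice_conversion_models/multilingual/vctk/freevc24")
    vc.voice_conversion_to_file(
        source_wav="source_speech.wav",   # placeholder: utterance whose content is kept
        target_wav="target_speaker.wav",  # placeholder: reference voice to convert into
        file_path="converted.wav",        # FreeVC outputs 24 kHz audio ("output_sample_rate" above)
    )
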
models/voice_conversion_models--multilingual--vctk--freevc24/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18d4ce44e7c803d675be1984b174e0f7bf05ce937419f19a818877e83f197007
+ size 1425242419