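# Streamlit demo for VITS text-to-speech: it synthesizes the same input text
# with the pretrained English LJSpeech VITS checkpoint ("Before Fine-tuned")
# and with the Piper vi_VN-vivos-x_low Vietnamese voice ("After Fine-tuned").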
import os
import shlex

# The VITS modules imported below depend on the compiled monotonic_align
# Cython extension, so build it before importing them (skip if already built).
if not os.path.exists('monotonic_align/monotonic_align'):
    os.makedirs('monotonic_align/monotonic_align', exist_ok=True)
    os.system('cd monotonic_align && python setup.py build_ext --inplace')

# Fetch the pretrained LJSpeech checkpoint once; -O pins the filename that
# utils.load_checkpoint expects further down.
if not os.path.exists('pretrained_ljs.pth'):
    os.system("gdown 'https://drive.google.com/uc?id=1q86w74Ygw2hNzYP9cWkeClGT5X25PvBT' -O pretrained_ljs.pth")

import torch

import commons
import utils
from models import SynthesizerTrn
from text.symbols import symbols
from text import text_to_sequence

from scipy.io.wavfile import write

import streamlit as st


def get_vi_audio(text):
    # Synthesize Vietnamese speech with the Piper CLI voice vi_VN-vivos-x_low.
    # shlex.quote keeps user-supplied text from breaking out of the shell command.
    os.system(f"echo {shlex.quote(text)} | piper --model vi_VN-vivos-x_low --output_file vi_output.wav")


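# Convert raw text to a tensor of symbol ids using the cleaners named in the
# config; when add_blank is set, a blank token (id 0) is interspersed between
# symbols, matching the preprocessing the model was trained with.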
def get_text(text, hps):
    text_norm = text_to_sequence(text, hps.data.text_cleaners)
    if hps.data.add_blank:
        text_norm = commons.intersperse(text_norm, 0)
    text_norm = torch.LongTensor(text_norm)
    return text_norm


@st.cache_resource
def load_model():
    # Load hyperparameters and the pretrained LJSpeech VITS generator once per
    # process instead of rebuilding them on every Streamlit rerun.
    hps = utils.get_hparams_from_file("./configs/ljs_base.json")
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        **hps.model)
    _ = net_g.eval()
    _ = utils.load_checkpoint("pretrained_ljs.pth", net_g, None)
    return hps, net_g


hps, net_g = load_model()


st.title("VITS Text-to-Speech Demo")

text_input = st.text_input("Enter text to convert to speech", value="Chào mừng các bạn đến với môn Xử lí tiếng nói")

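# Note: the default input is Vietnamese ("Welcome to the Speech Processing
# course"), while pretrained_ljs.pth is an English LJSpeech model, so the
# "Before Fine-tuned" clip reads the text with English cleaners.
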
if st.button("Generate Speech"):
    # English synthesis with the pretrained LJSpeech VITS model.
    stn_tst = get_text(text_input, hps)

    with torch.no_grad():
        x_tst = stn_tst.unsqueeze(0)
        x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
        audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667,
                            noise_scale_w=0.8, length_scale=1)[0][0, 0].data.float().numpy()

    st.text("Before Fine-tuned:")
    st.audio(audio, format="audio/wav", sample_rate=hps.data.sampling_rate)
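
    # Optional, hypothetical extra: persist the VITS clip with scipy's wavfile
    # writer (imported above) so both outputs can be compared offline; the
    # filename "vits_output.wav" is illustrative, not required by the app.
    write("vits_output.wav", hps.data.sampling_rate, audio)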

    # Vietnamese synthesis with the Piper voice, for comparison.
    get_vi_audio(text_input)

    st.text("After Fine-tuned:")
    st.audio("vi_output.wav", format="audio/wav")