File size: 2,487 Bytes
a152706
 
200e5f9
978608b
 
a152706
dc4f75a
 
 
a152706
 
 
200e5f9
 
 
 
 
dc4f75a
200e5f9
dc4f75a
 
 
 
200e5f9
 
 
a152706
 
 
 
 
dc4f75a
a152706
 
 
 
200e5f9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import gradio as gr
import torch
import soundfile as sf
from speechbrain.inference.TTS import Tacotron2
from speechbrain.inference.vocoders import HIFIGAN

# Load the pretrained models (LJSpeech-trained SpeechBrain checkpoints).
# NOTE: from_hparams downloads weights from the HuggingFace Hub on first run
# and caches them in the given savedir directories.
hifi_gan = HIFIGAN.from_hparams(source="speechbrain/tts-hifigan-ljspeech", savedir="tmpdir_vocoder")
tacotron2 = Tacotron2.from_hparams(source="speechbrain/tts-tacotron2-ljspeech", savedir="tmpdir_tts")

# Inference function
def synthesize_speech(text):
    """Synthesize speech from ``text`` and return the path to a .wav file.

    Pipeline: Tacotron2 (text -> mel spectrogram) followed by HiFi-GAN
    (mel spectrogram -> waveform). The result is written to ``speech.wav``
    in the current working directory (overwritten on every call).

    Args:
        text: Sentence to synthesize. Must be non-empty.

    Returns:
        The output file path, ``"speech.wav"`` (matches the Gradio
        ``Audio(type="filepath")`` output component).

    Raises:
        ValueError: If ``text`` is empty or whitespace-only.
    """
    if not text or not text.strip():
        raise ValueError("Input text must not be empty.")

    # SpeechBrain's Tacotron2 inference interface accepts raw strings and
    # tokenizes internally; encode_batch expects a *list* of strings.
    # (The previous manual `tacotron2.tokenizer(...)` + LongTensor step is
    # not part of the inference API and fails at runtime.)
    mel_output, mel_length, alignment = tacotron2.encode_batch([text])

    # Vocode the mel spectrogram into a waveform with HiFi-GAN.
    waveforms = hifi_gan.decode_batch(mel_output)

    # Persist as a wav file at the vocoder's configured sample rate
    # (22050 Hz for this LJSpeech checkpoint).
    sf.write("speech.wav", waveforms.squeeze().cpu().numpy(), samplerate=hifi_gan.hparams.sample_rate)
    return "speech.wav"

# Build the Gradio UI: a multi-line text box in, an audio player out.
iface = gr.Interface(
    fn=synthesize_speech,
    inputs=gr.Textbox(lines=5, label="Input Text"),
    outputs=gr.Audio(label="Output Audio", type="filepath"),  # fn returns a wav file path
    title="TTS Demo",
    description="Enter text to synthesize speech."
)

# Start the local Gradio server (blocks until interrupted).
iface.launch()

# --- NOTE(review): earlier draft kept below for reference; superseded by the
# --- active implementation above. It saved a raw tensor via torch.save to
# --- "speech.pt", which the gr.Audio filepath output cannot play. Consider
# --- deleting this dead code once it is no longer needed.
# import gradio as gr
# import torch
# from speechbrain.inference.TTS import Tacotron2
# from speechbrain.inference.vocoders import HIFIGAN

# # モデルのロード
# hifi_gan = HIFIGAN.from_hparams(source="speechbrain/tts-hifigan-ljspeech", savedir="tmpdir_vocoder")
# tacotron2 = Tacotron2.from_hparams(source="speechbrain/tts-tacotron2-ljspeech", savedir="tmpdir_tts")

# # 推論関数の定義
# def synthesize_speech(text):
#     # Tacotron2でmel spectrogramを生成
#     # テキストを直接入力として、LongTensorでラップする
#     mel_output, mel_length, alignment = tacotron2.encode_batch([text])

#     # HiFi-GANでmel spectrogramから音声を生成
#     waveforms = hifi_gan.decode_batch(mel_output)

#     # torch tensorをwavfileとして保存
#     torch.save(waveforms, "speech.pt")
#     return "speech.pt"

# # Gradioインターフェースの作成
# iface = gr.Interface(
#     fn=synthesize_speech,
#     inputs=gr.Textbox(lines=5, label="Input Text"),
#     outputs=gr.Audio(label="Output Audio", type="filepath"),
#     title="TTS Demo",
#     description="Enter text to synthesize speech."
# )

# iface.launch()