import os
import re
import tempfile

import torch
import gradio as gr
from faster_whisper import BatchedInferencePipeline, WhisperModel
from pydub import AudioSegment, effects
from pyannote.audio import Pipeline as DiarizationPipeline
import opencc

import spaces  # zeroGPU support
from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess

from termcolor import cprint
import time
import torchaudio
from pyannote.audio.pipelines.utils.hook import ProgressHook

# —————— Model Lists ——————
WHISPER_MODELS = [
    "deepdml/faster-whisper-large-v3-turbo-ct2",
    "guillaumekln/faster-whisper-tiny",
    "Systran/faster-whisper-large-v3",
    "XA9/Belle-faster-whisper-large-v3-zh-punct",
    "asadfgglie/faster-whisper-large-v3-zh-TW",
    "guillaumekln/faster-whisper-medium",
    "guillaumekln/faster-whisper-small",
    "guillaumekln/faster-whisper-base",
    "Luigi/whisper-small-zh_tw-ct2",
]

SENSEVOICE_MODELS = [
    "FunAudioLLM/SenseVoiceSmall",
    "funasr/paraformer-zh",
]

# —————— Language Options ——————
WHISPER_LANGUAGES = [
    "auto", "af","am","ar","as","az","ba","be","bg","bn","bo",
    "br","bs","ca","cs","cy","da","de","el","en","es","et",
    "eu","fa","fi","fo","fr","gl","gu","ha","haw","he","hi",
    "hr","ht","hu","hy","id","is","it","ja","jw","ka","kk",
    "km","kn","ko","la","lb","ln","lo","lt","lv","mg","mi",
    "mk","ml","mn","mr","ms","mt","my","ne","nl","nn","no",
    "oc","pa","pl","ps","pt","ro","ru","sa","sd","si","sk",
    "sl","sn","so","sq","sr","su","sv","sw","ta","te","tg",
    "th","tk","tl","tr","tt","uk","ur","uz","vi","yi","yo",
    "zh","yue"
]

SENSEVOICE_LANGUAGES = ["auto", "zh", "yue", "en", "ja", "ko", "nospeech"]

# —————— Caches ——————
sense_models = {}
dar_pipe = None

converter = opencc.OpenCC('s2t')  # Simplified -> Traditional Chinese for the displayed transcripts

# —————— Diarization Formatter ——————
def format_diarization_html(snippets):
    palette = ["#e74c3c", "#3498db", "#27ae60", "#e67e22", "#9b59b6", "#16a085", "#f1c40f"]
    speaker_colors = {}
    html_lines = []
    last_spk = None
    for s in snippets:
        if s.startswith("[") and "]" in s:
            spk, txt = s[1:].split("]", 1)
            spk, txt = spk.strip(), txt.strip()
        else:
            spk, txt = "", s.strip()
        # hide empty lines
        if not txt:
            continue
        # assign color if new speaker
        if spk not in speaker_colors:
            speaker_colors[spk] = palette[len(speaker_colors) % len(palette)]
        color = speaker_colors[spk]
        # simplify tag for same speaker
        if spk == last_spk:
            display = txt
        else:
            display = f"<strong>{spk}:</strong> {txt}"
        last_spk = spk
        html_lines.append(
            f"<p style='margin:4px 0; font-family:monospace; color:{color};'>{display}</p>"
        )
    return "<div>" + "".join(html_lines) + "</div>"

# —————— Helpers ——————

# —————— Faster-Whisper Cache & Factory ——————
_fwhisper_models: dict[tuple[str, str], BatchedInferencePipeline] = {}

def get_fwhisper_model(model_id: str, device: str) -> BatchedInferencePipeline:
    """
    Lazily load WhisperModel(model_id) on 'cpu' or 'cuda', wrap it in a
    BatchedInferencePipeline, and cache it per (model_id, device).
    Uses float16 on GPU and int8 on CPU for speed.
    """
    key = (model_id, device)
    if key not in _fwhisper_models:
        compute_type = "float16" if device.startswith("cuda") else "int8"
        model = WhisperModel(
            model_id,
            device=device,
            compute_type=compute_type,
        )
        _fwhisper_models[key] = BatchedInferencePipeline(model=model)
    return _fwhisper_models[key]
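
# Usage sketch (assumes a local "sample.wav"; the cached object is a BatchedInferencePipeline,
# so it exposes the same transcribe() interface used by the streaming functions below):
#   pipe = get_fwhisper_model(WHISPER_MODELS[0], "cpu")
#   segments, info = pipe.transcribe("sample.wav", beam_size=3)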

def get_sense_model(model_id: str, device_str: str):
    key = (model_id, device_str)
    if key not in sense_models:
        sense_models[key] = AutoModel(
            model=model_id,
            vad_model="fsmn-vad",
            vad_kwargs={"max_single_segment_time": 300000},
            device=device_str,
            ban_emo_unk=False,
            hub="hf",
        )
    return sense_models[key]
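
# Usage sketch (assumes a local "sample.wav"; mirrors the generate() call in the SenseVoice
# streaming functions below):
#   sense = get_sense_model(SENSEVOICE_MODELS[0], "cpu")
#   res = sense.generate(input="sample.wav", language="auto", use_itn=True, batch_size_s=300)
#   print(rich_transcription_postprocess(res[0]["text"]))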


def get_diarization_pipe():
    global dar_pipe
    if dar_pipe is None:
        token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_TOKEN")
        try:
            dar_pipe = DiarizationPipeline.from_pretrained(
                "pyannote/speaker-diarization-3.1",
                use_auth_token=token or True
            )
        except Exception as e:
            print(f"Failed to load pyannote/speaker-diarization-3.1: {e}\nFalling back to pyannote/speaker-diarization@2.1.")
            dar_pipe = DiarizationPipeline.from_pretrained(
                "pyannote/speaker-diarization@2.1",
                use_auth_token=token or True
            )

    return dar_pipe
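
# Usage sketch (the pyannote models are gated, so HF_TOKEN must grant access; "sample.wav" is a placeholder):
#   diar = get_diarization_pipe()
#   diary = diar("sample.wav")
#   for turn, _, speaker in diary.itertracks(yield_label=True):
#       print(speaker, turn.start, turn.end)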


# —————— Whisper Transcription ——————
def _transcribe_fwhisper_cpu_stream(model_id, language, audio_path, whisper_multilingual_en):
    """
    Generator-based streaming transcription with speaker diarization using Faster-Whisper on CPU.
    Yields (raw_text, diar_html) tuples for Gradio streaming; raw_text stays empty because
    only the diarized HTML pane is displayed.
    """
    pipe = get_fwhisper_model(model_id, "cpu")
    cprint('Whisper (faster-whisper) using CPU [stream]', 'red')

    # Diarization branch: accumulate snippets and yield full HTML each turn
    diarizer = get_diarization_pipe()
    waveform, sample_rate = torchaudio.load(audio_path)
    diarizer.to(torch.device('cpu'))
    with ProgressHook() as hook:
        diary = diarizer({"waveform": waveform, "sample_rate": sample_rate}, hook=hook)
    audio = AudioSegment.from_file(audio_path)  # load the file once; sliced per speaker turn below
    snippets = []
    for turn, _, speaker in diary.itertracks(yield_label=True):
        # extract the segment for this speaker turn
        start_ms = int(turn.start * 1000)
        end_ms = int(turn.end * 1000)
        segment = audio[start_ms:end_ms]
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            segment = effects.normalize(segment)
            segment.export(tmp.name, format="wav")
            segments, _ = pipe.transcribe(
                tmp.name,
                beam_size=3,
                best_of=3,
                language=None if language == "auto" else language,
                vad_filter=True,
                batch_size=16,
                multilingual=whisper_multilingual_en,
            )
        os.unlink(tmp.name)
        text = converter.convert("".join(s.text for s in segments).strip())
        snippets.append(f"[{speaker}] {text}")
        # yield accumulated diarization HTML
        yield "", format_diarization_html(snippets)
    return


@spaces.GPU
def _transcribe_fwhisper_gpu_stream(model_id, language, audio_path, whisper_multilingual_en):
    """
    Generator-based streaming transcription with speaker diarization using Faster-Whisper on CUDA.
    Yields (raw_text, diar_html) tuples for Gradio streaming; raw_text stays empty because
    only the diarized HTML pane is displayed.
    """
    pipe = get_fwhisper_model(model_id, "cuda")
    cprint('Whisper (faster-whisper) using CUDA [stream]', 'green')

    # Diarization branch: accumulate snippets and yield full HTML each turn
    diarizer = get_diarization_pipe()
    device = torch.device('cuda')
    diarizer.to(device)
    waveform, sample_rate = torchaudio.load(audio_path)
    waveform = waveform.to(device)
    with ProgressHook() as hook:
        diary = diarizer({"waveform": waveform, "sample_rate": sample_rate}, hook=hook)
    audio = AudioSegment.from_file(audio_path)  # load the file once; sliced per speaker turn below
    snippets = []
    for turn, _, speaker in diary.itertracks(yield_label=True):
        start_ms = int(turn.start * 1000)
        end_ms = int(turn.end * 1000)
        segment = audio[start_ms:end_ms]
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            segment = effects.normalize(segment)
            segment.export(tmp.name, format="wav")
            segments, _ = pipe.transcribe(
                tmp.name,
                beam_size=3,
                best_of=3,
                language=None if language == "auto" else language,
                vad_filter=True,
                batch_size=16,
                multilingual=whisper_multilingual_en,
            )
        os.unlink(tmp.name)
        text = converter.convert("".join(s.text for s in segments).strip())
        snippets.append(f"[{speaker}] {text}")
        yield "", format_diarization_html(snippets)
    return

def transcribe_fwhisper_stream(model_id, language, audio_path, device_sel, whisper_multilingual_en):
    """Dispatch to CPU or GPU streaming generators, preserving two-value yields."""
    if device_sel == "GPU" and torch.cuda.is_available():
        yield from _transcribe_fwhisper_gpu_stream(model_id, language, audio_path, whisper_multilingual_en)
    else:
        yield from _transcribe_fwhisper_cpu_stream(model_id, language, audio_path, whisper_multilingual_en)
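
# Usage sketch outside Gradio (placeholder path; each yield carries the full diarized HTML so far):
#   for raw, html in transcribe_fwhisper_stream(WHISPER_MODELS[0], "auto", "sample.wav", "CPU", False):
#       print(html)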

# —————— SenseVoice Transcription ——————
def _transcribe_sense_cpu_stream(model_id: str, language: str, audio_path: str,
                                 enable_punct: bool):
    model = get_sense_model(model_id, "cpu")
    cprint('SenseVoiceSmall using CPU [stream]', 'red')

    diarizer = get_diarization_pipe()
    diarizer.to(torch.device('cpu'))
    waveform, sample_rate = torchaudio.load(audio_path)
    with ProgressHook() as hook:
        diary = diarizer({"waveform": waveform, "sample_rate": sample_rate}, hook=hook)
    snippets = []
    cache = {}
    audio = AudioSegment.from_file(audio_path)  # load the file once; sliced per speaker turn below
    for turn, _, speaker in diary.itertracks(yield_label=True):
        segs = None  # guard: generate() below may fail for this turn
        start_ms, end_ms = int(turn.start*1000), int(turn.end*1000)
        segment = audio[start_ms:end_ms]
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            segment.export(tmp.name, format="wav")
            try:
                segs = model.generate(input=tmp.name, cache=cache, language=language,
                                        use_itn=enable_punct, batch_size_s=300)
            except Exception as e:
                cprint(f'Error: {e}','red')
        os.unlink(tmp.name)
        if segs:
            txt = rich_transcription_postprocess(segs[0]['text'])
            if not enable_punct:
                txt = re.sub(r"[^\w\s]", "", txt)
            txt = converter.convert(txt)
            snippets.append(f"[{speaker}] {txt}")
        yield "", format_diarization_html(snippets)
    return


@spaces.GPU(duration=120)
def _transcribe_sense_gpu_stream(model_id: str, language: str, audio_path: str,
                                 enable_punct: bool):
    model = get_sense_model(model_id, "cuda:0")
    cprint('SenseVoiceSmall using CUDA [stream]', 'green')

    diarizer = get_diarization_pipe()
    diarizer.to(torch.device('cuda'))
    waveform, sample_rate = torchaudio.load(audio_path)
    waveform = waveform.to(torch.device('cuda'))
    with ProgressHook() as hook:
        diary = diarizer({"waveform": waveform, "sample_rate": sample_rate}, hook=hook)
    snippets = []
    cache = {}
    audio = AudioSegment.from_file(audio_path)  # load the file once; sliced per speaker turn below
    for turn, _, speaker in diary.itertracks(yield_label=True):
        segs = None  # reset so a failed generate() does not reuse the previous turn's result
        start_ms, end_ms = int(turn.start*1000), int(turn.end*1000)
        segment = audio[start_ms:end_ms]
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            segment.export(tmp.name, format="wav")
            try:
                segs = model.generate(input=tmp.name, cache=cache, language=language,
                                        use_itn=enable_punct, batch_size_s=300)
            except Exception as e:
                cprint(f'Error: {e}','red')
        os.unlink(tmp.name)
        if segs:
            txt = rich_transcription_postprocess(segs[0]['text'])
            if not enable_punct:
                txt = re.sub(r"[^\w\s]", "", txt)
            txt = converter.convert(txt)
            snippets.append(f"[{speaker}] {txt}")
        yield "", format_diarization_html(snippets)
    return


def transcribe_sense_stream(model_id: str,
                            language: str,
                            audio_path: str,
                            enable_punct: bool,
                            device_sel: str):
    """Dispatch to CPU or GPU streaming generators, preserving two-value yields."""
    if device_sel == "GPU" and torch.cuda.is_available():
        yield from _transcribe_sense_gpu_stream(model_id, language, audio_path, enable_punct)
    else:
        yield from _transcribe_sense_cpu_stream(model_id, language, audio_path, enable_punct)
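
# Usage sketch outside Gradio (placeholder path; mirrors the Faster-Whisper dispatcher above):
#   for raw, html in transcribe_sense_stream(SENSEVOICE_MODELS[0], "auto", "sample.wav", True, "CPU"):
#       print(html)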

# —————— Gradio UI ——————
DEMO_CSS = """
.diar {
    padding: 0.5rem;
    color: #f1f1f1;
    font-family: monospace;
    font-size: 0.9rem;
}
"""
Demo = gr.Blocks(css=DEMO_CSS)
with Demo:
    gr.Markdown("## Faster-Whisper vs. SenseVoice")
    audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio Input")
    examples = gr.Examples(
        examples=[["interview.mp3"], ["news.mp3"], ["meeting.mp3"]],
        inputs=[audio_input],
        label="Example Audio Files"
    )

    # ────────────────────────────────────────────────────────────────
    # 1) CONTROL PANELS (still side-by-side)
    with gr.Row():
        with gr.Column():
            gr.Markdown("### Faster-Whisper ASR")
            whisper_dd      = gr.Dropdown(choices=WHISPER_MODELS, value=WHISPER_MODELS[0], label="Whisper Model")
            whisper_lang    = gr.Dropdown(choices=WHISPER_LANGUAGES, value="auto",      label="Whisper Language")
            device_radio    = gr.Radio(choices=["GPU","CPU"], value="GPU", label="Device")
            whisper_multilingual_en = gr.Checkbox(label="Multilingual", value=False)
            btn_w           = gr.Button("Transcribe with Faster-Whisper")

        with gr.Column():
            gr.Markdown("### FunASR SenseVoice ASR")
            sense_dd         = gr.Dropdown(choices=SENSEVOICE_MODELS, value=SENSEVOICE_MODELS[0], label="SenseVoice Model")
            sense_lang       = gr.Dropdown(choices=SENSEVOICE_LANGUAGES, value="auto", label="SenseVoice Language")
            device_radio_s   = gr.Radio(choices=["GPU","CPU"], value="GPU",     label="Device")
            punct_chk        = gr.Checkbox(label="Enable Punctuation", value=True)
            btn_s            = gr.Button("Transcribe with SenseVoice")

    # ────────────────────────────────────────────────────────────────
    # 2) SHARED TRANSCRIPT ROW (aligned side-by-side)
    with gr.Row():
        with gr.Column():
            gr.Markdown("### Faster-Whisper Output")
            out_w   = gr.Textbox(label="Raw Transcript", visible=False)
            out_w_d = gr.HTML(label="Diarized Transcript", elem_classes=["diar"])

        with gr.Column():
            gr.Markdown("### SenseVoice Output")
            out_s   = gr.Textbox(label="Raw Transcript", visible=False)
            out_s_d = gr.HTML(label="Diarized Transcript", elem_classes=["diar"])

    # ────────────────────────────────────────────────────────────────
    # 3) WIRING UP TOGGLES & BUTTONS

    # wire the callbacks into those shared boxes
    btn_w.click(
        fn=transcribe_fwhisper_stream,
        inputs=[whisper_dd, whisper_lang, audio_input, device_radio, whisper_multilingual_en],
        outputs=[out_w, out_w_d]
    )
    btn_s.click(
        fn=transcribe_sense_stream,
        inputs=[sense_dd, sense_lang, audio_input, punct_chk, device_radio_s],
        outputs=[out_s, out_s_d]
    )


if __name__ == "__main__":
    Demo.launch()