Spaces:
Runtime error
fix on way to apply batched faster-whisper
app.py CHANGED
@@ -4,7 +4,7 @@ import tempfile
 
 import torch
 import gradio as gr
-from faster_whisper import WhisperModel
+from faster_whisper import BatchedInferencePipeline, WhisperModel
 from pydub import AudioSegment
 from pyannote.audio import Pipeline as DiarizationPipeline
 import opencc
@@ -101,11 +101,12 @@ def get_fwhisper_model(model_id: str, device: str) -> WhisperModel:
     key = (model_id, device)
     if key not in _fwhisper_models:
         compute_type = "float16" if device.startswith("cuda") else "int8"
-        _fwhisper_models[key] = WhisperModel(
+        model = WhisperModel(
             model_id,
             device=device,
             compute_type=compute_type,
         )
+        _fwhisper_models[key] = BatchedInferencePipeline(model=model)
     return _fwhisper_models[key]
 
 def get_sense_model(model_id: str, device_str: str):
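
For reference, a minimal usage sketch of the batched pipeline that get_fwhisper_model now caches. The model id, device, compute type, audio path, and batch_size below are illustrative assumptions rather than values taken from app.py; only the BatchedInferencePipeline wrapping mirrors the change above.

# Standalone sketch of the batched faster-whisper path (assumed example values).
from faster_whisper import BatchedInferencePipeline, WhisperModel

# Build the base model, then wrap it for chunk-level batching, as in get_fwhisper_model.
model = WhisperModel("large-v3", device="cuda", compute_type="float16")
pipeline = BatchedInferencePipeline(model=model)

# transcribe() keeps the unbatched interface and adds batch_size; it returns
# a lazy generator of segments plus a transcription-info object.
segments, info = pipeline.transcribe("audio.wav", batch_size=16)
for segment in segments:
    print(f"[{segment.start:.2f} -> {segment.end:.2f}] {segment.text}")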