# Batch-transcribe all .flac files in a folder with SenseVoiceSmall (FunASR)
# and append the results to a resumable CSV file.
import os
import csv

from tqdm import tqdm
from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess

model_dir = "iic/SenseVoiceSmall"

# Load SenseVoiceSmall with an FSMN-VAD front end so long recordings are
# split into segments of at most 30 s before recognition.
model = AutoModel(
    model=model_dir,
    trust_remote_code=True,
    remote_code="./model.py",
    vad_model="fsmn-vad",
    vad_kwargs={"max_single_segment_time": 30000},
    device="cuda:0",
)

# Folder containing the .flac files to transcribe (left empty here) and the output CSV path.
audio_folder = ""
output_csv = "./recognition_results.csv"

audio_files = [f for f in os.listdir(audio_folder) if f.endswith(".flac")]

# Create the CSV with a header row if it does not exist yet or is empty.
if not os.path.exists(output_csv) or os.path.getsize(output_csv) == 0:
    with open(output_csv, mode="w", newline="", encoding="utf-8") as file:
        writer = csv.writer(file)
        writer.writerow(["Audio File", "Transcription"])

# Collect file names already present in the CSV so the script can resume
# where it left off after an interruption.
existing_files = set()
with open(output_csv, mode="r", newline="", encoding="utf-8") as file:
    reader = csv.reader(file)
    next(reader)  # skip the header row
    for row in reader:
        existing_files.add(row[0])

# Append new results; files already listed in the CSV are skipped below.
with open(output_csv, mode="a", newline="", encoding="utf-8") as file:
    writer = csv.writer(file)

    for audio_file in tqdm(audio_files, desc="Processing", unit="file"):
        if audio_file in existing_files:
            continue

        audio_path = os.path.join(audio_folder, audio_file)

        try:
            # Transcribe with automatic language detection, inverse text
            # normalization, and VAD segments merged up to 15 s.
            res = model.generate(
                input=audio_path,
                cache={},
                language="auto",
                use_itn=True,
                batch_size_s=60,
                merge_vad=True,
                merge_length_s=15,
            )
            # Convert the tagged SenseVoice output (language/emotion/event tokens)
            # into plain readable text.
            transcription = rich_transcription_postprocess(res[0]["text"])

            # Mark empty recognition results explicitly.
            if not transcription.strip():
                transcription = "none!"

        except Exception as e:
            transcription = f"Error: {str(e)}"

        writer.writerow([audio_file, transcription])

print("Recognition completed and saved to CSV.")