import streamlit as st
import torch
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import torchaudio
import os
import re
import jieba
# Device setup
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load Whisper model for transcription
MODEL_NAME = "alvanlii/whisper-small-cantonese"
language = "zh"
pipe = pipeline(task="automatic-speech-recognition", model=MODEL_NAME, chunk_length_s=60, device=device)
# Force the decoder to transcribe in Chinese instead of auto-detecting the spoken language
pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=language, task="transcribe")
def transcribe_audio(audio_path):
    waveform, sample_rate = torchaudio.load(audio_path)
    duration = waveform.shape[1] / sample_rate
    if duration > 60:
        # Split long recordings into 60-second chunks stepped every 50 seconds,
        # giving a 10-second overlap so words at a chunk boundary are not cut off.
        results = []
        for start in range(0, int(duration), 50):
            end = min(start + 60, int(duration))
            chunk = waveform[:, start * sample_rate:end * sample_rate]
            temp_filename = f"temp_chunk_{start}.wav"
            torchaudio.save(temp_filename, chunk, sample_rate)
            result = pipe(temp_filename)["text"]
            results.append(result)
            os.remove(temp_filename)
        return " ".join(results)
    return pipe(audio_path)["text"]
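# Illustrative usage of transcribe_audio (the file name below is a placeholder, not part of this app):
#   cantonese_text = transcribe_audio("sample_call.wav")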
# Load translation model
tokenizer = AutoTokenizer.from_pretrained("botisan-ai/mt5-translate-yue-zh")
model = AutoModelForSeq2SeqLM.from_pretrained("botisan-ai/mt5-translate-yue-zh").to(device)
def split_sentences(text):
    # Split on Chinese sentence-ending punctuation while keeping the delimiters attached.
    return [s for s in re.split(r'(?<=[。!?])', text) if s]
def translate(text):
    # Translate sentence by sentence to keep each input short for the seq2seq model.
    sentences = split_sentences(text)
    translations = []
    for sentence in sentences:
        inputs = tokenizer(sentence, return_tensors="pt").to(device)
        outputs = model.generate(inputs["input_ids"], max_length=1000, num_beams=5)
        translations.append(tokenizer.decode(outputs[0], skip_special_tokens=True))
    return " ".join(translations)
# Load quality rating model
rating_pipe = pipeline("text-classification", model="Leo0129/CustomModel_dianping-chinese")
def split_text(text, max_length=512):
    # Segment with jieba so chunks break on word boundaries, keeping each chunk
    # under max_length characters to stay within the classifier's input limit.
    words = list(jieba.cut(text))
    chunks, current_chunk = [], ""
    for word in words:
        if len(current_chunk) + len(word) < max_length:
            current_chunk += word
        else:
            chunks.append(current_chunk)
            current_chunk = word
    if current_chunk:
        chunks.append(current_chunk)
    return chunks
def rate_quality(text):
    label_map = {"LABEL_0": "Poor", "LABEL_1": "Neutral", "LABEL_2": "Good"}
    chunks = split_text(text)
    results = []
    for chunk in chunks:
        result = rating_pipe(chunk)[0]
        results.append(label_map.get(result["label"], "Unknown"))
    return max(set(results), key=results.count)  # Return the most frequent rating across chunks
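# Illustrative usage of rate_quality (example sentence and expected label are assumptions, not app output):
#   rate_quality("服务态度很好,整体沟通顺畅。")  # a clearly positive review would be expected to map to "Good"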
# Streamlit UI
st.title("Cantonese Audio Analysis")
st.write("Upload a Cantonese audio file to transcribe, translate, and rate the conversation quality.")
uploaded_file = st.file_uploader("Upload Audio File", type=["wav", "mp3", "flac"])
if uploaded_file is not None:
    st.audio(uploaded_file, format="audio/wav")
    # Persist the upload to disk so torchaudio can read it from a file path.
    temp_audio_path = "uploaded_audio.wav"
    with open(temp_audio_path, "wb") as f:
        f.write(uploaded_file.getbuffer())
    st.write("### Processing...")
    transcript = transcribe_audio(temp_audio_path)
    st.write("**Transcript:**", transcript)
    translated_text = translate(transcript)
    st.write("**Translation:**", translated_text)
    quality_rating = rate_quality(translated_text)
    st.write("**Quality Rating:**", quality_rating)
    os.remove(temp_audio_path)