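"""Gradio demo for Kyutai's Hibiki speech translation (French -> English).

Rough pipeline, as implemented below: an uploaded video or audio clip is
converted to MP3, translated chunk by chunk via `python -m moshi.run_inference`
with the kyutai/hibiki-1b-pytorch-bf16 checkpoint, and the selected translated
chunk can then be overlaid onto the original audio and, optionally, muxed back
into the original video.
"""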
import gradio as gr
import spaces
import glob
import os
import shutil
import subprocess
import tempfile
from pydub import AudioSegment
from moviepy.editor import VideoFileClip, AudioFileClip, concatenate_videoclips, ImageClip
is_shared_ui = "fffiloni/Hibiki-simple" in os.environ.get("SPACE_ID", "")
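# On the shared demo Space the input is capped at 60 s in process_audio() below
# (presumably to keep GPU jobs short); duplicated Spaces skip that cap.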
def extract_audio_as_mp3(video_path: str) -> str:
"""
Extracts the audio from a video file and saves it as a temporary MP3 file.
:param video_path: Path to the input video file.
:return: Path to the temporary MP3 file.
"""
# Load the video
video = VideoFileClip(video_path)
# Create a temporary file for the extracted audio
temp_audio = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
# Extract and export the audio as MP3
    video.audio.write_audiofile(temp_audio.name, codec="mp3")
    # Release the clip's ffmpeg reader before returning
    video.close()
    return temp_audio.name  # Return the temp file path
def process_audio(input_file):
"""
Processes the input audio file:
- Converts it to MP3 format
- Trims the audio to 1 minute if in shared UI mode
Args:
input_file (str): Path to the audio file (WAV/MP3/etc.)
Returns:
str: Path to the converted MP3 file
"""
# Load the audio file
audio = AudioSegment.from_file(input_file)
# Ensure it's in MP3 format
output_file = os.path.splitext(input_file)[0] + ".mp3"
if is_shared_ui:
# Limit duration to 1 minute (60,000 ms)
if len(audio) > 60000:
audio = audio[:60000] # Trim to 60 seconds
# Export as MP3
audio.export(output_file, format="mp3")
return output_file
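# Example (hypothetical path): process_audio("/tmp/clip.wav") writes and returns
# "/tmp/clip.mp3", trimmed to the first 60 s when running on the shared Space.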
def cleanup_old_audio():
"""Remove old audio files before starting a new inference."""
files_to_remove = glob.glob("out_en-*.wav") + glob.glob("final_output.wav")
if files_to_remove:
print(f"Cleaning up {len(files_to_remove)} old audio files...")
for file in files_to_remove:
try:
os.remove(file)
print(f"Deleted: {file}")
except Exception as e:
print(f"Error deleting {file}: {e}")
else:
print("No old audio files found.")
def find_audio_chunks():
"""Finds all out_en-*.wav files, sorts them, and returns the file paths."""
wav_files = glob.glob("out_en-*.wav")
# Extract numbers and sort properly
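    # e.g. "out_en-10.wav" sorts after "out_en-2.wav" (numeric, not lexicographic)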
wav_files.sort(key=lambda x: int(x.split('-')[-1].split('.')[0]))
print(f"Found {len(wav_files)} audio chunks: {wav_files}")
return wav_files # Returning the list of file paths
def concatenate_audio():
    """
    Collects the audio chunks created by the translation model.
    Finds all chunk files matching the `out_en-*.wav` pattern,
    sorts them numerically, moves them into a temp folder,
    and returns the path to the first file and the list of all files.
    (Concatenation into a single WAV is currently disabled; the chunks
    are exposed individually in the UI instead.)
    Returns:
        Tuple[Optional[str], List[str]]: Path to the first chunk (or None) and list of all chunks
    """
    wav_files = find_audio_chunks()  # Get sorted audio file paths
    if not wav_files:
        print("No audio files found.")
        return None, []
    # Create a temporary directory and move each chunk into it
    temp_dir = tempfile.mkdtemp()
    temp_wav_files = []
    for file in wav_files:
        temp_file_path = os.path.join(temp_dir, os.path.basename(file))
        shutil.move(file, temp_file_path)
        temp_wav_files.append(temp_file_path)
    return temp_wav_files[0], temp_wav_files  # Returning temp paths
@spaces.GPU()
def infer(audio_input_path):
"""
Perform translation inference on an audio file using the Hibiki model.
This function orchestrates the full processing and translation pipeline:
- Cleans up old audio files from previous runs
- Processes the input audio (trims it and ensures correct format)
- Executes the Hibiki translation model via subprocess
- Concatenates translated audio chunks
- Returns the translated audio and UI updates
Args:
audio_input_path (str): Path to the input audio file (e.g., MP3 or WAV)
Returns:
A tuple with:
- Path to the first translated audio chunk (WAV file)
- Updated Gradio Dropdown component with all chunk file choices
- Visibility settings for additional audio controls and result components
"""
cleanup_old_audio()
audio_input_path = process_audio(audio_input_path)
print(f"Processed file saved as: {audio_input_path}")
    command = [
        "python", "-m", "moshi.run_inference",
        audio_input_path, "out_en.wav",
        "--hf-repo", "kyutai/hibiki-1b-pytorch-bf16"
    ]
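    # The rest of the pipeline assumes the inference script writes its output as
    # numbered chunks (out_en-*.wav) into the working directory, which is what
    # find_audio_chunks() globs for afterwards.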
result = subprocess.run(command, capture_output=True, text=True)
# Print the standard output and error
print("STDOUT:", result.stdout)
print("STDERR:", result.stderr)
# Check if the command was successful
if result.returncode == 0:
print("Command executed successfully.")
first_out, file_list = concatenate_audio()
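        # These 7 return values map, in order, to the outputs wired in
        # submit_btn.click(): output_result, dropdown_wav_selector,
        # result_accordion, wav_list, choose_this_btn, volume_reduction, cut_start.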
return first_out, gr.update(choices=file_list, value=file_list[0], visible=True), gr.update(visible=True), gr.update(value=file_list, visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
else:
print("Error executing command.")
raise gr.Error("Error executing command")
def load_chosen_audio(audio_path):
return audio_path
def overlay_audio(
original_mp3: str,
translated_wav: str,
volume_reduction_db: int = 10,
cut_start: float = 0.0
) -> str:
"""
Overlays translated audio on top of the original, reduces the original volume,
and ensures the final audio lasts as long as the longer of the two tracks.
:param original_mp3: Path to the original MP3 file.
:param translated_wav: Path to the translated WAV file.
    :param volume_reduction_db: How much the original's volume is lowered, in dB (default: 10).
:param cut_start: Number of seconds to trim from the start of the translated audio (default: 0.0).
:return: Path to the temporary output WAV file.
"""
# Load original MP3 and convert to WAV
original = AudioSegment.from_mp3(original_mp3).set_frame_rate(16000).set_channels(1)
# Lower the volume
original = original - volume_reduction_db
# Load the translated WAV
translated = AudioSegment.from_wav(translated_wav).set_frame_rate(16000).set_channels(1)
# Trim the start of the translated audio if needed
if cut_start > 0:
cut_ms = int(cut_start * 1000) # Convert seconds to milliseconds
translated = translated[cut_ms:]
# Determine the final length (longer of the two)
final_length = max(len(original), len(translated))
# Extend the shorter track with silence to match the longer track
if len(original) < final_length:
original += AudioSegment.silent(duration=final_length - len(original))
if len(translated) < final_length:
translated += AudioSegment.silent(duration=final_length - len(translated))
# Overlay the translated speech over the original
combined = original.overlay(translated)
# Create a temporary file to save the output
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
combined.export(temp_file.name, format="wav")
print(f"Final audio saved at: {temp_file.name}")
return temp_file.name
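# Example (hypothetical paths): overlay_audio("original.mp3", "out_en-0.wav",
# volume_reduction_db=30, cut_start=2.0) ducks the original by 30 dB and drops
# the first 2 s of the translation, matching the UI slider defaults below.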
def process_final_combination(audio_in, chosen_translated, volume, cut_start, video_input):
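    """
    Re-processes the original audio, overlays the chosen translated chunk on top
    of it, and only reveals the "apply to video" button when a video was uploaded.
    """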
audio_in = process_audio(audio_in)
temp_output_path = overlay_audio(audio_in, chosen_translated, volume, cut_start)
if video_input:
return gr.update(value=temp_output_path, visible=True), gr.update(visible=True)
else:
return gr.update(value=temp_output_path, visible=True), gr.update(visible=False)
def replace_video_audio(video_path: str, new_audio_path: str):
"""
Replaces a video's audio with new translated audio.
If the new audio is longer than the video, extends the video
by freezing the last frame.
Args:
video_path (str): Path to the video file
new_audio_path (str): Path to the new audio file
Returns:
gr.update: Gradio update pointing to the new video file path
"""
# Debugging: Ensure video_path is a string
print(f"DEBUG: video_path = {video_path}, type = {type(video_path)}")
if not isinstance(video_path, str):
raise ValueError(f"video_path must be a string, got {type(video_path)}")
# Load video and audio
video = VideoFileClip(video_path)
new_audio = AudioFileClip(new_audio_path)
# Extend video if new audio is longer
if new_audio.duration > video.duration:
# Safely extract last frame
print("Extending video to match longer audio...")
last_frame = None
for frame in video.iter_frames(): # Iterates through all frames
last_frame = frame
if last_frame is None:
raise RuntimeError("Failed to extract last frame from video.")
freeze_duration = new_audio.duration - video.duration
freeze_frame = ImageClip(last_frame).set_duration(freeze_duration).set_fps(video.fps)
video = concatenate_videoclips([video, freeze_frame])
# Set the new audio track
video = video.set_audio(new_audio)
# Create a temp file for output
temp_video = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
# Save the final video
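    # (write_videofile re-encodes the whole clip with libx264/AAC, so this is
    # typically the slowest step for long inputs)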
video.write_videofile(
temp_video.name,
codec="libx264",
audio_codec="aac",
fps=video.fps,
preset="medium"
)
# Clean up resources
video.close()
new_audio.close()
# Return path to new video
return gr.update(value=temp_video.name, visible=True)
def clean_previous_video_input():
return gr.update(value=None)
def show_upcoming_component():
return gr.update(visible=True)
def hide_previous():
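    # Hides, in order: dropdown_wav_selector, result_accordion, wav_list,
    # choose_this_btn, combined_output, apply_to_video_btn, final_video_out,
    # volume_reduction, cut_start (see the submit_btn.click() wiring below).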
return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
css="""
div#col-container{
margin: 0 auto;
max-width: 1200px;
}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.Markdown("# Hibiki ")
gr.Markdown("This is a simple demo for Kyutai's Hibiki translation models • Currently supports French to English only.")
gr.HTML("""
<div style="display:flex;column-gap:4px;">
<a href="https://huggingface.co/spaces/fffiloni/Hibiki-simple?duplicate=true">
<img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-sm.svg" alt="Duplicate this Space">
</a>
</div>
""")
with gr.Row():
with gr.Column(scale=2):
video_input = gr.Video(label="Video IN (Optional)")
audio_input = gr.Audio(label="Audio IN", type="filepath")
submit_btn = gr.Button("Generate translations")
gr.Examples(
examples = [
"./examples/sample_fr_hibiki_intro.mp3",
"./examples/sample_fr_hibiki_crepes.mp3",
"./examples/sample_fr_hibiki_monologue_otis.mp3"
],
inputs = [audio_input]
)
with gr.Column(scale=3):
output_result = gr.Audio(label="Translated result")
with gr.Row():
dropdown_wav_selector = gr.Dropdown(
label="Pick a generated translated audio to load",
value = None,
visible=False,
scale=2
)
                    choose_this_btn = gr.Button("Apply this one as the translated audio overlay", scale=1, visible=False)
with gr.Row():
                    volume_reduction = gr.Slider(label="Original audio volume reduction (dB)", minimum=0, maximum=60, step=1, value=30, visible=False)
cut_start = gr.Slider(label="Reduce translator delay (seconds)", minimum=0.0, maximum=4.0, step=0.1, value=2.0, visible=False)
                combined_output = gr.Audio(label="Combined Audio", type="filepath", visible=False, show_download_button=True)
apply_to_video_btn = gr.Button("Apply this combination to your video", visible=False)
final_video_out = gr.Video(label="Video + Translated Audio", visible=False)
with gr.Accordion("Downloadable audio Output list", open=False, visible=False) as result_accordion:
wav_list = gr.Files(label="Output Audio List", visible=False)
audio_input.upload(
fn = clean_previous_video_input,
inputs = None,
outputs = [video_input],
queue=False,
show_api=False
)
video_input.upload(
fn = extract_audio_as_mp3,
inputs = [video_input],
outputs = [audio_input],
queue=False,
show_api=False
)
dropdown_wav_selector.select(
fn = load_chosen_audio,
inputs = [dropdown_wav_selector],
outputs = [output_result],
queue = False,
show_api=False
)
choose_this_btn.click(
fn = show_upcoming_component,
inputs=None,
outputs=[combined_output],
queue=False,
show_api=False
).then(
fn = process_final_combination,
inputs = [audio_input, dropdown_wav_selector, volume_reduction, cut_start, video_input],
outputs = [combined_output, apply_to_video_btn],
show_api=False
)
apply_to_video_btn.click(
fn = show_upcoming_component,
inputs=None,
outputs=[final_video_out],
queue=False,
show_api=False
).then(
fn = replace_video_audio,
inputs = [video_input, combined_output],
outputs = [final_video_out],
show_api=False
)
submit_btn.click(
fn = hide_previous,
inputs = None,
outputs = [dropdown_wav_selector, result_accordion, wav_list, choose_this_btn, combined_output, apply_to_video_btn, final_video_out, volume_reduction, cut_start],
show_api=False
).then(
fn = infer,
inputs = [audio_input],
outputs = [output_result, dropdown_wav_selector, result_accordion, wav_list, choose_this_btn, volume_reduction, cut_start],
show_api=True
)
demo.queue().launch(show_api=True, show_error=True, ssr_mode=False, mcp_server=True)