'''
Environment setup (for reference):

conda create --name animeins python=3.10
conda activate animeins
pip install ipykernel
python -m ipykernel install --user --name animeins --display-name "animeins"
pip install -r requirements.txt
pip install torch==2.1.1 torchvision
pip install mmcv==2.1.0 -f https://download.openmmlab.com/mmcv/dist/cu121/torch2.1/index.html
pip install mmdet
pip install "numpy<2.0.0"
pip install moviepy==1.0.3
pip install "httpx[socks]"
'''
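
# Optional startup sanity check -- a minimal sketch, not part of the original app:
# it asserts the versions pinned in the setup block above; drop it if unwanted.
def _check_pinned_versions():
    import torch
    import mmcv
    assert torch.__version__.startswith("2.1"), f"expected torch 2.1.x, got {torch.__version__}"
    assert mmcv.__version__ == "2.1.0", f"expected mmcv 2.1.0, got {mmcv.__version__}"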

import gradio as gr
import os
import cv2
import numpy as np
from PIL import Image
from typing import Literal

from animeinsseg import AnimeInsSeg, AnimeInstances
from animeinsseg.anime_instances import get_color

# Install the required OpenMMLab packages at startup
os.system("mim install mmengine")
os.system("mim install mmcv==2.1.0")
os.system("mim install mmdet==3.2.0")

# Download the model weights if not already present
if not os.path.exists("models"):
    os.mkdir("models")
    os.system("huggingface-cli lfs-enable-largefiles .")
    os.system("git clone https://huggingface.co/dreMaz/AnimeInstanceSegmentation models/AnimeInstanceSegmentation")

# Initialize the segmentation model
ckpt = r'models/AnimeInstanceSegmentation/rtmdetl_e60.ckpt'
mask_thres = 0.3
instance_thres = 0.3
refine_kwargs = {'refine_method': 'refinenet_isnet'}
net = AnimeInsSeg(ckpt, mask_thr=mask_thres, refine_kwargs=refine_kwargs)
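
# Quick smoke test (illustrative; "test.jpg" is a hypothetical path -- the
# attribute names are the ones this script relies on further down):
#
#   img = cv2.imread("test.jpg")
#   inst = net.infer(img, output_type='numpy', pred_score_thr=instance_thres)
#   print(inst.bboxes, inst.masks)  # inst.bboxes is None when nothing is detected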

def image_to_sketch(image: np.ndarray) -> np.ndarray:
    """Convert a BGR image to a pencil sketch via a color-dodge blend."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    inverted = 255 - gray
    blurred = cv2.GaussianBlur(inverted, (21, 21), 0)
    inverted_blurred = 255 - blurred
    sketch = cv2.divide(gray, inverted_blurred, scale=256.0)
    return cv2.cvtColor(sketch, cv2.COLOR_GRAY2BGR)  # Return a 3-channel image
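
# Illustrative usage (a sketch; "input.png" is a hypothetical path):
#
#   img = cv2.imread("input.png")                    # OpenCV loads BGR
#   cv2.imwrite("sketch.png", image_to_sketch(img))
#
# The divide computes gray * 256 / inverted_blurred: in smooth regions the
# denominator is close to gray, so the ratio saturates to white, while near
# edges the blur raises the denominator above gray, leaving dark strokes.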

def generate_combined_transition_video(
    original_image: np.ndarray,
    depth_map: np.ndarray,
    first_transition: Literal['character_first', 'near_to_far', 'far_to_near'] = 'character_first',
    second_transition: Literal['character_first', 'near_to_far', 'far_to_near'] = 'character_first',
    duration_sec: float = 6.0,  # Total duration covering both transitions
    frame_rate: int = 30,
    depth_blur: int = 15,  # Gaussian kernel size for depth smoothing; must be odd
    debug_visualize: bool = False
) -> str:
    """
    Generate a combined transition video with two phases:
    1. Blank to sketch
    2. Sketch to original

    Each phase has its own transition options:
    - 'character_first': Segmented instances transition first
    - 'near_to_far': Transition from nearest to farthest based on depth
    - 'far_to_near': Transition from farthest to nearest based on depth
    """
    # Convert inputs to the formats used internally
    original = cv2.cvtColor(original_image, cv2.COLOR_RGB2BGR)
    if depth_map.ndim == 3:  # Tolerate depth maps saved as RGB
        depth_map = cv2.cvtColor(depth_map, cv2.COLOR_RGB2GRAY)

    # Get the sketch version
    sketch = image_to_sketch(original)
    h, w = original.shape[:2]

    # Perform instance segmentation
    instances: AnimeInstances = net.infer(
        original,
        output_type='numpy',
        pred_score_thr=instance_thres
    )

    # Prepare the depth map: match the image size, smooth, normalize to [0, 1]
    depth_map = cv2.resize(depth_map, (w, h))
    depth_map = cv2.GaussianBlur(depth_map, (depth_blur, depth_blur), 0)
    depth_map = depth_map.astype(np.float32) / 255.0  # Assumes an 8-bit depth map
    # Build the ordered layer masks for a given transition type
    def create_layer_masks(transition_type):
        layer_masks = []
        layer_depths = []

        # Process segmented instances
        if instances.bboxes is not None:
            for mask in instances.masks:
                instance_depth = np.mean(depth_map[mask.astype(bool)])
                if transition_type == 'character_first':
                    layer_masks.append(mask.astype(np.float32))
                    layer_depths.append(0)
                else:
                    if transition_type == 'near_to_far':
                        instance_depth = 1.0 - instance_depth
                    layer_masks.append(mask.astype(np.float32))
                    layer_depths.append(instance_depth)

        # Everything not covered by an instance mask forms the background
        if layer_masks:
            full_mask = 1.0 - np.clip(np.sum(layer_masks, axis=0), 0, 1)
        else:
            full_mask = np.ones((h, w), dtype=np.float32)

        # Process the remaining (background) areas
        if transition_type == 'character_first':
            if np.sum(full_mask) > 0:
                layer_masks.append(full_mask)
                layer_depths.append(1)
        else:
            # Split the background into depth bands so it transitions in slices
            remaining_depth = depth_map * full_mask
            num_depth_bands = 10
            min_depth = np.min(remaining_depth[full_mask > 0]) if np.sum(full_mask) > 0 else 0
            max_depth = np.max(remaining_depth[full_mask > 0]) if np.sum(full_mask) > 0 else 1
            depth_bands = np.linspace(min_depth, max_depth, num_depth_bands + 1)
            for i in range(num_depth_bands):
                lower = depth_bands[i]
                upper = depth_bands[i + 1]
                # Restrict bands to the background, and make the last band's
                # upper edge inclusive so pixels at max_depth are not dropped
                in_band = (remaining_depth >= lower) & (
                    (remaining_depth < upper) if i < num_depth_bands - 1 else (remaining_depth <= upper)
                )
                band_mask = (in_band & (full_mask > 0)).astype(np.float32)
                if np.sum(band_mask) > 0:
                    band_depth = np.mean(remaining_depth[band_mask.astype(bool)])
                    if transition_type == 'near_to_far':
                        band_depth = 1.0 - band_depth
                    layer_masks.append(band_mask)
                    layer_depths.append(band_depth)

        # Order layers by depth for the depth-based transitions
        if transition_type != 'character_first' and layer_masks:
            sorted_indices = np.argsort(layer_depths)
            layer_masks = [layer_masks[i] for i in sorted_indices]
        return layer_masks
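
    # Worked example of the banding above (hypothetical numbers): with
    # min_depth=0.2, max_depth=0.7 and num_depth_bands=10, np.linspace yields
    # edges 0.20, 0.25, ..., 0.70, so each background pixel lands in one
    # 0.05-wide depth slice. For 'near_to_far', band_depth is flipped (1.0 - d),
    # so np.argsort orders the brightest slices first (near, under the common
    # bright-is-near depth-map convention).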
    # Get masks for both transitions
    first_masks = create_layer_masks(first_transition)
    second_masks = create_layer_masks(second_transition)

    # Set up the video writer
    output_path = "output_video.mp4"
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video = cv2.VideoWriter(output_path, fourcc, frame_rate, (w, h))
    total_frames = int(duration_sec * frame_rate)
    half_duration = duration_sec / 2
    for frame_idx in range(total_frames):
        current_time = frame_idx / frame_rate

        if current_time < half_duration:
            # First phase: blank (white) to sketch
            num_layers = len(first_masks)
            layer_duration = half_duration / num_layers if num_layers > 0 else half_duration
            blended = np.ones_like(original, dtype=np.float32) * 255
            for layer_idx, layer_mask in enumerate(first_masks):
                layer_start = layer_idx * layer_duration
                layer_progress = np.clip((current_time - layer_start) / layer_duration, 0, 1)
                layer_alpha = layer_mask * layer_progress
                layer_alpha = np.repeat(layer_alpha[..., np.newaxis], 3, axis=2)
                blended = blended * (1 - layer_alpha) + sketch.astype(np.float32) * layer_alpha
        else:
            # Second phase: sketch to original
            num_layers = len(second_masks)
            layer_duration = half_duration / num_layers if num_layers > 0 else half_duration
            blended = sketch.astype(np.float32)
            for layer_idx, layer_mask in enumerate(second_masks):
                layer_start = half_duration + layer_idx * layer_duration
                layer_progress = np.clip((current_time - layer_start) / layer_duration, 0, 1)
                layer_alpha = layer_mask * layer_progress
                layer_alpha = np.repeat(layer_alpha[..., np.newaxis], 3, axis=2)
                blended = blended * (1 - layer_alpha) + original.astype(np.float32) * layer_alpha

        blended = np.clip(blended, 0, 255).astype(np.uint8)
        if debug_visualize:
            cv2.imshow('Blended', blended)
            if cv2.waitKey(1) == 27:  # Esc aborts rendering
                break
        video.write(blended)

    video.release()
    if debug_visualize:
        cv2.destroyAllWindows()
    return output_path
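
# Illustrative direct call (a sketch; the two file names are hypothetical
# placeholders for an RGB image and its depth map):
#
#   rgb = cv2.cvtColor(cv2.imread("scene.png"), cv2.COLOR_BGR2RGB)
#   depth = np.array(Image.open("scene_depth.png"))
#   generate_combined_transition_video(rgb, depth,
#                                      first_transition='character_first',
#                                      second_transition='far_to_near')
#
# Per-layer timing: with N layers and half = duration_sec / 2, layer i fades in
# over [i * half/N, (i + 1) * half/N], so layers finish strictly in order.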

def process_images(original_image, depth_map, first_transition, second_transition, duration):
    """Gradio callback: convert the PIL inputs to numpy arrays and render the video."""
    original_np = np.array(original_image)
    depth_np = np.array(depth_map)
    video_path = generate_combined_transition_video(
        original_image=original_np,
        depth_map=depth_np,
        first_transition=first_transition,
        second_transition=second_transition,
        duration_sec=float(duration),
        debug_visualize=False
    )
    return video_path

# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Anime Image Transition Video Generator")
    gr.Markdown("Upload an image and its depth map to generate a two-phase transition video:")
    gr.Markdown("1. From blank to sketch")
    gr.Markdown("2. From sketch to the original image")
    with gr.Row():
        with gr.Column():
            original_image = gr.Image(label="Original Image", type="pil")
            depth_map = gr.Image(label="Depth Map", type="pil")
            with gr.Group():
                gr.Markdown("### First Transition (Blank → Sketch)")
                first_transition = gr.Radio(
                    choices=["character_first", "near_to_far", "far_to_near"],
                    value="character_first",
                    label="Render Order",
                    info="How elements appear from blank to sketch"
                )
            with gr.Group():
                gr.Markdown("### Second Transition (Sketch → Original)")
                second_transition = gr.Radio(
                    choices=["character_first", "near_to_far", "far_to_near"],
                    value="character_first",
                    label="Render Order",
                    info="How elements transition from sketch to original"
                )
            duration = gr.Slider(2, 20, value=6, step=0.5, label="Total Duration (seconds)")
            submit_btn = gr.Button("Generate Video")
        with gr.Column():
            output_video = gr.Video(label="Output Video")

    submit_btn.click(
        fn=process_images,
        inputs=[original_image, depth_map, first_transition, second_transition, duration],
        outputs=output_video
    )
    # Example inputs bundled with the Space
    gr.Examples(
        [
            ["化物语封面.jpeg", "化物语封面深度.png", "character_first", "far_to_near"],
            ["可莉风景.png", "可莉风景_depth.png", "near_to_far", "character_first"],
            ["竹林万叶.jpg", "竹林万叶_depth.png", "far_to_near", "near_to_far"],
            ["重云行秋.jpg", "重云行秋_depth.png", "character_first", "character_first"],
        ],
        inputs=[original_image, depth_map, first_transition, second_transition]
    )

if __name__ == "__main__":
    demo.launch(share=True)