Upload sketch_video_app_from_blank.py

sketch_video_app_from_blank.py (ADDED, +289 −0):

'''
conda create --name animeins python=3.10
conda activate animeins
pip install ipykernel
python -m ipykernel install --user --name animeins --display-name "animeins"
pip install -r requirements.txt

pip install torch==2.1.1 torchvision
pip install mmcv==2.1.0 -f https://download.openmmlab.com/mmcv/dist/cu121/torch2.1/index.html
pip install mmdet
pip install "numpy<2.0.0"
pip install moviepy==1.0.3
pip install "httpx[socks]"
'''

import gradio as gr
import os
import cv2
import numpy as np
from PIL import Image
from typing import Literal
import pathlib
from animeinsseg import AnimeInsSeg, AnimeInstances
from animeinsseg.anime_instances import get_color

# Install the OpenMMLab packages at startup (mim resolves matching wheels)
os.system("mim install mmengine")
os.system("mim install mmcv==2.1.0")
os.system("mim install mmdet==3.2.0")

# Download the segmentation model on first run only
if not os.path.exists("models"):
    os.mkdir("models")
    os.system("huggingface-cli lfs-enable-largefiles .")
    os.system("git clone https://huggingface.co/dreMaz/AnimeInstanceSegmentation models/AnimeInstanceSegmentation")

# Initialize the segmentation model
ckpt = r'models/AnimeInstanceSegmentation/rtmdetl_e60.ckpt'
mask_thres = 0.3      # per-pixel mask confidence threshold
instance_thres = 0.3  # minimum detection score for an instance
refine_kwargs = {'refine_method': 'refinenet_isnet'}
net = AnimeInsSeg(ckpt, mask_thr=mask_thres, refine_kwargs=refine_kwargs)

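# Note: the checkpoint name suggests an RTMDet-L detector trained for 60
# epochs, and 'refinenet_isnet' appears to select AnimeInsSeg's ISNet-based
# mask refiner; see the dreMaz/AnimeInstanceSegmentation model card for the
# authoritative description.
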
def image_to_sketch(image: np.ndarray) -> np.ndarray:
    """Convert a BGR image to a pencil sketch via the colour-dodge trick."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    inverted = 255 - gray
    blurred = cv2.GaussianBlur(inverted, (21, 21), 0)
    inverted_blurred = 255 - blurred
    # Colour dodge: gray * 256 / (255 - blurred negative). Edges survive as
    # dark strokes while smooth regions wash out to white; cv2.divide
    # saturates at 255 and maps division by zero to 0.
    sketch = cv2.divide(gray, inverted_blurred, scale=256.0)
    return cv2.cvtColor(sketch, cv2.COLOR_GRAY2BGR)  # return a 3-channel image

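# The conversion can be sanity-checked in isolation (a minimal sketch;
# "sample.png" is a hypothetical file, not shipped with this Space):
#
#   img = cv2.imread("sample.png")  # BGR uint8
#   cv2.imwrite("sample_sketch.png", image_to_sketch(img))
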
def generate_combined_transition_video(
    original_image: np.ndarray,
    depth_map: np.ndarray,
    first_transition: str = 'character_first',
    second_transition: str = 'character_first',
    duration_sec: float = 6.0,  # total duration for both transitions
    frame_rate: int = 30,
    depth_blur: int = 15,       # Gaussian kernel size; must be odd
    debug_visualize: bool = False
) -> str:
    """
    Generate a combined transition video with two phases:
    1. Blank to sketch
    2. Sketch to original

    Each phase has its own transition options:
    - 'character_first': segmented instances transition first
    - 'near_to_far': transition from nearest to farthest based on depth
    - 'far_to_near': transition from farthest to nearest based on depth
    """
    # Convert inputs to the formats OpenCV expects
    original = cv2.cvtColor(original_image, cv2.COLOR_RGB2BGR)
    if depth_map.ndim == 3:  # tolerate depth maps uploaded as RGB
        depth_map = cv2.cvtColor(depth_map, cv2.COLOR_RGB2GRAY)

    # Get the sketch version
    sketch = image_to_sketch(original)
    h, w = original.shape[:2]

    # Perform instance segmentation
    instances: AnimeInstances = net.infer(
        original,
        output_type='numpy',
        pred_score_thr=instance_thres
    )

    # Prepare the depth map: match the image size, smooth it, scale to [0, 1]
    depth_map = cv2.resize(depth_map, (w, h))
    depth_map = cv2.GaussianBlur(depth_map, (depth_blur, depth_blur), 0)
    depth_map = depth_map.astype(np.float32) / 255.0

    # Create layer masks for both transitions
    def create_layer_masks(transition_type):
        layer_masks = []
        layer_depths = []

        # Process segmented instances
        if instances.bboxes is not None:
            for mask in instances.masks:
                instance_depth = np.mean(depth_map[mask.astype(bool)])

                if transition_type == 'character_first':
                    # Depth 0 pins every character to the front of the order
                    layer_masks.append(mask.astype(np.float32))
                    layer_depths.append(0)
                else:
                    # Depth maps are treated as brighter-is-nearer, so
                    # inverting makes near instances sort first
                    if transition_type == 'near_to_far':
                        instance_depth = 1.0 - instance_depth
                    layer_masks.append(mask.astype(np.float32))
                    layer_depths.append(instance_depth)

        # Create a mask covering everything outside the instances
        if layer_masks:
            full_mask = 1.0 - np.clip(np.sum(layer_masks, axis=0), 0, 1)
        else:
            full_mask = np.ones((h, w), dtype=np.float32)

        # Process the remaining areas
        if transition_type == 'character_first':
            if np.sum(full_mask) > 0:
                layer_masks.append(full_mask)
                layer_depths.append(1)
        else:
            # Split the background into depth bands so it reveals in slices
            remaining_depth = depth_map * full_mask
            num_depth_bands = 10

            min_depth = np.min(remaining_depth[full_mask > 0]) if np.sum(full_mask) > 0 else 0
            max_depth = np.max(remaining_depth[full_mask > 0]) if np.sum(full_mask) > 0 else 1
            depth_bands = np.linspace(min_depth, max_depth, num_depth_bands + 1)

            for i in range(num_depth_bands):
                lower = depth_bands[i]
                upper = depth_bands[i + 1]
                band_mask = ((remaining_depth >= lower) & (remaining_depth < upper)).astype(np.float32)

                if np.sum(band_mask) > 0:
                    band_depth = np.mean(remaining_depth[band_mask.astype(bool)])
                    if transition_type == 'near_to_far':
                        band_depth = 1.0 - band_depth

                    layer_masks.append(band_mask)
                    layer_depths.append(band_depth)

        # Sort layers by depth for the depth-ordered modes
        if transition_type != 'character_first' and layer_masks:
            sorted_indices = np.argsort(layer_depths)
            layer_masks = [layer_masks[i] for i in sorted_indices]

        return layer_masks

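    # Illustration (not executed): for a frame with two detected characters,
    # 'character_first' yields [char_mask_1, char_mask_2, background_mask],
    # while the depth-ordered modes yield the instance masks plus up to ten
    # background band masks, sorted by their (possibly inverted) mean depth.
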
    # Get masks for both transitions
    first_masks = create_layer_masks(first_transition)
    second_masks = create_layer_masks(second_transition)

    # Generate the video (mp4v keeps OpenCV happy, but note that some
    # browsers only play H.264, so the preview may need a compatible player)
    output_path = "output_video.mp4"
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video = cv2.VideoWriter(output_path, fourcc, frame_rate, (w, h))
    total_frames = int(duration_sec * frame_rate)
    half_duration = duration_sec / 2

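    # Timeline arithmetic: with the defaults (6 s at 30 fps) there are 180
    # frames; frames 0-89 animate blank -> sketch and frames 90-179 animate
    # sketch -> original, each phase subdividing its 3 s evenly among its
    # layer masks.
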
    for frame_idx in range(total_frames):
        current_time = frame_idx / frame_rate

        # Determine which phase we are in
        if current_time < half_duration:
            # First transition: blank to sketch
            num_layers = len(first_masks)
            layer_duration = half_duration / num_layers if num_layers > 0 else half_duration

            # Start from a blank (white) canvas
            blended = np.ones_like(original) * 255

            for layer_idx, layer_mask in enumerate(first_masks):
                # Layers fade in on a staggered schedule, one slot each
                layer_start = layer_idx * layer_duration
                layer_progress = np.clip((current_time - layer_start) / layer_duration, 0, 1)

                layer_alpha = layer_mask * layer_progress
                layer_alpha = np.repeat(layer_alpha[..., np.newaxis], 3, axis=2)

                # Standard alpha compositing of the sketch over the canvas
                blended = blended * (1 - layer_alpha) + sketch.astype(np.float32) * layer_alpha
        else:
            # Second transition: sketch to original
            num_layers = len(second_masks)
            layer_duration = half_duration / num_layers if num_layers > 0 else half_duration

            # Start from the finished sketch
            blended = sketch.copy().astype(np.float32)

            for layer_idx, layer_mask in enumerate(second_masks):
                layer_start = half_duration + layer_idx * layer_duration
                layer_progress = np.clip((current_time - layer_start) / layer_duration, 0, 1)

                layer_alpha = layer_mask * layer_progress
                layer_alpha = np.repeat(layer_alpha[..., np.newaxis], 3, axis=2)

                blended = blended * (1 - layer_alpha) + original.astype(np.float32) * layer_alpha

        blended = np.clip(blended, 0, 255).astype(np.uint8)

        if debug_visualize:
            cv2.imshow('Blended', blended)
            if cv2.waitKey(1) == 27:  # Esc aborts the preview
                break

        video.write(blended)

    video.release()
    if debug_visualize:
        cv2.destroyAllWindows()

    return output_path

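# The generator can also be exercised without the UI (a minimal sketch;
# "image.png" and "depth.png" are hypothetical paths):
#
#   rgb = cv2.cvtColor(cv2.imread("image.png"), cv2.COLOR_BGR2RGB)
#   depth = cv2.cvtColor(cv2.imread("depth.png"), cv2.COLOR_BGR2RGB)
#   path = generate_combined_transition_video(
#       rgb, depth,
#       first_transition='near_to_far',
#       second_transition='character_first',
#   )
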
def process_images(original_image, depth_map, first_transition, second_transition, duration):
    # Convert PIL Images to numpy arrays
    original_np = np.array(original_image)
    depth_np = np.array(depth_map)

    # Generate video
    video_path = generate_combined_transition_video(
        original_image=original_np,
        depth_map=depth_np,
        first_transition=first_transition,
        second_transition=second_transition,
        duration_sec=float(duration),
        debug_visualize=False
    )

    return video_path

# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Anime Image Transition Video Generator")
    gr.Markdown("Upload an image and its depth map to generate a two-phase transition video:")
    gr.Markdown("1. From blank to sketch")
    gr.Markdown("2. From sketch to original image")

    with gr.Row():
        with gr.Column():
            original_image = gr.Image(label="Original Image", type="pil")
            depth_map = gr.Image(label="Depth Map", type="pil")

            with gr.Group():
                gr.Markdown("### First Transition (Blank → Sketch)")
                first_transition = gr.Radio(
                    choices=["character_first", "near_to_far", "far_to_near"],
                    value="character_first",
                    label="Render Order",
                    info="How elements appear from blank to sketch"
                )

            with gr.Group():
                gr.Markdown("### Second Transition (Sketch → Original)")
                second_transition = gr.Radio(
                    choices=["character_first", "near_to_far", "far_to_near"],
                    value="character_first",
                    label="Render Order",
                    info="How elements transition from sketch to original"
                )

            duration = gr.Slider(2, 20, value=6, step=0.5, label="Total Duration (seconds)")
            submit_btn = gr.Button("Generate Video")

        with gr.Column():
            output_video = gr.Video(label="Output Video")

    submit_btn.click(
        fn=process_images,
        inputs=[original_image, depth_map, first_transition, second_transition, duration],
        outputs=output_video
    )

    # Add examples if available
    gr.Examples(
        [
            ["化物语封面.jpeg", "化物语封面深度.png", "character_first", "far_to_near"],
            ["可莉风景.png", "可莉风景_depth.png", "near_to_far", "character_first"],
            ["竹林万叶.jpg", "竹林万叶_depth.png", "far_to_near", "near_to_far"],
            ["重云行秋.jpg", "重云行秋_depth.png", "character_first", "character_first"],
        ],
        inputs=[original_image, depth_map, first_transition, second_transition]
    )

+
if __name__ == "__main__":
|
289 |
+
demo.launch(share=True)
|