svjack committed on
Commit cb721ad · verified · 1 parent: 53a547b

Create sketch_video_app_from_blank_direction.py

sketch_video_app_from_blank_direction.py ADDED
'''
conda create --name animeins python=3.10
conda activate animeins
pip install ipykernel
python -m ipykernel install --user --name animeins --display-name "animeins"
pip install -r requirements.txt

pip install torch==2.1.1 torchvision
pip install mmcv==2.1.0 -f https://download.openmmlab.com/mmcv/dist/cu121/torch2.1/index.html
pip install mmdet
pip install "numpy<2.0.0"
pip install moviepy==1.0.3
pip install "httpx[socks]"
'''

import gradio as gr
import os
import cv2
import numpy as np
from PIL import Image
from typing import Literal
import pathlib
from animeinsseg import AnimeInsSeg, AnimeInstances
from animeinsseg.anime_instances import get_color

# Install required OpenMMLab packages via mim
os.system("mim install mmengine")
os.system('mim install mmcv==2.1.0')
os.system("mim install mmdet==3.2.0")

# Download model weights if they are not present
if not os.path.exists("models"):
    os.mkdir("models")
    os.system("huggingface-cli lfs-enable-largefiles .")
    os.system("git clone https://huggingface.co/dreMaz/AnimeInstanceSegmentation models/AnimeInstanceSegmentation")

# Initialize the segmentation model
ckpt = r'models/AnimeInstanceSegmentation/rtmdetl_e60.ckpt'
mask_thres = 0.3
instance_thres = 0.3
refine_kwargs = {'refine_method': 'refinenet_isnet'}
net = AnimeInsSeg(ckpt, mask_thr=mask_thres, refine_kwargs=refine_kwargs)

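# Note: instance_thres is forwarded to net.infer() as pred_score_thr, filtering out
# low-confidence detections; mask_thres (passed as mask_thr) presumably sets the
# cutoff for binarizing predicted masks. Both are scores in [0, 1].
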
def image_to_sketch(image: np.ndarray) -> np.ndarray:
    """Convert a BGR image to a pencil sketch via an inverted-blur color dodge."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    inverted = 255 - gray
    blurred = cv2.GaussianBlur(inverted, (21, 21), 0)
    inverted_blurred = 255 - blurred
    sketch = cv2.divide(gray, inverted_blurred, scale=256.0)
    return cv2.cvtColor(sketch, cv2.COLOR_GRAY2BGR)

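# The cv2.divide above is a "color dodge" blend: sketch = gray * 256 / (255 - blur(255 - gray)).
# Pixels near their local average wash out to white, while pixels darker than their
# blurred neighborhood stay dark, which is what leaves pencil-like strokes.
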
def generate_combined_transition_video(
    original_image: np.ndarray,
    depth_map: np.ndarray,
    first_transition: str = 'character_first',
    second_transition: str = 'character_first',
    first_direction: str = 'left_to_right',
    second_direction: str = 'left_to_right',
    duration_sec: float = 6.0,
    frame_rate: int = 30,
    depth_blur: int = 15,
    debug_visualize: bool = False
) -> str:
    """
    Generate a two-phase transition video (blank -> sketch -> original)
    with a customizable render order and scanline direction per phase.
    """
    # Convert inputs to OpenCV conventions
    original = cv2.cvtColor(original_image, cv2.COLOR_RGB2BGR)
    if depth_map.ndim == 3:  # tolerate depth maps loaded as RGB
        depth_map = cv2.cvtColor(depth_map, cv2.COLOR_RGB2GRAY)

    # Get sketch version
    sketch = image_to_sketch(original)
    h, w = original.shape[:2]

    # Perform instance segmentation
    instances: AnimeInstances = net.infer(
        original,
        output_type='numpy',
        pred_score_thr=instance_thres
    )

    # Prepare depth map: match image size, smooth, normalize to [0, 1]
    # (GaussianBlur requires an odd kernel size, which the default 15 is)
    depth_map = cv2.resize(depth_map, (w, h))
    depth_map = cv2.GaussianBlur(depth_map, (depth_blur, depth_blur), 0)
    depth_map = depth_map.astype(np.float32) / 255.0

    def create_scanline_mask(mask, progress, direction):
        """Reveal `mask` progressively along the given direction."""
        scanline_mask = np.zeros_like(mask)

        if direction == 'left_to_right':
            scan_width = int(w * progress)
            scanline_mask[:, :scan_width] = mask[:, :scan_width]
        elif direction == 'right_to_left':
            scan_width = int(w * progress)
            if scan_width > 0:  # guard: mask[:, -0:] would select everything
                scanline_mask[:, -scan_width:] = mask[:, -scan_width:]
        elif direction == 'top_to_bottom':
            scan_height = int(h * progress)
            scanline_mask[:scan_height, :] = mask[:scan_height, :]
        elif direction == 'bottom_to_top':
            scan_height = int(h * progress)
            if scan_height > 0:  # same guard for the vertical case
                scanline_mask[-scan_height:, :] = mask[-scan_height:, :]

        return scanline_mask

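    # NumPy pitfall behind the guards above: for a = np.arange(5), a[-0:] returns the
    # whole array rather than an empty slice, so at progress == 0 the right-to-left
    # and bottom-to-top wipes would otherwise start fully revealed.
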
    def create_layer_masks(transition_type):
        """Split the image into reveal layers according to the render order."""
        layer_masks = []
        layer_depths = []

        # Segmented character instances become individual layers
        if instances.bboxes is not None:
            for mask in instances.masks:
                if transition_type == 'character_first':
                    layer_masks.append(mask.astype(np.float32))
                    layer_depths.append(0)
                else:
                    instance_depth = np.mean(depth_map[mask.astype(bool)])
                    if transition_type == 'near_to_far':
                        instance_depth = 1.0 - instance_depth
                    layer_masks.append(mask.astype(np.float32))
                    layer_depths.append(instance_depth)

        # Create a full mask for everything not covered by an instance
        if layer_masks:
            full_mask = 1.0 - np.clip(np.sum(layer_masks, axis=0), 0, 1)
        else:
            full_mask = np.ones((h, w), dtype=np.float32)

        # Process the remaining areas
        if transition_type == 'character_first':
            # Background is a single layer revealed after the characters
            if np.sum(full_mask) > 0:
                layer_masks.append(full_mask)
                layer_depths.append(1)
        else:
            # Slice the background into bands of similar depth
            remaining_depth = depth_map * full_mask
            num_depth_bands = 10

            min_depth = np.min(remaining_depth[full_mask > 0]) if np.sum(full_mask) > 0 else 0
            max_depth = np.max(remaining_depth[full_mask > 0]) if np.sum(full_mask) > 0 else 1
            depth_bands = np.linspace(min_depth, max_depth, num_depth_bands + 1)

            for i in range(num_depth_bands):
                lower = depth_bands[i]
                upper = depth_bands[i + 1]
                # Close the last band on the right so max-depth pixels are included
                if i == num_depth_bands - 1:
                    band_mask = ((remaining_depth >= lower) & (remaining_depth <= upper)).astype(np.float32)
                else:
                    band_mask = ((remaining_depth >= lower) & (remaining_depth < upper)).astype(np.float32)

                if np.sum(band_mask) > 0:
                    band_depth = np.mean(remaining_depth[band_mask.astype(bool)])
                    if transition_type == 'near_to_far':
                        band_depth = 1.0 - band_depth

                    layer_masks.append(band_mask)
                    layer_depths.append(band_depth)

        # Depth-ordered modes reveal layers from smallest to largest depth key
        if transition_type != 'character_first' and layer_masks:
            sorted_indices = np.argsort(layer_depths)
            layer_masks = [layer_masks[i] for i in sorted_indices]

        return layer_masks

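    # Ordering convention: 'character_first' reveals instance masks first and then the
    # background as one layer. The depth-ordered modes sort layers by mean depth and,
    # assuming the common convention that brighter depth values are nearer, 'near_to_far'
    # flips the sort key (1.0 - depth) so near layers come first.
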
    # Get layer masks for both transitions
    first_masks = create_layer_masks(first_transition)
    second_masks = create_layer_masks(second_transition)

    # Set up the output video
    output_path = "output_video.mp4"
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video = cv2.VideoWriter(output_path, fourcc, frame_rate, (w, h))
    total_frames = int(duration_sec * frame_rate)
    half_duration = duration_sec / 2

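    # cv2.VideoWriter expects uint8 BGR frames sized exactly (w, h); the blending
    # below is therefore done in float and cast back to uint8 before each write.
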
    for frame_idx in range(total_frames):
        current_time = frame_idx / frame_rate

        # Determine which transition phase we are in
        if current_time < half_duration:
            # First phase: blank to sketch
            num_layers = len(first_masks)
            layer_duration = half_duration / num_layers if num_layers > 0 else half_duration

            # Start from a blank (white) canvas
            blended = np.full(original.shape, 255, dtype=np.float32)

            for layer_idx, layer_mask in enumerate(first_masks):
                layer_start = layer_idx * layer_duration
                layer_progress = np.clip((current_time - layer_start) / layer_duration, 0, 1)

                # Reveal this layer of the sketch along the chosen direction
                scanline_mask = create_scanline_mask(layer_mask, layer_progress, first_direction)
                scanline_alpha = np.repeat(scanline_mask[..., np.newaxis], 3, axis=2)

                blended = blended * (1 - scanline_alpha) + sketch.astype(np.float32) * scanline_alpha
        else:
            # Second phase: sketch to original
            num_layers = len(second_masks)
            layer_duration = half_duration / num_layers if num_layers > 0 else half_duration

            # Start from the full sketch
            blended = sketch.copy().astype(np.float32)

            for layer_idx, layer_mask in enumerate(second_masks):
                layer_start = half_duration + layer_idx * layer_duration
                layer_progress = np.clip((current_time - layer_start) / layer_duration, 0, 1)

                # Reveal this layer of the original along the chosen direction
                scanline_mask = create_scanline_mask(layer_mask, layer_progress, second_direction)
                scanline_alpha = np.repeat(scanline_mask[..., np.newaxis], 3, axis=2)

                blended = blended * (1 - scanline_alpha) + original.astype(np.float32) * scanline_alpha

        blended = np.clip(blended, 0, 255).astype(np.uint8)

        if debug_visualize:
            cv2.imshow('Blended', blended)
            if cv2.waitKey(1) == 27:  # Esc aborts the preview loop
                break

        video.write(blended)

    video.release()
    if debug_visualize:
        cv2.destroyAllWindows()

    return output_path

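# Timing sketch (illustrative numbers, not taken from the UI): with duration_sec=6.0
# each phase lasts 3 s; a phase with 3 layers gives each layer a 1 s scanline window
# (layer_start = layer_idx * layer_duration), so layers finish one after another.
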
def process_images(
    original_image,
    depth_map,
    first_transition,
    second_transition,
    first_direction,
    second_direction,
    duration
):
    # Convert the PIL images Gradio provides into numpy arrays
    original_np = np.array(original_image)
    depth_np = np.array(depth_map)

    # Generate the video
    video_path = generate_combined_transition_video(
        original_image=original_np,
        depth_map=depth_np,
        first_transition=first_transition,
        second_transition=second_transition,
        first_direction=first_direction,
        second_direction=second_direction,
        duration_sec=float(duration),
        debug_visualize=False
    )

    return video_path

# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Anime Image Transition Video Generator")
    gr.Markdown("Upload an image and its depth map to generate a two-phase transition video:")
    gr.Markdown("1. From blank to sketch")
    gr.Markdown("2. From sketch to original image")

    with gr.Row():
        with gr.Column():
            original_image = gr.Image(label="Original Image", type="pil")
            depth_map = gr.Image(label="Depth Map", type="pil")

            with gr.Group():
                gr.Markdown("### First Transition (Blank → Sketch)")
                first_transition = gr.Radio(
                    choices=["character_first", "near_to_far", "far_to_near"],
                    value="character_first",
                    label="Render Order",
                    info="How elements appear from blank to sketch"
                )
                first_direction = gr.Radio(
                    choices=["left_to_right", "right_to_left", "top_to_bottom", "bottom_to_top"],
                    value="left_to_right",
                    label="Scanline Direction",
                    info="Direction of the reveal effect"
                )

            with gr.Group():
                gr.Markdown("### Second Transition (Sketch → Original)")
                second_transition = gr.Radio(
                    choices=["character_first", "near_to_far", "far_to_near"],
                    value="character_first",
                    label="Render Order",
                    info="How elements transition from sketch to original"
                )
                second_direction = gr.Radio(
                    choices=["left_to_right", "right_to_left", "top_to_bottom", "bottom_to_top"],
                    value="left_to_right",
                    label="Scanline Direction",
                    info="Direction of the reveal effect"
                )

            duration = gr.Slider(2, 20, value=6, step=0.5, label="Total Duration (seconds)")
            submit_btn = gr.Button("Generate Video")

        with gr.Column():
            output_video = gr.Video(label="Output Video")

    submit_btn.click(
        fn=process_images,
        inputs=[
            original_image,
            depth_map,
            first_transition,
            second_transition,
            first_direction,
            second_direction,
            duration
        ],
        outputs=output_video
    )

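    # Gradio maps `inputs` to the function's parameters positionally, so the component
    # order above must match process_images' signature exactly.
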
    # Add examples (image/depth pairs are expected in the working directory)
    gr.Examples(
        [
            ["化物语封面.jpeg", "化物语封面深度.png", "character_first", "far_to_near", "left_to_right", "right_to_left"],
            ["可莉风景.png", "可莉风景_depth.png", "near_to_far", "character_first", "top_to_bottom", "bottom_to_top"],
            ["竹林万叶.jpg", "竹林万叶_depth.png", "far_to_near", "near_to_far", "right_to_left", "left_to_right"],
            ["重云行秋.jpg", "重云行秋_depth.png", "character_first", "character_first", "bottom_to_top", "top_to_bottom"],
        ],
        inputs=[
            original_image,
            depth_map,
            first_transition,
            second_transition,
            first_direction,
            second_direction
        ]
    )

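    # The example rows set only the six listed components; the duration slider keeps
    # its current value when an example is selected.
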
if __name__ == "__main__":
    demo.launch(share=True)