Spaces:
Sleeping
Sleeping
File size: 3,656 Bytes
327b68f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 |
import cv2 # OpenCV ๋ผ์ด๋ธ๋ฌ๋ฆฌ
import numpy as np
from skimage import transform as tf # ์ด๋ฏธ์ง ๋ณํ ๋ชจ๋
# -- Landmark interpolation:
def linear_interpolate(landmarks, start_idx, stop_idx):
    """Fill the gap between two landmark frames by linear interpolation.

    Entries strictly between start_idx and stop_idx are overwritten in place
    with points on the straight line from landmarks[start_idx] to
    landmarks[stop_idx].  The (mutated) sequence is also returned.
    """
    first = landmarks[start_idx]    # anchor landmarks at the gap start
    last = landmarks[stop_idx]      # anchor landmarks at the gap end
    span = stop_idx - start_idx
    step = last - first             # total displacement across the gap
    for offset in range(1, span):
        # place each missing frame a proportional fraction along the segment
        landmarks[start_idx + offset] = first + (offset / float(span)) * step
    return landmarks
# -- Face Transformation
# src: source (input) landmark points, dst: destination (target) landmark points
def warp_img(src, dst, img, std_size):
    """Warp `img` with the similarity transform mapping `src` onto `dst`.

    Returns the warped frame as a uint8 image of shape `std_size`, plus the
    fitted transform so it can be re-applied to later frames
    (see apply_transform).
    """
    # Fit a similarity transform from the source points to the target points.
    tform = tf.estimate_transform('similarity', src, dst)
    # skimage's warp returns a double image with values in [0, 1];
    # rescale back to the 0-255 range before converting to 8-bit.
    warped = tf.warp(img, inverse_map=tform.inverse, output_shape=std_size)
    warped = (warped * 255).astype('uint8')
    return warped, tform
def apply_transform(transform, img, std_size):
    """Re-apply a previously fitted transform to `img`.

    Returns the warped frame as a uint8 image of shape `std_size`.
    """
    out = tf.warp(img, inverse_map=transform.inverse, output_shape=std_size)
    # warp yields doubles in [0, 1]; scale up and cast to 8-bit.
    return (255 * out).astype('uint8')
# -- Crop
def cut_patch(img, landmarks, height, width, threshold=5):
    """Crop a (2*height, 2*width) patch of `img` centered on the landmarks.

    The crop center is the mean of `landmarks` (columns are x, y).  If the
    patch would fall outside the image, the center is clamped back inside,
    but only when the required shift is within `threshold` pixels; a larger
    shift is rejected.

    Args:
        img: numpy image array, indexed [row (y), col (x), ...].
        landmarks: array of (x, y) points; their mean defines the center.
        height: half-height of the crop, in pixels.
        width: half-width of the crop, in pixels.
        threshold: maximum allowed clamping distance before rejecting.

    Returns:
        A copy of the cropped region.

    Raises:
        ValueError: if the center is biased past `threshold` pixels
            (ValueError subclasses Exception, so existing callers that
            catch Exception still work).
    """
    center_x, center_y = np.mean(landmarks, axis=0)  # landmark centroid (x, y)

    # NOTE(fix): the bias check must run BEFORE clamping.  The original code
    # clamped first, so the subsequent comparison was always exactly 0 (or
    # exactly at the border) and every `raise` branch was unreachable.
    if center_y - height < -threshold:
        raise ValueError('too much bias in height')
    if center_y - height < 0:
        center_y = height
    if center_x - width < -threshold:
        raise ValueError('too much bias in width')
    if center_x - width < 0:
        center_x = width
    if center_y + height > img.shape[0] + threshold:
        raise ValueError('too much bias in height')
    if center_y + height > img.shape[0]:
        center_y = img.shape[0] - height
    if center_x + width > img.shape[1] + threshold:
        raise ValueError('too much bias in width')
    if center_x + width > img.shape[1]:
        center_x = img.shape[1] - width

    # Round the (possibly fractional) center to pixel indices and copy the
    # region so the caller can mutate it without touching the source frame.
    top = int(round(center_y) - round(height))
    bottom = int(round(center_y) + round(height))
    left = int(round(center_x) - round(width))
    right = int(round(center_x) + round(width))
    return np.copy(img[top:bottom, left:right])
# -- RGB to GRAY
def convert_bgr2gray(data):
    """Convert an iterable of BGR frames to one stacked grayscale array.

    Each frame is converted with OpenCV, and the per-frame results are
    stacked along a new leading (frame) axis.
    """
    gray_frames = [cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) for frame in data]
    return np.stack(gray_frames, axis=0)
|