| 1 | +""" |
| 2 | +brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset) |
| 3 | +author: lzhbrian (https://lzhbrian.me) |
| 4 | +date: 2020.1.5 |
| 5 | +note: code is heavily borrowed from |
| 6 | + https://github.com/NVlabs/ffhq-dataset |
| 7 | + http://dlib.net/face_landmark_detection.py.html |
| 8 | +
|
| 9 | +requirements: |
| 10 | + apt install cmake |
| 11 | + conda install Pillow numpy scipy |
| 12 | + pip install dlib |
| 13 | + # download face landmark model from: |
| 14 | + # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 |
| 15 | +""" |

import argparse
from pathlib import Path

import dlib
import numpy as np
import PIL.Image
import scipy.ndimage
import torchvision

# local helper modules shipped alongside this script in the PULSE repo
from bicubic import BicubicDownSample
from drive import open_url


def get_landmark(filepath):
    """get landmarks with dlib
    :return: list of np.array, one per detected face, each of shape (68, 2)
    """
    detector = dlib.get_frontal_face_detector()

    img = dlib.load_rgb_image(filepath)
    dets = detector(img, 1)  # 1 = upsample the image once, to catch smaller faces
    filepath = Path(filepath)
    print(f"{filepath.name}: Number of faces detected: {len(dets)}")
    # `predictor` is the module-level dlib shape predictor loaded below
    shapes = [predictor(img, d) for d in dets]

    lms = [np.array([[pt.x, pt.y] for pt in shape.parts()]) for shape in shapes]

    return lms


def align_face(filepath):
    """align all faces in an image with the FFHQ method
    :param filepath: str
    :return: list of PIL.Image, one aligned crop per detected face
    """

    lms = get_landmark(filepath)
    imgs = []
    for lm in lms:
        # index ranges follow dlib's standard 68-point (iBUG 300-W) layout
        lm_chin          = lm[0:17]   # left-right
        lm_eyebrow_left  = lm[17:22]  # left-right
        lm_eyebrow_right = lm[22:27]  # left-right
        lm_nose          = lm[27:31]  # top-down
        lm_nostrils      = lm[31:36]  # top-down
        lm_eye_left      = lm[36:42]  # left-clockwise
        lm_eye_right     = lm[42:48]  # left-clockwise
        lm_mouth_outer   = lm[48:60]  # left-clockwise
        lm_mouth_inner   = lm[60:68]  # left-clockwise

        # Calculate auxiliary vectors.
        eye_left = np.mean(lm_eye_left, axis=0)
        eye_right = np.mean(lm_eye_right, axis=0)
        eye_avg = (eye_left + eye_right) * 0.5
        eye_to_eye = eye_right - eye_left
        mouth_left = lm_mouth_outer[0]
        mouth_right = lm_mouth_outer[6]
        mouth_avg = (mouth_left + mouth_right) * 0.5
        eye_to_mouth = mouth_avg - eye_avg

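        # Geometry of the oriented crop (image coordinates, y pointing down):
        # `x` averages two estimates of the face's horizontal axis (the eye
        # line and the eye-to-mouth axis rotated 90 degrees), normalized and
        # scaled to half the crop width; `y` is `x` rotated 90 degrees, so the
        # crop is square; `c` sits slightly below the eye midpoint; `quad`
        # holds the four corners and `qsize` is the side length in pixels.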
        # Choose oriented crop rectangle.
        x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
        x /= np.hypot(*x)
        x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
        y = np.flipud(x) * [-1, 1]
        c = eye_avg + eye_to_mouth * 0.1
        quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
        qsize = np.hypot(*x) * 2

        # Read image.
        img = PIL.Image.open(filepath)

        output_size = 1024
        transform_size = 4096
        enable_padding = True

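        # Shrinking early keeps the crop/pad/transform steps cheap: the
        # downscale only triggers when the crop side `qsize` is more than
        # twice the 1024px output, so it should not cost detail in the final
        # resample.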
        # Shrink.
        shrink = int(np.floor(qsize / output_size * 0.5))
        if shrink > 1:
            rsize = (int(np.rint(float(img.size[0]) / shrink)),
                     int(np.rint(float(img.size[1]) / shrink)))
            img = img.resize(rsize, PIL.Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
            quad /= shrink
            qsize /= shrink

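        # Cropping to the quad's bounding box (plus a small border, clipped to
        # the image) bounds the work done below; `quad` is shifted into the
        # crop's coordinate frame.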
        # Crop.
        border = max(int(np.rint(qsize * 0.1)), 3)
        crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))),
                int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
        crop = (max(crop[0] - border, 0), max(crop[1] - border, 0),
                min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
        if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
            img = img.crop(crop)
            quad -= crop[0:2]

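        # If the quad reaches past the image border, reflection-pad, then
        # feather the padded region with a Gaussian blur and a blend toward
        # the median color, so mirrored content fades out instead of leaving
        # a visible seam.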
        # Pad.
        pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))),
               int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
        pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0),
               max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
        if enable_padding and max(pad) > border - 4:
            pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
            img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
            h, w, _ = img.shape
            y, x, _ = np.ogrid[:h, :w, :1]
            mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
                              1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
            blur = qsize * 0.02
            img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
            img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
            img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
            quad += pad[:2]

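        # img.transform with PIL.Image.QUAD maps the oriented quad onto an
        # axis-aligned square; rendering at transform_size (4096) and then
        # downsampling to output_size (1024) supersamples the warp.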
        # Transform.
        img = img.transform((transform_size, transform_size), PIL.Image.QUAD,
                            (quad + 0.5).flatten(), PIL.Image.BILINEAR)
        if output_size < transform_size:
            img = img.resize((output_size, output_size), PIL.Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10

        # Collect the aligned image.
        imgs.append(img)
    return imgs

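# Minimal single-image sketch (assumes the module-level `predictor` below has
# already been initialized; "face.jpg" is a hypothetical input file):
#   faces = align_face("face.jpg")
#   faces[0].save("face_aligned.png")
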
parser = argparse.ArgumentParser(description='PULSE face alignment')

parser.add_argument('-input_dir', type=str, default='realpics', help='directory with unprocessed images')
parser.add_argument('-output_dir', type=str, default='input', help='output directory')
parser.add_argument('-output_size', type=int, default=32,
                    help='size to downscale the input images to; must be a power of 2 dividing 1024')
parser.add_argument('-seed', type=int, help='manual seed to use')
parser.add_argument('-cache_dir', type=str, default='cache', help='cache directory for model weights')

args = parser.parse_args()

cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

print("Downloading Shape Predictor")
f = open_url("https://drive.google.com/uc?id=1huhv8PYpNNKbGCLOaYUjOgR1pY5pmbJx", cache_dir=cache_dir, return_path=True)
predictor = dlib.shape_predictor(f)  # read globally by get_landmark()

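# For each input image: align every detected face at 1024x1024, then, when an
# output size is given, bicubic-downsample on the GPU to produce the
# low-resolution PULSE input.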
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im))

    for i, face in enumerate(faces):
        if args.output_size:
            factor = 1024 // args.output_size
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)

        face.save(Path(args.output_dir) / (im.stem + f"_{i}.png"))