Skip to content

Commit

Permalink
Support Simswap 512
Browse files Browse the repository at this point in the history
Support Simswap 512
  • Loading branch information
NNNNAI committed Nov 24, 2021
1 parent 0f2e4fd commit 589e31a
Show file tree
Hide file tree
Showing 22 changed files with 549 additions and 54 deletions.
2 changes: 1 addition & 1 deletion MultiSpecific.ipynb

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions SimSwap colab.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -398,7 +398,7 @@
"opt.isTrain = False\n",
"opt.use_mask = True ## new feature up-to-date\n",
"\n",
"crop_size = 224\n",
"crop_size = opt.crop_size\n",
"\n",
"torch.nn.Module.dump_patches = True\n",
"model = create_model(opt)\n",
Expand All @@ -420,7 +420,7 @@
" img_id = img_id.cuda()\n",
"\n",
" #create latent id\n",
" img_id_downsample = F.interpolate(img_id, scale_factor=0.5)\n",
" img_id_downsample = F.interpolate(img_id, size=(112,112))\n",
" latend_id = model.netArc(img_id_downsample)\n",
" latend_id = latend_id.detach().to('cpu')\n",
" latend_id = latend_id/np.linalg.norm(latend_id,axis=1,keepdims=True)\n",
Expand Down
15 changes: 12 additions & 3 deletions insightface_func/face_detect_crop_multi.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,11 @@
'''
Author: Naiyuan liu
Github: https://github.com/NNNNAI
Date: 2021-11-23 17:03:58
LastEditors: Naiyuan liu
LastEditTime: 2021-11-24 16:45:41
Description:
'''
from __future__ import division
import collections
import numpy as np
Expand All @@ -6,7 +14,7 @@
import os.path as osp
import cv2
from insightface.model_zoo import model_zoo
from insightface.utils import face_align
from insightface_func.utils import face_align_ffhqandnewarc as face_align

__all__ = ['Face_detect_crop', 'Face']

Expand Down Expand Up @@ -40,8 +48,9 @@ def __init__(self, name, root='~/.insightface_func/models'):
self.det_model = self.models['detection']


def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640)):
def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640), mode ='None'):
self.det_thresh = det_thresh
self.mode = mode
assert det_size is not None
print('set det-size:', det_size)
self.det_size = det_size
Expand Down Expand Up @@ -73,7 +82,7 @@ def get(self, img, crop_size, max_num=0):
kps = None
if kpss is not None:
kps = kpss[i]
M, _ = face_align.estimate_norm(kps, crop_size, mode ='None')
M, _ = face_align.estimate_norm(kps, crop_size, mode = self.mode)
align_img = cv2.warpAffine(img, M, (crop_size, crop_size), borderValue=0.0)
align_img_list.append(align_img)
M_list.append(M)
Expand Down
15 changes: 12 additions & 3 deletions insightface_func/face_detect_crop_single.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,11 @@
'''
Author: Naiyuan liu
Github: https://github.com/NNNNAI
Date: 2021-11-23 17:03:58
LastEditors: Naiyuan liu
LastEditTime: 2021-11-24 16:46:04
Description:
'''
from __future__ import division
import collections
import numpy as np
Expand All @@ -6,7 +14,7 @@
import os.path as osp
import cv2
from insightface.model_zoo import model_zoo
from insightface.utils import face_align
from insightface_func.utils import face_align_ffhqandnewarc as face_align

__all__ = ['Face_detect_crop', 'Face']

Expand Down Expand Up @@ -40,8 +48,9 @@ def __init__(self, name, root='~/.insightface_func/models'):
self.det_model = self.models['detection']


def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640)):
def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640), mode ='None'):
self.det_thresh = det_thresh
self.mode = mode
assert det_size is not None
print('set det-size:', det_size)
self.det_size = det_size
Expand Down Expand Up @@ -82,7 +91,7 @@ def get(self, img, crop_size, max_num=0):
kps = None
if kpss is not None:
kps = kpss[best_index]
M, _ = face_align.estimate_norm(kps, crop_size, mode ='None')
M, _ = face_align.estimate_norm(kps, crop_size, mode = self.mode)
align_img = cv2.warpAffine(img, M, (crop_size, crop_size), borderValue=0.0)

return [align_img], [M]
159 changes: 159 additions & 0 deletions insightface_func/utils/face_align_ffhqandnewarc.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,159 @@
'''
Author: Naiyuan liu
Github: https://github.com/NNNNAI
Date: 2021-11-15 19:42:42
LastEditors: Naiyuan liu
LastEditTime: 2021-11-15 20:01:47
Description:
'''

import cv2
import numpy as np
from skimage import transform as trans

src1 = np.array([[51.642, 50.115], [57.617, 49.990], [35.740, 69.007],
[51.157, 89.050], [57.025, 89.702]],
dtype=np.float32)
#<--left
src2 = np.array([[45.031, 50.118], [65.568, 50.872], [39.677, 68.111],
[45.177, 86.190], [64.246, 86.758]],
dtype=np.float32)

#---frontal
src3 = np.array([[39.730, 51.138], [72.270, 51.138], [56.000, 68.493],
[42.463, 87.010], [69.537, 87.010]],
dtype=np.float32)

#-->right
src4 = np.array([[46.845, 50.872], [67.382, 50.118], [72.737, 68.111],
[48.167, 86.758], [67.236, 86.190]],
dtype=np.float32)

#-->right profile
src5 = np.array([[54.796, 49.990], [60.771, 50.115], [76.673, 69.007],
[55.388, 89.702], [61.257, 89.050]],
dtype=np.float32)

src = np.array([src1, src2, src3, src4, src5])
src_map = src

ffhq_src = np.array([[192.98138, 239.94708], [318.90277, 240.1936], [256.63416, 314.01935],
[201.26117, 371.41043], [313.08905, 371.15118]])
ffhq_src = np.expand_dims(ffhq_src, axis=0)

# arcface_src = np.array(
# [[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
# [41.5493, 92.3655], [70.7299, 92.2041]],
# dtype=np.float32)

# arcface_src = np.expand_dims(arcface_src, axis=0)

# In[66]:


# lmk is prediction; src is template
def estimate_norm(lmk, image_size=112, mode='ffhq'):
    """Estimate the similarity transform that best aligns 5-point landmarks.

    Every template in the selected set is tried; the transform with the
    smallest total landmark reprojection error wins.

    Args:
        lmk: (5, 2) array of predicted facial landmarks (x, y).
        image_size: side length of the target square crop, in pixels.
        mode: 'ffhq' selects the single FFHQ template (defined at 512 px);
            any other value selects the five pose-specific templates
            (defined at 112 px).

    Returns:
        (min_M, min_index): the 2x3 affine matrix with the lowest error and
        the index of the winning template.
    """
    assert lmk.shape == (5, 2)
    tform = trans.SimilarityTransform()
    # Homogeneous coordinates so the 2x3 affine applies with one dot product.
    lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)
    # None/-1 sentinels instead of empty lists; the loop always runs at least
    # once with a finite error, so both are always overwritten.
    min_M = None
    min_index = -1
    min_error = float('inf')
    # Templates are stored at their native resolution; rescale to the target.
    if mode == 'ffhq':
        src = ffhq_src * image_size / 512
    else:
        src = src_map * image_size / 112
    for i in range(src.shape[0]):
        tform.estimate(lmk, src[i])
        M = tform.params[0:2, :]
        results = np.dot(M, lmk_tran.T).T
        # Sum of Euclidean distances between warped landmarks and template.
        error = np.sum(np.sqrt(np.sum((results - src[i]) ** 2, axis=1)))
        if error < min_error:
            min_error = error
            min_M = M
            min_index = i
    return min_M, min_index


def norm_crop(img, landmark, image_size=112, mode='ffhq'):
    """Warp *img* into an aligned square crop using 5-point landmarks.

    In 'Both' mode returns the pair (ffhq-aligned crop, newarc-aligned crop);
    otherwise returns the single crop for the requested mode.
    """
    if mode != 'Both':
        M, pose_index = estimate_norm(landmark, image_size, mode)
        return cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)
    # 'Both': produce one crop per alignment template set.
    M_newarc, _ = estimate_norm(landmark, image_size, mode='newarc')
    M_ffhq, _ = estimate_norm(landmark, image_size, mode='ffhq')
    crop_newarc = cv2.warpAffine(img, M_newarc, (image_size, image_size), borderValue=0.0)
    crop_ffhq = cv2.warpAffine(img, M_ffhq, (image_size, image_size), borderValue=0.0)
    return crop_ffhq, crop_newarc

def square_crop(im, S):
    """Letterbox *im* into an SxS canvas, preserving aspect ratio.

    The longer side is resized to S and the result is pasted into the
    top-left corner of a black SxS image.

    Returns:
        (canvas, scale): the SxS uint8 image and the scale factor applied.
    """
    h, w = im.shape[0], im.shape[1]
    scale = float(S) / max(h, w)
    if h > w:
        new_h, new_w = S, int(float(w) / h * S)
    else:
        new_h, new_w = int(float(h) / w * S), S
    resized = cv2.resize(im, (new_w, new_h))
    canvas = np.zeros((S, S, 3), dtype=np.uint8)
    canvas[:resized.shape[0], :resized.shape[1], :] = resized
    return canvas, scale


def transform(data, center, output_size, scale, rotation):
    """Scale, rotate about *center*, and crop *data* to an output_size square.

    Args:
        data: source image.
        center: (x, y) point that ends up at the middle of the output.
        output_size: side length of the square output, in pixels.
        scale: isotropic scale factor.
        rotation: rotation angle in degrees.

    Returns:
        (cropped, M): the warped crop and the 2x3 affine matrix used.
    """
    rot_rad = float(rotation) * np.pi / 180.0
    scaled_cx = center[0] * scale
    scaled_cy = center[1] * scale
    # Compose: scale -> move center to origin -> rotate -> recenter in output.
    chain = (trans.SimilarityTransform(scale=scale)
             + trans.SimilarityTransform(translation=(-scaled_cx, -scaled_cy))
             + trans.SimilarityTransform(rotation=rot_rad)
             + trans.SimilarityTransform(translation=(output_size / 2,
                                                      output_size / 2)))
    M = chain.params[0:2]
    cropped = cv2.warpAffine(data,
                             M, (output_size, output_size),
                             borderValue=0.0)
    return cropped, M


def trans_points2d(pts, M):
    """Apply the 2x3 affine *M* to an (N, 2) array of points.

    Returns a new float32 (N, 2) array; *pts* is not modified.
    """
    pts = np.asarray(pts)
    # Homogeneous coordinates: append a ones column, then one matrix product
    # replaces the per-point loop.
    ones = np.ones((pts.shape[0], 1), dtype=np.float32)
    homogeneous = np.hstack((pts.astype(np.float32), ones))
    projected = homogeneous.dot(np.asarray(M).T)
    return projected[:, 0:2].astype(np.float32)


def trans_points3d(pts, M):
    """Apply the 2x3 affine *M* to (N, 3) points; z is scaled, not warped.

    x/y go through the full affine; z is multiplied by the isotropic scale
    factor recovered from the first row of M. Returns a float32 (N, 3) array.
    """
    M = np.asarray(M)
    # Isotropic scale of the similarity transform, from M's first row.
    scale = np.sqrt(M[0][0] * M[0][0] + M[0][1] * M[0][1])
    out = np.zeros(pts.shape, dtype=np.float32)
    for idx, (x, y, z) in enumerate(pts):
        mapped = np.dot(M, np.array([x, y, 1.0], dtype=np.float32))
        out[idx][0:2] = mapped[0:2]
        out[idx][2] = z * scale
    return out


def trans_points(pts, M):
    """Dispatch to the 2-D or 3-D point transform based on pts' column count."""
    return trans_points2d(pts, M) if pts.shape[1] == 2 else trans_points3d(pts, M)

9 changes: 6 additions & 3 deletions models/fs_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,8 @@
import torch.nn.functional as F
import os
from torch.autograd import Variable
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
from .fs_networks import Generator_Adain_Upsample, Discriminator

class SpecificNorm(nn.Module):
def __init__(self, epsilon=1e-8):
Expand Down Expand Up @@ -52,6 +50,11 @@ def initialize(self, opt):

device = torch.device("cuda:0")

if opt.crop_size == 224:
from .fs_networks import Generator_Adain_Upsample, Discriminator
elif opt.crop_size == 512:
from .fs_networks_512 import Generator_Adain_Upsample, Discriminator

# Generator network
self.netG = Generator_Adain_Upsample(input_nc=3, output_nc=3, latent_size=512, n_blocks=9, deep=False)
self.netG.to(device)
Expand Down Expand Up @@ -197,7 +200,7 @@ def forward(self, img_id, img_att, latent_id, latent_att, for_G=False):


#G_ID
img_fake_down = F.interpolate(img_fake, scale_factor=0.5)
img_fake_down = F.interpolate(img_fake, size=(112,112))
img_fake_down = self.spNorm(img_fake_down)
latent_fake = self.netArc(img_fake_down)
loss_G_ID = (1 - self.cosin_metric(latent_fake, latent_id))
Expand Down
Loading

0 comments on commit 589e31a

Please sign in to comment.