forked from neuralchen/SimSwap
Commit
Script for image face swapping
Showing 2 changed files with 164 additions and 0 deletions.
File 1 of 2 (new file, 82 lines added): the multi-face script. It extracts the identity from pic_a, then swaps every face detected in pic_b and pastes the results back into the whole frame.
import cv2
import torch
import math
import numpy as np
from PIL import Image
import torch.nn.functional as F
from torchvision import transforms
from models.models import create_model
from options.test_options import TestOptions
from insightface_func.face_detect_crop_mutil import Face_detect_crop
from util.reverse2original import reverse2wholeimage
import os
from util.add_watermark import watermark_image


# fractions.gcd was removed in Python 3.9; math.gcd is the supported replacement.
# (lcm is unused below but kept from the original script.)
def lcm(a, b): return abs(a * b) // math.gcd(a, b) if a and b else 0


# ArcFace expects an ImageNet-normalised RGB tensor.
transformer_Arcface = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])


def _totensor(array):
    # H x W x C uint8 image -> C x H x W float tensor in [0, 1]
    tensor = torch.from_numpy(array)
    img = tensor.transpose(0, 1).transpose(0, 2).contiguous()
    return img.float().div(255)


if __name__ == '__main__':
    opt = TestOptions().parse()

    start_epoch, epoch_iter = 1, 0
    crop_size = 224

    torch.nn.Module.dump_patches = True
    logoclass = watermark_image('./simswaplogo/simswaplogo.png')
    model = create_model(opt)
    model.eval()

    # Face detector / aligner
    app = Face_detect_crop(name='antelope', root='./insightface_func/models')
    app.prepare(ctx_id=0, det_thresh=0.8, det_size=(640, 640))

    # Source image: supplies the identity to transfer
    pic_a = opt.pic_a_path
    img_a_whole = cv2.imread(pic_a)
    img_a_align_crop, _ = app.get(img_a_whole, crop_size)
    img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0], cv2.COLOR_BGR2RGB))
    img_a = transformer_Arcface(img_a_align_crop_pil)
    img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])

    # move the identity batch to the GPU
    img_id = img_id.cuda()

    # create latent id: downsample to the ArcFace input size, embed, L2-normalise
    img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
    latend_id = model.netArc(img_id_downsample)
    latend_id = latend_id.detach().to('cpu')
    latend_id = latend_id / np.linalg.norm(latend_id, axis=1, keepdims=True)
    latend_id = latend_id.to('cuda')

    ############## Forward Pass ######################

    # Target image: a single detector call returns all aligned crops and their affine matrices
    pic_b = opt.pic_b_path
    img_b_whole = cv2.imread(pic_b)
    img_b_align_crop_list, b_mat_list = app.get(img_b_whole, crop_size)
    # detect_results = None
    swap_result_list = []

    for b_align_crop in img_b_align_crop_list:
        b_align_crop_tenor = _totensor(cv2.cvtColor(b_align_crop, cv2.COLOR_BGR2RGB))[None, ...].cuda()

        swap_result = model(None, b_align_crop_tenor, latend_id, None, True)[0]
        swap_result_list.append(swap_result)

    # Warp the swapped crops back into the original frame with the inverse affine matrices
    reverse2wholeimage(swap_result_list, b_mat_list, crop_size, img_b_whole, logoclass,
                       os.path.join(opt.output_path, 'result_whole_swapmutil.jpg'))

    print(' ')
    print('************ Done ! ************')
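
The loop above only produces swapped, aligned 224 x 224 crops; reverse2wholeimage is what warps them back into the full frame using the affine matrices returned by app.get. The sketch below illustrates that paste-back idea with plain OpenCV. It is not the repository's util.reverse2original implementation, and it assumes the swapped crop has already been converted to a uint8 BGR image (the repository's routine also receives logoclass, presumably for the watermark overlay).

# Illustration only: the paste-back step, assuming `swapped_crop` is already a
# uint8 BGR image and `mat` is the 2x3 alignment matrix returned by app.get.
import cv2
import numpy as np

def paste_back(whole_img, swapped_crop, mat, crop_size=224):
    h, w = whole_img.shape[:2]
    # WARP_INVERSE_MAP makes warpAffine treat `mat` (whole -> crop) as the
    # inverse map, so each whole-frame pixel samples the swapped crop.
    warped = cv2.warpAffine(swapped_crop, mat, (w, h),
                            flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP)
    # Warp a mask of the crop region the same way and blend it over the frame.
    mask = cv2.warpAffine(np.ones((crop_size, crop_size), np.float32), mat, (w, h),
                          flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP)[..., None]
    return (warped * mask + whole_img * (1.0 - mask)).astype(np.uint8)

In spirit, this is what reverse2wholeimage does for each (swap_result, mat) pair before saving the composited image.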
File 2 of 2 (new file, 82 lines added): the single-face variant. It follows the same pipeline but uses the detector from insightface_func.face_detect_crop_single and writes result_whole_swapsingle.jpg.
import cv2
import torch
import math
import numpy as np
from PIL import Image
import torch.nn.functional as F
from torchvision import transforms
from models.models import create_model
from options.test_options import TestOptions
from insightface_func.face_detect_crop_single import Face_detect_crop
from util.reverse2original import reverse2wholeimage
import os
from util.add_watermark import watermark_image


# fractions.gcd was removed in Python 3.9; math.gcd is the supported replacement.
# (lcm is unused below but kept from the original script.)
def lcm(a, b): return abs(a * b) // math.gcd(a, b) if a and b else 0


# ArcFace expects an ImageNet-normalised RGB tensor.
transformer_Arcface = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])


def _totensor(array):
    # H x W x C uint8 image -> C x H x W float tensor in [0, 1]
    tensor = torch.from_numpy(array)
    img = tensor.transpose(0, 1).transpose(0, 2).contiguous()
    return img.float().div(255)


if __name__ == '__main__':
    opt = TestOptions().parse()

    start_epoch, epoch_iter = 1, 0
    crop_size = 224

    torch.nn.Module.dump_patches = True
    logoclass = watermark_image('./simswaplogo/simswaplogo.png')
    model = create_model(opt)
    model.eval()

    # Single-face detector / aligner
    app = Face_detect_crop(name='antelope', root='./insightface_func/models')
    app.prepare(ctx_id=0, det_thresh=0.8, det_size=(640, 640))

    # Source image: supplies the identity to transfer
    pic_a = opt.pic_a_path
    img_a_whole = cv2.imread(pic_a)
    img_a_align_crop, _ = app.get(img_a_whole, crop_size)
    img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0], cv2.COLOR_BGR2RGB))
    img_a = transformer_Arcface(img_a_align_crop_pil)
    img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])

    # move the identity batch to the GPU
    img_id = img_id.cuda()

    # create latent id: downsample to the ArcFace input size, embed, L2-normalise
    img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
    latend_id = model.netArc(img_id_downsample)
    latend_id = latend_id.detach().to('cpu')
    latend_id = latend_id / np.linalg.norm(latend_id, axis=1, keepdims=True)
    latend_id = latend_id.to('cuda')

    ############## Forward Pass ######################

    # Target image: the aligned crop(s) returned by the single-face detector are swapped and pasted back
    pic_b = opt.pic_b_path
    img_b_whole = cv2.imread(pic_b)
    img_b_align_crop_list, b_mat_list = app.get(img_b_whole, crop_size)
    # detect_results = None
    swap_result_list = []

    for b_align_crop in img_b_align_crop_list:
        b_align_crop_tenor = _totensor(cv2.cvtColor(b_align_crop, cv2.COLOR_BGR2RGB))[None, ...].cuda()

        swap_result = model(None, b_align_crop_tenor, latend_id, None, True)[0]
        swap_result_list.append(swap_result)

    # Warp the swapped crop back into the original frame with the inverse affine matrices
    reverse2wholeimage(swap_result_list, b_mat_list, crop_size, img_b_whole, logoclass,
                       os.path.join(opt.output_path, 'result_whole_swapsingle.jpg'))

    print(' ')
    print('************ Done ! ************')
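
One practical note when reusing these scripts: swap_result comes back from the model as a C x H x W float tensor (RGB, roughly in [0, 1], mirroring the _totensor input convention), so it needs one conversion before cv2.imwrite or the paste-back sketch above can use it. A small helper along these lines would do; it is a sketch, not code from this commit.

import cv2
import numpy as np
import torch

def tensor_to_bgr(swap_result: torch.Tensor) -> np.ndarray:
    # C x H x W float RGB in [0, 1]  ->  H x W x C uint8 BGR for OpenCV
    img = swap_result.detach().cpu().clamp(0, 1)
    img = (img.permute(1, 2, 0).numpy() * 255).astype(np.uint8)
    return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

For example, cv2.imwrite('swapped_crop_debug.jpg', tensor_to_bgr(swap_result)) (an illustrative filename) writes a single aligned crop to disk, which is a quick way to inspect swap quality before the whole-image composition.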