Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/temp'
Browse files Browse the repository at this point in the history
# Conflicts:
#	.gitignore
#	README.md
  • Loading branch information
pminhtam committed Oct 18, 2021
2 parents 541f32b + 32e969d commit f39e506
Show file tree
Hide file tree
Showing 95 changed files with 18,618 additions and 389 deletions.
14 changes: 10 additions & 4 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,13 @@ checkpoint/
events.out.tf*
*tfrecords*
*labels

#*.png
#*.jpg
#*.jpeg
*.png
*.jpg
*.jpeg
preprocess_data/test_fft.py
preprocess_data/test_fft2.py
test_*.py
output/*
generated/*
shape_predictor_68_face_landmarks.dat
*.pkl
47 changes: 46 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,55 @@ Extract frame from video and detect face in frame to save *.jpg image.
`--duration` : number of frames to skip between extractions

## Preprocess
Preprocess for GAN-fingerprint

`python train.py --train_set data/Celeb-DF/image/train/ --val_set data/Celeb-DF/image/test/ --batch_size 32 --image_size 256 --workers 16 --checkpoint resnext50_celeb_checkpoint/ --gpu_id 0 --resume model_pytorch_1.pt --print_every 10000000 resnext50`
`python data_preparation_gan.py in_dir /hdd/tam/df_in_the_wild/image/train --out_dir /hdd/tam/df_in_the_wild/gan/train resolution 128`

Preprocess for visual model

`python -m feature_model.visual_artifact.process_data --input_real /hdd/tam/df_in_the_wild/image/train/0_real --input_fake /hdd/tam/df_in_the_wild/image/train/1_df --output /hdd/tam/df_in_the_wild/train_visual.pkl --number_iter 1000`

Preprocess for headpose model

`python -m feature_model.headpose_forensic.process_data --input_real /hdd/tam/df_in_the_wild/image/train/0_real --input_fake /hdd/tam/df_in_the_wild/image/train/1_df --output /hdd/tam/df_in_the_wild/train_visual.pkl --number_iter 1000`

Preprocess for spectrum

`python -m feature_model.spectrum.process_data --input_real /hdd/tam/df_in_the_wild/image/train/0_real --input_fake /hdd/tam/df_in_the_wild/image/train/1_df --output /hdd/tam/df_in_the_wild/train_spectrum.pkl --number_iter 1000`


# Train

Train for cnn

`python train.py --train_set data/Celeb-DF/image/train/ --val_set data/Celeb-DF/image/test/ --batch_size 32 --image_size 128 --workers 16 --checkpoint xception_128_df_inthewild_checkpoint/ --gpu_id 0 --resume model_pytorch_1.pt --print_every 10000000 xception_torch`

Train for feature model

`python train.py --train_set /hdd/tam/df_in_the_wild/train_visual.pkl --checkpoint spectrum_128_df_inthewild_checkpoint/ --gpu_id 0 --resume model_pytorch_1.pt spectrum`


# Eval

Eval for cnn

`python eval.py --val_set /hdd/tam/df_in_the_wild/image/test/ --adj_brightness 1.0 --adj_contrast 1.0 --batch_size 32 --image_size 128 --workers 16 --checkpoint efficientdual_128_df_inthewild_checkpoint/ --resume model_dualpytorch3_1.pt efficientdual`

`python eval.py --val_set /hdd/tam/df_in_the_wild/image/test/ --adj_brightness 1.0 --adj_contrast 1.5 --batch_size 32 --image_size 128 --workers 16 --checkpoint capsule_128_df_inthewild_checkpoint/ --resume 4 capsule`


Eval for feature model

`python eval.py --val_set ../DeepFakeDetection/Experiments_DeepFakeDetection/test_dfinthewild.pkl --checkpoint ../DeepFakeDetection/Experiments_DeepFakeDetection/model_df_inthewild.pkl --resume model_df_inthewild.pkl spectrum`

# Detect

`python detect_img.py --img_path /hdd/tam/extend_data/image/test/1_df/reference_0_113.jpg --model_path efficientdual_mydata_checkpoint/model_dualpytorch3_1.pt --gpu_id 0 efficientdual`

`python detect_img.py --img_path /hdd/tam/extend_data/image/test/1_df/reference_0_113.jpg --model_path xception_mydata_checkpoint/model_pytorch_0.pt --gpu_id 0 xception_torch`

`python detect_img.py --img_path /hdd/tam/extend_data/image/test/1_df/reference_0_113.jpg --model_path capsule_mydata_checkpoint/capsule_1.pt --gpu_id 0 capsule`

## References
[1] https://github.com/nii-yamagishilab/Capsule-Forensics-v2
Expand Down
24 changes: 24 additions & 0 deletions cnn_test.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# Evaluate two checkpoints (xception_torch with 1 worker, capsule with 16
# workers) against every manipulation subset, in the same order as before.
for subset in 3dmm deepfake image_swap2d image_swap3d monkey reenact stargan
do
    python eval.py --val_set /hdd/tam/extend_data/image_split/${subset}/ --image_size 256 --checkpoint xception_mydata_checkpoint/ --resume model_pytorch_2.pt --adj_brightness=1.0 --adj_contrast=1.0 --gpu_id 0 --batch_size 16 --worker 1 xception_torch
    python eval.py --val_set /hdd/tam/extend_data/image_split/${subset}/ --image_size 256 --checkpoint capsule_mydata_checkpoint/ --resume 4 --adj_brightness=1.0 --adj_contrast=1.0 --gpu_id 0 --batch_size 16 --worker 16 capsule
done

1 change: 1 addition & 0 deletions cnn_visualization/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
generated/*
Empty file added cnn_visualization/__init__.py
Empty file.
186 changes: 186 additions & 0 deletions cnn_visualization/generate_class_specific_samples.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,186 @@
"""
Created on Thu Oct 26 14:19:44 2017
@author: Utku Ozbulak - github.com/utkuozbulak
"""
import os
import numpy as np

import torch
from torch.optim import SGD

from cnn_visualization.misc_functions import preprocess_image, recreate_image, save_image
import argparse
import torch.nn as nn


class ClassSpecificImageGeneration():
    """
    Produces an image that maximizes a certain class with gradient ascent.

    Starting from uniform random noise, the image is repeatedly forwarded
    through ``model`` and updated via SGD on the negated score of
    ``target_class`` (i.e. gradient ascent on that class's activation).
    Intermediate images are written to ``generated/class_<target_class>/``.
    """
    def __init__(self, model, target_class,image_size):
        # Inverse-normalization constants (negated ImageNet means, reciprocal
        # stds) — presumably consumed by preprocess_image/recreate_image to
        # undo normalization; TODO confirm against misc_functions.
        self.mean = [-0.485, -0.456, -0.406]
        self.std = [1/0.229, 1/0.224, 1/0.225]
        self.model = model
        # Eval mode: only the input image is optimized, never the weights.
        self.model.eval()
        self.target_class = target_class
        self.image_size = image_size
        # Generate a random image (uint8 HxWx3 noise as the starting point)
        self.created_image = np.uint8(np.random.uniform(0, 255, (image_size, image_size, 3)))
        # Create the folder to export images if not exists
        if not os.path.exists('generated/class_'+str(self.target_class)):
            os.makedirs('generated/class_'+str(self.target_class))
        print("init xong ... ")
        # Run the forward pass on GPU when available; the optimized image
        # tensor itself stays on CPU (see generate()).
        self.device = torch.device("cuda" if torch.cuda.is_available()
                                   else "cpu")
    def generate(self, iterations=150):
        """Generates class specific image

        Keyword Arguments:
            iterations {int} -- Total iterations for gradient ascent
                (default: {150}). Note the loop runs iterations-1 steps
                (range starts at 1).
        Returns:
            torch.Tensor -- the last optimized (preprocessed) image tensor
        """
        print("bat dau generate xong ... ")
        # NOTE(review): this "initial" learning rate is halved on EVERY
        # iteration below (the i % 1 == 0 guard is always true), so the step
        # size decays geometrically from 200 — confirm this is intended.
        initial_learning_rate = 200
        for i in range(1, iterations):
            print(i)
            # Process image and return variable
            # (re-preprocessed from the image recreated at the end of the
            # previous iteration, so each step starts from a fresh leaf tensor)
            self.processed_image = preprocess_image(self.created_image, False)

            # Define optimizer for the image
            # (recreated each iteration because the leaf tensor changes)
            optimizer = SGD([self.processed_image], lr=initial_learning_rate)
            # Forward; .to(device) is differentiable, so gradients flow back
            # to the CPU leaf tensor held by the optimizer.
            output = self.model(self.processed_image.to(self.device))
            # Target specific class
            print(output)
            # Minimizing the negated class score == gradient ascent on it.
            class_loss = -output[0, self.target_class]

            # NOTE(review): i % 1 == 0 is always true, so this logs (and
            # below, saves) on every iteration.
            if i % 1 == 0 or i == iterations-1:
                print('Iteration:', str(i), 'Loss',
                      "{0:.2f}".format(class_loss.cpu().data.numpy()))
            # Zero grads
            self.model.zero_grad()
            # Backward
            class_loss.backward()
            # Update image
            optimizer.step()
            # Recreate image (back to a displayable image for the next round)
            self.created_image = recreate_image(self.processed_image)
            print(self.created_image.size)
            if i % 1 == 0 or i == iterations-1:
                # Save image (and decay the learning rate — every iteration,
                # see NOTE above)
                initial_learning_rate /=2
                im_path = 'generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'
                save_image(self.created_image, im_path)

        return self.processed_image

def parse_args(argv=None):
    """Parse command-line arguments for class-specific image generation.

    Args:
        argv: Optional list of argument strings. Defaults to ``sys.argv[1:]``
            when ``None``, so the existing zero-argument call keeps working.

    Returns:
        argparse.Namespace with ``model_path``, ``gpu_id``, ``image_size``,
        ``iterations``, the chosen ``model`` sub-command, and any
        sub-command-specific options (e.g. ``--type`` for the efficient-net
        variants).
    """
    parser = argparse.ArgumentParser(description="Deepfake detection")
    parser.add_argument('--model_path', default="../../../model/xception/model_pytorch_4.pt", help='path to model checkpoint')
    # Help strings below were copy-pasted as 'path to model' in the original;
    # corrected to describe each option.
    parser.add_argument('--gpu_id', type=int, default=-1, help='GPU id to run on (-1 for CPU)')
    parser.add_argument('--image_size', type=int, default=256, help='size in pixels of the square image to generate')
    parser.add_argument('--iterations', type=int, default=256, help='number of gradient-ascent iterations')

    subparsers = parser.add_subparsers(dest="model", help='Choose 1 of the model from: capsule,drn,resnext50, resnext ,gan,meso,xception')
    ## torch
    subparsers.add_parser('capsule', help='Capsule')
    subparsers.add_parser('drn', help='DRN ')
    subparsers.add_parser('local_nn', help='Local NN ')
    subparsers.add_parser('self_attention', help='Self Attention ')
    subparsers.add_parser('resnext50', help='Resnext50 ')
    subparsers.add_parser('resnext101', help='Resnext101 ')
    subparsers.add_parser('myresnext', help='My Resnext ')
    subparsers.add_parser('mnasnet', help='mnasnet pytorch ')
    subparsers.add_parser('xception_torch', help='Xception pytorch ')
    subparsers.add_parser('xception2_torch', help='Xception2 pytorch ')
    subparsers.add_parser('dsp_fwa', help='DSP_SWA pytorch ')

    subparsers.add_parser('xception', help='Xceptionnet')
    # These three sub-commands take an extra --type option, so their parser
    # objects are kept in locals.
    parser_efficient = subparsers.add_parser('efficient', help='Efficient Net')
    parser_efficient.add_argument("--type", type=str, required=False, default="0", help="Type efficient net 0-8")
    subparsers.add_parser('efficientdual', help='Efficient Net')
    parser_efft = subparsers.add_parser('efft', help='Efficient Net fft')
    parser_efft.add_argument("--type", type=str, required=False, default="0", help="Type efficient net 0-8")

    parser_e4dfft = subparsers.add_parser('e4dfft', help='Efficient Net 4d fft')
    parser_e4dfft.add_argument("--type", type=str, required=False, default="0", help="Type efficient net 0-8")

    return parser.parse_args(argv)
if __name__ == '__main__':
    # Entry point: build the selected backbone, load its checkpoint, and run
    # class-specific image generation against it.
    # NOTE(review): the '# Flamingo' comment is inherited from the upstream
    # AlexNet example (utkuozbulak); here class 0 is presumably the real/fake
    # label index — confirm.
    target_class = 0  # Flamingo
    # pretrained_model = models.alexnet(pretrained=True)
    args = parse_args()
    print(args)
    model = args.model
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
    gpu_id = 0 if int(args.gpu_id) >=0 else -1
    image_size = args.image_size
    iterations= args.iterations
    # Select the backbone from the chosen sub-command. Imports are done
    # lazily per-branch so only the selected model's dependencies are needed.
    if model== "capsule":
        # capsule is not supported here; bail out immediately.
        exit(0)
        pass
    elif model == "drn" :
        from pytorch_model.drn.drn_seg import DRNSub
        model = DRNSub(1)
        pass
    elif model == "local_nn" :
        from pytorch_model.local_nn import local_nn
        model = local_nn()
    elif model == "self_attention":
        from pytorch_model.self_attention import self_attention
        model = self_attention()
    elif model == "resnext50":
        from pytorch_model.model_cnn_pytorch import resnext50
        model = resnext50(False)
    elif model == "resnext101":
        from pytorch_model.model_cnn_pytorch import resnext101
        model = resnext101(False)
    elif model == "myresnext":
        from pytorch_model.model_cnn_pytorch import MyResNetX
        model = MyResNetX()
    elif model == "mnasnet":
        from pytorch_model.model_cnn_pytorch import mnasnet
        model = mnasnet(False)
    elif model == "xception_torch":
        from pytorch_model.xception import xception
        model = xception(pretrained=False)
    elif model == "xception2_torch":
        from pytorch_model.xception import xception2
        model = xception2(pretrained=False)
    elif model == "dsp_fwa":
        from pytorch_model.DSP_FWA.models.classifier import SPPNet
        model = SPPNet(backbone=50, num_class=1)
    elif model == "siamese_torch":
        from pytorch_model.siamese import SiameseNetworkResnet
        # NOTE(review): args.length_embed is never declared by parse_args
        # (and 'siamese_torch' has no sub-parser), so this branch would raise
        # AttributeError if reached — confirm / add the option.
        model = SiameseNetworkResnet(length_embed = args.length_embed,pretrained=True)
    elif model == "efficient":
        from pytorch_model.efficientnet import EfficientNet
        model = EfficientNet.from_pretrained('efficientnet-b'+args.type,num_classes=1)
        model = nn.Sequential(model,nn.Sigmoid())

    elif model == "efft":
        from pytorch_model.efficientnet import EfficientNet
        model = EfficientNet.from_pretrained('efficientnet-b' + args.type, num_classes=1,in_channels=1)
        model = nn.Sequential(model, nn.Sigmoid())
    elif model == "e4dfft":
        from pytorch_model.efficientnet import EfficientNet
        model = EfficientNet.from_pretrained('efficientnet-b' + args.type, num_classes=1,in_channels=4)
        model = nn.Sequential(model, nn.Sigmoid())
    elif model == "efficientdual":
        pass


    # NOTE(review): the two lines below unconditionally re-import xception and
    # overwrite `model`, which makes the entire if/elif selection above dead
    # code — every run uses an un-pretrained xception regardless of the
    # sub-command. This looks like leftover debugging or merge-conflict
    # residue; confirm whether the override should be removed (beware: the
    # 'efficientdual' branch currently relies on it, since it leaves `model`
    # as a plain string).
    from pytorch_model.xception import xception

    model = xception(pretrained=False)
    device = torch.device("cuda" if torch.cuda.is_available()
                          else "cpu")
    model = model.to(device)
    # Checkpoint is loaded onto CPU first, then moved by the .to(device) above.
    model.load_state_dict(torch.load(args.model_path,map_location=torch.device('cpu')))
    print("Load xong ... ")
    model.eval()
    csig = ClassSpecificImageGeneration(model, target_class,image_size)
    csig.generate(iterations = iterations)
Loading

0 comments on commit f39e506

Please sign in to comment.