add option to bypass face detection step

pull/50/head
Shawn-Shan 2020-08-01 00:30:40 -05:00
parent 641e020e09
commit 8ceeaf54b0
2 changed files with 33 additions and 18 deletions
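The change threads a single `no_align` switch from the CLI through `run_protection` and into the `Faces` loader, so already-cropped portraits can be cloaked without running the face detector first. A minimal usage sketch, assuming the pip-installed `fawkes` console entry point and its existing `--directory` argument are unchanged (the folder path is illustrative):

    # command line: cloak everything in ./imgs without detecting/cropping faces
    fawkes --directory ./imgs --mode min --no-align

    # Python API: the new keyword added to run_protection in this commit
    protector.run_protection(image_paths, mode='min', no_align=True)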

View file

@@ -84,7 +84,7 @@ class Fawkes(object):
         return th, max_step, lr
 
     def run_protection(self, image_paths, mode='min', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1, format='png',
-                       separate_target=True, debug=False):
+                       separate_target=True, debug=False, no_align=False):
         if mode == 'custom':
             pass
         else:
@@ -100,9 +100,9 @@ class Fawkes(object):
             return 3
 
         with graph.as_default():
-            faces = Faces(image_paths, loaded_images, self.aligner, verbose=1)
+            faces = Faces(image_paths, loaded_images, self.aligner, verbose=1, no_align=no_align)
             original_images = faces.cropped_faces
 
         if len(original_images) == 0:
             print("No face detected. ")
             return 2
@@ -184,12 +184,14 @@ def main(*argv):
     parser.add_argument('--max-step', help='only relevant with mode=custom, number of steps for optimization', type=int,
                         default=1000)
     parser.add_argument('--sd', type=int, help='only relevant with mode=custom, penalty number, read more in the paper',
-                        default=1e6)
+                        default=1e9)
     parser.add_argument('--lr', type=float, help='only relevant with mode=custom, learning rate', default=2)
 
     parser.add_argument('--batch-size', help="number of images to run optimization together", type=int, default=1)
     parser.add_argument('--separate_target', help="whether select separate targets for each faces in the directory",
                         action='store_true')
+    parser.add_argument('--no-align', help="whether to detect and crop faces",
+                        action='store_true')
     parser.add_argument('--debug', help="turn on debug and copy/paste the stdout when reporting an issue on github",
                         action='store_true')
     parser.add_argument('--format', type=str,
@@ -209,7 +211,7 @@ def main(*argv):
     protector.run_protection(image_paths, mode=args.mode, th=args.th, sd=args.sd, lr=args.lr,
                              max_step=args.max_step,
                              batch_size=args.batch_size, format=args.format,
-                             separate_target=args.separate_target, debug=args.debug)
+                             separate_target=args.separate_target, debug=args.debug, no_align=args.no_align)
 
 
 if __name__ == '__main__':
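One detail worth noting: argparse converts the dash in `--no-align` into an underscore, which is why the call above can read the flag back as `args.no_align`. A self-contained sketch of that behaviour:

    # how argparse maps the new flag name onto an attribute (illustrative)
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--no-align', action='store_true')
    args = parser.parse_args(['--no-align'])
    print(args.no_align)  # True; dashes become underscores in the dest name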

View file

@@ -120,9 +120,11 @@ def filter_image_paths(image_paths):
 class Faces(object):
-    def __init__(self, image_paths, loaded_images, aligner, verbose=1, eval_local=False, preprocessing=True):
+    def __init__(self, image_paths, loaded_images, aligner, verbose=1, eval_local=False, preprocessing=True,
+                 no_align=False):
         self.image_paths = image_paths
         self.verbose = verbose
+        self.no_align = no_align
         self.aligner = aligner
         self.org_faces = []
         self.cropped_faces = []
@@ -132,25 +134,27 @@ class Faces(object):
         for i in range(0, len(loaded_images)):
             cur_img = loaded_images[i]
             p = image_paths[i]
             self.org_faces.append(cur_img)
 
             if eval_local:
                 margin = 0
             else:
                 margin = 0.7
-            align_img = align(cur_img, self.aligner, margin=margin)
-            if align_img is None:
-                print("Find 0 face(s)".format(p.split("/")[-1]))
-                continue
-            cur_faces = align_img[0]
+            if not no_align:
+                align_img = align(cur_img, self.aligner, margin=margin)
+                if align_img is None:
+                    print("Find 0 face(s)".format(p.split("/")[-1]))
+                    continue
+                cur_faces = align_img[0]
+            else:
+                cur_faces = [cur_img]
 
             cur_shapes = [f.shape[:-1] for f in cur_faces]
 
             cur_faces_square = []
-            if verbose:
+            if verbose and not no_align:
                 print("Find {} face(s) in {}".format(len(cur_faces), p.split("/")[-1]))
             if eval_local:
                 cur_faces = cur_faces[:1]
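The branch above relies on `align()` returning either `None` (no face found) or a pair whose first element is the list of cropped faces and whose second element is per-face index information; with `no_align` set, the whole image simply stands in for that single-element face list. A stub with the same shape, purely for illustration (not the repo's detector):

    import numpy as np

    def fake_align(img):
        # hypothetical stand-in for the repo's align(): (faces, index_info) or None
        return [img], [(0, 0, img.shape[1], img.shape[0])]

    cur_img = np.zeros((300, 400, 3))   # stand-in for a loaded photo
    no_align = True
    cur_faces = [cur_img] if no_align else fake_align(cur_img)[0]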
@@ -161,15 +165,21 @@ class Faces(object):
             else:
                 long_size = max([img.shape[1], img.shape[0]])
                 base = np.zeros((long_size, long_size, 3))
+                # import pdb
+                # pdb.set_trace()
                 base[0:img.shape[0], 0:img.shape[1], :] = img
                 cur_faces_square.append(base)
-            cur_index = align_img[1]
-            cur_faces_square = [resize(f, (224, 224)) for f in cur_faces_square]
-            self.cropped_faces_shape.extend(cur_shapes)
+
+            cur_faces_square = [resize(f, (224, 224)) for f in cur_faces_square]
             self.cropped_faces.extend(cur_faces_square)
-            self.cropped_index.extend(cur_index)
-            self.callback_idx.extend([i] * len(cur_faces_square))
+
+            if not self.no_align:
+                cur_index = align_img[1]
+                self.cropped_faces_shape.extend(cur_shapes)
+                self.cropped_index.extend(cur_index)
+                self.callback_idx.extend([i] * len(cur_faces_square))
 
         if len(self.cropped_faces) == 0:
             return
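In the `no_align` case the whole photo is treated as one "face": it is padded onto a square canvas and resized to the 224x224 network input, while the crop-index bookkeeping (only needed to paste cloaked crops back into the original) is skipped. A rough sketch of that path, using `skimage.transform.resize` as a stand-in for the repo's own `resize` helper:

    import numpy as np
    from skimage.transform import resize  # stand-in for the repo's resize()

    cur_img = np.zeros((300, 400, 3))                       # stand-in for a loaded photo
    long_size = max(cur_img.shape[0], cur_img.shape[1])
    base = np.zeros((long_size, long_size, 3))              # pad onto a square canvas
    base[:cur_img.shape[0], :cur_img.shape[1], :] = cur_img
    face_224 = resize(base, (224, 224))                     # network input size
    print(face_224.shape)                                   # (224, 224, 3)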
@@ -186,6 +196,8 @@ class Faces(object):
         return self.cropped_faces
 
     def merge_faces(self, protected_images, original_images):
+        if self.no_align:
+            return np.clip(protected_images, 0.0, 255.0)
         self.cloaked_faces = np.copy(self.org_faces)
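With alignment disabled there are no crops to paste back into the source photos, so `merge_faces` now just clips the perturbed pixel values into the valid range and returns them. A tiny illustration of that clipping, with made-up pixel values:

    import numpy as np

    protected_images = np.array([[[-3.0, 128.0, 260.5]]])   # out-of-range after perturbation
    print(np.clip(protected_images, 0.0, 255.0))             # values clipped to [0, 255]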
@@ -520,6 +532,7 @@ def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, m
     https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/keras/utils/data_utils.py#L168-L297
     """
+
 def get_file(fname,
              origin,
              untar=False,