replace skimage resize with image resize

pull/50/head
Shawn-Shan 2020-07-26 14:37:55 -05:00
parent 2b2d054118
commit c52b38e152
3 changed files with 52 additions and 28 deletions

View file

@@ -407,6 +407,8 @@ class FawkesMaskGeneration:
             if iteration != 0 and iteration % (self.MAX_ITERATIONS // 3) == 0:
                 LR = LR * 0.8
+                if self.verbose:
+                    print("Learning rate: ", LR)

             if iteration % (self.MAX_ITERATIONS // 5) == 0:
                 if self.verbose == 1:
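
The two added lines only log the schedule that was already in place: a step decay that multiplies the learning rate by 0.8 every MAX_ITERATIONS // 3 iterations. A minimal sketch of the resulting trajectory, with a hypothetical initial rate and iteration budget (lr_at is illustrative and not part of the codebase):

    # Illustrative only: reproduces the step decay printed by the new verbose branch.
    # lr0 and max_iterations are made-up values, not the project's defaults.
    def lr_at(iteration, lr0=20.0, max_iterations=1000):
        drops = iteration // (max_iterations // 3)   # one 0.8x drop per third of the run
        return lr0 * (0.8 ** drops)

    print(lr_at(0), lr_at(333), lr_at(999))   # 20.0 16.0 10.24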

View file

@@ -26,16 +26,6 @@ def generate_cloak_images(protector, image_X, target_emb=None):
     return cloaked_image_X


-def check_imgs(imgs):
-    if np.max(imgs) <= 1 and np.min(imgs) >= 0:
-        imgs = imgs * 255.0
-    elif np.max(imgs) <= 255 and np.min(imgs) >= 0:
-        pass
-    else:
-        raise Exception("Image values ")
-    return imgs
-
-
 class Fawkes(object):
     def __init__(self, feature_extractor, gpu, batch_size):
@@ -66,7 +56,7 @@ class Fawkes(object):
     def mode2param(self, mode):
         if mode == 'low':
             th = 0.003
-            max_step = 50
+            max_step = 40
             lr = 20
         elif mode == 'mid':
             th = 0.005
@@ -89,7 +79,6 @@ class Fawkes(object):
     def run_protection(self, image_paths, mode='low', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1, format='png',
                        separate_target=True, debug=False):
         if mode == 'custom':
             pass
         else:
@@ -101,11 +90,16 @@ class Fawkes(object):
         image_paths, loaded_images = filter_image_paths(image_paths)
         if not image_paths:
-            raise Exception("No images in the directory")
+            print("No images in the directory")
+            return 3

         with graph.as_default():
             faces = Faces(image_paths, loaded_images, self.aligner, verbose=1)
             original_images = faces.cropped_faces

+            if len(original_images) == 0:
+                print("No face detected. ")
+                return 2
+
             original_images = np.array(original_images)

         with sess.as_default():
@@ -143,16 +137,19 @@ class Fawkes(object):
                 faces.cloaked_cropped_faces = protected_images

-                cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(
-                    original_images)
-                final_images = faces.merge_faces(cloak_perturbation)
+                # cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(
+                #     original_images)
+                # final_images = faces.merge_faces(cloak_perturbation)
+                final_images = faces.merge_faces(reverse_process_cloaked(protected_images),
+                                                 reverse_process_cloaked(original_images))

             for p_img, path in zip(final_images, image_paths):
                 file_name = "{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), mode, format)
                 dump_image(p_img, file_name, format=format)

             print("Done!")
-            return None
+            return 1


 def main(*argv):
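
Taken together with the earlier hunk, run_protection now reports its outcome through a return code instead of raising: 1 on success, 2 when no face is found in the cropped inputs, 3 when the directory holds no usable images. A minimal caller sketch under that reading (the status handling is illustrative, not part of the patch):

    # Hypothetical caller; assumes the return codes introduced in this commit
    # (1 = success, 2 = no face detected, 3 = no images in the directory).
    status = protector.run_protection(image_paths, mode='low')
    if status == 1:
        print("cloaked copies written next to the originals")
    elif status == 2:
        print("skipped: no face detected")
    elif status == 3:
        print("skipped: no images found")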
@@ -201,9 +198,17 @@ def main(*argv):
     image_paths = [path for path in image_paths if "_cloaked" not in path.split("/")[-1]]

     protector = Fawkes(args.feature_extractor, args.gpu, args.batch_size)
-    protector.run_protection(image_paths, mode=args.mode, th=args.th, sd=args.sd, lr=args.lr, max_step=args.max_step,
-                             batch_size=args.batch_size, format=args.format,
-                             separate_target=args.separate_target, debug=args.debug)
+    if args.mode != 'all':
+        protector.run_protection(image_paths, mode=args.mode, th=args.th, sd=args.sd, lr=args.lr,
+                                 max_step=args.max_step,
+                                 batch_size=args.batch_size, format=args.format,
+                                 separate_target=args.separate_target, debug=args.debug)
+    else:
+        for m in ['low', 'mid', 'high']:
+            protector.run_protection(image_paths, mode=m, th=args.th, sd=args.sd, lr=args.lr,
+                                     max_step=args.max_step,
+                                     batch_size=args.batch_size, format=args.format,
+                                     separate_target=args.separate_target, debug=args.debug)


 if __name__ == '__main__':
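
With the 'all' mode the script runs every preset over the same inputs, so each photo gains three cloaked siblings. The naming follows the format string already used in run_protection; a small sketch with a hypothetical input path:

    # Output names when mode is 'all', mirroring the format string in run_protection.
    # "imgs/person.png" is a hypothetical input path.
    path, fmt = "imgs/person.png", "png"
    for mode in ['low', 'mid', 'high']:
        print("{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), mode, fmt))
    # imgs/person_low_cloaked.png
    # imgs/person_mid_cloaked.png
    # imgs/person_high_cloaked.png

The "_cloaked" filter a few lines above keeps re-runs from picking these outputs up as new inputs.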

View file

@@ -25,7 +25,7 @@ from PIL import Image, ExifTags
 from keras.layers import Dense, Activation, Dropout
 from keras.models import Model
 from keras.preprocessing import image
-from skimage.transform import resize
+# from skimage.transform import resize

 from fawkes.align_face import align
 from six.moves.urllib.request import urlopen
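
This import swap is the point of the commit: for integer input, skimage.transform.resize returns floats rescaled to [0, 1] unless preserve_range=True is passed, while the PIL-backed resize() added at the bottom of this file keeps values in the 0-255 range the rest of the pipeline works in. A minimal comparison sketch (illustrative only; assumes both scikit-image and Keras are installed):

    import numpy as np
    from skimage.transform import resize as sk_resize
    from keras.preprocessing import image

    img = np.random.randint(0, 256, (300, 200, 3), dtype=np.uint8)

    a = sk_resize(img, (224, 224))                                       # rescaled to [0, 1]
    b = image.img_to_array(image.array_to_img(img).resize((224, 224)))   # stays in 0-255

    print(a.min(), a.max())   # roughly 0.0 ... 1.0
    print(b.min(), b.max())   # roughly 0.0 ... 255.0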
@@ -140,7 +140,6 @@ class Faces(object):
             cur_faces_square = []
             if verbose:
                 print("Find {} face(s) in {}".format(len(cur_faces), p.split("/")[-1]))
-
             if eval_local:
                 cur_faces = cur_faces[:1]
@@ -152,16 +151,16 @@ class Faces(object):
                 base = np.zeros((long_size, long_size, 3))
                 base[0:img.shape[0], 0:img.shape[1], :] = img
                 cur_faces_square.append(base)

             cur_index = align_img[1]
             cur_faces_square = [resize(f, (224, 224)) for f in cur_faces_square]
             self.cropped_faces_shape.extend(cur_shapes)
             self.cropped_faces.extend(cur_faces_square)
             self.cropped_index.extend(cur_index)
             self.callback_idx.extend([i] * len(cur_faces_square))

-        if not self.cropped_faces:
-            raise Exception("No faces detected")
+        if len(self.cropped_faces) == 0:
+            return

         self.cropped_faces = np.array(self.cropped_faces)
@@ -173,15 +172,24 @@ class Faces(object):
     def get_faces(self):
         return self.cropped_faces

-    def merge_faces(self, cloaks):
+    def merge_faces(self, protected_images, original_images):
         self.cloaked_faces = np.copy(self.org_faces)

         for i in range(len(self.cropped_faces)):
-            cur_cloak = cloaks[i]
+            # cur_cloak = cloaks[i]
+            cur_protected = protected_images[i]
+            cur_original = original_images[i]
+
             org_shape = self.cropped_faces_shape[i]
             old_square_shape = max([org_shape[0], org_shape[1]])
-            reshape_cloak = resize(cur_cloak, (old_square_shape, old_square_shape))
+
+            # reshape_cloak = resize(cur_cloak, (old_square_shape, old_square_shape))
+            cur_protected = resize(cur_protected, (old_square_shape, old_square_shape))
+            cur_original = resize(cur_original, (old_square_shape, old_square_shape))
+
+            reshape_cloak = cur_protected - cur_original
+
             reshape_cloak = reshape_cloak[0:org_shape[0], 0:org_shape[1], :]

             callback_id = self.callback_idx[i]
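
merge_faces previously upscaled the 224x224 perturbation itself and pasted it onto the original photo; it now upscales the protected and original crops separately and takes their difference at the crop's native size, so the cloak is computed in the same 0-255 space the images live in. A per-face walk-through with made-up sizes (assumes this module is importable as fawkes.utils):

    import numpy as np
    from fawkes.utils import resize   # the PIL-backed helper added below

    org_shape = (310, 290)                               # hypothetical original crop size
    protected = np.random.uniform(0, 255, (224, 224, 3)) # stand-ins for the optimizer's
    original = np.random.uniform(0, 255, (224, 224, 3))  # output and the matching input

    old_square = max(org_shape)                          # the padded square the crop came from
    p = resize(protected, (old_square, old_square))      # back to 310x310, values stay in 0-255
    o = resize(original, (old_square, old_square))
    cloak = (p - o)[0:org_shape[0], 0:org_shape[1], :]   # drop the padding before pasting back
    print(cloak.shape)                                   # (310, 290, 3)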
@@ -211,6 +219,15 @@ def load_victim_model(number_classes, teacher_model=None, end2end=False, dropout
     return model


+def resize(img, sz):
+    assert np.min(img) >= 0 and np.max(img) <= 255.0
+
+    from keras.preprocessing import image
+    im_data = image.array_to_img(img).resize((sz[1], sz[0]))
+    im_data = image.img_to_array(im_data)
+    return im_data
+
+
 def init_gpu(gpu_index, force=False):
     if isinstance(gpu_index, list):
         gpu_num = ','.join([str(i) for i in gpu_index])
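
A quick property check of the new helper, assuming the module is importable as fawkes.utils and that Keras's array_to_img/img_to_array round-trip is available: the output has the requested (height, width) and the values stay inside 0-255 rather than being rescaled to [0, 1].

    import numpy as np
    from fawkes.utils import resize

    img = np.random.randint(0, 256, (300, 200, 3)).astype(float)
    out = resize(img, (224, 224))
    print(out.shape)                          # (224, 224, 3)
    print(out.min() >= 0, out.max() <= 255)   # True True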