Remove deprecated code and clean up imports

pull/180/head
Jacob Logas 2023-08-01 15:23:57 -04:00
parent aedaa82d22
commit 953a473903
No known key found for this signature in database
GPG key ID: 3A070DF145C222B6
6 changed files with 25 additions and 146 deletions

5
.gitignore vendored

@ -6,6 +6,7 @@ build/*
keras/datasets/data/*
keras/datasets/temp/*
tags
fawkes/model/
fawkes.egg-info
# test-related
@ -17,4 +18,6 @@ fawkes_dev/protect_personId.txt
# developer environments
.idea
.vscode
.vscode
photos/*

fawkes/__init__.py

@ -8,13 +8,12 @@ __version__ = '1.0.2'
from .differentiator import FawkesMaskGeneration
from .protection import main, Fawkes
from .utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, get_file, \
from .utils import load_extractor, init_gpu, dump_image, reverse_process_cloaked, Faces, get_file, \
filter_image_paths
__all__ = (
'__version__',
'FawkesMaskGeneration', 'load_extractor',
'init_gpu',
'select_target_label', 'dump_image', 'reverse_process_cloaked',
'init_gpu', 'dump_image', 'reverse_process_cloaked',
'Faces', 'get_file', 'filter_image_paths', 'main', 'Fawkes'
)
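For context, the public surface kept by this cleanup can be exercised with the short sketch below. It is illustrative only, not part of the commit, and assumes the fawkes package is importable in the current environment.

# Illustrative use of the names still listed in __all__ above;
# note that select_target_label is no longer exported.
from fawkes import (
    __version__,
    Fawkes,
    FawkesMaskGeneration,
    Faces,
    dump_image,
    init_gpu,
    load_extractor,
)

print(__version__)  # expected to print '1.0.2'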

fawkes/differentiator.py

@ -8,6 +8,7 @@ import time
import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers.legacy import Adadelta
from fawkes.utils import preprocess, reverse_preprocess
from keras.utils import Progbar
@ -146,9 +147,9 @@ class FawkesMaskGeneration:
def compute_feature_loss(self, tape, aimg_raw, simg_raw, aimg_input, timg_input, simg_input):
""" Compute input space + feature space loss.
"""
input_space_loss, dist_raw, input_space_loss_sum, input_space_loss_raw_avg = self.calc_dissim(aimg_raw,
input_space_loss, dist_raw, _, input_space_loss_raw_avg = self.calc_dissim(aimg_raw,
simg_raw)
feature_space_loss, feature_space_loss_sum = self.calc_bottlesim(tape, aimg_input, timg_input, simg_input)
feature_space_loss, _ = self.calc_bottlesim(tape, aimg_input, timg_input, simg_input)
if self.maximize:
loss = self.const * tf.square(input_space_loss) - feature_space_loss * self.const_diff
@ -172,7 +173,7 @@ class FawkesMaskGeneration:
print('protection cost %f s' % elapsed_time)
return np.array(adv_imgs)
def compute_batch(self, source_imgs, target_imgs=None, retry=True):
def compute_batch(self, source_imgs, target_imgs=None):
""" TF2 method to generate the cloak. """
# preprocess images.
global progressbar
@ -195,7 +196,7 @@ class FawkesMaskGeneration:
dtype=tf.float32)
# make the optimizer
optimizer = tf.keras.optimizers.Adadelta(float(self.learning_rate))
optimizer = Adadelta(float(self.learning_rate))
const_numpy = np.ones(len(source_imgs)) * self.initial_const
self.const = tf.Variable(const_numpy, dtype=np.float32)
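The optimizer change above matters on recent TensorFlow 2.x releases, where tf.keras.optimizers.Adadelta resolves to a newer implementation; importing tensorflow.keras.optimizers.legacy.Adadelta keeps the original update rule. A minimal standalone sketch of the same call pattern, with a toy variable and loss standing in for the cloak perturbation (all values here are assumptions, not taken from the commit):

import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers.legacy import Adadelta

learning_rate = 0.1                                   # illustrative value
optimizer = Adadelta(float(learning_rate))

# Toy stand-in for the modifier variable optimized by FawkesMaskGeneration.
modifier = tf.Variable(np.zeros((1, 112, 112, 3), dtype=np.float32))

with tf.GradientTape() as tape:
    loss = tf.reduce_sum(tf.square(modifier + 1.0))   # dummy loss

grads = tape.gradient(loss, [modifier])
optimizer.apply_gradients(zip(grads, [modifier]))     # same apply pattern as the generator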

fawkes/protection.py

@ -79,7 +79,7 @@ class Fawkes(object):
return th, max_step, lr, extractors
def run_protection(self, image_paths, th=0.04, sd=1e7, lr=10, max_step=500, batch_size=1, format='png',
separate_target=True, debug=False, no_align=False, exp="", maximize=True,
separate_target=True, debug=False, no_align=False, maximize=True,
save_last_on_failed=True):
current_param = "-".join([str(x) for x in [self.th, sd, self.lr, self.max_step, batch_size, format,
@ -122,7 +122,6 @@ class Fawkes(object):
save_last_on_failed=save_last_on_failed,
)
protected_images = generate_cloak_images(self.protector, original_images)
faces.cloaked_cropped_faces = protected_images
final_images, images_without_face = faces.merge_faces(
reverse_process_cloaked(protected_images, preprocess=PREPROCESS),
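With the exp keyword gone, a call against the updated run_protection signature looks roughly like the following; the image path is hypothetical and the keyword values simply restate the defaults shown above.

# Hypothetical driver; mirrors the signature after this commit.
from fawkes.protection import Fawkes

fwks = Fawkes("extractor_2", '0', 1, mode="low")
fwks.run_protection(
    ['./photos/P2.jpg'],          # assumed input path
    th=0.04,
    lr=10,
    max_step=500,
    batch_size=1,
    format='jpeg',
    separate_target=True,
    no_align=False,
    maximize=True,
    save_last_on_failed=True,
)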

fawkes/utils.py

@ -9,7 +9,6 @@ import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
@ -18,7 +17,6 @@ import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
@ -26,16 +24,14 @@ from six.moves.urllib.error import HTTPError, URLError
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import keras
from tensorflow.keras.models import load_model
sys.stderr = stderr
import keras.backend as K
from tensorflow.keras.backend import image_data_format
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from PIL import Image, ExifTags, UnidentifiedImageError
from tensorflow.keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
@ -65,14 +61,6 @@ if sys.version_info[0] == 2:
else:
from six.moves.urllib.request import urlretrieve
def clip_img(X, preprocessing='raw'):
X = reverse_preprocess(X, preprocessing)
X = np.clip(X, 0.0, 255.0)
X = preprocess(X, preprocessing)
return X
IMG_SIZE = 112
PREPROCESS = 'raw'
@ -80,7 +68,7 @@ PREPROCESS = 'raw'
def load_image(path):
try:
img = Image.open(path)
except PIL.UnidentifiedImageError:
except UnidentifiedImageError:
return None
except IsADirectoryError:
return None
@ -199,12 +187,8 @@ class Faces(object):
if preprocessing:
self.cropped_faces = preprocess(self.cropped_faces, PREPROCESS)
self.cloaked_cropped_faces = None
self.cloaked_faces = np.copy(self.org_faces)
def get_faces(self):
return self.cropped_faces
def merge_faces(self, protected_images, original_images):
if self.no_align:
return np.clip(protected_images, 0.0, 255.0), self.images_without_face
@ -242,25 +226,6 @@ def get_ends(longsize, window):
return start, end
def dump_dictionary_as_json(dict, outfile):
j = json.dumps(dict)
with open(outfile, "wb") as f:
f.write(j.encode())
def load_victim_model(number_classes, teacher_model=None, end2end=False):
for l in teacher_model.layers:
l.trainable = end2end
x = teacher_model.layers[-1].output
x = Dense(number_classes)(x)
x = Activation('softmax', name="act")(x)
model = Model(teacher_model.input, x)
opt = keras.optimizers.Adadelta()
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
def resize(img, sz):
assert np.min(img) >= 0 and np.max(img) <= 255.0
from keras.preprocessing import image
@ -290,21 +255,6 @@ def init_gpu(gpu):
print(e)
def fix_gpu_memory(mem_fraction=1):
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf_config = None
if tf.test.is_gpu_available():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_fraction)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
tf_config.gpu_options.allow_growth = True
tf_config.log_device_placement = False
init_op = tf.global_variables_initializer()
sess = tf.Session(config=tf_config)
sess.run(init_op)
K.set_session(sess)
return sess
def preprocess(X, method):
assert method in {'raw', 'imagenet', 'inception', 'mnist'}
@ -333,7 +283,7 @@ def reverse_preprocess(X, method):
def imagenet_preprocessing(x, data_format=None):
if data_format is None:
data_format = K.image_data_format()
data_format = image_data_format()
assert data_format in ('channels_last', 'channels_first')
x = np.array(x)
@ -381,10 +331,9 @@ def imagenet_preprocessing(x, data_format=None):
def imagenet_reverse_preprocessing(x, data_format=None):
import keras.backend as K
x = np.array(x)
if data_format is None:
data_format = K.image_data_format()
data_format = image_data_format()
assert data_format in ('channels_last', 'channels_first')
if data_format == 'channels_first':
@ -411,18 +360,9 @@ def imagenet_reverse_preprocessing(x, data_format=None):
def reverse_process_cloaked(x, preprocess='imagenet'):
# x = clip_img(x, preprocess)
return reverse_preprocess(x, preprocess)
def build_bottleneck_model(model, cut_off):
bottleneck_model = Model(model.input, model.get_layer(cut_off).output)
bottleneck_model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return bottleneck_model
def load_extractor(name):
hash_map = {"extractor_2": "ce703d481db2b83513bbdafa27434703",
"extractor_0": "94854151fd9077997d69ceda107f9c6b"}
@ -434,7 +374,7 @@ def load_extractor(name):
get_file("{}.h5".format(name), "http://mirror.cs.uchicago.edu/fawkes/files/{}.h5".format(name),
cache_dir=model_dir, cache_subdir='', md5_hash=cur_hash)
model = keras.models.load_model(model_file)
model = load_model(model_file)
model = Extractor(model)
return model
@ -452,20 +392,6 @@ class Extractor(object):
return self.predict(x)
def get_dataset_path(dataset):
model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
if not os.path.exists(os.path.join(model_dir, "config.json")):
raise Exception("Please config the datasets before running protection code. See more in README and config.py.")
config = json.load(open(os.path.join(model_dir, "config.json"), 'r'))
if dataset not in config:
raise Exception(
"Dataset {} does not exist, please download to data/ and add the path to this function... Abort".format(
dataset))
return config[dataset]['train_dir'], config[dataset]['test_dir'], config[dataset]['num_classes'], config[dataset][
'num_images']
def dump_image(x, filename, format="png", scale=False):
img = image.array_to_img(x, scale=scale)
img.save(filename, format)
@ -506,60 +432,6 @@ def pairwise_l2_distance(A, B):
ED = np.sqrt(SqED)
return ED
def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, metric='l2'):
model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
original_feature_x = extractor_ls_predict(feature_extractors_ls, imgs)
path2emb = load_embeddings(feature_extractors_names)
items = list([(k, v) for k, v in path2emb.items()])
paths = [p[0] for p in items]
embs = [p[1] for p in items]
embs = np.array(embs)
pair_dist = pairwise_l2_distance(original_feature_x, embs)
pair_dist = np.array(pair_dist)
max_sum = np.min(pair_dist, axis=0)
max_id_ls = np.argsort(max_sum)[::-1]
max_id = random.choice(max_id_ls[:20])
target_data_id = paths[int(max_id)]
print("target ID: {}".format(target_data_id))
image_dir = os.path.join(model_dir, "target_data/{}".format(target_data_id))
os.makedirs(os.path.join(model_dir, "target_data"), exist_ok=True)
os.makedirs(image_dir, exist_ok=True)
for i in range(10):
if os.path.exists(os.path.join(model_dir, "target_data/{}/{}.jpg".format(target_data_id, i))):
continue
try:
get_file("{}.jpg".format(i),
"http://mirror.cs.uchicago.edu/fawkes/files/target_data/{}/{}.jpg".format(target_data_id, i),
cache_dir=model_dir, cache_subdir='target_data/{}/'.format(target_data_id))
except Exception:
pass
image_paths = glob.glob(image_dir + "/*.jpg")
target_images = [image.img_to_array(image.load_img(cur_path)) for cur_path in
image_paths]
target_images = np.array([resize(x, (IMG_SIZE, IMG_SIZE)) for x in target_images])
target_images = preprocess(target_images, PREPROCESS)
target_images = list(target_images)
while len(target_images) < len(imgs):
target_images += target_images
target_images = random.sample(target_images, len(imgs))
return np.array(target_images)
def l2_norm(x, axis=1):
"""l2 norm"""
norm = tf.norm(x, axis=axis, keepdims=True)
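The thrust of the utils.py changes is consolidating on the tensorflow.keras entry points (load_model, image_data_format, preprocessing.image) and importing UnidentifiedImageError directly from PIL. Below is a condensed sketch of how those pieces fit together; the cache directory and extractor file name come from the hunks above, while the helper name and guard logic are assumptions.

import os
import numpy as np
from PIL import Image, UnidentifiedImageError
from tensorflow.keras.backend import image_data_format
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

def load_image_or_none(path):
    # Mirrors load_image above: unreadable input yields None instead of raising.
    try:
        return Image.open(path)
    except (UnidentifiedImageError, IsADirectoryError):
        return None

model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
model_file = os.path.join(model_dir, "extractor_2.h5")
if os.path.exists(model_file):                 # assumes the extractor was already downloaded
    extractor = load_model(model_file)         # replaces keras.models.load_model

print(image_data_format())                     # replaces keras.backend.image_data_format()
img = image.array_to_img(np.zeros((112, 112, 3)), scale=False)   # same call dump_image relies on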

5
test.py 100644

@ -0,0 +1,5 @@
from fawkes.protection import Fawkes
import os
fwks = Fawkes("extractor_2", '0', 1, mode="low")
fwks.run_protection(['./photos/P2.jpg'], format='jpeg')
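Assuming an image exists at ./photos/P2.jpg (the new photos/* entry in .gitignore keeps such inputs out of version control), this smoke test can be run from the repository root with python test.py, which cloaks the photo in the low-protection mode.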