From e9f1a506530b2ee7e70325f81d62c7f089124fb2 Mon Sep 17 00:00:00 2001 From: Shawn-Shan Date: Tue, 7 Jul 2020 11:14:38 -0500 Subject: [PATCH] 0.0.6 Former-commit-id: 14c0173d9f573e7ccb275b3e366505057ac2c9b1 [formerly e359682d967212b4b3f27923fd659bbade7880e5] Former-commit-id: a44577686ff64da031231ea323c681185daa8b0d --- README.md | 6 +- fawkes.egg-info/PKG-INFO | 71 -------- fawkes.egg-info/SOURCES.txt | 10 -- fawkes.egg-info/dependency_links.txt | 1 - fawkes.egg-info/top_level.txt | 1 - fawkes/Untitled.ipynb.REMOVED.git-id | 1 - fawkes/__init__.py | 18 +- fawkes/__pycache__/align_face.cpython-36.pyc | Bin 2298 -> 2305 bytes fawkes/__pycache__/detect_face.cpython-36.pyc | Bin 22110 -> 0 bytes .../__pycache__/differentiator.cpython-36.pyc | Bin 9086 -> 9088 bytes fawkes/__pycache__/utils.cpython-36.pyc | Bin 16270 -> 16607 bytes fawkes/align_face.py | 13 +- fawkes/{detect_face.py => detect_faces.py} | 80 ++++----- fawkes/differentiator.py | 2 +- fawkes/protection.py | 45 +++-- fawkes/utils.py | 163 ++---------------- fawkes_dev/azure.py | 39 +++-- requirements.txt | 4 - setup.py | 6 +- 19 files changed, 112 insertions(+), 348 deletions(-) delete mode 100644 fawkes.egg-info/PKG-INFO delete mode 100644 fawkes.egg-info/SOURCES.txt delete mode 100644 fawkes.egg-info/dependency_links.txt delete mode 100644 fawkes.egg-info/top_level.txt delete mode 100644 fawkes/Untitled.ipynb.REMOVED.git-id delete mode 100644 fawkes/__pycache__/detect_face.cpython-36.pyc rename fawkes/{detect_face.py => detect_faces.py} (93%) delete mode 100644 requirements.txt diff --git a/README.md b/README.md index 67cacde..ee07af0 100644 --- a/README.md +++ b/README.md @@ -6,11 +6,11 @@ We published an academic paper to summary our work "[Fawkes: Protecting Personal ### BEFORE YOU RUN OUR CODE + +If you would like to use Fawkes to protect your images, please check out our binary implementation on the [website](http://sandlab.cs.uchicago.edu/fawkes/#code). + If you are a developer or researcher planning to customize and modify on our existing code. Please refer to [fawkes_dev](https://github.com/Shawn-Shan/fawkes/tree/master/fawkes_dev). -### How to protect my image - - ### Citation ``` @inproceedings{shan2020fawkes, diff --git a/fawkes.egg-info/PKG-INFO b/fawkes.egg-info/PKG-INFO deleted file mode 100644 index 20efb8e..0000000 --- a/fawkes.egg-info/PKG-INFO +++ /dev/null @@ -1,71 +0,0 @@ -Metadata-Version: 2.1 -Name: fawkes -Version: 0.0.1 -Summary: Fawkes protect user privacy -Home-page: https://github.com/Shawn-Shan/fawkes -Author: Shawn Shan -Author-email: shansixiong@cs.uchicago.edu -License: UNKNOWN -Description: # Fawkes - Code implementation of the paper "[Fawkes: Protecting Personal Privacy against Unauthorized Deep Learning Models](https://arxiv.org/pdf/2002.08327.pdf)", at *USENIX Security 2020*. - - ### BEFORE YOU RUN OUR CODE - We appreciate your interest in our work and for trying out our code. We've noticed several cases where incorrect configuration leads to poor performances of protection. If you also observe low detection performance far away from what we presented in the paper, please feel free to open an issue in this repo or contact any of the authors directly. We are more than happy to help you debug your experiment and find out the correct configuration. - - ### ABOUT - - This repository contains code implementation of the paper "[Fawkes: Protecting Personal Privacy against Unauthorized Deep Learning Models](https://arxiv.org/pdf/2002.08327.pdf)", at *USENIX Security 2020*. 
- - ### DEPENDENCIES - - Our code is implemented and tested on Keras with TensorFlow backend. Following packages are used by our code. - - - `keras==2.3.1` - - `numpy==1.18.4` - - `tensorflow-gpu==1.13.1` - - Our code is tested on `Python 3.6.8` - - ### HOWTO - - #### Download and Config Datasets - The first step is to download several datasets for protection and target selection. - 1. Download the following dataset to your local machine. After downloading the datasets, restructure it the same way as the FaceScrub dataset downloaded. - - FaceScrub -- used for protection evaluation (link) - - VGGFace1 -- used for target select (link) - - VGGFace2 -- used for target select (link) - - WebFace -- used for target select (link) - - 2. Config datasets - open `fawkes/config.py` and update the `DATASETS` dictionary with the path to each dataset. Then run `python fawkes/config.py`. Every time the datasets are updated or moved, remember to rerun the command with the updated path. - - 3. Calculate embeddings using feature extractor. - Run `python3 fawkes/prepare_feature_extractor.py --candidate-datasets scrub vggface1 vggface2 webface`. This will calculate and cache the embeddings using the default feature extractor we provide. To use a customized feature extractor, please look at the Advance section at the end. - - #### Generate Cloak for Images - To generate cloak, run - `python3 fawkes/protection.py --gpu 0 --dataset scrub --feature-extractor webface_dense_robust_extract` - For more information about the detailed parameters, please read `fawkes/protection.py`. - The code will output a directory in `results/` with `cloak_data.p` inside. You can check the cloaked images or inspect the changes in `this notebook`. - - #### Evaluate Cloak Effectiveness - To evaluate the cloak, run `python3 fawkes/eval_cloak.py --gpu 0 --cloak_data PATH-TO-RESULT-DIRECTORY --transfer_model vggface2_inception_extract`. - - The code will print out the tracker model accuracy on uncloaked/original test images of the protected user, which should be close to 0. - - - ### Citation - ``` - @inproceedings{shan2020fawkes, - title={Fawkes: Protecting Personal Privacy against Unauthorized Deep Learning Models}, - author={Shan, Shawn and Wenger, Emily and Zhang, Jiayun and Li, Huiying and Zheng, Haitao and Zhao, Ben Y}, - booktitle="Proc. 
of USENIX Security", - year={2020} - } - ``` -Platform: UNKNOWN -Classifier: Programming Language :: Python :: 3 -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Requires-Python: >=3.5 -Description-Content-Type: text/markdown diff --git a/fawkes.egg-info/SOURCES.txt b/fawkes.egg-info/SOURCES.txt deleted file mode 100644 index 2868117..0000000 --- a/fawkes.egg-info/SOURCES.txt +++ /dev/null @@ -1,10 +0,0 @@ -README.md -setup.py -fawkes/__init__.py -fawkes/differentiator.py -fawkes/protection.py -fawkes/utils.py -fawkes.egg-info/PKG-INFO -fawkes.egg-info/SOURCES.txt -fawkes.egg-info/dependency_links.txt -fawkes.egg-info/top_level.txt \ No newline at end of file diff --git a/fawkes.egg-info/dependency_links.txt b/fawkes.egg-info/dependency_links.txt deleted file mode 100644 index 8b13789..0000000 --- a/fawkes.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/fawkes.egg-info/top_level.txt b/fawkes.egg-info/top_level.txt deleted file mode 100644 index 83397e2..0000000 --- a/fawkes.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -fawkes diff --git a/fawkes/Untitled.ipynb.REMOVED.git-id b/fawkes/Untitled.ipynb.REMOVED.git-id deleted file mode 100644 index c26ad05..0000000 --- a/fawkes/Untitled.ipynb.REMOVED.git-id +++ /dev/null @@ -1 +0,0 @@ -58d500da850206b845bdd0150fa182a0ff8c50f0 \ No newline at end of file diff --git a/fawkes/__init__.py b/fawkes/__init__.py index 1c4f29b..098d7fa 100644 --- a/fawkes/__init__.py +++ b/fawkes/__init__.py @@ -4,21 +4,17 @@ # @Link : https://www.shawnshan.com/ -__version__ = '0.0.2' +__version__ = '0.0.5' +from .detect_faces import create_mtcnn, run_detect_face from .differentiator import FawkesMaskGeneration -from .utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, \ - Faces from .protection import main -import logging -import sys -import os -logging.getLogger('tensorflow').disabled = True - +from .utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, get_file __all__ = ( - '__version__', + '__version__', 'create_mtcnn', 'run_detect_face', 'FawkesMaskGeneration', 'load_extractor', 'init_gpu', - 'select_target_label', 'dump_image', 'reverse_process_cloaked', 'Faces', 'main' -) \ No newline at end of file + 'select_target_label', 'dump_image', 'reverse_process_cloaked', + 'Faces', 'get_file', 'main', +) diff --git a/fawkes/__pycache__/align_face.cpython-36.pyc b/fawkes/__pycache__/align_face.cpython-36.pyc index ab5f8cde41137e3f251767ffaa402e569c767f3e..294bd92dff6223b91c0192296474e15b1f696fdb 100644 GIT binary patch delta 492 zcmZ8dJxjzu5Y29OlQnne`Gq#xSeaTxP*Jfzuh1$WA!gP?z2sz)Lo7svtL%>8zp%3s z|A_1#kgIIP%Gpy;9C&Zu3_D-@Iej-1tTq|}ND;S!VX%A?F>ru)dFDoLCL+Nc&CX=x zfeT_lI+h^88#s{7!fJGC)4xDguTQ#6_-4BXG<;;_s^8Qa|aQc5;o)rx$+jZrTx8BO2LtutC%zkbUg@2xm8B zSf@-lYeWvG87@$OZguNDGgEE&uk-)blj9k4)rva0W{TscE#|mH?l26D%=_8J6;4qY zc_+!`xh~Kifi95|-ChD{zv6q&qE)uTpG$tvQCnFL`dz(k4=L=HicN8k1+May`~n0E BXm0=j delta 467 zcmZ8dJ4?hs5Z+gk%}YvMCgxqeNT%2!H|FfXV#Q`#T{a!bds`2dPY5Cxm@Qb9m0v)(PE-j!5s&-(zh$U rb!B9i>F`Q5G-v%mER_C&S^pKywDQjdvop$9-t-!nQxiLsTio~tyHR2f diff --git a/fawkes/__pycache__/detect_face.cpython-36.pyc b/fawkes/__pycache__/detect_face.cpython-36.pyc deleted file mode 100644 index 8ee42f7ecc7e8feb24fa958478aaa93a7249fbca..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 22110 zcmdUX3v?XUdEU-r-wR*~f&d7#rJGXNG?>@MyM zND!Izi*lsngf9~-+Z-z-%P8xQL2_U 
[GIT binary patch data omitted — base85 deltas for the tracked compiled caches under fawkes/__pycache__/ (the deleted detect_face.cpython-36.pyc blob and the differentiator/utils .pyc updates); the affected files and sizes are listed in the summary at the top of the patch]
zH><8CC;8&QsJF;KE(-A@RRRB3rGXE9>wf-^KwbRsrI=W zEs&Ny)%6~ce3?CrVn_JV>Y7F@jF22DmIb__xry^x*zpK&n#ah};wq?ug_le}VeqPdU(+Q{S905Xf1LMv+tx0z zaPstq$7*i;psy>u3R0|>Zw&^*6+jx$7NGaa1?=L2f|fvfVEsiXh>-*OAip)Z(ANP$(ZyW=#l(*C zQ^B>}H-ms_jnjCR9Y9$Ny9M=bMc~M=F(=YL1b|}Ej94bVQ)BE0$ow%1SlAsrHm|$& z611)YM5%Aj!oUX zz(rPyCNbkbdj|B`?-9@iwgbSlLa(H(4#&8Wkj1pqxDnSigPla?IDfOD?t&Puj|GbB zS3-}UOB}GC6|j!5mwB-9A~?&}G)Bll{@uoEPZs56Aez=%($(OU-l{afT7pQ{$RwM^N-QRlM{(v+^Q#g9p#FXP^Qgjv`eI zE7x@3-eRe2lC3~x?Do@rg0E>>T|I^_lmSzMj%|OqF(=l?44~MssHT=bJ zm2Dl!9pE2?7cKdksG8Q~cxw2IFq+D6DlwSR*h`>g+TcQhnu+qOnlFNuy1#iH`5S+| zIY4&vkDG(!4EMLh8$?$j>=jT5*-Sg!C^VHYW;y)<%s?>jZ230%KCg}V$yt7JO*E9H&X*>|>zVhdjJ+L%kqB!ZBJgD-!WcVk8qcG$>>^hI%5$v^)7v7gm#V zQ^yt_B@oD+i&hsex+JRA&cKWtkMg$`(WMv#F$)C zRn00@Y$3%XcowNM2=^i^Luf(RhTxVa06ziJRNxrkcUdL!@p!Ul5e5-N)X7|2s#XAq6^kW^lnT5y{=!oK1X3Znrw3{- zj0+HPg8*k$h`>MK<5VwB$?P?F0h9f!C@Ef!RXsh-f@|Q0R|&*RKKMB+bOrkl$g{sA zV4p5J{ytKl1rQ@3YFhN2I;%&e288)MzpS(5cAWkV!ea<~_#c*?PYV3KWt-tMa_RC& zn;Orgl0&h9ctT@d6!8I=ReEYHlFf~3dSrxUGO7Lu({_w#x&Z;cXL)2|6eaf}+=Xxm zLDXC`Qil;9LO66S qss;;uIm_@=x-F{R;V)U~P*k7lRy_{*scmYFYI8Uo)%>?z^Zo~RB*ka| diff --git a/fawkes/align_face.py b/fawkes/align_face.py index 0d7aa96..59e52f2 100644 --- a/fawkes/align_face.py +++ b/fawkes/align_face.py @@ -1,7 +1,6 @@ -from .detect_face import detect_face, create_mtcnn import numpy as np +from fawkes import create_mtcnn, run_detect_face -# modify the default parameters of np.load np_load_old = np.load np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k) @@ -30,7 +29,7 @@ def align(orig_img, aligner, margin=0.8, detect_multiple_faces=True): orig_img = to_rgb(orig_img) orig_img = orig_img[:, :, 0:3] - bounding_boxes, _ = detect_face(orig_img, minsize, pnet, rnet, onet, threshold, factor) + bounding_boxes, _ = run_detect_face(orig_img, minsize, pnet, rnet, onet, threshold, factor) nrof_faces = bounding_boxes.shape[0] if nrof_faces > 0: det = bounding_boxes[:, 0:4] @@ -66,14 +65,6 @@ def align(orig_img, aligner, margin=0.8, detect_multiple_faces=True): cropped = orig_img[bb[1]:bb[3], bb[0]:bb[2], :] cropped_arr.append(cropped) bounding_boxes_arr.append([bb[0], bb[1], bb[2], bb[3]]) - # scaled = misc.imresize(cropped, (image_size, image_size), interp='bilinear') return cropped_arr, bounding_boxes_arr else: return None -# -# if __name__ == '__main__': -# orig_img = misc.imread('orig_img.jpeg') -# cropped_arr, bounding_boxes_arr = align(orig_img) -# misc.imsave('test_output.jpeg', cropped_arr[0]) -# print(bounding_boxes_arr) -# diff --git a/fawkes/detect_face.py b/fawkes/detect_faces.py similarity index 93% rename from fawkes/detect_face.py rename to fawkes/detect_faces.py index dff56c0..9d6f317 100644 --- a/fawkes/detect_face.py +++ b/fawkes/detect_faces.py @@ -1,38 +1,15 @@ """ Tensorflow implementation of the face detection / alignment algorithm found at https://github.com/kpzhang93/MTCNN_face_detection_alignment """ -# MIT License -# -# Copyright (c) 2016 David Sandberg -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +import gzip import os +import pickle -import cv2 import numpy as np import tensorflow as tf from six import string_types, iteritems +from skimage.transform import resize def layer(op): @@ -78,13 +55,12 @@ class Network(object): """Construct the network. """ raise NotImplementedError('Must be implemented by the subclass.') - def load(self, data_path, session, ignore_missing=False): + def load(self, data_dict, session, ignore_missing=False): """Load network weights. data_path: The path to the numpy-serialized network weights session: The current TensorFlow session ignore_missing: If true, serialized weights for missing layers are ignored. """ - data_dict = np.load(data_path, encoding='latin1').item() # pylint: disable=no-member for op_name in data_dict: with tf.variable_scope(op_name, reuse=True): @@ -280,21 +256,27 @@ class ONet(Network): def create_mtcnn(sess, model_path): - if not model_path: - model_path, _ = os.path.split(os.path.realpath(__file__)) + model_dir = os.path.join(os.path.expanduser('~'), '.fawkes') + os.makedirs(model_dir, exist_ok=True) + + fp = gzip.open(os.path.join(model_dir, "mtcnn.p.gz"), 'rb') + dnet_weights = pickle.load(fp) + fp.close() with tf.variable_scope('pnet'): data = tf.placeholder(tf.float32, (None, None, None, 3), 'input') pnet = PNet({'data': data}) - pnet.load(os.path.join(model_path, 'weights/det1.npy'), sess) + + # data_dict = np.load(data_path, encoding='latin1').item() # pylint: disable=no-member + pnet.load(dnet_weights[0], sess) with tf.variable_scope('rnet'): data = tf.placeholder(tf.float32, (None, 24, 24, 3), 'input') rnet = RNet({'data': data}) - rnet.load(os.path.join(model_path, 'weights/det2.npy'), sess) + rnet.load(dnet_weights[1], sess) with tf.variable_scope('onet'): data = tf.placeholder(tf.float32, (None, 48, 48, 3), 'input') onet = ONet({'data': data}) - onet.load(os.path.join(model_path, 'weights/det3.npy'), sess) + onet.load(dnet_weights[2], sess) pnet_fun = lambda img: sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0': img}) rnet_fun = lambda img: sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0': img}) @@ -303,7 +285,7 @@ def create_mtcnn(sess, model_path): return pnet_fun, rnet_fun, onet_fun -def detect_face(img, minsize, pnet, rnet, onet, threshold, factor): +def run_detect_face(img, minsize, pnet, rnet, onet, threshold, factor): """Detects faces in an image, and returns bounding boxes and points for them. 
img: input image minsize: minimum faces' size @@ -367,11 +349,15 @@ def detect_face(img, minsize, pnet, rnet, onet, threshold, factor): tempimg = np.zeros((24, 24, 3, numbox)) for k in range(0, numbox): tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) + # try: tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :] + # except ValueError: + # continue if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = imresample(tmp, (24, 24)) else: return np.empty() + tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = rnet(tempimg1) @@ -776,18 +762,20 @@ def rerec(bboxA): def imresample(img, sz): - im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) # @UndefinedVariable + from keras.preprocessing import image + # im_data = resize(img, (sz[0], sz[1])) + im_data = image.array_to_img(img).resize((sz[1], sz[0])) + im_data = image.img_to_array(im_data) return im_data - # This method is kept for debugging purpose -# h=img.shape[0] -# w=img.shape[1] -# hs, ws = sz -# dx = float(w) / ws -# dy = float(h) / hs -# im_data = np.zeros((hs,ws,3)) -# for a1 in range(0,hs): -# for a2 in range(0,ws): -# for a3 in range(0,3): -# im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3] +# def imresample(img, sz): +# import cv2 +# im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) # @UndefinedVariable # return im_data + + +def to_rgb(img): + w, h = img.shape + ret = np.empty((w, h, 3), dtype=np.uint8) + ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img + return ret diff --git a/fawkes/differentiator.py b/fawkes/differentiator.py index 17e6b2c..29a9a02 100644 --- a/fawkes/differentiator.py +++ b/fawkes/differentiator.py @@ -11,7 +11,7 @@ from decimal import Decimal import numpy as np import tensorflow as tf -from .utils import preprocess, reverse_preprocess +from fawkes.utils import preprocess, reverse_preprocess class FawkesMaskGeneration: diff --git a/fawkes/protection.py b/fawkes/protection.py index 1072807..209985f 100644 --- a/fawkes/protection.py +++ b/fawkes/protection.py @@ -8,13 +8,16 @@ import os import random import sys import time +import tensorflow as tf +import logging +logging.getLogger('tensorflow').disabled = True import numpy as np - -from .differentiator import FawkesMaskGeneration -from .utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, \ +from fawkes.differentiator import FawkesMaskGeneration +from fawkes.utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, \ Faces + random.seed(12243) np.random.seed(122412) @@ -63,11 +66,11 @@ def main(*argv): parser.add_argument('--directory', '-d', type=str, help='directory that contain images for cloaking', default='imgs/') - parser.add_argument('--gpu', type=str, + parser.add_argument('--gpu', '-g', type=str, help='GPU id', default='0') - parser.add_argument('--mode', type=str, - help='cloak generation mode', default='high') + parser.add_argument('--mode', '-m', type=str, + help='cloak generation mode', default='mid') parser.add_argument('--feature-extractor', type=str, help="name of the feature extractor used for optimization", default="high_extract") @@ -88,12 +91,12 @@ def main(*argv): if args.mode == 'low': args.feature_extractor = "high_extract" args.th = 0.003 - args.max_step = 100 - args.lr = 15 + args.max_step = 20 + args.lr = 20 elif args.mode == 'mid': args.feature_extractor = "high_extract" - 
args.th = 0.005 - args.max_step = 100 + args.th = 0.004 + args.max_step = 50 args.lr = 15 elif args.mode == 'high': args.feature_extractor = "high_extract" @@ -101,10 +104,14 @@ def main(*argv): args.max_step = 100 args.lr = 10 elif args.mode == 'ultra': + if not tf.test.is_gpu_available(): + print("Please enable GPU for ultra setting...") + sys.exit(1) + # args.feature_extractor = ["high_extract", 'high2_extract'] args.feature_extractor = "high_extract" - args.th = 0.01 - args.max_step = 1000 - args.lr = 5 + args.th = 0.015 + args.max_step = 2000 + args.lr = 8 elif args.mode == 'custom': pass else: @@ -115,20 +122,23 @@ def main(*argv): args.format = 'jpeg' sess = init_gpu(args.gpu) - fs_names = [args.feature_extractor] - feature_extractors_ls = [load_extractor(name) for name in fs_names] image_paths = glob.glob(os.path.join(args.directory, "*")) image_paths = [path for path in image_paths if "_cloaked" not in path.split("/")[-1]] if not image_paths: - print("No images in the directory") - exit(1) + raise Exception("No images in the directory") faces = Faces(image_paths, sess, verbose=1) orginal_images = faces.cropped_faces orginal_images = np.array(orginal_images) + fs_names = [args.feature_extractor] + if isinstance(args.feature_extractor, list): + fs_names = args.feature_extractor + + feature_extractors_ls = [load_extractor(name) for name in fs_names] + if args.separate_target: target_embedding = [] for org_img in orginal_images: @@ -154,6 +164,7 @@ def main(*argv): elapsed_time = time.time() - start_time print('attack cost %f s' % (elapsed_time)) + print("Done!") if __name__ == '__main__': diff --git a/fawkes/utils.py b/fawkes/utils.py index c769e31..3e1b7d0 100644 --- a/fawkes/utils.py +++ b/fawkes/utils.py @@ -27,8 +27,7 @@ from keras.preprocessing import image from skimage.transform import resize from sklearn.metrics import pairwise_distances - -from .align_face import align, aligner +from fawkes.align_face import align, aligner from six.moves.urllib.request import urlopen if sys.version_info[0] == 2: @@ -89,6 +88,12 @@ def load_image(path): class Faces(object): def __init__(self, image_paths, sess, verbose=1): + model_dir = os.path.join(os.path.expanduser('~'), '.fawkes') + if not os.path.exists(os.path.join(model_dir, "mtcnn.p.gz")): + os.makedirs(model_dir, exist_ok=True) + get_file("mtcnn.p.gz", "http://sandlab.cs.uchicago.edu/fawkes/files/mtcnn.p.gz", cache_dir=model_dir, + cache_subdir='') + self.verbose = verbose self.aligner = aligner(sess) self.org_faces = [] @@ -102,6 +107,10 @@ class Faces(object): cur_img = load_image(p) self.org_faces.append(cur_img) align_img = align(cur_img, self.aligner, margin=0.7) + if align_img is None: + print("Find 0 face(s) in {}".format(p.split("/")[-1])) + continue + cur_faces = align_img[0] cur_shapes = [f.shape[:-1] for f in cur_faces] @@ -327,6 +336,7 @@ def load_extractor(name): if os.path.exists(model_file): model = keras.models.load_model(model_file) else: + print("Download models...") get_file("{}.h5".format(name), "http://sandlab.cs.uchicago.edu/fawkes/files/{}.h5".format(name), cache_dir=model_dir, cache_subdir='') model = keras.models.load_model(model_file) @@ -568,152 +578,3 @@ def _makedirs_exist_ok(datadir): raise else: os.makedirs(datadir, exist_ok=True) # pylint: disable=unexpected-keyword-arg - -# class CloakData(object): -# def __init__(self, protect_directory=None, img_shape=(224, 224)): -# -# self.img_shape = img_shape -# # self.train_data_dir, self.test_data_dir, self.number_classes, self.number_samples = 
get_dataset_path(dataset) -# # self.all_labels = sorted(list(os.listdir(self.train_data_dir))) -# self.protect_directory = protect_directory -# -# self.protect_X = self.load_label_data(self.protect_directory) -# -# self.cloaked_protect_train_X = None -# -# self.label2path_train, self.label2path_test, self.path2idx = self.build_data_mapping() -# self.all_training_path = self.get_all_data_path(self.label2path_train) -# self.all_test_path = self.get_all_data_path(self.label2path_test) -# self.protect_class_path = self.get_class_image_files(os.path.join(self.train_data_dir, self.protect_class)) -# -# def get_class_image_files(self, path): -# return [os.path.join(path, f) for f in os.listdir(path)] -# -# def extractor_ls_predict(self, feature_extractors_ls, X): -# feature_ls = [] -# for extractor in feature_extractors_ls: -# cur_features = extractor.predict(X) -# feature_ls.append(cur_features) -# concated_feature_ls = np.concatenate(feature_ls, axis=1) -# concated_feature_ls = normalize(concated_feature_ls) -# return concated_feature_ls -# -# def load_embeddings(self, feature_extractors_names): -# dictionaries = [] -# for extractor_name in feature_extractors_names: -# path2emb = pickle.load(open("../feature_extractors/embeddings/{}_emb_norm.p".format(extractor_name), "rb")) -# dictionaries.append(path2emb) -# -# merge_dict = {} -# for k in dictionaries[0].keys(): -# cur_emb = [dic[k] for dic in dictionaries] -# merge_dict[k] = np.concatenate(cur_emb) -# return merge_dict -# -# def select_target_label(self, feature_extractors_ls, feature_extractors_names, metric='l2'): -# original_feature_x = self.extractor_ls_predict(feature_extractors_ls, self.protect_train_X) -# -# path2emb = self.load_embeddings(feature_extractors_names) -# items = list(path2emb.items()) -# paths = [p[0] for p in items] -# embs = [p[1] for p in items] -# embs = np.array(embs) -# -# pair_dist = pairwise_distances(original_feature_x, embs, metric) -# max_sum = np.min(pair_dist, axis=0) -# sorted_idx = np.argsort(max_sum)[::-1] -# -# highest_num = 0 -# paired_target_X = None -# final_target_class_path = None -# for idx in sorted_idx[:5]: -# target_class_path = paths[idx] -# cur_target_X = self.load_dir(target_class_path) -# cur_target_X = np.concatenate([cur_target_X, cur_target_X, cur_target_X]) -# cur_tot_sum, cur_paired_target_X = self.calculate_dist_score(self.protect_train_X, cur_target_X, -# feature_extractors_ls, -# metric=metric) -# if cur_tot_sum > highest_num: -# highest_num = cur_tot_sum -# paired_target_X = cur_paired_target_X -# final_target_class_path = target_class_path -# -# np.random.shuffle(paired_target_X) -# return final_target_class_path, paired_target_X -# -# def calculate_dist_score(self, a, b, feature_extractors_ls, metric='l2'): -# features1 = self.extractor_ls_predict(feature_extractors_ls, a) -# features2 = self.extractor_ls_predict(feature_extractors_ls, b) -# -# pair_cos = pairwise_distances(features1, features2, metric) -# max_sum = np.min(pair_cos, axis=0) -# max_sum_arg = np.argsort(max_sum)[::-1] -# max_sum_arg = max_sum_arg[:len(a)] -# max_sum = [max_sum[i] for i in max_sum_arg] -# paired_target_X = [b[j] for j in max_sum_arg] -# paired_target_X = np.array(paired_target_X) -# return np.min(max_sum), paired_target_X -# -# def get_all_data_path(self, label2path): -# all_paths = [] -# for k, v in label2path.items(): -# cur_all_paths = [os.path.join(k, cur_p) for cur_p in v] -# all_paths.extend(cur_all_paths) -# return all_paths -# -# def load_label_data(self, label): -# train_label_path = 
os.path.join(self.train_data_dir, label) -# test_label_path = os.path.join(self.test_data_dir, label) -# train_X = self.load_dir(train_label_path) -# test_X = self.load_dir(test_label_path) -# return train_X, test_X -# -# def load_dir(self, path): -# assert os.path.exists(path) -# x_ls = [] -# for file in os.listdir(path): -# cur_path = os.path.join(path, file) -# im = image.load_img(cur_path, target_size=self.img_shape) -# im = image.img_to_array(im) -# x_ls.append(im) -# raw_x = np.array(x_ls) -# return preprocess_input(raw_x) -# -# def build_data_mapping(self): -# label2path_train = {} -# label2path_test = {} -# idx = 0 -# path2idx = {} -# for label_name in self.all_labels: -# full_path_train = os.path.join(self.train_data_dir, label_name) -# full_path_test = os.path.join(self.test_data_dir, label_name) -# label2path_train[full_path_train] = list(os.listdir(full_path_train)) -# label2path_test[full_path_test] = list(os.listdir(full_path_test)) -# for img_file in os.listdir(full_path_train): -# path2idx[os.path.join(full_path_train, img_file)] = idx -# for img_file in os.listdir(full_path_test): -# path2idx[os.path.join(full_path_test, img_file)] = idx -# idx += 1 -# return label2path_train, label2path_test, path2idx -# -# def generate_data_post_cloak(self, sybil=False): -# assert self.cloaked_protect_train_X is not None -# while True: -# batch_X = [] -# batch_Y = [] -# cur_batch_path = random.sample(self.all_training_path, 32) -# for p in cur_batch_path: -# cur_y = self.path2idx[p] -# if p in self.protect_class_path: -# cur_x = random.choice(self.cloaked_protect_train_X) -# elif sybil and (p in self.sybil_class): -# cur_x = random.choice(self.cloaked_sybil_train_X) -# else: -# im = image.load_img(p, target_size=self.img_shape) -# im = image.img_to_array(im) -# cur_x = preprocess_input(im) -# batch_X.append(cur_x) -# batch_Y.append(cur_y) -# batch_X = np.array(batch_X) -# batch_Y = to_categorical(np.array(batch_Y), num_classes=self.number_classes) -# yield batch_X, batch_Y diff --git a/fawkes_dev/azure.py b/fawkes_dev/azure.py index 79312f7..2aee371 100644 --- a/fawkes_dev/azure.py +++ b/fawkes_dev/azure.py @@ -46,7 +46,8 @@ def detect_face(image_url): conn.request("POST", "/face/v1.0/detect?%s" % params, body, headers) response = conn.getresponse() data = json.loads(response.read()) - print(data) + # + # print(data) conn.close() return data[0]["faceId"] @@ -229,7 +230,6 @@ def eval(original_faceIds, personGroupId, protect_personId): response = conn.getresponse() data = json.loads(response.read()) conn.close() - face = data[0] print(face) if len(face["candidates"]) and face["candidates"][0]["personId"] == protect_personId: @@ -270,8 +270,8 @@ def get_trainStatus(personGroupId): conn.request("GET", "/face/v1.0/persongroups/{}/training?%s".format(personGroupId) % params, body, headers) response = conn.getresponse() data = response.read() - print(data) conn.close() + return data def test_cloak(): @@ -279,12 +279,12 @@ def test_cloak(): total_idx = range(0, 82) TRAIN_RANGE = random.sample(total_idx, NUM_TRAIN) - TEST_RANGE = TRAIN_RANGE + TEST_RANGE = random.sample([i for i in total_idx if i not in TRAIN_RANGE], 20) personGroupId = 'all' # delete_personGroup(personGroupId) - create_personGroupId(personGroupId, personGroupId) + # create_personGroupId(personGroupId, personGroupId) with open("protect_personId.txt", 'r') as f: protect_personId = f.read() @@ -305,22 +305,25 @@ def test_cloak(): print("Unable to add {}-th image of protect person".format(idx)) # add other people - for idx_person in 
range(500): - personId = create_personId(personGroupId, str(idx_person)) - print("Created personId: {}".format(idx_person)) - for idx_image in range(10): - image_url = "http://sandlab.cs.uchicago.edu/fawkes/files/target_data/{}/{}.jpg".format( - idx_person, idx_image) - r = add_persistedFaceId(personGroupId, personId, image_url) - if r is not None: - print("Added {}".format(idx_image)) - else: - print("Unable to add {}-th image".format(idx_image)) + # for idx_person in range(1300, 5000): + # personId = create_personId(personGroupId, str(idx_person)) + # print("Created personId: {}".format(idx_person)) + # for idx_image in range(10): + # image_url = "http://sandlab.cs.uchicago.edu/fawkes/files/target_data/{}/{}.jpg".format( + # idx_person, idx_image) + # r = add_persistedFaceId(personGroupId, personId, image_url) + # if r is not None: + # print("Added {}".format(idx_image)) + # else: + # print("Unable to add {}-th image".format(idx_image)) # train model based on personGroup + train_personGroup(personGroupId) - time.sleep(4) - get_trainStatus(personGroupId) + + while json.loads(get_trainStatus(personGroupId))['status'] != 'succeeded': + time.sleep(2) + # list_personGroupPerson(personGroupId) # test original image diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index e27e028..0000000 --- a/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -scikit-image -argparse -glob -numpy==1.18.4 \ No newline at end of file diff --git a/setup.py b/setup.py index d0532b2..fbec16e 100644 --- a/setup.py +++ b/setup.py @@ -75,13 +75,15 @@ class DeployCommand(Command): setup_requires = [] install_requires = [ - 'numpy>=1.16.4', - 'tensorflow>=1.13.1', + 'numpy==1.16.4', + # 'tensorflow-gpu>=1.13.1, <=1.14.0', + 'tensorflow>=1.13.1, <=1.14.0', 'argparse', 'keras==2.2.5', 'scikit-image', 'pillow>=7.0.0', 'opencv-python>=4.2.0.34', + 'sklearn', ] setup(
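
As a quick orientation for anyone reviewing this patch, here is a minimal sketch (not part of the commit) of how the refactored pieces above fit together: `init_gpu` and `Faces` are re-exported from `fawkes/__init__.py`, `Faces` pulls `mtcnn.p.gz` into `~/.fawkes` on first use (see the `utils.py` hunk), and `protection.py` feeds the resulting crops into the cloak optimization. The `imgs/` directory below mirrors the `--directory` default in `protection.py`; everything else is an assumption about a local setup, not something the patch itself guarantees.

```python
# Sketch only: exercising the face-detection/cropping path this patch refactors.
# Assumes the fawkes package from this commit is installed and ./imgs/ holds a
# few photos; Faces() downloads mtcnn.p.gz into ~/.fawkes the first time it runs.
import glob

from fawkes import Faces, init_gpu

sess = init_gpu("0")                          # same GPU helper protection.main() uses
image_paths = glob.glob("imgs/*")             # mirrors the globbing in protection.py
image_paths = [p for p in image_paths if "_cloaked" not in p.split("/")[-1]]

faces = Faces(image_paths, sess, verbose=1)   # aligns + crops with the MTCNN weights
print("cropped faces:", len(faces.cropped_faces))
```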