From 351c428428e3a8a4140cc9021730a874fa6be356 Mon Sep 17 00:00:00 2001
From: Jamie
Date: Tue, 6 Feb 2018 18:26:03 +0000
Subject: [PATCH] initial release

---
 make_patch.py             | 286 ++++++++++++++++++++++++++++++++++++++
 pretrained_models_pytorch |   1 +
 utils.py                  | 221 +++++++++++++++++++++++++++++
 3 files changed, 508 insertions(+)
 create mode 100755 make_patch.py
 create mode 160000 pretrained_models_pytorch
 create mode 100644 utils.py

diff --git a/make_patch.py b/make_patch.py
new file mode 100755
index 0000000..8eeb5d2
--- /dev/null
+++ b/make_patch.py
@@ -0,0 +1,286 @@
+import argparse
+import os
+import random
+import numpy as np
+
+import torch
+import torch.nn as nn
+import torch.nn.parallel
+import torch.backends.cudnn as cudnn
+import torch.optim as optim
+import torch.utils.data
+import torch.nn.functional as F
+import torchvision.datasets as dset
+import torchvision.transforms as transforms
+import torchvision.utils as vutils
+from torch.autograd import Variable
+from torch.utils.data.sampler import SubsetRandomSampler
+
+from pretrained_models_pytorch import pretrainedmodels
+
+from utils import *
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
+parser.add_argument('--epochs', type=int, default=20, help='number of epochs to train for')
+parser.add_argument('--cuda', action='store_true', help='enables cuda')
+
+parser.add_argument('--target', type=int, default=859, help='the target class: 859 == toaster')
+parser.add_argument('--conf_target', type=float, default=0.9, help='stop the attack on an image once the target classifier assigns at least this probability to the target class')
+
+parser.add_argument('--max_count', type=int, default=1000, help='max number of iterations to find an adversarial example')
+parser.add_argument('--patch_type', type=str, default='circle', help='patch type: circle or square')
+parser.add_argument('--patch_size', type=float, default=0.05, help='patch size. E.g. 0.05 ~= 5%% of image')
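+# NOTE: patch_size is a fraction of the total image *area*, not of its width
+# (see init_patch_circle / init_patch_square in utils.py).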
+
+parser.add_argument('--train_size', type=int, default=2000, help='number of training images')
+parser.add_argument('--test_size', type=int, default=2000, help='number of test images')
+
+parser.add_argument('--image_size', type=int, default=299, help='the height / width of the input image to network')
+
+parser.add_argument('--plot_all', type=int, default=1, help='1 == plot all successful adversarial images')
+
+parser.add_argument('--netClassifier', default='inceptionv3', help="The target classifier")
+
+parser.add_argument('--outf', default='./logs', help='folder to output images and model checkpoints')
+parser.add_argument('--manualSeed', type=int, default=1338, help='manual seed')
+
+opt = parser.parse_args()
+print(opt)
+
+try:
+    os.makedirs(opt.outf)
+except OSError:
+    pass
+
+if opt.manualSeed is None:
+    opt.manualSeed = random.randint(1, 10000)
+print("Random Seed: ", opt.manualSeed)
+random.seed(opt.manualSeed)
+np.random.seed(opt.manualSeed)
+torch.manual_seed(opt.manualSeed)
+if opt.cuda:
+    torch.cuda.manual_seed_all(opt.manualSeed)
+
+cudnn.benchmark = True
+
+if torch.cuda.is_available() and not opt.cuda:
+    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
+
+target = opt.target
+conf_target = opt.conf_target
+max_count = opt.max_count
+patch_type = opt.patch_type
+patch_size = opt.patch_size
+image_size = opt.image_size
+train_size = opt.train_size
+test_size = opt.test_size
+plot_all = opt.plot_all
+
+assert train_size + test_size <= 50000, "Training set size + Test set size > Total dataset size"
+
+print("=> creating model")
+netClassifier = pretrainedmodels.__dict__[opt.netClassifier](num_classes=1000, pretrained='imagenet')
+if opt.cuda:
+    netClassifier.cuda()
+
+
+print('==> Preparing data..')
+normalize = transforms.Normalize(mean=netClassifier.mean,
+                                 std=netClassifier.std)
+idx = np.arange(50000)
+np.random.shuffle(idx)
+training_idx = idx[:train_size]
+test_idx = idx[train_size:train_size + test_size]
+
+train_loader = torch.utils.data.DataLoader(
+    dset.ImageFolder('./data/imagenetdata/val', transforms.Compose([
+        transforms.Scale(round(max(netClassifier.input_size)*1.050)),
+        transforms.CenterCrop(max(netClassifier.input_size)),
+        transforms.ToTensor(),
+        ToSpaceBGR(netClassifier.input_space=='BGR'),
+        ToRange255(max(netClassifier.input_range)==255),
+        normalize,
+    ])),
+    batch_size=1, shuffle=False, sampler=SubsetRandomSampler(training_idx),
+    num_workers=opt.workers, pin_memory=True)
+
+test_loader = torch.utils.data.DataLoader(
+    dset.ImageFolder('./data/imagenetdata/val', transforms.Compose([
+        transforms.Scale(round(max(netClassifier.input_size)*1.050)),
+        transforms.CenterCrop(max(netClassifier.input_size)),
+        transforms.ToTensor(),
+        ToSpaceBGR(netClassifier.input_space=='BGR'),
+        ToRange255(max(netClassifier.input_range)==255),
+        normalize,
+    ])),
+    batch_size=1, shuffle=False, sampler=SubsetRandomSampler(test_idx),
+    num_workers=opt.workers, pin_memory=True)
+
+min_in, max_in = netClassifier.input_range[0], netClassifier.input_range[1]
+min_in, max_in = np.array([min_in, min_in, min_in]), np.array([max_in, max_in, max_in])
+mean, std = np.array(netClassifier.mean), np.array(netClassifier.std)
+min_out, max_out = np.min((min_in-mean)/std), np.max((max_in-mean)/std)
+
+
+def train(epoch, patch, patch_shape):
+    netClassifier.eval()
+    success = 0
+    total = 0
+    recover_time = 0
+    for batch_idx, (data, labels) in enumerate(train_loader):
+        if opt.cuda:
+            data = data.cuda()
+            labels = labels.cuda()
+        data, labels = Variable(data), Variable(labels)
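+        # Remainder of the loop body: skip images the classifier already gets
+        # wrong, paste the patch at a random location/rotation, optimise it with
+        # attack(), then crop the updated patch back out with submatrix().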
+
+        prediction = netClassifier(data)
+
+        # only compute adversarial examples on images that are originally classified correctly
+        if prediction.data.max(1)[1][0] != labels.data[0]:
+            continue
+
+        total += 1
+
+        # transform the patch (random rotation and location)
+        data_shape = data.data.cpu().numpy().shape
+        if patch_type == 'circle':
+            patch, mask, patch_shape = circle_transform(patch, data_shape, patch_shape, image_size)
+        elif patch_type == 'square':
+            patch, mask = square_transform(patch, data_shape, patch_shape, image_size)
+        patch, mask = torch.FloatTensor(patch), torch.FloatTensor(mask)
+        if opt.cuda:
+            patch, mask = patch.cuda(), mask.cuda()
+        patch, mask = Variable(patch), Variable(mask)
+
+        adv_x, mask, patch = attack(data, patch, mask)
+
+        adv_label = netClassifier(adv_x).data.max(1)[1][0]
+        ori_label = labels.data[0]
+
+        if adv_label == target:
+            success += 1
+
+            if plot_all == 1:
+                # plot source image
+                vutils.save_image(data.data, "./%s/%d_%d_original.png" %(opt.outf, batch_idx, ori_label), normalize=True)
+
+                # plot adversarial image
+                vutils.save_image(adv_x.data, "./%s/%d_%d_adversarial.png" %(opt.outf, batch_idx, adv_label), normalize=True)
+
+        masked_patch = torch.mul(mask, patch)
+        patch = masked_patch.data.cpu().numpy()
+        new_patch = np.zeros(patch_shape)
+        for i in range(new_patch.shape[0]):
+            for j in range(new_patch.shape[1]):
+                new_patch[i][j] = submatrix(patch[i][j])
+
+        patch = new_patch
+
+        # log to file
+        progress_bar(batch_idx, len(train_loader), "Train Patch Success: {:.3f}".format(success/total))
+
+    return patch
+
+def test(epoch, patch, patch_shape):
+    netClassifier.eval()
+    success = 0
+    total = 0
+    for batch_idx, (data, labels) in enumerate(test_loader):
+        if opt.cuda:
+            data = data.cuda()
+            labels = labels.cuda()
+        data, labels = Variable(data), Variable(labels)
+
+        prediction = netClassifier(data)
+
+        # only compute adversarial examples on images that are originally classified correctly
+        if prediction.data.max(1)[1][0] != labels.data[0]:
+            continue
+
+        total += 1
+
+        # transform the patch (random rotation and location)
+        data_shape = data.data.cpu().numpy().shape
+        if patch_type == 'circle':
+            patch, mask, patch_shape = circle_transform(patch, data_shape, patch_shape, image_size)
+        elif patch_type == 'square':
+            patch, mask = square_transform(patch, data_shape, patch_shape, image_size)
+        patch, mask = torch.FloatTensor(patch), torch.FloatTensor(mask)
+        if opt.cuda:
+            patch, mask = patch.cuda(), mask.cuda()
+        patch, mask = Variable(patch), Variable(mask)
+
+        adv_x = torch.mul((1-mask),data) + torch.mul(mask,patch)
+        adv_x = torch.clamp(adv_x, min_out, max_out)
+
+        adv_label = netClassifier(adv_x).data.max(1)[1][0]
+        ori_label = labels.data[0]
+
+        if adv_label == target:
+            success += 1
+
+        masked_patch = torch.mul(mask, patch)
+        patch = masked_patch.data.cpu().numpy()
+        new_patch = np.zeros(patch_shape)
+        for i in range(new_patch.shape[0]):
+            for j in range(new_patch.shape[1]):
+                new_patch[i][j] = submatrix(patch[i][j])
+
+        patch = new_patch
+
+        # log to file
+        progress_bar(batch_idx, len(test_loader), "Test Success: {:.3f}".format(success/total))
+
+def attack(x, patch, mask):
+    netClassifier.eval()
+
+    x_out = F.softmax(netClassifier(x))
+    target_prob = x_out.data[0][target]
+
+    adv_x = torch.mul((1-mask),x) + torch.mul(mask,patch)
+
+    count = 0
+
+    while conf_target > target_prob:
+        count += 1
+        adv_x = Variable(adv_x.data, requires_grad=True)
+        adv_out = F.log_softmax(netClassifier(adv_x))
+
+        adv_out_probs, adv_out_labels = adv_out.max(1)
+
+        Loss = -adv_out[0][target]
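+        # Loss is the negative log-probability of the target class; backprop
+        # through the frozen classifier and step the patch along the negative
+        # gradient (the mask is 1 wherever the patch is pasted).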
+        Loss.backward()
+
+        adv_grad = adv_x.grad.clone()
+
+        adv_x.grad.data.zero_()
+
+        patch -= adv_grad
+
+        adv_x = torch.mul((1-mask),x) + torch.mul(mask,patch)
+        adv_x = torch.clamp(adv_x, min_out, max_out)
+
+        out = F.softmax(netClassifier(adv_x))
+        target_prob = out.data[0][target]
+        #y_argmax_prob = out.data.max(1)[0][0]
+
+        #print(count, conf_target, target_prob, y_argmax_prob)
+
+        if count >= opt.max_count:
+            break
+
+
+    return adv_x, mask, patch
+
+
+if __name__ == '__main__':
+    if patch_type == 'circle':
+        patch, patch_shape = init_patch_circle(image_size, patch_size)
+    elif patch_type == 'square':
+        patch, patch_shape = init_patch_square(image_size, patch_size)
+    else:
+        sys.exit("Please choose a square or circle patch")
+
+    for epoch in range(1, opt.epochs + 1):
+        patch = train(epoch, patch, patch_shape)
+        test(epoch, patch, patch_shape)
diff --git a/pretrained_models_pytorch b/pretrained_models_pytorch
new file mode 160000
index 0000000..247b037
--- /dev/null
+++ b/pretrained_models_pytorch
@@ -0,0 +1 @@
+Subproject commit 247b0375abb6120ab4ef5642be59aee9d9e5542d
diff --git a/utils.py b/utils.py
new file mode 100644
index 0000000..75d54d6
--- /dev/null
+++ b/utils.py
@@ -0,0 +1,221 @@
+import os
+import sys
+import time
+import math
+import numpy as np
+
+import torch
+import torch.nn as nn
+import torch.nn.init as init
+from torch.autograd import Variable
+
+from scipy.ndimage.interpolation import rotate
+
+_, term_width = os.popen('stty size', 'r').read().split()
+term_width = int(term_width)
+
+TOTAL_BAR_LENGTH = 35.
+last_time = time.time()
+begin_time = last_time
+def progress_bar(current, total, msg=None):
+    global last_time, begin_time
+    if current == 0:
+        begin_time = time.time()  # Reset for new bar.
+
+    cur_len = int(TOTAL_BAR_LENGTH*current/total)
+    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
+
+    sys.stdout.write(' [')
+    for i in range(cur_len):
+        sys.stdout.write('=')
+    sys.stdout.write('>')
+    for i in range(rest_len):
+        sys.stdout.write('.')
+    sys.stdout.write(']')
+
+    cur_time = time.time()
+    step_time = cur_time - last_time
+    last_time = cur_time
+    tot_time = cur_time - begin_time
+
+    L = []
+    if msg:
+        L.append(' ' + msg)
+    L.append(' | Step: %s' % format_time(step_time))
+    L.append(' | Tot: %s' % format_time(tot_time))
+
+    msg = ''.join(L)
+    sys.stdout.write(msg)
+    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
+        sys.stdout.write(' ')
+
+    # Go back to the center of the bar.
+    for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
+        sys.stdout.write('\b')
+    sys.stdout.write(' %d/%d ' % (current+1, total))
+
+    if current < total-1:
+        sys.stdout.write('\r')
+    else:
+        sys.stdout.write('\n')
+    sys.stdout.flush()
+
+def format_time(seconds):
+    days = int(seconds / 3600/24)
+    seconds = seconds - days*3600*24
+    hours = int(seconds / 3600)
+    seconds = seconds - hours*3600
+    minutes = int(seconds / 60)
+    seconds = seconds - minutes*60
+    secondsf = int(seconds)
+    seconds = seconds - secondsf
+    millis = int(seconds*1000)
+
+    f = ''
+    i = 1
+    if days > 0:
+        f += str(days) + 'D'
+        i += 1
+    if hours > 0 and i <= 2:
+        f += str(hours) + 'h'
+        i += 1
+    if minutes > 0 and i <= 2:
+        f += str(minutes) + 'm'
+        i += 1
+    if secondsf > 0 and i <= 2:
+        f += str(secondsf) + 's'
+        i += 1
+    if millis > 0 and i <= 2:
+        f += str(millis) + 'ms'
+        i += 1
+    if f == '':
+        f = '0ms'
+    return f
+
+
+def submatrix(arr):
+    x, y = np.nonzero(arr)
+    # Using the smallest and largest x and y indices of nonzero elements,
+    # we can find the desired rectangular bounds.
+    # And don't forget to add 1 to the top bound to avoid the fencepost problem.
+    return arr[x.min():x.max()+1, y.min():y.max()+1]
+
+
+class ToSpaceBGR(object):
+    def __init__(self, is_bgr):
+        self.is_bgr = is_bgr
+    def __call__(self, tensor):
+        if self.is_bgr:
+            new_tensor = tensor.clone()
+            new_tensor[0] = tensor[2]
+            new_tensor[2] = tensor[0]
+            tensor = new_tensor
+        return tensor
+
+
+class ToRange255(object):
+    def __init__(self, is_255):
+        self.is_255 = is_255
+    def __call__(self, tensor):
+        if self.is_255:
+            tensor.mul_(255)
+        return tensor
+
+
+def init_patch_circle(image_size, patch_size):
+    image_size = image_size**2
+    noise_size = int(image_size*patch_size)
+    radius = int(math.sqrt(noise_size/math.pi))
+    patch = np.zeros((1, 3, radius*2, radius*2))
+    for i in range(3):
+        a = np.zeros((radius*2, radius*2))
+        cx, cy = radius, radius  # the center of the circle
+        y, x = np.ogrid[-radius: radius, -radius: radius]
+        index = x**2 + y**2 <= radius**2
+        a[cy-radius:cy+radius, cx-radius:cx+radius][index] = np.random.rand()
+        idx = np.flatnonzero((a == 0).all(axis=1))
+        a = np.delete(a, idx, axis=0)
+        patch[0][i] = np.delete(a, idx, axis=1)
+    return patch, patch.shape
+
+
+def circle_transform(patch, data_shape, patch_shape, image_size):
+    # get dummy image
+    x = np.zeros(data_shape)
+
+    # get shape
+    m_size = patch_shape[-1]
+
+    for i in range(x.shape[0]):
+
+        # random rotation
+        rot = np.random.choice(360)
+        for j in range(patch[i].shape[0]):
+            patch[i][j] = rotate(patch[i][j], angle=rot, reshape=False)
+
+        # random location
+        random_x = np.random.choice(image_size)
+        if random_x + m_size > x.shape[-1]:
+            while random_x + m_size > x.shape[-1]:
+                random_x = np.random.choice(image_size)
+        random_y = np.random.choice(image_size)
+        if random_y + m_size > x.shape[-1]:
+            while random_y + m_size > x.shape[-1]:
+                random_y = np.random.choice(image_size)
+
+        # apply patch to dummy image
+        x[i][0][random_x:random_x+patch_shape[-1], random_y:random_y+patch_shape[-1]] = patch[i][0]
+        x[i][1][random_x:random_x+patch_shape[-1], random_y:random_y+patch_shape[-1]] = patch[i][1]
+        x[i][2][random_x:random_x+patch_shape[-1], random_y:random_y+patch_shape[-1]] = patch[i][2]
+
+    mask = np.copy(x)
+    mask[mask != 0] = 1.0
+
+    return x, mask, patch.shape
+
+
+def init_patch_square(image_size, patch_size):
+    # get mask
+    image_size = image_size**2
+    noise_size = image_size*patch_size
+    noise_dim = int(noise_size**(0.5))
+    patch = np.random.rand(1,3,noise_dim,noise_dim)
+    return patch, patch.shape
+
+
+def square_transform(patch, data_shape, patch_shape, image_size):
+    # get dummy image
+    x = np.zeros(data_shape)
+
+    # get shape
+    m_size = patch_shape[-1]
+
+    for i in range(x.shape[0]):
+
+        # random rotation
+        rot = np.random.choice(4)
+        for j in range(patch[i].shape[0]):
+            patch[i][j] = np.rot90(patch[i][j], rot)
+
+        # random location
+        random_x = np.random.choice(image_size)
+        if random_x + m_size > x.shape[-1]:
+            while random_x + m_size > x.shape[-1]:
+                random_x = np.random.choice(image_size)
+        random_y = np.random.choice(image_size)
+        if random_y + m_size > x.shape[-1]:
+            while random_y + m_size > x.shape[-1]:
+                random_y = np.random.choice(image_size)
+
+        # apply patch to dummy image
+        x[i][0][random_x:random_x+patch_shape[-1], random_y:random_y+patch_shape[-1]] = patch[i][0]
+        x[i][1][random_x:random_x+patch_shape[-1], random_y:random_y+patch_shape[-1]] = patch[i][1]
+        x[i][2][random_x:random_x+patch_shape[-1], random_y:random_y+patch_shape[-1]] = patch[i][2]
+
+    mask = np.copy(x)
+    mask[mask != 0] = 1.0
+
+    return x, mask
+
+