add frost metric #4

Merged
merged 2 commits on Aug 8, 2019
75 changes: 75 additions & 0 deletions examples/torch_frost_example.py
@@ -0,0 +1,75 @@
""" Test case for Torch """

from __future__ import absolute_import

import torch
import torchvision.models as models
import numpy as np
from perceptron.models.classification.pytorch import PyTorchModel
from perceptron.utils.image import imagenet_example
from perceptron.benchmarks.frost import FrostMetric
from perceptron.utils.criteria.classification import Misclassification
from perceptron.utils.tools import plot_image
from perceptron.utils.tools import bcolors

# instantiate the model
resnet18 = models.resnet18(pretrained=True).eval()
if torch.cuda.is_available():
    resnet18 = resnet18.cuda()

# initialize the PyTorchModel
mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
fmodel = PyTorchModel(
    resnet18, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))

# get the source image in channels-first format
image, _ = imagenet_example(data_format='channels_first')
image = image / 255. # because our model expects values in [0, 1]

# set the type of noise which will be used to generate the adversarial examples
metric = FrostMetric(fmodel, criterion=Misclassification())

# set the label as the predicted one
label = np.argmax(fmodel.predictions(image))

print(bcolors.BOLD + 'Process start' + bcolors.ENDC)
# set 'unpack' to False so we can access the detailed info of the adversary
adversary = metric(image, label, scenario=5, verify=True, unpack=False)
print(bcolors.BOLD + 'Process finished' + bcolors.ENDC)

if adversary.image is None:
    print(
        bcolors.WARNING +
        'Warning: Cannot find an adversary!' +
        bcolors.ENDC)
    exit(-1)

################### print summary info #####################################

keywords = ['PyTorch', 'ResNet18', 'Misclassification', 'Frost']

true_label = np.argmax(fmodel.predictions(image))
fake_label = np.argmax(fmodel.predictions(adversary.image))

# interpret the labels as human-readable names
with open('perceptron/utils/labels.txt') as info:
    imagenet_dict = eval(info.read())

print(bcolors.HEADER + bcolors.UNDERLINE + 'Summary:' + bcolors.ENDC)
print('Configuration:' + bcolors.CYAN + ' --framework %s '
      '--model %s --criterion %s '
      '--metric %s' % tuple(keywords) + bcolors.ENDC)
print('The predicted label of original image is '
      + bcolors.GREEN + imagenet_dict[true_label] + bcolors.ENDC)
print('The predicted label of adversary image is '
      + bcolors.RED + imagenet_dict[fake_label] + bcolors.ENDC)
print('Minimum perturbation required: %s' % bcolors.BLUE
      + str(adversary.distance) + bcolors.ENDC)
print('Verifiable bound: %s' % bcolors.BLUE
      + str(adversary.verifiable_bounds) + bcolors.ENDC)
print('\n')

plot_image(adversary,
           title=', '.join(keywords),
           figname='examples/images/%s.png' % '_'.join(keywords))
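
Note (not part of the diff): the script above exercises verification mode; a minimal sketch of the non-verification path, reusing `metric`, `image`, and `label` defined above with an explicit list of frost-blending levels, could look like this:

# Sketch only, reusing objects from the example above. The epsilons list gives
# the blending levels to try in order; it is ignored when verify=True.
adversary = metric(image, label,
                   scenario=5,                          # bundled frost background index
                   epsilons=[0.2, 0.4, 0.6, 0.8, 1.0],  # explicit blending levels
                   verify=False,
                   unpack=False)                        # keep the Adversarial object
if adversary.image is not None:
    print('Adversary found at distance %s' % str(adversary.distance))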
112 changes: 112 additions & 0 deletions perceptron/benchmarks/frost.py
@@ -0,0 +1,112 @@
# Copyright 2019 Baidu Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Metric that tests models against frost variations."""

import numpy as np
from tqdm import tqdm
from collections import Iterable
from .base import Metric
from .base import call_decorator
from PIL import Image
import warnings


class FrostMetric(Metric):
"""Metric that tests models against frost variations."""

@call_decorator
def __call__(self, adv, scenario=5, annotation=None, unpack=True,
abort_early=True, verify=False, epsilons=1000):
"""Change the frost of the image until it is misclassified.

Parameters
----------
adv : `numpy.ndarray`
The original, unperturbed input as a `numpy.ndarray`.
scenario : int or PIL.Image
Choice of frost backgrounds.
annotation : int
The reference label of the original input. Must be passed
if `a` is a `numpy.ndarray`.
unpack : bool
If true, returns the adversarial input, otherwise returns
the Adversarial object.
abort_early : bool
If true, returns when got first adversarial, otherwise
returns when all the iterations are finished.
verify : bool
If True, return verifiable bound.
epsilons : int or Iterable[float]
Either Iterable of contrast levels or number of brightness
factors between 1 and 0 that should be tried. Epsilons are
one minus the brightness factor. Epsilons are not used if
verify = True.

"""

        if verify is True:
            warnings.warn('epsilon is not used in verification mode '
                          'and abort_early is set to True.')

        a = adv
        del adv
        del annotation
        del unpack

        image = a.original_image
        min_, max_ = a.bounds()
        axis = a.channel_axis(batch=False)
        hw = [image.shape[i] for i in range(image.ndim) if i != axis]
        img_height, img_width = hw

        if not isinstance(epsilons, Iterable):
            epsilons = np.linspace(0, 1, num=epsilons)[1:]

        if isinstance(scenario, Image.Image):
            frost_img_pil = scenario
        elif isinstance(scenario, int):
            frost_img_pil = Image.open(
                'perceptron/utils/images/frost{0}.png'.format(scenario))
        else:
            raise ValueError(
                'scenario has to be either int or PIL.Image.Image')

        # resize the frost texture to the input resolution, scale it to the
        # model's value range, and match the channel layout of the input
        frost_img = np.array(
            frost_img_pil.convert('RGB').resize(
                (img_width, img_height))).astype(np.float32) / 255.
        frost_img = frost_img * max_
        if axis == 0:
            frost_img = np.transpose(frost_img, (2, 0, 1))

        # blending coefficients: as epsilon grows from 0 to 1, the weight of
        # the original image drops from 1.0 to 0.5 while the weight of the
        # frost texture rises from 0.3 to 0.8
        cc0 = [1.0, 0.5]
        cc1 = [0.3, 0.8]
        for _, epsilon in enumerate(tqdm(epsilons)):
            p0 = cc0[0] + epsilon * (cc0[1] - cc0[0])
            p1 = cc1[0] + epsilon * (cc1[1] - cc1[0])
            perturbed = image * p0 + frost_img * p1
            perturbed = np.clip(perturbed, min_, max_)

            _, is_adversarial = a.predictions(perturbed)
            if is_adversarial:
                if abort_early or verify:
                    break
            else:
                bound = epsilon
                a.verifiable_bounds = (bound, None)

        return
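
Reviewer note (not part of the diff): since `scenario` also accepts a `PIL.Image.Image`, a minimal usage sketch with a custom frost texture could look like the following; it assumes `fmodel`, `image`, and `label` are created as in the example script above, and 'my_frost.png' is a hypothetical local file.

from PIL import Image
from perceptron.benchmarks.frost import FrostMetric
from perceptron.utils.criteria.classification import Misclassification

# Sketch only: use a custom frost texture instead of a bundled index.
custom_frost = Image.open('my_frost.png')  # hypothetical texture file
metric = FrostMetric(fmodel, criterion=Misclassification())
adversary = metric(image, label, scenario=custom_frost, verify=True, unpack=False)
print('Verifiable bound: %s' % str(adversary.verifiable_bounds))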
Binary file removed perceptron/utils/images/frost4.jpg
Binary file added perceptron/utils/images/frost4.png
Binary file removed perceptron/utils/images/frost5.jpg
Binary file added perceptron/utils/images/frost5.png
Binary file removed perceptron/utils/images/frost6.jpg
Binary file added perceptron/utils/images/frost6.png