def test_clip_eta_goldilocks(self):
    """test_clip_eta_goldilocks: Test that the clipping handles perturbations
    that are too small, just right, and too big correctly"""
    eta = tf.constant([[2.], [3.], [4.]])
    self.assertTrue(eta.dtype == tf.float32, eta.dtype)
    eps = 3.
    for ord_arg in [np.inf, 1, 2]:
        for sign in [-1., 1.]:
            try:
                clipped = utils_tf.clip_eta(eta * sign, ord_arg, eps)
            except NotImplementedError:
                # Don't raise SkipTest, it skips the rest of the for loop
                continue
            clipped_value = self.sess.run(clipped)
            gold = sign * np.array([[2.], [3.], [3.]])
            self.assertClose(clipped_value, gold)
            grad, = tf.gradients(clipped, eta)
            grad_value = self.sess.run(grad)
            # Note: the second 1. is debatable (the left-sided derivative
            # and the right-sided derivative do not match, so formally
            # the derivative is not defined). This test makes sure that
            # we at least handle this oddity consistently across all the
            # argument values we test
            gold = sign * np.array([[1.], [1.], [0.]])
            self.assertClose(grad_value, gold)
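# A minimal NumPy sketch of the projection the test above exercises:
# clip_eta clips elementwise for the infinity norm and rescales each
# example for the 1- and 2-norms so its norm is at most eps (the TF
# version raises NotImplementedError for ord=1 in some releases, which
# is why the test catches it). clip_eta_np is a hypothetical helper,
# not the library function.
import numpy as np

def clip_eta_np(eta, ord_, eps):
    """Project each example (indexed by axis 0) onto the eps-ball."""
    if ord_ == np.inf:
        # Elementwise clipping projects onto the L-inf ball.
        return np.clip(eta, -eps, eps)
    if ord_ in (1, 2):
        axes = tuple(range(1, eta.ndim))
        if ord_ == 1:
            norm = np.sum(np.abs(eta), axis=axes, keepdims=True)
        else:
            norm = np.sqrt(np.sum(eta ** 2, axis=axes, keepdims=True))
        # Scale down (never up) so each example's norm is at most eps.
        factor = np.minimum(1., eps / np.maximum(norm, 1e-12))
        return eta * factor
    raise ValueError("ord must be np.inf, 1, or 2")

print(clip_eta_np(np.array([[2.], [3.], [4.]]), np.inf, 3.))
# [[2.], [3.], [3.]] -- matches the test's gold value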
def pgd_perturb(x, y, loss_fn, y_target=None, clip_min=None,
                clip_max=None, rand_init=False, ord=np.inf, eps=0.3,
                eps_iter=0.1, rand_minmax=0.3, nb_iter=20):
    # changed nb_iter to 20 and eps_iter to 0.1 for higher-eps attack
    # Initialize loop variables
    if rand_init:
        eta = tf.random_uniform(tf.shape(x),
                                tf.cast(-rand_minmax, x.dtype),
                                tf.cast(rand_minmax, x.dtype),
                                dtype=x.dtype)
    else:
        eta = tf.zeros(tf.shape(x))

    # Clip eta
    eta = clip_eta(eta, ord, eps)
    adv_x = x + eta
    if clip_min is not None or clip_max is not None:
        adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)

    if y_target is not None:
        y = y_target
        targeted = True
    elif y is not None:
        targeted = False
    else:
        raise ValueError("Must provide either y or y_target")
        # model_preds = self.model.get_probs(x)
        # preds_max = reduce_max(model_preds, 1, keepdims=True)
        # y = tf.to_float(tf.equal(model_preds, preds_max))
        # y = tf.stop_gradient(y)
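# The snippet above stops after label selection; a minimal sketch of
# how the iteration typically continues from here, assuming a
# single-step helper fgm_perturb(adv_x, ...) like the one used by
# body() further below (the keyword arguments are illustrative
# assumptions, not from the snippet):
    for _ in range(nb_iter):
        adv_x = fgm_perturb(adv_x, y=y, loss_fn=loss_fn, eps=eps_iter,
                            ord=ord, clip_min=clip_min, clip_max=clip_max)
        # Project the accumulated perturbation back onto the eps-ball.
        eta = clip_eta(adv_x - x, ord, eps)
        adv_x = x + eta
    return adv_x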
if self.clip_max is not None:
    asserts.append(utils_tf.assert_less_equal(x,
                                              tf.cast(self.clip_max,
                                                      x.dtype)))

# Initialize loop variables
if self.rand_init:
    eta = tf.random_uniform(tf.shape(x),
                            tf.cast(-self.rand_minmax, x.dtype),
                            tf.cast(self.rand_minmax, x.dtype),
                            dtype=x.dtype)
else:
    eta = tf.zeros(tf.shape(x))

# Clip eta
eta = clip_eta(eta, self.ord, self.eps)
adv_x = x + eta
if self.clip_min is not None or self.clip_max is not None:
    adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

if self.y_target is not None:
    y = self.y_target
    targeted = True
elif self.y is not None:
    y = self.y
    targeted = False
else:
    model_preds = self.model.get_probs(x)
    preds_max = tf.reduce_max(model_preds, 1, keepdims=True)
    y = tf.to_float(tf.equal(model_preds, preds_max))
    y = tf.stop_gradient(y)
    targeted = False
def pgd_perturb(x, y, loss_fn, y_target=None, clip_min=None,
                clip_max=None, rand_init=False, ord=np.inf, eps=0.3,
                eps_iter=0.05, rand_minmax=0.3, nb_iter=10):
    # Initialize loop variables
    if rand_init:
        eta = tf.random_uniform(tf.shape(x),
                                tf.cast(-rand_minmax, x.dtype),
                                tf.cast(rand_minmax, x.dtype),
                                dtype=x.dtype)
    else:
        eta = tf.zeros(tf.shape(x))

    # Clip eta
    eta = clip_eta(eta, ord, eps)
    adv_x = x + eta
    if clip_min is not None or clip_max is not None:
        adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)

    if y_target is not None:
        y = y_target
        targeted = True
    elif y is not None:
        targeted = False
    else:
        raise ValueError("Must provide either y or y_target")
        # model_preds = self.model.get_probs(x)
        # preds_max = reduce_max(model_preds, 1, keepdims=True)
        # y = tf.to_float(tf.equal(model_preds, preds_max))
        # y = tf.stop_gradient(y)
:param ord: (optional) Order of the norm (mimics Numpy).
    Possible values: np.inf, 1 or 2.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
import tensorflow as tf
from cleverhans.utils_tf import clip_eta

# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)

g_feat = self.model.get_layer(g, self.layer)

# Initialize loop variables
eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps)
eta = clip_eta(eta, self.ord, self.eps)

for i in range(self.nb_iter):
    eta = self.attack_single_step(x, eta, g_feat)

# Define adversarial example (and clip if necessary)
adv_x = x + eta
if self.clip_min is not None and self.clip_max is not None:
    adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
return adv_x
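# A hedged usage sketch for the generate() method above; the attack
# object, placeholders, and parameter values are illustrative
# assumptions, not taken from the snippet:
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
g = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
adv_x = attack.generate(x, g, layer='logits', eps=0.3, eps_iter=0.05,
                        nb_iter=10, clip_min=0., clip_max=1.)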
def body(i, adv_x):
    adv_x = fgm_perturb(adv_x, **fgm_params)

    # Clipping perturbation eta to self.ord norm ball
    eta = adv_x - x
    eta = clip_eta(eta, ord, eps)
    adv_x = x + eta

    # Redo the clipping.
    # FGM already did it, but subtracting and re-adding eta can add some
    # small numerical error.
    if clip_min is not None or clip_max is not None:
        adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)

    return i + 1, adv_x
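# A sketch of how body() is typically driven, mirroring the
# cond/body/tf.while_loop pattern in the generate() snippet further
# down; nb_iter and the randomly initialized adv_x are assumed to come
# from the enclosing scope, as in the pgd_perturb snippets above:
def cond(i, _):
    return tf.less(i, nb_iter)

_, adv_x = tf.while_loop(cond, body, (tf.zeros([]), adv_x),
                         back_prop=True, maximum_iterations=nb_iter)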
if self.clip_min is not None:
    asserts.append(utils_tf.assert_greater_equal(x,
                                                 tf.cast(self.clip_min,
                                                         x.dtype)))
if self.clip_max is not None:
    asserts.append(utils_tf.assert_less_equal(x,
                                              tf.cast(self.clip_max,
                                                      x.dtype)))

# Initialize loop variables
if self.rand_init:
    eta = random_lp_vector(tf.shape(x), ord=1,
                           eps=tf.cast(self.eps, x.dtype), dtype=x.dtype)
else:
    eta = tf.zeros(tf.shape(x))

# Clip eta
eta = clip_eta(eta, ord=1, eps=self.eps)
adv_x = x + eta
if self.clip_min is not None or self.clip_max is not None:
    adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

if self.y_target is not None:
    y = self.y_target
    targeted = True
elif self.y is not None:
    y = self.y
    targeted = False
else:
    model_preds = self.model.get_probs(x)
    preds_max = tf.reduce_max(model_preds, 1, keepdims=True)
    y = tf.to_float(tf.equal(model_preds, preds_max))
    y = tf.stop_gradient(y)
    targeted = False
Generate symbolic graph for adversarial examples and return.

:param x: The model's symbolic inputs.
:param g: The target value of the symbolic representation
:param kwargs: See `parse_params`
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)

g_feat = self.model.fprop(g)[self.layer]

# Initialize loop variables
eta = tf.random_uniform(
    tf.shape(x), -self.eps, self.eps, dtype=self.tf_dtype)
eta = clip_eta(eta, self.ord, self.eps)

def cond(i, _):
    return tf.less(i, self.nb_iter)

def body(i, e):
    new_eta = self.attack_single_step(x, e, g_feat)
    return i + 1, new_eta

_, eta = tf.while_loop(cond, body, (tf.zeros([]), eta), back_prop=True,
                       maximum_iterations=self.nb_iter)

# Define adversarial example (and clip if necessary)
adv_x = x + eta
if self.clip_min is not None and self.clip_max is not None:
    adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
:param y: A tensor with the target labels or ground-truth labels.
"""
from cleverhans.utils_tf import clip_eta

adv_x = x + eta
input_batch = tf.concat([x, adv_x], 0)
logits = self.model.get_logits(input_batch)
loss = self.loss()
grad, = tf.gradients(loss, adv_x)
scaled_signed_grad = self.eps_iter * tf.sign(grad)
adv_x = adv_x + scaled_signed_grad
if self.clip_min is not None and self.clip_max is not None:
    adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
eta = adv_x - x
eta = clip_eta(eta, self.ord, self.eps)
return eta
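# Note that the step returns eta rather than adv_x: re-deriving eta
# after the value clip and projecting it with clip_eta keeps the
# perturbation inside the eps-ball across iterations. A minimal sketch
# of the outer loop, mirroring the generate() snippet above (x, g_feat,
# and the attack's hyperparameters are assumed given):
eta = clip_eta(tf.random_uniform(tf.shape(x), -self.eps, self.eps),
               self.ord, self.eps)
for _ in range(self.nb_iter):
    eta = self.attack_single_step(x, eta, g_feat)
adv_x = x + eta  # final adversarial example; caller clips if needed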