def body(i, adv_x):
  adv_x = fgm_perturb(adv_x, **fgm_params)

  # Clipping perturbation eta to self.ord norm ball
  eta = adv_x - x
  eta = clip_eta(eta, ord, eps)
  adv_x = x + eta

  # Redo the clipping.
  # FGM already did it, but subtracting and re-adding eta can add some
  # small numerical error.
  if clip_min is not None or clip_max is not None:
    adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)

  return i + 1, adv_x
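
# The `clip_eta` call above projects the perturbation back onto the eps-ball
# for the chosen norm. A minimal sketch of what that projection looks like for
# the L-infinity and L2 cases, assuming the numpy/tensorflow imports used by
# the surrounding snippets (an illustrative assumption, not the exact
# cleverhans.utils_tf implementation):
def clip_eta_sketch(eta, ord, eps):
  """Project eta onto an eps-ball (hypothetical helper for illustration)."""
  if ord == np.inf:
    # L-infinity ball: clip every coordinate independently.
    return tf.clip_by_value(eta, -eps, eps)
  elif ord == 2:
    # L2 ball: rescale eta whenever its norm exceeds eps.
    reduce_dims = list(range(1, len(eta.get_shape())))
    norm = tf.sqrt(tf.reduce_sum(tf.square(eta), axis=reduce_dims, keepdims=True))
    return eta * tf.minimum(1., eps / tf.maximum(norm, 1e-12))
  else:
    raise NotImplementedError("Only ord=inf and ord=2 are sketched here.")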
      q=self.grad_sparsity,
      clip_min=self.clip_min,
      clip_max=self.clip_max,
      clip_grad=self.clip_grad,
      targeted=(self.y_target is not None),
      sanity_checks=self.sanity_checks)

  # Clipping perturbation eta to the l1-ball
  eta = adv_x - x
  eta = clip_eta(eta, ord=1, eps=self.eps)
  adv_x = x + eta

  # Redo the clipping.
  # Subtracting and re-adding eta can add some small numerical error.
  if self.clip_min is not None or self.clip_max is not None:
    adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

  return i + 1, adv_x
def fgm_perturb(x, y, loss_fn, clip_min=None, clip_max=None, ord=np.inf, eps=0.3):
  loss = loss_fn(x)
  grad, = tf.gradients(loss, x)
  optimal_perturbation = optimize_linear(grad, eps, ord)
  adv_x = x + optimal_perturbation

  if (clip_min is not None) or (clip_max is not None):
    # We don't currently support one-sided clipping
    assert clip_min is not None and clip_max is not None
    adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)

  return adv_x
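
# Usage sketch for fgm_perturb: `loss_fn` is expected to map a (possibly
# perturbed) input tensor to a scalar loss. The model, placeholders, and shapes
# below are assumptions for illustration only.
x_in = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
y_in = tf.placeholder(tf.float32, shape=[None, 10])

def example_loss_fn(x_adv):
  logits = model(x_adv)  # `model` is an assumed callable returning logits
  return tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_in, logits=logits))

adv_example = fgm_perturb(x_in, y_in, example_loss_fn,
                          clip_min=0., clip_max=1., ord=np.inf, eps=0.3)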
"""
if clip_min is None or clip_max is None:
raise NotImplementedError("_project_perturbation currently has clipping "
"hard-coded in.")
# Ensure inputs are in the correct range
with tf.control_dependencies([
utils_tf.assert_less_equal(input_image,
tf.cast(clip_max, input_image.dtype)),
utils_tf.assert_greater_equal(input_image,
tf.cast(clip_min, input_image.dtype))
]):
clipped_perturbation = utils_tf.clip_by_value(
perturbation, -epsilon, epsilon)
new_image = utils_tf.clip_by_value(
input_image + clipped_perturbation, clip_min, clip_max)
return new_image - input_image
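
# In a projected-optimization attack this projection typically runs after every
# optimizer step, so the running perturbation stays inside both the epsilon
# ball and the valid pixel range. Rough sketch of that outer loop; the argument
# order of _project_perturbation and the `compute_update` helper are
# assumptions.
perturbation = tf.zeros_like(input_image)
for _ in range(num_steps):  # num_steps is an assumed constant
  update = compute_update(input_image + perturbation)  # assumed optimizer step
  perturbation = _project_perturbation(perturbation + update, epsilon,
                                       input_image, clip_min=clip_min,
                                       clip_max=clip_max)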
  idx = tf.stack((tf.range(tf.shape(abs_grad)[0]), k), -1)
  percentiles = tf.gather_nd(sorted_grad, idx)
  tied_for_max = tf.greater_equal(abs_grad, tf.expand_dims(percentiles, -1))
  tied_for_max = tf.reshape(tf.cast(tied_for_max, x.dtype), tf.shape(grad))
  num_ties = tf.reduce_sum(tied_for_max, red_ind, keepdims=True)
  optimal_perturbation = tf.sign(grad) * tied_for_max / num_ties

  # Add perturbation to original example to obtain adversarial example
  adv_x = x + utils_tf.mul(eps, optimal_perturbation)

  # If clipping is needed, reset all values outside of [clip_min, clip_max]
  if (clip_min is not None) or (clip_max is not None):
    # We don't currently support one-sided clipping
    assert clip_min is not None and clip_max is not None
    adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)

  if sanity_checks:
    with tf.control_dependencies(asserts):
      adv_x = tf.identity(adv_x)

  return adv_x
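
# The tied_for_max block above is the L1 branch of optimize_linear, which
# spreads the eps budget evenly over coordinates tied for the largest absolute
# gradient. For comparison, the L-infinity branch collapses to the familiar
# FGSM sign step; a minimal sketch of its general shape (an assumption, not
# the library's exact code):
def optimize_linear_linf_sketch(grad, eps):
  # For an L-infinity constraint, the loss-maximizing step is eps * sign(grad).
  optimal_perturbation = tf.stop_gradient(tf.sign(grad))
  return eps * optimal_perturbation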
                                       x.dtype)))

  # Initialize loop variables
  if self.rand_init:
    eta = tf.random_uniform(tf.shape(x),
                            tf.cast(-self.rand_minmax, x.dtype),
                            tf.cast(self.rand_minmax, x.dtype),
                            dtype=x.dtype)
  else:
    eta = tf.zeros(tf.shape(x))

  # Clip eta
  eta = clip_eta(eta, self.ord, self.eps)
  adv_x = x + eta
  if self.clip_min is not None or self.clip_max is not None:
    adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

  if self.y_target is not None:
    y = self.y_target
    targeted = True
  elif self.y is not None:
    y = self.y
    targeted = False
  else:
    model_preds = self.model.get_probs(x)
    preds_max = tf.reduce_max(model_preds, 1, keepdims=True)
    y = tf.to_float(tf.equal(model_preds, preds_max))
    y = tf.stop_gradient(y)
    targeted = False
    del model_preds

  y_kwarg = 'y_target' if targeted else 'y'
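
# After picking y_kwarg, the per-step FGM arguments are typically packed into a
# dict and the `body` function above is driven by tf.while_loop. Sketch of that
# glue code, assuming the attribute names used elsewhere in this class:
fgm_params = {
    'eps': self.eps_iter,
    y_kwarg: y,
    'ord': self.ord,
    'clip_min': self.clip_min,
    'clip_max': self.clip_max,
}

def while_cond(i, _):
  return tf.less(i, self.nb_iter)

_, adv_x = tf.while_loop(while_cond, body, (tf.zeros([]), adv_x))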
             rand_minmax=0.3, nb_iter=20):
  # changed nb_iter to 20 and eps_iter to 0.1 for higher eps attack

  # Initialize loop variables
  if rand_init:
    eta = tf.random_uniform(tf.shape(x),
                            tf.cast(-rand_minmax, x.dtype),
                            tf.cast(rand_minmax, x.dtype),
                            dtype=x.dtype)
  else:
    eta = tf.zeros(tf.shape(x))

  # Clip eta
  eta = clip_eta(eta, ord, eps)
  adv_x = x + eta
  if clip_min is not None or clip_max is not None:
    adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)

  if y_target is not None:
    y = y_target
    targeted = True
  elif y is not None:
    targeted = False
  else:
    raise ValueError("Either `y` or `y_target` must be provided.")
    # model_preds = self.model.get_probs(x)
    # preds_max = reduce_max(model_preds, 1, keepdims=True)
    # y = tf.to_float(tf.equal(model_preds, preds_max))
    # y = tf.stop_gradient(y)
    # targeted = False
    # del model_preds

  # `loss` is assumed to have been computed from the model's logits and `y`
  # in code elided from this snippet.
  if targeted:
    loss = -loss

  # Define gradient of loss wrt input
  grad, = tf.gradients(loss, x)
  optimal_perturbation = optimize_linear(grad, eps, ord)

  # Add perturbation to original example to obtain adversarial example
  adv_x = x + optimal_perturbation

  # If clipping is needed, reset all values outside of [clip_min, clip_max]
  if (clip_min is not None) or (clip_max is not None):
    # We don't currently support one-sided clipping
    assert clip_min is not None and clip_max is not None
    adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)

  if sanity_checks:
    with tf.control_dependencies(asserts):
      adv_x = tf.identity(adv_x)

  return adv_x
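
# Everything above builds a TF1 graph, so the adversarial tensor still has to
# be evaluated in a session. Minimal assumed usage; the placeholders and numpy
# arrays below are illustrative, not part of the original snippet.
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  adv_np = sess.run(adv_x, feed_dict={x: clean_images_np, y: clean_labels_np})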