How to use the cleverhans.utils_tf module in cleverhans

To help you get started, we’ve selected a few cleverhans.utils_tf examples, based on popular ways it is used in public projects.
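
Before the examples, here is a minimal sketch of the two helpers that appear most often below, utils_tf.clip_eta and utils_tf.clip_by_value. It assumes TensorFlow 1.x in graph mode; the input shape, eps, and data range are illustrative and not taken from any of the projects.

# Minimal sketch (TF 1.x graph mode assumed; shapes, eps and bounds are illustrative).
import numpy as np
import tensorflow as tf
from cleverhans import utils_tf

x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))   # hypothetical input batch
eta = tf.random_uniform(tf.shape(x), -0.5, 0.5)           # hypothetical raw perturbation

# Project the perturbation onto an L-inf ball of radius eps.
eta = utils_tf.clip_eta(eta, np.inf, 0.3)

# Keep the perturbed input inside the valid data range.
adv_x = utils_tf.clip_by_value(x + eta, 0., 1.)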

github tensorflow / cleverhans / tests_tf / test_utils_tf.py
def test_clip_by_value_numpy_dtype(self):
    # Test that it's possible to use clip_by_value while mixing numpy and tf
    clip_min = np.zeros((1,))
    clip_max = tf.ones((1,))
    x = tf.ones((1,))
    # The point of this test is just to make sure the casting logic doesn't raise an exception
    utils_tf.clip_by_value(x, clip_min, clip_max)
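
The casting logic the test refers to is what lets numpy and TensorFlow bounds be mixed: plain tf.clip_by_value would typically reject a float64 numpy bound against a float32 tensor, while the utils_tf wrapper casts the bound to the tensor's dtype first. A small sketch of the same idea outside the test harness (TF 1.x assumed; shapes are illustrative):

# Same idea outside the test harness (TF 1.x assumed; shapes are illustrative).
import numpy as np
import tensorflow as tf
from cleverhans import utils_tf

x = tf.ones((4, 10), dtype=tf.float32)
clip_min = np.zeros((1,))                  # numpy float64 bound
clip_max = tf.ones((1,), dtype=tf.float32)

# The wrapper casts clip_min to x.dtype before delegating to tf.clip_by_value.
x_clipped = utils_tf.clip_by_value(x, clip_min, clip_max)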
github tensorflow / cleverhans / cleverhans / attacks / sparse_l1_descent.py
  if LooseVersion(tf.__version__) <= LooseVersion('1.12.0'):
    # `tf.sort` is only available in TF 1.13 onwards
    sorted_grad = -tf.nn.top_k(-abs_grad, k=dim, sorted=True)[0]
  else:
    sorted_grad = tf.sort(abs_grad, axis=-1)

  idx = tf.stack((tf.range(tf.shape(abs_grad)[0]), k), -1)
  percentiles = tf.gather_nd(sorted_grad, idx)
  tied_for_max = tf.greater_equal(abs_grad, tf.expand_dims(percentiles, -1))
  tied_for_max = tf.reshape(tf.cast(tied_for_max, x.dtype), tf.shape(grad))
  num_ties = tf.reduce_sum(tied_for_max, red_ind, keepdims=True)

  optimal_perturbation = tf.sign(grad) * tied_for_max / num_ties

  # Add perturbation to original example to obtain adversarial example
  adv_x = x + utils_tf.mul(eps, optimal_perturbation)

  # If clipping is needed, reset all values outside of [clip_min, clip_max]
  if (clip_min is not None) or (clip_max is not None):
    # We don't currently support one-sided clipping
    assert clip_min is not None and clip_max is not None
    adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)

  if sanity_checks:
    with tf.control_dependencies(asserts):
      adv_x = tf.identity(adv_x)

  return adv_x
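
The tie handling above is the subtle part of the L1 step: every coordinate whose |gradient| reaches the chosen percentile shares the budget equally, so the step has unit L1 norm before utils_tf.mul scales it by eps. A small numpy re-enactment of that logic with made-up values:

# Numpy re-enactment of the tie-splitting step above (toy values, single example).
import numpy as np

grad = np.array([[0.1, -0.9, 0.9, 0.2]])
k = np.array([2])                                    # hypothetical percentile index

abs_grad = np.abs(grad)
sorted_grad = np.sort(abs_grad, axis=-1)             # ascending, like tf.sort
percentiles = sorted_grad[np.arange(len(k)), k]      # -> [0.9]
tied_for_max = (abs_grad >= percentiles[:, None]).astype(grad.dtype)
num_ties = tied_for_max.sum(axis=-1, keepdims=True)  # -> [[2.]]

optimal_perturbation = np.sign(grad) * tied_for_max / num_ties
# -> [[ 0. , -0.5,  0.5,  0. ]]  (L1 norm is 1; scaling by eps gives the final step)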
github yangarbiter / adversarial-nonparametrics / nnattack / attacks / kernel_sub_tf.py
def fgm_perturb(x, y, loss_fn, clip_min=None, clip_max=None, ord=np.inf, eps=0.3):
    loss = loss_fn(x)
    grad, = tf.gradients(loss, x)
    optimal_perturbation = optimize_linear(grad, eps, ord)
    adv_x = x + optimal_perturbation

    if (clip_min is not None) or (clip_max is not None):
        # We don't currently support one-sided clipping
        assert clip_min is not None and clip_max is not None
        adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)

    return adv_x
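
A hedged sketch of how fgm_perturb above might be wired up. The excerpt assumes optimize_linear and utils_tf are already imported in kernel_sub_tf.py; the placeholder shapes and the toy loss below are illustrative and not taken from that project.

# Hypothetical wiring for fgm_perturb (TF 1.x; the toy model and shapes are made up).
# Assumes fgm_perturb (and its cleverhans imports) from the excerpt above are in scope.
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(None, 784))
y = tf.placeholder(tf.float32, shape=(None, 10))

def loss_fn(x_input):
  # Stand-in loss: cross-entropy of a single dense layer "model".
  logits = tf.layers.dense(x_input, 10, name="toy_model", reuse=tf.AUTO_REUSE)
  return tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits)

adv_x = fgm_perturb(x, y, loss_fn, clip_min=0., clip_max=1., ord=np.inf, eps=0.3)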
github tensorflow / cleverhans / cleverhans / attacks / sparse_l1_descent.py
def generate(self, x, **kwargs):
    """
    Generate symbolic graph for adversarial examples and return.

    :param x: The model's symbolic inputs.
    :param kwargs: See `parse_params`
    """
    # Parse and save attack-specific parameters
    assert self.parse_params(**kwargs)

    asserts = []

    # If a data range was specified, check that the input was in that range
    if self.clip_min is not None:
      asserts.append(utils_tf.assert_greater_equal(x,
                                                   tf.cast(self.clip_min,
                                                           x.dtype)))

    if self.clip_max is not None:
      asserts.append(utils_tf.assert_less_equal(x,
                                                tf.cast(self.clip_max,
                                                        x.dtype)))

    # Initialize loop variables
    if self.rand_init:
      eta = random_lp_vector(tf.shape(x), ord=1,
                             eps=tf.cast(self.eps, x.dtype), dtype=x.dtype)
    else:
      eta = tf.zeros(tf.shape(x))

    # Clip eta
github tensorflow / cleverhans / cleverhans / attacks / momentum_iterative_method.py
      avoid_zero_div = tf.cast(1e-12, grad.dtype)
      grad = grad / tf.maximum(
          avoid_zero_div,
          reduce_mean(tf.abs(grad), red_ind, keepdims=True))
      m = self.decay_factor * m + grad

      optimal_perturbation = optimize_linear(m, self.eps_iter, self.ord)
      if self.ord == 1:
        raise NotImplementedError("This attack hasn't been tested for ord=1. "
                                  "It's not clear that FGM makes a good inner "
                                  "loop step for iterative optimization since "
                                  "it updates just one coordinate at a time.")

      # Update and clip adversarial example in current iteration
      ax = ax + optimal_perturbation
      ax = x + utils_tf.clip_eta(ax - x, self.ord, self.eps)

      if self.clip_min is not None and self.clip_max is not None:
        ax = utils_tf.clip_by_value(ax, self.clip_min, self.clip_max)

      ax = tf.stop_gradient(ax)

      return i + 1, ax, m
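
The body above returns the loop-carried triple (i + 1, ax, m), which is the shape tf.while_loop expects, so it is presumably driven by such a loop in the attack. A self-contained sketch of that driving pattern with a trivial stand-in body (TF 1.x; the iteration count and shapes are made up, and this is not the exact loop from the repository):

# Sketch of the tf.while_loop pattern a body like the one above plugs into (TF 1.x).
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(None, 784))
nb_iter = 10                                   # hypothetical iteration count

def body(i, ax, m):
  # One momentum-FGM step from the excerpt would go here; this stand-in is a no-op.
  return i + 1, tf.identity(ax), tf.identity(m)

def cond(i, _ax, _m):
  return tf.less(i, nb_iter)

_, adv_x, _ = tf.while_loop(cond, body,
                            (tf.zeros([], tf.int32), x, tf.zeros_like(x)),
                            back_prop=True)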
github tensorflow / cleverhans / cleverhans / model.py
def get_probs(self, x, **kwargs):
    """
    :param x: A symbolic representation (Tensor) of the network input
    :return: A symbolic representation (Tensor) of the output
    probabilities (i.e., the output values produced by the softmax layer).
    """
    d = self.fprop(x, **kwargs)
    if self.O_PROBS in d:
      output = d[self.O_PROBS]
      min_prob = tf.reduce_min(output)
      max_prob = tf.reduce_max(output)
      asserts = [utils_tf.assert_greater_equal(min_prob,
                                               tf.cast(0., min_prob.dtype)),
                 utils_tf.assert_less_equal(max_prob,
                                            tf.cast(1., min_prob.dtype))]
      with tf.control_dependencies(asserts):
        output = tf.identity(output)
      return output
    elif self.O_LOGITS in d:
      return tf.nn.softmax(logits=d[self.O_LOGITS])
    else:
      raise ValueError('Cannot find probs or logits.')
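
get_probs only needs fprop to return a dict keyed by O_PROBS or O_LOGITS; if only logits are present, it falls back to tf.nn.softmax, as the excerpt shows. A minimal, hypothetical Model subclass illustrating that contract (the layer, scope name, and class count are made up; the __init__ pattern mirrors the cleverhans tutorial models):

# Hypothetical minimal subclass to show what get_probs expects from fprop (TF 1.x).
import tensorflow as tf
from cleverhans.model import Model

class TinyModel(Model):
  def __init__(self, scope='tiny', nb_classes=10, **kwargs):
    del kwargs
    Model.__init__(self, scope, nb_classes, locals())

  def fprop(self, x, **kwargs):
    del kwargs
    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
      logits = tf.layers.dense(x, self.nb_classes)
    # Returning only O_LOGITS would also work: get_probs falls back to tf.nn.softmax.
    return {self.O_LOGITS: logits,
            self.O_PROBS: tf.nn.softmax(logits=logits)}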
github tensorflow / cleverhans / cleverhans / attacks / fast_gradient_method.py
    tied_for_max = tf.to_float(tf.equal(abs_grad, max_abs_grad))
    num_ties = tf.reduce_sum(tied_for_max, red_ind, keepdims=True)
    optimal_perturbation = sign * tied_for_max / num_ties
  elif ord == 2:
    square = tf.maximum(avoid_zero_div,
                        reduce_sum(tf.square(grad),
                                   reduction_indices=red_ind,
                                   keepdims=True))
    optimal_perturbation = grad / tf.sqrt(square)
  else:
    raise NotImplementedError("Only L-inf, L1 and L2 norms are "
                              "currently implemented.")

  # Scale perturbation to be the solution for the norm=eps rather than
  # norm=1 problem
  scaled_perturbation = utils_tf.mul(eps, optimal_perturbation)
  return scaled_perturbation
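
For the ord == 2 branch, dividing by tf.sqrt(square) normalizes the gradient to unit L2 norm per example, so after utils_tf.mul the step has L2 norm at most eps. A quick numpy check with toy numbers:

# Numpy sanity check of the L2 branch above (toy gradient, single example).
import numpy as np

grad = np.array([[3.0, 4.0]])
eps, avoid_zero_div = 0.5, 1e-12

square = np.maximum(avoid_zero_div, np.sum(grad ** 2, axis=1, keepdims=True))
optimal_perturbation = grad / np.sqrt(square)        # unit L2 norm: [[0.6, 0.8]]
scaled_perturbation = eps * optimal_perturbation

print(np.linalg.norm(scaled_perturbation, axis=1))   # -> [0.5]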
github tensorflow / cleverhans / cleverhans / attacks / hop_skip_jump_attack.py
def _check_first_dimension(x, tensor_name):
  message = "Tensor {} should have batch_size of 1.".format(tensor_name)
  if x.get_shape().as_list()[0] is None:
    check_batch = utils_tf.assert_equal(tf.shape(x)[0], 1, message=message)
    with tf.control_dependencies([check_batch]):
      x = tf.identity(x)
  elif x.get_shape().as_list()[0] != 1:
    raise ValueError(message)
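
A hedged usage sketch for _check_first_dimension: a statically known batch dimension other than 1 fails immediately with ValueError, while an unknown batch dimension only gets a graph-level assert_equal check built for it (TF 1.x; the shapes below are illustrative).

# Illustrative calls (TF 1.x); assumes _check_first_dimension from the excerpt above.
import tensorflow as tf

x_static = tf.placeholder(tf.float32, shape=(1, 28, 28, 1))
_check_first_dimension(x_static, "x_static")     # OK: batch size is statically 1

x_unknown = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
_check_first_dimension(x_unknown, "x_unknown")   # builds a runtime assert_equal check

x_bad = tf.placeholder(tf.float32, shape=(8, 28, 28, 1))
# _check_first_dimension(x_bad, "x_bad")         # would raise ValueError immediately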
github tensorflow / cleverhans / cleverhans / attacks / projected_gradient_descent.py
def generate(self, x, **kwargs):
    """
    Generate symbolic graph for adversarial examples and return.

    :param x: The model's symbolic inputs.
    :param kwargs: See `parse_params`
    """
    # Parse and save attack-specific parameters
    assert self.parse_params(**kwargs)

    asserts = []

    # If a data range was specified, check that the input was in that range
    if self.clip_min is not None:
      asserts.append(utils_tf.assert_greater_equal(x,
                                                   tf.cast(self.clip_min,
                                                           x.dtype)))

    if self.clip_max is not None:
      asserts.append(utils_tf.assert_less_equal(x,
                                                tf.cast(self.clip_max,
                                                        x.dtype)))

    # Initialize loop variables
    if self.rand_init:
      eta = tf.random_uniform(tf.shape(x),
                              tf.cast(-self.rand_minmax, x.dtype),
                              tf.cast(self.rand_minmax, x.dtype),
                              dtype=x.dtype)
    else:
      eta = tf.zeros(tf.shape(x))
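
Both generate() excerpts above use the same assert pattern to verify the data range; pulled out on its own, it looks roughly like this (TF 1.x; the placeholder shape and bounds are illustrative):

# The range-check pattern shared by the generate() excerpts (TF 1.x, toy shapes).
import tensorflow as tf
from cleverhans import utils_tf

x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
clip_min, clip_max = 0., 1.

asserts = [utils_tf.assert_greater_equal(x, tf.cast(clip_min, x.dtype)),
           utils_tf.assert_less_equal(x, tf.cast(clip_max, x.dtype))]

with tf.control_dependencies(asserts):
  x_checked = tf.identity(x)   # evaluating x_checked runs the range checks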