How to use the cleverhans.loss.CrossEntropy class in cleverhans

To help you get started, we've selected a few cleverhans examples that reflect common ways CrossEntropy is used in public projects. All of them target the TensorFlow 1.x cleverhans API.

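All of the examples below share the same basic pattern: wrap a cleverhans Model in CrossEntropy and hand the resulting loss to cleverhans.train.train. Here is a minimal, self-contained sketch of that pattern (the imports are real cleverhans v3.x / TensorFlow 1.x APIs; the hyperparameter values are illustrative):

import numpy as np
import tensorflow as tf

from cleverhans.dataset import MNIST
from cleverhans.loss import CrossEntropy
from cleverhans.model_zoo.basic_cnn import ModelBasicCNN
from cleverhans.train import train

# Load MNIST and build the small CNN used throughout the tutorials.
mnist = MNIST(train_start=0, train_end=60000, test_start=0, test_end=10000)
x_train, y_train = mnist.get_set('train')

sess = tf.Session()
model = ModelBasicCNN('model1', nb_classes=10, nb_filters=64)

# smoothing=0.1 applies label smoothing before the softmax cross entropy.
loss = CrossEntropy(model, smoothing=0.1)
train(sess, loss, x_train, y_train,
      args={'nb_epochs': 6, 'batch_size': 128, 'learning_rate': 0.001},
      rng=np.random.RandomState([2017, 8, 30]))

The attack keyword argument, shown in the adversarial-training example further down, lets the same class compute cross entropy on adversarially perturbed inputs as well.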

From tensorflow/cleverhans: cleverhans_tutorials/mnist_tutorial_cw.py (view on GitHub)
  x_test, y_test = mnist.get_set('test')

  # Obtain Image Parameters
  img_rows, img_cols, nchannels = x_train.shape[1:4]
  nb_classes = y_train.shape[1]

  # Define input TF placeholder
  x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
                                        nchannels))
  y = tf.placeholder(tf.float32, shape=(None, nb_classes))
  nb_filters = 64

  # Define TF model graph
  model = ModelBasicCNN('model1', nb_classes, nb_filters)
  preds = model.get_logits(x)
  loss = CrossEntropy(model, smoothing=0.1)
  print("Defined TensorFlow model graph.")

  ###########################################################################
  # Training the model using TensorFlow
  ###########################################################################

  # Train an MNIST model
  train_params = {
      'nb_epochs': nb_epochs,
      'batch_size': batch_size,
      'learning_rate': learning_rate,
      'filename': os.path.split(model_path)[-1]
  }

  rng = np.random.RandomState([2017, 8, 30])
  # check if we've trained before, and if we have, use that pre-trained model
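
The key CrossEntropy detail above is smoothing=0.1: the loss replaces each one-hot label y with y - smoothing * (y - 1/nb_classes), i.e. (1 - smoothing) * y + smoothing / nb_classes, before computing the softmax cross entropy. A small numpy sketch of that transform (the helper name smooth_labels is ours):

import numpy as np

def smooth_labels(y, smoothing):
  # y - smoothing * (y - 1/K)  ==  (1 - smoothing) * y + smoothing / K
  nb_classes = y.shape[-1]
  return y - smoothing * (y - 1.0 / nb_classes)

y = np.array([[0., 0., 1.]])
print(smooth_labels(y, 0.1))  # [[0.0333... 0.0333... 0.9333...]]
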
From tensorflow/cleverhans: cleverhans/loss.py (view on GitHub)
class SNNLCrossEntropy(CrossEntropy):
  def __init__(self,
               model,
               temperature=100.,
               layer_names=None,
               factor=-10.,
               optimize_temperature=True,
               cos_distance=False):
    """Constructor.
    :param model: Model instance, the model on which to apply the loss.
    :param temperature: Temperature used for SNNL.
    :layer_names: The names of the layers at which to calculate SNNL.
                  If not provided, then SNNL is applied to each internal layer.
    :factor: The balance factor between SNNL and ross Entropy. If factor is
             negative, then SNNL will be maximized.
    :optimize_temperature: Optimize temperature at each calculation to minimize
                           the loss. This makes the loss more stable.
    :cos_distance: Use cosine distance when calculating SNNL.
    """
    CrossEntropy.__init__(self, model, smoothing=0.)
    self.temperature = temperature
    self.factor = factor
    self.optimize_temperature = optimize_temperature
    self.cos_distance = cos_distance
    self.layer_names = layer_names
    if not layer_names:
      # omit the final layer, the classification layer
      self.layer_names = model.get_layer_names()[:-1]
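
Given that constructor, SNNLCrossEntropy is a drop-in replacement for CrossEntropy. A hedged usage sketch (model, sess, x_train, y_train, train_params, and rng as in the surrounding examples; the keyword values shown are the defaults from the signature above):

from cleverhans.loss import SNNLCrossEntropy

# Combine cross entropy with SNNL on every internal layer; a negative
# factor means SNNL is maximized, entangling hidden representations.
loss = SNNLCrossEntropy(model, factor=-10.,
                        optimize_temperature=True,
                        cos_distance=False)
train(sess, loss, x_train, y_train, args=train_params, rng=rng)
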
From tensorflow/cleverhans: cleverhans/model_zoo/soft_nearest_neighbor_loss/SNNL_regularized_train.py (view on GitHub)
  train_params = {
      'nb_epochs': nb_epochs,
      'batch_size': batch_size,
      'learning_rate': learning_rate
  }
  eval_params = {'batch_size': batch_size}
  rng = np.random.RandomState([2017, 8, 30])

  def do_eval(preds, x_set, y_set, report_key):
    acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
    setattr(report, report_key, acc)
    print('Accuracy (%s): %0.4f' % (report_key, acc))

  model = ModelBasicCNN('model', nb_classes, nb_filters)
  preds = model.get_logits(x)
  cross_entropy_loss = CrossEntropy(model)
  if not SNNL_factor:
    loss = cross_entropy_loss
  else:
    loss = SNNLCrossEntropy(model, factor=SNNL_factor,
                            optimize_temperature=False)

  def evaluate():
    do_eval(preds, x_test, y_test, 'clean_train_clean_eval')

  train(sess, loss, x_train, y_train, evaluate=evaluate,
        args=train_params, rng=rng, var_list=model.get_params())

  do_eval(preds, x_train, y_train, 'train_clean_train_clean_eval')

  def imscatter(points, images, ax=None, zoom=1, cmap="hot"):
    if ax is None:
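
The snippet cuts off inside imscatter, which scatters small images (here, MNIST digits) at 2-D embedding coordinates. A hedged completion sketch using matplotlib's OffsetImage and AnnotationBbox (only the signature comes from the snippet; the body is ours):

import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, OffsetImage

def imscatter(points, images, ax=None, zoom=1, cmap="hot"):
  if ax is None:
    ax = plt.gca()
  artists = []
  for (x0, y0), img in zip(points, images):
    # Place each image at its (x0, y0) embedding coordinate.
    box = AnnotationBbox(OffsetImage(img, zoom=zoom, cmap=cmap),
                         (x0, y0), frameon=False)
    artists.append(ax.add_artist(box))
  ax.update_datalim(points)
  ax.autoscale()
  return artists
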
From tensorflow/cleverhans: cleverhans_tutorials/mnist_tutorial_jsma.py (view on GitHub)
  x_test, y_test = mnist.get_set('test')

  # Obtain Image Parameters
  img_rows, img_cols, nchannels = x_train.shape[1:4]
  nb_classes = y_train.shape[1]

  # Define input TF placeholder
  x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
                                        nchannels))
  y = tf.placeholder(tf.float32, shape=(None, nb_classes))

  nb_filters = 64
  # Define TF model graph
  model = ModelBasicCNN('model1', nb_classes, nb_filters)
  preds = model.get_logits(x)
  loss = CrossEntropy(model, smoothing=0.1)
  print("Defined TensorFlow model graph.")

  ###########################################################################
  # Training the model using TensorFlow
  ###########################################################################

  # Train an MNIST model
  train_params = {
      'nb_epochs': nb_epochs,
      'batch_size': batch_size,
      'learning_rate': learning_rate
  }
  sess.run(tf.global_variables_initializer())
  rng = np.random.RandomState([2017, 8, 30])
  train(sess, loss, x_train, y_train, args=train_params, rng=rng)
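
In the full mnist_tutorial_jsma.py, training is followed by crafting adversarial examples with the Jacobian-based Saliency Map Attack. A hedged sketch of that step (SaliencyMapMethod and its theta/gamma/clip/y_target parameters are the real cleverhans.attacks API; the values shown are illustrative):

from cleverhans.attacks import SaliencyMapMethod

jsma = SaliencyMapMethod(model, sess=sess)
jsma_params = {'theta': 1., 'gamma': 0.1,
               'clip_min': 0., 'clip_max': 1.,
               'y_target': None}
adv_x = jsma.generate(x, **jsma_params)
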
From tensorflow/cleverhans: cleverhans_tutorials/mnist_blackbox.py (view on GitHub)
  :param bbox_preds: output of black-box model predictions
  :param x_sub: initial substitute training data
  :param y_sub: initial substitute training labels
  :param nb_classes: number of output classes
  :param nb_epochs_s: number of epochs to train substitute model
  :param batch_size: size of training batches
  :param learning_rate: learning rate for training
  :param data_aug: number of times substitute training data is augmented
  :param lmbda: lambda from arxiv.org/abs/1602.02697
  :param rng: numpy.random.RandomState instance
  :return:
  """
  # Define TF model graph (for the black-box model)
  model_sub = ModelSubstitute('model_s', nb_classes)
  preds_sub = model_sub.get_logits(x)
  loss_sub = CrossEntropy(model_sub, smoothing=0)

  print("Defined TensorFlow model graph for the substitute.")

  # Define the Jacobian symbolically using TensorFlow
  grads = jacobian_graph(preds_sub, x, nb_classes)

  # Train the substitute and augment dataset alternatively
  for rho in xrange(data_aug):
    print("Substitute training epoch #" + str(rho))
    train_params = {
        'nb_epochs': nb_epochs_s,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    with TemporaryLogLevel(logging.WARNING, "cleverhans.utils.tf"):
      train(sess, loss_sub, x_sub, to_categorical(y_sub, nb_classes),
            init_all=False, args=train_params, rng=rng,
            var_list=model_sub.get_params())
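
In the full mnist_blackbox.py, each of these training passes is followed by Jacobian-based dataset augmentation, which is where grads is consumed. A hedged sketch of that step inside the same loop (jacobian_augmentation is the real cleverhans helper; the sign-flipping lmbda_coef follows the Papernot et al. schedule, and the newly generated points are then labeled by querying the black-box model):

    # Augment the dataset on all but the last iteration.
    if rho < data_aug - 1:
      print("Augmenting substitute training data.")
      lmbda_coef = 2 * int(int(rho / 3) != 0) - 1  # periodically reverse the step
      x_sub = jacobian_augmentation(sess, x, x_sub, y_sub, grads,
                                    lmbda_coef * lmbda)
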
From tensorflow/cleverhans: cleverhans/loss.py (view on GitHub)
  def fprop(self, x, y, **kwargs):
    # The standard cross entropy term from the parent class.
    cross_entropy = CrossEntropy.fprop(self, x, y, **kwargs)
    self.layers = [self.model.get_layer(x, name) for name in self.layer_names]
    loss_fn = self.SNNL
    if self.optimize_temperature:
      loss_fn = self.optimized_temp_SNNL
    # Compute SNNL at each selected hidden layer; the class labels define
    # which points count as neighbors.
    layers_SNNL = [loss_fn(tf.layers.flatten(layer),
                           tf.argmax(y, axis=1),
                           self.temperature,
                           self.cos_distance)
                   for layer in self.layers]
    # With a negative factor, minimizing the total loss maximizes SNNL.
    return cross_entropy + self.factor * tf.add_n(layers_SNNL)
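
For reference, the Soft Nearest Neighbor Loss computed per layer by self.SNNL measures how entangled same-class and different-class points are at a given temperature (Frosst et al., arXiv:1902.01889). A hedged numpy sketch of that quantity (ours, for clarity; cleverhans implements the TensorFlow analogue):

import numpy as np

def snnl(x, y, temperature=100.):
  # x: (n, d) hidden features, y: (n,) integer class labels
  d2 = ((x[:, None, :] - x[None, :, :]) ** 2).sum(-1)  # pairwise squared distances
  sim = np.exp(-d2 / temperature)
  np.fill_diagonal(sim, 0.)        # a point is not its own neighbor
  same = y[:, None] == y[None, :]
  np.fill_diagonal(same, False)
  eps = 1e-12
  # Probability of sampling a same-class neighbor for each point.
  p_same = (sim * same).sum(1) / (sim.sum(1) + eps)
  return -np.log(p_same + eps).mean()
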
From tensorflow/cleverhans: cleverhans_tutorials/mnist_tutorial_keras_tf.py (view on GitHub)
  if not os.path.exists(train_dir):
    os.mkdir(train_dir)

  ckpt = tf.train.get_checkpoint_state(train_dir)
  print(train_dir, ckpt)
  ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
  wrap = KerasModelWrapper(model)

  if load_model and ckpt_path:
    saver = tf.train.Saver()
    print(ckpt_path)
    saver.restore(sess, ckpt_path)
    print("Model loaded from: {}".format(ckpt_path))
    evaluate()
  else:
    print("Model was not loaded, training from scratch.")
    loss = CrossEntropy(wrap, smoothing=label_smoothing)
    train(sess, loss, x_train, y_train, evaluate=evaluate,
          args=train_params, rng=rng)

  # Calculate training error
  if testing:
    eval_params = {'batch_size': batch_size}
    acc = model_eval(sess, x, y, preds, x_train, y_train, args=eval_params)
    report.train_clean_train_clean_eval = acc

  # Initialize the Fast Gradient Sign Method (FGSM) attack object and graph
  fgsm = FastGradientMethod(wrap, sess=sess)
  fgsm_params = {'eps': 0.3,
                 'clip_min': 0.,
                 'clip_max': 1.}
  adv_x = fgsm.generate(x, **fgsm_params)
  # Consider the attack to be constant
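
The tutorial then evaluates accuracy on the FGSM adversarial examples. A hedged continuation sketch (model_eval is the real cleverhans.utils_tf helper; preds_adv is our name, and calling the Keras model on the adversarial tensor mirrors how preds is built earlier in the file):

  preds_adv = model(adv_x)
  eval_par = {'batch_size': batch_size}
  acc = model_eval(sess, x, y, preds_adv, x_test, y_test, args=eval_par)
  print('Test accuracy on adversarial examples: %0.4f' % acc)
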
From tensorflow/cleverhans: cleverhans_tutorials/cifar10_tutorial_tf.py (view on GitHub)
    # Calculate training error
    if testing:
      do_eval(preds_adv, x_train, y_train, 'train_clean_train_adv_eval')

    print('Repeating the process, using adversarial training')

  # Create a new model and train it to be robust to FastGradientMethod
  model2 = ModelAllConvolutional('model2', nb_classes, nb_filters,
                                 input_shape=[32, 32, 3])
  fgsm2 = FastGradientMethod(model2, sess=sess)

  def attack(x):
    return fgsm2.generate(x, **fgsm_params)

  loss2 = CrossEntropy(model2, smoothing=label_smoothing, attack=attack)
  preds2 = model2.get_logits(x)
  adv_x2 = attack(x)

  if not backprop_through_attack:
    # For the fgsm attack used in this tutorial, the attack has zero
    # gradient so enabling this flag does not change the gradient.
    # For some other attacks, enabling this flag increases the cost of
    # training, but gives the defender the ability to anticipate how
    # the attacker will change their strategy in response to updates to
    # the defender's parameters.
    adv_x2 = tf.stop_gradient(adv_x2)
  preds2_adv = model2.get_logits(adv_x2)

  def evaluate2():
    # Accuracy of adversarially trained model on legitimate test inputs
    do_eval(preds2, x_test, y_test, 'adv_train_clean_eval', False)
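
The snippet ends inside evaluate2 (the full function also reports accuracy on adversarial inputs). Training then proceeds with the new loss; a hedged sketch of that call, mirroring the clean-training call earlier in the file (train_params and rng are defined in the surrounding tutorial):

  # Adversarial training: loss2 runs the FGSM attack callback on each
  # minibatch and includes the adversarial examples in the cross entropy.
  train(sess, loss2, x_train, y_train, evaluate=evaluate2,
        args=train_params, rng=rng, var_list=model2.get_params())
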
From tensorflow/cleverhans: cleverhans_tutorials/mnist_tutorial_picklable.py (view on GitHub)
    if is_adv is None:
      report_text = None
    elif is_adv:
      report_text = 'adversarial'
    else:
      report_text = 'legitimate'
    if report_text:
      print('Test accuracy on %s examples: %0.4f' % (report_text, acc))

  if clean_train:
    model = make_basic_picklable_cnn()
    # Tag the model so that when it is saved to disk, future scripts will
    # be able to tell what data it was trained on
    model.dataset_factory = mnist.get_factory()
    preds = model.get_logits(x)
    assert len(model.get_params()) > 0
    loss = CrossEntropy(model, smoothing=label_smoothing)

    def evaluate():
      """
      Run evaluation for the naively trained model on clean examples.
      """
      do_eval(preds, x_test, y_test, 'clean_train_clean_eval', False)

    train(sess, loss, x_train, y_train, evaluate=evaluate,
          args=train_params, rng=rng, var_list=model.get_params())

    with sess.as_default():
      save("clean_model.joblib", model)

      print("Now that the model has been saved, you can evaluate it in a"
            " separate process using `evaluate_pickled_model.py`. "
            "You should get exactly the same result for both clean and "