How to use the cleverhans.utils.AccuracyReport class in cleverhans

To help you get started, we’ve selected a few cleverhans examples based on popular ways AccuracyReport is used in public projects.
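AccuracyReport (defined in cleverhans.utils) is a small container class: each tutorial instantiates it, assigns accuracy values to its attributes as the model is evaluated, and returns it so callers and unit tests can inspect the numbers. Below is a minimal sketch of that pattern, using only attribute names that appear in the snippets on this page; the accuracy values themselves are placeholders.

from cleverhans.utils import AccuracyReport

report = AccuracyReport()

# Record whichever accuracies the experiment produces (attributes start at 0).
report.train_adv_train_clean_eval = 0.98  # placeholder value
report.adv_train_clean_eval = 0.97        # placeholder value
report.adv_train_adv_eval = 0.90          # placeholder value

# Callers read the same attributes back off the returned object.
print(report.adv_train_adv_eval)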


github tensorflow/cleverhans/examples/multigpu_advtrain/test_run_multigpu.py (View on GitHub)
if extra_flags is not None:
      flags.update(extra_flags)

    # Run the multi-gpu trainer for adversarial training using 2 gpus
    # trainer_multigpu by default sets `allow_soft_placement=True`
    flags.update({'ngpu': 2,
                  'attack_type_train': 'MadryEtAl_y_multigpu',
                  'sync_step': 1})
    HParams = namedtuple('HParams', flags.keys())

    hparams = HParams(**flags)
    np.random.seed(42)
    tf.set_random_seed(42)
    with tf.variable_scope(None, 'runner'):
      report_dict = run_trainer(hparams)
    report_m = AccuracyReport()
    report_m.train_adv_train_clean_eval = report_dict['train']
    report_m.adv_train_clean_eval = report_dict['test']
    report_m.adv_train_adv_eval = report_dict['MadryEtAl_y']

    flags.update({'ngpu': 1, 'attack_type_train': 'MadryEtAl_y'})
    hparams = HParams(**flags)
    np.random.seed(42)
    tf.set_random_seed(42)
    with tf.variable_scope(None, 'runner'):
      report_dict = run_trainer(hparams)
    report_s = AccuracyReport()
    report_s.train_adv_train_clean_eval = report_dict['train']
    report_s.adv_train_clean_eval = report_dict['test']
    report_s.adv_train_adv_eval = report_dict['MadryEtAl_y']

    self.assertClose(report_s.train_adv_train_clean_eval,
                     report_m.train_adv_train_clean_eval,
                     atol=5e-2)
github tensorflow/cleverhans/examples/multigpu_advtrain/test_run_multigpu.py (View on GitHub)
np.random.seed(42)
    tf.set_random_seed(42)
    with tf.variable_scope(None, 'runner'):
      report_dict = run_trainer(hparams)
    report_m = AccuracyReport()
    report_m.train_adv_train_clean_eval = report_dict['train']
    report_m.adv_train_clean_eval = report_dict['test']
    report_m.adv_train_adv_eval = report_dict['MadryEtAl_y']

    flags.update({'ngpu': 1, 'attack_type_train': 'MadryEtAl_y'})
    hparams = HParams(**flags)
    np.random.seed(42)
    tf.set_random_seed(42)
    with tf.variable_scope(None, 'runner'):
      report_dict = run_trainer(hparams)
    report_s = AccuracyReport()
    report_s.train_adv_train_clean_eval = report_dict['train']
    report_s.adv_train_clean_eval = report_dict['test']
    report_s.adv_train_adv_eval = report_dict['MadryEtAl_y']

    self.assertClose(report_s.train_adv_train_clean_eval,
                     report_m.train_adv_train_clean_eval,
                     atol=5e-2)
    self.assertClose(report_s.adv_train_clean_eval,
                     report_m.adv_train_clean_eval,
                     atol=2e-2)
    self.assertClose(report_s.adv_train_adv_eval,
                     report_m.adv_train_adv_eval,
                     atol=5e-2)
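This test trains the same model twice, once on multiple GPUs and once on a single GPU, fills an AccuracyReport from each run, and checks that the recorded accuracies agree within a tolerance. assertClose comes from cleverhans' test utilities; as a rough, hedged stand-in, it behaves like a tolerance check along these lines (the real helper may differ):

import numpy as np

def assert_close(x, y, atol):
  # Fail if the two recorded accuracies differ by more than the tolerance.
  assert np.allclose(x, y, atol=atol), (x, y)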
github tensorflow/cleverhans/cleverhans_tutorials/mnist_tutorial_jsma.py (View on GitHub)
"""
  MNIST tutorial for the Jacobian-based saliency map approach (JSMA)
  :param train_start: index of first training set example
  :param train_end: index of last training set example
  :param test_start: index of first test set example
  :param test_end: index of last test set example
  :param viz_enabled: (boolean) activate plots of adversarial examples
  :param nb_epochs: number of epochs to train model
  :param batch_size: size of training batches
  :param nb_classes: number of output classes
  :param source_samples: number of test inputs to attack
  :param learning_rate: learning rate for training
  :return: an AccuracyReport object
  """
  # Object used to keep track of (and return) key accuracies
  report = AccuracyReport()

  # Set TF random seed to improve reproducibility
  tf.set_random_seed(1234)

  # Create TF session and set as Keras backend session
  sess = tf.Session()
  print("Created TensorFlow session.")

  set_log_level(logging.DEBUG)

  # Get MNIST test data
  mnist = MNIST(train_start=train_start, train_end=train_end,
                test_start=test_start, test_end=test_end)
  x_train, y_train = mnist.get_set('train')
  x_test, y_test = mnist.get_set('test')
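The JSMA snippet is cut off before the report is filled in. As a hedged sketch of how these TF tutorials typically populate it, accuracy is computed with cleverhans.utils_tf.model_eval and stored on the report before it is returned; here x, y, preds, and batch_size stand for the placeholders, model output, and flag defined later in the tutorial (not shown above).

  from cleverhans.utils_tf import model_eval

  # Evaluate the trained model on clean test data and record the accuracy.
  accuracy = model_eval(sess, x, y, preds, x_test, y_test,
                        args={'batch_size': batch_size})
  report.clean_train_clean_eval = accuracy
  # ... craft adversarial examples, re-evaluate, fill in the other fields,
  # and finally the tutorial returns `report`.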
github tensorflow/cleverhans/cleverhans_tutorials/mnist_tutorial_cw.py (View on GitHub)
:param train_start: index of first training set example
  :param train_end: index of last training set example
  :param test_start: index of first test set example
  :param test_end: index of last test set example
  :param viz_enabled: (boolean) activate plots of adversarial examples
  :param nb_epochs: number of epochs to train model
  :param batch_size: size of training batches
  :param nb_classes: number of output classes
  :param source_samples: number of test inputs to attack
  :param learning_rate: learning rate for training
  :param model_path: path to the model file
  :param targeted: should we run a targeted attack or an untargeted one?
  :return: an AccuracyReport object
  """
  # Object used to keep track of (and return) key accuracies
  report = AccuracyReport()

  # Set TF random seed to improve reproducibility
  tf.set_random_seed(1234)

  # Create TF session
  sess = tf.Session()
  print("Created TensorFlow session.")

  set_log_level(logging.DEBUG)

  # Get MNIST test data
  mnist = MNIST(train_start=train_start, train_end=train_end,
                test_start=test_start, test_end=test_end)
  x_train, y_train = mnist.get_set('train')
  x_test, y_test = mnist.get_set('test')
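Because each tutorial returns its AccuracyReport, callers, including cleverhans' own unit tests, can assert against the recorded accuracies. A hedged usage sketch follows; it assumes the enclosing function is named after its module (mnist_tutorial_cw), and the threshold is illustrative rather than the project's actual value.

report = mnist_tutorial_cw(nb_epochs=2, source_samples=10, viz_enabled=False)
assert report.clean_train_clean_eval > 0.9  # illustrative threshold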
github tensorflow/cleverhans/cleverhans_tutorials/mnist_tutorial_picklable.py (View on GitHub)
:param nb_epochs: number of epochs to train model
  :param batch_size: size of training batches
  :param learning_rate: learning rate for training
  :param clean_train: perform normal training on clean examples only
                      before performing adversarial training.
  :param testing: if true, complete an AccuracyReport for unit tests
                  to verify that performance is adequate
  :param backprop_through_attack: If True, backprop through adversarial
                                  example construction process during
                                  adversarial training.
  :param label_smoothing: float, amount of label smoothing for cross entropy
  :return: an AccuracyReport object
  """

  # Object used to keep track of (and return) key accuracies
  report = AccuracyReport()

  # Set TF random seed to improve reproducibility
  tf.set_random_seed(1234)

  # Set logging level to see debug information
  set_log_level(logging.DEBUG)

  # Create TF session
  if num_threads:
    config_args = dict(intra_op_parallelism_threads=1)
  else:
    config_args = {}
  sess = tf.Session(config=tf.ConfigProto(**config_args))

  # Get MNIST test data
  mnist = MNIST(train_start=train_start, train_end=train_end,
                test_start=test_start, test_end=test_end)
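Several docstrings on this page mention a testing flag that makes the tutorial also record training-set accuracies so unit tests can verify performance. A hedged sketch of that pattern, reusing model_eval as in the earlier sketch; x, y, preds, x_train, and y_train are the placeholders, model output, and data defined elsewhere in the tutorial, and train_clean_train_clean_eval is the report's training-set counterpart of clean_train_clean_eval.

  # Only computed when unit tests request it, to keep normal runs fast.
  if testing:
    report.train_clean_train_clean_eval = model_eval(
        sess, x, y, preds, x_train, y_train, args={'batch_size': batch_size})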
github tensorflow/cleverhans/cleverhans_tutorials/mnist_tutorial_tfe.py (View on GitHub)
:param clean_train: perform normal training on clean examples only
                      before performing adversarial training.
  :param testing: if true, complete an AccuracyReport for unit tests
                  to verify that performance is adequate.
  :param backprop_through_attack: If True, backprop through adversarial
                                  example construction process during
                                  adversarial training.
  :param nb_filters: number of filters in the CNN used for training.
  :param num_threads: number of threads used for running the process.
  :param attack_string: attack name for crafting adversarial attacks and
                          adversarial training, in string format.
  :return: an AccuracyReport object
  """

  # Object used to keep track of (and return) key accuracies
  report = AccuracyReport()

  # Set TF random seed to improve reproducibility
  tf.set_random_seed(1234)

  # Set logging level to see debug information
  set_log_level(logging.DEBUG)

  # Get MNIST test data
  mnist = MNIST(train_start=train_start, train_end=train_end,
                test_start=test_start, test_end=test_end)
  X_train, Y_train = mnist.get_set('train')
  X_test, Y_test = mnist.get_set('test')

  # Use label smoothing
  assert Y_train.shape[1] == 10
  label_smooth = .1
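The snippet stops right after setting label_smooth = .1. A hedged continuation sketch, showing one standard way to smooth one-hot labels over 10 classes (not necessarily the tutorial's exact next line):

  # Move label_smooth of the probability mass uniformly onto all 10 classes.
  Y_train = Y_train * (1.0 - label_smooth) + label_smooth / 10.0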
github tensorflow/cleverhans/cleverhans_tutorials/mnist_tutorial_pytorch.py (View on GitHub)
def mnist_tutorial(nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
                   train_end=-1, test_end=-1, learning_rate=LEARNING_RATE):
  """
  MNIST cleverhans tutorial
  :param nb_epochs: number of epochs to train model
  :param batch_size: size of training batches
  :param learning_rate: learning rate for training
  :return: an AccuracyReport object
  """
  # Train a pytorch MNIST model
  torch_model = PytorchMnistModel()
  if torch.cuda.is_available():
    torch_model = torch_model.cuda()
  report = AccuracyReport()

  train_loader = torch.utils.data.DataLoader(
      datasets.MNIST('data', train=True, download=True,
                     transform=transforms.ToTensor()),
      batch_size=batch_size, shuffle=True)
  test_loader = torch.utils.data.DataLoader(
      datasets.MNIST('data', train=False, transform=transforms.ToTensor()),
      batch_size=batch_size)

  # Truncate the datasets so that our tests run more quickly
  train_loader.dataset.train_data = train_loader.dataset.train_data[
      :train_end]
  test_loader.dataset.test_data = test_loader.dataset.test_data[:test_end]

  # Train our model
  optimizer = optim.Adam(torch_model.parameters(), lr=learning_rate)
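The PyTorch snippet ends at the optimizer. As a hedged sketch of how the clean test accuracy can then be measured and stored on the report before it is returned (continuing the snippet's context and assuming torch_model outputs per-class scores):

  # Evaluate on the (truncated) test set and record the clean accuracy.
  correct, total = 0, 0
  for xs, ys in test_loader:
    if torch.cuda.is_available():
      xs, ys = xs.cuda(), ys.cuda()
    scores = torch_model(xs)
    correct += (scores.argmax(dim=1) == ys).sum().item()
    total += ys.size(0)
  report.clean_train_clean_eval = correct / total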
github tensorflow/cleverhans/examples/iterative_advtrain/train.py (View on GitHub)
:param nb_epochs: number of epochs to train model
    :param batch_size: size of training batches
    :param learning_rate: learning rate for training
    :param clean_train: perform normal training on clean examples only
                        before performing adversarial training.
    :param testing: if true, complete an AccuracyReport for unit tests
                    to verify that performance is adequate
    :param backprop_through_attack: If True, backprop through adversarial
                                    example construction process during
                                    adversarial training.
    :return: an AccuracyReport object
    """

    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    # Set logging level to see debug information
    set_log_level(logging.DEBUG)

    # Create TF session
    sess = tf.Session()

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
                                                  train_end=train_end,
                                                  test_start=test_start,
                                                  test_end=test_end)
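When adversarial training is enabled, the corresponding report fields are filled the same way. A hedged sketch follows; preds_adv stands for the model's output on adversarial examples and eval_params for an args dict such as {'batch_size': batch_size}, neither of which appears in the snippet above.

    from cleverhans.utils_tf import model_eval

    # Accuracy of the adversarially trained model on clean and adversarial inputs.
    report.adv_train_clean_eval = model_eval(
        sess, x, y, preds, X_test, Y_test, args=eval_params)
    report.adv_train_adv_eval = model_eval(
        sess, x, y, preds_adv, X_test, Y_test, args=eval_params)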
github tensorflow/cleverhans/cleverhans/model_zoo/soft_nearest_neighbor_loss/SNNL_regularized_train.py (View on GitHub)
the adversarial gradients of a trained model. A model with a negative
  SNNL_factor will show little or no class clusters, while a model with a
  0 SNNL_factor will have class clusters in the adversarial gradient direction.
  :param train_start: index of first training set example
  :param train_end: index of last training set example
  :param test_start: index of first test set example
  :param test_end: index of last test set example
  :param nb_epochs: number of epochs to train model
  :param batch_size: size of training batches
  :param learning_rate: learning rate for training
  :param SNNL_factor: multiplier for Soft Nearest Neighbor Loss
  :return: an AccuracyReport object
  """

  # Object used to keep track of (and return) key accuracies
  report = AccuracyReport()

  # Set TF random seed to improve reproducibility
  tf.set_random_seed(1234)

  # Set logging level to see debug information
  set_log_level(logging.DEBUG)

  # Create TF session
  sess = tf.Session()

  # Get MNIST data
  mnist = MNIST(train_start=train_start, train_end=train_end,
                test_start=test_start, test_end=test_end)
  x_train, y_train = mnist.get_set('train')
  x_test, y_test = mnist.get_set('test')
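The SNNL tutorial's docstring describes how SNNL_factor shapes class clusters in the adversarial gradient direction. For intuition, here is a small, self-contained NumPy illustration of the soft nearest neighbor loss itself (Frosst et al.); it is a sketch with an illustrative temperature, not cleverhans' own implementation.

import numpy as np

def soft_nearest_neighbor_loss(x, y, temperature=100.0):
  # x: (batch, features) activations; y: (batch,) integer class labels.
  d = np.sum((x[:, None, :] - x[None, :, :]) ** 2, axis=-1)  # pairwise squared distances
  sim = np.exp(-d / temperature)
  np.fill_diagonal(sim, 0.0)  # a point is never its own neighbor
  same = (y[:, None] == y[None, :]).astype(sim.dtype)
  np.fill_diagonal(same, 0.0)
  eps = 1e-12
  # Per point: log-probability that a soft nearest neighbor shares its class.
  per_point = (np.log(np.sum(sim * same, axis=1) + eps)
               - np.log(np.sum(sim, axis=1) + eps))
  return -np.mean(per_point)

# Two well-separated classes yield a low loss; mixed classes yield a higher one.
x = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
y = np.array([0, 0, 1, 1])
print(soft_nearest_neighbor_loss(x, y, temperature=1.0))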
github tensorflow/cleverhans/cleverhans_tutorials/mnist_tutorial_tf.py (View on GitHub)
:param nb_epochs: number of epochs to train model
  :param batch_size: size of training batches
  :param learning_rate: learning rate for training
  :param clean_train: perform normal training on clean examples only
                      before performing adversarial training.
  :param testing: if true, complete an AccuracyReport for unit tests
                  to verify that performance is adequate
  :param backprop_through_attack: If True, backprop through adversarial
                                  example construction process during
                                  adversarial training.
  :param label_smoothing: float, amount of label smoothing for cross entropy
  :return: an AccuracyReport object
  """

  # Object used to keep track of (and return) key accuracies
  report = AccuracyReport()

  # Set TF random seed to improve reproducibility
  tf.set_random_seed(1234)

  # Set logging level to see debug information
  set_log_level(logging.DEBUG)

  # Create TF session
  if num_threads:
    config_args = dict(intra_op_parallelism_threads=1)
  else:
    config_args = {}
  sess = tf.Session(config=tf.ConfigProto(**config_args))

  # Get MNIST data
  mnist = MNIST(train_start=train_start, train_end=train_end,
                test_start=test_start, test_end=test_end)