How to use the art.attacks.fast_gradient.FastGradientMethod class in art

To help you get started, we’ve selected a few art examples based on popular ways FastGradientMethod is used in public projects.

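Before the project-specific examples below, here is a minimal sketch of the typical workflow: wrap a trained model in an ART classifier, instantiate FastGradientMethod, and call generate. This sketch is not taken from the examples; `model`, `x_test` and `y_test` are assumed to exist already (e.g. a Keras model trained on inputs scaled to [0, 1]), and `clip_values` should match your input range.

import numpy as np
from art.classifiers import KerasClassifier
from art.attacks.fast_gradient import FastGradientMethod

# Wrap the trained Keras model (assumed to exist as `model`) in an ART classifier.
classifier = KerasClassifier(model=model, clip_values=(0, 1))

# Craft untargeted FGSM adversarial examples with maximum perturbation `eps`.
attack = FastGradientMethod(classifier, eps=0.1)
x_test_adv = attack.generate(x=x_test)

# Measure accuracy on the adversarial examples (`y_test` assumed one-hot encoded).
preds = np.argmax(classifier.predict(x_test_adv), axis=1)
acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
print('Accuracy on adversarial examples: %.2f%%' % (acc * 100))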

github IBM / adversarial-robustness-toolbox / tests / wrappers / test_query_efficient_bb.py
def test_without_defences(self):
        (x_train, y_train), (x_test, y_test) = self.mnist

        # Get the ready-trained Keras model and wrap it in query efficient gradient estimator wrapper
        classifier = QueryEfficientBBGradientEstimation(self.classifier_k, 20, 1 / 64., round_samples=1 / 255.)

        attack = FastGradientMethod(classifier, eps=1)
        x_train_adv = attack.generate(x_train)
        x_test_adv = attack.generate(x_test)

        self.assertFalse((x_train == x_train_adv).all())
        self.assertFalse((x_test == x_test_adv).all())

        train_y_pred = get_labels_np_array(classifier.predict(x_train_adv))
        test_y_pred = get_labels_np_array(classifier.predict(x_test_adv))

        self.assertFalse((y_train == train_y_pred).all())
        self.assertFalse((y_test == test_y_pred).all())

        preds = classifier.predict(x_train_adv)
        acc = np.sum(np.argmax(preds, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0]
        logger.info('Accuracy on adversarial train examples with limited query info: %.2f%%',
                    (acc * 100))
github IBM / adversarial-robustness-toolbox / tests / wrappers / test_query_efficient_bb.py
def test_iris_clipped(self):
        (_, _), (x_test, y_test) = self.iris

        classifier, _ = get_iris_classifier_kr()
        classifier = QueryEfficientBBGradientEstimation(classifier, 20, 1 / 64., round_samples=1 / 255.)

        # Test untargeted attack
        attack = FastGradientMethod(classifier, eps=.1)
        x_test_adv = attack.generate(x_test)
        self.assertFalse((x_test == x_test_adv).all())
        self.assertTrue((x_test_adv <= 1).all())
        self.assertTrue((x_test_adv >= 0).all())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info('Accuracy on Iris with limited query info: %.2f%%', (acc * 100))
github IBM / adversarial-robustness-toolbox / tests / wrappers / test_query_efficient_bb.py
def test_iris_unbounded(self):
        (_, _), (x_test, y_test) = self.iris
        classifier, _ = get_iris_classifier_kr()

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)
        classifier = QueryEfficientBBGradientEstimation(classifier, 20, 1 / 64., round_samples=1 / 255.)
        attack = FastGradientMethod(classifier, eps=1)
        x_test_adv = attack.generate(x_test)
        self.assertFalse((x_test == x_test_adv).all())
        self.assertTrue((x_test_adv > 1).any())
        self.assertTrue((x_test_adv < 0).any())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info('Accuracy on Iris with limited query info: %.2f%%', (acc * 100))
github TrustAI / DeepConcolic / src / run_ssc.py
def run_svc(test_object, outs):
  print ('To run svc\n')
  
  f_results, cover_layers, _ = ssc_setup (test_object, outs)

  ## define a global attacker
  classifier = KerasClassifier(clip_values=(MIN, -MIN), model=test_object.dnn)
  adv_crafter = FastGradientMethod(classifier)

  test_cases=[]
  adversarials=[]

  count=0

  while True:
    dec_layer_index, dec_pos=get_ssc_next(cover_layers)

    if dec_layer_index==1 and is_input_layer(test_object.dnn.layers[0]): continue
    print ('dec_layer_index', cover_layers[dec_layer_index].layer_index)

    ###
    cond_layer=cover_layers[dec_layer_index-1]
    dec_layer=cover_layers[dec_layer_index]
    cond_cover=np.ones(cond_layer.ssc_map.shape, dtype=bool)
github IBM / adversarial-robustness-toolbox / art / attacks / fast_gradient.py
:param eps: Attack step size (input variation)
        :type eps: `float`
        :param eps_step: Step size of input variation for minimal perturbation computation
        :type eps_step: `float`
        :param targeted: Should the attack target one specific class
        :type targeted: `bool`
        :param num_random_init: Number of random initialisations within the epsilon ball. For random_init=0 starting at
            the original input.
        :type num_random_init: `int`
        :param batch_size: Batch size
        :type batch_size: `int`
        :param minimal: Flag to compute the minimal perturbation.
        :type minimal: `bool`
        """
        # Save attack-specific parameters
        super(FastGradientMethod, self).set_params(**kwargs)

        # Check if order of the norm is acceptable given current implementation
        if self.norm not in [np.inf, int(1), int(2)]:
            raise ValueError('Norm order must be either `np.inf`, 1, or 2.')

        if self.eps <= 0:
            raise ValueError('The perturbation size `eps` has to be positive.')

        if self.eps_step <= 0:
            raise ValueError('The perturbation step-size `eps_step` has to be positive.')

        if not isinstance(self.targeted, bool):
            raise ValueError('The flag `targeted` has to be of type bool.')

        if not isinstance(self.num_random_init, (int, np.int)):
            raise TypeError('The number of random initialisations has to be of type integer')
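The set_params docstring above lists the attack's configurable parameters. As a rough sketch (assuming `classifier` is an ART classifier that exposes gradients, e.g. a KerasClassifier), all of them can also be passed to the constructor, which forwards them to set_params:

import numpy as np
from art.attacks.fast_gradient import FastGradientMethod

attack = FastGradientMethod(classifier,
                            norm=np.inf,        # norm of the perturbation: np.inf, 1 or 2
                            eps=0.3,            # attack step size (input variation)
                            eps_step=0.1,       # step size for minimal-perturbation computation
                            targeted=False,     # untargeted attack
                            num_random_init=0,  # 0 = start at the original input
                            batch_size=128,     # batch size for generation
                            minimal=False)      # do not search for the minimal perturbation
x_adv = attack.generate(x=x_test)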
github IBM / adversarial-robustness-toolbox / art / attacks / fast_gradient.py
:param eps: Attack step size (input variation)
        :type eps: `float`
        :param eps_step: Step size of input variation for minimal perturbation computation
        :type eps_step: `float`
        :param targeted: Indicates whether the attack is targeted (True) or untargeted (False)
        :type targeted: `bool`
        :param num_random_init: Number of random initialisations within the epsilon ball. For random_init=0 starting at
            the original input.
        :type num_random_init: `int`
        :param batch_size: Size of the batch on which adversarial samples are generated.
        :type batch_size: `int`
        :param minimal: Indicates if computing the minimal perturbation (True). If True, also define `eps_step` for
                        the step size and eps for the maximum perturbation.
        :type minimal: `bool`
        """
        super(FastGradientMethod, self).__init__(classifier)
        if not isinstance(classifier, ClassifierGradients):
            raise (TypeError('For `' + self.__class__.__name__ + '` classifier must be an instance of '
                             '`art.classifiers.classifier.ClassifierGradients`, the provided classifier is instance of '
                             + str(classifier.__class__.__bases__) + '. '
                             ' The classifier needs to provide gradients.'))

        kwargs = {'norm': norm, 'eps': eps, 'eps_step': eps_step, 'targeted': targeted,
                  'num_random_init': num_random_init, 'batch_size': batch_size, 'minimal': minimal}
        FastGradientMethod.set_params(self, **kwargs)

        self._project = True
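To illustrate the `minimal` flag documented above: when minimal=True, the attack grows the perturbation in increments of `eps_step` until the prediction changes, using `eps` as the upper bound. A small sketch under the same assumptions as before:

# Search for the smallest FGSM perturbation (up to eps) that changes each prediction.
attack_minimal = FastGradientMethod(classifier, eps=1.0, eps_step=0.05, minimal=True)
x_adv_minimal = attack_minimal.generate(x=x_test)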
github TrustAI / DeepConcolic / src / mcdc.py
if is_dense_layer(cover_layers[i].layer) or not (csp[1]==dsp[1] and csp[2]==dsp[2]):
          tmp_decs=cover_layers[i].ssc_map.size
        else:
          ks=cover_layers[i].layer.kernel_size
          dsp=cover_layers[i].ssc_map.shape
          tmp_decs=((dsp[1]-ks[0]+1)*(dsp[2]-ks[1]+1)*dsp[3])
        if is_conv_layer(cover_layers[i].layer):
          if not test_object.feature_indices==None:
             tmp_decs=tmp_decs*(len(test_object.feature_indices)*1.0/dsp[3])
        tot_decs+=tmp_decs
  tot_coverage=0.0

  ## define a global attacker
  #classifier=KerasClassifier((MIN, -MIN), model=test_object.dnn)
  classifier=KerasClassifier(test_object.dnn)
  adv_crafter = FastGradientMethod(classifier)

  test_cases=[]
  adversarials=[]
  count=0

  while count<1000:
    dec_layer_index, dec_pos=get_ssc_next(cover_layers)
    cover_layers[dec_layer_index].ssc_map.itemset(dec_pos, False)
    if dec_layer_index==1 and is_input_layer(test_object.dnn.layers[0]): continue
    #print (dec_layer_index, dec_pos)
    ###
    cond_layer=cover_layers[dec_layer_index-1]
    dec_layer=cover_layers[dec_layer_index]
    cond_cover=np.zeros(cond_layer.ssc_map.shape, dtype=bool)
    ###
github IBM / adversarial-robustness-toolbox / generate_batch.py
SAVE_ADV = os.path.join(os.path.abspath(args.save), args.adv_method, "batch%s" % args.batch_idx)
    make_directory(SAVE_ADV)

    with open(os.path.join(SAVE_ADV, "readme.txt"), "w") as wfile:
        wfile.write("Model used for crafting the adversarial examples is in " + MODEL_PATH)

    v_print("Adversarials crafted with", args.adv_method, "on", MODEL_PATH, "will be saved in", SAVE_ADV)

if args.adv_method in ['fgsm', "vat", "rnd_fgsm"]:

    eps_ranges = {'fgsm': [e / 10 for e in range(1, 11)],
                  'rnd_fgsm': [e / 10 for e in range(1, 11)],
                  'vat': [1.5, 2.1, 5, 7, 10]}

    if args.adv_method in ["fgsm", "rnd_fgsm"]:
        adv_crafter = FastGradientMethod(model=classifier, sess=session)
    else:
        adv_crafter = VirtualAdversarialMethod(classifier, sess=session)

    for eps in eps_ranges[args.adv_method]:
        if args.adv_method == "rnd_fgsm":
            x_test = np.clip(X_test + alpha * np.sign(np.random.randn(*X_test.shape)), min_, max_)
            e = eps - alpha
        else:
            x_test = X_test
            e = eps

        X_test_adv = adv_crafter.generate(x_val=x_test, eps=e, clip_min=min_, clip_max=max_)

        if args.save:
            np.save(os.path.join(SAVE_ADV, "eps%.2f_test.npy" % eps), X_test_adv)
github IBM / adversarial-robustness-toolbox / art / attacks / fast_gradient.py
:type num_random_init: `int`
        :param batch_size: Batch size
        :type batch_size: `int`
        :param minimal: Flag to compute the minimal perturbation.
        :type minimal: `bool`

        """
        super(FastGradientMethod, self).__init__(classifier)
        if not isinstance(classifier, ClassifierGradients):
            raise (TypeError('For `' + self.__class__.__name__ + '` classifier must be an instance of '
                             '`art.classifiers.classifier.ClassifierGradients`, the provided classifier is instance of '
                             + str(classifier.__class__.__bases__) + '.'))

        kwargs = {'norm': norm, 'eps': eps, 'eps_step': eps_step, 'targeted': targeted,
                  'num_random_init': num_random_init, 'batch_size': batch_size, 'minimal': minimal}
        FastGradientMethod.set_params(self, **kwargs)

        self._project = True
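For the `targeted` flag documented above, the target classes are supplied to generate via `y`. A hedged sketch, where `target_labels` is a hypothetical one-hot array of the classes the attack should steer each sample towards:

# Targeted FGSM: push each sample towards the class given in target_labels.
attack_targeted = FastGradientMethod(classifier, eps=0.3, targeted=True)
x_adv_targeted = attack_targeted.generate(x=x_test, y=target_labels)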
github IBM / adversarial-robustness-toolbox / mlops / kubeflow / robustness_evaluation_fgsm_pytorch / src / robustness.py
else:
        loss_fn = torch.nn.CrossEntropyLoss()
    if Optimizer:
        optimizer = eval(Optimizer)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    # create pytorch classifier
    classifier = PyTorchClassifier(clip_values, model, loss_fn, optimizer, input_shape, nb_classes)

    # load test dataset
    x = np.load(dataset_filenamex)
    y = np.load(dataset_filenamey)

    # craft adversarial samples using FGSM
    crafter = FastGradientMethod(classifier, eps=epsilon)
    x_samples = crafter.generate(x)

    # obtain all metrics (robustness score, perturbation metric, reduction in confidence)
    metrics, y_pred_orig, y_pred_adv = get_metrics(model, x, x_samples, y)

    print("metrics:", metrics)
    return metrics