How to use art - 10 common examples

To help you get started, we've selected a few art code examples, based on popular ways the library is used in public projects.
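Before the snippets, here is a minimal sketch of the workflow they all share: wrap a model, attack it, and measure the damage. It follows the legacy `art.classifiers` / `art.attacks` API used throughout the examples below; the tiny model and the `eps` value are illustrative assumptions, not taken from any of the projects.

# Minimal end-to-end sketch using the legacy 0.x API seen in the
# snippets below (art.classifiers / art.attacks). The tiny model and
# the eps value are illustrative assumptions.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten

from art.attacks import FastGradientMethod
from art.classifiers import KerasClassifier
from art.utils import load_mnist

(x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()

model = Sequential()
model.add(Flatten(input_shape=x_train.shape[1:]))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Wrap the Keras model so ART attacks and defences can drive it
classifier = KerasClassifier(model=model, clip_values=(min_, max_))
classifier.fit(x_train, y_train, nb_epochs=1, batch_size=128)

# Craft FGSM adversarial examples and measure the accuracy drop
attack = FastGradientMethod(classifier, eps=0.1)
x_test_adv = attack.generate(x_test)
preds = np.argmax(classifier.predict(x_test_adv), axis=1)
acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
print('Accuracy on adversarial examples: %.2f%%' % (acc * 100))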


IBM / adversarial-robustness-toolbox · tests/poison_detection/test_activation_defence.py (view on GitHub)
# Create a simple Keras CNN for MNIST
import keras.backend as k
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D

k.set_learning_phase(1)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:]))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Wrap the model so ART defences can access its activations
from art.classifiers import KerasClassifier
cls.classifier = KerasClassifier(model=model, clip_values=(min_, max_))

cls.classifier.fit(x_train, y_train, nb_epochs=1, batch_size=128)

# Set up the poison-detection defence on the training data
cls.defence = ActivationDefence(cls.classifier, x_train, y_train)
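The snippet above stops at constructing the defence. A hedged sketch of how it would typically be exercised next; the `detect_poison` parameter names follow the legacy ActivationDefence API and should be verified against your installed version:

# Hedged continuation: detect poisoned training samples by clustering
# the classifier's activations (parameter names follow the legacy
# ActivationDefence API; verify against your installed version).
report, is_clean_lst = cls.defence.detect_poison(nb_clusters=2, nb_dims=10, reduce='PCA')
# is_clean_lst[i] == 1 flags x_train[i] as likely clean, 0 as suspicious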
IBM / adversarial-robustness-toolbox · tests/defences/test_adversarial_trainer.py (view on GitHub)
def test_fit_predict(self):
    (x_train, y_train), (x_test, y_test) = self.mnist

    # Baseline: accuracy of the undefended classifier on FGSM examples
    attack = FastGradientMethod(self.classifier_k)
    x_test_adv = attack.generate(x_test)
    preds = np.argmax(self.classifier_k.predict(x_test_adv), axis=1)
    acc = np.sum(preds == np.argmax(y_test, axis=1)) / NB_TEST

    # Adversarial training on the same attack
    adv_trainer = AdversarialTrainer(self.classifier_k, attack)
    adv_trainer.fit(x_train, y_train, nb_epochs=5, batch_size=128)

    preds_new = np.argmax(adv_trainer.predict(x_test_adv), axis=1)
    acc_new = np.sum(preds_new == np.argmax(y_test, axis=1)) / NB_TEST
    self.assertGreaterEqual(acc_new, acc * ACCURACY_DROP)

    logger.info('Accuracy before adversarial training: %.2f%%', (acc * 100))
    logger.info('Accuracy after adversarial training: %.2f%%', (acc_new * 100))
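For reference, the legacy `AdversarialTrainer` also accepts a `ratio` argument controlling what share of each training batch is replaced with adversarial samples. A hedged variant of the setup above (the 0.5 value is an illustrative assumption; verify the parameter against your installed ART version):

    # Hedged variant: train on a mix of clean and adversarial samples;
    # ratio=0.5 replaces half of every batch (illustrative value).
    adv_trainer = AdversarialTrainer(self.classifier_k, attack, ratio=0.5)
    adv_trainer.fit(x_train, y_train, nb_epochs=5, batch_size=128)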
IBM / adversarial-robustness-toolbox · tests/defences/test_adversarial_trainer.py (view on GitHub)
def test_two_attacks(self):
    (x_train, y_train), (x_test, y_test) = self.mnist

    attack1 = FastGradientMethod(self.classifier_k)
    attack2 = DeepFool(self.classifier_tf)
    x_test_adv = attack1.generate(x_test)
    preds = np.argmax(self.classifier_k.predict(x_test_adv), axis=1)
    acc = np.sum(preds == np.argmax(y_test, axis=1)) / NB_TEST

    # Train against both attacks at once
    adv_trainer = AdversarialTrainer(self.classifier_k, attacks=[attack1, attack2])
    adv_trainer.fit(x_train, y_train, nb_epochs=5, batch_size=128)

    preds_new = np.argmax(adv_trainer.predict(x_test_adv), axis=1)
    acc_new = np.sum(preds_new == np.argmax(y_test, axis=1)) / NB_TEST
    # No reason to assert that the new accuracy is higher; it might drop slightly
    self.assertGreaterEqual(acc_new, acc * ACCURACY_DROP)

    logger.info('Accuracy before adversarial training: %.2f%%', (acc * 100))
    logger.info('Accuracy after adversarial training: %.2f%%', (acc_new * 100))
IBM / adversarial-robustness-toolbox · tests/classifiers/test_keras.py (view on GitHub)
def test_fit(self):
    labels = np.argmax(self.y_test, axis=1)
    classifier = get_classifier_kr()
    accuracy = np.sum(np.argmax(classifier.predict(self.x_test), axis=1) == labels) / NB_TEST
    logger.info('Accuracy: %.2f%%', (accuracy * 100))

    classifier.fit(self.x_train, self.y_train, batch_size=BATCH_SIZE, nb_epochs=2)
    accuracy_2 = np.sum(np.argmax(classifier.predict(self.x_test), axis=1) == labels) / NB_TEST
    logger.info('Accuracy: %.2f%%', (accuracy_2 * 100))

    # Exact values are deterministic because the test suite fixes a master seed
    self.assertEqual(accuracy, 0.32)
    self.assertEqual(accuracy_2, 0.73)
IBM / adversarial-robustness-toolbox · tests/defences/test_adversarial_trainer.py (view on GitHub)
def setUpClass(cls):
    k.clear_session()
    k.set_learning_phase(1)

    # Get MNIST and keep a small subset for fast tests
    (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
    x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST]
    TestBase.mnist = ((x_train, y_train), (x_test, y_test))

    # Create a basic Keras CNN on MNIST
    TestBase.classifier_k = TestBase._cnn_mnist_k(x_train.shape[1:])
    TestBase.classifier_k.fit(x_train, y_train, nb_epochs=2, batch_size=BATCH_SIZE)

    scores = TestBase.classifier_k._model.evaluate(x_train, y_train)
    logger.info('[Keras, MNIST] Accuracy on training set: %.2f%%', (scores[1] * 100))
    scores = TestBase.classifier_k._model.evaluate(x_test, y_test)
    logger.info('[Keras, MNIST] Accuracy on test set: %.2f%%', (scores[1] * 100))

    # Create a basic CNN on MNIST using TensorFlow
    TestBase.classifier_tf = TestBase._cnn_mnist_tf(x_train.shape[1:])
    TestBase.classifier_tf.fit(x_train, y_train, nb_epochs=2, batch_size=BATCH_SIZE)

    scores = get_labels_np_array(TestBase.classifier_tf.predict(x_train))
IBM / adversarial-robustness-toolbox · tests/metrics/test_metrics.py (view on GitHub)
def test_clever_kr(self):
    """
    Test the CLEVER metric with a Keras classifier.
    """
    # Get MNIST
    batch_size, nb_train, nb_test = 100, 1000, 10
    (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
    x_train, y_train = x_train[:nb_train], y_train[:nb_train]
    x_test, y_test = x_test[:nb_test], y_test[:nb_test]

    # Get the classifier
    krc = self._create_krclassifier()
    krc.fit(x_train, y_train, batch_size=batch_size, nb_epochs=1)

    # Targeted CLEVER scores under the L1, L2 and L-infinity norms
    res0 = clever_t(krc, x_test[-1], 2, 10, 5, R_L1, norm=1, pool_factor=3)
    res1 = clever_t(krc, x_test[-1], 2, 10, 5, R_L2, norm=2, pool_factor=3)
    res2 = clever_t(krc, x_test[-1], 2, 10, 5, R_LI, norm=np.inf, pool_factor=3)
    logger.info("Targeted Keras: %f %f %f", res0, res1, res2)
    self.assertNotEqual(res0, res1)
    self.assertNotEqual(res1, res2)
    self.assertNotEqual(res2, res0)
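This test only exercises the targeted variant. The same metrics module provides an untargeted counterpart; a hedged sketch mirroring the calls above, assuming `clever_u` takes the same arguments minus the target class (verify the signature against your ART version):

    # Hedged sketch: untargeted CLEVER scores, mirroring the targeted
    # calls above (clever_u is assumed to take the same arguments minus
    # the target class; verify against your installed ART version).
    res0 = clever_u(krc, x_test[-1], 10, 5, R_L1, norm=1, pool_factor=3)
    res1 = clever_u(krc, x_test[-1], 10, 5, R_L2, norm=2, pool_factor=3)
    res2 = clever_u(krc, x_test[-1], 10, 5, R_LI, norm=np.inf, pool_factor=3)
    logger.info("Untargeted Keras: %f %f %f", res0, res1, res2)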
IBM / adversarial-robustness-toolbox · tests/classifiers/test_tensorflow.py (view on GitHub)
def test_pickle(self):
    classifier, sess = get_classifier_tf()
    full_path = os.path.join(DATA_PATH, 'my_classifier')
    folder = os.path.split(full_path)[0]

    if not os.path.exists(folder):
        os.makedirs(folder)

    # Pickle the classifier (use a context manager so the file is closed)
    with open(full_path, 'wb') as f:
        pickle.dump(classifier, f)

    # Unpickle and check that the state survived the round trip
    with open(full_path, 'rb') as f:
        loaded = pickle.load(f)
        self.assertEqual(classifier._clip_values, loaded._clip_values)
        self.assertEqual(classifier._channel_index, loaded._channel_index)
        self.assertEqual(set(classifier.__dict__.keys()), set(loaded.__dict__.keys()))

    # Test predict
    predictions_1 = classifier.predict(self.x_test)
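The snippet is cut off after the first `predict` call. A hedged sketch of the natural continuation, assuming the test compares the original and unpickled classifiers:

    # Hedged continuation: the unpickled classifier should reproduce the
    # original predictions (assumed check, not in the truncated snippet).
    predictions_2 = loaded.predict(self.x_test)
    np.testing.assert_array_almost_equal(predictions_1, predictions_2)
    os.remove(full_path)  # clean up the pickled file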
IBM / adversarial-robustness-toolbox · tests/test_visualization.py (view on GitHub)
def test_save_image(self):
    (x, _), (_, _), _, _ = load_mnist(raw=True)

    # PNG in the data folder
    f_name = 'image1.png'
    save_image(x[0], f_name)
    path = os.path.join(DATA_PATH, f_name)
    self.assertTrue(os.path.isfile(path))
    os.remove(path)

    # JPEG in the data folder
    f_name = 'image2.jpg'
    save_image(x[1], f_name)
    path = os.path.join(DATA_PATH, f_name)
    self.assertTrue(os.path.isfile(path))
    os.remove(path)

    # PNG in a subfolder that save_image must create
    folder = 'images123456'
    f_name_with_dir = os.path.join(folder, 'image3.png')
    save_image(x[3], f_name_with_dir)
    path = os.path.join(DATA_PATH, f_name_with_dir)
    self.assertTrue(os.path.isfile(path))
    os.remove(path)
    os.rmdir(os.path.split(path)[0])  # Also remove the test folder

    # PNG in a nested subfolder
    folder = os.path.join('images123456', 'inner')
    f_name_with_dir = os.path.join(folder, 'image4.png')
    save_image(x[3], f_name_with_dir)
    path_nested = os.path.join(DATA_PATH, f_name_with_dir)
    self.assertTrue(os.path.isfile(path_nested))
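This snippet is truncated before its cleanup. Removing the nested folder takes more than `os.rmdir` on the leaf directory; a hedged sketch using the standard library:

    # Hedged cleanup sketch (not in the truncated snippet): remove the
    # image and the nested images123456/inner folders.
    import shutil
    os.remove(path_nested)
    shutil.rmtree(os.path.join(DATA_PATH, 'images123456'))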
IBM / adversarial-robustness-toolbox · tests/test_utils.py (view on GitHub)
def test_master_seed_py(self):
    import random

    # Two draws after one seeding differ from each other...
    master_seed(1234)
    x = random.getrandbits(128)
    y = random.getrandbits(128)

    # ...but re-seeding reproduces the first draw exactly
    master_seed(1234)
    z = random.getrandbits(128)
    self.assertNotEqual(x, y)
    self.assertEqual(z, x)
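`master_seed` in ART's test utilities seeds more than Python's `random` module. A hedged standalone sketch of the same reproducibility check with NumPy, assuming `master_seed` also seeds NumPy's global generator:

# Hedged standalone check: master_seed is assumed to also seed NumPy's
# global generator, so re-seeding reproduces the same array.
import numpy as np
from art.utils import master_seed

master_seed(1234)
a = np.random.rand(10)

master_seed(1234)
b = np.random.rand(10)
np.testing.assert_array_equal(a, b)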