How to use the art.utils.load_mnist function in art

To help you get started, we’ve selected a few examples of art.utils.load_mnist based on popular ways it is used in public projects.

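Each snippet below relies on the same call signature, so here is a minimal sketch of it up front. This is only an illustration of the pattern visible in the snippets; exact array shapes and dtypes depend on your version of art.

from art.utils import load_mnist

# Default call: preprocessed train/test splits plus the feature range (min_, max_).
# Labels are returned one-hot encoded; the provenance-defence snippet below
# converts them back with np.argmax.
(x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()

# raw=True returns the unprocessed images instead; the poisoning example at the
# end re-adds the channel axis manually with np.expand_dims.
(x_raw, y_raw), (x_raw_test, y_raw_test), min_, max_ = load_mnist(raw=True)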

github IBM / adversarial-robustness-toolbox / tests / defences / test_adversarial_trainer.py
def setUpClass(cls):
        k.clear_session()
        k.set_learning_phase(1)

        # Get MNIST
        (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
        x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST]
        TestBase.mnist = ((x_train, y_train), (x_test, y_test))

        TestBase.classifier_k = TestBase._cnn_mnist_k(x_train.shape[1:])
        TestBase.classifier_k.fit(x_train, y_train, nb_epochs=2, batch_size=BATCH_SIZE)

        scores = TestBase.classifier_k._model.evaluate(x_train, y_train)
        logger.info('[Keras, MNIST] Accuracy on training set: %.2f%%', (scores[1] * 100))
        scores = TestBase.classifier_k._model.evaluate(x_test, y_test)
        logger.info('[Keras, MNIST] Accuracy on test set: %.2f%%', (scores[1] * 100))

        # Create basic CNN on MNIST using TensorFlow
        TestBase.classifier_tf = TestBase._cnn_mnist_tf(x_train.shape[1:])
        TestBase.classifier_tf.fit(x_train, y_train, nb_epochs=2, batch_size=BATCH_SIZE)

        scores = get_labels_np_array(TestBase.classifier_tf.predict(x_train))
github IBM / adversarial-robustness-toolbox / tests / metrics / test_metrics.py
def test_clever_kr(self):
        """
        Test with keras.
        :return:
        """
        # Get MNIST
        batch_size, nb_train, nb_test = 100, 1000, 10
        (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
        x_train, y_train = x_train[:nb_train], y_train[:nb_train]
        x_test, y_test = x_test[:nb_test], y_test[:nb_test]

        # Get the classifier
        krc = self._create_krclassifier()
        krc.fit(x_train, y_train, batch_size=batch_size, nb_epochs=1)

        # Test targeted clever
        res0 = clever_t(krc, x_test[-1], 2, 10, 5, R_L1, norm=1, pool_factor=3)
        res1 = clever_t(krc, x_test[-1], 2, 10, 5, R_L2, norm=2, pool_factor=3)
        res2 = clever_t(krc, x_test[-1], 2, 10, 5, R_LI, norm=np.inf, pool_factor=3)
        logger.info("Targeted Keras: %f %f %f", res0, res1, res2)
        self.assertNotEqual(res0, res1)
        self.assertNotEqual(res1, res2)
        self.assertNotEqual(res2, res0)
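All three clever_t calls score the same test point and differ only in the norm (1, 2 and np.inf) and the matching radius constant; the assertions merely check that the three estimates are distinct.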
github IBM / adversarial-robustness-toolbox / tests / defences / test_jpeg_compression.py
def test_one_channel(self):
        clip_values = (0, 1)
        (x_train, _), (_, _), _, _ = load_mnist()
        x_train = x_train[:2]
        preprocess = JpegCompression(clip_values=clip_values, quality=70)
        x_compressed, _ = preprocess(x_train)
        self.assertEqual(x_compressed.shape, x_train.shape)
        self.assertTrue((x_compressed >= clip_values[0]).all())
        self.assertTrue((x_compressed <= clip_values[1]).all())
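The assertions spell out what the test expects from the defence: JpegCompression returns an array with the same shape as its input, with all values inside the given clip_values range.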
github IBM / adversarial-robustness-toolbox / tests / classifiers / test_tensorflow.py
def setUpClass(cls):
        # Get MNIST
        (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
        x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
        x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST]
        cls.mnist = (x_train, y_train), (x_test, y_test)

        cls.classifier, cls.sess = get_classifier_tf()
github IBM / adversarial-robustness-toolbox / tests / classifiers / test_mxnet.py
def setUpClass(cls):
        # Get MNIST
        (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
        x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
        x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST]
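        # Gluon's Conv2D expects channels-first (NCHW) input, so swap the channel
        # axis (last in the arrays returned by load_mnist) to the front.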
        x_train = np.swapaxes(x_train, 1, 3)
        x_test = np.swapaxes(x_test, 1, 3)
        cls.mnist = (x_train, y_train), (x_test, y_test)

        # Create a simple CNN - this one comes from the Gluon tutorial
        net = nn.Sequential()
        with net.name_scope():
            net.add(
                nn.Conv2D(channels=6, kernel_size=5, activation='relu'),
                nn.MaxPool2D(pool_size=2, strides=2),
                nn.Conv2D(channels=16, kernel_size=3, activation='relu'),
                nn.MaxPool2D(pool_size=2, strides=2),
                nn.Flatten(),
                nn.Dense(120, activation="relu"),
github IBM / adversarial-robustness-toolbox / tests / test_visualization.py
def test_sprites_gray(self):
        # Get MNIST
        (x, _), (_, _), _, _ = load_mnist(raw=True)
        n = 100
        x = x[:n]

        sprite = create_sprite(x)
        f_name = 'test_sprite_mnist.png'
        path = os.path.join(DATA_PATH, f_name)
        save_image(sprite, path)
        self.assertTrue(os.path.isfile(path))

        os.remove(path)  # Remove data added
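Here load_mnist(raw=True) supplies the unprocessed images to create_sprite; the sprite is written under DATA_PATH and deleted again once the test has confirmed the file exists.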
github IBM / adversarial-robustness-toolbox / tests / test_utils.py
def test_projection(self):
        # Get MNIST
        (x, _), (_, _), _, _ = load_mnist()

        # Probably don't need to test everything
        x = x[:100]
        t = tuple(range(1, len(x.shape)))
        rand_sign = 1 - 2 * np.random.randint(0, 2, size=x.shape)

        x_proj = projection(rand_sign * x, 3.14159, 1)
        self.assertEqual(x.shape, x_proj.shape)
        self.assertTrue(np.allclose(np.sum(np.abs(x_proj), axis=t), 3.14159, atol=10e-8))

        x_proj = projection(rand_sign * x, 3.14159, 2)
        self.assertEqual(x.shape, x_proj.shape)
        self.assertTrue(np.allclose(np.sqrt(np.sum(x_proj ** 2, axis=t)), 3.14159, atol=10e-8))

        x_proj = projection(rand_sign * x, 0.314159, np.inf)
        self.assertEqual(x.shape, x_proj.shape)
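The three projection calls project rand_sign * x onto balls of the given radius under the 1, 2 and np.inf norms; for the L1 and L2 cases the test also verifies that the projected norms match the requested radius.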
github IBM / adversarial-robustness-toolbox / tests / poison_detection / test_provenance_defence.py
def setUpClass(cls):
        master_seed(301)
        (x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()
        y_train = np.argmax(y_train, axis=1)
        y_test = np.argmax(y_test, axis=1)
        zero_or_four = np.logical_or(y_train == 4, y_train == 0)
        x_train = x_train[zero_or_four]
        y_train = y_train[zero_or_four]
        tr_labels = np.zeros((y_train.shape[0], 2))
        tr_labels[y_train == 0] = np.array([1, 0])
        tr_labels[y_train == 4] = np.array([0, 1])
        y_train = tr_labels

        zero_or_four = np.logical_or(y_test == 4, y_test == 0)
        x_test = x_test[zero_or_four]
        y_test = y_test[zero_or_four]
        te_labels = np.zeros((y_test.shape[0], 2))
        te_labels[y_test == 0] = np.array([1, 0])
        te_labels[y_test == 4] = np.array([0, 1])
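This fixture turns MNIST into a binary 0-versus-4 problem: it keeps only those two digits and re-encodes the labels as two-column one-hot vectors for both the training and test splits.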
github IBM / adversarial-robustness-toolbox / examples / mnist_poison_detection.py
def main():
    # Read MNIST dataset (x_raw contains the original images):
    (x_raw, y_raw), (x_raw_test, y_raw_test), min_, max_ = load_mnist(raw=True)

    n_train = np.shape(x_raw)[0]
    num_selection = 5000
    random_selection_indices = np.random.choice(n_train, num_selection)
    x_raw = x_raw[random_selection_indices]
    y_raw = y_raw[random_selection_indices]

    # Poison training data
    perc_poison = .33
    (is_poison_train, x_poisoned_raw, y_poisoned_raw) = generate_backdoor(x_raw, y_raw, perc_poison)
    x_train, y_train = preprocess(x_poisoned_raw, y_poisoned_raw)
    # Add channel axis:
    x_train = np.expand_dims(x_train, axis=3)

    # Poison test data
    (is_poison_test, x_poisoned_raw_test, y_poisoned_raw_test) = generate_backdoor(x_raw_test, y_raw_test, perc_poison)