How to use the art.classifiers.KerasClassifier function in art

To help you get started, we’ve selected a few examples showing how art’s KerasClassifier is used in popular public projects.
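
Before diving into the project snippets below, here is a minimal sketch of the typical pattern: build and compile a Keras model, wrap it in KerasClassifier, then train and predict through the wrapper. The tiny architecture and the (0, 1) clip range are illustrative assumptions, not taken from any of the projects.

from keras.models import Sequential
from keras.layers import Dense, Flatten
from art.classifiers import KerasClassifier

# Any compiled Keras model will do; this MNIST-shaped model is just a placeholder.
model = Sequential([
    Flatten(input_shape=(28, 28, 1)),
    Dense(10, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# clip_values describes the valid range of the input features (pixels scaled to [0, 1] here).
classifier = KerasClassifier(model=model, clip_values=(0, 1))

# With training data at hand:
# classifier.fit(x_train, y_train, nb_epochs=1, batch_size=128)
# predictions = classifier.predict(x_test)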


github IBM / adversarial-robustness-toolbox / tests / poison_detection / test_activation_defence.py
# Create simple keras model
        import keras.backend as k
        from keras.models import Sequential
        from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D

        k.set_learning_phase(1)
        model = Sequential()
        model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:]))
        model.add(MaxPooling2D(pool_size=(3, 3)))
        model.add(Flatten())
        model.add(Dense(10, activation='softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

        from art.classifiers import KerasClassifier
        cls.classifier = KerasClassifier(model=model, clip_values=(min_, max_))

        cls.classifier.fit(x_train, y_train, nb_epochs=1, batch_size=128)

        cls.defence = ActivationDefence(cls.classifier, x_train, y_train)
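
A natural next step, not shown in this excerpt, is to run the defence's clustering-based detection. The call below is a sketch of ART's ActivationDefence.detect_poison API; the clustering parameters are assumptions, not values taken from the test.

# Cluster the classifier's activations on the training data and flag suspicious points.
report, is_clean_lst = cls.defence.detect_poison(nb_clusters=2, nb_dims=10, reduce='PCA')
# is_clean_lst[i] == 1 means training point i was judged clean, 0 means suspected poison.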
github IBM / adversarial-robustness-toolbox / tests / metrics / test_metrics.py
# Initialize a tf session
        session = tf.Session()
        k.set_session(session)

        # Create simple CNN
        model = Sequential()
        model.add(Conv2D(4, kernel_size=(5, 5), activation='relu', input_shape=(28, 28, 1)))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(10, activation='softmax'))

        model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01),
                      metrics=['accuracy'])

        # Get the classifier
        krc = KerasClassifier(model=model, clip_values=(0, 1), use_logits=False)

        return krc
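
The classifier returned by this helper is then fed to ART's metric functions elsewhere in the test. As a hedged illustration (x_test and the FGSM step size are assumptions, not part of the excerpt), empirical robustness could be computed like this:

from art.metrics import empirical_robustness

# Average minimal perturbation that FGSM needs to flip the classifier's
# predictions on x_test, normalised by the input magnitude.
robustness = empirical_robustness(krc, x_test, attack_name='fgsm', attack_params={'eps_step': 0.1})
print('Empirical robustness (FGSM):', robustness)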
github IBM / adversarial-robustness-toolbox / tests / classifiers / test_keras_tf.py
def test_functional_model(self):
        # Need to update the functional_model code to produce a model with more than one input and output layers...
        keras_model = KerasClassifier(self.functional_model, clip_values=(0, 1), input_layer=1, output_layer=1)
        self.assertIn("input1", keras_model._input.name)
        self.assertIn("output1", keras_model._output.name)

        keras_model = KerasClassifier(self.functional_model, clip_values=(0, 1), input_layer=0, output_layer=0)
        self.assertIn("input0", keras_model._input.name)
        self.assertIn("output0", keras_model._output.name)
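
self.functional_model itself is not part of the excerpt. A hypothetical reconstruction, for illustration only, is a Keras functional model with two named input/output pairs, so that input_layer=1 / output_layer=1 select the second pair:

from keras.layers import Input, Dense, Flatten
from keras.models import Model

# Two independent branches whose layer names match the assertions above.
in0 = Input(shape=(28, 28, 1), name='input0')
in1 = Input(shape=(28, 28, 1), name='input1')
out0 = Dense(10, activation='softmax', name='output0')(Flatten()(in0))
out1 = Dense(10, activation='softmax', name='output1')(Flatten()(in1))

functional_model = Model(inputs=[in0, in1], outputs=[out0, out1])
functional_model.compile(loss='categorical_crossentropy', optimizer='adam')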
github IBM / adversarial-robustness-toolbox / tests / attacks / test_fast_gradient.py
def test_with_defences(self):
        # Get the ready-trained Keras model
        model = self.classifier_k._model
        fs = FeatureSqueezing(bit_depth=1, clip_values=(0, 1))
        classifier = KerasClassifier(model=model, clip_values=(0, 1), defences=fs)

        attack = FastGradientMethod(classifier, eps=1, batch_size=128)
        x_train_adv = attack.generate(self.x_train)
        x_test_adv = attack.generate(self.x_test)

        self.assertFalse((self.x_train == x_train_adv).all())
        self.assertFalse((self.x_test == x_test_adv).all())

        train_y_pred = get_labels_np_array(classifier.predict(x_train_adv))
        test_y_pred = get_labels_np_array(classifier.predict(x_test_adv))

        self.assertFalse((self.y_train == train_y_pred).all())
        self.assertFalse((self.y_test == test_y_pred).all())

        predictions = classifier.predict(x_train_adv)
        accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(self.y_train, axis=1)) / self.y_train.shape[0]
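
The accuracy computed on the last two lines is left unused in this excerpt; a hypothetical continuation of the test method would simply report it:

        # Report how the defended classifier fares on the adversarial training examples.
        print('Accuracy on adversarial train examples: %.2f%%' % (accuracy * 100))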
github IBM / adversarial-robustness-toolbox / examples / mnist_poison_detection_fix_backdoor.py
# Create Keras convolutional neural network - basic architecture from Keras examples
        # Source here: https://github.com/keras-team/keras/blob/master/examples/mnist_cnn.py
        k.set_learning_phase(1)
        model = Sequential()
        model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:]))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

        classifier = KerasClassifier(model=model, clip_values=(min_, max_))

        classifier.fit(x_train, y_train, nb_epochs=50, batch_size=128)

        print('Saving poisoned model: ')
        pickle.dump(classifier, open('my_poison_classifier.p', 'wb'))

        # Also save the underlying Keras model to disk (HDF5 weights + JSON architecture):
        file_name = 'anu_poison_mnist'
        model.save(file_name + '.hdf5')
        model_json = model.to_json()
        with open(file_name + '.json', "w") as json_file:
            json_file.write(model_json)

    # Evaluate the classifier on the test set
    preds = np.argmax(classifier.predict(x_test), axis=1)
    acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
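
Because the classifier is pickled above, a later run can reload it instead of retraining. A minimal sketch, assuming the pickle round-trips in the ART version used by the example and that x_test is available:

import pickle
import numpy as np

# Reload the wrapped classifier saved as 'my_poison_classifier.p' above.
with open('my_poison_classifier.p', 'rb') as f:
    classifier = pickle.load(f)

preds = np.argmax(classifier.predict(x_test), axis=1)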
github IBM / adversarial-robustness-toolbox / art / attacks / extraction / functionally_equivalent_extraction.py
        model = tf.keras.models.load_model('./model.h5')
    else:
        model = Sequential()
        model.add(Dense(num_neurons, activation='relu', input_shape=input_shape))
        model.add(Dense(num_classes, activation='linear'))

        model.compile(loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
                      optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001, ), metrics=['accuracy'])

        model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))

        model.save('./model.h5')

    score_target = model.evaluate(x_test, y_test, verbose=0)

    classifier = KerasClassifier(model=model, use_logits=True, clip_values=(0, 1))

    fee = FunctionallyEquivalentExtraction(classifier=classifier, num_neurons=num_neurons)
    bbc = fee.extract(x_test[0:100])

    y_test_predicted_extracted = bbc.predict(x_test)
    y_test_predicted_target = classifier.predict(x_test)

    print('Target model - Test accuracy:', score_target[1])
    print('Extracted model - Test accuracy:',
          np.sum(np.argmax(y_test_predicted_extracted, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0])
    print('Extracted model - Test Fidelity:',
          np.sum(np.argmax(y_test_predicted_extracted, axis=1) == np.argmax(y_test_predicted_target, axis=1)) /
          y_test_predicted_target.shape[0])
github TrustAI / DeepConcolic / src / run_ssc.py
def run_svc(test_object, outs):
  print ('To run svc\n')
  
  f_results, cover_layers, _ = ssc_setup (test_object, outs)

  ## define a global attacker
  classifier = KerasClassifier(clip_values=(MIN, -MIN), model=test_object.dnn)
  adv_crafter = FastGradientMethod(classifier)

  test_cases=[]
  adversarials=[]

  count=0

  while True:
    dec_layer_index, dec_pos=get_ssc_next(cover_layers)

    if dec_layer_index==1 and is_input_layer(test_object.dnn.layers[0]): continue
    print ('dec_layer_index', cover_layers[dec_layer_index].layer_index)

    ###
    cond_layer=cover_layers[dec_layer_index-1]
    dec_layer=cover_layers[dec_layer_index]
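
How the global attacker is used later in the loop is not visible in this excerpt. A hedged sketch of the usual ART pattern (the batch x is illustrative, not from the source):

import numpy as np

# Craft adversarial variants of a batch of candidate inputs x.
x_adv = adv_crafter.generate(x=x)

# Keep only the crafted inputs that actually change the model's prediction.
y_pred = np.argmax(classifier.predict(x), axis=1)
y_adv = np.argmax(classifier.predict(x_adv), axis=1)
adversarials.extend(x_adv[y_pred != y_adv])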
github IBM / adversarial-robustness-toolbox / examples / mnist_keras_stealing.py
def build_model(input_shape):
    m = Sequential([
        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(10, activation='softmax')])
    m.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    k = KerasClassifier(model=m, clip_values=(min_, max_))
    return k
github TrustAI / DeepConcolic / src / mcdc.py
        dsp=cover_layers[i].ssc_map.shape
        if is_dense_layer(cover_layers[i].layer) or not (csp[1]==dsp[1] and csp[2]==dsp[2]):
          tmp_decs=cover_layers[i].ssc_map.size
        else:
          ks=cover_layers[i].layer.kernel_size
          dsp=cover_layers[i].ssc_map.shape
          tmp_decs=((dsp[1]-ks[0]+1)*(dsp[2]-ks[1]+1)*dsp[3])
        if is_conv_layer(cover_layers[i].layer):
          if not test_object.feature_indices==None:
             tmp_decs=tmp_decs*(len(test_object.feature_indices)*1.0/dsp[3])
        tot_decs+=tmp_decs
  tot_coverage=0.0

  ## define a global attacker
  #classifier=KerasClassifier((MIN, -MIN), model=test_object.dnn)
  classifier=KerasClassifier(test_object.dnn)
  adv_crafter = FastGradientMethod(classifier)

  test_cases=[]
  adversarials=[]
  count=0

  while count