How to use the deephyper.benchmarks.util.resume_from_disk function in deephyper

To help you get started, we’ve selected a few deephyper examples based on popular ways this function is used in public projects.

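Across these examples, util.resume_from_disk follows the same pattern: it takes the benchmark name, the hyperparameter dictionary, and a data directory, and returns a saved-model record whose attributes (model, model_path, model_mda_path, initial_epoch) are used to resume training from a checkpoint, or to build a fresh model when none is found. The minimal sketch below summarizes that pattern; the benchmark name 'mybench' and the param_dict values are illustrative assumptions, not taken from any one example.

from deephyper.benchmarks import util

BNAME = 'mybench'                                   # illustrative benchmark name (assumption)
param_dict = {'model_path': '/tmp/models',          # illustrative hyperparameters (assumption)
              'epochs': 10}

model = None
initial_epoch = 0
model_path = param_dict['model_path']

if model_path:
    # Load any previously checkpointed model for this benchmark/configuration.
    # (A custom_objects dict can also be passed, as in the capsule example below.)
    savedModel = util.resume_from_disk(BNAME, param_dict, data_dir=model_path)
    model_mda_path = savedModel.model_mda_path      # path to the saved model metadata ('mda') file
    model_path = savedModel.model_path              # path to the saved model weights
    model = savedModel.model                        # restored model, or None if no checkpoint was found
    initial_epoch = savedModel.initial_epoch        # epoch at which training should resume

if model is None:
    # No checkpoint found: build the model from scratch (benchmark-specific).
    pass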

deephyper / deephyper / deephyper / benchmarks / b3 / babi_rnn.py (view on GitHub)

    x, xq, y = vectorize_stories(train, word_idx, story_maxlen, query_maxlen)
    tx, txq, ty = vectorize_stories(test, word_idx, story_maxlen, query_maxlen)

    print('vocab = {}'.format(vocab))
    print('x.shape = {}'.format(x.shape))
    print('xq.shape = {}'.format(xq.shape))
    print('y.shape = {}'.format(y.shape))
    print('story_maxlen, query_maxlen = {}, {}'.format(story_maxlen, query_maxlen))

    model_path = param_dict['model_path']
    model_mda_path = None
    model = None
    initial_epoch = 0

    if model_path:
        savedModel = util.resume_from_disk(BNAME, param_dict, data_dir=model_path)
        model_mda_path = savedModel.model_mda_path
        model_path = savedModel.model_path
        model = savedModel.model
        initial_epoch = savedModel.initial_epoch

    if model is None:
        print('Build model...')
        sentence = layers.Input(shape=(story_maxlen,), dtype='int32')
        encoded_sentence = layers.Embedding(vocab_size, EMBED_HIDDEN_SIZE)(sentence)
        encoded_sentence = layers.Dropout(DROPOUT)(encoded_sentence)

        question = layers.Input(shape=(query_maxlen,), dtype='int32')
        encoded_question = layers.Embedding(vocab_size, EMBED_HIDDEN_SIZE)(question)
        encoded_question = layers.Dropout(DROPOUT)(encoded_question)
        encoded_question = RNN(EMBED_HIDDEN_SIZE, activation=ACTIVATION)(encoded_question)
        encoded_question = layers.RepeatVector(story_maxlen)(encoded_question)

deephyper / deephyper / benchmarks / cifar10cnn / cifar10_cnn.py (view on GitHub)

    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    timer.start('preprocessing')

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model_path = param_dict['model_path']
    model_mda_path = None
    model = None
    initial_epoch = 0

    if model_path:
        savedModel = util.resume_from_disk(BNAME, param_dict, data_dir=model_path)
        model_mda_path = savedModel.model_mda_path
        model_path = savedModel.model_path
        model = savedModel.model
        initial_epoch = savedModel.initial_epoch

    if model is None:
        """
        model = Sequential()
        
        model.add(Conv2D(F1_UNITS, (F1_SIZE, F1_SIZE), padding='same',
                        input_shape=x_train.shape[1:]))
        model.add(Activation(ACTIVATION))
        model.add(Conv2D(F1_UNITS, (F1_SIZE, F1_SIZE)))
        model.add(Activation(ACTIVATION))
        model.add(MaxPooling2D(pool_size=(P_SIZE, P_SIZE), padding='same'))
        model.add(Dropout(DROPOUT))

deephyper / deephyper / deephyper / benchmarks / mnistcnn / mnistcnn.py (view on GitHub)

    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    timer.start('preprocessing')

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model_path = param_dict['model_path']
    model_mda_path = None
    model = None
    initial_epoch = 0

    if model_path:
        savedModel = util.resume_from_disk(BNAME, param_dict, data_dir=model_path)
        model_mda_path = savedModel.model_mda_path
        model_path = savedModel.model_path
        model = savedModel.model
        initial_epoch = savedModel.initial_epoch

    if model is None:
        model = Sequential()
        print(input_shape)
        model.add(Conv2D(F1_UNITS, (F1_SIZE, F1_SIZE), padding='same',
                        input_shape=input_shape))
        model.add(Activation(ACTIVATION))
        model.add(Conv2D(F1_UNITS, (F1_SIZE, F1_SIZE)))
        model.add(Activation(ACTIVATION))
        #model.add(MaxPooling2D(pool_size=(P_SIZE, P_SIZE), padding='same'))
        model.add(Dropout(DROPOUT))

deephyper / deephyper / deephyper / benchmarks / dummy2 / regression.py (view on GitHub)

    timer.start('preprocessing')
    penalty = param_dict['penalty']
    epochs = param_dict['epochs']
    if type(epochs) is not int:
        print("converting epochs to int:", epochs)
        epochs = int(epochs)
    lr = param_dict['lr']
    
    model_path = param_dict['model_path']
    model_mda_path = None
    model = None
    initial_epoch = 0

    if model_path:
        savedModel = util.resume_from_disk(BNAME, param_dict, data_dir=model_path)
        model_mda_path = savedModel.model_mda_path
        model_path = savedModel.model_path
        model = savedModel.model
        initial_epoch = savedModel.initial_epoch

    if model is None:
        a = np.random.uniform(-0.4, 0.4)
        b = np.random.uniform(0, 1)
        print("starting new model", a, b)
    else:
        a, b = model.a, model.b
        print("loaded model from disk:", a, b)
        print("on epoch", initial_epoch)

    timer.end()

deephyper / deephyper / benchmarks / b2 / babi_memnn.py (view on GitHub)

    if param_dict['rnn_type'] == 'GRU':
        RNN = layers.GRU
    elif param_dict['rnn_type'] == 'SimpleRNN':
        RNN = layers.SimpleRNN
    else:
        RNN = layers.LSTM

    
    model_path = param_dict['model_path']
    model_mda_path = None
    model = None
    initial_epoch = 0

    if model_path:
        savedModel = util.resume_from_disk(BNAME, param_dict, data_dir=model_path)
        model_mda_path = savedModel.model_mda_path
        model_path = savedModel.model_path
        model = savedModel.model
        initial_epoch = savedModel.initial_epoch

    if model is None:
        # placeholders
        input_sequence = Input((story_maxlen,))
        question = Input((query_maxlen,))

        # encoders
        # embed the input sequence into a sequence of vectors
        input_encoder_m = Sequential()
        input_encoder_m.add(Embedding(input_dim=vocab_size,
                                    output_dim=64))
        input_encoder_m.add(Dropout(DROPOUT))

deephyper / deephyper / benchmarks / mnistmlp / mnist_mlp.py (view on GitHub)

    x_train /= 255
    x_test /= 255
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model_path = param_dict['model_path']
    model_mda_path = None
    model = None
    initial_epoch = 0

    if model_path:
        savedModel = util.resume_from_disk(BNAME, param_dict, data_dir=model_path)
        model_mda_path = savedModel.model_mda_path
        model_path = savedModel.model_path
        model = savedModel.model
        initial_epoch = savedModel.initial_epoch

    if model is None:
        model = Sequential()
        model.add(Dense(NUNITS, activation=ACTIVATION, input_shape=(784,)))
        model.add(Dropout(DROPOUT))
        for i in range(NHIDDEN):
            model.add(Dense(NUNITS, activation=ACTIVATION))
            model.add(Dropout(DROPOUT))
        model.add(Dense(num_classes, activation='softmax'))
        model.summary()
        model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,

deephyper / deephyper / deephyper / benchmarks / capsule / capsule.py (view on GitHub)

    y_test = utils.to_categorical(y_test, num_classes)
    
    model_path = param_dict['model_path']
    model_mda_path = None
    model = None
    initial_epoch = 0

    if model_path:
        custom_objects = {'Capsule' : Capsule,
                          'num_capsule' : 10,
                          'dim_capsule' : DIM_CAPS,
                          'routings' : ROUTINGS,
                          'share_weights' : SHARE_WEIGHTS,
                          'margin_loss': margin_loss
                         }
        savedModel = util.resume_from_disk(BNAME, param_dict, 
                       data_dir=model_path, custom_objects=custom_objects)
        model_mda_path = savedModel.model_mda_path
        model_path = savedModel.model_path
        model = savedModel.model
        initial_epoch = savedModel.initial_epoch


    if model is None:
        # A common Conv2D model
        input_image = Input(shape=(None, None, 3))
        x = input_image #Conv2D(64, (3, 3), activation='relu')(input_image)
        for i in range(NUM_CONV):
            x = Conv2D(64, (3, 3), activation='relu')(x)
            x = Dropout(DROPOUT)(x)
        x = AveragePooling2D((2, 2))(x)
        for i in range(NUM_CONV):