How to use the deephyper.benchmarks.util.save_meta_data function in deephyper

To help you get started, we've selected a few deephyper examples based on popular ways save_meta_data is used in public projects.
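
Every example below follows the same pattern: once training has finished, the benchmark saves the trained model and then calls util.save_meta_data(param_dict, model_mda_path) to write the run's hyperparameters alongside it. Here is a minimal sketch of that pattern; it assumes the module is imported as from deephyper.benchmarks import util (per the title above), and the param_dict values, file paths, and toy model are hypothetical stand-ins for what the real benchmarks build from their run configuration.

from tensorflow import keras
from deephyper.benchmarks import util  # assumed import path, per the title above

# Hypothetical stand-ins; the real benchmarks derive these from the run's configuration.
param_dict = {'epochs': 20, 'batch_size': 128}
model_path = 'model.h5'        # where the trained model is written
model_mda_path = 'model.mda'   # where save_meta_data writes the run metadata

# A toy model standing in for the benchmark's trained network.
model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='adam', loss='mse')

if model_path:
    model.save(model_path)                           # persist the trained model
    util.save_meta_data(param_dict, model_mda_path)  # persist the hyperparameters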


github deephyper / deephyper / deephyper / benchmarks / b1 / addition_rnn.py
    callbacks_list = [timeout_monitor]

    timer.start('model training')
    train_history = model.fit(x_train, y_train, callbacks=callbacks_list, batch_size=BATCH_SIZE, 
                                initial_epoch=initial_epoch, epochs=EPOCHS, validation_split=0.30)#, validation_data=(x_val, y_val))
    timer.end()
    
    score = model.evaluate(x_val, y_val, batch_size=BATCH_SIZE)
    print('===Validation loss:', score[0])
    print('===Validation accuracy:', score[1])
    print('OUTPUT:', -score[1])
    
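    # Save the trained model and the run's hyperparameter metadata side by side.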
    if model_path:
        timer.start('model save')
        model.save(model_path)
        util.save_meta_data(param_dict, model_mda_path)
        timer.end()
    return -score[1]

github deephyper / deephyper / deephyper / benchmarks / b3 / babi_rnn.py
#earlystop = EarlyStopping(monitor='val_acc', min_delta=0.0001, patience=50, verbose=1, mode='auto')
    timeout_monitor = TerminateOnTimeOut(TIMEOUT)
    callbacks_list = [timeout_monitor]
    timer.start('model training')
    print('Training')
    model.fit([x, xq], y, callbacks=callbacks_list, batch_size=BATCH_SIZE, initial_epoch=initial_epoch, 
                epochs=EPOCHS, validation_split=0.30)
    timer.end()
    loss, acc = model.evaluate([tx, txq], ty, batch_size=BATCH_SIZE)
    print('Test loss / test accuracy = {:.4f} / {:.4f}'.format(loss, acc))
    print('OUTPUT:', -acc)
    
    if model_path:
        timer.start('model save')
        model.save(model_path)  
        util.save_meta_data(param_dict, model_mda_path)
        timer.end()
    
    return -acc

github deephyper / deephyper / deephyper / benchmarks / gcn / gcn.py
            print(' - timeout: training time = %2.3fs/%2.3fs' % (elapsed, TIMEOUT * 60))
            break
    training_timer.end()

    # Testing
    test_loss, test_acc = evaluate_preds(preds, [y_test], [idx_test])
    print("Test set results:",
        "loss= {:.4f}".format(test_loss[0]),
        "accuracy= {:.4f}".format(test_acc[0]))
    print('===Validation accuracy:', test_acc[0])  # harness marker; the value reported here is the test accuracy
    print('OUTPUT:', -test_acc[0])
    
    if model_path:
        timer.start('model save')
        model.save(model_path)  
        util.save_meta_data(param_dict, model_mda_path)
        timer.end()

    return -test_acc[0]

github deephyper / deephyper / deephyper / benchmarks / mnistcnn / mnistcnn.py
    model.fit(x_train, y_train,
              batch_size=BATCH_SIZE,
              epochs=EPOCHS,
              initial_epoch=initial_epoch,
              verbose=1,
              callbacks=callbacks_list,
              validation_split=0.3)
              # validation_data=(x_test, y_test))
    timer.end()
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
       
    if model_path:
        timer.start('model save')
        model.save(model_path)  
        util.save_meta_data(param_dict, model_mda_path)
        timer.end()

    print('OUTPUT:', -score[1])
    return -score[1]

github deephyper / deephyper / benchmarks / mnistmlp / mnist_mlp.py
    model.fit(x_train, y_train,
              batch_size=BATCH_SIZE,
              initial_epoch=initial_epoch,
              epochs=EPOCHS,
              verbose=1,
              callbacks=callbacks_list,
              validation_split=0.3)
              # validation_data=(x_test, y_test))
    timer.end()
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    
    if model_path:
        timer.start('model save')
        model.save(model_path)  
        util.save_meta_data(param_dict, model_mda_path)
        timer.end()

    print('OUTPUT:', -score[1])
    return -score[1]

github deephyper / deephyper / deephyper / benchmarks / dummy2 / regression.py
        a -= lr * grad_a
        b -= lr * grad_b
    timer.end()

    print(f"training done\na={a}\nb={b}")
    predict = linear(training_x, a, b)
    error = predict - training_y
    mse = 0.5 * (error**2).sum() / n_pt
    mse += penalty
    print("OUTPUT:", mse)

    if model_path:
        timer.start('model save')
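        # Wrap the learned coefficients (a, b) in the benchmark's Model helper so they can be saved like the Keras models above.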
        model = Model(a, b)
        model.save(model_path)
        util.save_meta_data(param_dict, model_mda_path)
        timer.end()
        print(f"saved model to {model_path} and MDA to {model_mda_path}")
    return mse

github deephyper / deephyper / benchmarks / b2 / babi_memnn.py
    print('===Validation loss:', score[0])
    print('===Validation accuracy:', score[1])
    print('===Training Time:', end_time - start_time)
    print('OUTPUT:', -score[1])

    
    #train_loss = train_history.history['loss']
    #val_acc = train_history.history['val_acc']
    #print('===Train loss:', train_loss[-1])
    #print('===Validation accuracy:', val_acc[-1])
    #print('OUTPUT:', -val_acc[-1])
    
    if model_path:
        timer.start('model save')
        model.save(model_path)  
        util.save_meta_data(param_dict, model_mda_path)
        timer.end()
    return -score[1]

github deephyper / deephyper / deephyper / benchmarks / capsule / capsule.py
    model.fit_generator(
            datagen.flow(x_train, y_train, batch_size=BATCH_SIZE),
            callbacks=callbacks_list,
            epochs=EPOCHS,
            steps_per_epoch=steps_per_epoch,
            initial_epoch=initial_epoch,
            # validation_split=0.10,
            # validation_data=(x_test, y_test),
            validation_data=datagen.flow(x_test, y_test, batch_size=BATCH_SIZE),
            validation_steps=10,
            workers=1)
    
    timer.end()
    if model_path:
        timer.start('model save')
        model.save(model_path)  
        util.save_meta_data(param_dict, model_mda_path)
        timer.end()

    loss, acc = model.evaluate(x_test, y_test, batch_size=BATCH_SIZE)
    print('Test loss / test accuracy = {:.4f} / {:.4f}'.format(loss, acc))
    print('OUTPUT:', -acc)

    return -acc
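
Across all of these examples the usage is identical: the call is guarded by if model_path, bracketed by timer.start('model save') and timer.end(), and placed immediately after model.save, so the hyperparameters in param_dict are written to model_mda_path whenever the trained model itself is saved. Note also that each benchmark prints and returns its score negated (or, for the regression example, the raw MSE), which suggests the search driving these runs minimizes the returned value.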