How to use the nni.report_final_result function in nni

To help you get started, we’ve selected a few nni examples based on popular ways the library is used in public projects.
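
Across the examples below the pattern is the same: a trial asks NNI for a set of hyperparameters, trains with them, and calls nni.report_final_result exactly once to send its final metric back to the tuner. A minimal sketch of that flow (train_and_evaluate and the 'lr' parameter are hypothetical placeholders for your own training code and search space):

import nni

def train_and_evaluate(lr):
    '''Hypothetical training routine; returns the metric to report.'''
    return 0.9

if __name__ == '__main__':
    params = nni.get_next_parameter()               # hyperparameters chosen by the tuner
    metric = train_and_evaluate(params.get('lr', 0.01))
    nni.report_final_result(metric)                 # exactly one final result per trial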


github microsoft / nni / test / config_test / multi_phase / multi_phase.py View on Github external
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import time
import nni

if __name__ == '__main__':
    for i in range(5):
        hyper_params = nni.get_next_parameter()
        print('hyper_params:[{}]'.format(hyper_params))
        if hyper_params is None:
            break
        nni.report_final_result(0.1*i)
        time.sleep(3)
github microsoft / nni / test / config_test / multi_thread / multi_thread_trial.py View on Github external
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import nni
import time

if __name__ == '__main__':
    nni.get_next_parameter()
    time.sleep(3)
    nni.report_final_result(0.5)
github microsoft / nni / test / naive_test / naive_trial.py View on Github external
# Licensed under the MIT license.

import time

import nni

params = nni.get_next_parameter()
print('params:', params)
x = params['x']

time.sleep(1)
for i in range(1, 10):
    nni.report_intermediate_result(x ** i)
    time.sleep(0.5)

nni.report_final_result(x ** 10)
github microsoft / nni / examples / trials / cifar10_pytorch / main.py View on Github external
    args, _ = parser.parse_known_args()

    try:
        RCV_CONFIG = nni.get_next_parameter()
        #RCV_CONFIG = {'lr': 0.1, 'optimizer': 'Adam', 'model':'senet18'}
        _logger.debug(RCV_CONFIG)

        prepare(RCV_CONFIG)
        acc = 0.0
        best_acc = 0.0
        for epoch in range(start_epoch, start_epoch+args.epochs):
            train(epoch)
            acc, best_acc = test(epoch)
            nni.report_intermediate_result(acc)

        nni.report_final_result(best_acc)
    except Exception as exception:
        _logger.exception(exception)
        raise
github microsoft / nni / examples / trials / mnist-batch-tune-keras / mnist-keras.py View on Github external
def train(args, params):
    '''
    Train model
    '''
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)

    # nni: the SendMetrics callback reports intermediate results to NNI during training
    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
        validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])

    _, acc = model.evaluate(x_test, y_test, verbose=0)
    LOG.debug('Final result is: %s', acc)
    nni.report_final_result(acc)
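
The SendMetrics callback passed to model.fit above is defined elsewhere in the same example file and reports an intermediate result to NNI at the end of each epoch. A rough sketch of such a callback, assuming Keras and that validation accuracy is available in logs (the key is 'val_acc' in older Keras releases and 'val_accuracy' in newer ones):

import keras
import nni

class SendMetrics(keras.callbacks.Callback):
    '''Report validation accuracy to NNI at the end of every epoch.'''
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        acc = logs.get('val_acc', logs.get('val_accuracy'))
        if acc is not None:
            nni.report_intermediate_result(acc)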
github microsoft / nni / tools / nni_annotation / examples / mnist_generated.py View on Github external
            batch = mnist.train.next_batch(batch_num)
            dropout_rate = nni.choice(1, 5, name='dropout_rate')
            mnist_network.train_step.run(feed_dict={
                mnist_network.images: batch[0],
                mnist_network.labels: batch[1],
                mnist_network.keep_prob: dropout_rate})
            if i % 100 == 0:
                test_acc = mnist_network.accuracy.eval(feed_dict={
                    mnist_network.images: mnist.test.images,
                    mnist_network.labels: mnist.test.labels,
                    mnist_network.keep_prob: 1.0})
                nni.report_intermediate_result(test_acc)
                logger.debug('test accuracy %g', test_acc)
                logger.debug('Pipe send intermediate result done.')
        test_acc = mnist_network.accuracy.eval(feed_dict={
            mnist_network.images: mnist.test.images,
            mnist_network.labels: mnist.test.labels,
            mnist_network.keep_prob: 1.0})
        nni.report_final_result(test_acc)
        logger.debug('Final result is %g', test_acc)
        logger.debug('Send final result done.')
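
This file, mnist_generated.py, is produced by NNI's annotation tool: in the original source, tunable values and reporting points are written as special string annotations, which the tool rewrites into the nni.choice and nni.report_* calls shown above. A rough sketch of the annotation style (the concrete values are illustrative, not the exact ones used in the example):

'''@nni.variable(nni.choice(1, 5), name=dropout_rate)'''
dropout_rate = 5

# ... training steps ...

'''@nni.report_intermediate_result(test_acc)'''

'''@nni.report_final_result(test_acc)'''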
github microsoft / nni / examples / trials / auto-gbdt / main.py View on Github external
    gbm = lgb.train(params,
                    lgb_train,
                    num_boost_round=20,
                    valid_sets=lgb_eval,
                    early_stopping_rounds=5)

    print('Start predicting...')

    # predict
    y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)

    # eval
    rmse = mean_squared_error(y_test, y_pred) ** 0.5
    print('The rmse of prediction is:', rmse)

    nni.report_final_result(rmse)
github microsoft / nni / examples / trials / network_morphism / FashionMNIST / FashionMNIST_keras.py View on Github external
        y=y_train,
        batch_size=args.batch_size,
        validation_data=(x_test, y_test),
        epochs=args.epochs,
        shuffle=True,
        callbacks=[
            SendMetrics(),
            EarlyStopping(min_delta=0.001, patience=10),
            TensorBoard(log_dir=TENSORBOARD_DIR),
        ],
    )

    # report the trial's final accuracy to the tuner
    _, acc = net.evaluate(x_test, y_test)
    logger.debug("Final result is: %.3f", acc)
    nni.report_final_result(acc)
github microsoft / nni / tools / nni_annotation / examples / mnist_without_annotation.py View on Github external
            if i % 100 == 0:
                test_acc = mnist_network.accuracy.eval(
                    feed_dict={mnist_network.images: mnist.test.images,
                               mnist_network.labels: mnist.test.labels,
                               mnist_network.keep_prob: 1.0})

                nni.report_intermediate_result(test_acc)
                logger.debug('test accuracy %g', test_acc)
                logger.debug('Pipe send intermediate result done.')

        test_acc = mnist_network.accuracy.eval(
            feed_dict={mnist_network.images: mnist.test.images,
                       mnist_network.labels: mnist.test.labels,
                       mnist_network.keep_prob: 1.0})

        nni.report_final_result(test_acc)
        logger.debug('Final result is %g', test_acc)
        logger.debug('Send final result done.')
github SpongebBob / tabular_automl_NNI / main.py View on Github external
    file_name = 'train.tiny.csv'
    target_name = 'Label'
    id_index = 'Id'

    # get parameters from tuner
    RECEIVED_PARAMS = nni.get_next_parameter()
    LOG.info("Received params:\n", RECEIVED_PARAMS)
    
    # list is a column_name generate from tuner
    df = pd.read_csv(file_name)
    sample_col = RECEIVED_PARAMS['sample_feature']
    
    # raw feaure + sample_feature
    df = name2feature(df, sample_col, target_name)
    feature_imp, val_score = lgb_model_train(df,  _epoch = 1000, target_name = target_name, id_index = id_index)
    nni.report_final_result({
        "default":val_score, 
        "feature_importance":feature_imp
    })
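
This last example passes a dict to nni.report_final_result rather than a plain number. In that case the value under the "default" key is what the tuner optimizes, and the other entries are recorded alongside the trial's result. A minimal sketch of the same pattern with placeholder values:

import nni

val_score = 0.87            # placeholder: the metric the tuner should optimize
feature_imp = [0.4, 0.3]    # placeholder: any extra information worth keeping

nni.report_final_result({
    "default": val_score,              # required key, used by the tuner
    "feature_importance": feature_imp  # extra fields are stored with the trial
})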