How to use the mlflow.log_param function in mlflow

To help you get started, we’ve selected a few mlflow.log_param examples, based on popular ways the function is used in public projects.

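Before looking at the project examples, here is a minimal, self-contained sketch of the call itself (the parameter names below are illustrative, not taken from the projects). mlflow.log_param records a single key/value pair on the active run:

import mlflow

with mlflow.start_run():
    # Each call logs one hyperparameter on the active run. Values are
    # persisted as strings, and a given key can be logged only once per run.
    mlflow.log_param("learning_rate", 0.01)
    mlflow.log_param("optimizer", "adam")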

github produvia / ai-platform / tasks / computer-vision / image-classification / 9945345b-9c32-45ba-a079-b9f7d6a2f515 / main.py View on GitHub
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), test_accuracy))
    step = (epoch + 1) * len(train_loader)
    log_scalar('test_loss', test_loss, step)
    log_scalar('test_accuracy', test_accuracy, step)
    return test_accuracy

def log_scalar(name, value, step):
    """Log a scalar value to both MLflow and TensorBoard."""
    writer.add_scalar(name, value, step)
    # Pass step so the MLflow series stays aligned with the TensorBoard one.
    mlflow.log_metric(name, value, step=step)

with mlflow.start_run():
    # Log our parameters into mlflow
    for key, value in vars(args).items():
        mlflow.log_param(key, value)
    
    # Create a SummaryWriter to write TensorBoard events locally
    output_dir = tempfile.mkdtemp()
    writer = SummaryWriter(output_dir)
    print("Writing TensorBoard events locally to %s\n" % output_dir)

    # Perform the training
    best_loss = None
    best_acc = 0.
    for epoch in range(1, args.epochs + 1):
        loss = train(epoch, best_loss)
        acc = test(epoch)
        # Track the lowest loss seen so far. Comparing against None avoids
        # treating a legitimate 0.0 loss as "unset", which `if not best_loss` would.
        if best_loss is None or loss < best_loss:
            best_loss = loss
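The per-key loop above can also be written as one batched call. A small sketch, assuming args is an argparse.Namespace as in the example (the flag names here are hypothetical):

import argparse

import mlflow

# Stand-in for the parsed command-line flags used above.
args = argparse.Namespace(epochs=10, batch_size=64, lr=0.01)

with mlflow.start_run():
    # mlflow.log_params logs a whole dict of parameters in one call,
    # equivalent to calling mlflow.log_param for each key.
    mlflow.log_params(vars(args))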
github mlflow / mlflow / mlflow / R / mlflow / inst / examples / python / simple / simple.py View on GitHub
import mlflow

if __name__ == "__main__":
    with mlflow.start_run():
        mlflow.log_param("parameter", 5)
        mlflow.log_metric("metric", 0)
github produvia / ai-platform / tasks / time-series / time-series-forecasting / bff6d5aa-9957-481c-ac50-6d669f869469 / main.py View on GitHub
        # Save trained model
        weathernet.save(filepath=model_filepath)
        # Run prediction on trained model
        predict_weather(weathernet)
        # Plot the metrics of the trained model
        plot_metrics(results)
        # Log metrics, parameters, artifacts and log the model
        with mlflow.start_run():
            run_uuid = mlflow.active_run().info.run_uuid
            print("MLflow Run ID: %s" % run_uuid)
            mlflow.keras.log_model(weathernet, "models")
            mlflow.log_artifact(image_dir + city + '_Loss_Diag.png', "images")
            mlflow.log_artifact(image_dir + city + '_Daily_Temp_Predicted.png', "images")
            mlflow.log_metric('Loss', loss(results))
            mlflow.log_metric('Validation Loss', val_loss(results))
            mlflow.log_param('City_Name', city)
            mlflow.log_param('Training_Epochs', epochs)
            mlflow.log_param('Steps_per_epoch', steps_per_epoch)
            mlflow.log_param('Validation_steps', val_steps)
            mlflow.log_param('Prediction_steps', predict_steps)
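A side note on the run_uuid lookup above: run_uuid is a legacy alias, and newer MLflow releases expose the same identifier as run_id. Since start_run returns the run object, the active-run lookup can be dropped entirely, a sketch:

import mlflow

with mlflow.start_run() as run:
    # Equivalent to mlflow.active_run().info.run_uuid, without the
    # deprecated attribute or the extra lookup.
    print("MLflow Run ID: %s" % run.info.run_id)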
github GoogleCloudPlatform / ml-on-gcp / tutorials / tensorflow / mlflow_gcp / trainer / task.py View on GitHub
    def on_train_end(self, logs=None):
        mlflow.log_param('num_layers', len(self.model.layers))
        mlflow.log_param('optimizer_name',
                         type(self.model.optimizer).__name__)
github GoogleCloudPlatform / ml-on-gcp / tutorials / tensorflow / mlflow_gcp / trainer / task.py View on GitHub
        history = keras_model.fit(  # earlier arguments truncated in this excerpt
            validation_data=validation_dataset,
            validation_steps=args.eval_steps,
            verbose=1,
            callbacks=[lr_decay_callback, tensorboard_callback,
                       mlflow_callback])
        metrics = history.history
        logging.info(metrics)
        keras_model.summary()
        mlflow.log_param('train_files', args.train_files)
        mlflow.log_param('eval_files', args.eval_files)
        mlflow.log_param('num_epochs', args.num_epochs)
        mlflow.log_param('batch_size', args.batch_size)
        mlflow.log_param('learning_rate', args.learning_rate)
        mlflow.log_param('train_samples', num_train_examples)
        mlflow.log_param('eval_samples', num_eval_examples)
        mlflow.log_param('eval_steps', args.eval_steps)
        mlflow.log_param('steps_per_epoch',
                         int(num_train_examples / args.batch_size))
        # Add metrics
        _mlflow_log_metrics(metrics, 'loss')
        _mlflow_log_metrics(metrics, 'acc')
        _mlflow_log_metrics(metrics, 'val_loss')
        _mlflow_log_metrics(metrics, 'val_acc')
        _mlflow_log_metrics(metrics, 'lr')
        # Export SavedModel
        model_local_path = os.path.join(args.job_dir, run_id, 'model')
        tf.keras.experimental.export_saved_model(keras_model, model_local_path)
        # Define artifacts.
        logging.info('Model exported to: {}'.format(model_local_path))
        # Workaround: MLflow is unable to read GCS paths directly.
        # https://github.com/mlflow/mlflow/issues/1765
        if model_local_path.startswith('gs://'):
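_mlflow_log_metrics in this example is a project-local helper, not part of the MLflow API. A plausible minimal shape for it, assuming it walks the per-epoch values in the Keras history dict (an assumption, not the repository's exact code):

import mlflow

def _mlflow_log_metrics(metrics, metric_name):
    """Log each per-epoch value of one Keras history series to MLflow."""
    # Assumed behavior: metrics is history.history, a dict mapping metric
    # names to per-epoch lists; step records the epoch index of each point.
    for epoch, value in enumerate(metrics[metric_name]):
        mlflow.log_metric(metric_name, value, step=epoch)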
github mlflow / mlflow / examples / quickstart / mlflow_tracking.py View on GitHub
import os
from random import random, randint

from mlflow import log_metric, log_param, log_artifacts

if __name__ == "__main__":
    print("Running mlflow_tracking.py")

    log_param("param1", randint(0, 100))

    log_metric("foo", random())
    log_metric("foo", random() + 1)
    log_metric("foo", random() + 2)

    if not os.path.exists("outputs"):
        os.makedirs("outputs")
    with open("outputs/test.txt", "w") as f:
        f.write("hello world!")

    log_artifacts("outputs")
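With no tracking server configured, these calls write to a local ./mlruns directory; running `mlflow ui` from the same working directory then serves the logged parameters, metrics, and artifacts at http://localhost:5000 by default.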
github produvia / ai-platform / tasks / computer-vision / image-generation / style-gan / generate.py View on GitHub
            except Exception:
                # pickle needs the checkpoint opened in binary mode.
                with open(os.path.join(pretrained_dir,
                                       'karras2019stylegan-{}-{}x{}.pkl'.format(dataset, resolution, resolution)),
                          'rb') as f:
                    weights = pickle.load(f)
            convert(weights,
                    generator=generator,
                    g_out_file=g_out_file,
                    discriminator=discriminator,
                    d_out_file=d_out_file)

            print('finished conversion')

    with mlflow.start_run():

        for key, value in vars(args).items():
            mlflow.log_param(key, value)

        generator.load_state_dict(torch.load(g_out_file))

        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

        generator.eval()
        generator.to(device)
        torch.manual_seed(random_seed)

        resolution_log2 = int(np.log2(trained_resolution))

        latents = torch.randn(nrow * ncol, 512, device=device)
        with torch.no_grad():
            # alpha is 1
            imgs = generator(latents, resolution_log2, alpha)
            imgs = (imgs.clamp(-1, 1) + 1) / 2.0
github produvia / ai-platform / tasks / methodology / word-embeddings / b1530656-6b32-4cf2-bf52-de9c7d178052 / word_embeddings.py View on GitHub
        # Only the 'title' and 'text' columns are needed, so combine them
        # and drop everything else.
        df['title_text'] = df['title'] + df['text']
        df.drop(columns=['uuid', 'ord_in_thread', 'author', 'published',
                         'title', 'text', 'language', 'crawled', 'site_url',
                         'country', 'domain_rank', 'thread_title',
                         'spam_score', 'main_img_url', 'replies_count',
                         'participants_count', 'likes', 'comments', 'shares',
                         'type'], inplace=True)
        df.dropna(inplace=True)
        df.title_text = df.title_text.str.lower()

        # Turn a document into clean tokens.
        df['cleaned'] = df.title_text.apply(clean_doc)
        print(df.shape)

        mlflow.log_param("word1", word1)
        mlflow.log_param("word2", word2)
        mlflow.log_param("topn", topn)

        # Build the model using gensim.
        w2v = Word2Vec(df.cleaned, min_count=20, window=3,
                       size=300, negative=20)  # gensim 3.x API: `size` was renamed to `vector_size` in gensim 4
        words = list(w2v.wv.vocab)
        mlflow.log_metric("vocabulary_size", len(words))

        # Explore the results: find each query word's most similar words.
        word1_most_similar = w2v.wv.most_similar(word1, topn=topn)
        print(word1_most_similar)

        word2_most_similar = w2v.wv.most_similar(word2, topn=topn)
        print(word2_most_similar)
github ThoughtWorksInc / ml-app-template / src / train.py View on GitHub
# log training run to mlflow
    mlflow.set_tracking_uri(uri=f'http://{settings.MLFLOW_IP}:5000')
    if settings.CI == 'true':
        mlflow.set_experiment('CI')
    else:
        mlflow.set_experiment('dev')

    with mlflow.start_run() as run:
        # calculate evaluation metrics
        y_test_pred = model.predict(x_test)
        rmse = sqrt(metrics.mean_squared_error(y_true=y_test, y_pred=y_test_pred))
        r2_score = metrics.r2_score(y_true=y_test, y_pred=y_test_pred)

        # log hyperparameters to mlflow
        mlflow.log_param('n_estimators', N_ESTIMATORS)
        mlflow.log_param('max_depth', MAX_DEPTH)
        
        # log metrics to mlflow
        mlflow.log_metric("rmse_validation_data", rmse)
        mlflow.log_metric("r2_score_validation_data", r2_score)
else:
    print('Not logging training run because the MLflow tracking server is not up, or its URL is not set in train.py')
github mlflow / mlflow / examples / h2o / random_forest.py View on GitHub
def train_random_forest(ntrees):
    with mlflow.start_run():
        rf = H2ORandomForestEstimator(ntrees=ntrees)
        train_cols = [n for n in wine.col_names if n != "quality"]
        rf.train(train_cols, "quality", training_frame=train, validation_frame=test)

        mlflow.log_param("ntrees", ntrees)

        mlflow.log_metric("rmse", rf.rmse())
        mlflow.log_metric("r2", rf.r2())
        mlflow.log_metric("mae", rf.mae())

        mlflow.h2o.log_model(rf, "model")