How to use the mlflow.start_run function in mlflow

To help you get started, we’ve selected a few examples of mlflow.start_run, based on popular ways it is used in public projects.
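
Before diving into the examples, here is a minimal sketch of the typical pattern (the parameter and metric names are illustrative, not taken from the projects below): mlflow.start_run() opens a run, everything logged inside the block is attached to that run, and leaving the block ends the run and records its end time.

import mlflow

with mlflow.start_run() as run:
    mlflow.log_param("alpha", 0.5)       # illustrative hyperparameter
    mlflow.log_metric("rmse", 0.72)      # illustrative metric
    print(run.info.run_id)               # id assigned by the tracking backend

assert mlflow.active_run() is None       # no run is active once the block exits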

github mlflow / mlflow / tests / tensorflow_autolog / test_tensorflow_autolog.py
def test_tf_keras_autolog_persists_manually_created_run(random_train_data, random_one_hot_labels,
                                                        fit_variant):
    mlflow.tensorflow.autolog()
    with mlflow.start_run() as run:
        data = random_train_data
        labels = random_one_hot_labels

        model = create_tf_keras_model()

        model.fit(data, labels, epochs=10)

        assert mlflow.active_run()
        assert mlflow.active_run().info.run_id == run.info.run_id
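
The assertions above show that autologging attaches to the run you opened manually instead of creating a new one. A small sketch of how you might confirm that afterwards with the tracking client (the training code is elided):

import mlflow
import mlflow.tensorflow
from mlflow.tracking import MlflowClient

mlflow.tensorflow.autolog()
with mlflow.start_run() as run:
    ...  # train a tf.keras model here; autologged params and metrics go to this run

client = MlflowClient()
print(client.get_run(run.info.run_id).data.params)  # autologged parameters appear under the same run id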

github mlflow / mlflow / tests / models / test_cli.py
def test_prepare_env_passes(sk_model):
    if no_conda:
        pytest.skip("This test requires conda.")

    with TempDir(chdr=True):
        with mlflow.start_run() as active_run:
            mlflow.sklearn.log_model(sk_model, "model")
            model_uri = "runs:/{run_id}/model".format(run_id=active_run.info.run_id)

        # Test with no conda
        p = subprocess.Popen(["mlflow", "models", "prepare-env", "-m", model_uri,
                              "--no-conda"], stderr=subprocess.PIPE)
        assert p.wait() == 0

        # With conda
        p = subprocess.Popen(["mlflow", "models", "prepare-env", "-m", model_uri],
                             stderr=subprocess.PIPE)
        assert p.wait() == 0

        # Should be idempotent
        p = subprocess.Popen(["mlflow", "models", "prepare-env", "-m", model_uri],
                             stderr=subprocess.PIPE)

github mlflow / mlflow / tests / h2o / test_h2o_model_export.py
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
        h2o_iris_model):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.h2o.log_model(h2o_model=h2o_iris_model.model, artifact_path=artifact_path)
        model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path))

    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    with open(conda_env_path, "r") as f:
        conda_env = yaml.safe_load(f)

    assert conda_env == mlflow.h2o.get_default_conda_env()

github mlflow / mlflow / tests / tensorflow / test_tensorflow2_model_export.py
def test_log_model_without_specified_conda_env_uses_default_env_with_expected_dependencies(
        saved_tf_iris_model):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.tensorflow.log_model(tf_saved_model_dir=saved_tf_iris_model.path,
                                    tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags,
                                    tf_signature_def_key=saved_tf_iris_model.signature_def_key,
                                    artifact_path=artifact_path,
                                    conda_env=None)
        model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id,
            artifact_path=artifact_path)

    model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    with open(conda_env_path, "r") as f:
        conda_env = yaml.safe_load(f)

    assert conda_env == mlflow.tensorflow.get_default_conda_env()

github mlflow / mlflow / tests / tensorflow / test_tensorflow2_model_export.py
def test_log_and_load_model_persists_and_restores_model_successfully(saved_tf_iris_model):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.tensorflow.log_model(tf_saved_model_dir=saved_tf_iris_model.path,
                                    tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags,
                                    tf_signature_def_key=saved_tf_iris_model.signature_def_key,
                                    artifact_path=artifact_path)
        model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id,
            artifact_path=artifact_path)

    infer_fn = mlflow.tensorflow.load_model(model_uri=model_uri)

github mlflow / mlflow / tests / tracking / test_tracking.py
def test_metric_timestamp(tracking_uri_mock):
    with mlflow.start_run() as active_run:
        mlflow.log_metric("name_1", 25)
        mlflow.log_metric("name_1", 30)
        run_id = active_run.info.run_uuid
    # Check that metric timestamps are between run start and finish
    client = mlflow.tracking.MlflowClient()
    history = client.get_metric_history(run_id, "name_1")
    finished_run = client.get_run(run_id)
    assert len(history) == 2
    assert all([
        m.timestamp >= finished_run.info.start_time and m.timestamp <= finished_run.info.end_time
        for m in history
    ])

github Azure / AIPlatform / end-to-end-solutions / Luna / src / Luna.Packages / luna-publish-utils / luna / mlflowLunaUtils.py
def Init(self, luna_config, run_mode, args, userInput):
    super().Init(luna_config, run_mode, args, userInput)
    mlflow.set_tracking_uri('databricks')
    if not mlflow.active_run():
        with open(self._luna_config["mlflow"]["test_experiment"]) as file:
            test_exp = yaml.full_load(file)
            mlflow.start_run(experiment_id=test_exp["experiment_id"])

    self._logger = MLFlowLunaLogger()
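
When mlflow.start_run is called without a with block, as in the Init method above, the run stays active until mlflow.end_run() is called explicitly (or the process exits). A minimal sketch of that pairing, with an illustrative experiment id:

import mlflow

run = mlflow.start_run(experiment_id="0")  # "0" is the default experiment; illustrative only
mlflow.log_param("mode", "test")           # illustrative parameter
mlflow.end_run()                           # required when start_run is not used as a context manager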

github rameshjesswani / NLP- / mlflow-examples / sklearn-example / basic_example.py
        logger.exception(
            "Unable to download training & test CSV, check your internet connection. Error: %s", e)

    # save dataset for logging 
    data.to_csv(output_dir + "/winequality-red.csv", sep='\t', encoding='utf-8')

    # Split the data into training and test sets. (0.75, 0.25) split.
    train, test = train_test_split(data)

    # The predicted column is "quality" which is a scalar from [3, 9]
    train_x = train.drop(["quality"], axis=1)
    test_x = test.drop(["quality"], axis=1)
    train_y = train[["quality"]]
    test_y = test[["quality"]]

    with mlflow.start_run():

        # lr = LinearRegression()
        er = ElasticNet()

        er.fit(train_x, train_y)
        predicted_qualities = er.predict(test_x)

        (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)

        print("  RMSE: %s" % rmse)
        print("  MAE: %s" % mae)
        print("  R2: %s" % r2)

        mlflow.log_param("coefficients", er.coef_)
        mlflow.log_metric("rmse", rmse)
        mlflow.log_metric("r2", r2)

github PipelineAI / models / keras / mlflow-mnist / model / pipeline_train.py
def run(epochs, batch_size):
#    tracking_uri = 'https://community.cloud.pipeline.ai'

    users_home = '/mnt/pipelineai/users' 
    experiment_base_path = '%s/experiments' % users_home
    tracking_uri='file://%s' % experiment_base_path
    mlflow.set_tracking_uri(tracking_uri)

    experiment_name = '%s-%s' % (os.getenv('PIPELINE_RESOURCE_NAME', 'mnist'), os.getenv('PIPELINE_TAG', int(1000 * time.time())))

    mlflow.set_experiment(experiment_name)
    
    with mlflow.start_run() as run:
        mlflow.log_param("epochs", str(epochs))
        mlflow.log_param("batch_size", str(batch_size))

        mnist = tf.keras.datasets.mnist

        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0

        model = tf.keras.models.Sequential([
          tf.keras.layers.Flatten(input_shape=(28, 28)),
          tf.keras.layers.Dense(512, activation=tf.nn.relu),
          tf.keras.layers.Dropout(0.2),
          tf.keras.layers.Dense(10, activation=tf.nn.softmax)
        ])

        model.compile(optimizer='adam',

github vonfeng / DPLink / codes / run.py
        experiments = service.list_experiments()
        for exp in experiments:
            if exp.name == experiment_name:
                experiment_ID = exp.experiment_id
                print("Experiment Exists!")
                break

    setproctitle.setproctitle('DPLink')

    thre = args.threshold
    rnn_unit = 'GRU'
    attn_unit = 'dot'
    test_pretrain = False  # test the effect of different pretrain degree, working with run_pretrain
    pre_path, rank_pre2, hit_pre2 = None, None, None
    for run_id in range(args.repeat):
        with mlflow.start_run(experiment_id=experiment_ID):
            archive_path = mlflow.get_artifact_uri()
            if run_id == 0:
                pre_path = archive_path
            else:
                if test_pretrain:
                    os.system("cp " + pre_path + "/SN-pre-" + str(run_id) + ".m " + archive_path + "/")
                else:
                    os.system("cp " + pre_path + "/SN-pre.m " + archive_path + "/")
            hidden_size = settings[args.data]["hidden_size"]
            loc_emb_size = settings[args.data]["loc_emb_size"]
            tim_emb_size = settings[args.data]["tim_emb_size"]
            dropout_p = settings[args.data]["dropout_p"]
            l2 = settings[args.data]["l2"]
            lr_match = settings[args.data]["lr_match"]
            if run_id == 0:
                loss_mode = "BCELoss"