How to use the kfp.dsl.get_pipeline_conf function in kfp

To help you get started, we’ve selected a few kfp examples based on popular ways kfp.dsl.get_pipeline_conf is used in public projects.
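
In the kfp v1 SDK, dsl.get_pipeline_conf() must be called inside a function decorated with @dsl.pipeline: it returns the PipelineConf object of the pipeline currently being defined, on which pipeline-wide options can be set. A minimal sketch (the pipeline name, image, and timeout value below are illustrative assumptions, not taken from the examples):

from kfp import dsl

@dsl.pipeline(name="conf-demo", description="Illustrates dsl.get_pipeline_conf")
def conf_demo_pipeline():
    # get_pipeline_conf() returns the PipelineConf of the pipeline being
    # defined, so it only works inside the @dsl.pipeline function body.
    conf = dsl.get_pipeline_conf()
    conf.set_timeout(3600)  # fail the whole run if it exceeds one hour

    # an ordinary step; pipeline-level settings are applied to it at compile time
    dsl.ContainerOp(
        name="echo",
        image="busybox",
        command=["sh", "-c", "echo hello"],
    )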


github Hydrospheredata / kubeflow-workshop / workflows / origin.py
drift_detector_steps: str
        Number of steps for which the autoencoder will be trained.
    drift_detector_batch_size: str
        Batch size used for training the autoencoder.
    model_name: str
        Name of the classifier used for deployment.
    model_drift_detector_name: str
        Name of the autoencoder used for deployment.
    acceptable_accuracy: str
        Accuracy the model must reach in the evaluation step: if it is met,
        the model is deployed to production; otherwise the workflow execution fails.
    """

    # Configure all steps to use the ConfigMap and the AWS secret
    dsl.get_pipeline_conf().add_op_transformer(apply_config_map_and_aws_secret)

    download = dsl.ContainerOp(
        name="download",
        image=f"{registry}/mnist-pipeline-download:{tag}",
        file_outputs={
            "output_data_path": "/output_data_path",
            "logs_path": "/logs_path",
        },
        arguments=["--output-data-path", f"s3://{bucket}/data"],
    )

    train_drift_detector = dsl.ContainerOp(
        name="train_drift_detector",
        image=f"{registry}/mnist-pipeline-train-drift-detector:{tag}",
        file_outputs={
            "logs_path": "/logs_path",
github xuw10 / kubeflow-tfx-workshop / kubeflow-pipelines / minio / minio.py
tag: str = "latest", 
        namespace: str = "kubeflow", 
        bucket: str = "mybucket"
):

    # configures artifact location
    pipeline_artifact_location = dsl.ArtifactLocation.s3(
        bucket=bucket,
        endpoint="minio-service.%s:9000" % namespace,  # parameterize minio-service endpoint
        insecure=True,
        access_key_secret=V1SecretKeySelector(name="mlpipeline-minio-artifact", key="accesskey"),
        secret_key_secret={"name": "mlpipeline-minio-artifact", "key": "secretkey"},  # accepts dict also
    )

    # set pipeline level artifact location
    dsl.get_pipeline_conf().set_artifact_location(pipeline_artifact_location)

    # artifacts in this op are stored to endpoint `minio-service.<namespace>:9000`
    op = dsl.ContainerOp(name="foo", image="busybox:%s" % tag)
github xuw10 / kubeflow-tfx-workshop / kubeflow-pipelines / basic / artifact_location.py
def custom_artifact_location(
    tag: str, namespace: str = "kubeflow", bucket: str = "mybucket"
):

    # configures artifact location
    pipeline_artifact_location = dsl.ArtifactLocation.s3(
        bucket=bucket,
        endpoint="minio-service.%s:9000" % namespace,  # parameterize minio-service endpoint
        insecure=True,
        access_key_secret=V1SecretKeySelector(name="minio", key="accesskey"),
        secret_key_secret={"name": "minio", "key": "secretkey"},  # accepts dict also
    )

    # set pipeline level artifact location
    dsl.get_pipeline_conf().set_artifact_location(pipeline_artifact_location)

    # artifacts in this op are stored to endpoint `minio-service.<namespace>:9000`
    op = dsl.ContainerOp(name="foo", image="busybox:%s" % tag)
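
Besides set_artifact_location and add_op_transformer, the PipelineConf returned by dsl.get_pipeline_conf() exposes other pipeline-wide settings. A brief sketch of two commonly used ones (the secret name and TTL value are placeholders):

from kubernetes import client as k8s
from kfp import dsl

@dsl.pipeline(name="pipeline-conf-options")
def pipeline_conf_options():
    conf = dsl.get_pipeline_conf()
    # pull every step's image with this registry credential
    conf.set_image_pull_secrets([k8s.V1LocalObjectReference(name="regcred")])
    # garbage-collect the run's pods one hour after the workflow finishes
    conf.set_ttl_seconds_after_finished(3600)

    dsl.ContainerOp(name="noop", image="busybox", command=["true"])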
github kubeflow / pipelines / samples / core / pipeline_transformers / pipeline_transformers.py
def transform_pipeline():
  op1 = print_op('hey, what are you up to?')
  op2 = print_op('train my model.')
  dsl.get_pipeline_conf().add_op_transformer(add_annotation)
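
The add_annotation transformer is defined elsewhere in that sample. A minimal sketch of such a transformer (the annotation key and value here are placeholders): it simply mutates each ContainerOp and returns it.

from kfp import dsl

def add_annotation(op: dsl.ContainerOp) -> dsl.ContainerOp:
    # called once per op by add_op_transformer when the pipeline is compiled
    op.add_pod_annotation(name="example-key", value="example-value")
    return op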
github Hydrospheredata / kubeflow-workshop / workflows / subsample.py
drift_detector_steps: str
        Number of steps for which the autoencoder will be trained.
    drift_detector_batch_size: str
        Batch size used for training the autoencoder.
    model_name: str
        Name of the classifier used for deployment.
    model_drift_detector_name: str
        Name of the autoencoder used for deployment.
    acceptable_accuracy: str
        Accuracy the model must reach in the evaluation step: if it is met,
        the model is deployed to production; otherwise the workflow execution fails.
    """

    # Configure all steps to have ConfigMap and use aws secret
    dsl.get_pipeline_conf().add_op_transformer(apply_config_map_and_aws_secret)

    subsample = dsl.ContainerOp(
        name="subsample",
        image=f"{registry}/mnist-pipeline-subsample:{tag}",
        file_outputs={
            "output_data_path": "/output_data_path",
            "logs_path": "/logs_path",
        },
        arguments=[
            "--output-data-path", "s3://workshop-hydrosphere/mnist/data",
            "--application-name", target_application_name,
            "--limit", sample_limit,
            "--train-part", train_part, 
            "--validation-part", validation_part
        ],
    )
github kubeflow / pipelines / samples / core / artifact_location / artifact_location.py
tag: str = '1.31.0',
    namespace: str = "kubeflow",
    bucket: str = "mlpipeline"
):

    # configures artifact location
    pipeline_artifact_location = dsl.ArtifactLocation.s3(
        bucket=bucket,
        endpoint="minio-service.%s:9000" % namespace,  # parameterize minio-service endpoint
        insecure=True,
        access_key_secret=V1SecretKeySelector(name=secret_name, key="accesskey"),
        secret_key_secret={"name": secret_name, "key": "secretkey"},  # accepts dict also
    )

    # set pipeline level artifact location
    dsl.get_pipeline_conf().set_artifact_location(pipeline_artifact_location)

    # artifacts in this op are stored to endpoint `minio-service.<namespace>:9000`
    op = dsl.ContainerOp(name="foo", image="busybox:%s" % tag,
                         command=['sh', '-c', 'echo hello > /tmp/output.txt'],
                         file_outputs={'output': '/tmp/output.txt'})
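
Whichever settings are applied through dsl.get_pipeline_conf(), they only take effect when the pipeline function is compiled or submitted. A usage sketch, using the custom_artifact_location function from one of the snippets above (the output file name and the argument values are placeholders):

import kfp
from kfp import compiler

# compile the pipeline function to an Argo workflow spec
compiler.Compiler().compile(custom_artifact_location, "artifact_location.yaml")

# or submit it directly to a Kubeflow Pipelines endpoint
# kfp.Client().create_run_from_pipeline_func(custom_artifact_location, arguments={"tag": "latest"})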