How to use the kfp.aws.use_aws_secret function in kfp

To help you get started, we’ve selected a few kfp.aws.use_aws_secret examples, based on popular ways it is used in public projects.
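
kfp.aws.use_aws_secret returns an op modifier that you pass to a ContainerOp's .apply() method. The modifier injects the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables into the step's container, sourcing their values from a Kubernetes secret (named aws-secret by default). Below is a minimal sketch of that pattern; it is not taken from any of the projects shown here, and the pipeline name, image, command, and bucket are placeholders for illustration.

import kfp
from kfp import dsl
from kfp.aws import use_aws_secret


@dsl.pipeline(name="use-aws-secret-example")
def pipeline():
    # Hypothetical step that needs AWS credentials, e.g. to list an S3 bucket.
    list_bucket = dsl.ContainerOp(
        name="list_bucket",
        image="amazon/aws-cli",                         # placeholder image
        command=["aws", "s3", "ls", "s3://my-bucket"],  # placeholder bucket
    )
    # With no arguments, use_aws_secret() falls back to its defaults: the secret
    # is expected to be named "aws-secret" and to contain the keys
    # AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
    list_bucket.apply(use_aws_secret())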

GitHub: Hydrospheredata / kubeflow-workshop / pipeline.py
        ],
    ).apply(use_aws_secret())
    test.after(deploy_to_stage)

    test.set_retry(3)

    # 6. Deploy model to production application
    deploy_to_prod = dsl.ContainerOp(
        name="deploy_to_prod",
        image="hydrosphere/mnist-pipeline-deploy-to-prod:v1",  # <-- Replace with correct docker image
        arguments=[
            "--model-version", release.outputs["model_version"],
            "--model-name", model_name,
            "--hydrosphere-address", hydrosphere_address
        ],
    ).apply(use_aws_secret())
    deploy_to_prod.after(test)
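
Note that the Hydrosphere workshop snippets call use_aws_secret() with no arguments, so they rely entirely on the defaults: the namespace the pipeline runs in must already contain a Kubernetes secret named aws-secret with AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY keys (for example, one created with kubectl create secret generic aws-secret --from-literal=AWS_ACCESS_KEY_ID=... --from-literal=AWS_SECRET_ACCESS_KEY=...). If that secret is missing or named differently, the pods for those steps will fail to start.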

GitHub: Hydrospheredata / kubeflow-workshop / workflows / origin.py
def apply_config_map_and_aws_secret(op):
    return (op 
        .apply(use_config_map(configmap))
        .apply(use_aws_secret())
        .set_image_pull_policy('Always')
    )

GitHub: Hydrospheredata / kubeflow-workshop / workflows / sampling.py
"--hydrosphere-address", hydrosphere_address,
            "--model-name", model_name,
        ],
    ).apply(use_aws_secret())

    # 8. Test the model via stage application
    test = dsl.ContainerOp(
        name="test",
        image=f"hydrosphere/mnist-pipeline-test:{tag}",  # <-- Replace with correct docker image
        arguments=[
            "--data-path", sample.outputs["data_path"],
            "--hydrosphere-address", hydrosphere_address,
            "--application-name", deploy_model_to_stage.outputs["application_name"],
            "--acceptable-accuracy", acceptable_accuracy,
        ],
    ).apply(use_aws_secret())
    test.set_retry(3)

    # 9. Deploy model to production application
    deploy_model_to_prod = dsl.ContainerOp(
        name="deploy_to_prod",
        image=f"hydrosphere/mnist-pipeline-deploy:{tag}",  # <-- Replace with correct docker image
        file_outputs={
            "application_name": "/application_name.txt",
            "application_link": "/application_link.txt"
        },
        arguments=[
            "--model-version", release_model.outputs["model_version"],
            "--model-name", model_name,
            "--application-name-postfix", "_app", 
            "--hydrosphere-address", hydrosphere_address
        ],

GitHub: Hydrospheredata / kubeflow-workshop / workflows / subsample.py
def apply_config_map_and_aws_secret(op):
    return (op 
        .apply(use_config_map(configmap))
        .apply(use_aws_secret())
        .set_image_pull_policy('Always')
    )
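
The official kubeflow/pipelines AWS samples below take the opposite approach and pass everything explicitly: use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'). Functionally this is the same as the no-argument call, since those values are the defaults, but it makes the dependency on the cluster secret visible in the pipeline definition and lets you point individual steps at a differently named secret or keys if needed.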

GitHub: kubeflow / pipelines / samples / contrib / aws-samples / ground_truth_pipeline_demo / mini-image-classification-pipeline.py
        job_name=ground_truth_validation_job_name,
        label_attribute_name=ground_truth_label_attribute_name,
        manifest_location=ground_truth_validation_manifest_location,
        output_location=ground_truth_output_location,
        task_type=ground_truth_task_type,
        worker_type=ground_truth_worker_type,
        workteam_arn=workteam.output,
        label_category_config=ground_truth_label_category_config,
        ui_template=ground_truth_ui_template,
        title=ground_truth_title,
        description=ground_truth_description,
        num_workers_per_object=ground_truth_num_workers_per_object,
        time_limit=ground_truth_time_limit,
        task_availibility=ground_truth_task_availibility,
        max_concurrent_tasks=ground_truth_max_concurrent_tasks
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    training = sagemaker_train_op(
        region=region,
        algorithm_name=training_algorithm_name,
        training_input_mode=training_input_mode,
        hyperparameters=training_hyperparameters,
        channels=training_channels,
        data_location_1=ground_truth_train.outputs['output_manifest_location'],
        data_location_2=ground_truth_validation.outputs['output_manifest_location'],
        instance_type=training_instance_type,
        instance_count=training_instance_count,
        volume_size=training_volume_size,
        max_run_time=training_max_run_time,
        model_artifact_path=training_output_location,
        role=role_arn
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

GitHub: kubeflow / pipelines / samples / contrib / aws-samples / mnist-kmeans-sagemaker / kmeans-hpo-pipeline.py
        max_num_jobs=max_num_jobs,
        max_parallel_jobs=max_parallel_jobs,
        resource_encryption_key=resource_encryption_key,
        max_run_time=max_run_time,
        vpc_security_group_ids=vpc_security_group_ids,
        vpc_subnets=vpc_subnets,
        network_isolation=network_isolation,
        traffic_encryption=traffic_encryption,
        warm_start_type=warm_start_type,
        parent_hpo_jobs=parent_hpo_jobs,
        spot_instance=spot_instance,
        max_wait_time=max_wait_time,
        checkpoint_config=checkpoint_config,
        tags=tags,
        role=role_arn,
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

GitHub: Hydrospheredata / kubeflow-workshop / pipeline_recurring.py
"--model-name", model_name,
        ],
    ).apply(use_aws_secret())
    deploy_to_stage.after(release)

    # 5. Test the model via stage application
    test = dsl.ContainerOp(
        name="test",
        image="hydrosphere/mnist-pipeline-test:v1",  # <-- Replace with correct docker image
        arguments=[
            "--data-path", sample.outputs["data_path"],
            "--hydrosphere-address", hydrosphere_address,
            "--acceptable-accuracy", acceptable_accuracy,
            "--model-name", model_name, 
        ],
    ).apply(use_aws_secret())
    test.after(deploy_to_stage)

    test.set_retry(3)

    # 6. Deploy model to production application
    deploy_to_prod = dsl.ContainerOp(
        name="deploy_to_prod",
        image="hydrosphere/mnist-pipeline-deploy-to-prod:v1",  # <-- Replace with correct docker image
        arguments=[
            "--model-version", release.outputs["model_version"],
            "--model-name", model_name,
            "--hydrosphere-address", hydrosphere_address
        ],
    ).apply(use_aws_secret())
    deploy_to_prod.after(test)

GitHub: kubeflow / pipelines / samples / contrib / aws-samples / titanic-survival-prediction / titanic-survival-prediction.py
        region=region,
        name=cluster_name,
        instance_type=instance_type,
        instance_count=instance_count,
        log_s3_uri=log_s3_uri,
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    training_and_prediction = emr_submit_spark_job_op(
        region=region,
        jobflow_id=create_cluster.output,
        job_name=job_name,
        jar_path=jar_path,
        main_class=main_class,
        input=input,
        output=output
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    delete_cluster = emr_delete_cluster_op(
      region=region,
      jobflow_id=create_cluster.output,
      dependent=training_and_prediction.outputs['job_id']
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

GitHub: kubeflow / pipelines / samples / contrib / aws-samples / mnist-kmeans-sagemaker / mnist-classification-pipeline.py
        traffic_encryption=traffic_encryption,
        spot_instance=train_spot_instance,
        max_wait_time=train_max_wait_time,
        checkpoint_config=train_checkpoint_config,
        role=role_arn,
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    create_model = sagemaker_model_op(
        region=region,
        endpoint_url=endpoint_url,
        model_name=training.outputs['job_name'],
        image=training.outputs['training_image'],
        model_artifact_url=training.outputs['model_artifact_url'],
        network_isolation=network_isolation,
        role=role_arn
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    prediction = sagemaker_deploy_op(
        region=region,
        endpoint_url=endpoint_url,
        model_name_1=create_model.output,
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    batch_transform = sagemaker_batch_transform_op(
        region=region,
        endpoint_url=endpoint_url,
        model_name=create_model.output,
        instance_type=batch_transform_instance_type,
        instance_count=instance_count,
        max_concurrent=batch_transform_max_concurrent,
        max_payload=batch_transform_max_payload,
        batch_strategy=batch_strategy,