How to use the kfp.components.load_component_from_file function in kfp

To help you get started, we’ve selected a few examples of kfp.components.load_component_from_file, based on popular ways kfp is used in public open-source projects.

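At its core, components.load_component_from_file reads a component.yaml definition and returns a factory function; calling that factory inside a @dsl.pipeline-decorated function adds a task to the pipeline. Here is a minimal sketch of that pattern before the real samples below; the my_component.yaml path and the input_text parameter are placeholders, not taken from any of the examples:

import kfp
from kfp import components
from kfp import dsl

# Placeholder path: point this at the component.yaml you want to load.
my_op = components.load_component_from_file('my_component.yaml')

@dsl.pipeline(
    name='Minimal load_component_from_file example',
    description='Runs a single component loaded from a YAML file'
)
def my_pipeline(message='hello'):
    # The factory's keyword arguments mirror the inputs declared in the component YAML.
    my_op(input_text=message)

if __name__ == '__main__':
    # Compile into an archive that can be uploaded to a Kubeflow Pipelines cluster.
    kfp.compiler.Compiler().compile(my_pipeline, 'my_pipeline.yaml')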

github kubeflow/pipelines: samples/contrib/aws-samples/mnist-kmeans-sagemaker/kmeans-hpo-pipeline.py (view on GitHub)
#!/usr/bin/env python3

import kfp
from kfp import components
from kfp import dsl
from kfp.aws import use_aws_secret

sagemaker_hpo_op = components.load_component_from_file('../../../../components/aws/sagemaker/hyperparameter_tuning/component.yaml')

@dsl.pipeline(
    name='MNIST HPO test pipeline',
    description='SageMaker hyperparameter tuning job test'
)
def hpo_test(region='us-west-2',
    hpo_job_name='HPO-kmeans-sample',
    image='',
    algorithm_name='K-Means',
    training_input_mode='File',
    metric_definitions='{}',
    strategy='Bayesian',
    metric_name='test:msd',
    metric_type='Minimize',
    early_stopping_type='Off',
    static_parameters='{"k": "10", "feature_dim": "784"}',
github kubeflow/pipelines: samples/contrib/aws-samples/ground_truth_pipeline_demo/mini-image-classification-pipeline.py (view on GitHub)
#!/usr/bin/env python3

import kfp
from kfp import components
from kfp import dsl
from kfp.aws import use_aws_secret

sagemaker_workteam_op = components.load_component_from_file('../../../../components/aws/sagemaker/workteam/component.yaml')
sagemaker_gt_op = components.load_component_from_file('../../../../components/aws/sagemaker/ground_truth/component.yaml')
sagemaker_train_op = components.load_component_from_file('../../../../components/aws/sagemaker/train/component.yaml')

@dsl.pipeline(
    name='Ground Truth image classification test pipeline',
    description='SageMaker Ground Truth job test'
)
def ground_truth_test(region='us-west-2',
    team_name='ground-truth-demo-team',
    team_description='Team for mini image classification labeling job',
    user_pool='',
    user_groups='',
    client_id='',
    ground_truth_train_job_name='mini-image-classification-demo-train',
    ground_truth_validation_job_name='mini-image-classification-demo-validation',
    ground_truth_label_attribute_name='category',
    ground_truth_train_manifest_location='s3://your-bucket-name/mini-image-classification/ground-truth-demo/train.manifest',
    ground_truth_validation_manifest_location='s3://your-bucket-name/mini-image-classification/ground-truth-demo/validation.manifest',
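The rest of this sample (not shown here) calls the three loaded factories in sequence and feeds values from one task into the next. The pattern looks roughly like this; the keyword arguments and the output name are illustrative placeholders, not the components' real interfaces:

    # Inside ground_truth_test(...):
    workteam = sagemaker_workteam_op(
        region=region,
        team_name=team_name,
        description=team_description,
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    # A downstream task consumes an upstream result via .outputs['<output name>'].
    sagemaker_gt_op(
        region=region,
        workteam_arn=workteam.outputs['workteam_arn'],
        manifest_location=ground_truth_train_manifest_location,
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))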
github kubeflow/pipelines: samples/contrib/aws-samples/titanic-survival-prediction/titanic-survival-prediction.py (view on GitHub)
#!/usr/bin/env python3

import kfp
from kfp import components
from kfp import dsl
from kfp import gcp
from kfp.aws import use_aws_secret

emr_create_cluster_op     = components.load_component_from_file('../../../../components/aws/emr/create_cluster/component.yaml')
emr_submit_spark_job_op = components.load_component_from_file('../../../../components/aws/emr/submit_spark_job/component.yaml')
emr_delete_cluster_op     = components.load_component_from_file('../../../../components/aws/emr/delete_cluster/component.yaml')

@dsl.pipeline(
  name='Titanic Survival Prediction Pipeline',
  description='Predict survival on the Titanic'
)
def titanic_survival_prediction(region='us-west-2',
    log_s3_uri="s3://kubeflow-pipeline-data/emr/titanic/logs",
    cluster_name="emr-cluster",
    job_name='spark-ml-trainer',
    input='s3://kubeflow-pipeline-data/emr/titanic/train.csv',
    output='s3://kubeflow-pipeline-data/emr/titanic/output',
    jar_path='s3://kubeflow-pipeline-data/emr/titanic/titanic-survivors-prediction_2.11-1.0.jar',
    main_class='com.amazonaws.emr.titanic.Titanic',
    instance_type="m4.xlarge",
    instance_count="3"
    ):
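Further down the same file, the three EMR factories are called and ordered with .after() so the cluster exists before the Spark job runs and is deleted afterwards. A simplified, illustrative sketch of that wiring (the real sample passes many more arguments and threads the cluster id between the steps):

    # Inside titanic_survival_prediction(...):
    create_cluster = emr_create_cluster_op(
        region=region,
        name=cluster_name,
        log_s3_uri=log_s3_uri,
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    submit_job = emr_submit_spark_job_op(
        region=region,
        job_name=job_name,
    ).after(create_cluster).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    # Tear the cluster down once the Spark job has finished.
    emr_delete_cluster_op(
        region=region,
    ).after(submit_job).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))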
github kubeflow/pipelines: samples/contrib/aws-samples/mnist-kmeans-sagemaker/mnist-classification-pipeline.py (view on GitHub)
#!/usr/bin/env python3

import kfp
from kfp import components
from kfp import dsl
from kfp.aws import use_aws_secret

sagemaker_hpo_op = components.load_component_from_file('../../../../components/aws/sagemaker/hyperparameter_tuning/component.yaml')
sagemaker_train_op = components.load_component_from_file('../../../../components/aws/sagemaker/train/component.yaml')
sagemaker_model_op = components.load_component_from_file('../../../../components/aws/sagemaker/model/component.yaml')
sagemaker_deploy_op = components.load_component_from_file('../../../../components/aws/sagemaker/deploy/component.yaml')
sagemaker_batch_transform_op = components.load_component_from_file('../../../../components/aws/sagemaker/batch_transform/component.yaml')

@dsl.pipeline(
    name='MNIST Classification pipeline',
    description='MNIST Classification using KMEANS in SageMaker'
)
def mnist_classification(region='us-west-2',
    image='174872318107.dkr.ecr.us-west-2.amazonaws.com/kmeans:1',
    training_input_mode='File',
    hpo_strategy='Bayesian',
    hpo_metric_name='test:msd',
    hpo_metric_type='Minimize',
    hpo_early_stopping_type='Off',
    hpo_static_parameters='{"k": "10", "feature_dim": "784"}',
    hpo_integer_parameters='[{"Name": "mini_batch_size", "MinValue": "500", "MaxValue": "600"}, {"Name": "extra_center_factor", "MinValue": "10", "MaxValue": "20"}]',
    hpo_continuous_parameters='[]',
    hpo_categorical_parameters='[{"Name": "init_method", "Values": ["random", "kmeans++"]}]',
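Later in this sample, the five loaded factories are chained into a single workflow: the best hyperparameters found by the tuning job feed the training job, the trained model artifact is registered as a model, and the model is then deployed. A condensed, illustrative sketch of that flow; the output and input names below are placeholders, not the components' exact interfaces:

    # Inside mnist_classification(...):
    hpo = sagemaker_hpo_op(region=region, image=image
        ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    training = sagemaker_train_op(region=region, image=image,
        hyperparameters=hpo.outputs['best_hyperparameters']
        ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    model = sagemaker_model_op(region=region,
        model_artifact_url=training.outputs['model_artifact_url']
        ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    # A single-output task can also be referenced via .output.
    sagemaker_deploy_op(region=region, model_name=model.output
        ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))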
github GoogleCloudPlatform/training-data-analyst: courses/machine_learning/deepdive2/building_production_ml_systems/solutions/pipelines/create_pipeline.py (view on GitHub)
# Earlier in this file (not shown in the excerpt), kfp.components is imported as `comp`,
# kfp.gcp as `gcp`, and BQ2GCS_YAML / TRAINJOB_YAML / DEPLOYMODEL_YAML hold the paths to
# the component.yaml files for the three steps.
def pipeline(gcs_bucket_name=''):


    bq2gcs_op = comp.load_component_from_file(BQ2GCS_YAML)
    bq2gcs = bq2gcs_op(
        input_bucket=gcs_bucket_name,
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))


    trainjob_op = comp.load_component_from_file(TRAINJOB_YAML)
    trainjob = trainjob_op(
        input_bucket=gcs_bucket_name,
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))


    deploymodel_op = comp.load_component_from_file(DEPLOYMODEL_YAML)
    deploymodel = deploymodel_op(
        input_bucket=gcs_bucket_name,
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))


    trainjob.after(bq2gcs)
    deploymodel.after(trainjob)
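Once the pipeline function is defined, it is compiled and run with the standard kfp tooling. For example (the endpoint, archive name, and bucket below are placeholders):

import kfp

# Compile the pipeline function above into an archive that can be uploaded via the UI.
kfp.compiler.Compiler().compile(pipeline, 'pipeline.zip')

# Or submit it directly to a Kubeflow Pipelines endpoint for a one-off run.
client = kfp.Client(host='https://<your-kfp-endpoint>')
client.create_run_from_pipeline_func(pipeline, arguments={'gcs_bucket_name': 'your-bucket-name'})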