How to use kfp - 10 common examples

To help you get started, we've selected a few kfp examples based on popular ways the library is used in public projects.
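
If you're new to kfp, most of the examples below follow the same v1 SDK pattern: define a pipeline of container steps with the DSL, then compile it to an archive that a Kubeflow Pipelines cluster can run. A minimal sketch (the image, names, and output file are illustrative, not from any of the projects below):

import kfp
from kfp import dsl

@dsl.pipeline(name='hello-pipeline', description='A minimal one-step pipeline.')
def hello_pipeline(message: str = 'hello'):
    # A single container step; the parameter is substituted at run time.
    dsl.ContainerOp(name='echo', image='alpine', command=['echo', message])

if __name__ == '__main__':
    kfp.compiler.Compiler().compile(hello_pipeline, 'hello_pipeline.tar.gz')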


kubeflow / pipelines / test / sample-test / check_notebook_results.py
# Fragment from a test-runner method; DEFAULT_CONFIG, Client, utils,
# test_cases, and RUN_LIST_PAGE_SIZE are defined elsewhere in the module.
try:
    with open(DEFAULT_CONFIG, 'r') as f:
        raw_args = yaml.safe_load(f)
except yaml.YAMLError as yamlerr:
    raise RuntimeError('Illegal default config: {}'.format(yamlerr))
except OSError as ose:
    raise FileNotFoundError('Default config not found: {}'.format(ose))
else:
    test_timeout = raw_args['test_timeout']

if self._run_pipeline:
    experiment = self._experiment_name
    ###### Initialization ######
    host = 'ml-pipeline.%s.svc.cluster.local:8888' % self._namespace
    client = Client(host=host)

    ###### Get experiments ######
    experiment_id = client.get_experiment(experiment_name=experiment).id

    ###### Get runs ######
    list_runs_response = client.list_runs(page_size=RUN_LIST_PAGE_SIZE,
                                          experiment_id=experiment_id)

    ###### Check all runs ######
    for run in list_runs_response.runs:
        run_id = run.id
        response = client.wait_for_run_completion(run_id, test_timeout)
        succ = (response.run.status.lower() == 'succeeded')
        utils.add_junit_test(test_cases, 'job completion',
                             succ, 'waiting for job completion failure')
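
The checker above only inspects runs that already exist in the experiment. For completeness, a hedged sketch of how a run might be submitted and awaited with the same client (the host, and the hello_pipeline function from the intro sketch, are assumptions):

import kfp

client = kfp.Client(host='ml-pipeline.kubeflow.svc.cluster.local:8888')
# Submit a pipeline function directly, then wait for it, mirroring the
# wait_for_run_completion pattern above.
run = client.create_run_from_pipeline_func(hello_pipeline, arguments={})
result = client.wait_for_run_completion(run.run_id, timeout=1800)
print(result.run.status)
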
kubeflow / pipelines / samples / core / preemptible_tpu_gpu / preemptible_tpu_gpu.py
import kfp.dsl as dsl
import kfp.gcp as gcp

# FlipCoinOp is a ContainerOp defined earlier in the sample (elided here).
@dsl.pipeline(
    name='pipeline flip coin',
    description='Runs a coin-flip step on a preemptible node pool with a GPU and retries.')
def flipcoin():
  flip = FlipCoinOp().apply(gcp.use_preemptible_nodepool()).set_gpu_limit(
      1, 'nvidia').set_retry(5)
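
FlipCoinOp is defined earlier in the sample file. A minimal sketch of what such an op could look like, assuming a plain ContainerOp subclass (the image and command here are illustrative, not the sample's actual values):

import kfp.dsl as dsl

class FlipCoinOp(dsl.ContainerOp):
    """Hypothetical coin-flip step; writes 'heads' or 'tails' to /tmp/output."""
    def __init__(self):
        super(FlipCoinOp, self).__init__(
            name='flip-coin',
            image='python:3.7-alpine',
            command=['sh', '-c'],
            arguments=['python -c "import random; print(\'heads\' if '
                       'random.random() > 0.5 else \'tails\')" | tee /tmp/output'],
            file_outputs={'output': '/tmp/output'})
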
amygdala / code-snippets / ml / kubeflow-pipelines / sbtb / example_pipelines / bw.py
  train = train_op(
    data_dir=data_dir,
    workdir='%s/%s' % (working_dir, dsl.RUN_ID_PLACEHOLDER),
    epochs=epochs, steps_per_epoch=steps_per_epoch,
    load_checkpoint=load_checkpoint
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))


  serve = serve_op(
    model_path=train.outputs['train_output_path'],
    model_name='bikesw'
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))

  train.set_gpu_limit(1)

if __name__ == '__main__':
  import kfp.compiler as compiler
  compiler.Compiler().compile(bikes_weather, __file__ + '.tar.gz')
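
Once compiled, the archive can be uploaded to a cluster or run directly. A minimal sketch, assuming a reachable KFP endpoint (the host, file name, and pipeline name are placeholders):

import kfp

client = kfp.Client(host='http://localhost:8080')
# Register the compiled package so it shows up in the Pipelines UI.
client.upload_pipeline('bw.py.tar.gz', pipeline_name='bikes-weather')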
kubeflow / pipelines / samples / contrib / aws-samples / ground_truth_pipeline_demo / mini-image-classification-pipeline.py
#!/usr/bin/env python3

import kfp
from kfp import components
from kfp import dsl
from kfp.aws import use_aws_secret

sagemaker_workteam_op = components.load_component_from_file('../../../../components/aws/sagemaker/workteam/component.yaml')
sagemaker_gt_op = components.load_component_from_file('../../../../components/aws/sagemaker/ground_truth/component.yaml')
sagemaker_train_op = components.load_component_from_file('../../../../components/aws/sagemaker/train/component.yaml')

@dsl.pipeline(
    name='Ground Truth image classification test pipeline',
    description='SageMaker Ground Truth job test'
)
def ground_truth_test(region='us-west-2',
    team_name='ground-truth-demo-team',
    team_description='Team for mini image classification labeling job',
    user_pool='',
    user_groups='',
    client_id='',
    ground_truth_train_job_name='mini-image-classification-demo-train',
    ground_truth_validation_job_name='mini-image-classification-demo-validation',
    ground_truth_label_attribute_name='category',
    # ... remaining parameters and the pipeline body are truncated in this excerpt
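
Each load_component_from_file call above turns a component.yaml into a Python factory whose keyword arguments mirror the component's declared inputs. A self-contained sketch of the same mechanism using an inline definition (this component is hypothetical, not one of the SageMaker components):

from kfp import components, dsl

echo_op = components.load_component_from_text("""
name: Echo
inputs:
- {name: message, type: String}
implementation:
  container:
    image: alpine
    command: [echo, {inputValue: message}]
""")

@dsl.pipeline(name='echo-demo', description='Shows a loaded component in use.')
def echo_pipeline(message: str = 'hello'):
    echo_task = echo_op(message=message)  # keyword matches the declared input
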
kubeflow / pipelines / samples / contrib / aws-samples / mnist-kmeans-sagemaker / kmeans-hpo-pipeline.py
#!/usr/bin/env python3

import kfp
from kfp import components
from kfp import dsl
from kfp.aws import use_aws_secret

sagemaker_hpo_op = components.load_component_from_file('../../../../components/aws/sagemaker/hyperparameter_tuning/component.yaml')

@dsl.pipeline(
    name='MNIST HPO test pipeline',
    description='SageMaker hyperparameter tuning job test'
)
def hpo_test(region='us-west-2',
    hpo_job_name='HPO-kmeans-sample',
    image='',
    algorithm_name='K-Means',
    training_input_mode='File',
    metric_definitions='{}',
    strategy='Bayesian',
    metric_name='test:msd',
    metric_type='Minimize',
    early_stopping_type='Off',
    static_parameters='{"k": "10", "feature_dim": "784"}',
    # ... remaining parameters and the pipeline body are truncated in this excerpt
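
Both AWS samples import use_aws_secret; in the full sources it is applied to each SageMaker step so the step's container can authenticate against AWS. A hedged sketch (the arguments shown are the function's documented defaults; the surrounding task is illustrative and elides required component arguments):

from kfp.aws import use_aws_secret

# Inside the pipeline body: mount AWS credentials from a Kubernetes secret
# named 'aws-secret' into the step's environment.
hpo_task = sagemaker_hpo_op(region=region).apply(
    use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))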
kubeflow / pipelines / samples / core / kubeflow_training_classification / kubeflow_training_classification.py
    training.set_gpu_limit(1)

    prediction = dataflow_tf_predict_op(
        data_file_pattern=evaluation,
        schema=schema,
        target_column=target,
        model=training.output,
        run_mode=predict_mode,
        gcp_project=project,
        predictions_dir=output_template
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))

    confusion_matrix = confusion_matrix_op(
        predictions=prediction.output,
        output_dir=output_template
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))
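
The .apply(gcp.use_gcp_secret('user-gcp-sa')) calls that decorate each step above mount the cluster's user-gcp-sa Kubernetes secret and point GOOGLE_APPLICATION_CREDENTIALS at the mounted key, so the step can call GCP services. The pattern works on any ContainerOp (some_op below is hypothetical):

from kfp import gcp

task = some_op().apply(gcp.use_gcp_secret('user-gcp-sa'))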
amygdala / code-snippets / ml / kubeflow-pipelines / samples / kubeflow-tf / gh_summ.py
  # Pipeline signature truncated in this excerpt; earlier parameters such as
  # train_steps and working_dir are omitted.
  checkpoint_dir: dsl.PipelineParam = dsl.PipelineParam(
      name='checkpoint-dir',
      value='gs://aju-dev-demos-codelabs/kubecon/model_output_tbase.bak2019000'),
  deploy_webapp: dsl.PipelineParam = dsl.PipelineParam(name='deploy-webapp', value='true'),
  data_dir: dsl.PipelineParam = dsl.PipelineParam(
      name='data-dir', value='gs://aju-dev-demos-codelabs/kubecon/t2t_data_gh_all/')):


  train = dsl.ContainerOp(
      name='train',
      image='gcr.io/google-samples/ml-pipeline-t2ttrain',
      arguments=["--data-dir", data_dir,
          "--checkpoint-dir", checkpoint_dir,
          "--model-dir", '%s/%s/model_output' % (working_dir, '{{workflow.name}}'),
          "--train-steps", train_steps, "--deploy-webapp", deploy_webapp],
      file_outputs={'output': '/tmp/output'}
      ).apply(gcp.use_gcp_secret('user-gcp-sa'))

  serve = dsl.ContainerOp(
      name='serve',
      image='gcr.io/google-samples/ml-pipeline-kubeflow-tfserve',
      arguments=["--model_name", 'ghsumm-%s' % ('{{workflow.name}}',),
          "--model_path", '%s/%s/model_output/export' % (working_dir, '{{workflow.name}}')
          ]
      )
  serve.after(train)
  train.set_gpu_limit(4)

  with dsl.Condition(train.output == 'true'):
    webapp = dsl.ContainerOp(
        name='webapp',
        image='gcr.io/google-samples/ml-pipeline-webapp-launcher',
        arguments=["--model_name", 'ghsumm-%s' % ('{{workflow.name}}',),
kubeflow / pipelines / samples / core / kubeflow_training_classification / kubeflow_training_classification.py
        training_output_dir=output_template
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))

    if use_gpu:
        training.image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer-gpu:0517114dc2b365a4a6d95424af6157ead774eff3'
        training.set_gpu_limit(1)

    prediction = dataflow_tf_predict_op(
        data_file_pattern=evaluation,
        schema=schema,
        target_column=target,
        model=training.output,
        run_mode=predict_mode,
        gcp_project=project,
        predictions_dir=output_template
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))

    confusion_matrix = confusion_matrix_op(
        predictions=prediction.output,
        output_dir=output_template
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))
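
Note that the if use_gpu: branch executes in Python while the pipeline is being compiled, not in the cluster: it decides which image and resource limits get baked into the workflow. The same pattern, sketched with an illustrative module-level flag and image names:

from kfp import dsl

USE_GPU = True  # evaluated at compile time, not at run time

@dsl.pipeline(name='train-demo')
def train_pipeline():
    train = dsl.ContainerOp(name='train', image='gcr.io/my-proj/trainer:cpu')
    if USE_GPU:
        # Swap in the GPU image and request one GPU in the compiled workflow.
        train.image = 'gcr.io/my-proj/trainer:gpu'
        train.set_gpu_limit(1)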
kubeflow / pipelines / samples / contrib / aws-samples / ground_truth_pipeline_demo / mini-image-classification-pipeline.py
#!/usr/bin/env python3

import kfp
from kfp import components
from kfp import dsl
from kfp.aws import use_aws_secret

sagemaker_workteam_op = components.load_component_from_file('../../../../components/aws/sagemaker/workteam/component.yaml')
sagemaker_gt_op = components.load_component_from_file('../../../../components/aws/sagemaker/ground_truth/component.yaml')
sagemaker_train_op = components.load_component_from_file('../../../../components/aws/sagemaker/train/component.yaml')

@dsl.pipeline(
    name='Ground Truth image classification test pipeline',
    description='SageMaker Ground Truth job test'
)
def ground_truth_test(region='us-west-2',
    team_name='ground-truth-demo-team',
    team_description='Team for mini image classification labeling job',
    user_pool='',
    user_groups='',
    client_id='',
    ground_truth_train_job_name='mini-image-classification-demo-train',
    ground_truth_validation_job_name='mini-image-classification-demo-validation',
    ground_truth_label_attribute_name='category',
    ground_truth_train_manifest_location='s3://your-bucket-name/mini-image-classification/ground-truth-demo/train.manifest',
    ground_truth_validation_manifest_location='s3://your-bucket-name/mini-image-classification/ground-truth-demo/validation.manifest',
    # ... remaining parameters and the pipeline body are truncated in this excerpt
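
The excerpt stops mid-signature; in the full sample the loaded workteam, ground-truth, and training ops are invoked inside the pipeline body, and the file ends with the usual compile step. A standard sketch of that final step (the output file name is illustrative):

if __name__ == '__main__':
    import kfp.compiler as compiler
    compiler.Compiler().compile(ground_truth_test, __file__ + '.zip')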
amygdala / code-snippets / ml / kubeflow-pipelines / sbtb / example_pipelines / bw.py
from kfp import dsl, gcp
from kfp.dsl.types import GCSPath, String  # v1 SDK type annotations

# train_op and serve_op are component factories defined earlier in the sample;
# the @dsl.pipeline decorator is elided in this excerpt.
def bikes_weather(  #pylint: disable=unused-argument
  working_dir: GCSPath = 'gs://YOUR_GCS_DIR_HERE',
  data_dir: GCSPath = 'gs://aju-dev-demos-codelabs/bikes_weather/',
  epochs: 'Integer' = 1,
  steps_per_epoch: 'Integer' = -1,  # if -1, don't override normal calcs based on dataset size
  load_checkpoint: String = ''
  ):


  train = train_op(
    data_dir=data_dir,
    workdir='%s/%s' % (working_dir, dsl.RUN_ID_PLACEHOLDER),
    epochs=epochs, steps_per_epoch=steps_per_epoch,
    load_checkpoint=load_checkpoint
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))


  serve = serve_op(
    model_path=train.outputs['train_output_path'],
    model_name='bikesw'
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))

  train.set_gpu_limit(1)
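
train.outputs['train_output_path'] is available only because the train component declares an output with that name. A hedged sketch of how such a component might declare it, using an inline definition (the names, image, and types here are illustrative, not the sample's real component):

from kfp import components

train_op = components.load_component_from_text("""
name: Train
outputs:
- {name: train_output_path, type: String}
implementation:
  container:
    image: gcr.io/my-proj/trainer
    command: [python, /train.py, --output-path, {outputPath: train_output_path}]
""")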