How to use the kfp.compiler module in kfp

To help you get started, we’ve selected a few kfp.compiler examples based on popular ways it is used in public projects.

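All of these snippets follow the same pattern: define a pipeline function under the @dsl.pipeline decorator, then hand that function to kfp.compiler.Compiler().compile() together with an output package path. The sketch below shows that pattern end to end; it is a minimal sketch assuming the kfp v1 SDK (the dsl.ContainerOp style used in the examples that follow), and the pipeline name, image, and output filename are only illustrative.

import kfp
from kfp import dsl


def echo_op():
    # A single step that runs a shell command in a stock bash image.
    return dsl.ContainerOp(
        name='echo',
        image='library/bash:4.4.23',
        command=['sh', '-c'],
        arguments=['echo "compiled with kfp.compiler"']
    )


@dsl.pipeline(
    name='Minimal pipeline',
    description='A one-step pipeline that demonstrates kfp.compiler.'
)
def minimal_pipeline():
    echo_op()


if __name__ == '__main__':
    # Compiler().compile(pipeline_func, package_path) writes the compiled
    # workflow package; the file extension (.yaml, .zip, .tar.gz) selects
    # the output format, as the examples below also show.
    kfp.compiler.Compiler().compile(minimal_pipeline, 'minimal_pipeline.yaml')

The compiled package can then be uploaded through the Kubeflow Pipelines UI or submitted with the kfp client.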

github amygdala / code-snippets / ml / kubeflow-pipelines / sbtb / example_pipelines / bw.py
    workdir='%s/%s' % (working_dir, dsl.RUN_ID_PLACEHOLDER),
    epochs=epochs, steps_per_epoch=steps_per_epoch,
    load_checkpoint=load_checkpoint
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))


  serve = serve_op(
    model_path=train.outputs['train_output_path'],
    model_name='bikesw'
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))

  train.set_gpu_limit(1)

if __name__ == '__main__':
  import kfp.compiler as compiler
  compiler.Compiler().compile(bikes_weather, __file__ + '.tar.gz')

github kubeflow / pipelines / samples / core / helloworld / hello_world.py
import kfp
from kfp import dsl


def echo_op():
    return dsl.ContainerOp(
        name='echo',
        image='library/bash:4.4.23',
        command=['sh', '-c'],
        arguments=['echo "hello world"']
    )

@dsl.pipeline(
    name='My first pipeline',
    description='A hello world pipeline.'
)
def hello_world_pipeline():
    echo_task = echo_op()

if __name__ == '__main__':
    kfp.compiler.Compiler().compile(hello_world_pipeline, __file__ + '.yaml')

github kubeflow / pipelines / samples / contrib / volume_ops / volumeop_sequential.py
        command=["sh", "-c"],
        arguments=["cp /data/file1 /data/file2"],
        pvolumes={"/data": step1.pvolume}
    )

    step3 = dsl.ContainerOp(
        name="step3",
        image="library/bash:4.4.23",
        command=["cat", "/mnt/file1", "/mnt/file2"],
        pvolumes={"/mnt": step2.pvolume}
    )


if __name__ == "__main__":
    import kfp.compiler as compiler
    compiler.Compiler().compile(volumeop_sequential, __file__ + ".tar.gz")

github Hydrospheredata / kubeflow-workshop / workflows / origin.py
help="Which bucket to use, when uploading steps outputs", default="workshop-hydrosphere-mnist")
    parser.add_argument('-t', '--tag', 
        help="Which tag of image to use, when compiling pipeline", default="v3")
    parser.add_argument('-r', '--registry', 
        help="Which docker registry to use, when compiling pipeline", default="hydrosphere")
    parser.add_argument('-c', '--configmap', 
        help="Which ConfigMap to use, when executing pipeline", default="mnist-workflow")
    args = parser.parse_args()
    
    bucket = args.bucket
    tag = args.tag
    registry = args.registry
    configmap = args.configmap

    # Compile pipeline
    compiler.Compiler().compile(pipeline_definition, "origin.tar.gz")

github aronchick / kubeflow-and-mlops / code / pipeline.py
        op.container.set_image_pull_policy("Always")
        op.add_volume(
            k8s_client.V1Volume(
                name='azure',
                persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
                    claim_name='azure-managed-disk')
                )
            ).add_volume_mount(k8s_client.V1VolumeMount(
                mount_path='/mnt/azure', 
                name='azure')
            )


if __name__ == '__main__':
   import kfp.compiler as compiler
   compiler.Compiler().compile(tacosandburritos_train, __file__ + '.tar.gz')

github kubeflow / pipelines / samples / contrib / seldon / mnist_tf.py
}    
""")
    seldon_serving_json = seldon_serving_json_template.substitute({ 'dockerreposerving': str(docker_repo_serving),'dockertagserving': str(docker_tag_serving),'modelpvc': modelvolop.outputs["name"]})

    seldon_deployment = json.loads(seldon_serving_json)

    serve = dsl.ResourceOp(
        name='serve',
        k8s_resource=seldon_deployment,
        success_condition='status.state == Available'
    ).after(build_serving)


if __name__ == "__main__":
    import kfp.compiler as compiler
    compiler.Compiler().compile(mnist_tf, __file__ + ".tar.gz")

github kubeflow / pipelines / samples / core / pipeline_transformers / pipeline_transformers.py
def add_annotation(op):
  op.add_pod_annotation(name='hobby', value='football')
  return op

@dsl.pipeline(
    name='Pipeline transformer',
    description='The pipeline shows how to apply functions to all ops in the pipeline by pipeline transformers'
)
def transform_pipeline():
  op1 = print_op('hey, what are you up to?')
  op2 = print_op('train my model.')
  dsl.get_pipeline_conf().add_op_transformer(add_annotation)

if __name__ == '__main__':
  kfp.compiler.Compiler().compile(transform_pipeline, __file__ + '.yaml')

github mlrun / mlrun / mlrun / projects / project.py
        :param artifact_path:
                       target path/url for workflow artifacts, the string
                       '{{workflow.uid}}' will be replaced by workflow id
        :param ttl:    pipeline ttl in secs (after that the pods will be removed)
        """
        if not name or name not in self._workflows:
            raise ValueError('workflow {} not found'.format(name))

        workflow_path, code, _ = self._get_wf_file(name)
        pipeline = _create_pipeline(
            self, workflow_path, self._function_objects, secrets=self._secrets
        )

        artifact_path = artifact_path or self.artifact_path
        conf = new_pipe_meta(artifact_path, ttl=ttl)
        compiler.Compiler().compile(pipeline, target, pipeline_conf=conf)
        if code:
            remove(workflow_path)

github GoogleCloudPlatform / training-data-analyst / courses / machine_learning / deepdive2 / building_production_ml_systems / solutions / pipelines / create_pipeline.py
        input_bucket=gcs_bucket_name,
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))


    deploymodel_op = comp.load_component_from_file(DEPLOYMODEL_YAML)
    deploymodel = deploymodel_op(
        input_bucket=gcs_bucket_name,
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))


    trainjob.after(bq2gcs)
    deploymodel.after(trainjob)


if __name__ == '__main__':
    compiler.Compiler().compile(pipeline, PIPELINE_TAR, type_check=False)

github kubeflow / pipelines / samples / contrib / aws-samples / titanic-survival-prediction / titanic-survival-prediction.py
        jobflow_id=create_cluster.output,
        job_name=job_name,
        jar_path=jar_path,
        main_class=main_class,
        input=input,
        output=output
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

    delete_cluster = emr_delete_cluster_op(
      region=region,
      jobflow_id=create_cluster.output,
      dependent=training_and_prediction.outputs['job_id']
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

if __name__ == '__main__':
    kfp.compiler.Compiler().compile(titanic_suvival_prediction, __file__ + '.zip')