How to use the kubernetes.client.V1Volume function in kubernetes

To help you get started, we’ve selected a few kubernetes.client.V1Volume examples, based on popular ways it is used in public projects.

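Every snippet below follows the same basic pattern: construct a V1Volume that names a volume source, then reference that name from a V1VolumeMount on a container. A minimal, self-contained sketch (pod and path names are illustrative, not from any of the projects below):

from kubernetes import client

# An emptyDir volume: scratch space that lives only as long as the pod.
volume = client.V1Volume(
    name="scratch",
    empty_dir=client.V1EmptyDirVolumeSource(),
)

# The mount refers to the volume by name.
mount = client.V1VolumeMount(name="scratch", mount_path="/tmp/scratch")

container = client.V1Container(name="app", image="busybox", volume_mounts=[mount])
pod_spec = client.V1PodSpec(containers=[container], volumes=[volume])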

github jupyterhub / binderhub / binderhub / build.py
def submit(self):
        """Submit a image spec to openshift's s2i and wait for completion """
        volume_mounts = [
            client.V1VolumeMount(mount_path="/var/run/docker.sock", name="docker-socket")
        ]
        docker_socket_path = urlparse(self.docker_host).path
        volumes = [client.V1Volume(
            name="docker-socket",
            host_path=client.V1HostPathVolumeSource(path=docker_socket_path, type='Socket')
        )]

        if self.push_secret:
            volume_mounts.append(client.V1VolumeMount(mount_path="/root/.docker", name='docker-push-secret'))
            volumes.append(client.V1Volume(
                name='docker-push-secret',
                secret=client.V1SecretVolumeSource(secret_name=self.push_secret)
            ))

        env = []
        if self.git_credentials:
            env.append(client.V1EnvVar(name='GIT_CREDENTIAL_ENV', value=self.git_credentials))

        self.pod = client.V1Pod(
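The snippet is truncated at the V1Pod constructor, but the volume wiring is complete: each V1Volume is paired with a V1VolumeMount through its name. A standalone sketch of just the Docker-socket portion (the socket path is illustrative):

from kubernetes import client

# type='Socket' makes the kubelet fail fast if no UNIX socket exists at the path.
volume = client.V1Volume(
    name="docker-socket",
    host_path=client.V1HostPathVolumeSource(path="/var/run/docker.sock", type="Socket"),
)
mount = client.V1VolumeMount(name="docker-socket", mount_path="/var/run/docker.sock")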
github NervanaSystems / coach / rl_coach / orchestrators / kubernetes_orchestrator.py
                image=trainer_params.image,
                command=trainer_params.command,
                args=trainer_params.arguments,
                image_pull_policy='Always',
                volume_mounts=[k8sclient.V1VolumeMount(
                    name='nfs-pvc',
                    mount_path=trainer_params.checkpoint_dir
                )],
                stdin=True,
                tty=True
            )
            template = k8sclient.V1PodTemplateSpec(
                metadata=k8sclient.V1ObjectMeta(labels={'app': name}),
                spec=k8sclient.V1PodSpec(
                    containers=[container],
                    volumes=[k8sclient.V1Volume(
                        name="nfs-pvc",
                        persistent_volume_claim=self.nfs_pvc
                    )],
                    restart_policy='Never'
                ),
            )
        elif self.params.data_store_params.store_type == "s3":
            container = k8sclient.V1Container(
                name=name,
                image=trainer_params.image,
                command=trainer_params.command,
                args=trainer_params.arguments,
                image_pull_policy='Always',
                env=[k8sclient.V1EnvVar(name="ACCESS_KEY_ID", value=self.s3_access_key),
                     k8sclient.V1EnvVar(name="SECRET_ACCESS_KEY", value=self.s3_secret_key)],
                stdin=True,
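The snippet never shows self.nfs_pvc, but given how it is passed to V1Volume above, it is presumably a prebuilt claim reference along these lines (the claim name is hypothetical):

# Assumed shape of self.nfs_pvc; the claim name is illustrative.
self.nfs_pvc = k8sclient.V1PersistentVolumeClaimVolumeSource(claim_name="nfs-checkpoint-pvc")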
github cvisionai / tator / main / consumers.py
        node_selector = {'gpuWorker': 'yes'}
        resources = kube_client.V1ResourceRequirements(
            limits={'nvidia.com/gpu': 1},
        )
    else:
        node_selector = {'cpuWorker': 'yes'}
        resources = None
    mount = kube_client.V1VolumeMount(
        name='media-pv-claim',
        mount_path='/work',
        sub_path=uid,
    )
    pvc = kube_client.V1PersistentVolumeClaimVolumeSource(
        claim_name='media-pv-claim'
    )
    volume = kube_client.V1Volume(
        name='media-pv-claim',
        persistent_volume_claim=pvc,
    )
    work_env = kube_client.V1EnvVar(
        name='TATOR_WORK_DIR',
        value='/work',
    )
    container = kube_client.V1Container(
        name=container_name,
        image=(image_name + ':' + image_tag),
        command=command,
        args=args,
        image_pull_policy='Always',
        volume_mounts=[mount,] + other_mounts,
        env=[work_env,] + other_envs,
        resources=resources,
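Note the sub_path on the V1VolumeMount: each job sees only its own uid subdirectory of the shared claim at /work. A single V1Volume can back several mounts with different sub_paths, as in this sketch (the sub_path values are illustrative):

shared = kube_client.V1Volume(
    name='media-pv-claim',
    persistent_volume_claim=kube_client.V1PersistentVolumeClaimVolumeSource(claim_name='media-pv-claim'),
)
mount_a = kube_client.V1VolumeMount(name='media-pv-claim', mount_path='/work', sub_path='job-a')
mount_b = kube_client.V1VolumeMount(name='media-pv-claim', mount_path='/work', sub_path='job-b')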
github mlrun / mlrun / mlrun / k8s_utils.py
def mount_secret(self, name, path='/secret', items=None):
        self.add_volume(client.V1Volume(
            name=name,
            secret=client.V1SecretVolumeSource(
                secret_name=name,
                items=items,
                )),
            mount_path=path)
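The items argument is optional; when supplied, it selects which keys of the secret to project and under what file names. A hedged sketch of a call, assuming pod is an instance of the class that defines mount_secret (secret and key names are hypothetical):

# Project only the 'token' key of the secret to /creds/token.txt.
items = [client.V1KeyToPath(key='token', path='token.txt')]
pod.mount_secret(name='api-credentials', path='/creds', items=items)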
github dragonchain / dragonchain / dragonchain / job_processor / job_processor.py
    if retry > 5:
        # Re-enqueue?
        _log.error("Could not launch job after 5 attempts.")
        raise RuntimeError("Failure to launch job after 5 attempts")

    _log.info("Launching kubernetes job")
    try:
        volume_mounts = [
            kubernetes.client.V1VolumeMount(name="dockersock", mount_path="/var/run/docker.sock"),
            kubernetes.client.V1VolumeMount(name="faas", mount_path="/etc/openfaas-secret", read_only=True),
            kubernetes.client.V1VolumeMount(name="secrets", mount_path=SECRET_LOCATION[: SECRET_LOCATION.rfind("/")], read_only=True),
        ]
        volumes = [
            kubernetes.client.V1Volume(name="dockersock", host_path=kubernetes.client.V1HostPathVolumeSource(path="/var/run/docker.sock")),
            kubernetes.client.V1Volume(name="faas", secret=kubernetes.client.V1SecretVolumeSource(secret_name="openfaas-auth")),  # nosec
            kubernetes.client.V1Volume(name="secrets", secret=kubernetes.client.V1SecretVolumeSource(secret_name=f"d-{INTERNAL_ID}-secrets")),
        ]
        if STORAGE_TYPE == "disk":
            volume_mounts.append(kubernetes.client.V1VolumeMount(name="main-storage", mount_path=STORAGE_LOCATION))
            volumes.append(
                kubernetes.client.V1Volume(
                    name="main-storage",
                    persistent_volume_claim=kubernetes.client.V1PersistentVolumeClaimVolumeSource(claim_name=f"{DEPLOYMENT_NAME}-main-storage"),
                )
            )
        annotations = {}
        if IAM_ROLE:
            annotations["iam.amazonaws.com/role"] = IAM_ROLE

        resp = _kube.create_namespaced_job(
            namespace=NAMESPACE,
            body=kubernetes.client.V1Job(
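The call is truncated inside the V1Job body. This is not dragonchain's actual spec, but a generic batch job wrapping the volumes built above typically looks like this sketch (job name, image, and container details are illustrative):

job = kubernetes.client.V1Job(
    metadata=kubernetes.client.V1ObjectMeta(name="contract-job", annotations=annotations),
    spec=kubernetes.client.V1JobSpec(
        template=kubernetes.client.V1PodTemplateSpec(
            spec=kubernetes.client.V1PodSpec(
                containers=[kubernetes.client.V1Container(
                    name="worker", image="worker:latest", volume_mounts=volume_mounts)],
                volumes=volumes,
                restart_policy="Never",
            )
        )
    ),
)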
github labdave / CloudConductor / System / Platform / Kubernetes / KubernetesJob.py
        cpu_request_max = self.nodepool_info['max_cpu'] - self.cpu_reserve
        mem_request_max = self.nodepool_info['max_mem'] - self.mem_reserve

        # define resource limits/requests
        resource_def = client.V1ResourceRequirements(
            limits={'cpu': cpu_request_max, 'memory': str(mem_request_max) + 'G'},
            requests={'cpu': cpu_request_max * 0.8, 'memory': str(mem_request_max - 1) + 'G'}
        )

        # place the job in the appropriate node pool
        node_label_dict = {'poolName': str(self.node_label)}

        # build volumes
        volumes.append(
            client.V1Volume(
                name=self.volume_name,
                persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=self.pvc_name
                )
            )
        )

        # incorporate configured secrets
        if self.gcp_secret_configured:
            volume_mounts.append(
                client.V1VolumeMount(
                    mount_path="/etc/cloud_conductor/gcp.json",
                    sub_path="gcp.json",
                    name="secret-volume",
                    read_only=True
                )
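The mount projects a single gcp.json file out of a larger volume via sub_path; for the name to resolve, the pod spec also needs a matching V1Volume, presumably something like this sketch (the secret name is hypothetical):

volumes.append(
    client.V1Volume(
        name="secret-volume",
        secret=client.V1SecretVolumeSource(secret_name="gcp-credentials")
    )
)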
github kubeflow / pipelines / samples / contrib / nvidia-resnet / pipeline / src / pipeline.py
    op_dict = {}

    op_dict['preprocess'] = PreprocessOp(
        'preprocess', raw_data_dir, processed_data_dir)

    op_dict['train'] = TrainOp(
        'train', op_dict['preprocess'].output, model_dir, model_name, model_version, epochs)

    op_dict['deploy_inference_server'] = InferenceServerLauncherOp(
        'deploy_inference_server', op_dict['train'].output, trtserver_name)

    op_dict['deploy_webapp'] = WebappLauncherOp(
        'deploy_webapp', op_dict['deploy_inference_server'].output, model_name, model_version, webapp_prefix, webapp_port)

    for _, container_op in op_dict.items():
        container_op.add_volume(k8s_client.V1Volume(
            host_path=k8s_client.V1HostPathVolumeSource(
                path=persistent_volume_path),
            name=persistent_volume_name))
        container_op.add_volume_mount(k8s_client.V1VolumeMount(
            mount_path=persistent_volume_path,
            name=persistent_volume_name))
github Yelp / paasta / paasta_tools / kubernetes_tools.py
            for docker_volume in docker_volumes
        }
        for name, docker_volume in unique_docker_volumes.items():
            pod_volumes.append(
                V1Volume(
                    host_path=V1HostPathVolumeSource(path=docker_volume["hostPath"]),
                    name=name,
                )
            )
        unique_aws_ebs_volumes = {
            self.get_aws_ebs_volume_name(aws_ebs_volume): aws_ebs_volume
            for aws_ebs_volume in aws_ebs_volumes
        }
        for name, aws_ebs_volume in unique_aws_ebs_volumes.items():
            pod_volumes.append(
                V1Volume(
                    aws_elastic_block_store=V1AWSElasticBlockStoreVolumeSource(
                        volume_id=aws_ebs_volume["volume_id"],
                        fs_type=aws_ebs_volume.get("fs_type"),
                        partition=aws_ebs_volume.get("partition"),
                        # k8s wants RW volume even if it's later mounted RO
                        read_only=False,
                    ),
                    name=name,
                )
            )
        return pod_volumes
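As the inline comment says, the EBS volume itself must be declared read-write; when read-only access is wanted, it is applied on the container's V1VolumeMount instead, roughly like this one-line sketch (the mount path is illustrative):

mount = V1VolumeMount(name=name, mount_path="/data", read_only=True)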
github funcx-faas / funcX / funcx / executor / parsl / providers / kubernetes / kube.py
                name=job_name,
                image=job_image,
                ports=[client.V1ContainerPort(container_port=port)],
                volume_mounts=volume_mounts,
                command=['/bin/bash'],
                args=launch_args,
                env=[environment_vars])
        # Create a secret to enable pulling images from secure repositories
        secret = None
        if self.secret:
            secret = client.V1LocalObjectReference(name=self.secret)

        # Create list of volumes from (pvc, mount) tuples
        volume_defs = []
        for volume in volumes:
            volume_defs.append(client.V1Volume(name=volume[0],
                                               persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                                                   claim_name=volume[0])))

        # Create and configure a spec section
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"app": job_name}),
            spec=client.V1PodSpec(containers=[container],
                                  image_pull_secrets=[secret],
                                  volumes=volume_defs
                                  ))

        # Create the specification of deployment
        spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,
                                                      template=template)

        # Instantiate the deployment object
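One caveat: ExtensionsV1beta1DeploymentSpec targets the extensions/v1beta1 API group, which was removed in Kubernetes 1.16, and recent versions of the kubernetes Python client no longer ship it. The apps/v1 equivalent, which additionally requires a selector, looks roughly like:

spec = client.V1DeploymentSpec(
    replicas=replicas,
    selector=client.V1LabelSelector(match_labels={"app": job_name}),
    template=template,
)
deployment = client.V1Deployment(api_version="apps/v1", kind="Deployment", spec=spec)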
github hyperledger / cello / src / agent / kubernetes-agent / src / utils / client.py
            parameters = {}
            if host_path:
                host_path = client.V1HostPathVolumeSource(path=host_path)
                parameters.update({"host_path": host_path})
            if empty_dir:
                empty_dir = client.V1EmptyDirVolumeSource(**empty_dir)
                parameters.update({"empty_dir": empty_dir})
            persistent_volume_claim = volume.get("pvc", None)
            if persistent_volume_claim:
                persistent_volume_claim = client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=persistent_volume_claim
                )
                parameters.update(
                    {"persistent_volume_claim": persistent_volume_claim}
                )
            volumes.append(client.V1Volume(name=volume_name, **parameters))
        initial_container_pods = self._generate_container_pods(
            initial_containers
        )
        container_pods = self._generate_container_pods(containers)
        pod_spec = client.V1PodSpec(
            init_containers=initial_container_pods,
            containers=container_pods,
            volumes=volumes,
            restart_policy=restart_policy,
        )
        spec_metadata = client.V1ObjectMeta(labels=labels)
        template_spec = client.V1PodTemplateSpec(
            metadata=spec_metadata, spec=pod_spec
        )

        LOG.info("template spec %s", template_spec)
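Because the loop assembles V1Volume keyword arguments from a plain dict, one code path covers host_path, empty_dir, and PVC sources alike. A hypothetical input entry and the call it expands to:

# volume = {"name": "shared-data", "empty_dir": {"medium": "Memory"}}
client.V1Volume(name="shared-data", empty_dir=client.V1EmptyDirVolumeSource(medium="Memory"))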