How to use the kubernetes.client.V1Container class in kubernetes

To help you get started, we’ve selected a few kubernetes.client.V1Container examples, based on popular ways it is used in public projects.
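If you just need a starting point: V1Container is a plain model class in the kubernetes Python client; you construct it with keyword arguments and attach it to a pod, job, or deployment spec. A minimal sketch (the image, names, and values below are placeholders, not taken from the projects that follow):

from kubernetes import client

# Illustrative container spec; every value here is a placeholder.
container = client.V1Container(
    name="example-app",
    image="nginx:1.25",
    ports=[client.V1ContainerPort(container_port=80)],
    env=[client.V1EnvVar(name="LOG_LEVEL", value="info")],
    resources=client.V1ResourceRequirements(
        requests={"cpu": "100m", "memory": "128Mi"},
        limits={"cpu": "500m", "memory": "256Mi"}))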

github hail-is / hail / batch / batch / batch.py
def _keep_alive_container(self):  # pylint: disable=R0201
        sh_expression = f"""
        set -ex
        python3 -m batch.keep_alive_sidecar
        """

        return kube.client.V1Container(
            image=BATCH_IMAGE,
            name='keep-alive',
            command=['/bin/sh', '-c', sh_expression],
            resources=kube.client.V1ResourceRequirements(
                requests={'cpu': '1m'}),
            ports=[kube.client.V1ContainerPort(container_port=5001)])
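The method above only returns the container spec; here is a sketch of how such a sidecar is typically attached to a pod, where main_container is a hypothetical placeholder for the job's primary container:

        # Sketch only: main_container is a placeholder, not part of the original code.
        pod_spec = kube.client.V1PodSpec(
            containers=[main_container, self._keep_alive_container()],
            restart_policy='Never')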
github funcx-faas / funcX / funcx / providers / kubernetes / kube.py
        environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")

        launch_args = ["-c", "{0}".format(cmd_string)]

        volume_mounts = []
        # Create mount paths for the volumes
        for volume in volumes:
            volume_mounts.append(client.V1VolumeMount(mount_path=volume[1],
                                                      name=volume[0]))
        resources = client.V1ResourceRequirements(limits={'cpu': str(self.max_cpu),
                                                          'memory': self.max_mem},
                                                  requests={'cpu': str(self.init_cpu),
                                                            'memory': self.init_mem}
                                                  )
        # Configure Pod template container
        container = client.V1Container(
            name=pod_name,
            image=image,
            resources=resources,
            ports=[client.V1ContainerPort(container_port=port)],
            volume_mounts=volume_mounts,
            command=['/bin/bash'],
            args=launch_args,
            env=[environment_vars],
            security_context=security_context)

        # Create a secret to enable pulling images from secure repositories
        secret = None
        if self.secret:
            secret = client.V1LocalObjectReference(name=self.secret)

        # Create list of volumes from (pvc, mount) tuples
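The excerpt stops before that last comment is acted on; a rough sketch of what building the pod-level volume list could look like, assuming each volume name doubles as the PVC name (illustrative only, not the project's actual code):

        # Sketch: turn the same (name, mount_path) tuples into pod-level volumes.
        volume_defs = []
        for volume in volumes:
            volume_defs.append(client.V1Volume(
                name=volume[0],
                persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=volume[0])))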
github polyaxon / polyaxon / polyaxon / polypod / templates / resource_manager.py
            persistence_data=persistence_data,
            outputs_refs_jobs=outputs_refs_jobs,
            outputs_refs_experiments=outputs_refs_experiments,
            ephemeral_token=ephemeral_token
        )
        env_vars += get_resources_env_vars(resources=resources)
        env_vars += get_kv_env_vars(self._get_kv_env_vars(None))

        # Env from config_map and secret refs
        env_from = get_pod_env_from(secret_refs=secret_refs, config_map_refs=config_map_refs)

        def get_ports():
            _ports = to_list(ports) if ports else []
            return [client.V1ContainerPort(container_port=port) for port in _ports] or None

        return client.V1Container(name=self.job_container_name,
                                  image=self.job_docker_image,
                                  command=command,
                                  args=args,
                                  ports=get_ports(),
                                  env=env_vars,
                                  env_from=env_from,
                                  resources=get_resources(resources),
                                  volume_mounts=volume_mounts)
github KatharaFramework / Kathara / bin / python / k8s / machine_deployer.py
    # postStart lifecycle hook is launched asynchronously by k8s master when the main container is Ready
    # On Ready state, the pod has volumes and network interfaces up, so this hook is used
    # to execute custom commands coming from .startup file and "exec" option
    lifecycle = None
    if machine["startup_commands"] and len(machine["startup_commands"]) > 0:
        post_start = client.V1Handler(
            _exec=client.V1ExecAction(
                command=["/bin/bash", "-c", "; ".join(machine["startup_commands"])]
            )
        )

        lifecycle = client.V1Lifecycle(post_start=post_start)

    # Main Container definition
    kathara_container = client.V1Container(
        name="kathara",
        image="%s:latest" % machine["image"],
        lifecycle=lifecycle,
        stdin=True,
        image_pull_policy="IfNotPresent",
        ports=container_ports,
        resources=resources,
        volume_mounts=volume_mounts,
        security_context=security_context
    )

    # Create networks annotation
    pod_annotations = dict()

    network_interfaces = []
    for count, machine_interface in enumerate(machine["interfaces"]):
github openstack / tacker / tacker / vnfm / infra_drivers / kubernetes / k8s / translate_outputs.py
def init_containers(self, container_props, limit_resource, name):
        list_env_var = self.init_envs(container_props, name)
        container_name = self.pre_process_name(container_props.name)
        list_container_port = list()
        if container_props.ports:
            for container_port in container_props.ports:
                port = int(container_port)
                cport = client.V1ContainerPort(container_port=port)
                list_container_port.append(cport)
        container = client.V1Container(
            name=container_name,
            image=container_props.image,
            ports=list_container_port,
            resources=limit_resource,
            command=container_props.command,
            args=container_props.args,
            env=list_env_var,
            image_pull_policy="IfNotPresent")
        return container
github falcosecurity / kubernetes-response-engine / playbooks / playbooks / infrastructure.py
def _build_sysdig_capture_job_body(self, job_name, node_name,
                                       duration_in_seconds, s3_bucket,
                                       aws_access_key_id, aws_secret_access_key):
        return client.V1Job(
            metadata=client.V1ObjectMeta(
                name=job_name
            ),
            spec=client.V1JobSpec(
                template=client.V1PodTemplateSpec(
                    metadata=client.V1ObjectMeta(
                        name=job_name
                    ),
                    spec=client.V1PodSpec(
                        containers=[client.V1Container(
                            name='capturer',
                            image='sysdig/capturer',
                            image_pull_policy='Always',
                            security_context=client.V1SecurityContext(
                                privileged=True
                            ),
                            env=[
                                client.V1EnvVar(
                                    name='AWS_S3_BUCKET',
                                    value=s3_bucket
                                ),
                                client.V1EnvVar(
                                    name='CAPTURE_DURATION',
                                    value=str(duration_in_seconds)
                                ),
                                client.V1EnvVar(
github Parsl / libsubmit / libsubmit / providers / kubernetes / kube.py
        launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)]
        print(launch_args)

        # Configure Pod template container
        container = None
        if security_context:
            container = client.V1Container(
                name=job_name,
                image=job_image,
                ports=[client.V1ContainerPort(container_port=port)],
                command=['/bin/bash'],
                args=launch_args,
                env=[environment_vars],
                security_context=security_context)
        else:
            container = client.V1Container(
                name=job_name,
                image=job_image,
                ports=[client.V1ContainerPort(container_port=port)],
                command=['/bin/bash'],
                args=launch_args,
                env=[environment_vars])
        # Create a secret to enable pulling images from secure repositories
        secret = None
        if self.secret:
            secret = client.V1LocalObjectReference(name=self.secret)

        # Create and configure a spec section
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"app": job_name}),
            spec=client.V1PodSpec(containers=[container], image_pull_secrets=[secret]))
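The excerpt ends with the pod template; a sketch of how a template like this is commonly wrapped in a Deployment and submitted (the selector, namespace, and API call are assumptions, not shown in the excerpt):

        # Sketch only: wrap the template in a Deployment and create it.
        deployment = client.V1Deployment(
            api_version="apps/v1",
            kind="Deployment",
            metadata=client.V1ObjectMeta(name=job_name),
            spec=client.V1DeploymentSpec(
                replicas=1,
                selector=client.V1LabelSelector(match_labels={"app": job_name}),
                template=template))
        client.AppsV1Api().create_namespaced_deployment(namespace="default", body=deployment)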
github ml-tooling / ml-hub / docker-res / kubernetes / jupyterhub_config.py
    api_token = service.pop('apiToken', None)
    if api_token:
        service['api_token'] = api_token
    c.JupyterHub.services.append(service)


set_config_if_not_none(c.Spawner, 'cmd', 'singleuser.cmd')
set_config_if_not_none(c.Spawner, 'default_url', 'singleuser.defaultUrl')

cloud_metadata = get_config('singleuser.cloudMetadata', {})

if not cloud_metadata.get('enabled', False):
    # Use iptables to block access to cloud metadata by default
    network_tools_image_name = get_config('singleuser.networkTools.image.name')
    network_tools_image_tag = get_config('singleuser.networkTools.image.tag')
    ip_block_container = client.V1Container(
        name="block-cloud-metadata",
        image=f"{network_tools_image_name}:{network_tools_image_tag}",
        command=[
            'iptables',
            '-A', 'OUTPUT',
            '-d', cloud_metadata.get('ip', '169.254.169.254'),
            '-j', 'DROP'
        ],
        security_context=client.V1SecurityContext(
            privileged=True,
            run_as_user=0,
            capabilities=client.V1Capabilities(add=['NET_ADMIN'])
        )
    )

    c.KubeSpawner.init_containers.append(ip_block_container)
github dmlc / dmlc-core / tracker / dmlc_tracker / kubernetes.py
def create_job_manifest(envs, commands, name, image, template_file):
    if template_file is not None:
        # Populate a user-supplied YAML template.
        with open(template_file) as f:
            job = yaml.safe_load(f)
            job["metadata"]["name"] = name
            job["spec"]["template"]["metadata"]["labels"]["app"] = name
            job["spec"]["template"]["spec"]["containers"][0]["image"] = image
            job["spec"]["template"]["spec"]["containers"][0]["command"] = commands
            job["spec"]["template"]["spec"]["containers"][0]["name"] = name
            job["spec"]["template"]["spec"]["containers"][0]["env"] = envs
    else:
        # Otherwise build the Job from client model objects.
        container = client.V1Container(image=image, command=commands, name=name, env=envs)
        pod_temp = client.V1PodTemplateSpec(
                spec=client.V1PodSpec(restart_policy="OnFailure", containers=[container]),
                metadata=client.V1ObjectMeta(name=name, labels={"app": name})
                )
        job = client.V1Job(
                api_version="batch/v1",
                kind="Job",
                spec=client.V1JobSpec(template=pod_temp),
                metadata=client.V1ObjectMeta(name=name)
                )
    return job
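The function only builds the manifest; a sketch of submitting it through the batch API (kubeconfig loading and the namespace are assumptions, not part of the original tracker code):

from kubernetes import client, config

# Sketch only: submit the Job returned by create_job_manifest().
# envs, commands, name, image, template_file are the same arguments as above.
config.load_kube_config()
job = create_job_manifest(envs, commands, name, image, template_file)
client.BatchV1Api().create_namespaced_job(namespace="default", body=job)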
github kbst / mongodb / mongodb_operator / mongodb_operator / kubernetes_resources.py
    data_volume = client.V1Volume(
        name='mongo-data',
        empty_dir=client.V1EmptyDirVolumeSource())
    statefulset.spec.template.spec.volumes = [
        ca_volume, tls_volume, data_volume]

    # Init container
    tls_init_ca_volumemount = client.V1VolumeMount(
        name='mongo-ca',
        read_only=True,
        mount_path='/etc/ssl/mongod-ca')
    tls_init_mongodb_tls_volumemount = client.V1VolumeMount(
        name='mongo-tls',
        read_only=False,
        mount_path='/etc/ssl/mongod')
    tls_init_container = client.V1Container(
        name="cert-init",
        image="quay.io/kubestack/mongodb-init:latest",
        volume_mounts=[tls_init_ca_volumemount,
                       tls_init_mongodb_tls_volumemount],
        env=[
            client.V1EnvVar(
                name='METADATA_NAME',
                value_from=client.V1EnvVarSource(
                    field_ref=client.V1ObjectFieldSelector(
                        api_version='v1',
                        field_path='metadata.name'))),
            client.V1EnvVar(
                name='NAMESPACE',
                value_from=client.V1EnvVarSource(
                    field_ref=client.V1ObjectFieldSelector(
                        api_version='v1',