How to use the kubernetes.client.V1EnvVar class in the kubernetes Python client

To help you get started, we’ve selected a few kubernetes examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github man-group / pytest-plugins / pytest-server-fixtures / pytest_server_fixtures / serverclass / kubernetes.py View on Github external
def _get_pod_spec(self):
        """Build the pod spec used to run the server fixture.

        Returns:
            k8sclient.V1PodSpec: a single-container pod spec running the
            configured fixture image with the fixture command and the
            environment variables from ``self._env``.
        """
        container = k8sclient.V1Container(
            name='fixture',
            image=self._image,
            command=self._get_cmd(),
            # Use .items() rather than .iteritems(): iteritems() was removed
            # in Python 3 and raises AttributeError there; items() works on
            # both Python 2 and 3.
            env=[k8sclient.V1EnvVar(name=k, value=v) for k, v in self._env.items()],
        )

        return k8sclient.V1PodSpec(
            containers=[container]
        )
github cvisionai / tator / main / consumers.py View on Github external
value=content['media_list'],
                ),
                kube_client.V1EnvVar(
                    name='TATOR_PROJECT_ID',
                    value=str(algorithm.project.id),
                ),
                kube_client.V1EnvVar(
                    name='TATOR_AUTH_TOKEN',
                    value=token,
                ),
            ]

        # If the algorithm has arguments supply it to the pipeline
        if algorithm.arguments:
            env_list.append(
                kube_client.V1EnvVar(
                    name='TATOR_PIPELINE_ARGS',
                    value=json.dumps(algorithm.arguments))
            )

        # Create setup job.
        create_job(
            container_name=marshal_container_name,
            command=['python',],
            args=['/setup.py'],
            image_name=marshal_container_image,
            image_tag='latest',
            cred_name=marshal_cred_name,
            uid=run_uid,
            metadata=setup_meta,
            batch_v1=batch_v1,
            other_envs=env_list,
github hyperledger / cello / src / agent / kubernetes-agent / src / utils / client.py View on Github external
command_args = container.get("command_args", [])
            volume_mounts = container.get("volume_mounts")
            volume_mounts = (
                [
                    client.V1VolumeMount(
                        mount_path=volume_mount.get("path"),
                        name=volume_mount.get("name"),
                    )
                    for volume_mount in volume_mounts
                ]
                if volume_mounts
                else None
            )

            environments = [
                client.V1EnvVar(name=env.get("name"), value=env.get("value"))
                for env in environments
            ]
            ports = [
                client.V1ContainerPort(container_port=port) for port in ports
            ]
            container_parameter = {
                "name": container.get("name"),
                "image": container.get("image"),
                "image_pull_policy": "IfNotPresent",
            }
            if environments is not None and len(environments) > 0:
                container_parameter.update({"env": environments})
            if command is not None and len(command) > 0:
                container_parameter.update({"command": command})
            if command_args is not None and len(command_args) > 0:
                container_parameter.update({"args": command_args})
github kbst / mongodb / mongodb_operator / mongodb_operator / kubernetes_resources.py View on Github external
'--sslPEMKeyFile', '/etc/ssl/mongod/mongod.pem',
            '--sslCAFile', '/etc/ssl/mongod/ca.pem',
            '--bind_ip', '127.0.0.1,$(POD_IP)'],
        image='mongo:3.6.4',
        ports=[mongodb_port],
        volume_mounts=[mongodb_tls_volumemount, mongodb_data_volumemount],
        resources=mongodb_resources)

    # Metrics container
    metrics_port = client.V1ContainerPort(
        name='metrics', container_port=9001, protocol='TCP')
    metrics_resources = client.V1ResourceRequirements(
        limits={'cpu': '50m', 'memory': '16Mi'},
        requests={'cpu': '50m', 'memory': '16Mi'})
    metrics_secret_name = '{}-monitoring-credentials'.format(name)
    metrics_username_env_var = client.V1EnvVar(
        name='MONGODB_MONITORING_USERNAME',
        value_from=client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(
                name=metrics_secret_name,
                key='username')))
    metrics_password_env_var = client.V1EnvVar(
        name='MONGODB_MONITORING_PASSWORD',
        value_from=client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(
                name=metrics_secret_name,
                key='password')))
    metrics_container = client.V1Container(
        name='prometheus-exporter',
        image='quay.io/kubestack/prometheus-mongodb-exporter:latest',
        command=[
            '/bin/sh',
github labdave / CloudConductor / System / Platform / Kubernetes / KubernetesJob.py View on Github external
read_only=True
                )
            )
            volumes.append(
                client.V1Volume(
                    name="secret-volume",
                    secret=client.V1SecretVolumeSource(
                        secret_name="cloud-conductor-config",
                        items=[client.V1KeyToPath(key="gcp_json", path="gcp.json")]
                    )
                )
            )
            env_variables.append(client.V1EnvVar(name='GOOGLE_APPLICATION_CREDENTIALS', value='/etc/cloud_conductor/gcp.json'))
            env_variables.append(client.V1EnvVar(name='RCLONE_CONFIG_GS_TYPE', value='google cloud storage'))
            env_variables.append(client.V1EnvVar(name='RCLONE_CONFIG_GS_SERVICE_ACCOUNT_FILE', value='$GOOGLE_APPLICATION_CREDENTIALS'))
            env_variables.append(client.V1EnvVar(name='RCLONE_CONFIG_GS_OBJECT_ACL', value='projectPrivate'))
            env_variables.append(client.V1EnvVar(name='RCLONE_CONFIG_GS_BUCKET_ACL', value='projectPrivate'))

        if self.aws_secret_configured:
            env_variables.append(client.V1EnvVar(name='AWS_ACCESS_KEY_ID', value_from=client.V1EnvVarSource(secret_key_ref=client.V1SecretKeySelector(name='cloud-conductor-config', key='aws_id'))))
            env_variables.append(client.V1EnvVar(name='AWS_SECRET_ACCESS_KEY', value_from=client.V1EnvVarSource(secret_key_ref=client.V1SecretKeySelector(name='cloud-conductor-config', key='aws_access'))))
            env_variables.append(client.V1EnvVar(name='RCLONE_CONFIG_S3_TYPE', value='s3'))
            env_variables.append(client.V1EnvVar(name='RCLONE_CONFIG_S3_ACCESS_KEY_ID', value_from=client.V1EnvVarSource(secret_key_ref=client.V1SecretKeySelector(name='cloud-conductor-config', key='aws_id'))))
            env_variables.append(client.V1EnvVar(name='RCLONE_CONFIG_S3_SECRET_ACCESS_KEY', value_from=client.V1EnvVarSource(secret_key_ref=client.V1SecretKeySelector(name='cloud-conductor-config', key='aws_access'))))

        storage_image = 'gcr.io/cloud-builders/gsutil'
        storage_tasks = ['mkdir_', 'grant_']
        entrypoint = ["/bin/sh", "-c"]

        for k, v in self.processes.items():
            # if the process is for storage (i.e. mkdir, etc.)
            if any(x in k for x in storage_tasks) or not v['docker_image']:
github polyaxon / polyaxon / polyaxon / polypod / templates / env_vars.py View on Github external
def get_from_config_map(key_name, cm_key_name, config_map_ref_name=None):
    """Build a V1EnvVar sourced from a ConfigMap key.

    Args:
        key_name: name of the environment variable to create.
        cm_key_name: key inside the ConfigMap to read the value from.
        config_map_ref_name: ConfigMap to reference; falls back to
            ``settings.POLYAXON_K8S_APP_CONFIG_NAME`` when not provided.

    Returns:
        client.V1EnvVar with ``value_from`` pointing at the ConfigMap key.
    """
    ref_name = config_map_ref_name or settings.POLYAXON_K8S_APP_CONFIG_NAME
    selector = client.V1ConfigMapKeySelector(name=ref_name, key=cm_key_name)
    source = client.V1EnvVarSource(config_map_key_ref=selector)
    return client.V1EnvVar(name=key_name, value_from=source)
github wylok / sparrow / module / k8s_resource.py View on Github external
host_path=client.V1HostPathVolumeSource(path=path,
                                                                                       type='DirectoryOrCreate')))
        if self.container_port:
            ports = [client.V1ContainerPort(container_port=int(port)) for port in self.container_port]
            liveness_probe = client.V1Probe(initial_delay_seconds=15,
                                            tcp_socket=client.V1TCPSocketAction(port=int(self.container_port[0])))
            readiness_probe = client.V1Probe(initial_delay_seconds=15,
                                             tcp_socket=client.V1TCPSocketAction(port=int(self.container_port[0])))
            if self.healthcheck:
                liveness_probe = client.V1Probe(initial_delay_seconds=15,
                                                http_get=client.V1HTTPGetAction(path=self.healthcheck,
                                                                                port=int(self.container_port[0])))
                readiness_probe = client.V1Probe(initial_delay_seconds=15,
                                                 http_get=client.V1HTTPGetAction(path=self.healthcheck,
                                                                                 port=int(self.container_port[0])))
        Env = [client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
                 client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8'),
                 client.V1EnvVar(name='POD_NAME',value_from=client.V1EnvVarSource(
                     field_ref=client.V1ObjectFieldSelector(field_path='metadata.name'))),
                 client.V1EnvVar(name='POD_IP', value_from=client.V1EnvVarSource(
                     field_ref=client.V1ObjectFieldSelector(field_path='status.podIP'))),
                 ]
        container = client.V1Container(
            name=self.dm_name,
            image=self.image,
            ports=ports,
            image_pull_policy='Always',
            env=Env,
            resources=client.V1ResourceRequirements(limits=self.re_limits,
                                                    requests=self.re_requests),
            volume_mounts=volume_mounts
        )
github wylok / sparrow / module / k8s_resource.py View on Github external
if self.container_port:
            ports = [client.V1ContainerPort(container_port=int(port)) for port in self.container_port]
            liveness_probe = client.V1Probe(initial_delay_seconds=15,
                                            tcp_socket=client.V1TCPSocketAction(port=int(self.container_port[0])))
            readiness_probe = client.V1Probe(initial_delay_seconds=15,
                                             tcp_socket=client.V1TCPSocketAction(port=int(self.container_port[0])))
            if self.healthcheck:
                liveness_probe = client.V1Probe(initial_delay_seconds=15,
                                                http_get=client.V1HTTPGetAction(path=self.healthcheck,
                                                                                port=int(self.container_port[0])))
                readiness_probe = client.V1Probe(initial_delay_seconds=15,
                                                 http_get=client.V1HTTPGetAction(path=self.healthcheck,
                                                                                 port=int(self.container_port[0])))
        Env = [client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
                 client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8'),
                 client.V1EnvVar(name='POD_NAME',value_from=client.V1EnvVarSource(
                     field_ref=client.V1ObjectFieldSelector(field_path='metadata.name'))),
                 client.V1EnvVar(name='POD_IP', value_from=client.V1EnvVarSource(
                     field_ref=client.V1ObjectFieldSelector(field_path='status.podIP'))),
                 ]
        container = client.V1Container(
            name=self.dm_name,
            image=self.image,
            ports=ports,
            image_pull_policy='Always',
            env=Env,
            resources=client.V1ResourceRequirements(limits=self.re_limits,
                                                    requests=self.re_requests),
            volume_mounts=volume_mounts
        )
        if liveness_probe and readiness_probe:
            container = client.V1Container(
github funcx-faas / funcX / funcx / executor / parsl / providers / kubernetes / kube.py View on Github external
- port (integer) : Container port
             - replicas : Number of replica containers to maintain
        Returns:
              - True: The deployment object to launch
        """

        # sorry, quick hack that doesn't pass this stuff through to test it works.
        # TODO it also doesn't only add what is set :(
        security_context = None
        if self.user_id and self.group_id:
            security_context = client.V1SecurityContext(run_as_group=self.group_id,
                                                        run_as_user=self.user_id,
                                                        run_as_non_root=self.run_as_non_root)

        # Create the environment variables and command to initiate IPP
        environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")

        launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)]

        volume_mounts = []
        # Create mount paths for the volumes
        for volume in volumes:
            volume_mounts.append(client.V1VolumeMount(mount_path=volume[1],
                                                      name=volume[0]))
        # Configure the Pod template container
        container = None
        if security_context:
            container = client.V1Container(
                name=job_name,
                image=job_image,
                ports=[client.V1ContainerPort(container_port=port)],
                volume_mounts=volume_mounts,