# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def refresh(self):
    """Re-read this pod's status from the Kubernetes API and cache the result."""
    api = client.CoreV1Api()
    self.obj = api.read_namespaced_pod_status(
        name=self.name,
        namespace=self.namespace,
    )
account=None,
namespace=namespace)
# service component
# NOTE(review): fragment of a larger deploy routine — `ks`, `app_dir`,
# `params`, and `namespace` come from the enclosing scope (not visible here).
serviceComponent = "modelServer-service"
# Generate the ksonnet "tf-serving-service" component inside the app dir.
generate_command = [ks, "generate", "tf-serving-service", serviceComponent]
util.run(generate_command, cwd=app_dir)
# Deploy the generated component; no env or account overrides are applied.
ks_deploy(
    app_dir,
    serviceComponent,
    params,
    env=None,
    account=None,
    namespace=namespace)
# NOTE(review): fragment — `api_client`, `args`, and `namespace` come from the
# enclosing scope. Looks up the Service backing the deployment and confirms
# Kubernetes assigned it a cluster IP before declaring TF serving healthy.
core_api = k8s_client.CoreV1Api(api_client)
deploy = core_api.read_namespaced_service(args.deploy_name, args.namespace)
cluster_ip = deploy.spec.cluster_ip
if not cluster_ip:
    raise ValueError("inception service wasn't assigned a cluster ip.")
# Block until the deployment reports ready (up to 10 minutes).
util.wait_for_deployment(
    api_client, namespace, args.deploy_name, timeout_minutes=10)
logging.info("Verified TF serving started.")
def test_create_pod_from_yaml(self):
    """Creating a pod from a YAML manifest should succeed.

    Applies core-pod.yaml, reads the pod back to confirm it exists,
    then deletes it so the cluster is left clean.
    """
    api_client = client.api_client.ApiClient(configuration=self.config)
    manifest = self.path_prefix + "core-pod.yaml"
    utils.create_from_yaml(api_client, manifest)

    core_v1 = client.CoreV1Api(api_client)
    created = core_v1.read_namespaced_pod(
        name="myapp-pod", namespace="default")
    self.assertIsNotNone(created)

    # Clean up the pod we just created.
    core_v1.delete_namespaced_pod(
        name="myapp-pod", namespace="default", body={})
def main():
    """Start an adaptive dask KubeCluster and wait for the first worker.

    Exits the process with status 1 if no worker registers with the
    scheduler within the timeout window.
    """
    from dask_kubernetes import KubeCluster
    from kubernetes import config

    # Running inside a pod, so use the in-cluster service-account config.
    # (The original also built an unused CoreV1Api client; removed.)
    config.load_incluster_config()

    cluster = KubeCluster()
    cluster.adapt(minimum=0, maximum=MAXIMUM)

    deadline = time.time() + 120  # seconds to wait for the first worker
    while len(cluster.scheduler.workers) == 0:
        # Workers took too long to spawn
        if time.time() > deadline:
            sys.exit(1)
        time.sleep(2)
import base64
import pymysql
import kubernetes as kube
import os
# Load Kubernetes credentials: honour an explicit kube config when the
# BATCH_USE_KUBE_CONFIG toggle is set, otherwise assume in-cluster execution.
if 'BATCH_USE_KUBE_CONFIG' in os.environ:
    kube.config.load_kube_config()
else:
    kube.config.load_incluster_config()
# Module-level CoreV1 client shared by the code below.
k8s = kube.client.CoreV1Api()
# Database host from the environment (None when unset).
SQL_HOST_DEF = os.environ.get('SQL_HOST')
class Table:
@staticmethod
def get_secret(b64str):
return base64.b64decode(b64str).decode('utf-8')
@staticmethod
def get_secrets():
secrets = {}
res = k8s.read_namespaced_secret('get-users', 'default')
data = res.data
def pod_is_ready_or_not(label_key, label_value):
    """Return True iff every pod matching `label_key=label_value` is ready.

    Lists pods across all namespaces using the label selector and checks
    every container's readiness. Returns False when the API call fails,
    when no pods match, when a pod has no container statuses yet, or when
    any container is not ready.
    """
    label_selector_str = "{0}={1}".format(label_key, label_value)
    config.load_kube_config()
    v1 = client.CoreV1Api()
    try:
        pod_list = v1.list_pod_for_all_namespaces(
            label_selector=label_selector_str, watch=False)
    except ApiException as e:
        # Fixed: the original used a Python 2 print statement, which is a
        # syntax error under Python 3.
        print("Exception when calling CoreV1Api->list_pod_for_all_namespaces: %s\n" % e)
        return False
    if len(pod_list.items) == 0:
        return False
    for pod in pod_list.items:
        # Statuses may be absent while a pod is still being scheduled.
        if pod.status.container_statuses is None:
            return False
        for container in pod.status.container_statuses:
            if not container.ready:
                return False
    # Bug fix: the original fell off the end and implicitly returned None
    # (falsy) even when every container was ready.
    return True
def get_client(context):
    """Return a CoreV1Api client for *context*; abort with 404 if the
    kube config for that context cannot be loaded."""
    try:
        config.load_kube_config(context=context)
        api = client.CoreV1Api()
    except config.config_exception.ConfigException:
        abort(404)
    else:
        return api
# NOTE(review): fragment of a constructor — `self` is the enclosing
# batch-system object; the start of the method is outside this view.
# Here is where we will store the user script resource object if we get one.
self.userScript = None
# Get the image to deploy from Toil's configuration
self.dockerImage = applianceSelf()
# Get the name of the AWS secret, if any, to mount in containers.
# TODO: have some way to specify this (env var?)!
self.awsSecretName = os.environ.get("TOIL_AWS_SECRET_NAME", None)
# Set this to True to enable the experimental wait-for-job-update code
self.enableWatching = False
# Required APIs needed from kubernetes
self.batchApi = kubernetes.client.BatchV1Api()
self.coreApi = kubernetes.client.CoreV1Api()
# Job IDs this batch system has issued so far.
self.jobIds = set()
def get_pod_names(client, namespace, name):
    """Get pod names from k8s.

    Lists pods in *namespace* labelled with the TF-job name label and
    returns the set of their metadata names.
    """
    api = k8s_client.CoreV1Api(client)
    selector = to_selector({TF_JOB_NAME_LABEL: name})
    resp = api.list_namespaced_pod(namespace, label_selector=selector)
    logging.info("list_namespaced_pod: %s", str(resp))
    return {
        pod.metadata.name
        for pod in resp.items
        if pod.metadata and pod.metadata.name
    }
from prometheus_client import Histogram
import k8sUtils
from DataHandler import DataHandler
from job import Job, JobSchema
from pod_template import PodTemplate
from dist_pod_template import DistPodTemplate
from config import config
from cluster_manager import record
# Module-level logger for this file.
logger = logging.getLogger(__name__)
# The config will be loaded from default location.
k8s_config.load_kube_config()
# API clients created once at import time and shared by JobDeployer instances.
k8s_CoreAPI = client.CoreV1Api()
k8s_AppsAPI = client.AppsV1Api()
class JobDeployer:
def __init__(self):
    """Set up a deployer bound to the shared module-level API clients."""
    # All requests target the default namespace with this pretty-print tag.
    self.namespace = "default"
    self.pretty = "pretty_example"
    # Reuse the clients constructed once at module import time.
    self.k8s_CoreAPI = k8s_CoreAPI
    self.k8s_AppsAPI = k8s_AppsAPI
@record
def _create_pod(self, body):
    # Create a pod from the given manifest *body* in this deployer's
    # namespace; the call is wrapped by @record (metrics/telemetry).
    # NOTE(review): `api_response` is assigned but unused in this view —
    # the method body appears truncated here (likely returns it below).
    api_response = self.k8s_CoreAPI.create_namespaced_pod(
        namespace=self.namespace,
        body=body,
        pretty=self.pretty,
    )