Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def __init__(self, get_cmd, env, image, labels=None):
    """Create a Kubernetes-backed server fixture.

    :param get_cmd: zero-arg callable returning the command the server pod runs
    :param env: environment passed through to the base server fixture
    :param image: container image to run
    :param labels: optional extra pod labels, merged with the fixture's own
        bookkeeping labels. Fixed: the default was a mutable ``{}`` shared
        across calls; ``None`` sentinel preserves the old behavior safely.
    """
    # Imports are done lazily (and published as globals) so the kubernetes
    # package is only required when this fixture type is actually used.
    global ApiException
    global k8sclient
    from kubernetes import config
    from kubernetes import client as k8sclient
    from kubernetes.client.rest import ApiException
    config.load_kube_config()
    super(KubernetesServer, self).__init__(get_cmd, env)
    self._namespace = 'default'
    # Unique per-fixture name so parallel sessions never collide.
    self._name = 'server-fixtures-%s' % uuid.uuid4()
    self._image = image
    self._run_cmd = get_cmd()
    if labels is None:
        labels = {}
    self._labels = _merge_dicts(labels, {
        'server-fixtures': 'kubernetes-server-fixtures',
        'server-fixtures/session-id': CONFIG.session_id,
    })
    self._v1api = k8sclient.CoreV1Api()
@asyncio.coroutine
def launch_flash_datanode(parallelism):
    """Launch a flash (NVMe) datanode Kubernetes Job and wait for startup.

    :param parallelism: number of parallel datanode pods for the Job
    Side effects: rewrites the Job YAML via a shell helper, submits the Job to
    the ``default`` namespace, and increments the global ``flash_launch_num``
    so each launch gets a unique Job name.
    """
    print("KUBERNETES: launch flash datanode........")
    global flash_launch_num
    kubernetes_job_name = "pocket-datanode-nvme-job" + str(flash_launch_num)
    yaml_file = "../deploy/pocket-datanode-nvme-job.yaml"
    # Helper script patches the Job name and parallelism into the YAML file.
    cmd = ["../deploy/update_datanode_yaml.sh", kubernetes_job_name, str(parallelism), yaml_file]
    Popen(cmd, stdout=PIPE).wait()
    config.load_kube_config()
    with open(path.join(path.dirname(__file__), yaml_file)) as f:
        # Fixed: yaml.load without an explicit Loader is unsafe (can execute
        # arbitrary Python tags) and deprecated since PyYAML 5.1; the Job
        # manifest is plain YAML, so safe_load is the correct choice.
        job = yaml.safe_load(f)
    k8s_beta = client.BatchV1Api()
    resp = k8s_beta.create_namespaced_job(body=job, namespace="default")
    print("Job created. status='%s'" % str(resp.status))
    flash_launch_num = flash_launch_num + 1
    print("Wait for flash datanode to start...")
    # Fixed delay rather than polling pod status — TODO confirm this is long
    # enough for the datanode image on the target cluster.
    yield from asyncio.sleep(WAIT_FOR_FLASH_STARTUP)
    print("Done waiting for flash datanode to start.")
    return
# NOTE(review): this run of statements is the body of a constructor (it
# assigns to self.*) whose `def` line is outside this chunk; code is left
# byte-identical, comments only.
# Drop the script name so only the user-supplied key=value pairs remain.
argv.remove(argv[0])
for elements in argv:
# Each argument must look like name=value (e.g. var="text").
variable_entrada = elements.split("=")
if len(variable_entrada) == 1 or variable_entrada[1] == '':
raise NameError('[ERROR] Invalid Arguments [python example.py var="text"]')
list_argv.append(variable_entrada)
self.dic_argv = argument_to_dic(list_argv)
# Default "subtract_days" to "10" when the caller did not supply it.
# NOTE(review): bare `except:` also swallows SystemExit/KeyboardInterrupt —
# `except KeyError:` or dict.setdefault would be safer.
try:
self.dic_argv["subtract_days"]
except:
self.dic_argv["subtract_days"] = "10"
# Load Config
# Outside a container (no /.dockerenv): use the invoking user's kubeconfig.
if not path.exists('/.dockerenv'):
config.load_kube_config(config_file='/home/%s/.kube/config_liberty_guadaltech' % (getpwuid(getuid()).pw_name))
# config.load_kube_config()
# config.load_kube_config(config_file='%s/credentials/config' % (self.ruta_exec))
else:
# Inside a pod: use the mounted service-account credentials.
config.load_incluster_config()
# Define API
self.v1 = client.CoreV1Api()
self.extv1beta1 = client.ExtensionsV1beta1Api()
# Log
# Log file lives under the backup directory; assumes self.ruta_exec and
# self.directory_backups were set earlier in this constructor — TODO confirm.
logging.basicConfig(filename='%s/%s/kube-backup.log' % (self.ruta_exec, self.directory_backups), level=logging.INFO)
# Squirrel integration
self.sqin = squirrel_integration(self)
def __init_kube_apis(self):
    """Initialise the Kubernetes API client objects on self.

    Prefers in-cluster service-account credentials when the token file is
    mounted; otherwise falls back to the local kubeconfig.
    """
    token_path = '/run/secrets/kubernetes.io/serviceaccount/token'
    if os.path.exists(token_path):
        # Fixed: the token file handle was opened without ever being closed;
        # the with-statement guarantees it is released.
        with open(token_path) as f:
            kube_auth_token = f.read()
        kube_config = kubernetes.client.Configuration()
        kube_config.api_key['authorization'] = 'Bearer ' + kube_auth_token
        # KUBERNETES_PORT looks like "tcp://host:port"; the API is HTTPS.
        kube_config.host = os.environ['KUBERNETES_PORT'].replace('tcp://', 'https://', 1)
        kube_config.ssl_ca_cert = '/run/secrets/kubernetes.io/serviceaccount/ca.crt'
    else:
        kubernetes.config.load_kube_config()
        # None makes ApiClient pick up the default configuration that
        # load_kube_config just populated.
        kube_config = None
    self.api_client = kubernetes.client.ApiClient(kube_config)
    self.core_v1_api = kubernetes.client.CoreV1Api(self.api_client)
    self.custom_objects_api = kubernetes.client.CustomObjectsApi(self.api_client)
'ps': ['job-name=ps-1', 'job-name=ps-2']}
```
tf_server_port: The port the TensorFlow server is listening on.
rpc_layer: (Optional) The RPC layer TensorFlow should use to communicate
between tasks in Kubernetes. Defaults to 'grpc'.
override_client: The Kubernetes client (usually automatically retrieved
using `from kubernetes import client as k8sclient`). If you pass this
in, you are responsible for setting Kubernetes credentials manually.
Raises:
ImportError: If the Kubernetes Python client is not installed and no
`override_client` is passed in.
RuntimeError: If autoresolve_task is not a boolean or a callable.
"""
# NOTE(review): tail of a Kubernetes cluster-resolver __init__ (the `def`
# line and docstring head are outside this chunk); fixed in place.
if _KUBERNETES_API_CLIENT_INSTALLED:
    k8sconfig.load_kube_config()
# Default mapping: resolve the single 'worker' job by its job-name label.
if not job_to_label_mapping:
    job_to_label_mapping = {'worker': ['job-name=tensorflow']}
if not override_client and not _KUBERNETES_API_CLIENT_INSTALLED:
    # Fixed: the implicitly-concatenated literals were missing separating
    # spaces, rendering as "...beforeusing..." and "...theKubernetes...".
    raise ImportError('The Kubernetes Python client must be installed '
                      'before using the Kubernetes Cluster Resolver. To '
                      'install the Kubernetes Python client, run `pip '
                      'install kubernetes` on your command line.')
self._job_to_label_mapping = job_to_label_mapping
self._tf_server_port = tf_server_port
self._override_client = override_client
# Task identity is not known at construction time; resolved later.
self.task_type = None
self.task_id = None
"""Tear down all HEFT deployments.
"""
jupiter_config.set_globals()
"""
This loads the node lists in use
"""
path1 = jupiter_config.HERE + 'nodes.txt'
nodes = utilities.k8s_get_nodes(path1)
"""
This loads the kubernetes instance configuration.
In our case this is stored in admin.conf.
You should set the config file path in the jupiter_config.py file.
"""
config.load_kube_config(config_file = jupiter_config.KUBECONFIG_PATH)
"""
Loop through the list of nodes and deletes the all profiler related k8 deployment, replicaset, pods, and service.
The deletion should follow this particular order for a proper removal.
You can always check if a service/pod/deployment is running after running this script via kubectl command.
E.g.,
kubectl get svc -n "namespace name"
kubectl get deployement -n "namespace name"
kubectl get replicaset -n "namespace name"
kubectl get pod -n "namespace name"
"""
key = app_name+'-home'
print(key)
# We have defined the namespace for deployments in jupiter_config
namespace = jupiter_config.MAPPER_NAMESPACE
# NOTE(review): fragment of a worker-teardown script (enclosing header not
# visible); code left byte-identical, comments only.
jupiter_config.set_globals()
"""
This loads the node lists in use
"""
path1 = jupiter_config.HERE + 'nodes.txt'
nodes, homes = utilities.k8s_get_nodes_worker(path1)
pprint(nodes)
"""
This loads the kubernetes instance configuration.
In our case this is stored in admin.conf.
You should set the config file path in the jupiter_config.py file.
"""
config.load_kube_config(config_file = jupiter_config.KUBECONFIG_PATH)
namespace = jupiter_config.MAPPER_NAMESPACE
# We have defined the namespace for deployments in jupiter_config
# Get proper handles or pointers to the k8-python tool to call different functions.
extensions_v1_beta1_api = client.ExtensionsV1beta1Api()
v1_delete_options = client.V1DeleteOptions()
core_v1_api = client.CoreV1Api()
result = True
# One selector per node: "app=<app_name>_wave_<node key>" picks out that
# node's resources for deletion.
for key in nodes:
label = "app=%s_wave_"%(app_name)
label = label + key
resp = None
def __init__(self, config_file=None, context=None,  # pylint: disable=too-many-arguments
             client_configuration=None, persist_config=True):
    """
    TFJob client constructor
    :param config_file: kubeconfig file, defaults to ~/.kube/config
    :param context: kubernetes context
    :param client_configuration: kubernetes configuration object
    :param persist_config: whether changes to the kubeconfig are persisted
    """
    # Use the pod's service-account credentials only when we are actually
    # running inside a cluster and no explicit kubeconfig was requested;
    # in every other case fall back to loading a kubeconfig file.
    use_incluster = utils.is_running_in_k8s() and not config_file
    if use_incluster:
        config.load_incluster_config()
    else:
        config.load_kube_config(
            config_file=config_file,
            context=context,
            client_configuration=client_configuration,
            persist_config=persist_config)
    self.api_instance = client.CustomObjectsApi()
# -*- coding: utf-8 -*-
import json
from kubernetes import client, config
from kubernetes.config import ConfigException
# Bootstrap the Kubernetes client at import time: prefer in-cluster
# service-account credentials, fall back to a local kubeconfig (e.g. when
# this module is exercised outside a pod).
try:
# Load configuration inside the Pod
config.load_incluster_config()
except ConfigException:
# Load configuration for testing
config.load_kube_config()
# Create the Apis
v1_core = client.CoreV1Api()
custom_api = client.CustomObjectsApi()
def parse_error(e):
    """Return the human-readable message carried by a Kubernetes ApiException.

    :param e: exception whose ``body`` attribute is expected to hold a JSON
        document with a ``message`` field.
    :return: the ``message`` string, or ``str(e)`` when the body is absent,
        is not valid JSON, is ``None``, or lacks a ``message`` key.
    """
    try:
        return json.loads(e.body)['message']
    # Widened from (JSONDecodeError, KeyError): a missing ``body`` attribute
    # raised AttributeError and ``body=None`` raised TypeError, both escaping
    # the original handler. JSONDecodeError is a subclass of ValueError.
    except (AttributeError, TypeError, ValueError, KeyError):
        return str(e)
# NOTE(review): fragment — deletes a home service, then reloads node lists and
# cluster config for the next teardown phase; the surrounding function header
# is outside this chunk. Code left byte-identical, comments only.
#del_resp_2 = core_v1_api.delete_namespaced_service(home_name, namespace)
del_resp_2 = core_v1_api.delete_namespaced_service(home_name, namespace, v1_delete_options)
print("Service Deleted. status='%s'" % str(del_resp_2.status))
"""
This loads the node lists in use
"""
path1 = jupiter_config.HERE + 'nodes.txt'
nodes = utilities.k8s_get_nodes(path1)
"""
This loads the kubernetes instance configuration.
In our case this is stored in admin.conf.
You should set the config file path in the jupiter_config.py file.
"""
config.load_kube_config(config_file = jupiter_config.KUBECONFIG_PATH)
"""
Loop through the list of nodes and deletes the all profiler related k8 deployment, replicaset, pods, and service.
The deletion should follow this particular order for a proper removal.
You can always check if a service/pod/deployment is running after running this script via kubectl command.
E.g.,
kubectl get svc -n "namespace name"
kubectl get deployement -n "namespace name"
kubectl get replicaset -n "namespace name"
kubectl get pod -n "namespace name"
"""
for key in nodes:
# We have defined the namespace for deployments in jupiter_config
namespace = jupiter_config.EXEC_NAMESPACE