# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def delete_pod(context, dm_name):
    """Delete every pod in the ``default`` namespace that belongs to one of
    the deployments named in ``dm_name``.

    Args:
        context: kubeconfig context to load the cluster connection from.
        dm_name: deployment name(s) to match against (membership test, so a
            string or any container of names works).

    Returns:
        bool: True if the deletion pass completed, False when ``dm_name`` is
        empty or any Kubernetes API call failed.
    """
    try:
        namespace = "default"
        # BUG FIX: the original fell off the end (returning None) when
        # dm_name was falsy; return an explicit False instead.
        if not dm_name:
            return False
        # NOTE(review): ``config_file`` is not defined in this function --
        # presumably a module-level global; confirm against the full file.
        config.load_kube_config(config_file, context)
        api_instance = client.CoreV1Api()
        ret = api_instance.list_namespaced_pod(namespace=namespace)
        for pod in ret.items:
            # Pod names look like "<deployment>-<replicaset-hash>-<pod-hash>";
            # dropping the last two dash-separated segments recovers the
            # deployment name, which is then matched against dm_name.
            deployment = '-'.join(pod.metadata.name.split('-')[:-2])
            if deployment in dm_name:
                api_instance.delete_namespaced_pod(
                    name=pod.metadata.name,
                    namespace=namespace,
                    body=client.V1DeleteOptions())
        return True
    except Exception as e:
        # Broad catch is deliberate here: any failure is logged and reported
        # to the caller as False rather than propagating.
        logging.error(e)
        return False
# NOTE(review): this ``except`` is orphaned in the visible chunk -- its
# matching ``try`` (and whatever call binds ``resp``) lies outside this view.
except ApiException as e:
    print("Exception Occurred")
# if a service is running, kill it
if resp:
    # ``home_name``, ``core_v1_api`` and ``v1_delete_options`` are bound
    # elsewhere in the file; ``resp`` being truthy means the service lookup
    # above succeeded, so the service exists and is deleted here.
    del_resp_2 = core_v1_api.delete_namespaced_service(home_name, namespace, v1_delete_options)
    #del_resp_2 = core_v1_api.delete_namespaced_service(home_name, namespace=namespace)
    print("Service Deleted. status='%s'" % str(del_resp_2.status))
# For every task node, check whether a deployment named "<app_name>-<key>"
# exists in ``namespace`` and, if so, delete it.
for key in nodes:
    pod_name = app_name + '-' + key
    # Get proper handles or pointers to the k8-python tool to call different functions.
    api = client.ExtensionsV1beta1Api()
    body = client.V1DeleteOptions()
    # First check if there is an existing profiler deployment with
    # the name = key in the respective namespace
    print(pod_name)
    resp = None
    try:
        resp = api.read_namespaced_deployment(pod_name, namespace)
    except ApiException as e:
        # read_namespaced_deployment raises ApiException (e.g. 404) when the
        # deployment does not exist; ``resp`` stays None in that case.
        print("Exception Occurred")
    # if a deployment with the name = key exists in the namespace, delete it
    if resp:
        del_resp_0 = api.delete_namespaced_deployment(pod_name, namespace, body)
        print("Deployment '%s' Deleted. status='%s'" % (key, str(del_resp_0.status)))
def cleanup(api_instance=None):
    """ Remove completed jobs from the cluster """
    # Use the injected API handle when given, otherwise build one.
    api = api_instance if api_instance else get_api()
    # Background propagation lets the garbage collector remove dependents.
    opts = kubernetes.client.V1DeleteOptions(
        propagation_policy="Background")
    for job in api.list_job_for_all_namespaces().items:
        # A job is done once its succeeded count matches its completions spec.
        if job.status.succeeded != job.spec.completions:
            continue
        print(job.metadata.name, "finished!")
        api.delete_namespaced_job(
            job.metadata.name, 'default', body=opts)
def init_kube_client(self):
    """
    Method to get a kube client connected to remote or local kube api.

    Honors the KUBECFG_PATH environment variable when set; otherwise falls
    back to the default kubeconfig resolution (~/.kube/config).
    """
    kubecfg_path = os.environ.get('KUBECFG_PATH')
    if kubecfg_path is None:
        # No override: let the client library find the default kubeconfig.
        config.load_kube_config()
    else:
        # BUG FIX: the original read KUBECFG_PATH but then ignored it and
        # always loaded the hard-coded '/tmp/.kube/config'; use the env var.
        config.load_kube_config(config_file=kubecfg_path)
    # Handles for core, batch, and delete-options APIs used elsewhere.
    self.kube_client = k_client.CoreV1Api()
    self.kube_v1_batch_client = k_client.BatchV1Api()
    self.kube_v1_delete = k_client.V1DeleteOptions()
def clean_up(args):
    """Scan workflow custom objects in a namespace and record which have
    finished (parsed from their 'finishedAt' status field).

    Args:
        args: namespace with ``group``, ``version``, ``namespace`` and
            ``plural`` attributes identifying the custom resource to list.
    """
    # body object for kubernetes api
    body = client.V1DeleteOptions()
    # get all workflows
    try:
        workflows = custom_api.list_namespaced_custom_object(
            args.group, args.version, args.namespace, args.plural)
    except ApiException as e:
        logging.warning("Exception when calling CustomObjectsApi->list_namespaced_custom_object: %s\n" % e)
        # BUG FIX: without this early return the loop below raised a
        # NameError because ``workflows`` was never bound.
        return
    # track workflows expired, workflows not expired and pods deleted for logging
    workflows_expired = []
    workflows_not_expired = []
    pods_deleted = []
    for workflow in workflows['items']:
        key = workflow['metadata']['name']
        try:
            finished_at = datetime.strptime(workflow['status']['finishedAt'], '%Y-%m-%dT%H:%M:%SZ')
        except TypeError:
            # 'finishedAt' is None while a workflow is still running;
            # strptime(None, ...) raises TypeError, which lands here.
            logging.info('could not read workflow {}'.format(key))
def delete(self, request):
    """
    Delete a registry secret.

    Reads the secret name from the JSON request body and deletes it from
    the requesting user's namespace. Returns a 200 response on success,
    500 with the error text on API failure.
    """
    # BUG FIX: original had a duplicated assignment ('username = username = ...').
    username = request.user.username
    user_namespace = notebook.utils.email_escape(username)
    api_client = notebook.utils.get_user_api_client(username)
    obj = json.loads(request.body)
    name = obj.get("name")
    try:
        ret = client.CoreV1Api(api_client=api_client).delete_namespaced_secret(
            name=name,
            namespace=user_namespace,
            body=client.V1DeleteOptions())
    # BUG FIX: 'except ApiException, e:' is Python 2-only syntax and a
    # SyntaxError under Python 3; 'as' works on both.
    except ApiException as e:
        logging.error("Failed when delete secret.")
        return utils.simple_response(500, str(e))
    return utils.simple_response(200, "")
# Make the CIRCE deployment helpers importable.
sys.path.append(jupiter_config.CIRCE_PATH)
"""
This loads the kubernetes instance configuration.
In our case this is stored in admin.conf.
You should set the config file path in the jupiter_config.py file.
"""
config.load_kube_config(config_file = jupiter_config.KUBECONFIG_PATH)
namespace = jupiter_config.DEPLOYMENT_NAMESPACE
# We have defined the namespace for deployments in jupiter_config
# Get proper handles or pointers to the k8-python tool to call different functions.
extensions_v1_beta1_api = client.ExtensionsV1beta1Api()
v1_delete_options = client.V1DeleteOptions()
core_v1_api = client.CoreV1Api()
result = True
for key, value in dag.items():
    # print(key)
    # print(value)
    # First check if there is a deployment existing with
    # the name = key in the respective namespac # Check if there is a replicaset running by using the label app={key}
    # The label of kubernets are used to identify replicaset associate to each task
    label = "app=" + app_name+'-'+key
    resp = None
    resp = core_v1_api.list_namespaced_pod(namespace, label_selector = label)
    # if a pod is running just delete it
    # NOTE(review): the deletion this comment announces is not in the visible
    # chunk -- the loop body continues elsewhere in the file.
def delete_pod(self, name):
    """Delete the named pod from whichever namespace it lives in."""
    # Resolve the pod's namespace first, then issue the delete call.
    pod_ns = self._find_pod_namespace(name)
    delete_opts = client.V1DeleteOptions()
    self._v1.delete_namespaced_pod(
        name=name, namespace=pod_ns, body=delete_opts)
The deletion should follow this particular order for a proper removal.
You can always check if a service/pod/deployment is running after running this script via kubectl command.
E.g.,
kubectl get svc -n "namespace name"
kubectl get deployment -n "namespace name"
kubectl get replicaset -n "namespace name"
kubectl get pod -n "namespace name"
"""
# Tear down the per-task execution deployments: for each task node, look up
# the deployment named "<app_name>-<key>" and delete it if it exists.
for key in nodes:
    # We have defined the namespace for deployments in jupiter_config
    namespace = jupiter_config.EXEC_NAMESPACE
    # Get proper handles or pointers to the k8-python tool to call different functions.
    api = client.ExtensionsV1beta1Api()
    body = client.V1DeleteOptions()
    # First check if there is an existing profiler deployment with
    # the name = key in the respective namespace
    pod_name = app_name+"-"+key
    resp = None
    try:
        resp = api.read_namespaced_deployment(pod_name, namespace)
    except ApiException as e:
        # A missing deployment surfaces as an ApiException (e.g. 404);
        # ``resp`` stays None so the delete below is skipped.
        print("Exception Occurred")
    # if a deployment with the name = key exists in the namespace, delete it
    if resp:
        del_resp_0 = api.delete_namespaced_deployment(pod_name, namespace, body)
        print("Deployment '%s' Deleted. status='%s'" % (key, str(del_resp_0.status)))