try:
    out = client.list_cluster_custom_object('mcm.ibm.com', 'v1alpha1', 'deployables')
    print(json.dumps(out, indent=4))
except ApiException as e:
    pass
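# For context: list_cluster_custom_object is a CustomObjectsApi method, so the
# `client` used above would be built roughly as below (a sketch, assuming
# kubeconfig-based authentication; not part of the original snippet).
import json
from kubernetes import client as k8s, config
from kubernetes.client.rest import ApiException

config.load_kube_config()
client = k8s.CustomObjectsApi()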
# necessary when certificate-authority is not added to kubeconfig
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
config.load_kube_config()
contexts, active_context = config.list_kube_config_contexts()
defs = {}
# for context in contexts:
context = active_context
cluster = context["context"]["cluster"]
api_client = config.new_client_from_config(context=context['name'])
apps_client = client.AppsV1Api(api_client=api_client)
batch_client = client.BatchV1Api(api_client=api_client)
core_client = client.CoreV1Api(api_client=api_client)  # usage examples: https://www.programcreek.com/python/example/96328/kubernetes.client.CoreV1Api
ext_client = client.ExtensionsV1beta1Api(api_client=api_client)
customs_client = client.CustomObjectsApi(api_client=api_client)
api_ext = client.ApiextensionsV1beta1Api(api_client=api_client)
events_client = client.EventsV1beta1Api(api_client=api_client)
# definitions = api_ext.list_custom_resource_definition(pretty=True)
print(core_client.read_namespaced_pod("boisterous-shark-gbapp-frontend-8b5cc67bf-wctkb", "default"))
# break  # only needed inside the commented-out `for context` loop above
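# A short sketch of exercising the batch client constructed above; the
# "default" namespace is an assumption, not part of the original snippet.
jobs = batch_client.list_namespaced_job(namespace="default")
for j in jobs.items:
    print(j.metadata.name, "succeeded:", j.status.succeeded)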
except (TypeError, config.ConfigException) as e:
print("WARNING: unable to load context {}: {}. Retrying.".format(args.context, e))
time.sleep(sleep_secs)
retries = retries - 1
if retries == 0:
print("ERROR: unable to load context {}. Giving up.".format(args.context))
sys.exit(1)
print("Waiting for job completion. (recheck every {} sec) ".format(sleep_secs))
#
# wait for job to finish
#
retries = 3 # max API failure retries
batch = client.BatchV1Api()
while retries:
job_found = False
try:
job = batch.read_namespaced_job(name=args.job_name,
namespace=args.namespace)
except client.rest.ApiException as e:
print("x:", e)
time.sleep(sleep_secs)
retries = retries - 1
sleep_secs = sleep_secs * 2
continue
job_found = True
if job.status.active or not job.status.conditions:
print(".")
time.sleep(sleep_secs)
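# The loop above only shows the still-running branch. A sketch of the terminal
# check that would follow (Complete/Failed are the standard Job condition
# types; the exit handling is an assumption about this script):
for cond in job.status.conditions or []:
    if cond.type == "Complete" and cond.status == "True":
        print("Job {} completed.".format(args.job_name))
        sys.exit(0)
    if cond.type == "Failed" and cond.status == "True":
        print("Job {} failed: {}".format(args.job_name, cond.message))
        sys.exit(1)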
def authenticate_platform(self):
    cert_path, host, api_token, api_prefix = self.__parse_identity_json(self.identity)
    self.configuration = client.Configuration()
    self.configuration.api_key["authorization"] = api_token
    self.configuration.api_key_prefix['authorization'] = api_prefix
    self.configuration.host = host
    self.configuration.ssl_ca_cert = cert_path
    self.batch_api = client.BatchV1Api(client.ApiClient(self.configuration))
    self.core_api = client.CoreV1Api(client.ApiClient(self.configuration))
# Here is where we will store the user script resource object if we get one.
self.userScript = None
# Get the image to deploy from Toil's configuration
self.dockerImage = applianceSelf()
# Get the name of the AWS secret, if any, to mount in containers.
# TODO: have some way to specify this (env var?)!
self.awsSecretName = os.environ.get("TOIL_AWS_SECRET_NAME", None)
# Set this to True to enable the experimental wait-for-job-update code
self.enableWatching = False
# Required APIs needed from kubernetes
self.batchApi = kubernetes.client.BatchV1Api()
self.coreApi = kubernetes.client.CoreV1Api()
self.jobIds = set()
def execute(self) -> None:
    """
    Create a single Kubernetes job in the default namespace that runs a flow
    """
    from kubernetes import client, config

    try:
        config.load_incluster_config()
    except config.config_exception.ConfigException:
        raise EnvironmentError("Environment not currently inside a cluster")

    batch_client = client.BatchV1Api()
    with open(path.join(path.dirname(__file__), "job.yaml")) as job_file:
        job = yaml.safe_load(job_file)
        job = self._populate_yaml(yaml_obj=job)
        # Create Job
        batch_client.create_namespaced_job(namespace="default", body=job)
def create_job(job_body, namespace='default'):
    config.load_kube_config()
    v1 = client.BatchV1Api()
    job = v1.create_namespaced_job(body=job_body, namespace=namespace)
    return job
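# Hypothetical usage of create_job: the Job body below is the canonical
# pi-computation example from the Kubernetes docs, expressed as a dict the
# client accepts directly; the names and values are illustrative.
job_body = {
    "apiVersion": "batch/v1",
    "kind": "Job",
    "metadata": {"name": "pi"},
    "spec": {
        "template": {
            "spec": {
                "containers": [{
                    "name": "pi",
                    "image": "perl",
                    "command": ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"],
                }],
                "restartPolicy": "Never",
            }
        },
        "backoffLimit": 4,
    },
}
created = create_job(job_body)
print("Created job:", created.metadata.name)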
def __init__(self):
    if 'KUBERNETES_LOAD_KUBE_CONFIG' in os.environ:
        config.load_kube_config()
    else:
        config.load_incluster_config()
    self._v1 = client.CoreV1Api()
    self._batch_v1 = client.BatchV1Api()
def exec_state(exe_f, namespace, state):
    bv1 = client.BatchV1Api()
    cv1 = client.CoreV1Api()
    with open(exe_f) as exe_fp:
        exe_i = 0
        for exe in exe_fp:  # one job name per line
            exe = exe.strip()
            job = bv1.read_namespaced_job(exe, namespace=namespace)
            state['logs'][exe_i]['start_time'] = job.metadata.creation_timestamp
            state['logs'][exe_i]['end_time'] = job.status.completion_time
            # find the pods the Job controller created for this job
            job_label_s = 'controller-uid=' + job.spec.selector.match_labels['controller-uid']
            pods = cv1.list_namespaced_pod(label_selector=job_label_s, namespace=namespace)
            try:
                state['logs'][exe_i]['stdout'] = cv1.read_namespaced_pod_log(pods.items[0].metadata.name, namespace=namespace)
            except IndexError:
                print("No pod matching job " + job.metadata.name + " could be found", file=sys.stderr)
            exe_i += 1
def start() -> None:
    """Start the next job in the queue"""
    _log.debug("Connecting to service account")
    kubernetes.config.load_incluster_config()
    _log.debug("Creating kubernetes client")
    global _kube
    _kube = kubernetes.client.BatchV1Api()
    _log.debug("Job processor ready!")
    while True:
        start_task()
def __init__(self, api_client: client.ApiClient, prefix_namespace: bool) -> None:
    super(ApiData, self).__init__()
    logging.info('Collecting API data')
    logging.debug('Constructing API client wrappers')
    core_api = client.CoreV1Api(api_client)
    storage_api = client.StorageV1Api(api_client)
    rbac_authorization_api = client.RbacAuthorizationV1Api(api_client)
    ext_api = client.ExtensionsV1beta1Api(api_client)
    batch_api = client.BatchV1Api(api_client)
    apps_api = client.AppsV1Api(api_client)
    self.custom_api = client.CustomObjectsApi(api_client)
    logging.debug('Retrieving data')
    storage_classes = storage_api.list_storage_class()
    namespaces = core_api.list_namespace()
    roles = rbac_authorization_api.list_role_for_all_namespaces()
    cluster_roles = rbac_authorization_api.list_cluster_role()
    component_statuses = core_api.list_component_status()
    nodes = core_api.list_node()
    # Try to make this a POST once the client API supports sending POST data;
    # include {"num_stats": 1} to get the latest only and use less bandwidth
    nodes_stats = [
        core_api.connect_get_node_proxy_with_path(node.metadata.name, "stats")
        for node in nodes.items
    ]