How to use the kubernetes.config.load_incluster_config function in kubernetes

To help you get started, we’ve selected a few kubernetes.config.load_incluster_config examples, based on popular ways the kubernetes package is used in public projects.

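For a quick orientation, here is a minimal sketch of the usual pattern (assuming the official kubernetes Python client is installed and, for the in-cluster path, that the code runs in a pod with a mounted service-account token; the "default" namespace call is only illustrative): try load_incluster_config() first and fall back to load_kube_config() when running outside a cluster.

from kubernetes import client, config

def load_k8s_config():
    """Load in-cluster config inside a pod, else fall back to the local kubeconfig."""
    try:
        # In a pod: uses the service-account token and CA cert mounted under
        # /var/run/secrets/kubernetes.io/serviceaccount.
        config.load_incluster_config()
    except config.ConfigException:
        # Not in a cluster: read the default kubeconfig (~/.kube/config).
        config.load_kube_config()

if __name__ == "__main__":
    load_k8s_config()
    v1 = client.CoreV1Api()
    for pod in v1.list_namespaced_pod("default").items:
        print(pod.metadata.name)

The project examples below apply the same idea, choosing between the two loaders via a flag, an environment check, or a try/except fallback.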

Hyperpilotio / be-controller / maincontrol.py (view on GitHub)
def configK8S():
  """ configure K8S environment
  """
  if st.k8sOn:
    try:
      if st.params['ctlloc'] == 'in':
        config.load_incluster_config()
      else:
        config.load_kube_config()
      st.node.kenv = client.CoreV1Api()
      print "Main: K8S API initialized."
    except config.ConfigException as e:
      print "Main:ERROR: Cannot initialize K8S environment, terminating:", e
      sys.exit(-1)
    st.node.name = os.getenv('MY_NODE_NAME')
    if st.node.name is None:
      print "Main:ERROR: Cannot get node name in K8S, terminating."
      sys.exit(-1)
    # read node stats
    try:
      _ = st.node.kenv.read_node(st.node.name)
    except ApiException as e:
      print "Main:ERROR: Exception when calling CoreV1Api->read_node: %s\n" % e

IBM / kube-safe-scheduler / node-annotator / usage / k8s_analytics.py (view on GitHub)
def main(self):
        # Configs can be set in Configuration class directly or using helper
        # utility. If no argument provided, the config will be loaded from
        # default location.
        print ("Initializing ...")

        if self.cluster_config == 'out-of-cluster':
            try:
                config.load_kube_config(config_file='kubectl.conf')
            except Exception as e:
                print("Error loading out of cluster k8s config: {0}".format(e))
                return
            print("using out-of-cluster K8s conf")
        else:
            try:
                config.load_incluster_config()
            except Exception as e:
                print("Error loading in-cluster k8s config: {0}".format(e))
                return
            print("using in-cluster K8s conf")

        global v1
        v1 = client.CoreV1Api()

        while True:
            print("Listing Nodes with their capacity:")
            try:
                result = v1.list_node()
            except Exception as e:
                print(e)
                return

kubeflow / tf-operator / sdk / python / kubeflow / tfjob / api / tf_job_client.py (view on GitHub)
client_configuration=None, persist_config=True):
    """
    TFJob client constructor
    :param config_file: kubeconfig file, defaults to ~/.kube/config
    :param context: kubernetes context
    :param client_configuration: kubernetes configuration object
    :param persist_config:
    """
    if config_file or not utils.is_running_in_k8s():
      config.load_kube_config(
        config_file=config_file,
        context=context,
        client_configuration=client_configuration,
        persist_config=persist_config)
    else:
      config.load_incluster_config()

    self.api_instance = client.CustomObjectsApi()

mattmoor / warm-image / controller.py (view on GitHub)
def main():
    config.load_incluster_config()

    apps_beta1 = client.AppsV1beta1Api()
    crds = client.CustomObjectsApi()

    # Create DaemonSets within our own namespace,
    # owned by us (so they go away if we do).
    namespace = os.environ["MY_NAMESPACE"]
    owner = apps_beta1.read_namespaced_deployment(os.environ["OWNER_NAME"], namespace)

    # Define our OwnerReference that we will add to the metadata of
    # objects we create so that they are garbage collected when this
    # controller is deleted.
    controller_ref = {
        "apiVersion": owner.api_version,
        "blockOwnerDeletion": True,
        "controller": True,

kiwigrid / k8s-sidecar / sidecar / sidecar.py (view on GitHub)
print(f"{timestamp()} Filter labels with value: {labelValue}")

    targetFolder = os.getenv("FOLDER")
    if targetFolder is None:
        print(f"{timestamp()} Should have added FOLDER as environment variable! Exit")
        return -1

    resources = os.getenv("RESOURCE", "configmap")
    resources = ("secret", "configmap") if resources == "both" else (resources, )
    print(f"{timestamp()} Selected resource type: {resources}")

    method = os.getenv("REQ_METHOD")
    url = os.getenv("REQ_URL")
    payload = os.getenv("REQ_PAYLOAD")

    config.load_incluster_config()
    print(f"{timestamp()} Config for cluster api loaded...")
    currentNamespace = open("/var/run/secrets/kubernetes.io/serviceaccount/namespace").read()

    if os.getenv("SKIP_TLS_VERIFY") == "true":
        configuration = client.Configuration()
        configuration.verify_ssl = False
        configuration.debug = False
        client.Configuration.set_default(configuration)

    if os.getenv("METHOD") == "LIST":
        for res in resources:
            listResources(label, labelValue, targetFolder, url, method, payload,
                          currentNamespace, folderAnnotation, res)
    else:
        watchForChanges(os.getenv("METHOD"), label, labelValue, targetFolder, url, method,
                        payload, currentNamespace, folderAnnotation, resources)

PrefectHQ / prefect / src / prefect / tasks / kubernetes / job.py (view on GitHub)
raise ValueError(
                "A dictionary representing a V1Job patch must be provided."
            )

        if not job_name:
            raise ValueError("The name of a Kubernetes job must be provided.")

        kubernetes_api_key = Secret(kubernetes_api_key_secret).get()

        if kubernetes_api_key:
            configuration = client.Configuration()
            configuration.api_key["authorization"] = kubernetes_api_key
            api_client = client.BatchV1Api(client.ApiClient(configuration))
        else:
            try:
                config.load_incluster_config()
            except config.config_exception.ConfigException:
                config.load_kube_config()

            api_client = client.BatchV1Api()

        body = {**self.body, **(body or {})}
        kube_kwargs = {**self.kube_kwargs, **(kube_kwargs or {})}

        api_client.patch_namespaced_job(
            name=job_name, namespace=namespace, body=body, **kube_kwargs
        )

mlrun / mlrun / mlrun / k8s_utils.py (view on GitHub)
def _init_k8s_config(self, log=True):
        try:
            config.load_incluster_config()
            if log:
                logger.info('using in-cluster config.')
        except Exception:
            try:
                config.load_kube_config(self.config_file)
                if log:
                    logger.info('using local kubernetes config.')
            except Exception:
                raise RuntimeError(
                    'cannot find local kubernetes config file,'
                    ' place it in ~/.kube/config or specify it in '

kubeflow / kubeflow / metric-collector / service-readiness / kubeflow-readiness.py (view on GitHub)
    signer = google.auth.iam.Signer(Request(), credentials, signer_email)
  else:
    # A Signer object can sign a JWT using the service account's key.
    signer = credentials.signer

  # Construct OAuth 2.0 service account credentials using the signer and
  # email acquired from the bootstrap credentials.
  service_account_credentials = google.oauth2.service_account.Credentials(
      signer,
      signer_email,
      token_uri=OAUTH_TOKEN_URI,
      additional_claims={'target_audience': args.client_id})

  token_refresh_time = 0
  last_status = -1
  config.load_incluster_config()
  coreApi = client.CoreV1Api()
  while True:
    if time() > token_refresh_time:
      # service_account_credentials gives us a JWT signed by the service
      # account. Next, we use that to obtain an OpenID Connect token,
      # which is a JWT signed by Google.
      google_open_id_connect_token = get_google_open_id_connect_token(
          service_account_credentials)
      token_refresh_time = time() + 1800
    url_status = metric_update(args, google_open_id_connect_token)
    if url_status != last_status:
      last_status = url_status
      # get service centraldashboard, attach event to it.
      svcs = coreApi.list_namespaced_service(
          'kubeflow', label_selector="app=centraldashboard")
      while len(svcs.to_dict()['items']) == 0:

kubeflow-kale / kale / backend / kale / utils / pod_utils.py (view on GitHub)
def _get_k8s_custom_objects_client():
    k8s_config.load_incluster_config()
    return k8s.CustomObjectsApi()

keikoproj / minion-manager / cloud_provider / aws / aws_minion_manager.py (view on GitHub)
def log_k8s_event(self, asg_name, price="", useSpot=False):
        msg_str = '{"apiVersion":"v1alpha1","spotPrice":"' + price + '", "useSpot": ' + str(useSpot).lower() + '}'
        event_namespace = os.getenv('EVENT_NAMESPACE', 'default')
        if not self.incluster:
            logger.info(msg_str)
            return

        try:
            config.load_incluster_config()
            v1 = client.CoreV1Api()
            event_timestamp = datetime.now(pytz.utc)
            event_name = "spot-instance-update"
            new_event = client.V1Event(
                count=1,
                first_timestamp=event_timestamp,
                involved_object=client.V1ObjectReference(
                    kind="SpotPriceInfo",
                    name=asg_name,
                    namespace=event_namespace,
                ),
                last_timestamp=event_timestamp,
                metadata=client.V1ObjectMeta(
                    generate_name=event_name,
                ),
                message=msg_str,