How to use the kubernetes.watch.Watch function in kubernetes

To help you get started, we’ve selected a few kubernetes.watch.Watch examples, based on popular ways the function is used in public projects.

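At its core, watch.Watch().stream() wraps any list call from the client API and yields events as dicts with 'type' (ADDED, MODIFIED, DELETED, or ERROR), 'object' (the deserialized model), and 'raw_object' (the plain dict). A minimal, self-contained sketch, assuming a reachable cluster and a local kubeconfig:

from kubernetes import client, config, watch

# Load credentials from the default kubeconfig (~/.kube/config).
config.load_kube_config()

v1 = client.CoreV1Api()
w = watch.Watch()

# Stream namespace events for up to 60 seconds, then let the watch expire.
for event in w.stream(v1.list_namespace, timeout_seconds=60):
    print(event['type'], event['object'].metadata.name)
    if event['type'] == 'DELETED':
        w.stop()  # stop() ends the stream early and cleanly.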

github kubeless / kubeless / docker / event-sources / kubernetes / events.py
def namespaces():
    w = watch.Watch()
    for event in w.stream(v1.list_namespace):
        logger.info("Event: %s %s %s" % (event['type'], event['object'].kind, event['object'].metadata.name))
        msg = {'type':event['type'],'object':event['raw_object']}
        producer.send('k8s', msg)
        producer.flush()
        yield from asyncio.sleep(0.1)
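
This excerpt relies on v1, logger, and producer being defined at module level. A plausible setup sketch, inferred from the companion services() excerpt from the same file further down (the in-cluster config call is an assumption, since the event source runs as a pod):

import json
import logging

from kafka import KafkaProducer
from kubernetes import client, config, watch

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

config.load_incluster_config()  # Assumes this runs inside the cluster.
v1 = client.CoreV1Api()
producer = KafkaProducer(
    bootstrap_servers='kafka.kubeless:9092',
    value_serializer=lambda v: json.dumps(v).encode('utf-8'))
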
github redhat-cop / anarchy / build / anarchy.py
def watch_actions():
    logger.debug('Starting watch for anarchyactions')
    stream = kubernetes.watch.Watch().stream(
        kube_custom_objects.list_namespaced_custom_object,
        anarchy_crd_domain,
        'v1',
        namespace,
        'anarchyactions'
    )
    for event in stream:
        logger.debug("action {} in {} {}".format(
            event['object']['metadata']['name'],
            event['object']['metadata']['namespace'],
            event['type']
        ))
        if event['type'] == 'ADDED':
            handle_action_added(event['object'])
        elif event['type'] == 'MODIFIED':
            handle_action_modified(event['object'])
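
Note the argument order here: w.stream() forwards its extra positional arguments straight to the list call, so anarchy_crd_domain, 'v1', namespace, and 'anarchyactions' line up with the signature of CustomObjectsApi.list_namespaced_custom_object(group, version, namespace, plural).
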
github airshipit / armada / armada / handlers / k8s.py
in `job`, `cronjob`, or `pod`
        :param name: The name of the object to delete
        :param namespace: The namespace of the object
        :param propagation_policy: The Kubernetes propagation_policy to apply
            to the delete. Default 'Foreground' means that child objects
            will be deleted before the given object is marked as deleted.
            See: https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#controlling-how-the-garbage-collector-deletes-dependents  # noqa
        :param timeout: The timeout to wait for the delete to complete
        '''
        try:
            timeout = self._check_timeout(timeout)

            LOG.debug('Watching to delete %s %s, Wait timeout=%s',
                      object_type_description, name, timeout)
            body = client.V1DeleteOptions()
            w = watch.Watch()
            issue_delete = True
            found_events = False
            for event in w.stream(
                    list_func, namespace=namespace, timeout_seconds=timeout):
                if issue_delete:
                    delete_func(
                        name=name,
                        namespace=namespace,
                        body=body,
                        propagation_policy=propagation_policy)
                    issue_delete = False

                event_type = event['type'].upper()
                item_name = event['object'].metadata.name
                LOG.debug('Watch event %s on %s', event_type, item_name)
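
The loop is truncated above, but the issue_delete and found_events flags point at the pattern: establish the watch first so no event can be missed, issue the delete on the first event, then keep consuming events until the target object's DELETED event arrives or the timeout expires. A simplified, generic sketch of the same pattern (delete_and_confirm is a hypothetical helper, not Armada's API):

from kubernetes import client, watch

def delete_and_confirm(delete_func, list_func, name, namespace, timeout=300):
    # Hypothetical helper: delete an object and block until its DELETED
    # event is observed, or the watch times out.
    w = watch.Watch()
    issued = False
    for event in w.stream(list_func, namespace=namespace,
                          timeout_seconds=timeout):
        if not issued:
            delete_func(name=name, namespace=namespace,
                        body=client.V1DeleteOptions(
                            propagation_policy='Foreground'))
            issued = True
        if (event['type'].upper() == 'DELETED'
                and event['object'].metadata.name == name):
            w.stop()
            return True
    return False  # Timed out without observing the deletion.

With a BatchV1Api client, for example, delete_func and list_func would be batch.delete_namespaced_job and batch.list_namespaced_job.
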
github bonnefoa / kubectl-fzf / kubectl_fzf_cache_builder / kubectl_fzf_cache_builder.py
def watch_resource(self, func, ResourceCls):
        dest_file=os.path.join(self.dir, ResourceCls._dest_file())
        log.warn('Watching {} on namespace {}, writing results in {}'.format(
            ResourceCls.__name__, self.namespace, dest_file))
        w = watch.Watch()
        watches.append(w)
        resources = set()
        with open(dest_file, 'w') as dest:
            kwargs = self._get_resource_kwargs(ResourceCls)
            i = 0
            for resp in w.stream(func, **kwargs):
                resource = ResourceCls(resp['object'])
                self.process_resource(resource, resources, dest)
                i = i + 1
                if i % 1000 == 0:
                    log.info('Process {} {}'.format(i, ResourceCls.__name__))
        log.warn('{} watcher exiting'.format(ResourceCls.__name__))
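
The module-level watches list that each Watch object is appended to is presumably there so a shutdown handler can call stop() on every active watch and let each streaming loop exit cleanly; without it, the for loop over w.stream() only ends when the server closes the connection.
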
github chaostoolkit / chaostoolkit-kubernetes / chaosk8s / probes.py
def _deployment_readiness_has_state(name: str, ready: bool,
                                    ns: str = "default",
                                    label_selector: str = "name in ({name})",
                                    timeout: int = 30,
                                    secrets: Secrets = None):
    """
    Check whether the given deployment is ready or not, according to the
    `ready` parameter.
    If the state is not reached after `timeout` seconds, a
    :exc:`chaoslib.exceptions.ActivityFailed` exception is raised.
    """
    label_selector = label_selector.format(name=name)
    api = create_k8s_api_client(secrets)
    v1 = client.AppsV1Api(api)
    w = watch.Watch()
    timeout = int(timeout)

    try:
        logger.debug("Watching events for {t}s".format(t=timeout))
        for event in w.stream(v1.list_namespaced_deployment, namespace=ns,
                              label_selector=label_selector,
                              _request_timeout=timeout):
            deployment = event['object']
            status = deployment.status
            spec = deployment.spec

            logger.debug(
                "Deployment '{p}' {t}: "
                "Ready Replicas {r} - "
                "Unavailable Replicas {u} - "
                "Desired Replicas {a}".format(
github kubeless / kubeless / docker / event-sources / kubernetes / events.py
def services():
    w = watch.Watch()
    # Create the Kafka producer once, not once per streamed event.
    producer = KafkaProducer(bootstrap_servers='kafka.kubeless:9092',
                             value_serializer=lambda v: json.dumps(v).encode('utf-8'))
    for event in w.stream(v1.list_service_for_all_namespaces):
        logger.info("Event: %s %s %s" % (event['type'], event['object'].kind, event['object'].metadata.name))
        msg = {'type': event['type'], 'object': event['raw_object']}
        producer.send('k8s', msg)
        producer.flush()
        yield from asyncio.sleep(0.1)
github hail-is / hail / batch / batch / sidecar.py
async def kube_event_loop(pool):
    while True:
        try:
            stream = kube.watch.Watch().stream(
                v1.list_namespaced_pod,
                field_selector=f'metadata.name={pod_name}',
                namespace=HAIL_POD_NAMESPACE)
            async for event in DeblockedIterator(pool, stream):
                type = event['type']
                pod = event['object']
                name = pod.metadata.name
                log.info(f'event {type} named {name}')
                await pod_changed(pod)
        except Exception as exc:  # pylint: disable=W0703
            log.exception(f'k8s event stream failed due to: {exc}')
        await asyncio.sleep(5)
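
DeblockedIterator is a Hail-specific helper, not part of the kubernetes package: the client's watch stream is a blocking generator, so consuming it from asyncio means pulling items on a worker pool. A minimal stand-in sketch (the name deblocked and the None sentinel are assumptions):

import asyncio

async def deblocked(pool, blocking_iter):
    # Pull items from a blocking iterator on a thread pool so the asyncio
    # event loop never blocks; yields until the iterator is exhausted.
    loop = asyncio.get_event_loop()
    while True:
        item = await loop.run_in_executor(pool, next, blocking_iter, None)
        if item is None:  # Sentinel from next(): the stream has ended.
            return
        yield item
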
github redhat-cop / anarchy / operator / anarchygovernor.py
def watch(runtime):
        '''
        Watch AnarchyGovernors and keep definitions synchronized

        This watch is independent of the kopf watch and is used to keep governor definitions updated
        even when the pod is not the active peer.
        '''
        for event in kubernetes.watch.Watch().stream(
            runtime.custom_objects_api.list_namespaced_custom_object,
            runtime.operator_domain, runtime.api_version, runtime.operator_namespace, 'anarchygovernors'
        ):
            obj = event.get('object')

            if event['type'] == 'ERROR' \
            and obj['kind'] == 'Status':
                if obj['status'] == 'Failure':
                    if obj['reason'] in ('Expired', 'Gone'):
                        operator_logger.info('AnarchyGovernor watch restarting, reason %s', obj['reason'])
                        return
                    else:
                        raise Exception("AnarchyGovernor watch failure: reason {}, message {}", obj['reason'], obj['message'])

            if obj and obj.get('apiVersion') == runtime.api_group_version:
                if event['type'] in ('ADDED', 'MODIFIED', None):
github bazelbuild / rules_k8s / examples / todocontroller / py / controller.py
def mark_done(event, obj):
        metadata = obj.get("metadata")
        if not metadata:
            logging.error("No metadata in object, skipping: %s", json.dumps(obj, indent=1))
            return
        name = metadata.get("name")

        obj["spec"]["done"] = True
        obj["spec"]["comment"] = "DEMO "

        logging.error("Updating: %s", name)
        crds.replace_namespaced_custom_object(DOMAIN, "v1", namespace, "todos", name, obj)

    resource_version = ''
    while True:
        stream = watch.Watch().stream(crds.list_namespaced_custom_object,
                                      DOMAIN, "v1", namespace, "todos",
                                      resource_version=resource_version)
        for event in stream:
            obj = event["object"]

            spec = obj.get("spec")
            if not spec:
                logging.error("No 'spec' in object, skipping event: %s", json.dumps(obj, indent=1))
            else:
                if not spec.get("done", True):
                    mark_done(event, obj)

            # Configure where to resume streaming.
            metadata = obj.get("metadata")
            if metadata:
                resource_version = metadata["resourceVersion"]
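
Tracking resourceVersion from each object's metadata and handing it back to stream() is the standard way to resume a watch where the previous one left off instead of re-listing everything. Stored versions do expire, though: the API server then emits an ERROR event with reason Expired or Gone (HTTP 410), which is exactly the condition the AnarchyGovernor example above detects before restarting its watch from scratch.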