How to use the charmhelpers.core.hookenv module in charmhelpers

To help you get started, we've selected a few charmhelpers.core.hookenv examples, based on popular ways it is used in public projects.
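
If you have not used hookenv before, the sketch below shows the general shape of the API: a minimal config-changed handler, assuming a hypothetical 'port' option. These calls shell out to Juju's hook tools, so they only work inside a running hook environment.

from charmhelpers.core import hookenv


def config_changed():
    # config() reads options defined in the charm's config.yaml;
    # 'port' is a hypothetical option name used for illustration.
    port = hookenv.config('port')
    hookenv.log('Opening port {}'.format(port), level=hookenv.INFO)
    hookenv.open_port(port)
    hookenv.status_set('active', 'ready')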

github openstack / charms.ceph / ceph/__init__.py (view on GitHub)
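This excerpt tunes a block device's max_sectors_kb setting, reading the operator's limit with hookenv.config('max-sectors-kb') and recording each decision with log().
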
import os

from charmhelpers.core import hookenv
from charmhelpers.core.hookenv import log


def tune_dev_limits(block_dev, uuid, save_settings_dict):
    # NOTE: the original excerpt starts mid-function, so this wrapper and
    # its parameters are assumptions; in the full source, uuid and
    # save_settings_dict are prepared earlier in the enclosing function.
    # get_max_sectors_kb(), get_max_hw_sectors_kb(), and
    # set_max_sectors_kb() are sibling helpers in the same module.
    dev_name = None
    path_parts = os.path.split(block_dev)
    if len(path_parts) == 2:
        dev_name = path_parts[1]
    else:
        log('Unable to determine the block device name from path: {}'.format(
            block_dev))
        # Play it safe and bail
        return
    max_sectors_kb = get_max_sectors_kb(dev_name=dev_name)
    max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name)

    if max_sectors_kb < max_hw_sectors_kb:
        # The hardware supports more than Linux is currently requesting
        config_max_sectors_kb = hookenv.config('max-sectors-kb')
        if config_max_sectors_kb < max_hw_sectors_kb:
            # Set max_sectors_kb to the config.yaml value if it is less
            # than max_hw_sectors_kb
            log('Setting max_sectors_kb for device {} to {}'.format(
                dev_name, config_max_sectors_kb))
            save_settings_dict[
                "drive_settings"][uuid][
                "read_ahead_sect"] = config_max_sectors_kb
            set_max_sectors_kb(dev_name=dev_name,
                               max_sectors_size=config_max_sectors_kb)
        else:
            # Otherwise raise the limit to the hardware maximum
            log('Setting max_sectors_kb for device {} to {}'.format(
                dev_name, max_hw_sectors_kb))
            save_settings_dict[
                "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb
            # (The excerpt was cut off here; assumed completion mirroring
            # the branch above.)
            set_max_sectors_kb(dev_name=dev_name,
                               max_sectors_size=max_hw_sectors_kb)

github kubernetes-incubator / cluster-capacity / vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py (view on GitHub)
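Here hookenv supplies the unit's addresses (unit_public_ip, unit_private_ip) and charm config (dns_domain, extra_sans) to assemble the SAN list for a server certificate request.
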
import socket

from charms.reactive import when
from charmhelpers.core import hookenv


@when('certificates.available')
def send_data(tls):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()

    # Get the SDN gateway based on the cidr address.
    # (get_kubernetes_service_ip() is a charm-layer helper, not shown here.)
    kubernetes_service_ip = get_kubernetes_service_ip()

    domain = hookenv.config('dns_domain')
    # Create SANs that the tls layer will add to the server cert.
    sans = [
        hookenv.unit_public_ip(),
        hookenv.unit_private_ip(),
        socket.gethostname(),
        kubernetes_service_ip,
        'kubernetes',
        'kubernetes.{0}'.format(domain),
        'kubernetes.default',
        'kubernetes.default.svc',
        'kubernetes.default.svc.{0}'.format(domain)
    ]

    # Maybe they have extra names they want as SANs.
    extra_sans = hookenv.config('extra_sans')
    if extra_sans:
        sans.extend(extra_sans.split())

    # Create a path safe name by removing path characters from the unit name.
    # (The excerpt ends here; the remaining lines are an assumed completion:
    # hand the sanitized name and SANs to the tls layer.)
    certificate_name = hookenv.local_unit().replace('/', '_')
    tls.request_server_cert(common_name, sans, certificate_name)

github openstack / charm-openstack-dashboard / charmhelpers/core/unitdata.py (view on GitHub)
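charmhelpers itself uses hookenv inside unitdata: hook_name() and charm_dir() scope each key-value transaction to the hook that is currently running.
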
    # Excerpt: a method of unitdata.HookData; in the full source it is
    # decorated with @contextlib.contextmanager so a charm can enter it
    # with a with statement.
    @contextlib.contextmanager
    def __call__(self):
        from charmhelpers.core import hookenv
        hook_name = hookenv.hook_name()

        with self.kv.hook_scope(hook_name):
            self._record_charm_version(hookenv.charm_dir())
            delta_config, delta_relation = self._record_hook(hookenv)
            yield self.kv, delta_config, delta_relation
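
For context, this is roughly how a charm drives that context manager; a minimal sketch assuming a config-changed handler that just counts its invocations:

from charmhelpers.core import unitdata

hook_data = unitdata.HookData()


def config_changed():
    # Entering hook_data() opens a transaction scoped to the running hook;
    # it is committed when the with-block exits cleanly.
    with hook_data():
        kv = unitdata.kv()
        kv.set('config.changed.count',
               (kv.get('config.changed.count') or 0) + 1)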

github apache / bigtop / bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py (view on GitHub)
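A common hookenv pattern: walk the unmet prerequisites in order and report the first one via status_set() as 'blocked' or 'waiting', falling through to 'active' once everything is ready.
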
from charms.reactive import is_state
from charmhelpers.core import hookenv


def report_status():
    hadoop_joined = is_state('hadoop.joined')
    hdfs_ready = is_state('hadoop.hdfs.ready')
    zk_joined = is_state('zookeeper.joined')
    zk_ready = is_state('zookeeper.ready')
    hbase_installed = is_state('hbase.installed')
    if not hadoop_joined:
        hookenv.status_set('blocked',
                           'waiting for relation to hadoop plugin')
    elif not hdfs_ready:
        hookenv.status_set('waiting',
                           'waiting for hdfs to become ready')
    elif not zk_joined:
        hookenv.status_set('blocked',
                           'waiting for relation to zookeeper')
    elif not zk_ready:
        hookenv.status_set('waiting',
                           'waiting for zookeeper to become ready')
    elif not hbase_installed:
        hookenv.status_set('waiting',
                           'waiting to install hbase')
    else:
        hookenv.status_set('active',
                           'ready')

github k3s-io / k3s / cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py (view on GitHub)
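status_set() also pairs with maintenance work, flagging the unit while a service restarts.
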
from charmhelpers.core import hookenv, host


def restart_apiserver():
    hookenv.status_set('maintenance', 'Restarting kube-apiserver')
    host.service_restart('snap.kube-apiserver.daemon')

github kubernetes-csi / external-provisioner / vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py (view on GitHub)
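This status handler collects any missing relations and reports them all in a single 'blocked' message.
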
from charms.reactive import is_state
from charmhelpers.core import hookenv


def report_status():
    # NOTE: the original excerpt starts mid-function; the function name,
    # the list setup, and the first is_state() guard are assumptions.
    missing_services = []
    if not is_state('certificates.available'):
        missing_services.append('certificates')
    if not is_state('kubeconfig.ready'):
        missing_services.append('kubernetes-master:kube-control')

    if missing_services:
        if len(missing_services) > 1:
            subject = 'relations'
        else:
            subject = 'relation'

        services = ','.join(missing_services)
        message = 'Missing {0}: {1}'.format(subject, services)
        hookenv.status_set('blocked', message)
        return

    hookenv.status_set('active', 'Ready to test.')

github kubernetes-csi / external-provisioner / vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py (view on GitHub)
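Beyond status reporting, hookenv manages the unit's exposed ports: open_port() and close_port() keep Juju's view in sync with whether the ingress controller is actually serving.
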
from subprocess import CalledProcessError

from charms.reactive import set_state
from charmhelpers.core import hookenv


def launch_default_ingress_controller(config, context, addon_path):
    # NOTE: the original excerpt starts mid-function; the signature and the
    # opening branch below are reconstructed as assumptions, and
    # apply_default_backend() is a hypothetical stand-in for the elided
    # default-backend step. render() and kubectl() are charm-layer helpers
    # whose imports are not shown on this page.
    if not apply_default_backend(context):
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress daemon set controller manifest
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        if context['arch'] == 's390x':
            context['ingress_image'] = \
                "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
        else:
            context['ingress_image'] = \
                "k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15" # noqa
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)

github openstack / charm-nova-cloud-controller / hooks/nova_cc_hooks.py (view on GitHub)
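hookenv.is_relation_made() lets the charm check that a relation exists before building Neutron settings from it and from hookenv.config('region').
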
from urllib.parse import urlparse

from charmhelpers.core import hookenv

import nova_cc_context  # sibling module in the charm's hooks directory


def neutron_settings():
    neutron_settings = {}
    if hookenv.is_relation_made('neutron-api', 'neutron-plugin'):
        # Instantiate NeutronAPIContext, then call the instance to build
        # the context dict (hence the doubled parentheses).
        neutron_api_info = nova_cc_context.NeutronAPIContext()()
        neutron_settings.update({
            # XXX: Rename these relations settings?
            'quantum_plugin': neutron_api_info['neutron_plugin'],
            'region': hookenv.config('region'),
            'quantum_security_groups':
            neutron_api_info['neutron_security_groups'],
            'quantum_url': neutron_api_info['neutron_url'],
        })
        # DNS domain is optional
        if neutron_api_info.get('dns_domain'):
            neutron_settings['dns_domain'] = neutron_api_info['dns_domain']
        neutron_url = urlparse(neutron_settings['quantum_url'])
        neutron_settings['quantum_host'] = neutron_url.hostname
        neutron_settings['quantum_port'] = neutron_url.port
    return neutron_settings

github intuit / foremast / foremast-barrelman/vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py (view on GitHub)
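hookenv.resource_get() returns the local path of an attached Juju resource (or a falsy value if none is attached), which drives this checksum-schema migration.
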
import hashlib

from charmhelpers.core import hookenv


def migrate_resource_checksums():
    ''' Migrate resource checksums from the old schema to the new one '''
    # snap_resources, db, and get_resource_checksum_db_key() are
    # module-level names in the full source, not shown on this page.
    for resource in snap_resources:
        new_key = get_resource_checksum_db_key(resource)
        if not db.get(new_key):
            path = hookenv.resource_get(resource)
            if path:
                # old key from charms.reactive.helpers.any_file_changed
                old_key = 'reactive.files_changed.' + path
                old_checksum = db.get(old_key)
                db.set(new_key, old_checksum)
            else:
                # No resource is attached. Previously, this meant no checksum
                # would be calculated and stored. But now we calculate it as if
                # it is a 0-byte resource, so let's go ahead and do that.
                zero_checksum = hashlib.md5().hexdigest()
                db.set(new_key, zero_checksum)

github intuit / foremast / foremast-barrelman/vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py (view on GitHub)
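This excerpt clears a node's NetworkUnavailable condition through the Kubernetes API, using hookenv.log() with an explicit hookenv.ERROR level on each failure path.
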
import json
from subprocess import CalledProcessError
from urllib.request import Request, urlopen

from charmhelpers.core import hookenv


def clear_network_unavailable():
    # NOTE: the original excerpt starts mid-function; the function name and
    # this opening try block are reconstructed as assumptions (kubectl() is
    # a charm-layer helper, not shown here).
    try:
        output = kubectl('get', 'nodes', '-o', 'json')
        nodes = json.loads(output)['items']
    except CalledProcessError:
        hookenv.log('failed to get kube-system nodes')
        return
    except (KeyError, json.JSONDecodeError) as e:
        hookenv.log('failed to parse kube-system node status '
                    '({}): {}'.format(e, output), hookenv.ERROR)
        return

    for node in nodes:
        node_name = node['metadata']['name']
        url = 'http://localhost:8080/api/v1/nodes/{}/status'.format(node_name)
        with urlopen(url) as response:
            code = response.getcode()
            body = response.read().decode('utf8')
        if code != 200:
            hookenv.log('failed to get node status from {} [{}]: {}'.format(
                url, code, body), hookenv.ERROR)
            return
        try:
            node_info = json.loads(body)
            conditions = node_info['status']['conditions']
            i = [c['type'] for c in conditions].index('NetworkUnavailable')
            if conditions[i]['status'] == 'True':
                hookenv.log('Clearing NetworkUnavailable from {}'.format(
                    node_name))
                conditions[i] = {
                    "type": "NetworkUnavailable",
                    "status": "False",
                    "reason": "RouteCreated",
                    "message": "Manually set through k8s api",
                }
                req = Request(url, method='PUT',
                              data=json.dumps(node_info).encode('utf8'),
                              headers={'Content-Type': 'application/json'})
                # (The excerpt is truncated here; this assumed completion
                # submits the patched status back to the API server.)
                with urlopen(req) as response:
                    hookenv.log('node status update returned {}'.format(
                        response.getcode()))
        except (KeyError, ValueError) as e:
            # Assumed handler closing the truncated try block above.
            hookenv.log('failed to patch node status: {}'.format(e),
                        hookenv.ERROR)