How to use the charmhelpers.core.hookenv.status_set function in charmhelpers

To help you get started, we've selected a few examples showing how charmhelpers.core.hookenv.status_set is used in popular public projects.


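Before diving into the project snippets, here is a minimal sketch of the call itself. status_set takes a workload state ('maintenance', 'blocked', 'waiting', or 'active') and a human-readable message that Juju shows to the operator. The hook function and the 'source' config option below are illustrative assumptions, not taken from any particular charm.

# Minimal usage sketch (illustrative): report workload status from a charm hook.
from charmhelpers.core import hookenv


def config_changed():
    # Tell the operator that work is in progress.
    hookenv.status_set('maintenance', 'Applying configuration')

    if not hookenv.config('source'):
        # 'source' is a hypothetical config option; operator action is needed.
        hookenv.status_set('blocked', 'Set the "source" configuration option')
        return

    # ... perform the actual configuration work here ...

    # Everything finished; mark the unit as ready.
    hookenv.status_set('active', 'Unit is ready')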

github openstack / charms.ceph / ceph / __init__.py
def upgrade_monitor(new_version):
    current_version = get_version()
    status_set("maintenance", "Upgrading monitor")
    log("Current ceph version is {}".format(current_version))
    log("Upgrading to: {}".format(new_version))

    try:
        add_source(config('source'), config('key'))
        apt_update(fatal=True)
    except subprocess.CalledProcessError as err:
        log("Adding the ceph source failed with message: {}".format(
            err.message))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
    try:
        if systemd():
            for mon_id in get_local_mon_ids():
                service_stop('ceph-mon@{}'.format(mon_id))
        else:

github amazon-archives / aws-service-operator / vendor / k8s.io / kubernetes / cluster / juju / layers / kubernetes-worker / reactive / kubernetes_worker.py
def update_kubelet_status():
    ''' There are different states that the kubelet can be in, where we are
    waiting for dns, waiting for cluster turnup, or ready to serve
    applications.'''
    services = [
        'kubelet',
        'kube-proxy'
    ]
    failing_services = []
    for service in services:
        daemon = 'snap.{}.daemon'.format(service)
        if not _systemctl_is_active(daemon):
            failing_services.append(service)

    if len(failing_services) == 0:
        hookenv.status_set('active', 'Kubernetes worker running.')
    else:
        msg = 'Waiting for {} to start.'.format(','.join(failing_services))
        hookenv.status_set('waiting', msg)

github k3s-io / k3s / cluster / juju / layers / kubernetes-worker / reactive / kubernetes_worker.py
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if the operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll be
    missing.

    """
    hookenv.status_set(
        'blocked',
        'Relate {}:kube-control kubernetes-master:kube-control'.format(
            hookenv.service_name()))

github apache / bigtop / bigtop-packages / src / charm / spark / layer-spark / reactive / spark.py
    deployment_matrix = {
        'hdfs_ready': is_state('hadoop.hdfs.ready'),
        'peers': peers,
        'sample_data': host.file_hash(sample_data) if sample_data else None,
        'spark_master': spark_master_host,
        'yarn_ready': is_state('hadoop.yarn.ready'),
        'zookeepers': zks,
    }

    # No-op if we are not forcing a reinstall or our matrix is unchanged.
    if not (force or data_changed('deployment_matrix', deployment_matrix)):
        report_status()
        return

    # (Re)install based on our execution mode
    hookenv.status_set('maintenance', 'configuring spark in {} mode'.format(mode))
    hookenv.log("Configuring spark with deployment matrix: {}".format(deployment_matrix))

    if mode.startswith('yarn') and is_state('hadoop.yarn.ready'):
        install_spark_yarn()
    elif mode.startswith('local') or mode == 'standalone':
        install_spark_standalone(zks, peers)
    else:
        # Something's wrong (probably requested yarn without yarn.ready).
        remove_state('spark.started')
        report_status()
        return

    # restart services to pick up possible config changes
    spark = Spark()
    spark.stop()
    spark.start()

github intuit / foremast / foremast-barrelman / vendor / k8s.io / kubernetes / cluster / juju / layers / kubernetes-worker / reactive / kubernetes_worker.py
def update_kubelet_status():
    ''' There are different states that the kubelet can be in, where we are
    waiting for dns, waiting for cluster turnup, or ready to serve
    applications.'''
    services = [
        'kubelet',
        'kube-proxy'
    ]
    failing_services = []
    for service in services:
        daemon = 'snap.{}.daemon'.format(service)
        if not _systemctl_is_active(daemon):
            failing_services.append(service)

    if len(failing_services) == 0:
        hookenv.status_set('active', 'Kubernetes worker running.')
    else:
        msg = 'Waiting for {} to start.'.format(','.join(failing_services))
        hookenv.status_set('waiting', msg)

github openstack / charms.ceph / ceph / __init__.py
    return

    if not is_block_device(dev):
        log('Path {} is not a block device - bailing'.format(dev))
        return

    if is_osd_disk(dev) and not reformat_osd:
        log('Looks like {} is already an'
            ' OSD data or journal, skipping.'.format(dev))
        return

    if is_device_mounted(dev):
        log('Looks like {} is in use, skipping.'.format(dev))
        return

    status_set('maintenance', 'Initializing device {}'.format(dev))
    cmd = ['ceph-disk', 'prepare']
    # Later versions of ceph support more options
    if cmp_pkgrevno('ceph', '0.60') >= 0:
        if encrypt:
            cmd.append('--dmcrypt')
    if cmp_pkgrevno('ceph', '0.48.3') >= 0:
        if osd_format:
            cmd.append('--fs-type')
            cmd.append(osd_format)
        if reformat_osd:
            cmd.append('--zap-disk')
        cmd.append(dev)
        if osd_journal:
            least_used = find_least_used_journal(osd_journal)
            cmd.append(least_used)
    else:

github openstack / charm-swift-proxy / charmhelpers / contrib / openstack / ha / utils.py
        relation_data['resources'][hostname_key] = crm_ocf
        relation_data['resource_params'][hostname_key] = (
            'params fqdn="{}" ip_address="{}"'
            .format(hostname, resolve_address(endpoint_type=endpoint_type,
                                              override=False)))

    if len(hostname_group) >= 1:
        log('DNS HA: Hostname group is set with {} as members. '
            'Informing the ha relation'.format(' '.join(hostname_group)),
            DEBUG)
        relation_data['groups'] = {
            'grp_{}_hostnames'.format(service): ' '.join(hostname_group)
        }
    else:
        msg = 'DNS HA: Hostname group has no members.'
        status_set('blocked', msg)
        raise DNSHAException(msg)

github k3s-io / k3s / cluster / juju / layers / kubernetes-master / reactive / kubernetes_master.py
def install_snaps():
    channel = hookenv.config('channel')
    hookenv.status_set('maintenance', 'Installing kubectl snap')
    snap.install('kubectl', channel=channel, classic=True)
    hookenv.status_set('maintenance', 'Installing kube-apiserver snap')
    snap.install('kube-apiserver', channel=channel)
    hookenv.status_set('maintenance',
                       'Installing kube-controller-manager snap')
    snap.install('kube-controller-manager', channel=channel)
    hookenv.status_set('maintenance', 'Installing kube-scheduler snap')
    snap.install('kube-scheduler', channel=channel)
    hookenv.status_set('maintenance', 'Installing cdk-addons snap')
    snap.install('cdk-addons', channel=channel)
    snap_resources_changed()
    set_state('kubernetes-master.snaps.installed')
    remove_state('kubernetes-master.components.started')

github apache / bigtop / bigtop-packages / src / charm / hive / layer-hive / reactive / hive.py
        hookenv.status_set('blocked',
                           'waiting for relation to hadoop plugin')
    elif not hadoop_ready:
        hookenv.status_set('waiting',
                           'waiting for hadoop to become ready')
    elif database_joined and not database_ready:
        hookenv.status_set('waiting',
                           'waiting for database to become ready')
    elif hbase_joined and not hbase_ready:
        hookenv.status_set('waiting',
                           'waiting for hbase to become ready')
    elif hive_installed and not database_ready:
        hookenv.status_set('active',
                           'ready (local metastore)')
    elif hive_installed and database_ready:
        hookenv.status_set('active',
                           'ready (remote metastore)')

github openstack / charm-percona-cluster / hooks / percona_hooks.py
                leader_ip != get_cluster_host_ip()):
            # Fix Bug #1738896
            hosts = [leader_ip] + hosts
        log("Leader is bootstrapped - configuring mysql on this node",
            DEBUG)
        # Rendering the mysqld.cnf and restarting is bootstrapping for a
        # non-leader node.
        render_config_restart_on_changed(hosts)
        # Assert we are bootstrapped. This will throw an
        # InconsistentUUIDError exception if UUIDs do not match.
        update_bootstrap_uuid()
    else:
        # Until the bootstrap-uuid attribute is set by the leader,
        # cluster_ready() will evaluate to False. So it is necessary to
        # feed this information to the user.
        status_set('waiting', "Waiting for bootstrap-uuid set by leader")
        log('Non-leader waiting on leader bootstrap, skipping render',
            DEBUG)
        return

    # Notify any changes to the access network
    update_client_db_relations()

    for rid in relation_ids('ha'):
        # make sure all the HA resources are (re)created
        ha_relation_joined(relation_id=rid)

    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()

    open_port(DEFAULT_MYSQL_PORT)
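
Taken together, the examples above share one pattern: set 'maintenance' while doing work, 'blocked' when operator intervention is required, 'waiting' while a dependency comes up, and 'active' once everything is running. Below is a condensed sketch of that pattern; the 'backend' relation name and the dependencies_ready/service_running helpers are hypothetical placeholders, not part of charmhelpers.

# Condensed sketch of the status pattern used in the examples above.
from charmhelpers.core import hookenv


def dependencies_ready():
    # Placeholder: replace with real readiness checks for your charm.
    return False


def service_running():
    # Placeholder: replace with a real service check.
    return False


def assess_status():
    if not hookenv.relation_ids('backend'):
        # 'backend' is a hypothetical relation name.
        hookenv.status_set('blocked', 'Missing required backend relation')
    elif not dependencies_ready():
        hookenv.status_set('waiting', 'Waiting for backend to become ready')
    elif not service_running():
        hookenv.status_set('maintenance', 'Starting service')
    else:
        hookenv.status_set('active', 'Service is running')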