How to use the charmhelpers.core.host.service_restart function in charmhelpers

To help you get started, we’ve selected a few examples of charmhelpers.core.host.service_restart, based on popular ways it is used in public projects.
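
At its simplest, host.service_restart takes the name of a system service and restarts it through the host's init system, returning True when the restart command succeeds. The sketch below is a minimal, hypothetical hook that restarts a service and reports the result; the 'my-service' name and the status messages are placeholders rather than anything taken from the projects shown further down.

# Minimal sketch, assuming a hypothetical service named 'my-service'.
from charmhelpers.core import hookenv, host


def config_changed():
    # service_restart returns True when the restart command succeeded.
    if host.service_restart('my-service'):
        hookenv.status_set('active', 'my-service restarted')
    else:
        hookenv.status_set('blocked', 'my-service failed to restart')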

Example from openstack/charm-designate (src/reactive/designate_handlers.py):
def remote_pools_updated():
    hookenv.log(
        "Pools updated on remote host, restarting pool manager",
        level=hookenv.DEBUG)
    host.service_restart('designate-pool-manager')
Example from openstack/charm-openstack-dashboard (charmhelpers/core/services/base.py):
def service_restart(service_name):
    """
    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_available(service_name):
        if host.service_running(service_name):
            host.service_restart(service_name)
        else:
            host.service_start(service_name)
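
A caller would typically invoke this wrapper right after rewriting a service's configuration, so that a stopped service is started rather than triggering the "unknown service" noise the docstring mentions. A short usage sketch, assuming a hypothetical config-writing helper and the 'apache2' service name purely for illustration:

def config_changed():
    write_service_config()      # hypothetical helper that rewrites the config file
    service_restart('apache2')  # restarts if running, starts if stopped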
Example from apache/bigtop (bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/reactive/resourcemanager.py):
def start_resourcemanager(namenode):
    hookenv.status_set('maintenance', 'starting resourcemanager')
    # NB: service should be started by install, but we want to verify it is
    # running before we set the .started state and open ports. We always
    # restart here, which may seem heavy-handed. However, restart works
    # whether the service is currently started or stopped. It also ensures the
    # service is using the most current config.
    rm_started = host.service_restart('hadoop-yarn-resourcemanager')
    if rm_started:
        for port in get_layer_opts().exposed_ports('resourcemanager'):
            hookenv.open_port(port)
        set_state('apache-bigtop-resourcemanager.started')
        hookenv.status_set('maintenance', 'resourcemanager started')
        hookenv.application_version_set(get_hadoop_version())
    else:
        hookenv.log('YARN ResourceManager failed to start')
        hookenv.status_set('blocked', 'resourcemanager failed to start')
        remove_state('apache-bigtop-resourcemanager.started')
        for port in get_layer_opts().exposed_ports('resourcemanager'):
            hookenv.close_port(port)

    hs_started = host.service_restart('hadoop-mapreduce-historyserver')
    if not hs_started:
        hookenv.log('YARN HistoryServer failed to start')
Example from amazon-archives/aws-service-operator (vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py):
def restart_unit_services():
    '''Restart worker services.'''
    hookenv.log('Restarting kubelet and kube-proxy.')
    services = ['kube-proxy', 'kubelet']
    for service in services:
        service_restart('snap.%s.daemon' % service)
Example from k8snetworkplumbingwg/sriov-network-device-plugin (vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py):
def restart_scheduler():
    hookenv.status_set('maintenance', 'Restarting kube-scheduler')
    host.service_restart('snap.kube-scheduler.daemon')
Example from openstack/charm-nova-compute (hooks/nova_compute_hooks.py):
def service_restart_handler(relation_id=None, unit=None,
                            default_service=None):
    '''Handler for detecting requests from subordinate
    charms for restarts of services'''
    restart_nonce = relation_get(attribute='restart-nonce',
                                 unit=unit,
                                 rid=relation_id)
    db = unitdata.kv()
    nonce_key = 'restart-nonce'
    if restart_nonce != db.get(nonce_key):
        if not is_unit_paused_set():
            service = relation_get(attribute='remote-service',
                                   unit=unit,
                                   rid=relation_id) or default_service
            if service:
                service_restart(service)
        db.set(nonce_key, restart_nonce)
        db.flush()
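
The handler above only acts when the 'restart-nonce' value read from the relation differs from the last nonce recorded in the unit's key/value store, so a subordinate charm requests a restart simply by publishing a fresh nonce. A minimal sketch of that subordinate side, assuming a hypothetical relation id and an illustrative service name:

# Hypothetical subordinate-side counterpart to the handler above.
import uuid

from charmhelpers.core import hookenv


def request_restart(relation_id):
    hookenv.relation_set(
        relation_id=relation_id,
        relation_settings={
            'restart-nonce': str(uuid.uuid4()),
            # 'remote-service' names the service the principal should restart;
            # 'nova-compute' here is just an example.
            'remote-service': 'nova-compute',
        })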
Example from kubernetes-sigs/apiserver-builder-alpha (cmd/vendor/github.com/kubernetes-incubator/reference-docs/vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py):
def start_master(etcd):
    '''Run the Kubernetes master components.'''
    freeze_service_cidr()
    if not etcd.get_connection_string():
        # etcd is not returning a connection string. This happens when
        # the master unit disconnects from etcd and is ready to terminate.
        # No point in trying to start master services and fail. Just return.
        return
    handle_etcd_relation(etcd)
    configure_master_services()
    hookenv.status_set('maintenance',
                       'Starting the Kubernetes master services.')

    services = ['kube-apiserver',
                'kube-controller-manager',
                'kube-scheduler']
    for service in services:
        host.service_restart('snap.%s.daemon' % service)

    hookenv.open_port(6443)
    set_state('kubernetes-master.components.started')
Example from GoogleCloudPlatform/k8s-multicluster-ingress (vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py):
def restart_scheduler():
    prev_state, prev_msg = hookenv.status_get()
    hookenv.status_set('maintenance', 'Restarting kube-scheduler')
    host.service_restart('snap.kube-scheduler.daemon')
    hookenv.status_set(prev_state, prev_msg)
Example from k8snetworkplumbingwg/sriov-network-device-plugin (vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py):
def restart_apiserver():
    hookenv.status_set('maintenance', 'Restarting kube-apiserver')
    host.service_restart('snap.kube-apiserver.daemon')
Example from openshift/origin (vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py):
def restart_controller_manager():
    hookenv.status_set('maintenance', 'Restarting kube-controller-manager')
    host.service_restart('snap.kube-controller-manager.daemon')