How to use the charmhelpers.core.host.service_stop function in charmhelpers

To help you get started, we've selected a few charmhelpers examples based on popular ways the library is used in public projects.

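Before the project examples, here is a minimal sketch of the call itself. It uses only the public charmhelpers.core.host API; 'my-service' is a placeholder name.

from charmhelpers.core.host import (
    service_running,
    service_start,
    service_stop,
)

# charmhelpers picks the right backend (systemd, upstart, or sysvinit)
# for the host it runs on, so the same call works across series.
if service_running('my-service'):
    service_stop('my-service')

# ... perform maintenance ...

service_start('my-service')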

From openstack/charm-swift-proxy, hooks/swift_hooks.py:
def cluster_non_leader_actions():
    """Cluster relation hook actions to be performed by non-leader units.

    NOTE: must be called by non-leader from cluster relation hook.
    """
    log("Cluster changed by unit=%s (local is non-leader)" % (remote_unit()),
        level=DEBUG)
    settings = relation_get() or {}

    # Check whether we have been requested to stop proxy service
    rq_key = SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC
    token = settings.get(rq_key, None)
    if token:
        log("Peer request to stop proxy service received (%s) - sending ack" %
            (token), level=INFO)
        service_stop('swift-proxy')
        peers_only = settings.get('peers-only', None)
        rq = SwiftProxyClusterRPC().stop_proxy_ack(echo_token=token,
                                                   echo_peers_only=peers_only)
        relation_set(relation_settings=rq)
        return

    # Check if there are any builder files we can sync from the leader.
    log("Non-leader peer - checking if updated rings available", level=DEBUG)
    broker = settings.get('builder-broker', None)
    if not broker:
        log("No update available", level=DEBUG)
        if not is_paused():
            service_start('swift-proxy')
        return

    builders_only = int(settings.get('sync-only-builders', 0))
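The snippet above is truncated mid-function, but the pattern to note is already visible: the proxy is only restarted when the unit is not administratively paused. A minimal sketch of that guard, with the paused flag standing in for the charm's own is_paused() check:

from charmhelpers.core.host import service_running, service_start

def ensure_running(service, paused):
    # Respect an operator-initiated pause: never start services on a
    # paused unit, and avoid redundant starts on a running one.
    if not paused and not service_running(service):
        service_start(service)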

From openstack/charms.ceph, ceph/__init__.py (excerpt begins mid-function):
    log("Upgrading to: {}".format(new_version))

    try:
        add_source(config('source'), config('key'))
        apt_update(fatal=True)
    except subprocess.CalledProcessError as err:
        log("Adding the ceph source failed with message: {}".format(
            err.message))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
    try:
        if systemd():
            for mon_id in get_local_mon_ids():
                service_stop('ceph-mon@{}'.format(mon_id))
        else:
            service_stop('ceph-mon-all')
        apt_install(packages=PACKAGES, fatal=True)

        # Ensure the files and directories under /var/lib/ceph are chowned
        # properly as part of the move to the Jewel release, which moved the
        # ceph daemons to running as ceph:ceph instead of root:root.
        if new_version == 'jewel':
            # Ensure the ownership of Ceph's directories is correct
            owner = ceph_user()
            chownr(path=os.path.join(os.sep, "var", "lib", "ceph"),
                   owner=owner,
                   group=owner,
                   follow_links=True)

        if systemd():
            for mon_id in get_local_mon_ids():
                service_start('ceph-mon@{}'.format(mon_id))
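This excerpt also ends before the function does, but it shows the useful pattern of stopping templated systemd units (ceph-mon@<id>) one instance at a time. A sketch of the same stop/upgrade/start cycle, assuming mon_ids comes from get_local_mon_ids():

from charmhelpers.core.host import service_start, service_stop

def cycle_mon_daemons(mon_ids, use_systemd):
    # systemd hosts run one unit per monitor instance; older init
    # systems expose a single aggregate 'ceph-mon-all' job instead.
    units = (['ceph-mon@{}'.format(m) for m in mon_ids]
             if use_systemd else ['ceph-mon-all'])
    for unit in units:
        service_stop(unit)
    # ... upgrade packages here ...
    for unit in units:
        service_start(unit)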

From portworx/torpedo, vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py:
def cleanup_pre_snap_services():
    # remove old states
    remove_state('kubernetes-worker.components.installed')

    # disable old services
    services = ['kubelet', 'kube-proxy']
    for service in services:
        hookenv.log('Stopping {0} service.'.format(service))
        service_stop(service)

    # cleanup old files
    files = [
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes"
    ]
    for file in files:
        if os.path.isdir(file):
            ...  # excerpt truncated here

From portworx/torpedo, vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py:
def shutdown():
    ''' When this unit is destroyed:
        - delete the current node
        - stop the worker services
    '''
    try:
        if os.path.isfile(kubeconfig_path):
            kubectl('delete', 'node', gethostname())
    except CalledProcessError:
        hookenv.log('Failed to unregister node.')
    service_stop('snap.kubelet.daemon')
    service_stop('snap.kube-proxy.daemon')
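A design point worth copying from this hook: node deregistration is best-effort, while the service stops must always happen. A try/finally sketch makes that explicit (kubectl and kubeconfig_path are the charm's own helpers, assumed here):

try:
    if os.path.isfile(kubeconfig_path):
        kubectl('delete', 'node', gethostname())  # best effort
except CalledProcessError:
    hookenv.log('Failed to unregister node.')
finally:
    # Stop the daemons even if deregistration failed.
    for daemon in ('snap.kubelet.daemon', 'snap.kube-proxy.daemon'):
        service_stop(daemon)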

From k8snetworkplumbingwg/sriov-network-device-plugin, vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py:
def migrate_from_pre_snaps():
    # remove old states
    remove_state('kubernetes.components.installed')
    remove_state('kubernetes.dashboard.available')
    remove_state('kube-dns.available')
    remove_state('kubernetes-master.app_version.set')

    # disable old services
    services = ['kube-apiserver',
                'kube-controller-manager',
                'kube-scheduler']
    for service in services:
        hookenv.log('Stopping {0} service.'.format(service))
        host.service_stop(service)

    # rename auth files
    os.makedirs('/root/cdk', exist_ok=True)
    rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
                           '/root/cdk/serviceaccount.key')
    rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
                           '/root/cdk/basic_auth.csv')
    rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
                           '/root/cdk/known_tokens.csv')

    # cleanup old files
    files = [
        "/lib/systemd/system/kube-apiserver.service",
        "/lib/systemd/system/kube-controller-manager.service",
        "/lib/systemd/system/kube-scheduler.service",
        "/etc/default/kube-defaults",

From openstack/charm-openstack-dashboard, charmhelpers/core/services/base.py:
def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_running(service_name):
        host.service_stop(service_name)
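A short usage note: because the wrapper checks service_running() first, callers can stop a whole list of services without special-casing ones that never started. For example (service names are placeholders):

for svc in ('apache2', 'haproxy', 'memcached'):
    service_stop(svc)  # silently a no-op when svc is already stopped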

From apache/bigtop, bigtop-packages/src/charm/hbase/layer-hbase/lib/charms/layer/bigtop_hbase.py:
def stop(self):
        # order is important; master must stop last.
        hookenv.log('Stopping HBase services')
        host.service_stop('hbase-thrift')
        host.service_stop('hbase-regionserver')
        host.service_stop('hbase-master')
        hookenv.log('HBase services have been stopped')
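The ordering comment is the heart of this example: dependent daemons talk to the master while shutting down, so the master must stop last. Encoding that order as data keeps it from being lost in later edits; a sketch:

from charmhelpers.core import host

# The master is deliberately last in the stop order.
HBASE_STOP_ORDER = ('hbase-thrift', 'hbase-regionserver', 'hbase-master')

for svc in HBASE_STOP_ORDER:
    host.service_stop(svc)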

From k3s-io/k3s, cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py:
def shutdown():
    """ Stop the kubernetes master services

    """
    service_stop('snap.kube-apiserver.daemon')
    service_stop('snap.kube-controller-manager.daemon')
    service_stop('snap.kube-scheduler.daemon')

From openstack/charm-nova-compute, hooks/nova_compute_hooks.py:
def post_series_upgrade():
    log("Running complete series upgrade hook", "INFO")
    service_stop('nova-compute')
    service_stop(libvirt_daemon())
    # After package upgrade the service is broken and leaves behind a
    # PID file which causes the service to fail to start.
    # Remove this before restart
    if os.path.exists(LIBVIRTD_PID):
        os.unlink(LIBVIRTD_PID)
    series_upgrade_complete(
        resume_unit_helper, CONFIGS)
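The stale PID file cleanup is the detail to remember here. As a reusable sketch (the helper name is hypothetical; LIBVIRTD_PID above is the charm's own constant):

import os

from charmhelpers.core.host import service_stop

def stop_and_clear_pid(service, pid_file):
    # A package upgrade can leave a PID file behind that blocks the
    # daemon's next start; remove it after stopping the service.
    service_stop(service)
    if os.path.exists(pid_file):
        os.unlink(pid_file)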

From openstack/charms.openstack, charms_openstack/charm/core.py (excerpt begins mid-docstring):
        This function is a @decorator that checks if the wrapped function
        changes any of the files identified by the keys in the
        self.restart_map{} and, if they change, restarts the services in the
        corresponding list.
        """
        checksums = {path: ch_host.path_hash(path)
                     for path in self.full_restart_map.keys()}
        yield
        restarts = []
        for path in self.full_restart_map:
            if ch_host.path_hash(path) != checksums[path]:
                restarts += self.full_restart_map[path]
        services_list = list(collections.OrderedDict.fromkeys(restarts).keys())
        for service_name in services_list:
            ch_host.service_stop(service_name)
        for service_name in services_list:
            ch_host.service_start(service_name)
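Because of the yield, this is a context manager: file hashes are captured on entry, compared on exit, and only the services mapped to changed files get a stop/start cycle. A hedged usage sketch inside a charm class (render_with_interfaces is assumed from charms.openstack):

# Render config files inside the block; any file in self.full_restart_map
# whose hash changes triggers a stop/start of its mapped services.
with self.restart_on_change():
    self.render_with_interfaces(interfaces)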