How to use the charmhelpers.core.host function in charmhelpers

To help you get started, we’ve selected a few charmhelpers examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github juju / charm-tools / tests / layers / mysql / hooks / data-relation.py View on Github external
def data_relation_gone():
    """React to the data relation departing by shutting MySQL down."""
    hookenv.log('Data relation no longer present, stopping MysQL.')
    host.service_stop('mysql')
github bcsaller / juju-docker / hooks / charmhelpers / core / services / base.py View on Github external
def service_stop(service_name):
    """Stop *service_name* only when it is actually running.

    Thin guard around host.service_stop that avoids the spurious
    "unknown service" log messages produced by stopping a service
    that is not running.
    """
    is_running = host.service_running(service_name)
    if is_running:
        host.service_stop(service_name)
github apache / bigtop / bigtop-packages / src / charm / hive / layer-hive / lib / charms / layer / bigtop_hive.py View on Github external
def stop(self):
        """Stop the Hive services.

        Ordering matters: the metastore must be the last service stopped.
        """
        hookenv.log('Stopping Hive services')
        for svc in ('hive-server2', 'hive-metastore'):
            host.service_stop(svc)
        hookenv.log('Hive services have been stopped')
github openstack / charms.openstack / charms_openstack / charm / classes.py View on Github external
def enable_apache_ssl_vhost(self):
        """Enable the Apache vhost used for SSL termination.

        Creates an empty vhost file if one does not exist, then enables
        the site and reloads Apache only if it is not currently enabled.
        """
        vhost_path = self.apache_ssl_vhost_file
        if not os.path.exists(vhost_path):
            # Touch the file so a2ensite has something to enable.
            open(vhost_path, 'a').close()

        # a2query exits non-zero when the site is not yet enabled.
        not_enabled = subprocess.call(
            ['a2query', '-s', 'openstack_https_frontend'])
        if not_enabled:
            subprocess.check_call(['a2ensite', 'openstack_https_frontend'])
            ch_host.service_reload('apache2', restart_on_failure=True)
github openshift / open-service-broker-sdk / vendor / k8s.io / kubernetes / cluster / juju / layers / kubernetes-master / reactive / kubernetes_master.py View on Github external
def migrate_from_pre_snaps():
    """Migrate a pre-snap Kubernetes master install to the snap-based layout.

    Clears stale reactive states, stops the legacy (non-snap) control-plane
    services, moves auth files into /root/cdk, and collects obsolete unit
    and default files for cleanup.
    """
    # remove old states
    remove_state('kubernetes.components.installed')
    remove_state('kubernetes.dashboard.available')
    remove_state('kube-dns.available')
    remove_state('kubernetes-master.app_version.set')

    # disable old services
    services = ['kube-apiserver',
                'kube-controller-manager',
                'kube-scheduler']
    for service in services:
        hookenv.log('Stopping {0} service.'.format(service))
        host.service_stop(service)

    # rename auth files
    # exist_ok keeps this idempotent when the hook runs more than once
    os.makedirs('/root/cdk', exist_ok=True)
    rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
                           '/root/cdk/serviceaccount.key')
    rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
                           '/root/cdk/basic_auth.csv')
    rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
                           '/root/cdk/known_tokens.csv')

    # cleanup old files
    files = [
        "/lib/systemd/system/kube-apiserver.service",
        "/lib/systemd/system/kube-controller-manager.service",
        "/lib/systemd/system/kube-scheduler.service",
        "/etc/default/kube-defaults",
github intuit / foremast / foremast-barrelman / vendor / k8s.io / kubernetes / cluster / juju / layers / kubernetes-master / reactive / kubernetes_master.py View on Github external
def restart_controller_manager():
    """Restart the kube-controller-manager snap daemon.

    Sets the unit to maintenance status for the duration of the restart.
    """
    daemon = 'snap.kube-controller-manager.daemon'
    hookenv.status_set('maintenance', 'Restarting kube-controller-manager')
    host.service_restart(daemon)
github paulgear / ntpmon / reactive / ntpmon.py View on Github external
Install package dependencies, source files, and startup configuration.
    """
    hookenv.log('installing ntpmon dependencies')
    apt_install(['python3-psutil'])

    hookenv.log('installing ntpmon')
    # Ensure the install directory's parent exists, then copy the sources in.
    host.mkdir(os.path.dirname(ntpmon_dir))
    host.rsync('src/', '{}/'.format(ntpmon_dir))

    if host.init_is_systemd():
        hookenv.log('installing ntpmon systemd configuration')
        host.rsync('src/' + service_name + '.systemd', systemd_config)
        # NOTE(review): there is no 'systemd' CLI command; this was almost
        # certainly meant to be ['systemctl', 'daemon-reload'] so systemd
        # picks up the freshly installed unit file -- confirm and fix.
        subprocess.call(['systemd', 'daemon-reload'])
    else:
        hookenv.log('installing ntpmon upstart configuration')
        host.rsync('src/' + service_name + '.upstart', upstart_config)
    # Record installation and force (re)configuration on the next dispatch.
    set_state('ntpmon.installed')
    remove_state('ntpmon.configured')
github intuit / foremast / foremast-barrelman / vendor / k8s.io / kubernetes / cluster / juju / layers / kubernetes-master / reactive / kubernetes_master.py View on Github external
def master_services_down():
    """Check which Kubernetes master services are not running.

    Return: list of failing services"""
    daemon_names = {
        svc: 'snap.{}.daemon'.format(svc)
        for svc in ('kube-apiserver',
                    'kube-controller-manager',
                    'kube-scheduler')
    }
    return [svc for svc, daemon in daemon_names.items()
            if not host.service_running(daemon)]
github openstack / charm-designate / src / reactive / designate_handlers.py View on Github external
def local_pools_updated():
    """Restart the pool manager after local pool definitions change."""
    message = "Pools updated locally, restarting pool manager"
    hookenv.log(message, level=hookenv.DEBUG)
    host.service_restart('designate-pool-manager')
github openstack / charm-nova-cloud-controller / charmhelpers / contrib / charmsupport / nrpe.py View on Github external
Add checks for each service in list

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    :param bool immediate_check: For sysv init, run the service check immediately
    """
    for svc in services:
        # Don't add a check for these services from neutron-gateway
        # NOTE(review): bare 'next' is a no-op expression in Python (it merely
        # references the builtin); 'continue' was almost certainly intended,
        # so these services are NOT actually skipped -- confirm and fix.
        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
            next

        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc

        # Pick the check style matching how the service is managed on this
        # host: systemd first, then upstart, then SysV via a cron-driven check.
        if host.init_is_systemd():
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_systemd.py %s' % svc
            )
        elif os.path.exists(upstart_init):
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_upstart_job %s' % svc
            )
        elif os.path.exists(sysv_init):
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
            croncmd = (
                '/usr/local/lib/nagios/plugins/check_exit_status.pl '