How to use the charmhelpers.core.hookenv.open_port function in charmhelpers

To help you get started, we’ve selected a few charmhelpers examples based on popular ways the library is used in public projects.
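
Before the project examples, here is a minimal sketch of the call itself. It assumes the code runs inside a Juju hook, where the open-port hook tool is available; the function name and port numbers are illustrative:

from charmhelpers.core import hookenv

def expose_web_ports():
    # open_port(port, protocol='TCP') invokes Juju's open-port hook
    # tool for this unit; close_port() is the symmetric operation.
    hookenv.open_port(80)
    hookenv.open_port(443, protocol='TCP')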

github openstack / charm-openstack-dashboard / charmhelpers / core / services / base.py (View on GitHub)
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
                if bool(old_port) and not self.ports_contains(old_port, new_ports):
                    hookenv.close_port(old_port)
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            # A port is either a number or 'ICMP'
            protocol = 'TCP'
            if str(port).upper() == 'ICMP':
                protocol = 'ICMP'
            if event_name == 'start':
                hookenv.open_port(port, protocol)
            elif event_name == 'stop':
                hookenv.close_port(port, protocol)
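
This callback from charmhelpers' own services framework keeps port handling idempotent: it records the open ports in a '.{service_name}.ports' file under the charm directory, closes any previously opened port that is no longer in the new set, and then opens or closes the new ports depending on whether the service is starting or stopping. Note that the literal string 'ICMP' is treated as a pseudo-port and passed through with protocol='ICMP'.
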
github k3s-io / k3s / cluster / juju / layers / kubernetes-worker / reactive / kubernetes_worker.py (View on GitHub)
    context['ingress_image'] = \
            "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
    manifest = addon_path.format('ingress-replication-controller.yaml')
    render('ingress-replication-controller.yaml', manifest, context)
    hookenv.log('Creating the ingress replication controller.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
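
This pattern, which recurs in the vendored copies of kubernetes_worker.py below, ties the open ports to the health of the ingress controller: 80 and 443 are opened only after 'kubectl apply' succeeds, and are closed again if it fails so another attempt can be made on the next update.
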
github k8snetworkplumbingwg / sriov-network-device-plugin / vendor / k8s.io / kubernetes / cluster / juju / layers / kubernetes-worker / reactive / kubernetes_worker.py (View on GitHub)
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
github k8snetworkplumbingwg / sriov-network-device-plugin / vendor / k8s.io / kubernetes / cluster / juju / layers / kubernetes-worker / reactive / kubernetes_worker.py (View on GitHub)
    context['daemonset_api_version'] = 'apps/v1'
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
github apache / bigtop / bigtop-packages / src / charm / hadoop / layer-hadoop-resourcemanager / reactive / resourcemanager.py (View on GitHub)
def start_resourcemanager(namenode):
    hookenv.status_set('maintenance', 'starting resourcemanager')
    # NB: service should be started by install, but we want to verify it is
    # running before we set the .started state and open ports. We always
    # restart here, which may seem heavy-handed. However, restart works
    # whether the service is currently started or stopped. It also ensures the
    # service is using the most current config.
    rm_started = host.service_restart('hadoop-yarn-resourcemanager')
    if rm_started:
        for port in get_layer_opts().exposed_ports('resourcemanager'):
            hookenv.open_port(port)
        set_state('apache-bigtop-resourcemanager.started')
        hookenv.status_set('maintenance', 'resourcemanager started')
        hookenv.application_version_set(get_hadoop_version())
    else:
        hookenv.log('YARN ResourceManager failed to start')
        hookenv.status_set('blocked', 'resourcemanager failed to start')
        remove_state('apache-bigtop-resourcemanager.started')
        for port in get_layer_opts().exposed_ports('resourcemanager'):
            hookenv.close_port(port)

    hs_started = host.service_restart('hadoop-mapreduce-historyserver')
    if not hs_started:
        hookenv.log('YARN HistoryServer failed to start')
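
Here the set of ports is data-driven: it comes from layer options via get_layer_opts().exposed_ports('resourcemanager'), and the ports are opened only after host.service_restart() confirms the ResourceManager is actually running. On failure the same ports are closed and the unit is set to blocked, keeping the open ports in sync with the service state.
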
github k3s-io / k3s / cluster / juju / layers / kubernetes-master / reactive / kubernetes_master.py (View on GitHub)
    if not etcd.get_connection_string():
        # etcd is not returning a connection string. This happens when
        # the master unit disconnects from etcd and is ready to terminate.
        # No point in trying to start master services and fail. Just return.
        return

    # TODO: Make sure below relation is handled on change
    # https://github.com/kubernetes/kubernetes/issues/43461
    handle_etcd_relation(etcd)

    # Add CLI options to all components
    configure_apiserver(etcd.get_connection_string(), getStorageBackend())
    configure_controller_manager()
    configure_scheduler()
    set_state('kubernetes-master.components.started')
    hookenv.open_port(6443)
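
The master charm opens a single port, 6443 (the secure Kubernetes API server port), and only after etcd connectivity is confirmed and all control-plane components are configured.
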
github kubernetes-sigs / apiserver-builder-alpha / cmd / vendor / github.com / kubernetes-incubator / reference-docs / vendor / k8s.io / kubernetes / cluster / juju / layers / kubernetes-worker / reactive / kubernetes_worker.py (View on GitHub)
    # Render the ingress replication controller manifest
    manifest = addon_path.format('ingress-replication-controller.yaml')
    render('ingress-replication-controller.yaml', manifest, context)
    hookenv.log('Creating the ingress replication controller.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
github GoogleCloudPlatform / k8s-multicluster-ingress / vendor / k8s.io / kubernetes / cluster / juju / layers / kubernetes-master / reactive / kubernetes_master.py (View on GitHub)
    if not etcd.get_connection_string():
        # etcd is not returning a connection string. This happens when
        # the master unit disconnects from etcd and is ready to terminate.
        # No point in trying to start master services and fail. Just return.
        return

    # TODO: Make sure below relation is handled on change
    # https://github.com/kubernetes/kubernetes/issues/43461
    handle_etcd_relation(etcd)

    # Add CLI options to all components
    configure_apiserver(etcd.get_connection_string(), getStorageBackend())
    configure_controller_manager()
    configure_scheduler()
    set_state('kubernetes-master.components.started')
    hookenv.open_port(6443)
github jenkinsci / jenkins-charm / lib / charms / layer / jenkins / configuration.py (View on GitHub)
            err = "{} is not a valid setting for jnlp-port".format(
                config["jnlp-port"]
            )
            hookenv.log(err)
            hookenv.status_set("blocked", err)
            return False

        context = {
            "master_executors": config["master-executors"],
            "jnlp_port": config["jnlp-port"]}

        templating.render(
            "jenkins-config.xml", paths.CONFIG_FILE, context,
            owner="jenkins", group="nogroup")

        hookenv.open_port(PORT)

        # if we're using a set JNLP port, open it
        if config["jnlp-port"] > 0:
            hookenv.open_port(config["jnlp-port"])

        return True
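
The Jenkins charm always opens its fixed web UI port (the PORT constant) and conditionally opens a second one: when the jnlp-port config option is set to a positive value, that port is opened for JNLP agents as well.
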
github amazon-archives / aws-service-operator / vendor / k8s.io / kubernetes / cluster / juju / layers / kubernetes-worker / reactive / kubernetes_worker.py (View on GitHub)
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)