How to use the execo.log.style function in execo

To help you get started, we've selected a few execo.log.style examples, based on popular ways it is used in public projects.
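In these projects, style comes from the execo.log module: its attributes (style.emph, style.host and style.user2 all appear below) wrap a string in ANSI color codes so that key values stand out in terminal log output. A minimal sketch of typical use, assuming a standard execo install (the hostname and job id are invented for illustration):

    from execo.log import style, logger

    logger.info("Deploying on %s", style.host("node-1"))    # hypothetical hostname
    logger.info("Job %s submitted", style.emph("1234567"))  # hypothetical job id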

From BeyondTheClouds/enos: engine/g5k_engine.py (view on GitHub)
        jobs_specs = [(EX5.OarSubmission(resources=c,
                                         name=self.config["name"]), s)
                      for s, c in criteria.items()]
        logger.info("Criteria for the reservation: %s" % pf(jobs_specs))

        # Make the reservation
        gridjob, _ = EX5.oargridsub(
            jobs_specs,
            reservation_date=self.config['reservation'],
            walltime=self.config['walltime'].encode('ascii', 'ignore'),
            job_type='deploy'
        )

        # TODO - move this upper to not have a side effect here
        if gridjob is not None:
            self.gridjob = gridjob
            logger.info("Using new oargrid job %s" % style.emph(self.gridjob))
        else:
            logger.error("No oar job was created.")
            sys.exit(26)
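Here style.emph highlights the oargrid job id in the success message. A condensed, standalone version of the same pattern (the job id is invented):

    from execo.log import style, logger

    gridjob = 56432  # hypothetical oargrid job id
    logger.info("Using new oargrid job %s" % style.emph(gridjob))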
From mliroz/hadoop_g5k: hadoop_g5k/ecosystem/spark.py (view on GitHub)
            ids_to_kill = []
            for line in proc.stdout.splitlines():
                field = line.split()
                if field[1] in spark_processes:
                    ids_to_kill.append(field[0])

            if ids_to_kill:
                force_kill = True
                ids_to_kill_str = ""
                for pid in ids_to_kill:
                    ids_to_kill_str += " " + pid

                logger.warn(
                    "Killing running Spark processes in host %s" %
                    style.host(h.address.split('.')[0]))

                proc = SshProcess("kill -9" + ids_to_kill_str, h)
                proc.run()

        if force_kill:
            logger.info(
                "Processes from previous hadoop deployments had to be killed")

        self.clean_logs()
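Note the idiom h.address.split('.')[0]: it keeps only the short hostname, dropping the Grid'5000 domain suffix, before passing it to style.host. A standalone sketch (the host is invented):

    from execo import Host
    from execo.log import style, logger

    h = Host("parapluie-3.rennes.grid5000.fr")  # hypothetical host
    logger.warn("Killing running Spark processes in host %s" %
                style.host(h.address.split('.')[0]))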
From mliroz/hadoop_g5k: hadoop_g5k/util.py (view on GitHub)
elif "," in hosts_input:
        # We assume the string is a comma separated list of hosts
        for hstr in hosts_input.split(','):
            h = Host(hstr.rstrip())
            if h not in hosts:
                hosts.append(h)
    elif hosts_input.isdigit():
        # If the input is a number, we assume it is an oargrid_job_id
        hosts = get_oargrid_job_nodes(int(hosts_input))
    else:
        # If none of the previous match, we assume a single-host cluster where
        # the given input is the only host
        hosts = [Host(hosts_input.rstrip())]

    logger.debug('Hosts list: \n%s',
                 ' '.join(style.host(host.address.split('.')[0])
                          for host in hosts))
    return hosts
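The styled short hostnames are joined into a single string and logged at debug level, so the colored host list only appears when verbose logging is enabled.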
From mliroz/hadoop_g5k: hadoop_g5k/ecosystem/hive.py (view on GitHub)
        self.warehouse_dir = config.get("cluster", "hive_warehouse_dir")
        self.metastore_dir = config.get("cluster", "hive_metastore_dir")
        self.local_base_conf_dir = config.get("local", "local_base_conf_dir")

        self.bin_dir = self.base_dir + "/bin"

        # Initialize hosts
        self.hosts = hadoop_cluster.hosts
        self.master = hadoop_cluster.master

        # Store reference to Hadoop cluster and check if mandatory
        self.hc = hadoop_cluster

        logger.info("Hive cluster created in hosts %s."
                    " It is linked to a Hadoop cluster." if self.hc else "",
                    ' '.join([style.host(h.address.split('.')[0])
                              for h in self.hosts]))
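The same short-hostname idiom scales to a whole host list. A reduced sketch (the host names are invented):

    from execo import Host
    from execo.log import style, logger

    hosts = [Host("paranoia-1.nantes.grid5000.fr"),   # hypothetical hosts
             Host("paranoia-2.nantes.grid5000.fr")]
    logger.info("Hive cluster created in hosts %s.",
                ' '.join(style.host(h.address.split('.')[0]) for h in hosts))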
From mliroz/hadoop_g5k: hadoop_g5k/cluster.py (view on GitHub)
        # Store cluster information
        self.hw = hw_manager.make_deployment_hardware()
        self.hw.add_hosts(self.hosts)
        self.master_cluster = self.hw.get_host_cluster(self.master)

        # Create a string to display the topology
        t = {v: [] for v in self.topology.topology.values()}
        for key, value in self.topology.topology.items():
            t[value].append(key.address)
        log_topo = ', '.join([style.user2(k) + ': ' +
                              ' '.join(map(lambda x: style.host(x.split('.')[0]), v))
                              for k, v in t.items()])

        logger.info("Hadoop cluster created with master %s, hosts %s and "
                    "topology %s",
                    style.host(self.master.address),
                    ' '.join([style.host(h.address.split('.')[0])
                              for h in self.hosts]),
                    log_topo)
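Different style attributes can be mixed in one message: style.user2 colors each topology group and style.host the nodes inside it. A standalone sketch (the topology is invented):

    from execo.log import style, logger

    t = {"rack-1": ["paravance-1.rennes.grid5000.fr",   # hypothetical topology
                    "paravance-2.rennes.grid5000.fr"]}
    log_topo = ', '.join(style.user2(k) + ': ' +
                         ' '.join(style.host(x.split('.')[0]) for x in v)
                         for k, v in t.items())
    logger.info("Topology: %s", log_topo)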
From mliroz/hadoop_g5k: hadoop_g5k/util/g5k.py (view on GitHub)
elif "," in hosts_str:
            # We assume the string is a comma separated list of hosts
            for hstr in hosts_str.split(','):
                h = Host(hstr.rstrip())
                if h not in hosts:
                    hosts.append(h)
        elif hosts_str.isdigit():
            # If the input is a number, we assume it is an oargrid_job_id
            hosts = get_oargrid_job_nodes(int(hosts_str))
        else:
            # If none of the previous match, we assume a single-host cluster
            # where the given input is the only host
            hosts = [Host(hosts_str.rstrip())]

        logger.debug('Hosts list: \n%s',
                     ' '.join(style.host(host.address.split('.')[0])
                              for host in hosts))
        return hosts
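This is a near-identical copy of the host-parsing helper from hadoop_g5k/util.py shown above; the style.host usage in the debug output is the same.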
From BeyondTheClouds/enos: 01_deploy_nodes.py (view on GitHub)
def generate_kolla_files(config_vars, kolla_vars, directory):
    # get the static parameters from the config file
    kolla_globals = config_vars
    # add the generated parameters 
    kolla_globals.update(kolla_vars)
    # write to file in the result dir
    globals_path = os.path.join(directory, 'globals.yml')
    with open(globals_path, 'w') as f:
        yaml.dump(kolla_globals, f, default_flow_style=False)

    logger.info("Wrote " + style.emph(globals_path))

    # copy the passwords file
    passwords_path = os.path.join(directory, "passwords.yml")
    os.system("cp %s/passwords.yml %s" % (TEMPLATE_DIR, passwords_path))
    logger.info("Password file is copied to  %s" % (passwords_path))
    
    # admin openrc 
    admin_openrc_path = os.path.join(directory, 'admin-openrc')
    admin_openrc_vars = {
        'keystone_address': kolla_vars['kolla_internal_vip_address']
    }
    render_template('templates/admin-openrc.jinja2', admin_openrc_vars, admin_openrc_path)
    logger.info("admin-openrc generated in %s" % (admin_openrc_path))
From BeyondTheClouds/enos: kolla-g5k.py (view on GitHub)
def generate_inventory(roles, base_inventory, dest):
    """
    Generate the inventory.
    It will generate a group for each role in roles and
    concatenate them with the base_inventory file.
    The generated inventory is written to dest
    """
    with open(dest, 'w') as f:
        f.write(to_ansible_group_string(roles))
        with open(base_inventory, 'r') as a:
            for line in a:
                f.write(line)

    logger.info("Inventory file written to " + style.emph(dest))