How to use the execo.time_utils.format_date function in execo

To help you get started, we've selected a few examples of execo.time_utils.format_date, based on popular ways the function is used in public projects.

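Before the project examples, here is a minimal sketch of the function itself. format_date takes a unix timestamp and returns it as a human-readable date string; the exact output shown in the comments is illustrative and may vary across execo versions.

import time

from execo.time_utils import format_date

# A unix timestamp in, a formatted date string out. The 12600 second
# offset (3.5 hours) mirrors the first example below.
now = time.time()
print(format_date(now))          # e.g. '2016-07-21 14:32:00'
print(format_date(now + 12600))  # same format, 3.5 hours later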

github BeyondTheClouds/enos, tests/functionnal/jenkins/common.py (view on GitHub)
import logging
import random
import time

import execo as ex
import execo_g5k as ex5

# JOB_NAME, WALLTIME and the excluded list are module-level settings
# defined elsewhere in the original file.

def make_reservation(job_name=JOB_NAME, job_type='allow_classic_ssh'):
    plan = ex5.planning
    # Look for a slot between now and 3.5 hours (12600 s) from now;
    # format_date turns the unix timestamp into the date string that
    # the planning API expects.
    end = ex.time_utils.format_date(time.time() + 12600)

    logging.basicConfig(level=logging.DEBUG)
    oargrid_job_id, _ = ex5.planning.get_job_by_name(job_name)
    if oargrid_job_id is None:
        logging.info("Starting a new job")
        planning = plan.get_planning(endtime=end)
        slots = plan.compute_slots(planning, walltime=WALLTIME,
                                   excluded_elements=excluded)
        startdate, enddate, resources = plan.find_free_slot(slots, {'grid5000': 1})
        logging.info("startdate = %s, enddate = %s, resources = %s"
                     % (startdate, enddate, resources))
        resources = plan.distribute_hosts(resources, {'grid5000': 1},
                                          excluded_elements=excluded)
        # Shuffle to balance the load across nodes.
        random.shuffle(resources)
        specs = plan.get_jobs_specs(resources, excluded_elements=excluded)
        spec, frontend = specs[0]
        spec.name = job_name
        logging.info("specs = %s" % spec)
github mliroz/hadoop_g5k, hadoop_g5k/engine/engine.py (view on GitHub)
        # Excerpt from a reservation method: the enclosing class provides
        # self.options, self.cluster, self.use_kadeploy and self._get_nodes.
        # format_date, get_seconds and timedelta_to_seconds come from
        # execo.time_utils; get_jobs_specs and oarsub from the execo_g5k package.
        logger.info('Performing reservation')
        now = int(time.time() + timedelta_to_seconds(datetime.timedelta(minutes=1)))
        starttime = now
        endtime = int(starttime + timedelta_to_seconds(datetime.timedelta(days=3,
                                                                 minutes=1)))
        startdate, n_nodes = self._get_nodes(starttime, endtime)

        search_time = 3 * 24 * 60 * 60  # 3 days
        walltime_seconds = get_seconds(self.options.walltime)

        iteration = 0
        while not n_nodes:
            iteration += 1
            logger.info('Not enough nodes found between %s and %s, ' +
                        'increasing time window',
                        format_date(starttime), format_date(endtime))
            starttime = max(now, now + iteration * search_time - walltime_seconds)
            endtime = int(now + (iteration + 1) * search_time)

            startdate, n_nodes = self._get_nodes(starttime, endtime)
            if starttime > int(time.time() + timedelta_to_seconds(
                                            datetime.timedelta(weeks=6))):
                logger.error('There are not enough nodes on %s for your ' +
                             'experiments, abort ...', self.cluster)
                exit()

        jobs_specs = get_jobs_specs({self.cluster: n_nodes},
                                    name=self.__class__.__name__)
        sub = jobs_specs[0][0]
        sub.walltime = self.options.walltime
        if self.use_kadeploy:
            sub.additional_options = '-t deploy'
        else:
            sub.additional_options = '-t allow_classic_ssh'
        sub.reservation_date = startdate
        (self.oar_job_id, self.frontend) = oarsub(jobs_specs)[0]
        logger.info('Startdate: %s, n_nodes: %s, job_id: %s', format_date(startdate),
                    str(n_nodes), str(self.oar_job_id))
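In this snippet format_date is used purely for logging: the reservation logic itself works on integer timestamps, and format_date only appears when the search window and final start date are reported to the user. Below is a condensed, self-contained sketch of that expanding-window search, with the node lookup stubbed out as a caller-supplied get_nodes callable (hypothetical; the original uses self._get_nodes):

import time

from execo.time_utils import format_date

SEARCH_TIME = 3 * 24 * 60 * 60        # widen the window by 3 days per try
MAX_LOOKAHEAD = 6 * 7 * 24 * 60 * 60  # give up after 6 weeks

def find_window(get_nodes, walltime_seconds):
    now = int(time.time())
    starttime, endtime = now, now + SEARCH_TIME
    iteration = 0
    while True:
        startdate, n_nodes = get_nodes(starttime, endtime)
        if n_nodes:
            return startdate, n_nodes
        iteration += 1
        print('Not enough nodes between %s and %s, widening the window'
              % (format_date(starttime), format_date(endtime)))
        starttime = max(now, now + iteration * SEARCH_TIME - walltime_seconds)
        endtime = now + (iteration + 1) * SEARCH_TIME
        if starttime > now + MAX_LOOKAHEAD:
            raise RuntimeError('no free slot found within 6 weeks')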