How to use the boltons.fileutils.atomic_save function in boltons

To help you get started, we've selected a few boltons atomic_save examples based on popular ways the function is used in public projects.

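Before the project excerpts below, here is a minimal sketch of the basic pattern (the file name is illustrative). atomic_save is a context manager: it hands you a temporary file next to the destination and atomically renames it over the destination only if the block exits without raising. The handle is opened in binary mode by default, so write bytes.

from boltons.fileutils import atomic_save

# The bytes go to a temporary part file; it is atomically renamed to
# "report.json" only when the block completes successfully.
with atomic_save('report.json') as f:
    f.write(b'{"status": "ok"}\n')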

github hatnote / pacetrack / pacetrack / update.py
               'description': self.description,
               'contacts': self.contacts,
               'wikiproject_name': self.wikiproject_name,
               'campaign_start_date': self.campaign_start_date.isoformat(),
               'campaign_end_date': self.campaign_end_date.isoformat(),
               'date_created': self.date_created.isoformat(),
               'date_updated': datetime.datetime.utcnow().strftime(UPDATED_DT_FORMAT),
               'article_count': len(self.article_title_list),
               'all_results': all_results,
               'goals': [{'name': 'Article', 'slug': 'title'}] + sorted(self.goals, key=lambda s: s['name'])}
        campaign_static_path = STATIC_PATH + 'campaigns/%s/' % self.id
        article_list_html = ASHES_ENV.render('articles.html', ctx)
        article_list_path = campaign_static_path + 'articles.html'
        article_list_json_path = campaign_static_path + 'articles.json'
        mkdir_p(os.path.split(article_list_path)[0])
        with atomic_save(article_list_path) as html_f, atomic_save(article_list_json_path) as json_f:
            html_f.write(article_list_html.encode('utf-8'))
            json.dump(ctx, json_f, indent=2, sort_keys=True)
        return
github GeoscienceAustralia / digitalearthau / digitalearthau / sync / scan.py
    if locations_cache and not cache_is_too_old(locations_cache):
        path_set = dawg.CompletionDAWG()
        log.debug("paths.trie.cache.load", file=locations_cache)
        path_set.load(str(locations_cache))
    else:
        log.info("paths.trie.build")
        path_set = dawg.CompletionDAWG(
            chain(
                collection.all_indexed_uris(),
                collection.all_uris()
            )
        )
        log.info("paths.trie.done")
        if locations_cache is not None:
            log.debug("paths.trie.cache.create", file=locations_cache)
            with fileutils.atomic_save(str(locations_cache)) as f:
                path_set.write(f)
    return path_set
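The caching pattern above leans on atomic_save's failure behavior: if anything inside the with block raises, the partially written file is discarded and the destination is left untouched, so a half-built trie cache never replaces a good one. A small illustrative sketch (the cache path is hypothetical):

import os
from boltons.fileutils import atomic_save

dest = 'trie.cache'  # hypothetical cache path
try:
    with atomic_save(dest) as f:
        f.write(b'partial data...')
        raise RuntimeError('simulated failure mid-write')
except RuntimeError:
    pass

# The destination was never replaced: it either keeps its previous
# contents or, if this was the first attempt, does not exist at all.
print(os.path.exists(dest))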
github GeoscienceAustralia / digitalearthau / datacubenci / sync.py
    if locations_cache and not cache_is_too_old(locations_cache):
        path_set = dawg.CompletionDAWG()
        log.debug("paths.trie.cache.load", file=locations_cache)
        path_set.load(str(locations_cache))
    else:
        log.info("paths.trie.build")
        path_set = dawg.CompletionDAWG(
            chain(
                path_index.iter_all_uris(),
                (path.absolute().as_uri() for path in path_search_root.glob(path_offset_glob))
            )
        )
        log.info("paths.trie.done")
        if locations_cache is not None:
            log.debug("paths.trie.cache.create", file=locations_cache)
            with fileutils.atomic_save(str(locations_cache)) as f:
                path_set.write(f)
    return path_set
github mahmoud / apatite / apatite / cli.py
def normalize(plist, pfile):
    """normalize project and tag order, checking for duplicates
    and format divergences, overwrites the yaml listing"""
    plist.normalize()
    new_yaml = plist.to_yaml()
    # say no to trailing whitespace
    new_yaml = '\n'.join([line.rstrip() for line in new_yaml.splitlines()])
    with atomic_save(pfile) as f:
        f.write(new_yaml.encode('utf8'))
    return
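Because atomic_save opens its handle in binary mode by default, the snippet above encodes the YAML text to UTF-8 before writing. If you would rather write str directly, boltons also accepts a text_mode flag; a sketch with an illustrative file name:

from boltons.fileutils import atomic_save

# text_mode=True opens the temporary file in text mode, so str can be
# written without an explicit .encode('utf8').
with atomic_save('projects.yaml', text_mode=True) as f:
    f.write('- name: example\n  tags: [demo]\n')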
github hatnote / pacetrack / pacetrack / update.py
    def update(self, force=False, _act=None):
        "does it all"
        final_update_log_path = STATIC_PATH + 'campaigns/%s/update.log' % self.id
        _act['name'] = self.name
        _act['id'] = self.id
        _act['log_path'] = final_update_log_path
        now = datetime.datetime.utcnow()
        with atomic_save(final_update_log_path) as f:
            cur_update_sink = build_stream_sink(f)
            old_sinks = tlog.sinks
            tlog.set_sinks(old_sinks + [cur_update_sink])
            try:
                self.load_article_list()
                self.load_latest_state()

                next_fetch = now if not self.latest_state else self.latest_state.timestamp + self.fetch_frequency
                if not force and next_fetch > now:
                    tlog.critical('skip_fetch').success(
                        '{cid} not out of date, skipping until next fetch at {next_fetch}. ',
                        cid=self.id, next_fetch=next_fetch)
                    return

                self.record_state()  # defaults to now
                self.load_latest_state()
github hatnote / pacetrack / pacetrack / update.py
               'campaign_start_date': self.campaign_start_date.isoformat(),
               'campaign_end_date': self.campaign_end_date.isoformat(),
               'date_created': self.date_created.isoformat(),
               'date_updated': datetime.datetime.utcnow().strftime(UPDATED_DT_FORMAT),
               'goals': self.goals,
               'article_count': len(self.article_title_list),
               'start_state_goal': start_state,
               'latest_state_goal': latest_state,
               'combined_state': combined
        }
        campaign_static_path = STATIC_PATH + 'campaigns/%s/' % self.id
        mkdir_p(campaign_static_path)
        report_html = ASHES_ENV.render('campaign.html', ctx)
        report_path = campaign_static_path + 'index.html'
        report_json_path = campaign_static_path + 'campaign.json'
        with atomic_save(report_path) as html_f, atomic_save(report_json_path) as json_f:
            html_f.write(report_html)
            json.dump(ctx, json_f, indent=2, sort_keys=True)
        return
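Note that report_html and the output of json.dump are both str, while the handles above are atomic_save's default binary handles; that is fine on Python 2 (the first pacetrack excerpt also calls .encode('utf-8') explicitly), but on Python 3 a text-mode handle is the simpler fit. A sketch with an illustrative path and payload:

import json
from boltons.fileutils import atomic_save

ctx = {'article_count': 42}  # hypothetical payload
with atomic_save('campaign.json', text_mode=True) as json_f:
    json.dump(ctx, json_f, indent=2, sort_keys=True)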