How to use the simplejson.dump function in simplejson

To help you get started, we’ve selected a few simplejson.dump examples, drawn from popular ways the function is used in public projects.

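In its simplest form, simplejson.dump(obj, fp, **kwargs) serializes obj as JSON and writes it to the file-like object fp, in contrast to simplejson.dumps, which returns a string. A minimal sketch (the filename is illustrative):

import simplejson

data = {"name": "example", "values": [1, 2, 3]}
with open("data.json", "w") as fp:  # illustrative filename
    simplejson.dump(data, fp, indent=2, sort_keys=True)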

github Denvi / FlatCAM / FlatCAMApp.py
        self.options_read_form()

        # Serialize the whole project
        d = {"objs": [obj.to_dict() for obj in self.collection.get_list()],
             "options": self.options,
             "version": self.version}

        # Open file
        try:
            f = open(filename, 'w')
        except IOError:
            App.log.error("[error] Failed to open file for saving: %s", filename)
            return

        # Write
        json.dump(d, f, default=to_dict)
        # try:
        #     json.dump(d, f, default=to_dict)
        # except Exception, e:
        #     print str(e)
        #     App.log.error("[error] File open but failed to write: %s", filename)
        #     f.close()
        #     return

        f.close()

        self.inform.emit("Project saved to: %s" % filename)
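Here default=to_dict tells simplejson what to do with objects it cannot serialize natively: the callable is invoked on each such object and must return something JSON-serializable. A minimal sketch of such a fallback, assuming (as FlatCAM does) that objects expose a to_dict() method:

import simplejson as json

def to_dict(obj):
    # Assumed interface: objects provide their own dict representation.
    if hasattr(obj, "to_dict"):
        return obj.to_dict()
    raise TypeError("%r is not JSON serializable" % obj)

with open("project.json", "w") as f:  # illustrative filename
    json.dump({"objs": []}, f, default=to_dict)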
github dersphere / plugin.audio.radio_de / resources / lib / file_manager.py
def __write_file(self):
        self.log('__write_file started')
        if not os.path.isdir(self.file_path):
            os.makedirs(self.file_path)
        full_name = os.path.join(self.file_path, self.file_name)
        json.dump(self.__file, open(full_name, 'w'), indent=1)
        self.log('__write_file finished')
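Passing open(full_name, 'w') directly into json.dump works, but it leaves closing the handle to garbage collection. A sketch of the same write with an explicit context manager (names as in the example above):

with open(full_name, 'w') as f:
    json.dump(self.__file, f, indent=1)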
github turbulenz / turbulenz_tools / turbulenz_tools / tools / json2json.py
        LOG.info("Processing:%03i:%s", i + 1, f)
        try:
            with open(f, 'r') as source:
                j = json_load(source)
                if isinstance(j, dict):
                    merged = merge_dictionaries(j, merged)
                else:
                    merged = j
        except IOError as e:
            LOG.error("Failed processing: %s", f)
            LOG.error('  >> %s', e)
    try:
        with open(output_filename, 'w') as target:
            LOG.info("Writing:%s", output_filename)
            json_encoder.FLOAT_REPR = float_to_string
            json_dump(merged, target, sort_keys=True, separators=(',', ':'))
    except IOError as e:
        LOG.error('Failed processing: %s', output_filename)
        LOG.error('  >> %s', e)
    else:
        if output_metrics:
            log_metrics(merged)
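separators=(',', ':') strips the spaces simplejson otherwise emits after item and key separators, producing the most compact encoding. For example:

import simplejson

payload = {"a": 1, "b": [2, 3]}
print(simplejson.dumps(payload))                         # {"a": 1, "b": [2, 3]}
print(simplejson.dumps(payload, separators=(',', ':')))  # {"a":1,"b":[2,3]}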
github noironetworks / aci-containers / cni / opflex_agent_cni.py
    for p in ['CNI_CONTAINERID', 'CNI_IFNAME', 'CNI_NETNS']:
        if p not in os.environ:
            handleError("Missing required environment variable",
                        errors['PARAM'], p)

    for p in ['name', 'ipam']:
        if p not in netconfig:
            handleError("Missing required configuration parameter",
                        errors['PARAM'], p)

    if "type" not in netconfig['ipam']:
        handleError("Missing required IPAM configuration parameter",
                    errors['PARAM'], "type")

    ipamresult = executeIPAM(netconfig)
    json.dump(ipamresult, sys.stdout, indent=2, sort_keys=True)
    print('')
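Because the target only needs to be a writable file-like object, the plugin can report its result by dumping straight to sys.stdout. A minimal sketch with an illustrative payload:

import sys
import simplejson as json

ipamresult = {"ips": [], "routes": []}  # illustrative payload
json.dump(ipamresult, sys.stdout, indent=2, sort_keys=True)
print('')  # trailing newline after the JSON document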
github eudicots / Cactus / cactus.py
def write(self):
		json.dump(self._data, open(self.path, 'w'), sort_keys=True, indent=4)
github pediapress / mwlib / mwlib / siteinfo / fetch_siteinfo.py
def fetch(lang):
    url = 'http://%s.wikipedia.org/w/api.php?action=query&meta=siteinfo&siprop=general|namespaces|namespacealiases|magicwords|interwikimap&format=json' % lang
    print('fetching %r' % url)
    data = urllib.request.urlopen(url).read()
    fn = 'siteinfo-%s.json' % lang
    print('writing %r' % fn)
    data = json.loads(data)['query']
    with open(fn, 'w') as f:  # text mode: json.dump writes str, not bytes
        json.dump(data, f, indent=4, sort_keys=True)
github qcl / master-research / prefix_tree_model / modelBuiler.py
t["_rls_"].append(relationship)
            if len(t["_rls_"]) > max_len:
                max_len = len(t["_rls_"])
                max_rls = t["_rls_"]
                max_ptn = ws
        else:
            t["_rls_"] = [relationship]
            t["_rid_"] = rid
            rid += 1
        t["_ptn_"] = l[:-1]

    g.close()
f.close()

h = open("./patternTree.json","w")
json.dump(tree, h)
h.close()

# some testing code here.
print(pos)
print(count, dup)
print(tree["has"]["released"]["on"]["_rls_"])
print(max_len, max_ptn, max_rls)
print(tree.keys())
print(len(tree.keys()))
print(rid)
github forcedotcom / distributions / distributions / io / stream.py
def json_stream_dump(stream, filename, **kwargs):
    kwargs['separators'] = (',', ':')
    stream = iter(stream)
    with open_compressed(filename, 'w') as f:
        f.write('[')
        try:
            item = next(stream)
            f.write('\n')
            simplejson.dump(item, f, **kwargs)
            for item in stream:
                f.write(',\n')
                simplejson.dump(item, f, **kwargs)
        except StopIteration:
            pass
        f.write('\n]')
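The function above streams an iterable out as a JSON array, calling simplejson.dump once per element so the whole sequence never has to fit in memory at once. A hypothetical usage with a generator, assuming open_compressed picks a codec from the file extension:

def squares(n):
    for i in range(n):
        yield {"i": i, "square": i * i}

json_stream_dump(squares(1000), "squares.json.gz")  # illustrative filename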
github harrystech / arthur-redshift-etl / python / etl / s3.py
def upload_data_to_s3(data: dict, bucket_name: str, object_key: str) -> None:
    """
    Write data object (formatted as JSON, readable as YAML) into an S3 object.

    Although we generally support YAML because it allows adding comments, we prefer
    the format and formatting of JSON.
    """
    uploader = S3Uploader(bucket_name)
    with tempfile.NamedTemporaryFile(mode="w+") as local_file:
        json.dump(data, local_file, indent="    ", sort_keys=True, cls=FancyJsonEncoder)
        local_file.write("\n")  # type: ignore
        local_file.flush()
        uploader(local_file.name, object_key)
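Note that simplejson accepts a string for indent (here four spaces), not just an integer, and that cls= swaps in a JSONEncoder subclass. FancyJsonEncoder is this project's own class; a minimal sketch of the pattern, with a hypothetical rule for datetimes:

import datetime
import simplejson as json

class FancyJsonEncoder(json.JSONEncoder):
    # Hypothetical rule: render datetime objects as ISO 8601 strings.
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        return super().default(obj)

print(json.dumps({"at": datetime.datetime(2021, 1, 1)}, cls=FancyJsonEncoder))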
github harrystech / arthur-redshift-etl / python / etl / design / bootstrap.py
"""
    # Validate before writing to make sure we don't drift between bootstrap and JSON schema.
    etl.design.load.validate_table_design(table_design, target_table_name)

    # FIXME Move this logic into file sets (note that "source_name" is in table_design)
    filename = os.path.join(source_dir, "{}-{}.yaml".format(source_table_name.schema, source_table_name.table))
    this_table = target_table_name.identifier
    if dry_run:
        logger.info("Dry-run: Skipping writing new table design file for '%s'", this_table)
    elif os.path.exists(filename) and not overwrite:
        logger.warning("Skipping writing new table design for '%s' since '%s' already exists", this_table, filename)
    else:
        logger.info("Writing new table design file for '%s' to '%s'", this_table, filename)
        # We use JSON pretty printing because it is prettier than YAML printing.
        with open(filename, 'w') as o:
            json.dump(table_design, o, indent="    ", item_sort_key=make_item_sorter())
            o.write('\n')
        logger.debug("Completed writing '%s'", filename)
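item_sort_key is a simplejson extension that the standard library's json lacks: where sort_keys only sorts keys alphabetically, item_sort_key takes a callable applied to each (key, value) pair and orders items by its return value. make_item_sorter is this project's own helper; a minimal sketch of the idea, with an assumed priority ordering:

import simplejson as json

def make_item_sorter():
    # Assumed rule: a few well-known keys first, the rest alphabetically.
    order = {"name": 0, "description": 1, "columns": 2}
    def sort_key(item):
        key, _value = item
        return (order.get(key, len(order)), key)
    return sort_key

doc = {"columns": [], "zeta": 1, "name": "example"}
print(json.dumps(doc, indent="    ", item_sort_key=make_item_sorter()))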