How to use the ujson.load function in ujson

To help you get started, we've selected a few ujson.load examples based on popular ways it is used in public projects.

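Before the project excerpts, here is a minimal, self-contained sketch of the pattern they all share: ujson.load takes a readable file-like object, parses its JSON contents, and returns plain Python objects (dicts, lists, strings, numbers, booleans, None). The file name and keys in this sketch are placeholders, not taken from any of the projects below.

import ujson

# Parse JSON from a file-like object (placeholder file name).
with open("config.json", "r") as fh:
    config = ujson.load(fh)

# ujson.loads does the same for an in-memory string.
defaults = ujson.loads('{"retries": 3, "verbose": true}')

print(type(config), defaults["retries"])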

github luispedro / BuildingMachineLearningSystemsWithPython / ch05 / chose_instances.py
try:
    import enchant
    # Assumed setup: the original import block is not shown in this excerpt,
    # so this try clause is a sketch; pyenchant's Dict is the usual choice.
    speller = enchant.Dict("en_US")
except:
    print("""\
Enchant is not installed, which is not a problem since spell correction features
will not be used in the chapter. If, however, you want to experiment with them
(highly encouraged!), you can get the library from http://packages.python.org/pyenchant/.
""")
    class EnchantMock:
        def __init__(self):
            pass
        def check(self, word):
            return True
    speller = EnchantMock()

import ujson as json  # assumed alias; the import is not shown in this excerpt
from data import chosen, chosen_meta, filtered, filtered_meta

filtered_meta = json.load(open(filtered_meta, "r"))



def misspelled_fraction(p):
    tokens = p.split()
    if not tokens:
        return 0.0
    return 1 - float(sum(speller.check(t) for t in tokens)) / len(tokens)


def data(filename, col=None):
    for line in open(filename, "r"):
        data = line.strip().split("\t")

        # check format
        Id, ParentId, IsAccepted, TimeToAnswer, Score, Text, NumTextTokens, NumCodeLines, LinkCount, NumImages = data

github badgeteam / ESP32-platform-firmware / firmware / python_modules / sha2017 / woezel.py
def get_pkg_metadata(name):
    f = url_open("https://badge.team/eggs/get/%s/json" % name)
    try:
        return json.load(f)
    finally:
        f.close()

github IsaacChanghau / SequenceToSequence / utils / data_utils.py
def load_data(filename):
    if filename.endswith(".json"):
        with codecs.open(filename, mode='r', encoding='utf-8', errors='ignore') as f:
            data = ujson.load(f)
        return data
    elif filename.endswith(".pkl"):
        with codecs.open(filename, mode='rb') as f:
            data = pickle.load(f)
        return data
    else:
        raise ValueError("ERROR: Unknown file extension, only support `.json` and `.pkl` formats!!!")

github openplotter / openplotter / classes / nodes.py
def read_flow(self):
    try:
        with open(self.flows_file) as data_file:
            data = ujson.load(data_file)
        return data
    except:
        print("ERROR reading flows file")
        return []
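The bare except above also hides typos and unrelated failures. When the concern is only a missing or corrupt flows file, the handler can be narrowed, since ujson.load raises ValueError on malformed JSON. A sketch of a narrower variant, with an invented path:

import ujson

def read_flow(path):
    # Distinguish a missing file from malformed JSON; let anything else propagate.
    try:
        with open(path) as data_file:
            return ujson.load(data_file)
    except FileNotFoundError:
        print("flows file %s not found" % path)
    except ValueError:
        print("flows file %s is not valid JSON" % path)
    return []

flows = read_flow("flows.json")  # placeholder path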

github pycom / pycom-micropython-sigfox / tools / upip.py
def get_pkg_metadata(name):
    for url in index_urls:
        try:
            f = url_open("%s/%s/json" % (url, name))
        except NotFoundError:
            continue
        try:
            return json.load(f)
        finally:
            f.close()
    raise NotFoundError("Package not found")
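The woezel excerpt earlier and the upip excerpt above hand the stream returned by url_open straight to json.load and close it in a finally block; in those modules, json is presumably ujson under another name, which is why they appear on this page. ujson.load accepts any object with a read() method, so the same shape works in CPython; here io.StringIO stands in for the HTTP response:

import io
import ujson

def get_pkg_metadata(stream):
    # ujson.load reads the whole stream and parses it in one step.
    try:
        return ujson.load(stream)
    finally:
        stream.close()

# Stand-in stream instead of a real HTTP response.
fake_response = io.StringIO('{"name": "example-pkg", "releases": {"1.0.0": []}}')
meta = get_pkg_metadata(fake_response)
print(meta["name"])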

github defconfurs / dc26-fur-scripts / animations / __init__.py
def __init__(self):
    fh = open(self.path, "r")
    self.framenum = 0
    self.js = ujson.load(fh)
    self.intensity = bytearray([0, 2, 3, 4, 6, 9, 12, 17, 24, 34, 47, 66, 92, 130, 182, 255])
    fh.close()
    self.draw()

github bluesky / databroker / metadataservice / schema / validate.py
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from pkg_resources import resource_filename as rs_fn
import ujson


SCHEMA_PATH = 'schema'
SCHEMA_NAMES = {'run_start': 'run_start.json',
                'run_stop': 'run_stop.json',
                'event': 'event.json',
                'descriptor': 'event_descriptor.json'}
fn = '{}/{{}}'.format(SCHEMA_PATH)
schemas = {}
for name, filename in SCHEMA_NAMES.items():
    with open(rs_fn('metadataservice', resource_name=fn.format(filename))) as fin:
        schemas[name] = ujson.load(fin)

github NLPLearn / QANet / main.py
def demo(config):
    with open(config.word_emb_file, "r") as fh:
        word_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.char_emb_file, "r") as fh:
        char_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.test_meta, "r") as fh:
        meta = json.load(fh)

    model = Model(config, None, word_mat, char_mat, trainable=False, demo = True)
    demo = Demo(model, config)

github IsaacChanghau / AmusingPythonCodes / rnet / main.py
def train(config):
    with open(config.word_emb_file, "r") as fh:
        word_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.char_emb_file, "r") as fh:
        char_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.train_eval_file, "r") as fh:
        train_eval_file = json.load(fh)
    with open(config.dev_eval_file, "r") as fh:
        dev_eval_file = json.load(fh)
    with open(config.dev_meta, "r") as fh:
        meta = json.load(fh)

    dev_total = meta["total"]

    print("Building model...")
    parser = get_record_parser(config)
    train_dataset = get_batch_dataset(config.train_record_file, parser, config)
    dev_dataset = get_dataset(config.dev_record_file, parser, config)
    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(handle, train_dataset.output_types, train_dataset.output_shapes)
    train_iterator = train_dataset.make_one_shot_iterator()
    dev_iterator = dev_dataset.make_one_shot_iterator()
    # create model
    model = Model(config, iterator, word_mat, char_mat)

    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = True
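The QANet and R-Net excerpts both read pre-computed embedding matrices by deserializing a JSON array of arrays and converting it with numpy; as above, json here is presumably ujson imported under that name. A stripped-down sketch of just that step, with an invented file path:

import numpy as np
import ujson

# word_emb.json (placeholder path) is expected to hold a list of equal-length
# float lists, e.g. [[0.1, 0.2], [0.3, 0.4]]; numpy turns it into a 2-D matrix.
with open("word_emb.json", "r") as fh:
    word_mat = np.array(ujson.load(fh), dtype=np.float32)

print(word_mat.shape, word_mat.dtype)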

github mitll / TweetE / pyTweet / pyTweet / breadth_first_sampling.py
    3. next_user_list - List of users to collect on next hop
    4. added_topics_for_cur_hop - Topics added from current hop (if relevant to sampling method)
    5. unavailable_accounts - List of unavailable accounts
    6. finished_users - Users that have already been collected

    :param user_dir: Directory where profile information is saved
    :return place_saver_obj: Python dictionary of aforementioned fields
    """
    # Load object
    try:
        jfid = open(os.path.join(user_dir, 'place_saver_v1.txt'))
        place_saver_obj = ujson.load(jfid)
        jfid.close()
    except ValueError:
        jfid = open(os.path.join(user_dir, 'place_saver_v2.txt'))
        place_saver_obj = ujson.load(jfid)
        jfid.close()
    except IOError:
        print("The object 'place_saver' does not exist, creating it now")
        place_saver_obj = {}
    # Make all necessary fields in case they don't already exist
    if 'cur_user_list' not in place_saver_obj.keys():
        place_saver_obj['cur_user_list'] = set([])
    if 'next_user_list' not in place_saver_obj.keys():
        place_saver_obj['next_user_list'] = set([])
    if 'cur_hop' not in place_saver_obj.keys():
        place_saver_obj['cur_hop'] = 0
    if 'added_topics_for_cur_hop' not in place_saver_obj.keys():
        place_saver_obj['added_topics_for_cur_hop'] = set([])
    if 'unavailable_accounts' not in place_saver_obj.keys():
        place_saver_obj['unavailable_accounts'] = set([])
    if 'finished_users' not in place_saver_obj.keys():