How to use the deepdish.io.save function in deepdish

To help you get started, we’ve selected a few deepdish examples based on popular ways deepdish.io.save is used in public projects.

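The basic pattern behind all of these examples is the same: dd.io.save writes a Python dictionary (nested dicts, NumPy arrays, lists, scalars, and pandas objects) to a single HDF5 file, and dd.io.load reads it back. A minimal sketch, with an arbitrary file name:

import numpy as np
import deepdish as dd

# Save a dictionary of arrays and scalars to an HDF5 file
data = {'scores': np.random.rand(100, 10), 'seed': 42, 'name': 'example'}
dd.io.save('example.h5', data)

# Load the whole file back as a dictionary
restored = dd.io.load('example.h5')
assert restored['seed'] == 42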

Example from uchicago-cs/deepdish: deepdish/tools/caffe/tester.py

    # Determine the random seed: use --seed if given, otherwise parse it
    # out of the .caffemodel filename.
    if args.seed is None:
        pattern = re.compile(r'_s(\d+)_')
        m = pattern.search(os.path.basename(args.caffemodel))
        if m:
            seed = int(m.group(1))
        else:
            raise ValueError('Could not automatically determine seed')
    else:
        seed = args.seed
    print('Seed:', seed)

    # Forward pass over all of x; .values() needs list() under Python 3
    scores = list(net.forward_all(data=x).values())[0].squeeze((2, 3))
    yhat = scores.argmax(-1)
    if args.output:
        dd.io.save(args.output, dict(scores=scores, labels=y, name=name, seed=seed))

    success = (yhat == y).mean()
    error = 1 - success

    print('Success: {:.2f}% / Error: {:.2f}%'.format(success * 100, error * 100))
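
Because each dictionary key becomes an HDF5 group or dataset, a saved entry can be read back individually without loading the whole file. A small sketch, assuming the output above went to a hypothetical results.h5:

import deepdish as dd

# Load just the scores array, leaving labels and metadata on disk
scores = dd.io.load('results.h5', '/scores')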

Example from baccuslab/deep-retina: deepretina/core.py

    # store results in this directory
    name = '_'.join([mdl.name, cellname, expt, stim, datetime.now().strftime('%Y.%m.%d-%H.%M')])
    base = f'../results/{name}'
    os.makedirs(base, exist_ok=True)

    # define model callbacks
    cbs = [cb.ModelCheckpoint(os.path.join(base, 'weights-{epoch:03d}-{val_loss:.3f}.h5')),
           cb.TensorBoard(log_dir=base, histogram_freq=1, batch_size=5000, write_grads=True),
           cb.ReduceLROnPlateau(min_lr=0, factor=0.2, patience=10),
           cb.CSVLogger(os.path.join(base, 'training.csv')),
           cb.EarlyStopping(monitor='val_loss', patience=20)]

    # train
    history = mdl.fit(x=data.X, y=data.y, batch_size=bz, epochs=nb_epochs,
                      callbacks=cbs, validation_split=val_split, shuffle=True)
    dd.io.save(os.path.join(base, 'history.h5'), history.history)

    return history
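
Keras's history.history is a plain dict mapping each metric name to a list of per-epoch values, which deepdish serializes directly. A minimal sketch of reading it back, with the path assumed from the snippet above:

import numpy as np
import deepdish as dd

hist = dd.io.load('history.h5')                # metric name -> per-epoch values
best_epoch = int(np.argmin(hist['val_loss']))  # epoch with the lowest validation loss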

Example from geoscixyz/em_examples: em_examples/TDEMInductiveSource.py

    Pex = mesh.getInterpolationMat(meshCore.gridCC, locType="Ex")
    Pey = mesh.getInterpolationMat(meshCore.gridCC, locType="Ey")
    Pez = mesh.getInterpolationMat(meshCore.gridCC, locType="Ez")
    Pfx = mesh.getInterpolationMat(meshCore.gridCC, locType="Fx")
    Pfy = mesh.getInterpolationMat(meshCore.gridCC, locType="Fy")
    Pfz = mesh.getInterpolationMat(meshCore.gridCC, locType="Fz")

    sigma_core = sigma[actinds]
    def getEBJcore(src0):
        B0 = np.r_[Pfx*f[src0, "b"], Pfy*f[src0, "b"], Pfz*f[src0, "b"]]
        E0 = np.r_[Pex*f[src0, "e"], Pey*f[src0, "e"], Pez*f[src0, "e"]]
        J0 = Utils.sdiag(np.r_[sigma_core, sigma_core, sigma_core]) * E0
        return E0, B0, J0

    E, B, J = getEBJcore(src)
    tdem_is = {"E": E, "B": B, "J": J,
               "sigma": sigma_core, "mesh": meshCore.serialize(),
               "time": prb.times}
    dd.io.save(fname, tdem_is)

Example from ejolly/pymer4: pymer4/io.py

                            dtypes = elem.dtypes.to_dict()
                            data_atts_separated[f"list_{i}_cols__{k}"] = cols
                            data_atts_separated[f"list_{i}_idx__{k}"] = idx
                            data_atts_separated[f"list_{i}_vals__{k}"] = vals
                            data_atts_separated[f"list_{i}_dtypes__{k}"] = dtypes
                        else:
                            raise TypeError(f"Value is list but list item is {type(elem)} not pd.DataFrame")
        
        # Combine all attributes into a single dict and save with dd
        model_atts = {}
        model_atts['simple_atts'] = simple_atts
        model_atts['data_atts'] = data_atts_separated
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=FutureWarning)
            warnings.simplefilter("ignore", category=NaturalNameWarning)
            dd.io.save(filepath, model_atts, compression=compression, **kwargs)
        assert os.path.exists(filepath)

        # Now deal with model object in R if needed
        if model.model_obj is not None:
            base.saveRDS(model.model_obj, f"{filename}.rds")
            assert os.path.exists(f"{filename}.rds")
    else:
        raise IOError("filepath must end with .h5 or .hdf5")
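
The compression keyword is forwarded to the underlying HDF5 writer. In the deepdish versions I'm familiar with it accepts a codec name, a (codec, level) tuple, or None to disable compression entirely; treat the exact option set as version-dependent:

import numpy as np
import deepdish as dd

x = {'big': np.zeros((1000, 1000))}
dd.io.save('raw.h5', x, compression=None)            # no compression
dd.io.save('small.h5', x, compression='zlib')        # named codec, default level
dd.io.save('tuned.h5', x, compression=('blosc', 9))  # codec with an explicit level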

Example from ContextLab/quail: quail/egg.py

        egg = {
            'data' : self.data,
            'analysis' : self.analysis,
            'list_length' : self.list_length,
            'n_lists' : self.n_lists,
            'n_subjects' : self.n_subjects,
            'position' : self.position,
            'date_created' : self.date_created,
            'meta' : self.meta
        }

        # if extension wasn't included, add it
        if not fname.endswith('.fegg'):
            fname += '.fegg'

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            dd.io.save(fname, egg, compression=compression)

Example from ContextLab/hypertools: hypertools/datageometry.py

        geo = {
            'reduce' : self.reduce,
            'align' : self.align,
            'normalize' : self.normalize,
            'semantic' : self.semantic,
            'corpus' : np.array(self.corpus) if isinstance(self.corpus, list) else self.corpus,
            'kwargs' : self.kwargs,
            'version' : self.version,
            'dtype' : self.dtype
        }

        # if extension wasn't included, add it
        if not fname.endswith('.geo'):
            fname += '.geo'

        # save
        dd.io.save(fname, geo, compression=compression)

Example from uchicago-cs/deepdish: deepdish/experiments/cnn_boosting/logitboost_caffe.py

def create_weighted_db(X, y, weights, name='regression'):
    X = X.reshape(-1, 3, 32, 32)
    train_fn = os.path.join(DIR, '{}.h5'.format(name))

    dd.io.save(train_fn, dict(data=X,
                              label=y.astype(np.float32),
                              sample_weight=weights), compress=False)
    with open(os.path.join(DIR, '{}.txt'.format(name)), 'w') as f:
        print(train_fn, file=f)
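
The saved file exposes data, label, and sample_weight as top-level HDF5 datasets, which matches the blob names a Caffe HDF5 data layer reads, and the .txt file simply lists the HDF5 paths for that layer. Note the compress=False flag here: in more recent deepdish versions the equivalent spelling is compression=None. A quick sanity check with deepdish itself, path assumed:

import deepdish as dd

db = dd.io.load('regression.h5')  # the file written by create_weighted_db
print(sorted(db.keys()))          # ['data', 'label', 'sample_weight']
print(db['data'].shape)           # (N, 3, 32, 32)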

Example from geoscixyz/em_examples: em_examples/TDEMGroundedSource.py

    Pex = mesh.getInterpolationMat(meshCore.gridCC, locType="Ex")
    Pey = mesh.getInterpolationMat(meshCore.gridCC, locType="Ey")
    Pez = mesh.getInterpolationMat(meshCore.gridCC, locType="Ez")
    Pfx = mesh.getInterpolationMat(meshCore.gridCC, locType="Fx")
    Pfy = mesh.getInterpolationMat(meshCore.gridCC, locType="Fy")
    Pfz = mesh.getInterpolationMat(meshCore.gridCC, locType="Fz")

    sigma_core = sigma[actinds]
    def getEBJcore(src0):
        B0 = np.r_[Pfx*f[src0, "b"], Pfy*f[src0, "b"], Pfz*f[src0, "b"]]
        E0 = np.r_[Pex*f[src0, "e"], Pey*f[src0, "e"], Pez*f[src0, "e"]]
        J0 = Utils.sdiag(np.r_[sigma_core, sigma_core, sigma_core]) * E0
        return E0, B0, J0

    E, B, J = getEBJcore(src)
    tdem_gs = {"E": E, "B": B, "J": J,
               "sigma": sigma_core, "mesh": meshCore.serialize(),
               "time": prb.times - t0, "input_currents": input_currents}
    dd.io.save(fname, tdem_gs)

Example from uchicago-cs/deepdish: deepdish/experiments/cnn_boosting/adaboost_caffe.py

def create_weighted_db(X, y, weights, name='boost'):
    X = X.reshape(-1, 3, 32, 32)
    train_fn = os.path.join(DIR, name + '.h5')

    dd.io.save(train_fn, dict(data=X,
                              label=y.astype(np.float32),
                              sample_weight=weights), compress=False)
    with open(os.path.join(DIR, name + '.txt'), 'w') as f:
        print(train_fn, file=f)