How to use the joblib.load function in joblib

To help you get started, we've selected a few examples of joblib.load drawn from popular ways it is used in public projects.

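All of the snippets below revolve around the same round trip: joblib.dump writes a Python object to disk and joblib.load reads it back. As a minimal sketch before the project examples (the file name and the object here are arbitrary placeholders):

import joblib

model = {"weights": [0.1, 0.2, 0.3], "bias": 0.5}  # any picklable object works

joblib.dump(model, "model.pkl")      # persist to disk
restored = joblib.load("model.pkl")  # read it back
assert restored == model

joblib.load also accepts an open binary file object in place of a file name, and an optional mmap_mode argument for memory-mapping arrays stored in the file (more on that at the end).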

From arthurmensch/cogspaces (sandbox/exps_old/plot_maps.py):
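In this plotting script, load (imported top-level via "from joblib import load") pulls pickled results back into memory: a small helper reads a precomputed rec.pkl, and each experiment directory yields a fitted estimator and transformer whose coefficients are projected back into brain maps.
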
from os.path import join

import matplotlib.pyplot as plt
from joblib import load
from nilearn.image import iter_img
from nilearn.input_data import MultiNiftiMasker

# get_output_dir and fetch_mask are cogspaces helpers; the exact import
# path may differ across versions of the project.
from cogspaces.datasets.utils import fetch_mask, get_output_dir


def load_rec():
    return load(join(get_output_dir(), 'benchmark', 'rec.pkl'))


# compute_rec()

exp_dirs = join(get_output_dir(), 'single_exp', '17')
models = []
rec = load_rec()
mask_img = fetch_mask()
masker = MultiNiftiMasker(mask_img=mask_img).fit()

for exp_dir in [exp_dirs]:
    estimator = load(join(exp_dir, 'estimator.pkl'))
    transformer = load(join(exp_dir, 'transformer.pkl'))
    for coef, (dataset, sc), (_, lbin) in zip(estimator.coef_,
                                              transformer.scs_.items(),
                                              transformer.lbins_.items()):
        print(dataset)
        classes = lbin.classes_
        print(classes)
        coef /= sc.scale_
        coef_rec = coef.dot(rec)
        # coef_rec -= np.mean(coef_rec, axis=0)
        print(join(exp_dir, 'maps_%s.nii.gz' % dataset))
        imgs = masker.inverse_transform(coef_rec)
        imgs.to_filename(join(exp_dir, 'maps_%s.nii.gz' % dataset))
        fig, axes = plt.subplots(len(classes) // 4, 4,
                                 figsize=(24, len(classes) // 4 * 3))
        axes = axes.ravel()
        for ax, img, this_class in zip(axes, iter_img(imgs), classes):
            this_class = this_class.replace('_', ' ')
            this_class = this_class.replace('&', ' ')

From m-doru/Facial-based-authentication-system (src/faceSpoofDetection/SVMclassifier/idiap_classifier.py):
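Here joblib.load restores a previously trained scikit-learn SVM from disk when a saved classifier file exists; the commented-out joblib.dump call is the other half of the pattern, persisting the classifier right after fitting.
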
        clf = svm.SVC(
            verbose=True,
            probability=True,
            C=0.0001,
            kernel="rbf",
            gamma=0.001,
            class_weight="balanced",
        )
        clf.fit(train_features, train_labels)

        # print("Best estimator found by grid search:")
        # print(clf.best_estimator_)

        # joblib.dump(clf, saved_classifier_filename)
    else:
        clf = joblib.load(saved_classifier_filename)

    # (test_features, test_labels) = get_test_features_and_labels(load_test_features)

    test_labels_bin = label_binarize(test_labels, classes=[-1, 1])

    pred_labels = clf.predict(test_features)

    pred_confidences = clf.predict_proba(test_features)

    plot_roc_curve(test_labels_bin, pred_confidences)

    from sklearn.metrics import (
        roc_curve,
        accuracy_score,
        confusion_matrix,
        roc_auc_score,
    )

From PPPLDeepLearning/plasma-python (plasma/models/shallow_runner.py):
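This runner follows the same fit-or-reload pattern: when no model file exists, a scikit-learn model (here the MLPClassifier branch of a model-type switch) is fitted and persisted with joblib.dump; otherwise joblib.load restores it and training is skipped.
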
        elif model_conf['type'] == 'mlp':
            hidden_layer_sizes = tuple(reversed(
                [model_conf['final_hidden_layer_size']*2**x
                 for x in range(model_conf['num_hidden_layers'])]))
            model = MLPClassifier(
                hidden_layer_sizes=hidden_layer_sizes,
                learning_rate_init=model_conf['learning_rate_mlp'],
                alpha=model_conf['mlp_regularization'])
        else:
            print("Unkown model type, exiting.")
            exit(1)
        model.fit(X, Y)
        joblib.dump(model, model_path)
        print("Fit model in {} seconds".format(time.time()-start_time))
    else:
        model = joblib.load(model_path)
        print("model exists.")

    Y_pred = model.predict(X)
    print("Train")
    print(classification_report(Y, Y_pred))
    Y_predv = model.predict(Xv)
    print("Validate")
    print(classification_report(Yv, Y_predv))
    if ('monitor_test' in conf['callbacks'].keys()
            and conf['callbacks']['monitor_test']):
        times = conf['callbacks']['monitor_times']
        roc_areas, losses = make_predictions_and_evaluate_multiple_times(
            conf, shot_list_validate, loader, times)
        for roc, t in zip(roc_areas, times):
            print('val_roc_{} = {}'.format(t, roc))
        if shot_list_test is not None:
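
Persisting the fitted model this way means repeated invocations skip straight to prediction and evaluation; only model_path has to stay stable between runs.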

From varunagrawal/tiny-faces-pytorch (utils/cluster.py):
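This clustering utility caches an expensive pairwise distance matrix: if the .jbl file already exists it is reloaded with joblib.load, otherwise the distances are computed once and written with joblib.dump using compression.
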
from datetime import datetime
from pathlib import Path

import joblib
import numpy as np
from pyclustering.cluster.kmedoids import kmedoids

# centralize_bbox and compute_distances are helpers defined elsewhere
# in utils/cluster.py.


def compute_kmedoids(bboxes, cls, option='pyclustering', indices=15,
                     max_clusters=35, max_limit=5000):
    print("Performing clustering using", option)
    clustering = [{} for _ in range(indices)]

    bboxes = centralize_bbox(bboxes)

    # subsample the number of bounding boxes so that it can fit in memory and is faster
    if bboxes.shape[0] > max_limit:
        sub_ind = np.random.choice(np.arange(bboxes.shape[0]), size=max_limit, replace=False)
        bboxes = bboxes[sub_ind]

    distances_cache = Path('distances_{0}.jbl'.format(cls))
    if distances_cache.exists():
        print("Loading distances")
        dist = joblib.load(distances_cache)
    else:
        dist = compute_distances(bboxes)
        joblib.dump(dist, distances_cache, compress=5)

    if option == 'pyclustering':
        for k in range(indices, max_clusters+1):
            print(k, "clusters")

            initial_medoids = np.random.choice(bboxes.shape[0], size=k, replace=False)

            kmedoids_instance = kmedoids(dist, initial_medoids, ccore=True, data_type='distance_matrix')

            print("Running KMedoids")
            t1 = datetime.now()
            kmedoids_instance.process()
            dt = datetime.now() - t1
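
The exists-then-load-else-compute-and-dump logic above is common enough to factor out. Here is a minimal sketch of that caching pattern; cached and its arguments are illustrative names, not part of joblib:

from pathlib import Path

import joblib


def cached(cache_path, compute, compress=3):
    # Return the value stored at cache_path, computing and dumping it on a miss.
    cache_path = Path(cache_path)
    if cache_path.exists():
        return joblib.load(cache_path)
    value = compute()
    # compress trades CPU time for smaller files, as in the snippet above.
    joblib.dump(value, cache_path, compress=compress)
    return value

As in the original snippet, dump and load both accept a pathlib.Path. A call such as cached('distances.jbl', lambda: compute_distances(bboxes), compress=5) would reproduce the caching behaviour above.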

From undertheseanlp/word_tokenize (egs/vlsp2013_crf/analyze_full.py):
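In this evaluation script, joblib.load restores a pickled feature transformer at import time, alongside a pycrfsuite tagger, before the test set is transformed and scored.
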
from os.path import join, dirname
import time
import joblib
import pycrfsuite
from sklearn_crfsuite import metrics

from load_data import load_dataset


transformer = joblib.load(join(dirname(__file__), "final_model", "transformer.bin"))
path = join(dirname(__file__), "final_model", "model.bin")
estimator = pycrfsuite.Tagger()
estimator.open(path)

test_set = load_dataset(join(dirname(dirname(dirname(__file__))),
                             "data", "vlsp2016", "corpus", "test.txt"))
X_test, y_test = transformer.transform(test_set)
start = time.time()
y_pred = [estimator.tag(x) for x in X_test]
end = time.time()
test_time = end - start
f1_test_score = metrics.flat_f1_score(y_test, y_pred, average='weighted')
print("F1 score: ", f1_test_score)
print("Test time: ", test_time)
with open("report_full.txt", "w") as f:
    f.write("F1 score: " + str(f1_test_score) + "\n" + "Test time: " + str(test_time))

From arthurmensch/cogspaces (exps/analyse/plot_maps.py):
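A later revision of the cogspaces plotting code: load brings a pickled estimator back into memory so that its learned components can be returned as images or as plain arrays.
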
from os.path import join

from joblib import load

# get_module, get_dictionary and get_masker are helpers defined
# elsewhere in the cogspaces analysis code.


def get_components(output_dir, dl=False, return_type='img'):
    if dl:
        estimator = load(join(output_dir, 'estimator.pkl'))
        components = estimator.components_dl_
    else:
        module = get_module(output_dir)
        components = module.embedder.linear.weight.detach().numpy()

    if return_type in ['img', 'arrays_full']:
        dictionary = get_dictionary()
        components_full = components.dot(dictionary)
        if return_type == 'img':
            masker = get_masker()
            components_img = masker.inverse_transform(components_full)
            return components_img
        elif return_type == 'arrays_full':
            components_full = components.dot(dictionary)
            return components_full
    elif return_type == 'arrays':

From etzinis/two_step_mask_learning (two_step_mask_learning/dnn/dataset_loader/torch_dataloader.py):
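A defensive wrapper around joblib.load: any failure while reading the file is surfaced as an IOError that names the offending path.
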
import joblib


def load_item_file(path):
    try:
        loaded_file = joblib.load(path)
    except Exception as e:
        # Surface any unpickling or IO failure with the offending path attached.
        raise IOError("Failed to load data file from path: {} "
                      "".format(path)) from e
    return loaded_file
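
Chaining the original exception with Python's raise ... from keeps the underlying traceback attached to the IOError, which makes corrupt or truncated dump files much easier to diagnose.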

From avisingh599/reward-learning-rl (scripts/imitate_skills.py):
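joblib.load is not limited to scikit-learn objects: here it restores a pickled snapshot dictionary holding a TensorFlow policy and environment, which are then used to roll out one trajectory per skill.
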
import joblib
import numpy as np
import tensorflow as tf

# FixedOptionPolicy and rollouts are project helpers imported elsewhere
# in imitate_skills.py.


def collect_expert_trajectories(expert_snapshot, max_path_length):
    tf.logging.info('Collecting expert trajectories')
    with tf.Session() as sess:
        data = joblib.load(expert_snapshot)
        policy = data['policy']
        env = data['env']
        num_skills = (
            np.prod(data['policy']._observation_shape)
            - np.prod(data['env'].observation_space.shape))
        traj_vec = []
        with policy.deterministic(True):
            for z in range(num_skills):
                fixed_z_policy = FixedOptionPolicy(policy, num_skills, z)
                new_paths = rollouts(env, fixed_z_policy,
                                     max_path_length, n_paths=1)
                path = new_paths[0]
                traj_vec.append(path)
    tf.reset_default_graph()
    return traj_vec

From dtsbourg/ff-attention (examples/vppv/vppv.py):
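This data-preparation routine writes preprocessed sensor arrays to cache files with joblib.dump and, on later runs, reloads them (along with a fitted scaler) via joblib.load instead of recomputing them.
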
        # Load VP sensors
        dfvp = pd.DataFrame(dtype=float)
        for i in range(len(vp_occ[:-1])):
            dfvp[i] = list(map(float, vp_occ[i]))
        dfvp = dfvp.transpose()
        # Scale
        scaler_vp = StandardScaler()
        dfvp = scaler_vp.fit_transform(dfvp)
        vp_module_readouts = np.asarray([module_seperator(_) for _ in dfvp])

        # Cache data
        joblib.dump(npvs, NPVS_CACHE)
        joblib.dump(vp_module_readouts, VP_CACHE)
    else:
        npvs = joblib.load(NPVS_CACHE)
        vp_module_readouts = joblib.load(VP_CACHE)
        scaler_pv = joblib.load(SCALER_CACHE)

    return npvs, vp_module_readouts, scaler_pv
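
One last pattern worth knowing when the persisted objects are large numpy arrays: joblib.load can memory-map the arrays inside a dump file instead of reading them fully into RAM. A minimal sketch (file name illustrative):

import joblib
import numpy as np

big = np.random.rand(10_000, 1_000)
joblib.dump(big, "big_array.pkl")

# mmap_mode='r' returns a read-only numpy.memmap backed by the file,
# so only the pages that are actually touched get pulled into memory.
view = joblib.load("big_array.pkl", mmap_mode="r")
print(view[0, :5])

This is handy for caches like the distance matrices and sensor arrays above, where several processes may need read-only access to the same data.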