How to use the miscnn.data_loading.data_io.create_directories function in miscnn

To help you get started, we've selected a few miscnn examples based on common ways the function is used in public projects. All of the snippets below come from the MIScnn repository's evaluation modules, where create_directories sets up the output directories for evaluation runs.
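
Before the examples: create_directories builds a directory on disk and, when given a second argument, a named subdirectory inside it, returning the resulting path. A minimal sketch of that usage pattern, inferred from the calls in the snippets below; the paths are placeholders:

import os
from miscnn.data_loading.data_io import create_directories

# Create the base evaluation directory
create_directories("evaluation")
# Create a subdirectory and keep the returned path, e.g. "evaluation/fold_0"
subdir = create_directories("evaluation", "fold_0")
print(os.path.join(subdir, "sample_list.json"))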


From frankkramer-lab/MIScnn: miscnn/evaluation/cross_validation.py
import os
import numpy as np
from miscnn.data_loading.data_io import create_directories
# write_fold2disk is a helper defined in the same MIScnn module

def split_folds(sample_list, k_fold=3, evaluation_path="evaluation"):
    # Randomly permute the sample list
    samples_permuted = np.random.permutation(sample_list)
    # Split sample list into folds
    folds = np.array_split(samples_permuted, k_fold)
    fold_indices = list(range(len(folds)))
    # Iterate over each fold
    for i in fold_indices:
        # Subset training and validation data set
        training = np.concatenate([folds[x] for x in fold_indices if x!=i],
                                  axis=0)
        validation = folds[i]
        # Initialize evaluation subdirectory for current fold
        subdir = create_directories(evaluation_path, "fold_" + str(i))
        fold_cache = os.path.join(subdir, "sample_list.json")
        # Write sampling to disk
        write_fold2disk(fold_cache, training, validation)
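
A quick usage sketch for split_folds; the sample identifiers are hypothetical:

samples = ["case_00", "case_01", "case_02", "case_03", "case_04", "case_05"]
split_folds(samples, k_fold=3, evaluation_path="evaluation")
# Writes evaluation/fold_0/sample_list.json, evaluation/fold_1/... and so on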

From frankkramer-lab/MIScnn: miscnn/evaluation/cross_validation.py
import os
import numpy as np
from tensorflow.keras.callbacks import ModelCheckpoint
from miscnn.data_loading.data_io import create_directories
# backup_history, plot_validation and detailed_validation are helpers
# from MIScnn's surrounding evaluation modules

# Signature reconstructed from the parameters used in the body
def cross_validation(sample_list, model, k_fold=3, epochs=20,
                     iterations=None, callbacks=[], save_models=True,
                     draw_figures=False, run_detailed_evaluation=False,
                     evaluation_path="evaluation", return_output=False):
    if return_output : validation_results = []
    # Randomly permute the sample list
    samples_permuted = np.random.permutation(sample_list)
    # Split sample list into folds
    folds = np.array_split(samples_permuted, k_fold)
    fold_indices = list(range(len(folds)))
    # Start cross-validation
    for i in fold_indices:
        # Reset Neural Network model weights
        model.reset_weights()
        # Subset training and validation data set
        training = np.concatenate([folds[x] for x in fold_indices if x!=i],
                                  axis=0)
        validation = folds[i]
        # Initialize evaluation subdirectory for current fold
        subdir = create_directories(evaluation_path, "fold_" + str(i))
        # Save model for each fold
        cb_model = ModelCheckpoint(os.path.join(subdir, "model.hdf5"),
                                   monitor="val_loss", verbose=1,
                                   save_best_only=True, mode="min")
        if save_models == True : cb_list = callbacks + [cb_model]
        else : cb_list = callbacks
        # Run training & validation
        history = model.evaluate(training, validation, epochs=epochs,
                                 iterations=iterations, callbacks=cb_list)
        # Backup current history dictionary
        if return_output : validation_results.append(history.history)
        else : backup_history(history.history, subdir)
        # Draw plots for the training & validation
        if draw_figures:
            plot_validation(history.history, model.metrics, subdir)
        # Make a detailed validation of the current cv-fold
        if run_detailed_evaluation:
            detailed_validation(validation, model, subdir)
    # Return the collected validation histories if requested
    # (tail reconstructed by analogy with split_validation below)
    if return_output : return validation_results
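
Assuming the reconstructed signature above, a call might look like this; model is taken to be a configured miscnn Neural_Network and samples a list of sample identifiers:

histories = cross_validation(samples, model, k_fold=3, epochs=20,
                             save_models=True, draw_figures=True,
                             return_output=True)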

From frankkramer-lab/MIScnn: miscnn/evaluation/leave_one_out.py
import numpy as np
from miscnn.data_loading.data_io import create_directories
# detailed_validation is a helper from MIScnn's evaluation modules

def leave_one_out(sample_list, model, epochs=20, iterations=None, callbacks=[],
                  evaluation_path="evaluation"):
    # Choose a random sample
    loo = sample_list.pop(np.random.choice(len(sample_list)))
    # Reset Neural Network model weights
    model.reset_weights()
    # Train the model with the remaining samples
    model.train(sample_list, epochs=epochs, iterations=iterations,
                callbacks=callbacks)
    # Initialize evaluation directory
    create_directories(evaluation_path)
    # Make a detailed validation on the LOO sample
    detailed_validation([loo], model, evaluation_path)
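
Usage is analogous; note that leave_one_out pops the chosen sample out of sample_list in place, so the list is shortened by the call. A sketch with hypothetical sample names:

samples = ["case_00", "case_01", "case_02", "case_03"]
leave_one_out(samples, model, epochs=20, evaluation_path="evaluation")
# samples now holds only the three remaining training cases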

From frankkramer-lab/MIScnn: miscnn/evaluation/split_validation.py
import math
import numpy as np
from miscnn.data_loading.data_io import create_directories
# backup_history, plot_validation and detailed_validation are helpers
# from MIScnn's surrounding evaluation modules

# Signature reconstructed from the parameters used in the body
def split_validation(sample_list, model, percentage=0.2, epochs=20,
                     iterations=None, callbacks=[], draw_figures=False,
                     run_detailed_evaluation=False,
                     evaluation_path="evaluation", return_output=False):
    # Calculate the number of samples in the validation set
    validation_size = int(math.ceil(float(len(sample_list) * percentage)))
    # Randomly pick samples until %-split percentage
    validation = []
    for i in range(validation_size):
        validation_sample = sample_list.pop(np.random.choice(len(sample_list)))
        validation.append(validation_sample)
    # Rename the remaining cases as training
    training = sample_list
    # Reset Neural Network model weights
    model.reset_weights()
    # Run training & validation
    history = model.evaluate(training, validation, epochs=epochs,
                             iterations=iterations, callbacks=callbacks)
    # Initialize evaluation directory
    create_directories(evaluation_path)
    # Draw plots for the training & validation
    if draw_figures:
        plot_validation(history.history, model.metrics, evaluation_path)
    # Make a detailed validation
    if run_detailed_evaluation:
        detailed_validation(validation, model, evaluation_path)
    # Return or backup the validation results
    if return_output : return history.history
    else : backup_history(history.history, evaluation_path)
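
Finally, a sketch for the percentage-split routine, again assuming the reconstructed signature; with return_output=True the history dictionary is returned instead of written to disk:

history = split_validation(samples, model, percentage=0.2, epochs=20,
                           draw_figures=True, run_detailed_evaluation=True,
                           return_output=True)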