How to use the miscnn.neural_network.NeuralNetwork class in miscnn

To help you get started, we’ve selected a few miscnn examples, based on popular ways it is used in public projects.

github frankkramer-lab / MIScnn / miscnn / evaluation / evaluation.py
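These excerpts rely on names defined at module level in evaluation.py but not shown here: numpy imported as np, the standard math module, MIScnn's neural network module (presumably miscnn.neural_network) aliased as MIScnn_NN, and the helper functions update_evalpath, visualize_training, and detailed_validation provided elsewhere in the package.
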
def cross_validation(cases, config):
    # Randomly permute the case list
    cases_permuted = np.random.permutation(cases)
    # Split case list into folds
    folds = np.array_split(cases_permuted, config["n_folds"])
    fold_indices = list(range(len(folds)))
    # Cache original evaluation path
    eval_path = config["evaluation_path"]
    # Start cross-validation
    for i in fold_indices:
        # Create a Convolutional Neural Network model
        model = MIScnn_NN.NeuralNetwork(config)
        # Subset training and validation data set
        training = np.concatenate([folds[x] for x in fold_indices if x!=i],
                                  axis=0)
        validation = folds[i]
        # Initialize evaluation subdirectory for current fold
        config["evaluation_path"] = update_evalpath("fold_" + str(i), eval_path)
        # Run training & validation
        history = model.evaluate(training, validation)
        # Draw plots for the training & validation
        visualize_training(history, "fold_" + str(i), config["evaluation_path"])
        # Save model to file
        model.dump("fold_" + str(i))
        # Make a detailed validation of the current cv-fold
        detailed_validation(model, validation, "fold_" + str(i), config)
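
The fold handling above is plain NumPy: the case list is permuted, split into n_folds chunks with np.array_split, and for each fold the remaining chunks are concatenated back into the training set. The standalone sketch below reproduces only that splitting logic so it can be checked in isolation; the helper name make_folds, the seed argument, and the example case list are ours, not part of MIScnn.

import numpy as np

def make_folds(cases, n_folds, seed=None):
    # Permute the cases and split them into n_folds roughly equal chunks,
    # mirroring the fold construction in cross_validation above
    rng = np.random.default_rng(seed)
    permuted = rng.permutation(cases)
    folds = np.array_split(permuted, n_folds)
    # Yield (training, validation) pairs: fold i is the validation set,
    # all other folds are concatenated into the training set
    for i in range(n_folds):
        training = np.concatenate([folds[x] for x in range(n_folds) if x != i],
                                  axis=0)
        yield training, folds[i]

# Example: 10 case identifiers split into 3 folds
for training, validation in make_folds(list(range(10)), n_folds=3, seed=42):
    print(len(training), len(validation))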

github frankkramer-lab / MIScnn / miscnn / evaluation / evaluation.py
def leave_one_out(cases, config):
    # Start leave-one-out cycling
    for i in range(config["n_loo"]):
        # Create a Convolutional Neural Network model
        model = MIScnn_NN.NeuralNetwork(config)
        # Choose a random sample
        loo = cases.pop(np.random.choice(len(cases)))
        # Train the model with the remaining cases
        model.train(cases)
        # Make a detailed validation on the LOO sample
        detailed_validation(model, [loo], str(loo), config)
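
Note that leave_one_out consumes the case list in place: each cycle pops one randomly chosen case out and trains on whatever remains, so after n_loo cycles the list is shorter by n_loo entries. A minimal sketch of just that sampling step, independent of MIScnn (the case names and n_loo value are illustrative), looks like this:

import numpy as np

cases = ["case_%02d" % i for i in range(5)]
n_loo = 3

for i in range(n_loo):
    # Pop a random case: it becomes the single held-out sample,
    # while the shrinking list is what the model would be trained on
    loo = cases.pop(np.random.choice(len(cases)))
    print("held out:", loo, "| training on:", cases)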

github frankkramer-lab / MIScnn / miscnn / evaluation / evaluation.py
def split_validation(cases, config):
    # Calculate the number of samples in the testing set
    test_size = int(math.ceil(float(len(cases) * config["per_split"])))
    # Randomly pick samples until %-split percentage
    testing = []
    for i in range(test_size):
        test_sample = cases.pop(np.random.choice(len(cases)))
        testing.append(test_sample)
    # Rename the remaining cases as training
    training = cases
    # Create a Convolutional Neural Network model
    model = MIScnn_NN.NeuralNetwork(config)
    # Run training & validation
    history = model.evaluate(training, testing)
    # Draw plots for the training & validation
    visualize_training(history, "split_validation", config["evaluation_path"])
    # Make a detailed validation of the testing set
    detailed_validation(model, testing, "complete", config)
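
split_validation likewise draws its testing set destructively from the case list: test_size = ceil(len(cases) * per_split) samples are popped at random into testing, and whatever remains is used as training. The percentage split on its own can be sketched without MIScnn as follows (the case list and per_split value are illustrative):

import math
import numpy as np

cases = list(range(20))
per_split = 0.25

# Number of samples that go into the testing set
test_size = int(math.ceil(len(cases) * per_split))

# Randomly move test_size cases from the pool into the testing set
testing = [cases.pop(np.random.choice(len(cases))) for _ in range(test_size)]
# Whatever is left over becomes the training set
training = cases

print(len(training), "training cases,", len(testing), "testing cases")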