def detailed_validation(model, cases, suffix, config):
    # Initialize kits19 scoring file
    save_evaluation(["case_id", "score_KidneyTumor", "score_Tumor"],
                    config["evaluation_path"],
                    "kits19_scoring." + suffix + ".tsv",
                    start=True)
    # Predict the cases with the provided model
    model.predict(cases)
    # Iterate over each case
    for id in cases:
        # Load the truth segmentation
        truth = load_segmentation_nii(id, config["data_path"]).get_data()
        # Load the prediction segmentation
        pred = load_prediction_nii(id, config["output_path"]).get_data()
        # Calculate kits19 score
        score_kidney, score_tumor = kits19_score(truth, pred)
        # Save kits19 score to file
        save_evaluation([id, score_kidney, score_tumor],
                        config["evaluation_path"],
                        "kits19_scoring." + suffix + ".tsv")
save_evaluation(["case_id", "score_KidneyTumor", "score_Tumor"],
config["evaluation_path"],
"kits19_scoring." + suffix + ".tsv",
start=True)
# Predict the cases with the provided model
model.predict(cases)
# Iterate over each case
for id in cases:
# Load the truth segmentation
truth = load_segmentation_nii(id, config["data_path"]).get_data()
# Load the prediction segmentation
pred = load_prediction_nii(id, config["output_path"]).get_data()
# Calculate kits19 score
score_kidney, score_tumor = kits19_score(truth, pred)
# Save kits19 score to file
save_evaluation([id, score_kidney, score_tumor],
config["evaluation_path"],
"kits19_scoring." + suffix + ".tsv")
        # Calculate class frequency per slice
        if config["class_freq"]:
            class_freq = calc_ClassFrequency(truth, pred)
            for i in range(len(class_freq)):
                print(str(id) + "\t" + str(i) + "\t" + str(class_freq[i]))
        # Visualize the truth and prediction segmentation
        if config["visualize"]:
            # Load the volume
            vol = load_volume_nii(id, config["data_path"]).get_data()
            # Run visualization
            visualize_evaluation(id, vol, truth, pred, config["evaluation_path"])
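
# A hypothetical invocation of detailed_validation as a minimal sketch: only the
# config keys are taken from the function above; the concrete paths, the model
# object, the case list, and the suffix are placeholders, not the project's code.
config = {
    "data_path": "data/kits19",       # placeholder: directory with volumes + truth segmentations
    "output_path": "predictions",     # placeholder: directory with predicted segmentations
    "evaluation_path": "evaluation",  # placeholder: directory for the tsv scoring files
    "class_freq": True,               # print class frequencies for each case
    "visualize": False,               # skip truth vs. prediction visualization
}

# model and validation_cases are assumed to be provided by the surrounding training code
detailed_validation(model, validation_cases, suffix="fold0", config=config)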
def on_epoch_end(self, epoch, logs={}):
    # Collect the training & validation metrics of the finished epoch
    data_point = [epoch, logs["loss"],
                  logs["dice_coefficient"], logs["dice_classwise"],
                  logs["categorical_accuracy"],
                  logs["categorical_crossentropy"],
                  logs["val_loss"], logs["val_dice_coefficient"],
                  logs["val_dice_classwise"],
                  logs["val_categorical_accuracy"],
                  logs["val_categorical_crossentropy"]]
    # Append the data point to the validation tsv file
    save_evaluation(data_point, self.eval_path, "validation.tsv")
def __init__(self, eval_path):
    self.eval_path = eval_path
    # Create evaluation tsv file
    save_evaluation(["epoch", "tversky_loss", "dice_coef",
                     "dice_classwise", "categorical_accuracy",
                     "categorical_crossentropy", "val.tversky_loss",
                     "val.dice_coef", "val.dice_classwise",
                     "val.categorical_accuracy",
                     "val.categorical_crossentropy"],
                    eval_path,
                    "validation.tsv",
                    start=True)
    # Create training tsv file
    save_evaluation(["epoch", "batch", "tversky_loss",
                     "dice_coef", "dice_classwise"],
                    eval_path,
                    "training.tsv",
                    start=True)
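
# save_evaluation itself is not part of these snippets. Judging from the calls
# above (a header row written with start=True, data rows appended without it),
# a compatible implementation might look like the following sketch -- this is
# an assumption about its behaviour, not the project's actual code.
import os

def save_evaluation(data, directory, file_name, start=False):
    # Create the target directory if it does not exist yet
    if not os.path.exists(directory):
        os.makedirs(directory)
    # start=True (re)creates the file, otherwise the row is appended
    mode = "w" if start else "a"
    with open(os.path.join(directory, file_name), mode) as tsv_file:
        tsv_file.write("\t".join(str(value) for value in data) + "\n")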
def on_batch_end(self, batch, logs={}):
    # Collect the training metrics of the finished batch
    data_point = [self.current_epoch, batch, logs["loss"],
                  logs["dice_coefficient"], logs["dice_classwise"]]
    # Append the data point to the training tsv file
    save_evaluation(data_point, self.eval_path, "training.tsv")
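
# The methods above presumably belong to a single Keras callback. Note that
# on_batch_end reads self.current_epoch, which none of the shown methods set,
# so an on_epoch_begin hook like the one sketched here is assumed. The class
# name and the wiring are placeholders, not the project's actual code.
from tensorflow.keras.callbacks import Callback

class EvaluationCallback(Callback):

    def __init__(self, eval_path):
        super().__init__()
        self.eval_path = eval_path
        self.current_epoch = 0
        # ... create validation.tsv and training.tsv headers as shown above ...

    def on_epoch_begin(self, epoch, logs=None):
        # Remember the running epoch so that on_batch_end can log it
        self.current_epoch = epoch

    # on_batch_end and on_epoch_end as defined above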