How to use the lifelines.utils.concordance_index function in lifelines

To help you get started, we’ve selected a few examples based on popular ways lifelines.utils.concordance_index is used in public projects.

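In its simplest form, lifelines.utils.concordance_index takes the observed durations, one predicted score per subject, and an optional event indicator (1 = event observed, 0 = censored), and returns the C-index as a value between 0 and 1. Higher scores should correspond to longer survival, which is why several of the examples below negate hazard-style predictions. A minimal sketch on synthetic data (the variable names here are illustrative only):

import numpy as np
from lifelines.utils import concordance_index

# Synthetic survival data: durations, one prediction per subject, and event flags
durations = np.array([5.0, 10.0, 12.0, 3.0, 8.0])
predicted_scores = np.array([6.0, 9.0, 11.0, 2.0, 7.0])  # larger score = longer predicted survival
event_observed = np.array([1, 1, 0, 1, 1])               # 0 marks a censored subject

ci = concordance_index(durations, predicted_scores, event_observed)
print(ci)  # 1.0 here, since the predictions preserve the true ordering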

Example from CamDavidsonPilon/lifelines: tests/test_estimation.py
    def test_cox_ph_prediction_monotonicity(self, data_pred2):
        # Concordance wise, all prediction methods should be monotonic versions
        # of one-another, unless numerical factors screw it up.
        t = data_pred2["t"]
        e = data_pred2["E"]
        X = data_pred2[["x1", "x2"]]

        cf = CoxPHFitter()
        cf.fit(data_pred2, duration_col="t", event_col="E")

        # Base comparison is partial_hazards
        ci_ph = concordance_index(t, -cf.predict_partial_hazard(X).values, e)

        ci_med = concordance_index(t, cf.predict_median(X).squeeze(), e)
        # pretty close.
        assert abs(ci_ph - ci_med) < 0.001

        ci_exp = concordance_index(t, cf.predict_expectation(X).squeeze(), e)
        assert ci_ph == ci_exp
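
The negation of the partial hazard above is deliberate: a larger partial hazard means shorter expected survival, while concordance_index rewards scores that increase with survival time. A hedged sketch of the same computation outside the test harness (df and the column names are placeholders, assuming the frame holds columns t, E, x1 and x2):

from lifelines import CoxPHFitter
from lifelines.utils import concordance_index

cph = CoxPHFitter()
cph.fit(df, duration_col="t", event_col="E")

# Flip the sign so that higher scores correspond to longer predicted survival
scores = -cph.predict_partial_hazard(df[["x1", "x2"]]).values
ci = concordance_index(df["t"], scores, df["E"])

# Recent lifelines releases also expose the training C-index as cph.concordance_index_,
# which should match the value computed manually above.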

Example from CamDavidsonPilon/lifelines: tests/utils/test_utils.py
def test_concordance_index():
    size = 1000
    T = np.random.normal(size=size)
    P = np.random.normal(size=size)
    C = np.random.choice([0, 1], size=size)
    Z = np.zeros_like(T)

    # Zeros is exactly random
    assert utils.concordance_index(T, Z) == 0.5
    assert utils.concordance_index(T, Z, C) == 0.5

    # Itself is 1
    assert utils.concordance_index(T, T) == 1.0
    assert utils.concordance_index(T, T, C) == 1.0

    # Random is close to 0.5
    assert abs(utils.concordance_index(T, P) - 0.5) < 0.05
    assert abs(utils.concordance_index(T, P, C) - 0.5) < 0.05
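
Note that the test exercises concordance_index both with and without the censoring vector C: when the third argument is omitted, lifelines treats every subject as uncensored, so the two calls below should be equivalent (a small illustrative check, not part of the original test):

# With no censoring information, all events are assumed observed
assert utils.concordance_index(T, P) == utils.concordance_index(T, P, np.ones_like(T))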

Example from MStarmans91/WORC: WORC/plotting/plot_SVM.py
                r2score.append(r2score_temp)
                MSE.append(MSE_temp)
                coefICC.append(coefICC_temp)
                PearsonC.append(PearsonC_temp)
                PearsonP.append(PearsonP_temp)
                SpearmanC.append(SpearmanC_temp)
                SpearmanP.append(SpearmanP_temp)

        # TODO: override with new survival code
        if survival:
            # Extract time to event and event from label data
            E_truth = np.asarray([labels[1][k][0] for k in test_indices])
            T_truth = np.asarray([labels[2][k][0] for k in test_indices])

            # Concordance index
            cindex.append(1 - ll.utils.concordance_index(T_truth, y_prediction, E_truth))

            # Fit Cox model using SVR output, time to event and event
            data = {'predict': y_prediction, 'E': E_truth, 'T': T_truth}
            data = pd.DataFrame(data=data, index=test_patient_IDs)

            cph = ll.CoxPHFitter()
            cph.fit(data, duration_col='T', event_col='E')

            coxcoef.append(cph.summary['coef']['predict'])
            coxp.append(cph.summary['p']['predict'])

    if output in ['scores', 'decision']:
        # Return the scores and true values of all patients
        return y_truths, y_scores, y_predictions, pids
    elif output == 'stats':
        # Compute statistics

Example from paidamoyo/adversarial_time_to_event: model/date_ae.py
                               self.risk_set: risk_batch, self.batch_size_tensor: batch_size, self.is_training: True,
                               self.noise_alpha: np.ones(shape=self.noise_dim)}
            for k in range(self.disc_updates):
                _ = self.session.run([self.disc_solver], feed_dict=feed_dict_train)

            for m in range(self.gen_updates):
                _ = self.session.run([self.gen_solver], feed_dict=feed_dict_train)

            summary, train_time, train_cost, train_ranking, train_rae, train_reg, train_gen, train_layer_one_recon, \
            train_t_reg, train_t_mse, train_disc = self.session.run(
                [self.merged, self.predicted_time, self.cost, self.ranking_partial_lik, self.total_rae,
                 self.reg_loss, self.gen_one_loss, self.layer_one_recon, self.t_regularization_loss, self.t_mse,
                 self.disc_one_loss],
                feed_dict=feed_dict_train)
            try:
                train_ci = concordance_index(event_times=t_batch,
                                             predicted_event_times=train_time.reshape(t_batch.shape),
                                             event_observed=e_batch)
            except IndexError:
                train_ci = 0.0
                print("C-Index IndexError")

            tf.verify_tensor_all_finite(train_cost, "Training Cost has Nan or Infinite")
            if j >= self.num_examples:
                epochs += 1
                is_epoch = True
                # idx = 0
                j = 0
            else:
                # idx = j
                j += self.batch_size
                is_epoch = False

Example from paidamoyo/adversarial_time_to_event: model/deep_regularized_aft.py
            predicted_time[i:j], cost, ranking, lik, rae, reg, log_var[i:j], recon = self.session.run(
                [self.predicted_time, self.cost, self.ranking_partial_lik, self.neg_log_lik, self.total_rae,
                 self.reg_loss,
                 self.t_log_var, self.total_t_recon_loss],
                feed_dict=feed_dict)

            total_ranking += ranking
            total_cost += cost
            total_rae += rae
            total_log_lik += lik
            total_reg += reg
            total_recon += recon
            i = j

        predicted_event_times = predicted_time.reshape(input_size)
        ci_index = concordance_index(event_times=t, predicted_event_times=predicted_event_times.tolist(),
                                     event_observed=e)

        def batch_average(total):
            return total / num_batches

        return ci_index, batch_average(total_cost), batch_average(total_rae), batch_average(
            total_ranking), batch_average(
            total_log_lik), batch_average(total_reg), log_var, batch_average(total_recon)

Example from paidamoyo/adversarial_time_to_event: model/date_ae.py
            # print("temp_pred_time:{}".format(temp_pred_time.shape))
            predicted_time[i:j] = np.median(temp_pred_time, axis=0)

            total_ranking += ranking
            total_cost += cost
            total_rae += rae
            total_gen_loss += gen_loss
            total_reg += reg
            total_layer_one_recon += layer_one_recon
            total_disc_loss += disc_loss
            total_t_reg_loss += t_reg_loss
            total_mse += t_mse
            i = j

        predicted_event_times = predicted_time.reshape(input_size)
        ci_index = concordance_index(event_times=t, predicted_event_times=predicted_event_times.tolist(),
                                     event_observed=e)

        def batch_average(total):
            return total / num_batches

        return ci_index, batch_average(total_cost), batch_average(total_rae), batch_average(
            total_ranking), batch_average(
            total_gen_loss), batch_average(total_reg), batch_average(total_disc_loss), batch_average(
            total_layer_one_recon), batch_average(total_t_reg_loss), batch_average(total_mse)

Example from mahmoodlab/PathomicFusion: utils.py
def CIndex_lifeline(hazards, labels, survtime_all):
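    # Higher hazard implies shorter survival, so negate hazards before scoring
    # (concordance_index expects scores that increase with survival time).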
    return(concordance_index(survtime_all, -hazards, labels))

Example from jaredleekatzman/DeepSurv: deepsurv/deep_surv.py
        0.0 is perfect anti-concordance (multiply predictions with -1 to get 1.0)

        Score is usually 0.6-0.7 for survival models.

        See:
        Harrell FE, Lee KL, Mark DB. Multivariable prognostic models: issues in
        developing models, evaluating assumptions and adequacy, and measuring and
        reducing errors. Statistics in Medicine 1996;15(4):361-87.
        """
        compute_hazards = theano.function(
            inputs = [self.X],
            outputs = -self.partial_hazard
        )
        partial_hazards = compute_hazards(x)

        return concordance_index(t,
            partial_hazards,
            e)
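
The DeepSurv docstring and the WORC snippet above make the same point from two directions: when a model outputs risk rather than survival time, you can either negate the predictions before scoring (as DeepSurv does with -self.partial_hazard) or subtract the resulting index from one (as WORC does with 1 - concordance_index(...)). A small sanity check on synthetic data, purely for illustration:

import numpy as np
from lifelines.utils import concordance_index

T = np.random.exponential(10, size=200)         # synthetic event times
E = np.random.binomial(1, 0.8, size=200)        # roughly 20% censoring
risk = -T + np.random.normal(0, 2, size=200)    # higher risk should mean shorter survival

ci_negated = concordance_index(T, -risk, E)     # negate so higher = longer survival
ci_flipped = 1 - concordance_index(T, risk, E)  # or flip the score afterwards
print(ci_negated, ci_flipped)                   # the two agree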