How to use the lifelines.utils.CensoringType.is_left_censoring function in lifelines

To help you get started, we’ve selected a few lifelines examples based on popular ways this function is used in public projects.
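Before diving into the repository snippets, here is a minimal, self-contained sketch of the typical call pattern. It uses WeibullFitter and a handful of synthetic left-censored durations, neither of which appears in the snippets below, and assumes a recent lifelines release in which the fit_left_censoring entry point records the censoring type on the fitter.

import numpy as np
from lifelines import WeibullFitter
from lifelines.utils import CensoringType

# synthetic left-censored data: each event happened at or before the recorded time
T = np.array([2.0, 3.0, 5.0, 8.0, 13.0])
E = np.array([1, 1, 0, 1, 1])

wf = WeibullFitter()
wf.fit_left_censoring(T, event_observed=E)

# fit_left_censoring tags the instance with its censoring type,
# which is exactly what the internal checks in the snippets below query
print(CensoringType.is_left_censoring(wf))   # True
print(CensoringType.is_right_censoring(wf))  # False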


GitHub: CamDavidsonPilon/lifelines (lifelines/fitters/__init__.py)
    def _fit_model(self, Ts, E, entry, weights, show_progress=True):

        if utils.CensoringType.is_left_censoring(self):
            negative_log_likelihood = self._negative_log_likelihood_left_censoring
        elif utils.CensoringType.is_interval_censoring(self):
            negative_log_likelihood = self._negative_log_likelihood_interval_censoring
        elif utils.CensoringType.is_right_censoring(self):
            negative_log_likelihood = self._negative_log_likelihood_right_censoring

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            results = minimize(
                value_and_grad(negative_log_likelihood),  # pylint: disable=no-value-for-parameter
                self._initial_values,
                jac=True,
                method=self._scipy_fit_method,
                args=(Ts, E, entry, weights),
                bounds=self._bounds,

GitHub: CamDavidsonPilon/lifelines (lifelines/fitters/kaplan_meier_fitter.py)
        if weights is not None:
            weights = np.asarray(weights)
            if (weights.astype(int) != weights).any():
                warnings.warn(
                    """It looks like your weights are not integers, possibly propensity scores then?
  It's important to know that the naive variance estimates of the coefficients are biased. Instead use Monte Carlo to
  estimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis"
  or "Adjusted Kaplan-Meier estimator and log-rank test with inverse probability of treatment weighting for survival data."
                  """,
                    StatisticalWarning,
                )
        else:
            weights = np.ones_like(durations, dtype=float)

        # if the user is interested in left censoring, the primary estimate is the cumulative_density_, not the survival_function_
        is_left_censoring = CensoringType.is_left_censoring(self)
        primary_estimate_name = "survival_function_" if not is_left_censoring else "cumulative_density_"
        secondary_estimate_name = "cumulative_density_" if not is_left_censoring else "survival_function_"

        (
            self.durations,
            self.event_observed,
            self.timeline,
            self.entry,
            self.event_table,
            self.weights,
        ) = _preprocess_inputs(durations, event_observed, timeline, entry, weights)

        alpha = alpha if alpha else self.alpha
        log_estimate, cumulative_sq_ = _additive_estimate(
            self.event_table, self.timeline, self._additive_f, self._additive_var, is_left_censoring
        )
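
The branch above is why a KaplanMeierFitter fit under left censoring exposes cumulative_density_ as its primary estimate rather than survival_function_. A short sketch with synthetic data (not taken from the repository) of how that surfaces to the user:

import numpy as np
from lifelines import KaplanMeierFitter
from lifelines.utils import CensoringType

T = np.array([2.0, 3.0, 5.0, 8.0, 13.0])
E = np.array([1, 1, 0, 1, 1])

kmf = KaplanMeierFitter()
kmf.fit_left_censoring(T, event_observed=E)

print(CensoringType.is_left_censoring(kmf))  # True
# under left censoring the cumulative density is the primary estimate
print(kmf.cumulative_density_.head())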

GitHub: CamDavidsonPilon/lifelines (lifelines/fitters/generalized_gamma_regression_fitter.py)
    def _create_initial_point(self, Ts, E, entries, weights, Xs):
        # detect constant columns
        constant_col = (Xs.df.var(0) < 1e-8).idxmax()

        import lifelines

        uni_model = lifelines.GeneralizedGammaFitter()

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            if utils.CensoringType.is_right_censoring(self):
                uni_model.fit_right_censoring(Ts[0], event_observed=E, entry=entries, weights=weights)
            elif utils.CensoringType.is_interval_censoring(self):
                uni_model.fit_interval_censoring(Ts[0], Ts[1], event_observed=E, entry=entries, weights=weights)
            elif utils.CensoringType.is_left_censoring(self):
                uni_model.fit_left_censoring(Ts[1], event_observed=E, entry=entries, weights=weights)

            # we may use this later in print_summary
            self._ll_null_ = uni_model.log_likelihood_

            d = {}

            d["mu_"] = np.array([0.0] * (len(Xs.mappings["mu_"])))
            if constant_col in Xs.mappings["mu_"]:
                d["mu_"][Xs.mappings["mu_"].index(constant_col)] = uni_model.mu_

            d["sigma_"] = np.array([0.0] * (len(Xs.mappings["sigma_"])))
            if constant_col in Xs.mappings["sigma_"]:
                d["sigma_"][Xs.mappings["sigma_"].index(constant_col)] = uni_model.ln_sigma_

            # this needs to be non-zero because we divide by it

GitHub: CamDavidsonPilon/lifelines (lifelines/fitters/log_normal_fitter.py)
    def _create_initial_point(self, Ts, E, *args):
        # Ts is a pair of duration arrays: right censoring stores the observed
        # durations in Ts[0], left censoring stores them in Ts[1], and interval
        # censoring uses (Ts[0], Ts[1]) as the (lower, upper) bounds
        if CensoringType.is_right_censoring(self):
            log_T = np.log(Ts[0])
        elif CensoringType.is_left_censoring(self):
            log_T = np.log(Ts[1])
        elif CensoringType.is_interval_censoring(self):
            log_T = np.log(Ts[1])
        return np.array([np.median(log_T), 1.0])

GitHub: CamDavidsonPilon/lifelines (lifelines/fitters/generalized_gamma_fitter.py)
    def _create_initial_point(self, Ts, E, *args):
        if CensoringType.is_right_censoring(self):
            log_data = log(Ts[0])
        elif CensoringType.is_left_censoring(self):
            log_data = log(Ts[1])
        elif CensoringType.is_interval_censoring(self):
            # this fails if Ts[1] == Ts[0], so we add a small fudge factor.
            log_data = log(Ts[1] - Ts[0] + 0.01)
        return np.array([log_data.mean(), log(log_data.std() + 0.01), 0.1])

GitHub: CamDavidsonPilon/lifelines (lifelines/fitters/__init__.py)
        regressors = {name: ["intercept"] for name in self._fitted_parameter_names}
        df = pd.DataFrame({"entry": self.entry, "intercept": 1, "w": self.weights})
        model = self.__class__(penalizer=self.penalizer)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            if utils.CensoringType.is_right_censoring(self):
                df["T"], df["E"] = self.durations, self.event_observed
                model.fit_right_censoring(df, "T", "E", entry_col="entry", weights_col="w", regressors=regressors)
            elif utils.CensoringType.is_interval_censoring(self):
                df["lb"], df["ub"], df["E"] = self.lower_bound, self.upper_bound, self.event_observed
                model.fit_interval_censoring(
                    df, "lb", "ub", "E", entry_col="entry", weights_col="w", regressors=regressors
                )
            elif utils.CensoringType.is_left_censoring(self):
                df["T"], df["E"] = self.durations, self.event_observed
                model.fit_left_censoring(df, "T", "E", entry_col="entry", weights_col="w", regressors=regressors)

        self._ll_null_ = model.log_likelihood_
        return self._ll_null_

GitHub: CamDavidsonPilon/lifelines (lifelines/fitters/__init__.py)
        name = self._class_name.replace("AFT", "")
        try:
            uni_model = getattr(lifelines, name)()
        except AttributeError:
            # some custom AFT model if univariate model is not defined.
            return super(ParametericAFTRegressionFitter, self)._create_initial_point(Ts, E, entries, weights, Xs)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            if utils.CensoringType.is_right_censoring(self):
                uni_model.fit_right_censoring(Ts[0], event_observed=E, entry=entries, weights=weights)
            elif utils.CensoringType.is_interval_censoring(self):
                uni_model.fit_interval_censoring(Ts[0], Ts[1], event_observed=E, entry=entries, weights=weights)
            elif utils.CensoringType.is_left_censoring(self):
                uni_model.fit_left_censoring(Ts[1], event_observed=E, entry=entries, weights=weights)

        # we may use this later in print_summary
        self._ll_null_ = uni_model.log_likelihood_

        d = {}

        for param, mapping in Xs.mappings.items():
            d[param] = np.array([0.0] * (len(mapping)))
            if constant_col in mapping:
                d[param][mapping.index(constant_col)] = _transform_ith_param(getattr(uni_model, param))
        return d
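
Taken together, the snippets follow one pattern: the public fit_right_censoring, fit_interval_censoring, and fit_left_censoring entry points record the censoring type on the fitted instance, and internal helpers then branch on CensoringType instead of accepting an extra flag. A small sketch of the same dispatch in user code (choose_loss is a hypothetical helper, not part of lifelines):

import numpy as np
from lifelines import WeibullFitter
from lifelines.utils import CensoringType

def choose_loss(fitted_model):
    # mirror the if/elif dispatch used inside the lifelines fitters
    if CensoringType.is_left_censoring(fitted_model):
        return "left-censored log-likelihood"
    elif CensoringType.is_interval_censoring(fitted_model):
        return "interval-censored log-likelihood"
    elif CensoringType.is_right_censoring(fitted_model):
        return "right-censored log-likelihood"
    raise ValueError("model does not carry a recognized censoring type")

T = np.array([2.0, 3.0, 5.0, 8.0, 13.0])
E = np.array([1, 1, 1, 1, 0])

wf = WeibullFitter().fit_right_censoring(T, event_observed=E)
print(choose_loss(wf))  # right-censored log-likelihood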