How to use the pymer4.utils._perm_find function in pymer4

To help you get started, we’ve selected a few pymer4 examples, based on popular ways it is used in public projects.
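
Before walking through the examples, it helps to know what `_perm_find` computes: given an array of test statistics from permuted model fits and the observed statistic, it returns a two-tailed permutation p-value, i.e. the proportion of permuted statistics at least as extreme as the observed one. The snippet below is a minimal sketch of that behavior written for illustration (the name `_perm_find_sketch` and the toy numbers are ours, not the library's source):

import numpy as np

def _perm_find_sketch(perm_stats, observed):
    # Two-tailed permutation p-value: fraction of permuted statistics whose
    # absolute value is at least as large as the observed statistic's
    perm_stats = np.asarray(perm_stats)
    return np.sum(np.abs(perm_stats) >= np.abs(observed)) / float(len(perm_stats))

# Example: compare an observed t-stat of 2.1 against 1000 permuted t-stats
rng = np.random.default_rng(0)
print(_perm_find_sketch(rng.standard_normal(1000), 2.1))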


github ejolly / pymer4 / pymer4 / models / Lm2.py (View on Github)
                # sign-flip permutation test for each beta instead to replace p-values
                if perm_on == 'coef':
                    return_stat = 'mean'
                else:
                    return_stat = 't-stat'
                seeds = np.random.randint(np.iinfo(np.int32).max, size=permute)
                par_for = Parallel(n_jobs=n_jobs, backend="multiprocessing")
                perm_est = par_for(
                    delayed(_permute_sign)(
                        data=betas[:, i], seed=seeds[j], return_stat=return_stat
                    )
                    for j in range(permute)
                )
                perm_est = np.array(perm_est)
                if perm_on == "coef":
                    perm_ps.append(_perm_find(perm_est, betas[:, i].mean()))
                else:
                    perm_ps.append(_perm_find(perm_est, lm.coefs["T-stat"].values))

        results = pd.concat(results, axis=0)
        ivs = self.formula.split("~")[-1].strip().split("+")
        ivs = [e.strip() for e in ivs]
        if to_corrs:
            intercept_pd = dict()
            for c in results.columns:
                intercept_pd[c] = np.nan
            intercept_pd = pd.DataFrame(intercept_pd, index=[0])
            results = pd.concat([intercept_pd, results], ignore_index=True)
        results.index = x.columns
        self.coefs = results
        if to_corrs:
            self.fixef = pd.DataFrame(betas, columns=ivs)
        else:
            self.fixef = pd.DataFrame(betas, columns=x.columns)
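
You rarely call `_perm_find` directly; it runs under the hood when you ask a model to compute permutation p-values at fit time. A hypothetical Lm2 call that would exercise the sign-flip path above might look like the following (the data frame is invented, and the `permute`/`perm_on` argument names are taken from the snippet, so treat the exact call as a sketch rather than a verbatim recipe):

import numpy as np
import pandas as pd
from pymer4.models import Lm2

# Toy long-format data: 20 groups with 10 observations each (made up for illustration)
rng = np.random.default_rng(0)
df = pd.DataFrame({
    "Group": np.repeat(np.arange(20), 10),
    "IV": rng.standard_normal(200),
})
df["DV"] = 0.5 * df["IV"] + rng.standard_normal(200)

# Two-stage regression: first-level fits per group, then sign-flip permutations
# of the second-level betas; _perm_find converts them into p-values
model = Lm2("DV ~ IV", group="Group", data=df)
model.fit(permute=500, perm_on="coef", n_jobs=1)
print(model.coefs)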
github ejolly / pymer4 / pymer4 / models / Lmer.py (View on Github)
                for i in range(permute):
                    perm_dat[dv_var] = perm_dat.groupby(grp_vars)[dv_var].transform(
                        lambda x: x.sample(frac=1)
                    )
                    if self.family == "gaussian":
                        perm_obj = lmer.lmer(self.formula, data=perm_dat, REML=REML)
                    else:
                        perm_obj = lmer.glmer(
                            self.formula, data=perm_dat, family=_fam, REML=REML
                        )
                    perms.append(_return_t(perm_obj))
                perms = np.array(perms)
                pvals = []
                for c in range(df.shape[0]):
                    if self.family in ["gaussian", "gamma", "inverse_gaussian"]:
                        pvals.append(_perm_find(perms[:, c], df["T-stat"][c]))
                    else:
                        pvals.append(_perm_find(perms[:, c], df["Z-stat"][c]))
                df["P-val"] = pvals
                if "DF" in df.columns:
                    df["DF"] = [permute] * df.shape[0]
                    df = df.rename(columns={"DF": "Num_perm", "P-val": "Perm-P-val"})
                else:
                    df["Num_perm"] = [permute] * df.shape[0]
                    df = df.rename(columns={"P-val": "Perm-P-val"})

            if "P-val" in df.columns:
                df = df.assign(Sig=df["P-val"].apply(lambda x: _sig_stars(x)))
            elif "Perm-P-val" in df.columns:
                df = df.assign(Sig=df["Perm-P-val"].apply(lambda x: _sig_stars(x)))

            if (conf_int == "boot") and (permute is None):
                # We're computing parametrically bootstrapped ci's so it doesn't make sense to use approximation for p-values. Instead remove those from the output and make significant inferences based on whether the bootstrapped ci's cross 0.
                df = df.drop(columns=["P-val", "Sig"])
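
For multilevel models the same helper turns permuted T- or Z-statistics into the Perm-P-val column shown above. A hypothetical Lmer fit that would trigger this permutation loop might look like the following (it assumes the `sample_data.csv` file and `get_resource_path` helper that pymer4's documentation uses; substitute your own long-format data frame if needed):

import os
import pandas as pd
from pymer4.models import Lmer
from pymer4.utils import get_resource_path

# Load the sample dataset bundled with pymer4 (assumed path helper)
df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))

# Random-intercept model; each of the 500 permutations shuffles the DV within
# groups, refits, and _perm_find compares observed vs. permuted statistics
model = Lmer("DV ~ IV2 + (1 | Group)", data=df)
model.fit(permute=500)
print(model.coefs)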
github ejolly / pymer4 / pymer4 / models / Lm.py (View on Github)
            # permuted OLS fits run in parallel (same pattern as in Lm2.py above)
            seeds = np.random.randint(np.iinfo(np.int32).max, size=permute)
            par_for = Parallel(n_jobs=n_jobs, backend="multiprocessing")
            perm_ts = par_for(
                delayed(_chunk_perm_ols)(
                    x=x,
                    y=y,
                    robust=robust,
                    n_lags=n_lags,
                    cluster=cluster,
                    weights=weights,
                    seed=seeds[i],
                )
                for i in range(permute)
            )
            perm_ts = np.array(perm_ts)

            p = []
            for col, fit_t in zip(range(perm_ts.shape[1]), t):
                p.append(_perm_find(perm_ts[:, col], fit_t))
            p = np.array(p)
            df = np.array([permute] * len(p))
            sig = np.array([_sig_stars(elem) for elem in p])

        # Make output df
        results = np.column_stack([b, ci_l, ci_u, se, df, t, p, sig])
        results = pd.DataFrame(results)
        results.index = x.columns
        results.columns = [
            "Estimate",
            "2.5_ci",
            "97.5_ci",
            "SE",
            "DF",
            "T-stat",
            "P-val",