How to use pymer4 - 9 common examples

To help you get started, we’ve selected a few pymer4 examples based on popular ways the library is used in public projects.

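Most of the examples below follow the same basic pattern: load the sample dataset that ships with the package, build an Lmer model from an lme4-style formula, and call its fit() method. Here is a minimal sketch of that pattern; the column names DV, IV2, and Group come from the bundled sample data used throughout these examples.

import os
import pandas as pd
from pymer4.models import Lmer
from pymer4.utils import get_resource_path

# Load the sample dataset that ships with pymer4
df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))

# Random-intercept model: fixed effect of IV2, intercepts varying by Group
model = Lmer("DV ~ IV2 + (1|Group)", data=df)
model.fit()  # prints an lme4-style summary and populates model.coefs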

github ejolly / pymer4 / pymer4 / test_install.py
def test_install():
    """
    Quick function to test the installation by importing an Lmer object and fitting a quick model.
    """
    try:
        from pymer4.models import Lmer
        from pymer4.utils import get_resource_path
        import os
        import pandas as pd
        import warnings

        warnings.filterwarnings("ignore")
        df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))
        model = Lmer("DV ~ IV3 + (1|Group)", data=df)
        model.fit(summarize=False)
        print("Pymer4 installation working successfully!")
    except Exception as e:
        print("Error! {}".format(e))
github ejolly / pymer4 / pymer4 / stats.py
def _perm_test(x, y, stat, equal_var, random_state):
    """For use in parallel perm_test"""
    random_state = _check_random_state(random_state)
    if stat in ["pearsonr", "spearmanr"]:
        y = random_state.permutation(y)
    elif stat in ["tstat", "cohensd", "mean"]:
        if y is None:
            x = x * random_state.choice([1, -1], len(x))
        elif isinstance(y, (float, int)):
            x -= y
            x = x * random_state.choice([1, -1], len(x))
        else:
            shuffled_combined = random_state.permutation(np.hstack([x, y]))
            x, y = shuffled_combined[: x.size], shuffled_combined[x.size :]
    elif (stat == "tstat-paired") or (y is None):
        x = x * random_state.choice([1, -1], len(x))

    return perm_test(x, y, stat, equal_var=equal_var, n_perm=0)
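
The helper above is the per-permutation worker behind pymer4.stats.perm_test. Below is a hedged sketch of calling the public function, using only the argument names visible in the snippet (stat, equal_var, n_perm); other keywords and defaults may differ in your version.

import numpy as np
from pymer4.stats import perm_test

rng = np.random.default_rng(0)
x = rng.normal(0.5, 1.0, size=30)  # group 1
y = rng.normal(0.0, 1.0, size=30)  # group 2

# Two-sample t-statistic with a permutation-based p-value
result = perm_test(x, y, "tstat", equal_var=False, n_perm=500)
print(result)
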
github ejolly / pymer4 / docs / _build / html / _downloads / basic_usage.py
import os
import pandas as pd
import seaborn as sns
from pymer4.models import Lmer
from pymer4.utils import get_resource_path

df = pd.read_csv(os.path.join(get_resource_path(),'sample_data.csv'))
df.head()
print("Hello World!")

#######################################################################
# Estimate a model
# ----------------
#
# Initialize linear model with random intercepts, slopes and their correlation

model = Lmer('DV ~ IV2 + (IV2|Subject)',data=df)

#########################################################################
# Fit it

model.fit()

#######################################################################
# Inspect clusters
# --------------------------
#
# We can look at the 'Subject' level parameters easily
# Each row here is a unique Subject's random intercept and slope

model.fixef.head()

# We can also plot these values with respect to the population parameters
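
The tutorial continues by plotting those per-Subject estimates against the population-level fit. A hedged sketch of that step, assuming Lmer exposes a plot method with plot_ci and ylabel keywords (names are an assumption, not shown in the snippet):

# Cluster-level (per-Subject) estimates for IV2 overlaid on the population estimate
model.plot("IV2", plot_ci=True, ylabel="predicted DV")
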
github ejolly / pymer4 / docs / _build / html / _downloads / continuous_models.py
import os
import pandas as pd
import seaborn as sns
from pymer4.models import Lmer
from pymer4.utils import get_resource_path

df = pd.read_csv(os.path.join(get_resource_path(),'sample_data.csv'))
df.head()

#######################################################################
# Estimate a model
# ----------------
#
# Initialize linear model with random intercepts, slopes and their correlation

model = Lmer('DV ~ IV2 + (IV2|Subject)',data=df)

#########################################################################
# Fit it

model.fit()

#######################################################################
# Inspect clusters
# --------------------------
#
# We can look at the 'Subject' level parameters easily
# Each row here is a unique Subject's random intercept and slope

model.fixef.head()

# We can also plot these values with respect to the population parameters
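
Since model.fixef is an ordinary pandas DataFrame with one row per Subject, the seaborn import in this snippet can be used to visualize how the per-Subject slopes spread around the population estimate. A rough sketch, assuming the fixef column is named after the IV2 term and that coefs has an 'Estimate' column:

import seaborn as sns

# Distribution of per-Subject slopes for IV2 (column name assumed from the formula)
ax = sns.histplot(model.fixef["IV2"], kde=True)

# Overlay the population-level estimate (column name 'Estimate' is an assumption)
ax.axvline(model.coefs.loc["IV2", "Estimate"], color="black", linestyle="--")
ax.set(xlabel="Per-Subject slope for IV2")
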
github ejolly / pymer4 / pymer4 / io.py
).astype(model_atts['data_atts'][dtype_name])
                    # Check if the list already exists if so just append to it
                    if hasattr(model, item_name):
                        current_items = getattr(model, item_name)
                        if current_items is not None:
                            current_items += [df]
                            setattr(model, item_name, current_items)
                        else:
                            setattr(model, item_name, [df])
                    # Otherwise create it
                    else:
                        setattr(model, item_name, [df])
                    # Add to the list of completed items
                    completed.extend([item_name, vals_name, idx_name, dtype_name])
        # Now deal with model object in R if needed
        if isinstance(model, Lmer):
            filename = filepath.split('.')[0]
            model.model_obj = base.readRDS(f"{filename}.rds")
        return model
    else:
        raise IOError("filepath must end with .h5 or .hdf5")
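
This excerpt is from the model-loading half of pymer4's serialization code. Below is a hedged round-trip sketch, assuming pymer4.io exposes save_model and load_model under those names; the .rds companion file holding the R-side lme4 object is implied by the readRDS call above.

import os
import pandas as pd
from pymer4.models import Lmer
from pymer4.utils import get_resource_path
from pymer4.io import save_model, load_model  # function names are an assumption

df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))
model = Lmer("DV ~ IV2 + (1|Group)", data=df)
model.fit(summarize=False)

# Persist the fitted model; for Lmer models an .rds file with the underlying
# R model object sits alongside the .h5/.hdf5 file (as the readRDS call implies)
save_model(model, "my_model.h5")

# Restore it later
model_again = load_model("my_model.h5")
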
github ejolly / pymer4 / pymer4 / models / Lm2.py
# sign-flip permutation test for each beta instead to replace p-values
                if perm_on == 'coef':
                    return_stat = 'mean'
                else:
                    return_stat = 't-stat'
                seeds = np.random.randint(np.iinfo(np.int32).max, size=permute)
                par_for = Parallel(n_jobs=n_jobs, backend="multiprocessing")
                perm_est = par_for(
                    delayed(_permute_sign)(
                        data=betas[:, i], seed=seeds[j], return_stat=return_stat
                    )
                    for j in range(permute)
                )
                perm_est = np.array(perm_est)
                if perm_on == "coef":
                    perm_ps.append(_perm_find(perm_est, betas[:, i].mean()))
                else:
                    perm_ps.append(_perm_find(perm_est, lm.coefs["T-stat"].values))

        results = pd.concat(results, axis=0)
        ivs = self.formula.split("~")[-1].strip().split("+")
        ivs = [e.strip() for e in ivs]
        if to_corrs:
            intercept_pd = dict()
            for c in results.columns:
                intercept_pd[c] = np.nan
            intercept_pd = pd.DataFrame(intercept_pd, index=[0])
            results = pd.concat([intercept_pd, results], ignore_index=True)
        results.index = x.columns
        self.coefs = results
        if to_corrs:
            self.fixef = pd.DataFrame(betas, columns=ivs)
github ejolly / pymer4 / pymer4 / models / Lm2.py
return_stat = 'mean'
                else:
                    return_stat = 't-stat'
                seeds = np.random.randint(np.iinfo(np.int32).max, size=permute)
                par_for = Parallel(n_jobs=n_jobs, backend="multiprocessing")
                perm_est = par_for(
                    delayed(_permute_sign)(
                        data=betas[:, i], seed=seeds[j], return_stat=return_stat
                    )
                    for j in range(permute)
                )
                perm_est = np.array(perm_est)
                if perm_on == "coef":
                    perm_ps.append(_perm_find(perm_est, betas[:, i].mean()))
                else:
                    perm_ps.append(_perm_find(perm_est, lm.coefs["T-stat"].values))

        results = pd.concat(results, axis=0)
        ivs = self.formula.split("~")[-1].strip().split("+")
        ivs = [e.strip() for e in ivs]
        if to_corrs:
            intercept_pd = dict()
            for c in results.columns:
                intercept_pd[c] = np.nan
            intercept_pd = pd.DataFrame(intercept_pd, index=[0])
            results = pd.concat([intercept_pd, results], ignore_index=True)
        results.index = x.columns
        self.coefs = results
        if to_corrs:
            self.fixef = pd.DataFrame(betas, columns=ivs)
        else:
            self.fixef = pd.DataFrame(betas, columns=x.columns)
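
Both excerpts above come from Lm2's fit routine, where sign-flip permutations replace the analytic p-values. Here is a hedged sketch of how that path might be triggered from user code; the group keyword and the permute, perm_on, and n_jobs arguments are assumptions based on the variable names used in the excerpts.

import os
import pandas as pd
from pymer4.models import Lm2
from pymer4.utils import get_resource_path

df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))

# Two-stage regression: one OLS fit per Group, then inference on the betas,
# with sign-flip permutation p-values computed on the coefficients
model = Lm2("DV ~ IV2", group="Group", data=df)
model.fit(permute=500, perm_on="coef", n_jobs=2)
print(model.coefs)
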
github ejolly / pymer4 / docs / _build / html / _downloads / basic_usage.py
This tutorial illustrates how to estimate a simple model with one continuous predictor. We're going to fit a model with random intercepts, slopes, and their correlations. 95% confidence intervals will be estimated using the Wald method, assuming a quadratic likelihood surface.

"""
##########################################
# Import module and check out data
# --------------------------------
#

import os
import pandas as pd
import seaborn as sns
from pymer4.models import Lmer
from pymer4.utils import get_resource_path

df = pd.read_csv(os.path.join(get_resource_path(),'sample_data.csv'))
df.head()
print("Hello World!")

#######################################################################
# Estimate a model
# ----------------
#
# Initialize linear model with random intercepts, slopes and their correlation

model = Lmer('DV ~ IV2 + (IV2|Subject)',data=df)

#########################################################################
# Fit it

model.fit()
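
The docstring above says the 95% confidence intervals are estimated with the Wald method. If you want bootstrapped intervals instead, a hedged sketch follows; the conf_int and n_boot keyword names are assumptions about Lmer.fit's API rather than something shown in the snippet.

# Re-fit the same model with bootstrapped 95% CIs instead of Wald CIs
model.fit(conf_int="boot", n_boot=500)
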
github nimh-mbdu / sklearn-lmer / examples / plot_lmerregressor.py
#%%
# Imports
# -------

import numpy as np
from matplotlib import pyplot as plt
from sklmer import LmerRegressor
import pandas as pd
import os
from pymer4.utils import get_resource_path

#%%
# Load and prepare data
# ---------------------
# split out training and test data
df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))
df = df.reset_index().rename(columns={'index':'orig_index'})
test = df.groupby('Group').apply(lambda x: x.sample(frac=0.2)).reset_index(drop=True)
train = df.loc[~df.orig_index.isin(test.orig_index), :]

#%%
# Fit and predict with some different estimator options
# -----------------------------------------------------
# First with defaults.

df_estimator = LmerRegressor("DV ~ IV2 + (IV2|Group)", X_cols=df.columns)
df_estimator.fit(data=train)
df_preds = df_estimator.predict(test)

#%%
# Then use random effects information in the prediction.
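
A hedged sketch of what this step might look like, treating predict_rfx as a hypothetical keyword for folding the fitted Group-level random effects into predictions (check the sklearn-lmer docs for the real parameter name):

# Hypothetical: an estimator that uses Group-level random effects at predict time.
# 'predict_rfx' is an assumed parameter name, not confirmed by the snippet above.
rfx_estimator = LmerRegressor("DV ~ IV2 + (IV2|Group)", X_cols=df.columns, predict_rfx=True)
rfx_estimator.fit(data=train)
rfx_preds = rfx_estimator.predict(test)

# Compare held-out error for the two approaches
rmse_default = np.sqrt(np.mean((test["DV"].values - df_preds) ** 2))
rmse_rfx = np.sqrt(np.mean((test["DV"].values - rfx_preds) ** 2))
print(f"RMSE default: {rmse_default:.3f}   RMSE with random effects: {rmse_rfx:.3f}")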