How to use the linearmodels.utility.WaldTestStatistic function in linearmodels

To help you get started, we've selected a few linearmodels examples based on popular ways WaldTestStatistic is used in public projects.
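
Before working through the project snippets below, here is a minimal, self-contained sketch of building a WaldTestStatistic by hand. It assumes the constructor signature used throughout the excerpts, WaldTestStatistic(stat, null, df, df_denom=None, name=None), together with the stat and pval attributes they rely on.

from linearmodels.utility import WaldTestStatistic

# Hand-built Wald statistic: value 12.3 with 3 restrictions under the null.
wald = WaldTestStatistic(12.3, 'All slope coefficients are zero', 3,
                         name='Example Wald test')
print(wald.stat)   # 12.3
print(wald.pval)   # p-value (chi-squared with 3 df when df_denom is not given)
print(wald)        # formatted summary of the test

Supplying df_denom instead switches the reference distribution to an F distribution, which is the pattern the debiased branches in the snippets below follow.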


From bashtage/linearmodels: linearmodels/iv/results.py
        from linearmodels.iv import IVGMM

        # J-statistic from the alternative specification (exog_e / endog_e)
        mod = IVGMM(dependent, exog_e, endog_e, instruments)
        res_e = mod.fit(cov_type=self.cov_type, **self.cov_config)
        j_e = res_e.j_stat.stat

        # Re-estimate the original model using the matching block of the
        # alternative model's weight matrix, and compute its J-statistic
        x = self.model._x
        y = self.model._y
        z = self.model._z
        nz = z.shape[1]
        weight_mat_c = res_e.weight_matrix.values[:nz, :nz]
        params_c = mod.estimate_parameters(x, y, z, weight_mat_c)
        j_c = self.model._j_statistic(params_c, weight_mat_c).stat

        # The C-statistic is the difference between the two J-statistics
        stat = j_e - j_c
        df = exog_e.shape[1] - exog.shape[1]
        return WaldTestStatistic(stat, null, df, name='C-statistic')
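
The excerpt above builds the C-statistic as the difference of two GMM J-statistics. From user code the same test is usually reached through the fitted IVGMM results; a sketch with simulated data, assuming the results object exposes it as c_stat:

import numpy as np
from linearmodels.iv import IVGMM

# Simulated data: a constant, one endogenous regressor, two instruments.
rng = np.random.default_rng(0)
n = 500
instruments = rng.standard_normal((n, 2))
exog = np.ones((n, 1))
endog = instruments @ np.array([[0.8], [0.4]]) + rng.standard_normal((n, 1))
dep = 1.0 + 0.5 * endog + rng.standard_normal((n, 1))

res = IVGMM(dep, exog, endog, instruments).fit()
print(res.c_stat())  # WaldTestStatistic named 'C-statistic' (method name assumed)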

From bashtage/linearmodels: linearmodels/utility.py
    @property
    def pval(self):
        """Always returns np.NaN"""
        return np.NaN

    @property
    def critical_values(self):
        """Always returns None"""
        return None

    def __str__(self):
        msg = "Invalid test statistic\n{reason}\n{name}"
        name = '' if self._name is None else self._name
        return msg.format(name=name, reason=self._reason)


class InapplicableTestStatistic(WaldTestStatistic):
    """
    Class returned if a requested test is not applicable for a specification

    Parameters
    ----------
    reason : str
        Explanation why test is invalid
    name : str, optional
        Name of test

    See Also
    --------
    WaldTestStatistic
    """

    def __init__(self, *, reason=None, name=None):
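
These classes stand in for a WaldTestStatistic when a test cannot be computed, as the model.py excerpts further down show. A short sketch of how an invalid statistic behaves, using the same constructor call that appears later on this page:

from linearmodels.utility import InvalidTestStatistic

its = InvalidTestStatistic('Covariance is singular, possibly due to constraints.',
                           name='Equation F-statistic')
print(its.pval)             # nan: pval always returns NaN
print(its.critical_values)  # None
print(its)                  # 'Invalid test statistic' followed by the reason and name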

From bashtage/linearmodels: linearmodels/system/model.py
        # Wald test that all parameters except the constant are zero
        params = stats.params[sel]
        df = params.shape[0]
        nobs = stats.nobs
        null = 'All parameters ex. constant are zero'
        name = 'Equation F-statistic'
        try:
            stat = float(params.T @ inv(cov) @ params)

        except np.linalg.LinAlgError:
            return InvalidTestStatistic('Covariance is singular, possibly due '
                                        'to constraints.', name=name)

        if debiased:
            # Debiased: scale by df and supply df_denom to obtain an F-statistic
            total_reg = np.sum(list(map(lambda s: s.shape[1], self._wx)))
            df_denom = len(self._wx) * nobs - total_reg
            wald = WaldTestStatistic(stat / df, null, df, df_denom=df_denom,
                                     name=name)
        else:
            # Otherwise report the asymptotic chi-squared form
            return WaldTestStatistic(stat, null=null, df=df, name=name)

        return wald
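
The branch above packages the statistic two ways: the asymptotic chi-squared form, or, when debiased, the statistic divided by df with df_denom supplied so that an F distribution is used. A hand-rolled sketch of the same pattern with purely illustrative numbers:

import numpy as np
from numpy.linalg import inv
from linearmodels.utility import WaldTestStatistic

# Illustrative estimates and covariance for three tested parameters.
params = np.array([0.50, -1.20, 0.80])
cov = np.diag([0.04, 0.09, 0.01])

stat = float(params.T @ inv(cov) @ params)
df = params.shape[0]
null = 'All parameters ex. constant are zero'

chi2_version = WaldTestStatistic(stat, null, df, name='Equation chi2-statistic')
f_version = WaldTestStatistic(stat / df, null, df, df_denom=200,
                              name='Equation F-statistic')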

From bashtage/linearmodels: linearmodels/panel/model.py
        # Pooled F-statistic: test that the included effects are jointly zero
        if self.entity_effects or self.time_effects or self.other_effects:
            wy, wx = root_w * self.dependent.values2d, root_w * self.exog.values2d
            df_num, df_denom = (df_model - wx.shape[1]), df_resid
            if not self.has_constant:
                # Correction for when the model does not have an explicit constant
                wy -= root_w * lstsq(root_w, wy)[0]
                wx -= root_w * lstsq(root_w, wx)[0]
                df_num -= 1
            # Residuals and residual SS from the pooled (no-effects) regression
            weps_pooled = wy - wx @ lstsq(wx, wy)[0]
            resid_ss_pooled = float(weps_pooled.T @ weps_pooled)

            # F-statistic comparing the pooled fit with the effects (within) fit
            num = (resid_ss_pooled - resid_ss) / df_num
            denom = resid_ss / df_denom
            stat = num / denom
            f_pooled = WaldTestStatistic(stat, 'Effects are zero',
                                         df_num, df_denom=df_denom,
                                         name='Pooled F-statistic')
            res.update(f_pooled=f_pooled)
            effects = pd.DataFrame(eps_effects - eps, columns=['estimated_effects'],
                                   index=self.dependent.index)
        else:
            effects = pd.DataFrame(np.zeros_like(eps), columns=['estimated_effects'],
                                   index=self.dependent.index)

        res.update(dict(df_resid=df_resid, df_model=df_model, nobs=y.shape[0],
                        residual_ss=resid_ss, total_ss=total_ss, wresids=weps, resids=eps,
                        r2=r2, entity_effects=self.entity_effects, time_effects=self.time_effects,
                        other_effects=self.other_effects, sigma2_eps=sigma2_eps,
                        sigma2_effects=sigma2_effects, rho=rho, r2_ex_effects=r2_ex_effects,
                        effects=effects, fitted=fitted, idiosyncratic=idiosyncratic))
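
When effects are included, the pooled F-statistic built above is attached to the fitted results. A sketch with simulated panel data, assuming it is exposed under the f_pooled name used in res.update above:

import numpy as np
import pandas as pd
from linearmodels.panel import PanelOLS

# Simulated balanced panel: 50 entities observed over 10 periods.
rng = np.random.default_rng(0)
entities = np.repeat(np.arange(50), 10)
times = np.tile(pd.date_range('2000-01-01', periods=10, freq='D'), 50)
index = pd.MultiIndex.from_arrays([entities, times], names=['entity', 'time'])
x = rng.standard_normal((500, 2))
y = x @ np.array([1.0, -0.5]) + rng.standard_normal(500)
data = pd.DataFrame(np.c_[y, x], index=index, columns=['y', 'x1', 'x2'])

res = PanelOLS(data['y'], data[['x1', 'x2']], entity_effects=True).fit()
print(res.f_pooled)  # WaldTestStatistic with null 'Effects are zero' (attribute name assumed)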

From bashtage/linearmodels: linearmodels/panel/model.py
        weps_const = y  # default: without a constant, no demeaning of the null model
        num_df = x.shape[1]
        name = 'Model F-statistic (homoskedastic)'
        if self.has_constant:
            if num_df == 1:
                return InvalidTestStatistic('Model contains only a constant',
                                            name=name)

            num_df -= 1
            # With a constant, demean using the weighted mean
            weps_const = y - float((root_w.T @ y) / (root_w.T @ root_w))

        resid_ss = weps.T @ weps
        # Compare the constant-only (null) residual SS with the model residual SS
        num = float(weps_const.T @ weps_const - resid_ss)
        denom = resid_ss
        denom_df = df_resid
        stat = float((num / num_df) / (denom / denom_df))
        return WaldTestStatistic(stat, null='All parameters ex. constant not zero',
                                 df=num_df, df_denom=denom_df, name=name)
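
The same restricted-versus-unrestricted construction can be reproduced with plain numpy. A sketch with simulated data (the variable names here are illustrative, not the library's internals):

import numpy as np
from linearmodels.utility import WaldTestStatistic

rng = np.random.default_rng(0)
n = 200
x = np.c_[np.ones(n), rng.standard_normal((n, 3))]   # constant plus 3 regressors
y = x @ np.array([1.0, 0.5, -0.25, 0.0]) + rng.standard_normal(n)

beta = np.linalg.lstsq(x, y, rcond=None)[0]
eps = y - x @ beta          # unrestricted residuals
eps_const = y - y.mean()    # residuals of the constant-only (null) model

num_df = x.shape[1] - 1
denom_df = n - x.shape[1]
stat = float(((eps_const @ eps_const - eps @ eps) / num_df) /
             ((eps @ eps) / denom_df))

ftest = WaldTestStatistic(stat, null='All slope coefficients are zero',
                          df=num_df, df_denom=denom_df,
                          name='Model F-statistic (homoskedastic)')
print(ftest)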

From bashtage/linearmodels: linearmodels/panel/model.py
        # Deferred Wald test of the selected (non-constant) parameters
        def deferred_f():
            test_params = params[sel]
            test_cov = cov_est.cov[sel][:, sel]
            test_stat = test_params.T @ np.linalg.inv(test_cov) @ test_params
            test_stat = float(test_stat)
            df = sel.sum()
            null = 'All parameters ex. constant not zero'

            if debiased:
                wald = WaldTestStatistic(test_stat / df, null, df, df_resid,
                                         name=name)
            else:
                wald = WaldTestStatistic(test_stat, null, df, name=name)
            return wald

From bashtage/linearmodels: linearmodels/asset_pricing/model.py
        jacobian = self._jacobian(betas, lam, alphas)

        if cov_type not in ('robust', 'heteroskedastic', 'kernel'):
            raise ValueError('Unknown weight: {0}'.format(cov_type))
        if cov_type in ('robust', 'heteroskedastic'):
            cov_est = HeteroskedasticCovariance
        else:  # 'kernel':
            cov_est = KernelCovariance
        cov_est = cov_est(moments, jacobian=jacobian, center=False,
                          debiased=debiased, df=fc.shape[1], **cov_config)

        # VCV
        full_vcv = cov_est.cov
        alpha_vcv = full_vcv[s2:, s2:]
        stat = float(alphas.T @ np.linalg.pinv(alpha_vcv) @ alphas)
        jstat = WaldTestStatistic(stat, 'All alphas are 0', nport - nf - nrf,
                                  name='J-statistic')

        total_ss = ((p - p.mean(0)[None, :]) ** 2).sum()
        residual_ss = (eps ** 2).sum()
        r2 = 1 - residual_ss / total_ss
        rp = lam
        rp_cov = full_vcv[s1:s2, s1:s2]
        betas = betas if excess_returns else betas[:, 1:]
        params = np.c_[alphas, betas]
        param_names = []
        for portfolio in self.portfolios.cols:
            param_names.append('alpha-{0}'.format(portfolio))
            for factor in self.factors.cols:
                param_names.append('beta-{0}-{1}'.format(portfolio, factor))
        if not excess_returns:
            param_names.append('lambda-risk_free')
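
The J-statistic above is a Wald test that every pricing error (alpha) is zero, with degrees of freedom nport - nf - nrf in the excerpt. A pure-numpy sketch of that construction with illustrative inputs (the risk-free adjustment is dropped here for brevity):

import numpy as np
from linearmodels.utility import WaldTestStatistic

# Illustrative pricing errors for 10 portfolios priced by 3 factors.
rng = np.random.default_rng(0)
nport, nf = 10, 3
alphas = 0.01 * rng.standard_normal((nport, 1))
alpha_vcv = 0.0001 * np.eye(nport)

stat = float(alphas.T @ np.linalg.pinv(alpha_vcv) @ alphas)
jstat = WaldTestStatistic(stat, 'All alphas are 0', nport - nf,
                          name='J-statistic')
print(jstat)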