How to use the ngboost.scores.LogScore class in ngboost

To help you get started, we’ve selected a few ngboost examples based on popular ways LogScore is used in public projects.

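LogScore is the scoring rule implementing maximum likelihood: it supplies the negative log-likelihood (score), its gradient (d_score), and the Fisher information (metric) that NGBoost needs for natural-gradient boosting. In typical use you simply pass it as the Score argument of an NGBoost estimator. A minimal sketch using the standard ngboost API (the dataset and hyperparameter choices are illustrative):

from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split

from ngboost import NGBRegressor
from ngboost.distns import Normal
from ngboost.scores import LogScore

X, y = load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# LogScore (maximum likelihood) is passed as the Score argument;
# Normal is the predictive distribution whose parameters are boosted.
ngb = NGBRegressor(Dist=Normal, Score=LogScore, n_estimators=200, verbose=False)
ngb.fit(X_train, y_train)

dists = ngb.pred_dist(X_test)      # full predictive distributions
print(dists.params["loc"][:5])     # predicted means for the first five rows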

Example from stanfordmlgroup/ngboost: ngboost/distns/bivariate_normal.py

import scipy as sp
import numpy as np
from scipy.stats import norm as dist
from ngboost.distns import RegressionDistn
from ngboost.scores import LogScore, CRPScore

eps = 1e-6


class BivariateNormalCensoredScore(LogScore):

    def score(self, Y):
        # Negative log likelihood for right-censored bivariate data: the
        # marginal log density of the time, the conditional log survival
        # probability, and a censoring term.
        E, T = Y["Event"], Y["Time"]
        nll = (
            -self.marginal_dist(Y).logpdf(T)
            - self.conditional_dist(Y).logsf(T)
            + self.censoring_prob(Y)
        )
        return nll

    def d_score(self, Y):
        # Gradient of the score with respect to the internal parameters.
        D = -self.D_conditional_logsf(Y) - self.D_marginal_logpdf(Y) + self.D_censoring_prob(Y)
        return D

    def metric(self):
        # Riemannian metric used for the natural gradient.
        J = np.zeros((2, 2, 3, self.N))
        J[0, 0, 0] = 2 * self.var0
        J[0, 1, 0] = J[1, 0, 0] = self.cor
        J[1, 1, 1] = 2 * self.var1
        J[0, 1, 1] = J[1, 0, 1] = self.cor
        # ... (method continues in the source file)
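The three methods above are the complete interface a LogScore implementation exposes: score returns the per-example negative log-likelihood, d_score its gradient with respect to the distribution's internal parameters, and metric the Riemannian metric (here built from the Fisher information) that converts that gradient into a natural gradient. A rough sketch of that conversion step, with assumed shapes of N examples and P internal parameters:

import numpy as np

def natural_gradient(grad, metric):
    # grad: (N, P) per-example gradients from d_score()
    # metric: (N, P, P) per-example metric tensors from metric()
    # Solve metric @ nat_grad = grad for each example; this preconditioning
    # is what makes the boosting updates invariant to reparameterization.
    return np.linalg.solve(metric, grad[..., np.newaxis]).squeeze(-1)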

Example from stanfordmlgroup/ngboost: ngboost/distns/normal.py

    def sample(self, m):
        # Draw m samples from the fitted distribution.
        return np.array([self.rvs() for i in range(m)])

    def __getattr__(
        self, name
    ):  # gives us Normal.mean() required for RegressionDist.predict()
        if name in dir(self.dist):
            return getattr(self.dist, name)
        return None

    @property
    def params(self):
        return {"loc": self.loc, "scale": self.scale}


### Fixed Variance Normal ###
class NormalFixedVarLogScore(LogScore):
    def score(self, Y):
        # Negative log likelihood under the fitted normal.
        return -self.dist.logpdf(Y)

    def d_score(self, Y):
        # Gradient with respect to the single internal parameter (loc);
        # the variance is held fixed.
        D = np.zeros((len(Y), 1))
        D[:, 0] = (self.loc - Y) / self.var
        return D

    def metric(self):
        # Closed-form Fisher information for loc, with a small jitter for
        # numerical stability.
        FI = np.zeros((self.var.shape[0], 1, 1))
        FI[:, 0, 0] = 1 / self.var + 1e-5
        return FI


class NormalFixedVarCRPScore(CRPScore):
    def score(self, Y):
        # ... (continues in the source file)
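NormalFixedVar, the fixed-variance normal these scores belong to, can be swapped in wherever Normal is accepted. A one-line sketch, assuming NormalFixedVar is exported from ngboost.distns as in recent versions:

from ngboost import NGBRegressor
from ngboost.distns import NormalFixedVar

# Only the mean is boosted; the variance is held fixed, which is why the
# LogScore gradient above has a single column.
ngb = NGBRegressor(Dist=NormalFixedVar)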

Example from stanfordmlgroup/ngboost: ngboost/api.py

    def __init__(
        self,
        Dist=Bernoulli,
        Score=LogScore,  # LogScore (maximum likelihood) is the default rule
        Base=default_tree_learner,
        natural_gradient=True,
        n_estimators=500,
        learning_rate=0.01,
        minibatch_frac=1.0,
        col_sample=1.0,
        verbose=True,
        verbose_eval=100,
        tol=1e-4,
        random_state=None,
    ):
        assert issubclass(
            Dist, ClassificationDistn
        ), f"{Dist.__name__} is not useable for classification."
        super().__init__(
            Dist,
            # ... (remaining arguments not shown in this excerpt)
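This is the constructor of NGBClassifier in ngboost/api.py: Score defaults to LogScore, so classification uses the maximum-likelihood rule out of the box. A short sketch on a toy dataset (dataset choice is illustrative):

from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

from ngboost import NGBClassifier
from ngboost.distns import Bernoulli
from ngboost.scores import LogScore

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Score=LogScore is already the default; it is spelled out for clarity.
clf = NGBClassifier(Dist=Bernoulli, Score=LogScore, verbose=False)
clf.fit(X_train, y_train)
print(clf.predict_proba(X_test)[:3])   # per-class probabilities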

Example from stanfordmlgroup/ngboost: ngboost/distns/normal.py

from ngboost.distns import RegressionDistn
from ngboost.scores import LogScore, CRPScore
import scipy as sp
import numpy as np
from scipy.stats import norm as dist


class NormalLogScore(LogScore):
    def score(self, Y):
        # Negative log likelihood under the fitted normal.
        return -self.dist.logpdf(Y)

    def d_score(self, Y):
        # Gradient with respect to the internal parameters (loc, log scale).
        D = np.zeros((len(Y), 2))
        D[:, 0] = (self.loc - Y) / self.var
        D[:, 1] = 1 - ((self.loc - Y) ** 2) / self.var
        return D

    def metric(self):
        # Closed-form Fisher information; diagonal because loc and log scale
        # are orthogonal parameters of the normal. A small jitter is added
        # for numerical stability.
        FI = np.zeros((self.var.shape[0], 2, 2))
        FI[:, 0, 0] = 1 / self.var + 1e-5
        FI[:, 1, 1] = 2
        return FI
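The gradient here is taken with respect to (loc, log scale), ngboost's internal parameterization of the normal. A self-contained sanity check of the closed forms against a central finite difference (the point values are arbitrary):

import numpy as np
from scipy.stats import norm

loc, log_scale, y = 0.3, -0.2, 1.1
scale = np.exp(log_scale)

# Closed forms from NormalLogScore.d_score above.
analytic = np.array([
    (loc - y) / scale**2,              # d NLL / d loc
    1 - ((loc - y) ** 2) / scale**2,   # d NLL / d log(scale)
])

def nll(m, s):
    return -norm.logpdf(y, loc=m, scale=np.exp(s))

h = 1e-6
numeric = np.array([
    (nll(loc + h, log_scale) - nll(loc - h, log_scale)) / (2 * h),
    (nll(loc, log_scale + h) - nll(loc, log_scale - h)) / (2 * h),
])

print(np.allclose(analytic, numeric, atol=1e-4))   # True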

Example from stanfordmlgroup/ngboost: ngboost/distns/lognormal.py

import scipy as sp
import numpy as np
from scipy.stats import lognorm as dist
from ngboost.distns import RegressionDistn
from ngboost.scores import LogScore, CRPScore


class LogNormalLogScoreCensored(LogScore):
    def score(self, Y):
        # Censored negative log likelihood: censored observations (E == 0)
        # contribute the log survival probability, uncensored observations
        # (E == 1) the log density.
        E = Y["Event"]
        T = Y["Time"]
        cens = (1 - E) * np.log(1 - self.dist.cdf(T) + self.eps)
        uncens = E * self.dist.logpdf(T)
        return -(cens + uncens)

    def d_score(self, Y):
        E = Y["Event"][:, np.newaxis]
        T = Y["Time"]
        lT = np.log(T)
        Z = (lT - self.loc) / self.scale

        # Gradient for the uncensored observations with respect to
        # (loc, log scale); same form as the normal log score on log(T).
        D_uncens = np.zeros((self.loc.shape[0], 2))
        D_uncens[:, 0] = (self.loc - lT) / (self.scale ** 2)
        D_uncens[:, 1] = 1 - ((self.loc - lT) ** 2) / (self.scale ** 2)
        # ... (method continues in the source file)

Example from stanfordmlgroup/ngboost: ngboost/distns/exponential.py

import scipy as sp
import numpy as np
from scipy.stats import expon as dist
from ngboost.distns import RegressionDistn
from ngboost.scores import LogScore, CRPScore

eps = 1e-10


class ExponentialLogScore(LogScore):
    def score(self, Y):
        # Censored negative log likelihood: survival term for censored
        # observations, density term for observed events.
        E, T = Y["Event"], Y["Time"]
        cens = (1 - E) * np.log(1 - self.dist.cdf(T) + eps)
        uncens = E * self.dist.logpdf(T)
        return -(cens + uncens)

    def d_score(self, Y):
        # Gradient with respect to the internal parameter log(scale).
        E, T = Y["Event"], Y["Time"]
        cens = (1 - E) * T.squeeze() / self.scale
        uncens = E * (-1 + T.squeeze() / self.scale)
        return -(cens + uncens).reshape((-1, 1))

    def metric(self):
        # The Fisher information for log(scale) is identically 1.
        FI = np.ones_like(self.scale)
        return FI[:, np.newaxis, np.newaxis]
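Censored scores like this one back NGBoost's survival interface: NGBSurvival takes the covariates, observed times, and event indicators separately in fit. A minimal sketch on synthetic data (all values illustrative):

import numpy as np
from ngboost import NGBSurvival
from ngboost.distns import Exponential

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
T = rng.exponential(scale=2.0, size=200) + 0.01   # observed times, > 0
E = rng.integers(0, 2, size=200)                  # 1 = event, 0 = censored

# The censored LogScore above is selected for the Exponential distribution.
ngb = NGBSurvival(Dist=Exponential, n_estimators=100, verbose=False)
ngb.fit(X, T, E)
print(ngb.pred_dist(X[:3]).params["scale"])       # predicted scale parameters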

Example from stanfordmlgroup/ngboost: ngboost/distns/bivariate_lognormal.py

import scipy as sp
import numpy as np
from scipy.stats import norm as dist
from ngboost.distns import RegressionDistn
from ngboost.scores import LogScore, CRPScore

eps = 1e-6


class BivariateLogNormalCensoredScore(LogScore):

    def score(self, Y):
        # Same structure as the bivariate normal censored score above,
        # applied to log(Time).
        E, T = Y["Event"], np.log(Y["Time"])
        nll = (
            -self.marginal_dist(Y).logpdf(T)
            - self.conditional_dist(Y).logsf(T)
            + self.censoring_prob(Y)
        )
        return nll

    def d_score(self, Y):
        # Gradient of the score with respect to the internal parameters.
        D = -self.D_conditional_logsf(Y) - self.D_marginal_logpdf(Y) + self.D_censoring_prob(Y)
        return D

    def metric(self):
        # Riemannian metric used for the natural gradient.
        J = np.zeros((2, 2, 3, self.N))
        J[0, 0, 0] = 2 * self.var0
        J[0, 1, 0] = J[1, 0, 0] = self.cor
        J[1, 1, 1] = 2 * self.var1
        J[0, 1, 1] = J[1, 0, 1] = self.cor
        # ... (method continues in the source file)

Example from stanfordmlgroup/ngboost: ngboost/distns/categorical.py

from ngboost.distns import ClassificationDistn
from ngboost.scores import LogScore, CRPScore
import numpy as np
import scipy as sp
import scipy.special


class CategoricalLogScore(LogScore):
    def score(self, Y):
        # Negative log probability of the true class; probs has shape (K, N).
        return -np.log(self.probs[Y, range(len(Y))])

    def d_score(self, Y):
        # Gradient with respect to the logits of classes 1..K-1
        # (class 0 is the reference category).
        return (self.probs.T - np.eye(self.K_)[Y])[:, 1 : self.K_]

    def metric(self):
        # Fisher information of the categorical distribution:
        # FI[i, j, k] = p_j * (delta_jk - p_k) for each example i.
        FI = -np.einsum(
            "ji,ki->ijk", self.probs[1 : self.K_, :], self.probs[1 : self.K_, :]
        )
        d = np.einsum("jii->ij", FI)  # writable view of the diagonals
        d[:] += self.probs[1 : self.K_, :]
        return FI

    # a test:
    # if k==j:
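For multiclass problems, ngboost exposes k_categorical to build a Categorical distribution with a fixed number of classes, scored by the CategoricalLogScore above. A brief sketch (dataset and class count are illustrative):

from sklearn.datasets import load_iris
from ngboost import NGBClassifier
from ngboost.distns import k_categorical

X, y = load_iris(return_X_y=True)

# k_categorical(3) builds a 3-class Categorical distribution class.
clf = NGBClassifier(Dist=k_categorical(3), verbose=False)
clf.fit(X, y)
print(clf.predict_proba(X[:3]))   # probabilities over the three classes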