How to use the thinc.describe.attributes function in thinc

To help you get started, we've selected a few thinc examples, based on popular ways it is used in public projects.
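All of the snippets below follow the same pattern, so here is a minimal, self-contained sketch first. It is hedged: it assumes the thinc 7.x layout, where Model lives in thinc.neural._classes.model and the describe helpers live in thinc.describe, and the Scale layer itself is hypothetical. describe.attributes declares each model attribute with a name, a docstring, a shape callback, and an optional initializer:

from thinc import describe
from thinc.describe import Dimension, Weights, Gradient
from thinc.neural._classes.model import Model


def _init_to_zero(W, ops):
    W.fill(0.0)


@describe.attributes(
    nO=Dimension("Output size"),
    W=Weights("Scaling vector", lambda obj: (obj.nO,), _init_to_zero),
    d_W=Gradient("W"),
)
class Scale(Model):
    """Hypothetical layer: scales its input elementwise by W."""

    name = "scale"

    def __init__(self, nO=None, **kwargs):
        Model.__init__(self, **kwargs)
        self.nO = nO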


explosion/thinc · thinc/neural/_classes/affine.py
from .model import Model
from ... import describe
from ...describe import Dimension, Synapses, Biases, Gradient
from ... import check
from ...check import has_shape


def _set_dimensions_if_needed(model, X, y=None):
    if model.nI is None:
        model.nI = X.shape[1]
    if model.nO is None and y is not None:
        if len(y.shape) == 2:
            model.nO = y.shape[1]
        else:
            model.nO = int(y.max()) + 1


@describe.on_data(_set_dimensions_if_needed)
@describe.attributes(
    nB=Dimension("Batch size"),
    nI=Dimension("Input size"),
    nO=Dimension("Output size"),
    W=Synapses(
        "Weights matrix",
        lambda obj: (obj.nO, obj.nI),
        lambda W, ops: ops.xavier_uniform_init(W),
    ),
    b=Biases("Bias vector", lambda obj: (obj.nO,)),
    d_W=Gradient("W"),
    d_b=Gradient("b"),
)
class Affine(Model):
    """Computes the linear transform Y = (W @ X) + b."""

    name = "affine"

explosion/thinc · thinc/neural/_classes/embed.py
    @contextlib.contextmanager
    def use_params(self, params):
        backup = None
        weights = self._mem.weights
        if self.id in params:
            param = params[self.id]
            backup = weights.copy()
            weights[:] = param
        yield
        if backup is not None:
            weights[:] = backup
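
A hedged usage note (embed, optimizer, evaluate, dev_X, and dev_y are hypothetical names): use_params swaps in alternate weights, such as a parameter average, for the duration of the block, then restores the backup on exit:

with embed.use_params(optimizer.averages):
    dev_score = evaluate(embed, dev_X, dev_y)
# the original weights are restored here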


@describe.on_data(LSUVinit)
@describe.attributes(
    nM=Dimension("Vector dimensions"),
    nV=Dimension("Number of vectors"),
    nO=Dimension("Size of output"),
    W=Synapses(
        "A projection matrix, to change vector dimensionality",
        lambda obj: (obj.nO, obj.nM),
        lambda W, ops: ops.xavier_uniform_init(W),
    ),
    vectors=Weights(
        "Embedding table", lambda obj: (obj.nV, obj.nM), _uniform_init(-0.1, 0.1)
    ),
    d_W=Gradient("W"),
    d_vectors=Gradient("vectors"),
)
class Embed(Model):
    name = "embed"

explosion/thinc · thinc/neural/_classes/difference.py
    model._layers.append(similarity)

    # `layer` below is a closure variable from the enclosing factory
    # function, which this excerpt truncates.
    def on_data(self, X, y):
        input1, input2 = zip(*X)
        for hook in layer.on_data_hooks:
            hook(layer, input1, y)

    model.on_data_hooks.append(on_data)
    return model


def unit_init(W, ops):
    W.fill(1)


@describe.attributes(
    nO=Dimension("Output size"),
    W=Synapses("Weights matrix", lambda obj: (obj.nO,), unit_init),
    d_W=Gradient("W"),
)
class CauchySimilarity(Model):
    # From Chen (2013)
    def __init__(self, length):
        Model.__init__(self)
        self.nO = length

    def begin_update(self, vec1_vec2, drop=0.0):
        weights = self.W
        vec1, vec2 = vec1_vec2
        diff = vec1 - vec2
        square_diff = diff ** 2
        total = (weights * square_diff).sum(axis=1)
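
The excerpt stops mid-method; per Chen (2013), the Cauchy similarity then maps total to 1 / (1 + total). A hedged numpy sketch of the forward pass (shapes illustrative):

import numpy as np

W = np.ones((300,), dtype="f")      # unit_init fills the weights with 1
vec1 = np.random.rand(8, 300).astype("f")
vec2 = np.random.rand(8, 300).astype("f")
total = (W * (vec1 - vec2) ** 2).sum(axis=1)
sim = 1.0 / (1.0 + total)           # one similarity score per pair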

explosion/thinc · thinc/neural/_classes/attention.py
# coding: utf8
from __future__ import unicode_literals

from ... import describe
from ...describe import Dimension, Synapses, Gradient
from .model import Model


@describe.attributes(
    nO=Dimension("Output size"),
    Q=Synapses(
        "Learned 'query' vector",
        lambda obj: (obj.nO, 1),
        lambda Q, ops: ops.normal_init(Q, Q.shape[0]),
    ),
    dQ=Gradient("Q"),
)
class ParametricAttention(Model):
    """Weight inputs by similarity to a learned vector"""

    name = "para-attn"

    def __init__(self, nO=None, hard=False, **kwargs):
        Model.__init__(self, **kwargs)
        self.nO = nO
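
A hedged sketch of the idea in the docstring (a dense-batch simplification with illustrative sizes; the real layer works on ragged sequences):

import numpy as np

Q = np.random.randn(300, 1).astype("f")  # learned "query" vector, (nO, 1)
X = np.random.randn(8, 300).astype("f")  # batch of inputs
attention = X.dot(Q)                     # similarity of each row to Q
weighted = X * attention                 # inputs weighted by that similarity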

explosion/thinc · thinc/neural/_classes/mish.py
from .model import Model
from ... import describe
from ...describe import Dimension, Synapses, Biases, Gradient


def _set_dimensions_if_needed(model, X, y=None):
    if model.nI is None:
        model.nI = X.shape[1]
    if model.nO is None and y is not None:
        if len(y.shape) == 2:
            model.nO = y.shape[1]
        else:
            model.nO = int(y.max()) + 1


@describe.on_data(_set_dimensions_if_needed)
@describe.attributes(
    nB=Dimension("Batch size"),
    nI=Dimension("Input size"),
    nO=Dimension("Output size"),
    W=Synapses(
        "Weights matrix",
        lambda obj: (obj.nO, obj.nI),
        lambda W, ops: ops.xavier_uniform_init(W),
    ),
    b=Biases("Bias vector", lambda obj: (obj.nO,)),
    d_W=Gradient("W"),
    d_b=Gradient("b"),
)
class Mish(Model):
    """Dense layer with mish activation.

    https://arxiv.org/pdf/1908.08681.pdf
    """

    name = "mish"
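
For reference, the activation from the paper linked above is mish(x) = x * tanh(softplus(x)); a quick numpy sketch:

import numpy as np

def mish(x):
    # softplus(x) = ln(1 + exp(x)); log1p keeps small values accurate
    return x * np.tanh(np.log1p(np.exp(x)))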

explosion/thinc · thinc/neural/_classes/rnn.py
            return d_acts, d_prev_cells

        return state, lstm_gates_bwd

    return layerize(lstm_gates_fwd)


def _uniform_init(lo, hi):
    def wrapped(W, ops):
        copy_array(W, ops.xp.random.uniform(lo, hi, W.shape))

    return wrapped
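
The factory above returns the (W, ops) callables that Synapses and Weights expect as initializers; the Embed snippet earlier passes _uniform_init(-0.1, 0.1) for its embedding table in exactly this way.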


@describe.attributes(
    nO=Dimension("Output size"),
    nI=Dimension("Input size"),
    W=Synapses(
        "Weights matrix",
        lambda obj: (obj.nO * 4, obj.nI + obj.nO),
        lambda W, ops: copy_array(W, svd_orthonormal(W.shape)),
    ),
    b=Biases("Bias vector", lambda obj: (obj.nO * 4,)),
    forget_bias=Biases(
        "Bias for forget gates",
        lambda obj: (obj.nO,),
        lambda b, ops: copy_array(b, ops.xp.ones(b.shape, dtype=b.dtype)),
    ),
    d_W=Gradient("W"),
    d_b=Gradient("b"),
    d_forget_bias=Gradient("forget_bias"),

explosion/thinc · thinc/neural/_classes/window_encode.py
def LSUVinit(model, positions, y=None):
    ids = []
    for id_, occurs in positions.items():
        ids.extend(id_ for _ in occurs)
    ids = model.ops.asarray(ids, dtype='i')
    for hook in model.embed.on_data_hooks:
        hook(model.embed, ids, y)
    return do_lsuv(model.ops, model.W, model, positions)


@describe.input(("nB", "nI"))
@describe.output(("nB", "nO"))
@describe.on_data(_set_dimensions_if_needed, LSUVinit)
@describe.on_init(_set_dimensions_if_given)
@describe.attributes(
    nP=Dimension("Number of pieces"),
    nF=Dimension("Number of features"),
    nO=Dimension("Size of output"),
    nI=Dimension("Size of input"),
    W=Synapses("Weights matrix", lambda obj: (obj.nO, obj.nP, obj.nF, obj.nI),
        lambda W, ops: ops.xavier_uniform_init(W)),
    b=Biases("Bias vector", lambda obj: (obj.nO, obj.nP)),
    d_W=Gradient("W"),
    d_b=Gradient("b")
)
class MaxoutWindowEncode(Model):
    name = 'window-encode'

    @property
    def nW(self):
        # e.g. nF=5 features -> a window of 2 words on each side
        return int((self.nF - 1) / 2)
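
Beyond describe.attributes, this snippet also shows describe.input and describe.output, which declare the expected input and output shapes as tuples of dimension names, and describe.on_init, which registers a hook to run at construction time.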

explosion/spaCy · spacy/_ml.py
def zero_init(model):
    def _zero_init_impl(self, X, y):
        self.W.fill(0)

    model.on_data_hooks.append(_zero_init_impl)
    return model


def getitem(i):
    def getitem_fwd(X, drop=0.0):
        return X[i], None

    return layerize(getitem_fwd)


@describe.attributes(
    W=Synapses("Weights matrix", lambda obj: (obj.nO, obj.nI), lambda W, ops: None)
)
class MultiSoftmax(Affine):
    """Neural network layer that predicts several multi-class attributes at once.
    For instance, we might predict one class with 6 variables, and another with 5.
    We predict the 11 neurons required for this, and then softmax them such
    that columns 0-6 make a probability distribution and columns 6-11 make another.
    """

    name = "multisoftmax"

    def __init__(self, out_sizes, nI=None, **kwargs):
        Model.__init__(self, **kwargs)
        self.out_sizes = out_sizes
        self.nO = sum(out_sizes)
        self.nI = nI
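
A hedged numpy sketch of the column-block softmax the docstring describes, with out_sizes = (6, 5) as in its example:

import numpy as np

def multi_softmax(acts, out_sizes=(6, 5)):
    out = np.empty_like(acts)
    start = 0
    for size in out_sizes:
        # softmax each block of columns independently
        block = acts[:, start : start + size]
        e = np.exp(block - block.max(axis=1, keepdims=True))
        out[:, start : start + size] = e / e.sum(axis=1, keepdims=True)
        start += size
    return out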

explosion/thinc · thinc/neural/_classes/batchnorm.py
from .model import Model
from ... import describe


def _init_to_one(W, ops):
    W.fill(1.0)


def _run_child_hooks(model, X, y=None):
    for hook in model.child.on_data_hooks:
        hook(model.child, X, y)


@describe.on_data(_run_child_hooks)
@describe.attributes(
    G=describe.Weights("Scaling vector", lambda obj: (obj.nO,), _init_to_one),
    b=describe.Biases("Bias vector", lambda obj: (obj.nO,)),
    d_G=describe.Gradient("G"),
    d_b=describe.Gradient("b"),
    m=describe.Weights("Means", lambda obj: (obj.nO,)),
    v=describe.Weights("Variance", lambda obj: (obj.nO,), _init_to_one),
)
class BatchNorm(Model):
    name = "batchnorm"

    def __init__(self, child, **kwargs):
        self.child = child
        self._layers = [child]
        if "nO" in kwargs:
            self.nO = kwargs["nO"]
        elif getattr(child, "nO", None):