How to use the thinc.api.layerize function in thinc

To help you get started, we've selected a few thinc examples, based on popular ways it is used in public projects.

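Before diving into the examples, here is a minimal sketch (not taken from any of the projects below) of the pattern they all share: layerize wraps a plain forward function, which returns its output together with a backward callback, into a thinc Model that can be composed and trained like any other layer.

import numpy
from thinc.api import layerize

def double(X, drop=0.0):
    # The backward callback receives the gradient of the output and
    # returns the gradient of the input.
    def backward(dY, sgd=None):
        return dY * 2.0
    return X * 2.0, backward

model = layerize(double)
Y, finish_update = model.begin_update(numpy.ones((2, 3), dtype="f"))
dX = finish_update(numpy.ones((2, 3), dtype="f"))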

github explosion / thinc / examples / cnn_tagger.py
    # Excerpt from inside a feature-extractor factory: nlp, tokenized, and
    # attrs are defined by the enclosing function, which is not shown here.
    nlp.vocab.lex_attr_getters[PREFIX] = lambda string: string[:3]
    nlp.vocab.lex_attr_getters[SUFFIX] = lambda string: string[-3:]

    def forward(texts, drop=0.0):
        if tokenized:
            docs = [Doc(nlp.vocab, words) for words in texts]
        else:
            docs = [nlp(text) for text in texts]
        features = [doc.to_array(attrs) for doc in docs]

        def backward(d_features, sgd=None):
            return d_features

        return features, backward

    return layerize(forward)

github explosion / thinc / examples / attention_tagger.py
    nlp.vocab.lex_attr_getters[PREFIX] = lambda string: string[:3]
    nlp.vocab.lex_attr_getters[SUFFIX] = lambda string: string[-3:]

    def forward(texts, drop=0.0):
        if tokenized:
            docs = [Doc(nlp.vocab, words) for words in texts]
        else:
            docs = [nlp(text) for text in texts]
        features = [doc.to_array(attrs) for doc in docs]

        def backward(d_features, sgd=None):
            return d_features

        return features, backward

    return layerize(forward)

github explosion / spaCy / spacy / _ml.py
def doc2feats(cols=None):
    if cols is None:
        cols = [ID, NORM, PREFIX, SUFFIX, SHAPE, ORTH]

    def forward(docs, drop=0.0):
        feats = []
        for doc in docs:
            feats.append(doc.to_array(cols))
        return feats, None

    model = layerize(forward)
    model.cols = cols
    return model
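A quick usage note (hypothetical, not part of the spaCy source above): the model returned by doc2feats behaves like any other thinc layer, and the cols attribute set on it records the column layout of the feature arrays it produces.

# Hypothetical usage; docs is assumed to be a list of spaCy Doc objects.
model = doc2feats()
feats, _ = model.begin_update(docs)  # one (n_tokens, len(model.cols)) array per doc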

github explosion / thinc / thinc / neural / _classes / rnn.py
        # Excerpt from inside the lstm_gates_fwd closure: ops, acts,
        # prev_cells, new_cells, and state are defined in the enclosing
        # code, which is cut off in this listing.
        size = new_cells.shape[0]

        def lstm_gates_bwd(d_state, sgd=None):
            d_cells, d_hiddens = d_state
            d_cells = d_cells[:size]
            d_hiddens = d_hiddens[:size]
            d_acts = [ops.allocate(act.shape) for act in acts]
            d_prev_cells = ops.allocate(prev_cells.shape)
            ops.backprop_lstm(
                d_cells, d_prev_cells, d_acts, d_hiddens, acts, new_cells, prev_cells
            )
            return d_acts, d_prev_cells

        return state, lstm_gates_bwd

    return layerize(lstm_gates_fwd)

github explosion / thinc / examples / cnn_twitter_ner.py
def Residual(layer):
    def forward(X, drop=0.0):
        y, bp_y = layer.begin_update(X, drop=drop)
        output = X + y

        def backward(d_output, sgd=None):
            return d_output + bp_y(d_output, sgd)

        return output, backward

    model = layerize(forward)
    model._layers.append(layer)

    def on_data(self, X, y=None):
        for layer in self._layers:
            for hook in layer.on_data_hooks:
                hook(layer, X, y)

    model.on_data_hooks.append(on_data)
    return model
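As a usage sketch (hypothetical, with a toy layer rather than anything from the example above): Residual adds the wrapped layer's output to its input, so the wrapped layer must preserve the input's shape.

import numpy
from thinc.api import layerize

def toy_forward(X, drop=0.0):
    def toy_backward(dY, sgd=None):
        return dY * 0.5
    return X * 0.5, toy_backward

res = Residual(layerize(toy_forward))
Y, finish_update = res.begin_update(numpy.ones((4, 8), dtype="f"))
dX = finish_update(numpy.ones((4, 8), dtype="f"))  # gradient flows through both paths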

github explosion / spaCy / spacy / ml / _wire.py
def concatenate_lists(*layers, **kwargs):  # pragma: no cover
    """Compose two or more models `f`, `g`, etc, such that their outputs are
    concatenated, i.e. `concatenate(f, g)(x)` computes `hstack(f(x), g(x))`
    """
    if not layers:
        return layerize(noop())
    drop_factor = kwargs.get("drop_factor", 1.0)
    ops = layers[0].ops
    layers = [chain(layer, flatten) for layer in layers]
    concat = concatenate(*layers)

    def concatenate_lists_fwd(Xs, drop=0.0):
        if drop is not None:
            drop *= drop_factor
        lengths = ops.asarray([len(X) for X in Xs], dtype="i")
        flat_y, bp_flat_y = concat.begin_update(Xs, drop=drop)
        ys = ops.unflatten(flat_y, lengths)

        def concatenate_lists_bwd(d_ys, sgd=None):
            return bp_flat_y(ops.flatten(d_ys), sgd=sgd)

        return ys, concatenate_lists_bwd
    # (Truncated in the source listing: the full function goes on to wrap
    # concatenate_lists_fwd into a model and return it.)

github justindujardin / prodigy-scratch / recipes / attention_weights.py
def create_attn_proxy(attn):
    """Return a proxy to the attention layer which will fetch the attention
    weights on each call, appending them to the list 'output'.
    """
    output = []

    def get_weights(Xs_lengths, drop=0.):
        Xs, lengths = Xs_lengths
        output.append(attn._get_attention(attn.Q, Xs, lengths)[0])
        return attn.begin_update(Xs_lengths, drop=drop)

    return output, layerize(get_weights)
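A hypothetical usage sketch (attn_layer, Xs, and lengths are all assumed, not defined above): the proxy is swapped in for the original attention layer, so every forward pass also records the attention weights.

weights_log, proxy = create_attn_proxy(attn_layer)
ys, finish_update = proxy.begin_update((Xs, lengths))
latest_weights = weights_log[-1]  # weights recorded during the last forward pass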

github explosion / thinc / thinc / neural / vecs2vec.py
        # Excerpt from inside a pooling layer's begin_update: funcs, pooled,
        # bp_funcs, X, lengths, F, O, and ops are defined in the enclosing
        # code, which is cut off in this listing.
        for i, func in enumerate(funcs):
            pooled[i], bp_funcs[i] = func.begin_update((X, lengths))

        def finish_update(d_pooled, sgd=None):
            d_pooled = d_pooled.reshape((len(lengths), F, O))
            d_pooled = d_pooled.transpose((1, 0, 2))
            dX = ops.allocate(X.shape)
            for i, bp_func in enumerate(bp_funcs):
                dX += bp_func(d_pooled[i])
            return dX

        pooled = pooled.transpose((1, 0, 2))
        pooled = pooled.reshape((len(lengths), F * O))
        return pooled, finish_update

    return layerize(begin_update)

github explosion / spaCy / spacy / _ml.py
@layerize
def flatten(seqs, drop=0.0):
    ops = Model.ops
    lengths = ops.asarray([len(seq) for seq in seqs], dtype="i")

    def finish_update(d_X, sgd=None):
        return ops.unflatten(d_X, lengths, pad=0)

    X = ops.flatten(seqs, pad=0)
    return X, finish_update
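Because the decorator form already produces a Model, this flatten can be passed straight to combinators; the concatenate_lists example above does exactly that with chain(layer, flatten). A minimal standalone sketch with toy arrays, assuming thinc's Model class is imported in the surrounding module (as spaCy's _ml.py does), since the decorated function reads Model.ops:

import numpy

seqs = [numpy.ones((3, 4), dtype="f"), numpy.ones((5, 4), dtype="f")]
X, get_d_seqs = flatten.begin_update(seqs)  # X has shape (8, 4)
d_seqs = get_d_seqs(numpy.ones_like(X))     # gradients re-split per sequence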

github explosion / thinc / examples / lstm_tagger.py
    nlp.vocab.lex_attr_getters[PREFIX] = lambda string: string[:3]
    nlp.vocab.lex_attr_getters[SUFFIX] = lambda string: string[-3:]

    def forward(texts, drop=0.0):
        if tokenized:
            docs = [Doc(nlp.vocab, words) for words in texts]
        else:
            docs = [nlp(text) for text in texts]
        features = [doc.to_array(attrs) for doc in docs]

        def backward(d_features, sgd=None):
            return d_features

        return features, backward

    return layerize(forward)