How to use the einops.einops.rearrange function in einops

To help you get started, we’ve selected a few einops examples based on popular ways the function is used in public projects.

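Before diving into the project examples below, here is a minimal sketch of the basic call: rearrange takes a tensor or numpy array plus a pattern string that names every axis on both sides of ->. The axis names and shapes here are illustrative only.

import numpy
from einops import rearrange

images = numpy.random.rand(32, 30, 40, 3)               # batch of 32 HWC images
transposed = rearrange(images, 'b h w c -> b c h w')    # reorder axes to BCHW
flattened = rearrange(images, 'b h w c -> b (h w c)')   # flatten each image to a vector
print(transposed.shape, flattened.shape)                # (32, 3, 30, 40) (32, 3600)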

From arogozhnikov/einops, tests/test_ops.py:
def test4(x):
    # space-to-depth
    y = rearrange(x, 'b c (h h1) (w w1) -> b (h1 w1 c) h w', h1=2, w1=2)
    assert y.shape == (10, 20 * 4, 30 // 2, 40 // 2)
    return y
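The space-to-depth pattern above is exactly invertible by mirroring the pattern; a small round-trip sketch (not from the repo), assuming the same 2x2 blocks and input shape as the test:

import numpy
from einops import rearrange

x = numpy.random.rand(10, 20, 30, 40)
y = rearrange(x, 'b c (h h1) (w w1) -> b (h1 w1 c) h w', h1=2, w1=2)       # space-to-depth
x_back = rearrange(y, 'b (h1 w1 c) h w -> b c (h h1) (w w1)', h1=2, w1=2)  # depth-to-space
assert numpy.array_equal(x, x_back)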
From arogozhnikov/einops, tests/test_ops.py:
def new_way(input, num_classes, num_anchors, anchors, stride_h, stride_w):
    raw_predictions = rearrange(input, ' b (anchor prediction) h w -> prediction b anchor h w', anchor=num_anchors)

    anchors = torch.FloatTensor(anchors).to(input.device)
    anchor_sizes = rearrange(anchors, 'anchor dim -> dim () anchor () ()')

    _, _, _, in_h, in_w = raw_predictions.shape
    grid_h = rearrange(torch.arange(in_h).float(), 'h -> () () h ()').to(input.device)
    grid_w = rearrange(torch.arange(in_w).float(), 'w -> () () () w').to(input.device)

    predicted_bboxes = torch.zeros_like(raw_predictions)
    predicted_bboxes[0] = (raw_predictions[0].sigmoid() + grid_h) * stride_h  # center y
    predicted_bboxes[1] = (raw_predictions[1].sigmoid() + grid_w) * stride_w  # center x
    predicted_bboxes[2:4] = (raw_predictions[2:4].exp()) * anchor_sizes  # bbox width and height
    predicted_bboxes[4] = raw_predictions[4].sigmoid()  # confidence
    predicted_bboxes[5:] = raw_predictions[5:].sigmoid()  # class predictions
    # only to match results of original code, not needed
    return rearrange(predicted_bboxes, 'prediction b anchor h w -> b anchor h w prediction')
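In this YOLO-style decoder, `()` on the right-hand side of a pattern inserts a singleton axis, which is what lets the anchor sizes and the coordinate grids broadcast against the 5-D prediction tensor. A small standalone illustration (the anchor values are made up):

import torch
from einops import rearrange

anchors = torch.tensor([[10., 13.], [16., 30.], [33., 23.]])           # shape (anchor, dim)
anchor_sizes = rearrange(anchors, 'anchor dim -> dim () anchor () ()')
print(anchor_sizes.shape)                                              # torch.Size([2, 1, 3, 1, 1])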
From arogozhnikov/einops, tests/test_ops.py:
def unet_like_1d(x, usual_convolution):
    # u-net like steps for increasing / reducing dimensionality
    x = rearrange(x, 'b c t1 t2 -> b c (t1 t2)')  # reduce dimensionality
    y = rearrange(x, 'b c (t dt) -> b (dt c) t', dt=2)
    y = usual_convolution(y)
    x = x + rearrange(y, 'b (dt c) t -> b c (t dt)', dt=2)
    return x
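A hedged usage sketch for the helper above: assuming `usual_convolution` is a Conv1d whose channel count matches the `(dt c)` grouping, the shapes work out as follows (the sizes are illustrative):

import torch
from einops import rearrange

x = torch.randn(2, 16, 8, 8)                              # b c t1 t2
conv = torch.nn.Conv1d(32, 32, kernel_size=3, padding=1)  # operates on dt*c = 32 channels
out = unet_like_1d(x, conv)
print(out.shape)                                          # torch.Size([2, 16, 64])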
From arogozhnikov/einops, tests/test_ops.py:
def test9(x):
    # squeeze - unsqueeze
    y = reduce(x, 'b c h w -> b c () ()', reduction='max')
    assert y.shape == (10, 20, 1, 1)
    y = rearrange(y, 'b c () () -> c b')
    assert y.shape == (20, 10)
    return y
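The reduce call with `()` in the output pattern behaves like a keepdims reduction; the following check (not from the repo) compares it against plain numpy:

import numpy
from einops import rearrange, reduce

x = numpy.random.rand(10, 20, 30, 40)
y = reduce(x, 'b c h w -> b c () ()', reduction='max')   # max over h and w, keeping singleton axes
assert y.shape == (10, 20, 1, 1)
assert numpy.allclose(y.squeeze(), x.max(axis=(2, 3)))
y = rearrange(y, 'b c () () -> c b')                     # drop the singleton axes and transpose
assert y.shape == (20, 10)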
From arogozhnikov/einops, tests/test_ops.py:
def test_concatenations_and_stacking():
    for backend in imp_op_backends:
        print('testing shapes for ', backend.framework_name)
        for n_arrays in [1, 2, 5]:
            shapes = [[], [1], [1, 1], [2, 3, 5, 7], [1] * 6]
            for shape in shapes:
                if backend.framework_name == 'mxnet.ndarray' and len(shape) == 0:
                    # known bug of mxnet
                    continue
                arrays1 = [numpy.arange(i, i + numpy.prod(shape)).reshape(shape) for i in range(n_arrays)]
                arrays2 = [backend.from_numpy(array) for array in arrays1]
                result0 = numpy.asarray(arrays1)
                result1 = rearrange(arrays1, '...->...')
                result2 = rearrange(arrays2, '...->...')
                assert numpy.array_equal(result0, result1)
                assert numpy.array_equal(result1, backend.to_numpy(result2))

                result1 = rearrange(arrays1, 'b ... -> ... b')
                result2 = rearrange(arrays2, 'b ... -> ... b')
                assert numpy.array_equal(result1, backend.to_numpy(result2))
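Note that when rearrange receives a list of arrays, as in the test above, the list itself acts as the first axis of the pattern: '...->...' stacks the arrays along a new leading axis, while 'b ... -> ... b' stacks them along a new trailing axis. A tiny sketch:

import numpy
from einops import rearrange

arrays = [numpy.zeros([3, 4]), numpy.ones([3, 4])]
stacked_first = rearrange(arrays, '... -> ...')       # shape (2, 3, 4), like numpy.stack
stacked_last = rearrange(arrays, 'b ... -> ... b')    # shape (3, 4, 2)
print(stacked_first.shape, stacked_last.shape)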
From arogozhnikov/einops, tests/test_ops.py:
]:
        result = rearrange(x, pattern)
        assert len(numpy.setdiff1d(x, result)) == 0
        assert result.dtype == x.dtype

    result = rearrange(x, 'a b c d e f -> a (b) (c d e) f')
    assert numpy.array_equal(x.flatten(), result.flatten())

    result = rearrange(x, 'a aa aa1 a1a1 aaaa a11 -> a aa aa1 a1a1 aaaa a11')
    assert numpy.array_equal(x, result)

    result1 = rearrange(x, 'a b c d e f -> f e d c b a')
    result2 = rearrange(x, 'f e d c b a -> a b c d e f')
    assert numpy.array_equal(result1, result2)

    result = rearrange(rearrange(x, 'a b c d e f -> (f d) c (e b) a'), '(f d) c (e b) a -> a b c d e f', b=2, d=5)
    assert numpy.array_equal(x, result)

    sizes = dict(zip('abcdef', shape))
    temp = rearrange(x, 'a b c d e f -> (f d) c (e b) a', **sizes)
    result = rearrange(temp, '(f d) c (e b) a -> a b c d e f', **sizes)
    assert numpy.array_equal(x, result)

    x2 = numpy.arange(2 * 3 * 4).reshape([2, 3, 4])
    result = rearrange(x2, 'a b c -> b c a')
    assert x2[1, 2, 3] == result[2, 3, 1]
    assert x2[0, 1, 2] == result[1, 2, 0]
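The round-trip checks above rely on rearrange being exactly invertible: mirroring the pattern and supplying enough axis lengths to split the grouped axes recovers the original array. A short standalone example:

import numpy
from einops import rearrange

x = numpy.arange(2 * 3 * 4).reshape(2, 3, 4)
y = rearrange(x, 'a b c -> (c b) a')
x_back = rearrange(y, '(c b) a -> a b c', b=3)   # b must be given so (c b) can be split
assert numpy.array_equal(x, x_back)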
From arogozhnikov/einops, tests/test_ops.py:
def operation(x):
    if reduction == 'rearrange':
        return rearrange(x, pattern, **axes_lengths)
    else:
        return reduce(x, pattern, reduction, **axes_lengths)
From arogozhnikov/einops, tests/test_ops.py:
def test5(x):
    # simple transposition
    y = rearrange(x, 'b1 sound b2 letter -> b1 b2 sound letter')
    assert y.shape == (10, 30, 20, 40)
    return y
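For reference, the pattern above is the readable equivalent of a plain axis permutation; a quick check (not from the repo):

import numpy
from einops import rearrange

x = numpy.random.rand(10, 20, 30, 40)
y = rearrange(x, 'b1 sound b2 letter -> b1 b2 sound letter')
assert numpy.array_equal(y, x.transpose(0, 2, 1, 3))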