How to use the bigdl.nn.layer.Squeeze function in bigdl

To help you get started, we’ve selected a few examples of bigdl.nn.layer.Squeeze, based on popular ways it is used in public projects.

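Before the project examples, here is a minimal sketch of Squeeze on its own. Squeeze removes a size-1 dimension from its input, and BigDL dimension indices are 1-based. The snippet assumes a pyspark session with the BigDL jars on the classpath; the shapes in the comments are expected, not verified, output.

import numpy as np
from bigdl.util.common import init_engine
from bigdl.nn.layer import Squeeze

init_engine()                    # set up the BigDL engine before creating layers
layer = Squeeze(2)               # drop dimension 2 (1-based) if it has size 1
inp = np.random.rand(4, 1, 3)    # input shape (4, 1, 3)
out = layer.forward(inp)         # expected output shape (4, 3)
print(out.shape)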

From intel-analytics/BigDL: pyspark/bigdl/keras/converter.py

blayer = BLayer.SpatialAveragePooling(
    kw=1,                        # dummy width dimension: kernel width 1
    kh=self.klayer.pool_length,  # pool along the time axis
    dw=1,
    dh=self.klayer.stride,
    pad_w=bpadW,
    pad_h=bpadH,
    global_pooling=False,
    ceil_mode=False,
    count_include_pad=False,
    divide=True,
    format="NHWC",
    bigdl_type="float"
)
seq.add(blayer)
seq.add(BLayer.Squeeze(3))       # drop the singleton width dim (dim 3 of NHWC)
return seq
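
Here Squeeze(3) drops the singleton width dimension (dim 3 of the NHWC batch) that the converter introduced so Keras 1-D average pooling could be expressed with BigDL's 2-D SpatialAveragePooling.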

From intel-analytics/BigDL: pyspark/bigdl/keras/converter.py

blayer = BLayer.LocallyConnected2D(
    n_input_plane=int(self.input_shape[2]),
    input_width=1,               # dummy width so the 1-D layer maps onto 2-D
    input_height=int(self.input_shape[1]),
    n_output_plane=self.klayer.nb_filter,
    kernel_w=1,
    kernel_h=self.klayer.filter_length,
    stride_w=1,
    stride_h=self.klayer.subsample_length,
    pad_w=0,
    pad_h=0,
    wRegularizer=to_bigdl_reg(self.config["W_regularizer"]),
    bRegularizer=to_bigdl_reg(self.config["b_regularizer"]),
    with_bias=self.klayer.bias,
    data_format="NHWC")
seq.add(blayer)
seq.add(BLayer.Squeeze(3))       # remove the dummy width dim again
if self.config["activation"] != "linear":
    activation = get_activation_by_name(self.config["activation"],
                                        "%s_%s" % (self.config["name"], self.config["activation"]))
    return self.fuse(seq, activation)
else:
    return seq
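
The same trick maps Keras's LocallyConnected1D onto the 2-D LocallyConnected2D: the input is given a dummy width of 1, and Squeeze(3) restores the 3-D shape before any activation is fused on top.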

From intel-analytics/BigDL: pyspark/bigdl/keras/converter.py

seq = BLayer.Sequential()
blayer = BLayer.SpatialMaxPooling(
    kw=b_kw,                     # kernel covers the full spatial extent
    kh=b_kh,
    dw=b_kw,
    dh=b_kh,
    pad_w=0,
    pad_h=0,
    to_ceil=False,
    format=bigdl_order,
    bigdl_type="float"
)
seq.add(blayer)
if bigdl_order == "NCHW":
    # pooled output is (N, C, 1, 1): squeeze width (dim 3 per sample), then height
    seq.add(BLayer.Squeeze(3, num_input_dims=3))
    seq.add(BLayer.Squeeze(2, num_input_dims=2))
else:
    # pooled output is (N, 1, 1, C): squeeze width (dim 2 per sample), then height
    seq.add(BLayer.Squeeze(2, num_input_dims=3))
    seq.add(BLayer.Squeeze(1, num_input_dims=2))
return seq
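
One detail worth calling out: num_input_dims tells Squeeze how many dimensions a single sample has, so the dim index is interpreted per sample and survives the extra batch dimension. A minimal sketch, assuming the same engine setup as the first example and that reading of the semantics:

import numpy as np
from bigdl.nn.layer import Squeeze

sq = Squeeze(3, num_input_dims=3)    # squeeze dim 3 of each 3-d sample
batch = np.random.rand(2, 5, 1, 1)   # NCHW global-pooling output: (N=2, C=5, 1, 1)
print(sq.forward(batch).shape)       # expected: (2, 5, 1)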

From intel-analytics/BigDL: pyspark/bigdl/keras/converter.py

blayer = BLayer.SpatialAveragePooling(
    # (kernel and stride arguments truncated in the source snippet)
    pad_w=0,
    pad_h=0,
    global_pooling=False,
    ceil_mode=False,
    count_include_pad=False,
    divide=True,
    format=bigdl_order,
    bigdl_type="float"
)
seq.add(blayer)
if bigdl_order == "NCHW":
    seq.add(BLayer.Squeeze(3, num_input_dims=3))
    seq.add(BLayer.Squeeze(2, num_input_dims=2))
else:
    seq.add(BLayer.Squeeze(2, num_input_dims=3))
    seq.add(BLayer.Squeeze(1, num_input_dims=2))
return seq
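
The global average pooling variant ends with the identical pair of squeezes. Note the order: the higher dimension index is squeezed first, so the second index is still valid after the first squeeze.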

From intel-analytics/BigDL: pyspark/bigdl/keras/converter.py

blayer = BLayer.SpatialAveragePooling(
    kw=b_kw,
    kh=b_kh,
    dw=1,
    dh=1,
    pad_w=0,
    pad_h=0,
    global_pooling=False,
    ceil_mode=False,
    count_include_pad=False,
    divide=True,
    format="NHWC",
    bigdl_type="float"
)
seq.add(blayer)
seq.add(BLayer.Squeeze(3))       # drop the singleton width dim
seq.add(BLayer.Squeeze(2))       # then the singleton height dim
return seq
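
In NHWC the globally pooled output is (batch, 1, 1, channels); Squeeze(3) removes the width and Squeeze(2) the height, leaving (batch, channels).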

From intel-analytics/BigDL: pyspark/bigdl/keras/converter.py

seq = BLayer.Sequential()
# insert a dummy width dimension so 1-D pooling can reuse the 2-D layer
seq.add(BLayer.Reshape([int(self.input_shape[1]), 1, int(self.input_shape[2])], True))
blayer = BLayer.SpatialMaxPooling(
    kw=1,
    kh=self.klayer.pool_length,
    dw=1,
    dh=self.klayer.stride,
    pad_w=bpadW,
    pad_h=bpadH,
    to_ceil=False,
    format="NHWC",
    bigdl_type="float"
)
seq.add(blayer)
seq.add(BLayer.Squeeze(3))       # remove the dummy width dimension
return seq
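
This snippet shows the full bracket around the pooling layer: Reshape inserts the dummy width axis up front, SpatialMaxPooling slides over windows of pool_length steps by 1, and Squeeze(3) removes the dummy axis again on the way out.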

From intel-analytics/BigDL: pyspark/bigdl/keras/converter.py

blayer = BLayer.SpatialMaxPooling(   # opening line truncated in the source; reconstructed from the parameters
    kw=b_kw,
    kh=b_kh,
    dw=b_kw,
    dh=b_kh,
    pad_w=0,
    pad_h=0,
    to_ceil=False,
    format=bigdl_order,
    bigdl_type="float"
)
seq.add(blayer)
if bigdl_order == "NCHW":
    seq.add(BLayer.Squeeze(3, num_input_dims=3))
    seq.add(BLayer.Squeeze(2, num_input_dims=2))
else:
    seq.add(BLayer.Squeeze(2, num_input_dims=3))
    seq.add(BLayer.Squeeze(1, num_input_dims=2))
return seq

From intel-analytics/BigDL: pyspark/bigdl/keras/converter.py

seq = BLayer.Sequential()
seq.add(BLayer.Reshape([int(self.input_shape[1]), 1, int(self.input_shape[2])], True))
blayer = BLayer.SpatialMaxPooling(
    kw=b_kw,                     # kernel spans the whole (reshaped) input
    kh=b_kh,
    dw=1,
    dh=1,
    pad_w=0,
    pad_h=0,
    to_ceil=False,
    format="NHWC",
    bigdl_type="float"
)
seq.add(blayer)
seq.add(BLayer.Squeeze(3))       # squeeze width, then height
seq.add(BLayer.Squeeze(2))
return seq
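
Here the Reshape/Squeeze bracket is combined with a kernel that spans the entire reshaped input, so both remaining singleton dimensions are squeezed away before returning.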

From intel-analytics/analytics-zoo: pyzoo/zoo/pipeline/api/keras/layers/self_attention.py

if hidden_size < 0:
    raise TypeError('hidden_size must be greater than 0 with default embedding layer')
from bigdl.nn.layer import Squeeze
word_input = InputLayer(input_shape=(seq_len,))
position_input = InputLayer(input_shape=(seq_len,))

embedding = Sequential()
embedding.add(Merge(layers=[word_input, position_input], mode='concat')) \
    .add(Reshape([seq_len * 2])) \
    .add(Embedding(vocab, hidden_size, input_length=seq_len * 2,
                   weights=np.random.normal(0.0, initializer_range, (vocab, hidden_size))))\
    .add(Dropout(embedding_drop)) \
    .add(Reshape((seq_len, 2, hidden_size))) \
    .add(KerasLayerWrapper(Sum(dimension=3, squeeze=True)))
# workaround for bug #1208; remove this line once the bug is fixed
embedding.add(KerasLayerWrapper(Squeeze(dim=3)))

shape = ((seq_len,), (seq_len,))
return TransformerLayer(n_block, hidden_drop, attn_drop, n_head, initializer_range,
                        bidirectional, output_all_block, embedding, input_shape=shape)
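
The trailing Squeeze(dim=3) is a stopgap: Sum(dimension=3, squeeze=True) should already drop the summed dimension, but per the source comment bug #1208 leaves a singleton dimension behind, so an explicit Squeeze is appended until the fix lands.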