How to use the bigdl.nn.layer module in bigdl

To help you get started, we’ve selected a few bigdl examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github intel-analytics / BigDL / pyspark / bigdl / keras / converter.py View on Github external
def create_timedistributed(self):
    """Convert a Keras TimeDistributed layer to a BigDL TimeDistributed.

    input_shape is (batch, time, other dims); the wrapped inner layer is
    converted against the shape with the time axis removed.
    """
    shape_without_time = (self.input_shape[0],) + self.input_shape[2:]
    inner_converter = LayerConverter(self.klayer.layer,
                                     self.config['layer'],
                                     shape_without_time)
    return BLayer.TimeDistributed(inner_converter.create())
github intel-analytics / BigDL / pyspark / bigdl / util / common.py View on Github external
def get_activation_by_name(activation_name, activation_id=None):
    """Convert an activation name string to a bigdl activation layer.

    :param activation_name: name of the activation (case-insensitive),
        e.g. "tanh", "sigmoid", "relu", "softmax", "linear".
    :param activation_id: unused; kept for backward compatibility.
    :return: the constructed bigdl.nn.layer activation layer.
    :raises Exception: if the activation name is not supported.
    """
    import bigdl.nn.layer as BLayer
    activation = None
    activation_name = activation_name.lower()
    if activation_name == "tanh":
        activation = BLayer.Tanh()
    elif activation_name == "sigmoid":
        activation = BLayer.Sigmoid()
    elif activation_name == "hard_sigmoid":
        activation = BLayer.HardSigmoid()
    elif activation_name == "relu":
        activation = BLayer.ReLU()
    elif activation_name == "softmax":
        activation = BLayer.SoftMax()
    elif activation_name == "softplus":
        activation = BLayer.SoftPlus(beta=1.0)
    elif activation_name == "softsign":
        activation = BLayer.SoftSign()
    elif activation_name == "linear":
        # Keras "linear" is a no-op activation; BigDL models it as Identity.
        activation = BLayer.Identity()
    else:
        raise Exception("Unsupported activation type: %s" % activation_name)
    # BUG FIX: the original fell off the end and implicitly returned None,
    # so callers always received None instead of the constructed layer.
    return activation
github intel-analytics / BigDL / pyspark / bigdl / keras / converter.py View on Github external
if self.klayer.dot_axes != [1, 1]:
                raise Exception("For merge mode dot, only dot_axes=1 is supported for now.")
            model = BLayer.Sequential()
            blayer = model.add(BLayer.DotProduct(bigdl_type="float"))\
                .add(BLayer.Reshape([1], True))
        elif self.klayer.mode == "ave":
            blayer = BLayer.CAveTable(
                inplace=False,
                bigdl_type="float")
        elif self.klayer.mode in ['cos']:
            if len(self.input_shape[0]) >= 3:
                raise Exception("For merge mode cos, 3D input or above is not supported for now.")
            if self.klayer.dot_axes != [1, 1]:
                raise Exception("For merge mode cos, only dot_axes=1 is supported for now.")
            blayer = BLayer.Sequential()
            blayer.add(BLayer.CosineDistance(bigdl_type="float")).add(BLayer.Reshape([1, 1], True))
        else:  # invalid mode or lambda functions
            raise Exception("Invalid merge mode: `%s`. Lambda/function as merge mode is not supported for now."
                            % self.klayer.mode)
        if self.__is_from_sequential():
            bseq = BLayer.Sequential()
            parallel_table = BLayer.ParallelTable()
            for l in self.klayer.layers:
                bl = DefinitionLoader.from_kmodel(l)
                parallel_table.add(bl)
            bseq.add(parallel_table)
            bseq.add(blayer)
            return bseq
        else:
            return blayer
github intel-analytics / BigDL / pyspark / bigdl / keras / converter.py View on Github external
def __process_recurrent_layer(self, return_sequences, go_backwards, blayer):
    """Wrap a recurrent BigDL layer so it honors the Keras flags.

    `go_backwards` reverses the input sequence before the recurrent layer;
    `return_sequences=False` keeps only the last output time step.
    """
    container = BLayer.Sequential()
    if go_backwards:
        # Reverse along dimension 2 (the time axis, batch-first).
        container.add(BLayer.Reverse(2))
    container.add(blayer)
    if not return_sequences:
        # Select the last element along the time axis.
        container.add(BLayer.Select(2, -1))
    return container
github intel-analytics / BigDL / pyspark / bigdl / keras / converter.py View on Github external
def create_maxoutdense(self):
    """Convert a Keras MaxoutDense layer into a BigDL Maxout layer."""
    klayer = self.klayer
    config = self.config
    feature_count = int(self.input_shape[1])
    return BLayer.Maxout(
        input_size=feature_count,
        output_size=klayer.output_dim,
        maxout_number=klayer.nb_feature,
        with_bias=klayer.bias,
        w_regularizer=to_bigdl_reg(config["W_regularizer"]),
        b_regularizer=to_bigdl_reg(config["b_regularizer"]))
github intel-analytics / BigDL / pyspark / bigdl / keras / converter.py View on Github external
elif self.klayer.mode == "ave":
            blayer = BLayer.CAveTable(
                inplace=False,
                bigdl_type="float")
        elif self.klayer.mode in ['cos']:
            if len(self.input_shape[0]) >= 3:
                raise Exception("For merge mode cos, 3D input or above is not supported for now.")
            if self.klayer.dot_axes != [1, 1]:
                raise Exception("For merge mode cos, only dot_axes=1 is supported for now.")
            blayer = BLayer.Sequential()
            blayer.add(BLayer.CosineDistance(bigdl_type="float")).add(BLayer.Reshape([1, 1], True))
        else:  # invalid mode or lambda functions
            raise Exception("Invalid merge mode: `%s`. Lambda/function as merge mode is not supported for now."
                            % self.klayer.mode)
        if self.__is_from_sequential():
            bseq = BLayer.Sequential()
            parallel_table = BLayer.ParallelTable()
            for l in self.klayer.layers:
                bl = DefinitionLoader.from_kmodel(l)
                parallel_table.add(bl)
            bseq.add(parallel_table)
            bseq.add(blayer)
            return bseq
        else:
            return blayer
github intel-analytics / BigDL / pyspark / bigdl / keras / converter.py View on Github external
def create_inputlayer(self):
    """Keras InputLayer performs no computation; map it to BigDL Identity."""
    passthrough = BLayer.Identity()
    return passthrough
github intel-analytics / BigDL / pyspark / bigdl / keras / converter.py View on Github external
def _construct_bigdl_sequence(self):
    """Translate the Keras model's layers, in order, into a BigDL Sequential."""
    sequence = BLayer.Sequential()
    for klayer in self.kmodel.layers:
        layer_config = self.node_id_to_config_layer[klayer.name]
        # LayerConverter.create() handles any nested layers recursively.
        converted = LayerConverter(klayer, layer_config).create()
        sequence.add(converted)
    return sequence
github intel-analytics / BigDL / pyspark / bigdl / keras / converter.py View on Github external
def create_reshape(self):
    """Convert a Keras Reshape layer to BigDL.

    A -1 in target_shape marks a dimension whose size must be inferred,
    which requires InferReshape; otherwise a plain Reshape suffices.
    """
    target = self.klayer.target_shape
    if -1 in target:
        return BLayer.InferReshape(target, True)
    return BLayer.Reshape(target, None)
github intel-analytics / BigDL / pyspark / bigdl / keras / converter.py View on Github external
def create_parametricsoftplus(self):
    """Convert Keras ParametricSoftplus to a BigDL SoftPlus.

    Only the case alpha_init * beta_init == 1 (to 4 decimal places) can be
    expressed with BigDL's single-parameter SoftPlus; anything else raises.
    Per-axis shared parameters (shared_axes) are unsupported.
    """
    alpha = float(self.klayer.alpha_init)
    beta = float(self.klayer.beta_init)
    if self.klayer.shared_axes != [None]:
        unsupport_exp("shared_axes")
    if round(alpha * beta, 4) != 1.0:
        raise Exception("Only alpha_init = 1/beta_init is supported for now")
    return BLayer.SoftPlus(beta=beta,
                           bigdl_type="float")