How to use the deepxde.maps.activations.get function in DeepXDE

To help you get started, we've selected a few DeepXDE examples based on popular ways the function is used in public projects.

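The function maps a string identifier such as "tanh" to the corresponding activation callable, which the network classes then apply to their layer outputs. A minimal sketch of the idea, assuming a legacy DeepXDE release that still ships the deepxde.maps package:

import tensorflow as tf
from deepxde.maps import activations

act = activations.get("tanh")            # resolve the identifier to a callable
y = act(tf.constant([-1.0, 0.0, 1.0]))   # equivalent to applying tf.tanh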

From deepxde/maps/fnn.py (github.com/lululxvi/deepxde):
def __init__(
    self,
    layer_size,
    activation,
    kernel_initializer,
    regularization=None,
    dropout_rate=0,
    batch_normalization=None,
):
    self.layer_size = layer_size
    # Resolve the activation identifier (e.g., "tanh") to a callable.
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.regularizer = regularizers.get(regularization)
    self.dropout_rate = dropout_rate
    self.batch_normalization = batch_normalization

    super(FNN, self).__init__()
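The string is supplied when the network is constructed. A hypothetical construction call (the layer sizes are illustrative, and the classic dde.maps entry point is assumed):

import deepxde as dde

# 2 inputs, two hidden layers of width 32, 1 output (illustrative sizes).
net = dde.maps.FNN([2, 32, 32, 1], "tanh", "Glorot uniform")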
From deepxde/maps/mfnn.py (github.com/lululxvi/deepxde):
def __init__(
    self,
    layer_size_low_fidelity,
    layer_size_high_fidelity,
    activation,
    kernel_initializer,
    regularization=None,
    residue=False,
):
    self.layer_size_lo = layer_size_low_fidelity
    self.layer_size_hi = layer_size_high_fidelity
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.regularizer = regularizers.get(regularization)
    self.residue = residue

    super(MfNN, self).__init__()
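MfNN resolves its activation the same way, once for both fidelity levels. A sketch with illustrative layer sizes, again assuming the legacy dde.maps entry point:

import deepxde as dde

# Low-fidelity net [1, 20, 20, 1], high-fidelity net [10, 10, 1] (illustrative).
net = dde.maps.MfNN([1, 20, 20, 1], [10, 10, 1], "tanh", "Glorot uniform")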
From deepxde/maps/resnet.py (github.com/lululxvi/deepxde):
def __init__(
    self,
    input_size,
    output_size,
    num_neurons,
    num_blocks,
    activation,
    kernel_initializer,
    regularization=None,
):
    self.input_size = input_size
    self.output_size = output_size
    self.num_neurons = num_neurons
    self.num_blocks = num_blocks
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.regularizer = regularizers.get(regularization)

    super(ResNet, self).__init__()
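ResNet follows the same pattern. A sketch with illustrative dimensions, assuming the class is exported from dde.maps in the same release:

import deepxde as dde

# 2 inputs, 1 output, 3 residual blocks of width 32 (illustrative sizes).
net = dde.maps.ResNet(2, 1, 32, 3, "relu", "Glorot uniform")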
From deepxde/maps/mfnn.py (github.com/lululxvi/deepxde):
# Nonlinear
y = X_hi
for i in range(len(self.layer_size_hi) - 1):
    y = self.dense(
        y,
        self.layer_size_hi[i],
        activation=self.activation,
        regularizer=self.regularizer,
    )
y_hi_nl = self.dense(
    y, self.layer_size_hi[-1], use_bias=False, regularizer=self.regularizer
)
# Linear + nonlinear (y_hi_l, the linear branch, is built earlier in this method)
if not self.residue:
    alpha = tf.Variable(0, dtype=config.real(tf))
    alpha = activations.get("tanh")(alpha)
    self.y_hi = y_hi_l + alpha * y_hi_nl
else:
    alpha1 = tf.Variable(0, dtype=config.real(tf))
    alpha1 = activations.get("tanh")(alpha1)
    alpha2 = tf.Variable(0, dtype=config.real(tf))
    alpha2 = activations.get("tanh")(alpha2)
    self.y_hi = self.y_lo + 0.1 * (alpha1 * y_hi_l + alpha2 * y_hi_nl)

self.target_lo = tf.placeholder(config.real(tf), [None, self.layer_size_lo[-1]])
self.target_hi = tf.placeholder(config.real(tf), [None, self.layer_size_hi[-1]])
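Note that activations.get is applied here to a trainable scalar rather than to a layer output: tanh bounds the learned mixing coefficient to (-1, 1). The same pattern in isolation:

import tensorflow as tf
from deepxde.maps import activations

alpha = tf.Variable(0.0)                # trainable mixing coefficient
alpha = activations.get("tanh")(alpha)  # squashed into (-1, 1)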
From deepxde/maps/opnn.py (github.com/lululxvi/deepxde):
def __init__(
    self,
    layer_size_function,
    layer_size_location,
    activation,
    kernel_initializer,
    regularization=None,
    use_bias=True,
    stacked=False,
):
    if layer_size_function[-1] != layer_size_location[-1]:
        raise ValueError(
            "Output sizes of function NN and location NN do not match."
        )

    self.layer_size_func = layer_size_function
    self.layer_size_loc = layer_size_location
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    if stacked:
        self.kernel_initializer_stacked = initializers.get(
            kernel_initializer + "stacked"
        )
    self.regularizer = regularizers.get(regularization)
    self.use_bias = use_bias
    self.stacked = stacked

    super(OpNN, self).__init__()
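A hypothetical OpNN (DeepONet) construction call; the branch/trunk sizes are illustrative, and the final sizes of the two sub-networks match, as the constructor requires:

import deepxde as dde

# Function (branch) net on 100 sensor values, location (trunk) net on 1-D inputs.
net = dde.maps.OpNN([100, 40, 40], [1, 40, 40], "relu", "Glorot normal")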