How to use the deepxde.config module in DeepXDE

To help you get started, we've selected a few DeepXDE examples based on popular ways it is used in public projects. Every snippet below calls config.real(np) or config.real(tf) to obtain the floating-point dtype DeepXDE is currently configured to use for NumPy or TensorFlow.

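As quick orientation before the snippets: config.real is a small dtype helper. Called with a backend module, it returns the floating-point type DeepXDE is currently configured to use, so the same code runs in single or double precision. A minimal sketch (the set_default_float call is the precision switch in recent DeepXDE releases):

import numpy as np
from deepxde import config

# config.real(np) returns the NumPy dtype (np.float32 by default);
# config.real(tf) returns the matching TensorFlow dtype.
x = np.zeros((3, 1), dtype=config.real(np))
print(x.dtype)  # float32

# To switch precision globally in recent releases:
# import deepxde as dde
# dde.config.set_default_float("float64")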

github lululxvi / deepxde / deepxde / geometry / geometry_1d.py

def uniform_boundary_points(self, n):
    # The boundary of a 1D interval is its two endpoints, so split
    # the n requested points between self.l and self.r.
    if n == 1:
        return np.array([[self.l]]).astype(config.real(np))
    xl = np.full((n // 2, 1), self.l).astype(config.real(np))
    xr = np.full((n - n // 2, 1), self.r).astype(config.real(np))
    return np.vstack((xl, xr))
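
In user code this method is reached through a geometry object; a small usage sketch:

import deepxde as dde

geom = dde.geometry.Interval(0, 1)
pts = geom.uniform_boundary_points(2)
# pts == [[0.], [1.]] with dtype config.real(np), i.e. float32 by default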

github lululxvi / deepxde / deepxde / data / pde.py

def losses_test():
    # At test time the BC losses are not evaluated, so pad the loss
    # list with zero constants of the configured TensorFlow dtype.
    return [loss(tf.zeros(tf.shape(fi)), fi) for fi in f] + [
        tf.constant(0, dtype=config.real(tf)) for _ in self.bcs
    ]

github lululxvi / deepxde / deepxde / data / ide.py

def get_int_matrix(self, training):
    def get_quad_weights(x):
        # Rescale Gauss-Legendre weights from [-1, 1] to [0, x]
        return self.quad_w * x / 2

    if training:
        num_bc = sum(self.num_bcs)
        X = self.train_x
    else:
        num_bc = 0
        X = self.test_x
    if training or self.num_test is None:
        num_f = self.num_domain + self.num_boundary
        if self.anchors is not None:
            num_f += len(self.anchors)
    else:
        num_f = self.num_test

    # Quadrature (integral) matrix in the configured NumPy dtype
    int_mat = np.zeros((num_bc + num_f, X.size), dtype=config.real(np))
    for i in range(num_f):
        x = X[i + num_bc, 0]
        # Quadrature points for row i are stored after the collocation points
        beg = num_f + num_bc + self.quad_deg * i
        end = beg + self.quad_deg
        K = np.ravel(self.kernel(np.full((self.quad_deg, 1), x), X[beg:end]))
        int_mat[i + num_bc, beg:end] = get_quad_weights(x) * K
    return int_mat
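
The x / 2 factor in get_quad_weights is the change-of-variables rule that maps Gauss-Legendre nodes and weights from [-1, 1] to [0, x]; a self-contained check in plain NumPy:

import numpy as np

quad_x, quad_w = np.polynomial.legendre.leggauss(16)  # nodes/weights on [-1, 1]
x = 0.5
s = (quad_x + 1) * x / 2  # nodes mapped to [0, x]
w = quad_w * x / 2        # weights rescaled as in get_quad_weights
print(np.sum(w * s**2))   # ~0.0416667, the integral of s^2 over [0, 0.5]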

github lululxvi / deepxde / deepxde / maps / opnn.py

# Inside build(); y_loc is updated layer by layer in a loop over the
# trunk ("location") network, with i the layer index.
y_loc = self.dense(
    y_loc,
    self.layer_size_loc[i],
    activation=self.activation,
    regularizer=self.regularizer,
)

# Dot product of the branch ("function") and trunk features
self.y = tf.einsum("bi,bi->b", y_func, y_loc)
self.y = tf.expand_dims(self.y, axis=1)
# Add bias
if self.use_bias:
    b = tf.Variable(tf.zeros(1))
    self.y += b

# Placeholder for training targets, in the configured TF dtype
self.target = tf.placeholder(config.real(tf), [None, 1])

github lululxvi / deepxde / deepxde / geometry / timedomain.py

# Inside uniform_boundary_points(self, n): the bounding-box surface
# area heuristic s splits the point budget between space (nx) and time (nt).
s = 2 * sum(
    map(
        lambda l: l[0] * l[1],
        itertools.combinations(
            self.geometry.bbox[1] - self.geometry.bbox[0], 2
        ),
    )
)
nx = int((n * s / self.timedomain.diam) ** 0.5)
nt = int(np.ceil(n / nx))
x = self.geometry.uniform_boundary_points(nx)
# Time levels run from t1 down toward t0; endpoint=False drops t0 itself
t = np.linspace(
    self.timedomain.t1,
    self.timedomain.t0,
    num=nt,
    endpoint=False,
    dtype=config.real(np),
)
xt = []
for ti in t:
    xt.append(np.hstack((x, np.full([nx, 1], ti))))
xt = np.vstack(xt)
if n != len(xt):
    print(
        "Warning: {} points required, but {} points sampled.".format(n, len(xt))
    )
return xt
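
A usage sketch for the spatio-temporal geometry this method belongs to, using classes from DeepXDE's public API (argument values are illustrative):

import deepxde as dde

geom = dde.geometry.Rectangle([0, 0], [1, 1])
timedomain = dde.geometry.TimeDomain(0, 1)
geomtime = dde.geometry.GeometryXTime(geom, timedomain)
pts = geomtime.uniform_boundary_points(100)  # columns: x, y, t
print(pts.shape)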

github lululxvi / deepxde / deepxde / array_ops.py

def convert_to_array(value):
    """Convert a list to numpy array or tensorflow tensor."""
    if istensorlist(value):
        return tf.convert_to_tensor(value, dtype=config.real(tf))
    value = np.array(value)
    if value.dtype != config.real(np):
        return value.astype(config.real(np))
    return value
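
A behavior sketch, assuming the module path shown in the header still matches the installed version:

from deepxde.array_ops import convert_to_array

a = convert_to_array([1.0, 2.0, 3.0])
print(type(a), a.dtype)  # NumPy array in config.real(np), float32 by default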

github lululxvi / deepxde / deepxde / fractional.py

def get_matrix_static(self):
    print("Warning: assume zero boundary condition.")
    n = (self.disc.resolution[0] - 2) * (self.nt - 1)
    int_mat = np.zeros((n, n), dtype=config.real(np))
    self.fracx = Fractional(self.alpha, self.geom, self.disc, None)
    int_mat_one = self.fracx.get_matrix()
    beg = 0
    # Tile the 1D fractional-derivative matrix (interior nodes only)
    # along the block diagonal, one block per time level.
    for _ in range(self.nt - 1):
        int_mat[
            beg : beg + self.disc.resolution[0] - 2,
            beg : beg + self.disc.resolution[0] - 2,
        ] = int_mat_one[1:-1, 1:-1]
        beg += self.disc.resolution[0] - 2
    return int_mat

github lululxvi / deepxde / deepxde / maps / resnet.py

def build(self):
    print("Building residual neural network...")
    # Input placeholder in the configured TensorFlow dtype
    self.x = tf.placeholder(config.real(tf), [None, self.input_size])

    y = self.dense(self.x, self.num_neurons, activation=self.activation)
    for _ in range(self.num_blocks):
        y = self.residual_block(y)
    self.y = self.dense(y, self.output_size)

    # Placeholder for the training targets
    self.y_ = tf.placeholder(config.real(tf), [None, self.output_size])

github lululxvi / deepxde / deepxde / maps / fnn.py

def build(self):
    print("Building feed-forward neural network...")
    self.x = tf.placeholder(config.real(tf), [None, self.layer_size[0]])

    y = self.x
    for i in range(len(self.layer_size) - 2):
        if self.batch_normalization is None:
            y = self.dense(y, self.layer_size[i + 1], activation=self.activation)
        elif self.batch_normalization == "before":
            y = self.dense_batchnorm_v1(y, self.layer_size[i + 1])
        elif self.batch_normalization == "after":
            y = self.dense_batchnorm_v2(y, self.layer_size[i + 1])
        else:
            raise ValueError("batch_normalization")
        if self.dropout_rate > 0:
            y = tf.layers.dropout(y, rate=self.dropout_rate, training=self.dropout)
    self.y = self.dense(y, self.layer_size[-1])

    self.y_ = tf.placeholder(config.real(tf), [None, self.layer_size[-1]])
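
This build method is not called directly; the network is constructed through the public API of the same (TF1-era) DeepXDE, and the library invokes build() when the model is compiled. Layer sizes here are illustrative:

import deepxde as dde

# 1 input, two hidden layers of 50 units, 1 output
net = dde.maps.FNN([1, 50, 50, 1], "tanh", "Glorot uniform")
# Every placeholder created in build() uses config.real(tf).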

github lululxvi / deepxde / deepxde / maps / mfnn.py

def build(self):
    print("Building multifidelity neural network...")
    self.X = tf.placeholder(config.real(tf), [None, self.layer_size_lo[0]])

    # Low fidelity
    y = self.X
    for i in range(len(self.layer_size_lo) - 2):
        y = self.dense(
            y,
            self.layer_size_lo[i + 1],
            activation=self.activation,
            regularizer=self.regularizer,
        )
    self.y_lo = self.dense(y, self.layer_size_lo[-1], regularizer=self.regularizer)

    # High fidelity
    X_hi = tf.concat([self.X, self.y_lo], 1)
    # Linear
    y_hi_l = self.dense(X_hi, self.layer_size_hi[-1])