How to use the deepxde.data module in DeepXDE

To help you get started, we've selected a few DeepXDE examples based on common ways the library is used in public projects. They illustrate the dde.data.PDE, dde.data.IDE, dde.data.Func, dde.data.DataSet, and dde.data.MfDataSet classes.
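All of the snippets below omit their imports. A preamble along the following lines makes them runnable; the deepxde.backend import is an assumption about how these older TF1-style examples access TensorFlow:

import matplotlib.pyplot as plt
import numpy as np

import deepxde as dde
from deepxde.backend import tf  # assumed; some versions of the examples import TensorFlow directly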


github lululxvi / deepxde / examples / Poisson_Robin_1d.py

def main():
    def pde(x, y):
        dy_x = tf.gradients(y, x)[0]
        dy_xx = tf.gradients(dy_x, x)[0]
        return dy_xx - 2

    def boundary_l(x, on_boundary):
        return on_boundary and np.isclose(x[0], -1)

    def boundary_r(x, on_boundary):
        return on_boundary and np.isclose(x[0], 1)

    def func(x):
        return (x + 1) ** 2

    geom = dde.geometry.Interval(-1, 1)
    bc_l = dde.DirichletBC(geom, func, boundary_l)
    bc_r = dde.RobinBC(geom, lambda X, y: y, boundary_r)
    data = dde.data.PDE(geom, 1, pde, [bc_l, bc_r], 16, 2, func=func, num_test=100)

    layer_size = [1] + [50] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)
    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    losshistory, train_state = model.train(epochs=10000)

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)

github lululxvi / deepxde / examples / Poisson_Dirichlet_1d.py

def main():
    def pde(x, y):
        dy_x = tf.gradients(y, x)[0]
        dy_xx = tf.gradients(dy_x, x)[0]
        return -dy_xx - np.pi ** 2 * tf.sin(np.pi * x)

    def boundary(x, on_boundary):
        return on_boundary

    def func(x):
        return np.sin(np.pi * x)

    geom = dde.geometry.Interval(-1, 1)
    bc = dde.DirichletBC(geom, func, boundary)
    data = dde.data.PDE(geom, 1, pde, bc, 16, 2, func=func, num_test=100)

    layer_size = [1] + [50] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)
    model.compile("adam", lr=0.001, metrics=["l2 relative error"])

    checkpointer = dde.callbacks.ModelCheckpoint(
        "./model/model.ckpt", verbose=1, save_better_only=True
    )
    movie = dde.callbacks.MovieDumper(
        "model/movie", [-1], [1], period=100, save_spectrum=True, y_reference=func
    )
    losshistory, train_state = model.train(
        epochs=10000, callbacks=[checkpointer, movie]
    )  # the snippet is truncated here in the source; the arguments above are assumed

github lululxvi / deepxde / examples / Poisson_Neumann_1d.py

def main():
    def pde(x, y):
        dy_x = tf.gradients(y, x)[0]
        dy_xx = tf.gradients(dy_x, x)[0]
        return dy_xx - 2

    def boundary_l(x, on_boundary):
        return on_boundary and np.isclose(x[0], -1)

    def boundary_r(x, on_boundary):
        return on_boundary and np.isclose(x[0], 1)

    def func(x):
        return (x + 1) ** 2

    geom = dde.geometry.Interval(-1, 1)
    bc_l = dde.DirichletBC(geom, func, boundary_l)
    bc_r = dde.NeumannBC(geom, lambda X: 2 * (X + 1), boundary_r)
    data = dde.data.PDE(geom, 1, pde, [bc_l, bc_r], 16, 2, func=func, num_test=100)

    layer_size = [1] + [50] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)
    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    losshistory, train_state = model.train(epochs=10000)

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)

github lululxvi / deepxde / examples / Poisson_Lshape.py

def main():
    def pde(x, y):
        dy_x = tf.gradients(y, x)[0]
        dy_x, dy_y = dy_x[:, 0:1], dy_x[:, 1:]
        dy_xx = tf.gradients(dy_x, x)[0][:, 0:1]
        dy_yy = tf.gradients(dy_y, x)[0][:, 1:]
        return -dy_xx - dy_yy - 1

    def boundary(x, on_boundary):
        return on_boundary

    def func(x):
        return np.zeros([len(x), 1])

    geom = dde.geometry.Polygon([[0, 0], [1, 0], [1, -1], [-1, -1], [-1, 1], [0, 1]])
    bc = dde.DirichletBC(geom, func, boundary)

    data = dde.data.PDE(
        geom, 1, pde, bc, num_domain=1200, num_boundary=120, num_test=1500
    )
    net = dde.maps.FNN([2] + [50] * 4 + [1], "tanh", "Glorot uniform")
    model = dde.Model(data, net)

    model.compile("adam", lr=0.001)
    model.train(epochs=50000)
    model.compile("L-BFGS-B")
    losshistory, train_state = model.train()
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)

github lululxvi / deepxde / examples / func.py

def main():
    def func(x):
        """
        x: array_like, N x D_in
        y: array_like, N x D_out
        """
        return x * np.sin(5 * x)

    geom = dde.geometry.Interval(-1, 1)
    num_train = 16
    num_test = 100
    data = dde.data.Func(geom, func, num_train, num_test)

    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN([1] + [20] * 3 + [1], activation, initializer)

    model = dde.Model(data, net)
    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    losshistory, train_state = model.train(epochs=10000)

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)

github lululxvi / deepxde / examples / mf_dataset.py

def main():
    fname_lo_train = "dataset/mf_lo_train.dat"
    fname_hi_train = "dataset/mf_hi_train.dat"
    fname_hi_test = "dataset/mf_hi_test.dat"

    data = dde.data.MfDataSet(
        fname_lo_train=fname_lo_train,
        fname_hi_train=fname_hi_train,
        fname_hi_test=fname_hi_test,
        col_x=(0,),
        col_y=(1,),
    )

    activation = "tanh"
    initializer = "Glorot uniform"
    regularization = ["l2", 0.01]
    net = dde.maps.MfNN(
        [1] + [20] * 4 + [1],
        [10] * 2 + [1],
        activation,
        initializer,
        regularization=regularization,
    )
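The mf_dataset.py snippet ends inside the network constructor. The remaining steps mirror the other examples; the following completion is a sketch, not the exact contents of the file:

    model = dde.Model(data, net)
    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    losshistory, train_state = model.train(epochs=50000)  # epoch count illustrative

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)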

github lululxvi / deepxde / examples / dataset.py

def main():
    fname_train = "dataset/dataset.train"
    fname_test = "dataset/dataset.test"
    data = dde.data.DataSet(
        fname_train=fname_train, fname_test=fname_test, col_x=(0,), col_y=(1,)
    )

    layer_size = [1] + [50] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot normal"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)
    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    losshistory, train_state = model.train(epochs=50000)

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)

github lululxvi / deepxde / examples / ide.py

def main():
    # The integro-differential operator ``ide`` passed to dde.data.IDE below is
    # defined earlier in the original file and is omitted from this snippet.
    def boundary(x, on_boundary):
        return on_boundary and np.isclose(x[0], 0)

    def func(x):
        """
        x: array_like, N x D_in
        y: array_like, N x D_out
        """
        return np.sin(2 * np.pi * x)

    geom = dde.geometry.Interval(0, 1)
    bc = dde.DirichletBC(geom, func, boundary)

    quad_deg = 16
    data = dde.data.IDE(geom, ide, bc, quad_deg, num_domain=16, num_boundary=2)

    layer_size = [1] + [20] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)
    model.compile("adam", lr=0.001)
    model.train(epochs=10000)

    X = geom.uniform_points(100, True)
    y_true = func(X)
    y_pred = model.predict(X)
    print("L2 relative error:", dde.metrics.l2_relative_error(y_true, y_pred))

    plt.figure()
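The ide.py snippet stops at plt.figure(). A minimal, assumed continuation that plots the prediction against the reference solution (matplotlib imported as plt, as in the preamble above) might look like this:

    plt.plot(X, y_true, "-", label="exact")
    plt.plot(X, y_pred, "o", label="PINN prediction")
    plt.legend()
    plt.show()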