I want to Solve Lorenz System #1900
Unanswered
Msnaldin-64
asked this question in
Q&A
Replies: 0 comments
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
-
Dear Dr. Lu Lu
I have just started to use DeepXDE this week and I'm very interested in it. I'm trying to solve the Lorenz system with the following code, but the results are far from the right shape. Could you kindly assist me in correcting the code? Thank you in advance.
import deepxde as dde
import matplotlib.pyplot as plt
import numpy as np
from scipy import integrate
from deepxde.backend import tf
def Lorenz_system(x, y):
    """Residuals of the Lorenz system for DeepXDE.

    dy1/dx = 10 * (y2 - y1)
    dy2/dx = y1 * (28 - y3) - y2
    dy3/dx = y1 * y2 - 8/3 * y3

    Args:
        x: Input tensor (time t), shape (N, 1).
        y: Network output tensor, shape (N, 3), columns (y1, y2, y3).

    Returns:
        List of the three ODE residuals; each vanishes at the exact solution.
    """
    y1, y2, y3 = y[:, 0:1], y[:, 1:2], y[:, 2:]
    # Differentiate the FULL output tensor and pick the component with i=...
    # Calling jacobian on a sliced tensor (y1, y2, y3) with i=0, as the
    # original code did, bypasses DeepXDE's gradient caching and does not
    # match the documented dde.grad.jacobian API.
    dy1_x = dde.grad.jacobian(y, x, i=0)
    dy2_x = dde.grad.jacobian(y, x, i=1)
    dy3_x = dde.grad.jacobian(y, x, i=2)
    return [
        dy1_x - 10.0 * (y2 - y1),
        dy2_x - y1 * (28 - y3) + y2,
        dy3_x - y1 * y2 + 8.0 / 3.0 * y3,
    ]
def boundary(_, on_initial):
    """Select the initial point of the time domain for the IC constraints.

    DeepXDE calls this with (x, on_initial); a point is kept exactly when
    the framework flags it as lying on the initial boundary (t = 0).
    """
    return on_initial
# Time domain of the ODE system: t in [0, 50].
geom = dde.geometry.TimeDomain(0, 50)

# Initial conditions y1(0) = y2(0) = y3(0) = 1, one per output component.
ic1 = dde.icbc.IC(geom, lambda x: 1.0, boundary, component=0)
ic2 = dde.icbc.IC(geom, lambda x: 1.0, boundary, component=1)
ic3 = dde.icbc.IC(geom, lambda x: 1.0, boundary, component=2)

# 3500 collocation points inside the domain, 2 boundary points,
# and 100 points for testing; no reference solution is supplied.
data = dde.data.PDE(
    geom,
    Lorenz_system,
    [ic1, ic2, ic3],
    3500,
    2,
    solution=None,
    num_test=100,
)

# Fully connected network: 1 input (t), six hidden layers of 64 tanh units,
# 3 outputs (y1, y2, y3), Glorot-uniform initialization.
net = dde.nn.FNN([1] + [64] * 6 + [3], "tanh", "Glorot uniform")
def input_transform(t):
    """Feature transform: augment time t with sin(k*t) for k = 1..6.

    The periodic features make it easier for the network to represent the
    oscillatory Lorenz trajectories over the long time interval.
    """
    features = [t] + [tf.sin(k * t) for k in range(1, 7)]
    return tf.concat(features, axis=1)
def output_transform(t, y):
    """Hard-constrain the initial conditions y_i(0) = 1.

    tanh(t) is zero at t = 0, so multiplying each raw output by it and
    adding 1 forces every component to equal exactly 1 at the initial time.
    """
    gate = tf.tanh(t)
    components = [y[:, i : i + 1] * gate + 1 for i in range(3)]
    return tf.concat(components, axis=1)
net.apply_feature_transform(input_transform)
net.apply_output_transform(output_transform)

model = dde.Model(data, net)
model.compile("adam", lr=0.001)
losshistory, train_state = model.train(iterations=50000)

# Most backends except jax can have a second fine tuning of the solution
# with L-BFGS.  (This sentence was a bare statement in the original paste —
# a SyntaxError — so it is commented out here.)
model.compile("L-BFGS")
losshistory, train_state = model.train()

dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Beta Was this translation helpful? Give feedback.
All reactions