ODE_NN.py
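"""Solve the ODE du/dx = -2x with boundary condition u(1) = 1 using a small neural network.

The network parameterises a trial solution u(x) = A + (1 - x) * N(x), which
satisfies the boundary condition by construction; training minimises the mean
squared ODE residual. The analytical solution is u(x) = 2 - x^2.
"""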
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import matplotlib.pyplot as plt
# Network architecture and learning rate
n_input = 1
n_nodes_hl1 = 400
n_nodes_hl2 = 400
n_output = 1
learn_rate = 0.00003

# Boundary condition: u(1) = A
A = 1

# Training data: N points in [a, b)
N = 1000
a = 0
b = 1
x = np.arange(a, b, (b - a) / N).reshape((N, 1))

# Placeholder for the network input
x_ph = tf.placeholder('float', [None, 1], name='input')

# Number of training epochs
n_epochs = 800

# Standard deviation for initialising weights and biases from a normal distribution
hl_sigma = 0.02
def neural_network_model(data):
    data = tf.cast(data, tf.float32)

    hidden_1_layer = {'weights': tf.Variable(tf.random.normal([n_input, n_nodes_hl1], stddev=hl_sigma)),
                      'biases': tf.Variable(tf.random.normal([n_nodes_hl1], stddev=hl_sigma))}
    hidden_2_layer = {'weights': tf.Variable(tf.random.normal([n_nodes_hl1, n_nodes_hl2], stddev=hl_sigma)),
                      'biases': tf.Variable(tf.random.normal([n_nodes_hl2], stddev=hl_sigma))}
    output_layer = {'weights': tf.Variable(tf.random.normal([n_nodes_hl2, n_output], stddev=hl_sigma)),
                    'biases': tf.Variable(tf.random.normal([n_output], stddev=hl_sigma))}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.tanh(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    output = tf.add(tf.matmul(l2, output_layer['weights']), output_layer['biases'], name='output')
    return output
def train_neural_network_batch():
    prediction = neural_network_model(x_ph)
    # dN/dx; tf.gradients returns a list, so take its single element
    pred_dx = tf.gradients(prediction, x_ph)[0]

    # Trial solution u(x) = A + (1 - x) * N(x) and its derivative
    u = A + (1 - x_ph) * prediction
    dudx = -prediction + (1 - x_ph) * pred_dx

    # Mean squared residual of the ODE du/dx + 2x = 0
    cost = tf.reduce_mean(tf.square(dudx + 2 * x_ph))
    optimizer = tf.train.AdamOptimizer(learn_rate).minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(n_epochs):
            _, l = sess.run([optimizer, cost], feed_dict={x_ph: x})
            if epoch % 100 == 0:
                print('loss:', l, ', epoch:', epoch)
        # Return the trained network output N(x) on the training grid
        return sess.run(tf.squeeze(prediction), {x_ph: x}), x
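
# Train the network and evaluate N(x) on the training grid, then assemble the
# trial solution u(x) = A + (1 - x) * N(x) and compare it with the analytical result.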
y_pred, x_pred = train_neural_network_batch()
y_pred = y_pred.reshape(N, 1)
u = A + (1 - x) * y_pred

plt.plot(x_pred, u, label='NN')
plt.plot(x_pred, -x_pred * x_pred + 2, label='Analytical')
plt.legend()
plt.show()
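
# Optional check: maximum absolute error of the recovered trial solution
# against the analytical solution u(x) = 2 - x^2.
u_exact = -x_pred * x_pred + 2
print('max abs error:', np.max(np.abs(u - u_exact)))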