I’m using a PINN to solve the damped-oscillator differential equation and, at the same time, to identify the oscillator's friction parameter from noisy observations of the system. I wrote the code in TensorFlow with a custom training routine. The problem is that the trainable parameter I define does not approach the correct value that I know underlies the noisy observations, and ultimately the PINN's solution is completely incorrect. However, my code works perfectly fine without the side quest of learning a trainable parameter, i.e., the friction parameter here.
# Implementation of oscillator system for NN
def oscillator_system_data_loss(t, net, func, params, mu, bc, t_data, u_data, lambda1):
    """Composite PINN loss for the damped oscillator with a learnable damping mu.

    Parameters
    ----------
    t : np.ndarray
        Collocation points for the ODE residual; reshaped to (N, 1).
    net : tf.keras.Model
        Network approximating the solution x(t).
    func : callable
        RHS of the ODE, called as func(x, dx_dt, k, d, m).
    params : sequence
        (k, _, m) — only params[0] and params[2] are used; the damping slot
        is replaced by the trainable variable `mu`.
    mu : tf.Variable
        Trainable damping coefficient (watched automatically by any tape).
    bc : sequence
        (x(0), x'(0)) initial conditions.
    t_data, u_data : array-like
        Noisy observation grid and values, shapes (M, 1).
    lambda1 : float
        Weight of the data-fit term.

    Returns
    -------
    (tf.Tensor, tf.Variable)
        Scalar total loss and the (unchanged) mu variable.
    """
    t = tf.constant(t.reshape(-1, 1), dtype=tf.float32)
    t_0 = tf.zeros((1, 1))

    # Nested tapes for the 2nd derivative. dx_dt MUST be computed while the
    # outer tape is still recording, otherwise d2x_dt2 comes back as None.
    with tf.GradientTape() as outer_tape:
        outer_tape.watch(t)
        with tf.GradientTape() as inner_tape:
            inner_tape.watch(t)
            x = net(t)
        dx_dt = inner_tape.gradient(x, t)      # 1st derivative, inside outer tape
    d2x_dt2 = outer_tape.gradient(dx_dt, t)    # 2nd derivative

    # Initial-condition losses: position at t=0 and velocity at the first
    # collocation point (assumes t[0] == 0 — TODO confirm against train_t).
    bc_loss_1 = tf.reduce_mean(tf.square(net(t_0) - bc[0]))
    bc_loss_2 = tf.reduce_mean(tf.square(dx_dt[0] - bc[1]))

    # ODE residual with the learnable parameter mu in the damping slot.
    ode_residual = d2x_dt2 - func(x, dx_dt, params[0], mu, params[2])
    ode_loss = tf.reduce_mean(tf.square(ode_residual))

    # Data-fit term on the noisy observations, weighted by lambda1.
    data_loss = tf.reduce_mean(tf.square(u_data - net(t_data)))

    # FIX: reduce each term to a scalar BEFORE summing. The original added the
    # (N,1) ODE residual and the (M,1) data residual element-wise first, which
    # requires the collocation and data grids to have identical sizes (it
    # raises — or broadcasts incorrectly — otherwise) and entangles the
    # per-point weighting of the two terms.
    total_loss = ode_loss + lambda1 * data_loss + bc_loss_1 + bc_loss_2
    return total_loss, mu
# Training routine with data loss
def train_NN_data_loss(epochs, optm, NN, func, bc, lambda1, train_t, train_u, data_t, data_u,
                       data_u_noised, test_t_plot, true_u_plot, testing_t):
    """Train the PINN and the damping parameter mu jointly.

    Returns
    -------
    (list[float], list[np.ndarray], int)
        Per-epoch loss history, history of mu snapshots at the tracked
        epochs, and the early-stop epoch (0 — early stopping is disabled).
    """
    train_loss_record = []
    loss_tracker = plotting_points(epochs)
    # tf.Variable is watched by GradientTape automatically — no tape.watch(mu)
    # is needed for its gradient to be recorded.
    mu = tf.Variable(initial_value=tf.ones((1, 1)), trainable=True, dtype=tf.float32)
    mu_list = []
    early_stop = 0  # kept for interface compatibility; early stopping disabled

    for itr in range(epochs):
        with tf.GradientTape() as tape:
            # NOTE(review): `params` is read from the enclosing/global scope —
            # consider passing it explicitly to this function.
            train_loss, _ = oscillator_system_data_loss(
                train_t, NN, func, params, mu, bc, data_t, data_u_noised, lambda1)
        # FIX: store a Python float, not the tf.Tensor itself — keeping
        # thousands of live tensors retains device memory for the whole run.
        train_loss_record.append(float(train_loss.numpy()))

        # Optimize the network weights and mu together. The variable list is
        # rebuilt each step because NN.trainable_variables is only populated
        # after the model has been called at least once.
        trainables = NN.trainable_variables + [mu]
        grad_w = tape.gradient(train_loss, trainables)
        optm.apply_gradients(zip(grad_w, trainables))

        if itr in loss_tracker:
            print(train_loss.numpy())
            print(mu.numpy())
            plot_epochs_with_noise(train_t, train_u, data_t, data_u_noised,
                                   test_t_plot, true_u_plot, testing_t, itr, NN)
            mu_list.append(mu.numpy())

    return train_loss_record, mu_list, early_stop
# For ODE loss computation / minimization
def NN_osc_func(x, dx_dt, k, d, m):
    """RHS of the damped oscillator ODE: x'' = -(k/m)·x - (d/m)·x'.

    PEP 8 (E731): a named function instead of a lambda assignment — same
    signature and values, but with a proper name in tracebacks.
    """
    return -k / m * x - d / m * dx_dt
You can see here the result after 6000 epochs. The neural network is converging to a horizontal line with an error of 5.76 and a parameter estimation of 0.84, although the correct value is 4. This is my damped oscillator set-up:
k = 400
d = 4
m = 1
y0 = np.array([1.0, 0.0])
Wrong result.
Corresponding loss.
Unfortunately, at this point I don’t know what the problem could be. I tried changing `NN_osc_func`, and I experimented with the `tape.gradient()` calls in the two functions. I would appreciate any help. Thanks!
Artem is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.