Hello everyone, I'm new to this, but I'm attempting to write my first PINN to solve the Euler-Bernoulli beam equation, specifically d⁴w/dx⁴ = q(x). It works with uniform inputs, in particular for a beam of length L, but when I change the input data it fails. I therefore believe I need to normalize the input, or perhaps there is another fix; please help me. I'm using TensorFlow and NumPy.
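For reference, the boundary conditions I enforce in the loss describe a cantilever clamped at x = 0 and free at x = L under a uniform load q0, and the closed-form solution I compare against at the end is:

\frac{d^4 w}{dx^4} = \frac{q_0}{EI}, \qquad w(0) = w'(0) = 0, \qquad w''(L) = w'''(L) = 0,

w(x) = \frac{q_0 x^2}{24\,EI}\left(6L^2 - 4Lx + x^2\right).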
Here is my code:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
class PINN(tf.keras.Model):
    def __init__(self):
        super(PINN, self).__init__()
        # Three hidden layers with tanh activations and a single scalar output
        self.dense1 = tf.keras.layers.Dense(50, activation='tanh', input_dim=1)
        self.dense2 = tf.keras.layers.Dense(50, activation='tanh')
        self.dense3 = tf.keras.layers.Dense(50, activation='tanh')
        self.output_layer = tf.keras.layers.Dense(1, activation='tanh')

    def call(self, inputs):
        hidden1 = self.dense1(inputs)
        hidden2 = self.dense2(hidden1)
        hidden3 = self.dense3(hidden2)
        output = self.output_layer(hidden3)
        return output
def q(x):
    # Uniform load divided by the flexural rigidity, broadcast to the shape of x
    return q0 / (E * I) * tf.ones_like(x)
E = 210e9 # Young's modulus (Pa)
I = 1e-6 # Moment of inertia (m^4)
L = 10 # Length of the beam (m)
q0 = 1000 # Load intensity (N/m)
def ode_system(net, x):
    # Nested tapes to obtain derivatives of the network output up to fourth order in x
    with tf.GradientTape() as g:
        g.watch(x)
        with tf.GradientTape() as gg:
            gg.watch(x)
            with tf.GradientTape() as ggg:
                ggg.watch(x)
                with tf.GradientTape() as gggg:
                    gggg.watch(x)
                    u = net(x)
                u_x = gggg.gradient(u, x)
            u_xx = ggg.gradient(u_x, x)
        u_xxx = gg.gradient(u_xx, x)
    u_xxxx = g.gradient(u_xxx, x)

    # PDE residual and cantilever boundary conditions:
    # w(0) = 0, w'(0) = 0, w''(L) = 0, w'''(L) = 0
    ode_loss = u_xxxx - q(x)
    IC_loss = u[0] - 0
    IC1_loss = u_x[0] - 0
    IC2_loss = u_xx[-1] - 0
    IC3_loss = u_xxx[-1] - 0

    square_loss = tf.square(ode_loss) + tf.square(IC_loss) + tf.square(IC1_loss) + tf.square(IC2_loss) + tf.square(IC3_loss)
    total_loss = tf.reduce_mean(square_loss)
    return total_loss
model = PINN()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

train_x = np.linspace(0.0, L, 1000).reshape(-1, 1)
train_x = tf.convert_to_tensor(train_x, dtype=tf.float32)

# Training loop
num_epochs = 500
for epoch in range(num_epochs):
    with tf.GradientTape() as tape:
        physics_loss_value = ode_system(model, train_x)
        total_loss = physics_loss_value
    gradients = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    if epoch % 10 == 0:
        print(f"Epoch {epoch}/{num_epochs}, Total Loss: {total_loss.numpy()}, Physics Loss: {physics_loss_value.numpy()}")
# Evaluate the trained network and compare with the analytical cantilever solution
test_x = np.linspace(0, L, 1000).reshape(-1, 1)
uf = model(tf.convert_to_tensor(test_x, dtype=tf.float32))
true_u = q0 * test_x**2 / (24 * E * I) * (6 * L**2 - 4 * L * test_x + test_x**2)
train_u = q0 * train_x**2 / (24 * E * I) * (6 * L**2 - 4 * L * train_x + train_x**2)
plt.figure(figsize=(10, 8))
plt.plot(train_x, train_u, 'ok', label='Train')
plt.plot(test_x, true_u, '-k', label='True')
plt.plot(test_x, uf, '--r', label='Prediction')
plt.legend(fontsize=15)
plt.xlabel('x', fontsize=15)
plt.ylabel('u', fontsize=15)
plt.show()
I tried changing the activation function of the first layer to ReLU, but there was no improvement. I expected the PINN to give a solution close to the analytical one for this simple case. Thanks.
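To make the normalization idea from the top of the post concrete, this is roughly what I have in mind (an untested sketch, not something I have working; ode_system_scaled and x_hat are just my own names, and the boundary-condition terms are omitted for brevity):

import numpy as np
import tensorflow as tf

# Non-dimensional coordinate x_hat = x / L in [0, 1]; reuses model, L, E, I, q0 from above
x_hat = tf.convert_to_tensor(np.linspace(0.0, 1.0, 1000).reshape(-1, 1), dtype=tf.float32)

def ode_system_scaled(net, x_hat):
    # Same nested-tape construction as above, but derivatives are taken w.r.t. x_hat
    with tf.GradientTape() as g:
        g.watch(x_hat)
        with tf.GradientTape() as gg:
            gg.watch(x_hat)
            with tf.GradientTape() as ggg:
                ggg.watch(x_hat)
                with tf.GradientTape() as gggg:
                    gggg.watch(x_hat)
                    u = net(x_hat)
                u_x = gggg.gradient(u, x_hat)
            u_xx = ggg.gradient(u_x, x_hat)
        u_xxx = gg.gradient(u_xx, x_hat)
    u_xxxx = g.gradient(u_xxx, x_hat)
    # Chain rule: d^4 w / dx^4 = (1 / L**4) * d^4 w / d x_hat^4,
    # so the residual of w'''' = q0 / (E * I) becomes:
    residual = u_xxxx / L**4 - q0 / (E * I)
    return tf.reduce_mean(tf.square(residual))

The idea is that the network would then only ever see inputs in [0, 1] regardless of L, so the input scale no longer depends on the data.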