I am working on solving the 2D Laplace equation with PINNs to determine the hydraulic head in a confined aquifer. The problem involves a rectangular domain with dimensions 4m x 10m. The hydraulic head is fixed at 5m on the left side and 1m on the right side, while the top and bottom boundaries are no-flow boundaries.
I should consider 3 collocation points on the left and right boundaries, 10 collocation points on the top and bottom boundaries, and 30 collocation points in the domain uniformly spaced with the same intervals.
However, I am unsure of how to accomplish this task, so I assumed that these points were randomly distributed.
Initially, I applied Drichlet boundary conditions to all boundaries and wrote the corresponding code. I assumed h at the top and bottom boundaries is 0. However, I am now facing a challenge in defining Neumann boundary conditions in PyTorch. Can someone provide guidance on how to implement Neumann boundary conditions and uniformly distribution of collocation points? Thank you.
The Laplace equation is:
d2h/dx2 + d2h/dy2 = 0
BCs:
h(0, y)=5m , h(10, y) = 1m
dh(x,0)/dy = 0 , dh(x,4)/dy = 0
The code with assumed BCs and collocation points distribution is:
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
num_points = 5 # Number of randomly sampled points for IC and BC
x_min, x_max, y_min, y_max = 0, 10, 0, 4
x_range, y_range = np.random.rand(num_points), np.random.rand(num_points)
zeros = np.zeros(num_points)
ones = np.ones(num_points)
bc1 = np.vstack([x_range, ones, zeros]).T # Boundary top: h(x, 4) = 0
bc2 = np.vstack([10*ones, y_range, ones]).T # Boundary right: u(x=10, y) = 1
bc3 = np.vstack([x_range, zeros, zeros]).T # Boundary bottom: h(x, 0) = 0
bc4 = np.vstack([zeros, y_range, 5*ones]).T # Boundary left: h(x=0, y) = 5
initial_boundary_conditions = np.vstack([bc1, bc2, bc3, bc4])
x_initial_bc = initial_boundary_conditions[:, 0]
y_initial_bc = initial_boundary_conditions[:, 1]
u_initial_bc = initial_boundary_conditions[:, 2]
x_initial_bc = torch.autograd.Variable(torch.from_numpy(x_initial_bc).float(), requires_grad=True)
y_initial_bc = torch.autograd.Variable(torch.from_numpy(y_initial_bc).float(), requires_grad=True)
u_initial_bc = torch.autograd.Variable(torch.from_numpy(u_initial_bc).float(), requires_grad=True)
# neural network
collocation_points = 100000 # Number of randomly sampled collocation points to be evaluated for the PDE-based loss
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.neurons_per_layer = 20
self.fc1 = nn.Linear(2, self.neurons_per_layer)
self.fc2 = nn.Linear(self.neurons_per_layer, self.neurons_per_layer)
self.fc3 = nn.Linear(self.neurons_per_layer, self.neurons_per_layer)
self.fc4 = nn.Linear(self.neurons_per_layer, self.neurons_per_layer)
self.fc5 = nn.Linear(self.neurons_per_layer, self.neurons_per_layer)
self.fc6 = nn.Linear(self.neurons_per_layer, self.neurons_per_layer)
self.fc7 = nn.Linear(self.neurons_per_layer, self.neurons_per_layer)
self.fc8 = nn.Linear(self.neurons_per_layer, 1)
self.act_func = nn.Sigmoid()
def forward(self, x, y):
inputs = torch.cat([x.reshape(-1, 1), y.reshape(-1, 1)], axis=1)
output = self.act_func(self.fc1(inputs))
output = self.act_func(self.fc2(output))
output = self.act_func(self.fc3(output))
output = self.act_func(self.fc4(output))
output = self.act_func(self.fc5(output))
output = self.act_func(self.fc6(output))
output = self.act_func(self.fc7(output))
output = self.fc8(output)
return output
net = Net()
epochs = 500
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
criterion = torch.nn.MSELoss()
# loss
def f(x, y, net):
u = net(x, y)
u_x = torch.autograd.grad(u, x, create_graph=True, retain_graph=True, grad_outputs=torch.ones_like(u))[0]
u_xx = torch.autograd.grad(u_x, x, create_graph=True, retain_graph=True, grad_outputs=torch.ones_like(u_x))[0]
u_y = torch.autograd.grad(u, y, create_graph=True, retain_graph=True, grad_outputs=torch.ones_like(u))[0]
u_yy = torch.autograd.grad(u_y, y, create_graph=True, retain_graph=True, grad_outputs=torch.ones_like(u_y))[0]
#####u_t = torch.autograd.grad(u, t, create_graph=True, retain_graph=True, grad_outputs=torch.ones_like(u))[0]
loss_f = (u_xx + u_yy)
return loss_f
# training
losses = []
for epoch in range(epochs):
optimizer.zero_grad()
predictions_initial_bc = net(x_initial_bc, y_initial_bc)
mse_u = criterion(predictions_initial_bc.reshape(-1,), u_initial_bc) # This is the loss from boundary and initial conditions
x_collocation = torch.FloatTensor(collocation_points, ).uniform_(x_min, x_max)
y_collocation = torch.FloatTensor(collocation_points, ).uniform_(y_min, y_max)
x_collocation = torch.autograd.Variable(x_collocation, requires_grad=True)
y_collocation = torch.autograd.Variable(y_collocation, requires_grad=True)
f_out = f(x_collocation, y_collocation, net)
mse_f = criterion(torch.zeros_like(f_out), f_out) # This is the PDE-based loss evaluated at the randomly sampled collocation points
loss = mse_u + mse_f
losses.append(loss.item())
loss.backward()
optimizer.step()
if epoch % 100 == 0:
print(f'Epoch {epoch}/{epochs}: Loss = {loss.item()}')
# plotting
x = np.arange(x_min, x_max, 0.01)
y = np.arange(y_min, y_max, 0.01)
mesh_x, mesh_y = np.meshgrid(x, y)
x = np.ravel(mesh_x).reshape(-1, 1)
y = np.ravel(mesh_y).reshape(-1, 1)
pt_x = torch.autograd.Variable(torch.from_numpy(x).float(), requires_grad=True)
pt_y = torch.autograd.Variable(torch.from_numpy(y).float(), requires_grad=True)
fig, axes = plt.subplots(2, 5, figsize=(15, 5))
for index, axis in enumerate(axes.ravel()):
u = net(pt_x, pt_y).data.numpy()
mesh_u = u.reshape(mesh_x.shape)
cm = axis.pcolormesh(mesh_x, mesh_y, mesh_u, cmap='jet')#, vmin=-1, vmax=1)
fig.colorbar(cm, ax=axis)
axis.set_xlim([x_min, x_max])
axis.set_xticks([])
axis.set_yticks([])
axis.set_ylim([y_min, y_max])
fig.tight_layout()
I can’t define Neumann BC in pytorch.