I'm trying to build a UNet segmentation model for aerial vegetation imagery with PyTorch. The UNet is then exported to ONNX (an ONNX/GIS_ONNX model) so it can be used in the Deepness plugin for QGIS. The UNet base code and the training script are mostly derived/copied from Mostafa Wael's article on Towards Data Science. The data loading is a custom Dataset built on the torch.utils.data Dataset/DataLoader classes. Training data is fed as tile pairs of a cut orthomosaic and the corresponding class mask (512×512).
My problem is that Deepness only ever detects the background class (100%, none of the actual classes), even when it is run on the training data itself.
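For context, the export step is essentially the standard torch.onnx.export call, along these lines (simplified sketch: the file name is a placeholder, 3-channel 512×512 tiles are assumed, and any Deepness-specific ONNX metadata such as class names and resolution is set separately following the plugin documentation):

import torch

# minimal export sketch -- assumes `model` is the trained UNet and 3-channel 512x512 input tiles
model.eval()
dummy_input = torch.randn(1, 3, 512, 512)
torch.onnx.export(
    model,
    dummy_input,
    "unet_vegetation.onnx",  # placeholder output path
    input_names=["input"],
    output_names=["output"],
    dynamic_axes={"input": {0: "batch"}, "output": {0: "batch"}},
    opset_version=17,
)

Here is the dataset and augmentation code: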
import torch
import os
import numpy as np
import glob
import matplotlib.pyplot as plt
import cv2
import math
import random
from torchvision.transforms import transforms
from torchvision.transforms import functional
from torch.utils.data import Dataset, DataLoader, Subset
def largest_rotated_rect(w, h, angle):
"""
Given a rectangle of size wxh that has been rotated by 'angle' (in
radians), computes the width and height of the largest possible
axis-aligned rectangle within the rotated rectangle.
Original JS code by 'Andri' and Magnus Hoff from Stack Overflow
Converted to Python by Aaron Snoswell
"""
quadrant = int(math.floor(angle / (math.pi / 2))) & 3
sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle
alpha = (sign_alpha % math.pi + math.pi) % math.pi
bb_w = w * math.cos(alpha) + h * math.sin(alpha)
bb_h = w * math.sin(alpha) + h * math.cos(alpha)
gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)
delta = math.pi - alpha - gamma
length = h if (w < h) else w
d = length * math.cos(alpha)
a = d * math.sin(alpha) / math.sin(delta)
y = a * math.cos(gamma)
x = y * math.tan(gamma)
return (
bb_w - 2 * x,
bb_h - 2 * y
)
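Note that largest_rotated_rect expects the angle in radians, so degrees have to be converted before calling it; for a 512×512 tile rotated by 30° the largest safe square is roughly 375 px:

# quick check of the crop size for a 512x512 tile rotated by 30 degrees
print(largest_rotated_rect(512, 512, math.radians(30)))  # -> approx. (374.8, 374.8)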
def pickTransform(transform_choices):  # list must be given in torch syntax, e.g. transforms.Random...  // TODO: pass the image size as metadata so largest_rotated_rect is dynamic
    check = np.random.choice([True, False], size=len(transform_choices))
    selected_transforms = np.array(transform_choices)[check].tolist()
    if any(isinstance(i, transforms.RandomCrop) for i in selected_transforms):
        # pad with reflected borders *before* the random crop so the crop can also sample from the padded margin
        selected_transforms.insert(0, transforms.Pad(100, padding_mode="reflect"))
    print(selected_transforms)
    return selected_transforms
geo_trans = [
transforms.RandomCrop(400),
transforms.RandomHorizontalFlip(p=1),
transforms.RandomVerticalFlip(p=1),
]
opt_trans = [
transforms.ColorJitter(0.05, 0, 0, 0),
transforms.ColorJitter(0, 0.05, 0, 0),
transforms.ColorJitter(0, 0, 0.05, 0),
transforms.ColorJitter(0, 0, 0, 0.05)
]
def rotateCrop(Tensor):
    # rotation is kept separate from the other transforms so the sampled angle
    # can be reused to compute the largest safe axis-aligned crop
    if np.random.choice([True, False]):
        h = Tensor.size(1)  # tensor layout is (C, H, W)
        w = Tensor.size(2)
        angle = float(np.random.choice(range(-45, 45, 1)))
        print(angle)
        crop_x, crop_y = largest_rotated_rect(w, h, math.radians(angle))  # largest_rotated_rect expects radians
        print(crop_x)
        Tensor = functional.rotate(Tensor, angle)  # rotate() is not in-place, the result has to be assigned
        rect_crop = transforms.Compose([
            transforms.CenterCrop((int(crop_y), int(crop_x))),  # CenterCrop takes (height, width)
            transforms.Resize(h, interpolation=transforms.InterpolationMode.NEAREST)
        ])
        TensorRotCrop = rect_crop(Tensor)
        return TensorRotCrop
    else:
        return Tensor
def onehot(Tensor):
    # expects a (1, H, W) mask with class ids 0-8 and returns a (9, H, W) one-hot tensor
    Tensor = Tensor.long()
    Tensor = torch.nn.functional.one_hot(Tensor, 9)
    Tensor = Tensor.squeeze(0)
    Tensor = Tensor.permute(2, 0, 1)
    return Tensor
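For reference, applied to a dummy (1, H, W) mask with class ids 0-8 this yields a (9, H, W) tensor:

mask = torch.randint(0, 9, (1, 512, 512))  # dummy mask with class ids 0-8
print(onehot(mask.float()).shape)          # -> torch.Size([9, 512, 512])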
class customDataset(Dataset):
    def __init__(self, dir_data, transformation=None, train_percent=0.5):  # with transformation=None the dataset can also be used for validation data
        super(customDataset, self).__init__()
        self.dir_data = dir_data
        # sort both lists so that image i and label i actually belong together (glob order is not guaranteed)
        self.img_files = sorted(glob.glob(os.path.join(dir_data, 'Images', '*.*')))
        self.label_files = sorted(glob.glob(os.path.join(dir_data, 'Labels', '*.*')))
        self.transformation = transformation
        # prepare indices for training and validation
        self.indices = list(range(len(self.img_files)))
        random.shuffle(self.indices)
        num_train = int(train_percent * len(self.indices))  # split threshold
        self.train_indices = self.indices[:num_train]  # training and validation data can later be accessed via self.xx_indices (e.g. with Subset)
        self.val_indices = self.indices[num_train:]

    def __len__(self):
        return len(self.img_files)
    def __getitem__(self, index):
        img_path = self.img_files[index]
        label_path = self.label_files[index]
        image = np.array(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB))
        label = np.array(cv2.imread(label_path, cv2.IMREAD_GRAYSCALE))
        # move the channel axis to the front (torch layout: channels, height, width);
        # must be adapted for images with more than 3 channels.
        # If the network expects input in [0, 1], add .float() / 255.0 here.
        image_tensor = torch.from_numpy(image).permute(2, 0, 1)
        label_tensor = torch.from_numpy(label).unsqueeze(0)  # add a channel dimension so torch.cat can stack image and mask
        if index in self.train_indices:
            if self.transformation == "opt" or self.transformation == "both":
                transform_opt = transforms.Compose(pickTransform(opt_trans))
                image_tensor = transform_opt(image_tensor)
            if self.transformation == "geo" or self.transformation == "both":
                stack_tensor = torch.cat((image_tensor, label_tensor))  # concatenate image and mask along dim 0 so the geometric transforms hit both identically
                transform_geo = transforms.Compose(pickTransform(geo_trans))
                transformed_stack = transform_geo(stack_tensor)
                transformed_stack = rotateCrop(transformed_stack)
                image_tensor = transformed_stack[0:-1]
                label_tensor = transformed_stack[-1:]  # slicing with -1: keeps the channel dimension
        image_tensor = image_tensor.float()
        label_tensor = label_tensor.float()
        label_tensor = onehot(label_tensor)
        return image_tensor, label_tensor
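To rule out a data-loading problem (which would also explain all-background predictions), one sample can be checked straight from the dataset, roughly like this (sketch; the directory path is a placeholder):

ds = customDataset("path/to/training_tiles", transformation=None)
img, lbl = ds[0]
print(img.shape, img.dtype, img.min().item(), img.max().item())  # expect (3, 512, 512), float, values 0-255 (not yet scaled to 0-1)
print(lbl.shape, lbl.sum(dim=0).unique())                        # expect (9, 512, 512) and only 1s after one-hot
print(lbl.sum(dim=(1, 2)))                                       # per-class pixel counts -- all mass in class 0 would mean the mask is empty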
I ran between 5 and 15 epochs on a high-performance cluster. I also swapped the order of the optimizer and scheduler steps compared to Mostafa Wael's version, because I thought it might be hindering learning and because I got a PyTorch warning about it.
import copy
import time
from collections import defaultdict

def train_model(model, optimizer, scheduler, num_epochs=10):
    # get_data_loaders, calc_loss and print_metrics are defined elsewhere in the training script
    dataloaders = get_data_loaders()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    best_model_wts = copy.deepcopy(model.state_dict())
    best_loss = 1e10
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        since = time.time()
        # Each epoch has a training and a validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                for param_group in optimizer.param_groups:
                    print("LR", param_group['lr'])
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            metrics = defaultdict(float)
            epoch_samples = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track history only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    loss = calc_loss(outputs, labels, metrics)
                    # backward + optimize only in the training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                        # note: placed here, the scheduler steps once per *batch*;
                        # a per-epoch scheduler such as StepLR will then decay the LR very quickly
                        scheduler.step()
                # statistics
                epoch_samples += inputs.size(0)
            print_metrics(metrics, epoch_samples, phase)
            epoch_loss = metrics['loss'] / epoch_samples
            # deep copy the model
            if phase == 'val' and epoch_loss < best_loss:
                print("saving best model")
                best_loss = epoch_loss
                best_model_wts = copy.deepcopy(model.state_dict())
        time_elapsed = time.time() - since
        print('{:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val loss: {:4f}'.format(best_loss))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
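To narrow down whether the all-background output comes from the network itself or from the export/Deepness side, the raw predictions on a training tile can be inspected roughly like this (sketch; ds is the dataset object from the check above):

# rough check of the raw network output on one training tile
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device).eval()
with torch.no_grad():
    img, lbl = ds[0]
    logits = model(img.unsqueeze(0).to(device))         # expected shape (1, 9, H, W)
    pred = logits.argmax(dim=1).squeeze(0).cpu()        # predicted class id per pixel
    print(torch.bincount(pred.flatten(), minlength=9))  # everything in class 0 -> the network itself only predicts background
    print(torch.bincount(lbl.argmax(dim=0).flatten(), minlength=9))  # class distribution of the ground-truth mask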
Does the model just need more training, or better training data (the initial polygon tracing was pretty sloppy)? Thank you in advance.