So I am trying to normalize a tensor that needed an extra dimension. I used unsqueeze, as you'll see in the following code, to add this extra dimension, which corresponds to the channels of the image.
import torch
import torchvision.transforms.functional as TF

def scale_img(self, sample_img, sample_name, scaling_mode):
    # Add a channel dimension if missing
    if len(sample_img.shape) == 2:
        sample_img = sample_img.unsqueeze(0)

    if scaling_mode == 'normalize':
        if 'SEN2' in sample_name:
            return TF.normalize(sample_img, mean=self.means['sen2'], std=self.stds['sen2'])
        elif 'MODIS' in sample_name:
            return TF.normalize(sample_img, mean=self.means['mod'], std=self.stds['mod'])
    elif scaling_mode == 'min-max':
        # Per-channel minima and maxima over the two spatial dimensions
        mins = sample_img.min(dim=-1).values.min(dim=-1).values
        maxs = sample_img.max(dim=-1).values.max(dim=-1).values
        print("mins:", mins)
        print("maxs:", maxs)
        uniq_mins = mins.unique()
        uniq_maxs = maxs.unique()
        if not (((len(uniq_mins) == 1) and (uniq_mins.item() == 0.)) and ((len(uniq_maxs) == 1) and (uniq_maxs.item() == 0.))):
            # Some images are all-zeros, so scaling them would return a NaN image
            new_ch = []
            for ch in range(sample_img.shape[0]):
                if mins[ch] == maxs[ch]:
                    # Some channels contain only a single value, so scaling returns all-NaN.
                    # We convert such channels to all-zeros.
                    new_ch.append(torch.zeros_like(sample_img[ch, :, :])[None, :, :])
                else:
                    new_ch.append(((sample_img[ch, :, :] - mins[ch]) / (maxs[ch] - mins[ch]))[None, :, :])
            return torch.cat(new_ch, dim=0)
    elif isinstance(scaling_mode, list):
        # Min-max scale, then stretch into the range [new_min, new_max]
        new_min, new_max = [torch.tensor(i) for i in scaling_mode]
        mins = sample_img.min(dim=-1).values.min(dim=-1).values
        maxs = sample_img.max(dim=-1).values.max(dim=-1).values
        uniq_mins = mins.unique()
        uniq_maxs = maxs.unique()
        if not (((len(uniq_mins) == 1) and (uniq_mins.item() == 0.)) and ((len(uniq_maxs) == 1) and (uniq_maxs.item() == 0.))):
            # Some images are all-zeros, so scaling them would return a NaN image
            new_ch = []
            for ch in range(sample_img.shape[0]):
                if mins[ch] == maxs[ch]:
                    # Single-valued channels would scale to all-NaN; use all-zeros instead
                    new_ch.append(torch.zeros_like(sample_img[ch, :, :])[None, :, :])
                else:
                    new_ch.append(((sample_img[ch, :, :] - mins[ch]) / (maxs[ch] - mins[ch]))[None, :, :])
            return torch.mul(torch.cat(new_ch, dim=0), (new_max - new_min)) + new_min
    elif scaling_mode.startswith('clamp_scale'):
        thresh = int(scaling_mode.split('_')[-1])
        return torch.clamp(sample_img, min=0, max=thresh) / thresh
    elif scaling_mode.startswith('clamp'):
        thresh = int(scaling_mode.split('_')[-1])
        sample_img = torch.clamp(sample_img, min=0, max=thresh)
        if 'normalize' in scaling_mode:
            if 'SEN2' in sample_name:
                return TF.normalize(sample_img, mean=self.means['sen2'], std=self.stds['sen2'])
            elif 'MODIS' in sample_name:
                return TF.normalize(sample_img, mean=self.means['mod'], std=self.stds['mod'])
        else:
            return sample_img
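For context, I believe the failure can be reduced to this minimal sketch (assuming TF is torchvision.transforms.functional, with seven made-up per-channel stats standing in for the MODIS 500m ones): normalizing a tensor whose channel dimension disagrees with the length of the per-channel stats breaks the in-place broadcast.

import torch
import torchvision.transforms.functional as TF

img = torch.rand(256, 256).unsqueeze(0)  # one channel after unsqueeze -> [1, 256, 256]
mean = [0.0] * 7                         # but seven per-channel stats, e.g. MODIS 500m
std = [1.0] * 7
TF.normalize(img, mean=mean, std=std)    # RuntimeError: output with shape [1, 256, 256]
                                         # doesn't match the broadcast shape [7, 256, 256]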
Thing is, that is exactly the error I got: RuntimeError: output with shape [1, 256, 256] doesn't match the broadcast shape [7, 256, 256], which suggests the added dimension doesn't match the number of channels the corresponding stats expect. What I'm trying to figure out is a way to give this extra dimension its per-band values, for sen2 or mod respectively, directly from the following dictionary I made:
configs = {
    "selected_bands": {
        "sen2": {
            "B02": -1,
            "B03": -1,
            "B04": -1,
            "B05": -1,
            "B06": -1,
            "B07": -1,
            "B11": -1,
            "B12": -1,
            "B8A": -1
        },
        "mod": {
            "B01": -1,
            "B02": -1,
            "B03": -1,
            "B04": -1,
            "B05": -1,
            "B06": -1,
            "B07": -1
        }
    },
    "mod_bands": {
        "250": {
            "B01": 0,
            "B02": 1
        },
        "500": {
            "B01": 0,
            "B02": 1,
            "B03": 2,
            "B04": 3,
            "B05": 4,
            "B06": 5,
            "B07": 6
        }
    },
    "sen2_bands": {
        "10": {
            "B02": 0,
            "B03": 1,
            "B04": 2,
            "B08": 3
        },
        "20": {
            "B02": 0,
            "B03": 1,
            "B04": 2,
            "B05": 3,
            "B06": 4,
            "B07": 5,
            "B11": 6,
            "B12": 7,
            "B8A": 8
        },
        "60": {
            "B01": 0,
            "B02": 1,
            "B03": 2,
            "B04": 3,
            "B05": 4,
            "B06": 5,
            "B07": 6,
            "B09": 7,
            "B11": 8,
            "B12": 9,
            "B8A": 10
        }
    },
    "sen2_mod_500_band_mapping": {
        "B02": "B03",  # Blue
        "B03": "B04",  # Green
        "B04": "B01",  # Red
        "B08": "B02",  # NIR
        "B12": "B07",  # SWIR
        "B8A": "B02"   # NIR
    },
    "sen2_mean": {
        "10": [],
        "20": [63.8612, 73.0030, 78.0166, 100.7361, 137.4804, 151.7485, 144.9945, 105.9401, 162.0981],
        "60": [353.2076, 403.5439, 526.2053, 548.9030, 788.2869, 1270.0687, 1443.4496, 1594.2333, 1319.8888, 887.8551, 1570.0552]
    },
    "sen2_std": {
        "10": [],
        "20": [288.5572, 318.7534, 354.1387, 430.6897, 573.3617, 634.2242, 614.6827, 454.1967, 680.0145],
        "60": [119.6622, 128.4608, 139.5304, 162.2530, 177.1854, 225.1098, 251.2140, 296.0676, 242.8814, 191.1843, 267.1247]
    },
    "mod_mean": {
        "250": [1.1],
        "500": [462.7152, 1470.8407, 282.5445, 437.1656, 1615.3417, 1256.1718, 732.4786]
    },
    "mod_std": {
        "250": [1.1],
        "500": [112.8232, 228.2582, 76.8198, 96.0663, 241.4973, 205.7800, 146.8115]
    }
}
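This is roughly what I have in mind for wiring the stats out of that dictionary (just a sketch: stats_from_configs is a hypothetical helper I haven't written yet, and the resolution keys '20' and '500' are the ones I would actually use):

import torch

def stats_from_configs(configs, sample_name, sen2_res='20', mod_res='500'):
    # Pick the per-band mean/std lists that match the sensor named in the sample
    if 'SEN2' in sample_name:
        mean, std = configs['sen2_mean'][sen2_res], configs['sen2_std'][sen2_res]
    elif 'MODIS' in sample_name:
        mean, std = configs['mod_mean'][mod_res], configs['mod_std'][mod_res]
    else:
        raise ValueError(f"Unknown sensor in sample name: {sample_name}")
    return torch.tensor(mean), torch.tensor(std)

# Hypothetical usage with a made-up sample name:
mean, std = stats_from_configs(configs, 'SEN2_patch_42')
# mean has 9 entries here, so the image passed to TF.normalize
# would need 9 channels to match.

That way the number of stats always matches the number of bands listed under the corresponding resolution, and the unsqueezed channel dimension would have to agree with len(mean) before TF.normalize is called.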