In version 0.x, in the load_data_list()
function in basesegdataset.py
and the encode_decode()
function in encoder_decoder.py
, the printed information shows that the files are read in natural sequence: H48F020017_clip1_1_0_-1.png, H48F020017_clip1_2_0_-1.png, … up to H48F020017_clip1_100_0_-1.png.
However, after upgrading to version 1.x, the load_data_list()
function can still read in sequence, but the reading order of the encode_decode()
function has changed to 100, 10, 11, 12…, and the order has become chaotic:
I tried adding an index conversion in the load_data_list()
function (for example, mapping index 1 to 100 and 2 to 10), hoping that the encode_decode()
function would then read the files in the correct order. That did not work, and I don't think it is a good solution anyway. I would really like to know whether the upgrade changed the definition of some function and thereby caused this problem. I would appreciate any help — thank you very much.
def load_data_list(self) -> List[dict]:
    """Load annotation from directory or annotation file.

    When ``self.ann_file`` is a split file, every line of it names a
    sub-folder that is scanned for image/annotation pairs; otherwise the
    image directory is walked recursively and annotation paths are derived
    by swapping the suffix.

    Returns:
        list[dict]: All data info of dataset, in natural (numeric) order
        of the image path.
    """
    data_list = []
    img_dir = self.data_prefix.get('img_path', None)
    ann_dir = self.data_prefix.get('seg_map_path', None)
    if not osp.isdir(self.ann_file) and self.ann_file:
        # Fix: the message must be joined to the assert with a line
        # continuation — the original two separate lines were a SyntaxError.
        assert osp.isfile(self.ann_file), \
            f'Failed to load `ann_file` {self.ann_file}'
        lines = mmengine.list_from_file(
            self.ann_file, backend_args=self.backend_args)
        print(f'样本文件夹的数目: {len(lines)}')  # number of sample folders
        for line in lines:
            img_name = line.strip()
            img_dir_path = osp.join(img_dir, img_name)  # image folder
            seg_dir_path = osp.join(ann_dir, img_name)  # matching label folder
            # Collect every image/label file of this folder and sort both
            # lists by the numeric ID embedded in the file name, so frame 2
            # precedes frame 10 (natural order).
            img_files = sort_by_natural_order(
                glob.glob(osp.join(img_dir_path, '*' + self.img_suffix)))
            seg_files = sort_by_natural_order(
                glob.glob(osp.join(seg_dir_path, '*' + self.seg_map_suffix)))
            for img_file, seg_file in zip(img_files, seg_files):
                data_info = dict(
                    img_path=img_file,
                    seg_map_path=seg_file)
                # NOTE(review): these three entries are identical for every
                # file of a folder; downstream pipelines read them
                # per-sample, so duplicating them is expected.
                data_info['label_map'] = self.label_map
                data_info['reduce_zero_label'] = self.reduce_zero_label
                data_info['seg_fields'] = []
                data_list.append(data_info)
        # Debug output left in on purpose (asker's instrumentation).
        print(f'if:总共读取了 {len(data_list)} 个样本')
        print('222222222222222222')  # --songyujian
        print(f'Reading files from: {img_dir_path}')
        print(f'Image files: {img_files}')
        print(f'Segmentation files: {seg_files}')
    else:
        _suffix_len = len(self.img_suffix)
        for img in fileio.list_dir_or_file(
                dir_path=img_dir,
                list_dir=False,
                suffix=self.img_suffix,
                recursive=True,
                backend_args=self.backend_args):
            data_info = dict(img_path=osp.join(img_dir, img))
            if ann_dir is not None:
                seg_map = img[:-_suffix_len] + self.seg_map_suffix
                data_info['seg_map_path'] = osp.join(ann_dir, seg_map)
            data_info['label_map'] = self.label_map
            data_info['reduce_zero_label'] = self.reduce_zero_label
            data_info['seg_fields'] = []
            data_list.append(data_info)
    # Fix: the previous plain lexicographic sort on ``img_path`` reordered
    # the frames as 100, 10, 11, 12, … — the exact "chaotic" order observed
    # at inference time — and silently undid the natural ordering built
    # above. Sort with a natural key instead: digit runs compare as ints.
    import re

    def _natural_key(info):
        return [int(tok) if tok.isdigit() else tok
                for tok in re.split(r'(\d+)', info['img_path'])]

    data_list = sorted(data_list, key=_natural_key)
    return data_list
def encode_decode(self, inputs: Tensor,
                  batch_img_metas: List[dict], overlap) -> Tensor:
    """Encode images with backbone and decode into a semantic segmentation
    map of the same size as input."""
    # Debug: dump the per-image meta information before inference.
    print(batch_img_metas)
    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    features = self.extract_feat(inputs, overlap)
    # The modified decode head's ``predict`` returns two values; only the
    # first one (the segmentation logits) is needed here.
    predictions = self.decode_head.predict(features, batch_img_metas,
                                           self.test_cfg)
    return predictions[0]
user27292594 is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.