流程分析:在利用TensorFlow框架进行语义分割训练时,前面的数据转化过程步骤中含有将voc格式的语义分割图像标签转化为物体像素值为训练类别索引的图像标签这一步骤。
面对的问题:在项目开发过程中发现,tfrecord中的图像尺寸会影响模型在val数据集上所能达到的最优miou精度。同时,目前负责开发的项目中数据分布不均衡,因此还需要进行一些数据增广操作。
解决方案:写了以下代码,兼顾标签数据格式转化及数据增广功能。
可以优化的地方:下面代码是通过np.where来判断目标物体和背景,所以只支持单类别的数据格式处理。
# NOTE: input_dir和output_dir可以是同一个路径。python augment.py --dst_size="480,480" --input_dir 输入voc数据路径 --output_dir 输出数据增广后的路径
# -*- coding: utf-8 -*-
"""
@File        : augment.py
@Time        : 2022-2-26 22:57
@Author      : qian0733
@Description : Replaces the remove_gt_colormap step: converts VOC-style
               segmentation labels into class-index masks and writes several
               augmented variants of every image/label pair.
@Limitation  : only supports single-class (foreground/background) labels!
"""
# NOTE: input_dir and output_dir may be the same path.
# Usage: python augment.py --dst_size="480,480" --input_dir <voc data dir> --output_dir <augmented output dir>
import os
import sys
import argparse

import cv2
import numpy as np


def parse_arguments(argv):
    """Parse command-line arguments.

    Args:
        argv: list of argument strings (typically ``sys.argv[1:]``).
    Returns:
        argparse.Namespace with ``dst_size``, ``input_dir`` and ``output_dir``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dst_size', type=str, help='dst resize size', default='480,480')
    parser.add_argument('--input_dir', type=str, help='input jpg and label dir (voc type)', default='')
    parser.add_argument('--output_dir', type=str, help='output images dir', default='')
    return parser.parse_args(argv)


def _binarize(label):
    """Collapse a 3-channel label image into a single-class index mask.

    Any pixel whose red channel (BGR index 2) is non-zero becomes class 1,
    everything else is background (0). Single-class data only.
    """
    return np.where(label[:, :, 2] > 0, 1, 0).astype(np.uint8)


def augment_data():
    """Convert and augment every image found in ``input_img``.

    For each source image, four variants are written to ``output_img`` /
    ``output_labels``:
      *_resize : resized to ``dst_size``
      *_hist   : per-channel histogram equalization (same geometry as *_resize)
      *_flip   : horizontal mirror, then resized to ``dst_size``
      *_crop   : random corner crop (75% of each side) to enlarge the
                 foreground's area ratio; deliberately kept at crop size.

    Relies on the module-level names ``input_img``, ``input_label``,
    ``output_img``, ``output_labels`` and ``dst_size`` set in ``__main__``.
    """
    for img in os.listdir(input_img):
        stem = os.path.splitext(img)[0]  # robust against dots in file names
        img_path = os.path.join(input_img, img)
        label_path = os.path.join(input_label, stem + ".png")
        jpg = cv2.imread(img_path)
        label = cv2.imread(label_path)
        if jpg is None or label is None:  # unreadable image or missing label pair
            print("skip (unreadable image or missing label):", img)
            continue

        h, w = jpg.shape[:2]

        # --- resize to the target size -------------------------------------
        jpg_resize = cv2.resize(jpg, dst_size, interpolation=cv2.INTER_AREA)
        # INTER_NEAREST keeps label values exact; area/linear interpolation
        # would blend class values along object borders.
        label_resize = _binarize(cv2.resize(label, dst_size, interpolation=cv2.INTER_NEAREST))
        cv2.imwrite(os.path.join(output_img, stem + "_resize.jpg"), jpg_resize)
        cv2.imwrite(os.path.join(output_labels, stem + "_resize.png"), label_resize)

        # --- histogram equalization on the resized image -------------------
        b, g, r = cv2.split(jpg_resize)
        jpg_hist = cv2.merge((cv2.equalizeHist(b), cv2.equalizeHist(g), cv2.equalizeHist(r)))
        # Bug fix: the original never saved jpg_hist and wrote the label into
        # the images dir. Geometry is unchanged, so the resized label is reused.
        cv2.imwrite(os.path.join(output_img, stem + "_hist.jpg"), jpg_hist)
        cv2.imwrite(os.path.join(output_labels, stem + "_hist.png"), label_resize)

        # --- horizontal mirror flip ----------------------------------------
        jpg_flip = cv2.resize(cv2.flip(jpg, 1), dst_size, interpolation=cv2.INTER_AREA)
        label_flip = _binarize(cv2.resize(cv2.flip(label, 1), dst_size, interpolation=cv2.INTER_NEAREST))
        cv2.imwrite(os.path.join(output_img, stem + "_flip.jpg"), jpg_flip)
        cv2.imwrite(os.path.join(output_labels, stem + "_flip.png"), label_flip)

        # --- random corner crop to enlarge the target's area ratio ---------
        if np.random.randint(1, 3) == 1:  # top-left 75%-of-each-side block
            jpg_crop = jpg[: int(h * 0.75), : int(w * 0.75)]
            label_crop = label[: int(h * 0.75), : int(w * 0.75)]
        else:                             # bottom-right remainder block
            jpg_crop = jpg[int(h * 0.75):, int(w * 0.75):]
            label_crop = label[int(h * 0.75):, int(w * 0.75):]
        label_crop = _binarize(label_crop)
        cv2.imwrite(os.path.join(output_img, stem + "_crop.jpg"), jpg_crop)
        cv2.imwrite(os.path.join(output_labels, stem + "_crop.png"), label_crop)


if __name__ == '__main__':
    args = parse_arguments(sys.argv[1:])
    dst_size = tuple(int(i) for i in args.dst_size.split(","))
    input_dir = args.input_dir    # renamed: `input` shadowed the builtin
    output_dir = args.output_dir
    input_img = os.path.join(input_dir, "JPEGImages")
    input_label = os.path.join(input_dir, "SegmentationClassPNG")
    output_img = os.path.join(output_dir, "images")
    output_labels = os.path.join(output_dir, "labels")
    # exist_ok so reruns (or input_dir == output_dir) do not crash, and each
    # directory is created independently (the original skipped labels/ when
    # images/ already existed).
    os.makedirs(output_img, exist_ok=True)
    os.makedirs(output_labels, exist_ok=True)
    augment_data()
