For the D435i camera, the key part is the code that instantiates the RealSense module (the pipeline setup, stream configuration, and frame alignment in the script below); the rest is standard YOLOv5 object-detection code.
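
The RealSense-specific calls can be read on their own first. Below is a minimal capture sketch that uses only the calls appearing in the full script (pipeline, config, align, get_distance), assuming pyrealsense2 is installed and a D435i is connected:

    import numpy as np
    import pyrealsense2 as rs

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
    pipeline.start(config)
    align_to_color = rs.align(rs.stream.color)  # align depth frames to the color stream
    try:
        while True:
            frames = align_to_color.process(pipeline.wait_for_frames())
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            color_image = np.asanyarray(color_frame.get_data())  # 720x1280x3 BGR array for the detector
            print(depth_frame.get_distance(640, 360))  # distance in meters at the image center
    finally:
        pipeline.stop()

The full detection script: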

    import argparse  # Python's standard command-line parsing module; lets us pass arguments to the program from the command line
    import os
    import shutil
    import time
    from pathlib import Path  # Path turns a str into a Path object, making string paths easy to manipulate
    import cv2
    import torch
    import torch.backends.cudnn as cudnn  # cuDNN module
    from numpy import random
    import numpy as np
    import pyrealsense2 as rs  # the RealSense SDK module
    from models.experimental import attempt_load
    from utils.general import (
        check_img_size, non_max_suppression, apply_classifier, scale_coords,
        xyxy2xywh, plot_one_box, strip_optimizer, set_logging, plot_line, plot_cross)
    from utils.torch_utils import select_device, load_classifier, time_synchronized
    from utils.datasets import letterbox
    def detect(save_img=False):
        # Load parameters
        out, source, weights, view_img, save_txt, imgsz = \
            opt.save_dir, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
        webcam = source == '0' or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')

        # Initialize
        set_logging()  # set up logging
        device = select_device(opt.device)  # pick the compute device available on this host
        if os.path.exists(out):  # output dir
            shutil.rmtree(out)  # delete dir
        os.makedirs(out)  # make new dir
        # If the device is a GPU, use half precision (float16) for both the model and the input images
        half = device.type != 'cpu'  # half precision only supported on CUDA

        # Load the model and its weights, and adjust it for inference
        model = attempt_load(weights, map_location=device)  # load FP32 model
        imgsz = check_img_size(imgsz, s=model.stride.max())  # ensure imgsz is divisible by the max stride (the model's strides are [8, 16, 32], so usually 32); adjust and return it if not
        if half:  # convert the model from float32 to float16 to speed up inference
            model.half()  # to FP16

        # Set up the inference data source (webcam-style stream from the RealSense camera)
        vid_path, vid_writer = None, None
        view_img = True
        cudnn.benchmark = True  # speeds up inference when the input size is constant
        # dataset = LoadStreams(source, img_size=imgsz)  # load video streams from a folder

        # Get the class names and generate a random color for each class
        names = model.module.names if hasattr(model, 'module') else model.names
        colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

        # Inference
        t0 = time.time()  # record the start time
        # Run one forward pass on an all-zero tensor first, to check that the pipeline works
        img = torch.zeros((1, 3, imgsz, imgsz), device=device)
        _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
        # Instantiate the RealSense module
        # https://www.rs-online.com/designspark/intelpython2-nvidia-jetson-nanorealsense-d435-cn
        pipeline = rs.pipeline()
        # Create a config object:
        config = rs.config()
        # Declare the RGB and depth streams
        # config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
        # Start streaming
        pipeline.start(config)
        align_to_color = rs.align(rs.stream.color)  # align the depth frames to the color frames
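        # get_distance() used further below already returns meters; if you instead work with the raw
        # z16 values in depth_image, scale them by the device's depth scale, e.g.
        # depth_scale = pipeline.get_active_profile().get_device().first_depth_sensor().get_depth_scale()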
        conf_sum = 0  # running sum of detection confidences, for averaging
        count = 0  # number of detections seen so far
        while True:
            start = time.time()
            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()  # wait for the latest frameset; returns a composite frame
            frames = align_to_color.process(frames)  # apply the align operation declared above to the frameset
            # Split the composite frame
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            # Convert to numpy arrays
            color_image = np.asanyarray(color_frame.get_data())
            # print(color_image.shape)
            depth_image = np.asanyarray(depth_frame.get_data())
            # mask = np.zeros([color_image.shape[0], color_image.shape[1]], dtype=np.uint8)
            # mask[0:480, 320:640] = 255

            # Preprocess the RGB image and feed it to the model
            sources = [source]  # data sources
            imgs = [None]
            path = sources  # path: image/video path
            imgs[0] = color_image
            im0s = imgs.copy()  # im0s: the images at their original size
            img = [letterbox(x, new_shape=imgsz)[0] for x in im0s]  # img: the images after resize + pad
            print(imgsz)
            img = np.stack(img, 0)  # stack along dim 0
            img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, HWC to CHW
            img = np.ascontiguousarray(img, dtype=np.float16 if half else np.float32)
            # ascontiguousarray copies a non-contiguously stored array into contiguous memory, which speeds up later operations
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            # Convert the image to the format the network expects
            img = torch.from_numpy(img).to(device)  # convert the numpy array to a PyTorch tensor and move it to the compute device
            # If the image is 3-dimensional (RGB), prepend a batch dimension of size 1,
            # because the network expects 4D input: [batch_size, channel, h, w]
            if img.ndimension() == 3:
                img = img.unsqueeze(0)  # add a dim of size 1 at dim 0: [channel, h, w] -> [batch_size, channel, h, w]
            t1 = time_synchronized()  # accurate current time (synchronizes CUDA before reading the clock)
            # Forward pass on the image/frame
            pred = model(img, augment=opt.augment)[0]
            # Run NMS to remove redundant boxes
            # conf_thres: confidence threshold
            # iou_thres: IoU threshold
            # classes: keep only specific classes; default None
            # agnostic_nms: whether NMS also suppresses boxes across different classes; default False
            # max_det: maximum number of detections per image; default 1000
            # pred: [num_obj, 6], e.g. [5, 6]; these predictions are still relative to img_size
            pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
            # Save or print the predictions
            for i, det in enumerate(pred):  # detections per image
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
                s += '%gx%g ' % img.shape[2:]  # print string: the input shape (h, w)
                gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain gn = [w, h, w, h], used for normalization below
                if det is not None and len(det):
                    # Rescale boxes from img_size back to the original im0 size
                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                    # Print results: append the per-class detection counts to s
                    for c in det[:, -1].unique():
                        n = (det[:, -1] == c).sum()  # detections per class
                        s += '%g %ss, ' % (n, names[int(c)])  # add to string
                    # Write results: draw boxes on im0
                    for *xyxy, conf, cls in reversed(det):
                        if conf:
                            count = count + 1
                            conf_sum = conf_sum + conf
                            ave = conf_sum / count
                            print('ave_conf = %.3f' % ave)
                        # The next two lines were for saving labels to a *.txt file; we do not save them,
                        # so the `with open(...)` block was removed and these values are unused
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        line = (cls, conf, *xywh) if opt.save_conf else (cls, *xywh)  # label format

                        # Get the distance to the target
                        distance_list = []
                        mid_pos = [int((int(xyxy[0]) + int(xyxy[2])) / 2),
                                   int((int(xyxy[1]) + int(xyxy[3])) / 2)]  # center pixel of the predicted box
                        min_val = min(abs(int(xyxy[2]) - int(xyxy[0])), abs(int(xyxy[3]) - int(xyxy[1])))  # range for the random offsets
                        # Take 40 random samples; each one yields a depth value for the same target box
                        randnum = 40
                        for _ in range(randnum):
                            bias = random.randint(-min_val // 4, min_val // 4)  # random integer offset within a fixed range ('//' is integer division)
                            dist = depth_frame.get_distance(int(mid_pos[0] + bias),
                                                            int(mid_pos[1] + bias))  # depth (in meters) at the sampled point, from the depth frame
                            if dist:
                                distance_list.append(dist)  # keep non-zero readings
                        # Filter the 40 depth samples
                        distance_list = np.array(distance_list)
                        distance_list = np.sort(distance_list)[
                            randnum // 2 - randnum // 4:randnum // 2 + randnum // 4]  # keep the middle half of the sorted values (a median-style filter)
                        label = '%s %.2f%s' % (names[int(cls)], np.mean(distance_list), 'm')  # the mean of the kept values is the target depth
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)  # draw the box back onto the original image
                        im_mid_pos = (im0.shape[1] // 2, im0.shape[0] // 2)
                        plot_cross(im0, im_mid_pos, size=20, color=[0, 255, 0], line_thickness=2)
                        plot_line(im0, im_mid_pos, tuple(mid_pos), color=colors[int(cls)], line_thickness=3)
            t2 = time_synchronized()
            # Print time (inference + NMS)
            sec = t2 - t1
            # fps = 1 / sec
            print('%sDone. (%.3fs)' % (s, sec))
            # Save video
            # out = cv2.VideoWriter('rs.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 30, (640, 480), True)
            # print('writing video')
            # out.write(im0)
            # Stream results
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration
            # print('Done. (%.3fs)' % (time.time() - t0))
    if __name__ == '__main__':
        parser = argparse.ArgumentParser()
        # parser.add_argument('--weights', nargs='+', type=str, default='yolov5m.pt', help='model.pt path(s)')
        parser.add_argument('--weights', nargs='+', type=str, default='runs/train/exp32/weights/best.pt', help='model.pt path(s)')
        parser.add_argument('--source', type=str, default='0', help='source')  # file/folder, 0 for webcam
        parser.add_argument('--img-size', type=int, default=1280, help='inference size (pixels)')
        parser.add_argument('--conf-thres', type=float, default=0.35, help='object confidence threshold')
        parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
        parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
        parser.add_argument('--view-img', action='store_true', help='display results')
        parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
        parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
        parser.add_argument('--save-dir', type=str, default='inference/output', help='directory to save results')
        parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
        parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
        parser.add_argument('--augment', action='store_true', help='augmented inference')
        parser.add_argument('--update', action='store_true', help='update all models')
        opt = parser.parse_args()
        print(opt)
        with torch.no_grad():  # context manager: gradients are not tracked inside this block
            detect()
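
The depth-estimation step in the loop can be factored out and tested on its own. A minimal sketch (the function name estimate_box_depth and the depth_at callback are hypothetical; unlike the script above, the middle-half slice here is taken over the number of valid samples actually collected, which avoids an empty slice when many zero readings are dropped):

    import numpy as np

    def estimate_box_depth(depth_at, xyxy, randnum=40):
        """Sample depths near the box center and average the middle half of the sorted values."""
        x_mid = (int(xyxy[0]) + int(xyxy[2])) // 2
        y_mid = (int(xyxy[1]) + int(xyxy[3])) // 2
        min_val = min(abs(int(xyxy[2]) - int(xyxy[0])), abs(int(xyxy[3]) - int(xyxy[1])))
        half_range = max(min_val // 4, 1)  # offset range scales with the box size
        dists = []
        for _ in range(randnum):
            bias = np.random.randint(-half_range, half_range)  # random offset, high end exclusive
            d = depth_at(x_mid + bias, y_mid + bias)  # e.g. depth_frame.get_distance
            if d:  # drop zero readings (no depth data at that pixel)
                dists.append(d)
        if not dists:
            return 0.0  # no valid depth samples inside the box
        dists = np.sort(np.array(dists))
        n = len(dists)
        return float(np.mean(dists[n // 4: n - n // 4] if n >= 4 else dists))  # keep the middle half

In the script it would be called as estimate_box_depth(depth_frame.get_distance, xyxy).

To run the script, something like the following works (the filename is hypothetical; all flags default to the values set in the argparse section above):

    python realsense_detect.py --weights runs/train/exp32/weights/best.pt --img-size 1280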