source:
开源地址:https://github.com/misbah4064/hand_pose_detection
模型下载:https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/models/getModels.sh
论坛资料 openpose使用 https://blog.csdn.net/qq_27158179/article/details/82717821

演示视频

huhuPose.avi (13.17MB)

代码

视频处理

  1. import cv2
  2. import time
  3. import numpy as np
  4. protoFile = "D:\\Data_documents\\ImageProcess\\MachineViewReport\\hand_pose_detection-master\\hand\\pose_deploy.prototxt"
  5. weightsFile = "D:\\Data_documents\\ImageProcess\\MachineViewReport\\hand_pose_detection-master\\hand\\pose_iter_102000.caffemodel"
  6. nPoints = 22
  7. POSE_PAIRS = [ [0,1],[1,2],[2,3],[3,4],[0,5],[5,6],[6,7],[7,8],[0,9],[9,10],[10,11],[11,12],[0,13],[13,14],[14,15],[15,16],[0,17],[17,18],[18,19],[19,20] ]
  8. threshold = 0.2
  9. video_file = "D:\\Data_documents\\ImageProcess\\MachineViewReport\\hand_pose_detection-master\\videoMine.mp4"
  10. cap = cv2.VideoCapture(video_file)
  11. hasFrame, frame = cap.read()
  12. frameWidth = frame.shape[1]
  13. frameHeight = frame.shape[0]
  14. aspect_ratio = frameWidth/frameHeight
  15. inHeight = 368
  16. inWidth = int(((aspect_ratio*inHeight)*8)//8)
  17. vid_writer = cv2.VideoWriter('output.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 15, (frame.shape[1],frame.shape[0]))
  18. net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
  19. k = 0
  20. while 1:
  21. k+=1
  22. t = time.time()
  23. hasFrame, frame = cap.read()
  24. frameCopy = np.copy(frame)
  25. if not hasFrame:
  26. cv2.waitKey()
  27. break
  28. inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),
  29. (0, 0, 0), swapRB=False, crop=False)
  30. net.setInput(inpBlob)
  31. output = net.forward()
  32. print("forward = {}".format(time.time() - t))
  33. # Empty list to store the detected keypoints
  34. points = []
  35. for i in range(nPoints):
  36. # confidence map of corresponding body's part.
  37. probMap = output[0, i, :, :]
  38. probMap = cv2.resize(probMap, (frameWidth, frameHeight))
  39. # Find global maxima of the probMap.
  40. minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
  41. if prob > threshold :
  42. cv2.circle(frameCopy, (int(point[0]), int(point[1])), 6, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
  43. cv2.putText(frameCopy, "{}".format(i), (int(point[0]), int(point[1])), cv2.FONT_HERSHEY_SIMPLEX, .8, (0, 0, 255), 2, lineType=cv2.LINE_AA)
  44. # Add the point to the list if the probability is greater than the threshold
  45. points.append((int(point[0]), int(point[1])))
  46. else :
  47. points.append(None)
  48. # Draw Skeleton
  49. for pair in POSE_PAIRS:
  50. partA = pair[0]
  51. partB = pair[1]
  52. if points[partA] and points[partB]:
  53. cv2.line(frame, points[partA], points[partB], (0, 255, 255), 2, lineType=cv2.LINE_AA)
  54. cv2.circle(frame, points[partA], 5, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
  55. cv2.circle(frame, points[partB], 5, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
  56. print("Time Taken for frame = {}".format(time.time() - t))
  57. # cv2.putText(frame, "time taken = {:.2f} sec".format(time.time() - t), (50, 50), cv2.FONT_HERSHEY_COMPLEX, .8, (255, 50, 0), 2, lineType=cv2.LINE_AA)
  58. # cv2.putText(frame, "Hand Pose using OpenCV", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 50, 0), 2, lineType=cv2.LINE_AA)
  59. cv2.imshow('Output-Skeleton', frame)
  60. # cv2.imwrite("video_output/{:03d}.jpg".format(k), frame)
  61. key = cv2.waitKey(1)
  62. if key == 27:
  63. break
  64. print("total = {}".format(time.time() - t))
  65. vid_writer.write(frame)
  66. vid_writer.release()

图像处理

  1. import cv2
  2. import time
  3. import numpy as np
  4. #加载模型和图像
  5. protoFile = "D:\\Data_documents\\ImageProcess\\MachineViewReport\\hand_pose_detection-master\\hand\\pose_deploy.prototxt"
  6. weightsFile = "D:\\Data_documents\\ImageProcess\\MachineViewReport\\hand_pose_detection-master\\hand\\pose_iter_102000.caffemodel"
  7. nPoints = 22
  8. POSE_PAIRS = [ [0,1],[1,2],[2,3],[3,4],[0,5],[5,6],[6,7],[7,8],[0,9],[9,10],[10,11],[11,12],[0,13],[13,14],[14,15],[15,16],[0,17],[17,18],[18,19],[19,20] ]
  9. threshold = 0.2
  10. frame = cv2.imread("D:\\Data_documents\\ImageProcess\\MachineViewReport\\hand_pose_detection-master\\hand02.jpg")
  11. net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
  12. frameCopy = np.copy(frame)
  13. frameWidth = frame.shape[1]
  14. frameHeight = frame.shape[0]
  15. aspect_ratio = frameWidth/frameHeight
  16. inHeight = 368
  17. inWidth = int(((aspect_ratio*inHeight)*8)//8)
  18. #推断预测
  19. inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),(0, 0, 0), swapRB=False, crop=False)#blobFromImage将图像转为blob
  20. net.setInput(inpBlob)# 将图像输入到caffe网络中
  21. output = net.forward()#输出前向传播的预测结果 实现网络推断
  22. #显示检测
  23. points = []
  24. for i in range(nPoints):
  25. # confidence map of corresponding body's part.
  26. probMap = output[0, i, :, :]
  27. probMap = cv2.resize(probMap, (frameWidth, frameHeight))
  28. # Find global maxima of the probMap.
  29. minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
  30. if prob > threshold :
  31. cv2.circle(frameCopy, (int(point[0]), int(point[1])), 8, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
  32. cv2.putText(frameCopy, "{}".format(i), (int(point[0]), int(point[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
  33. # Add the point to the list if the probability is greater than the threshold
  34. points.append((int(point[0]), int(point[1])))
  35. else :
  36. points.append(None)
  37. cv2.imshow('Output-Keypoints', frameCopy)
  38. cv2.waitKey(0)
  39. cv2.destroyAllWindows()
  40. cv2.imwrite('frame01.png', frameCopy)
  41. #绘制骨骼
  42. for pair in POSE_PAIRS:
  43. partA = pair[0]
  44. partB = pair[1]
  45. if points[partA] and points[partB]:
  46. cv2.line(frame, points[partA], points[partB], (0, 255, 255), 2)
  47. cv2.circle(frame, points[partA], 8, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
  48. cv2.imshow('Output-Skeleton', frame)
  49. cv2.imwrite('frame02.png', frame)
  50. cv2.waitKey(0)
  51. cv2.destroyAllWindows()

CV函数

cv2.VideoCapture()
cap = cv2.VideoCapture(0)
VideoCapture()中参数是0,表示打开笔记本的内置摄像头。
cap = cv2.VideoCapture("../1.avi")
VideoCapture("../1.avi"),表示参数是视频文件路径,则打开该视频文件
cap.read()
cap.read()按帧读取视频,ret,frame是获取cap.read()方法的两个返回值。其中ret是布尔值,如果读取帧是正确的则返回True,如果文件读取到结尾,它的返回值就为False。frame就是每一帧的图像,是个三维矩阵
cv2.VideoWriter
VideoWriter(filename, fourcc, fps, frameSize[, isColor]) -> &lt;VideoWriter object&gt;

  1. 第一个参数是要保存的文件的路径
  2. fourcc 指定编码器
  3. fps 要保存的视频的帧率
  4. frameSize 要保存的文件的画面尺寸
  5. isColor 指示是黑白画面还是彩色的画面

https://blog.csdn.net/briblue/article/details/87914421

CV机器学习函数

  1. cv2.dnn.readNetFromCaffe(prototxt, model)

用于加载Caffe框架下训练好的网络模型(不限于SSD网络)
参数说明:prototxt表示caffe网络的结构文本,model表示已经训练好的参数结果

  1. t=dlib.correlation_tracker()

使用dlib生成单目标的追踪器

  1. dlib.rectangle(int(box[0]), int(box[1]), int(box[2]), int(box[3]))

用于生成追踪器所需要的矩形框[(startX, startY), (endX, endY)]

  1. t.start_track(rgb, rect) # 初始化追踪器的开始状态
  2. cv2.VideoWriter(name, fourcc, fps, (frame.shape[1], frame.shape[0]), True)

将图片逐帧写入到视频里面
参数说明: name表示视频文件名,fourcc表示视频编码格式,fps表示帧率,(frame.shape[1], frame.shape[0])表示视频画面的宽和高

  1. cv2.dnn.blobFromImage(frame, 0.007843, (w, h), 127.5)

对图像进行归一化操作(-1, 1),
参数说明:frame表示输入图片,0.007843表示需要乘的数,即1/127.5,(w, h)表示图像大小,127.5表示需要 减去的数

  1. net.setInput(rgb)

表示将图片输入到caffe网络中
参数说明: rgb表示已经经过归一化的图片

  1. net.forward() 输出前向传播的预测结果
  2. oq = multiprocessing.Queue() 生成用于多进程之间传输数据的队列
  3. p = multiprocessing.Process(target=start_track, args=(bb, label, rgb, iq, oq))

用于对函数创建进程
参数说明:target表示需要转换为进程的函数,args表示传入到进程里函数的参数