Stereo Vision Distance Measurement with the BM Algorithm – Python Implementation

After calibrating the stereo camera pair, you have the calibration results: the intrinsic matrices and distortion coefficients of the left and right cameras, plus the rotation matrix and translation vector between them. Put them into the code as shown below (in the full program at the end, this block lives in a separate camera_configs module that the main script imports):

import cv2
import numpy as np

# Left camera intrinsic matrix
left_camera_matrix = np.array([(426.61499943, 0, 337.77666426),
                   (0, 426.50296492, 254.57967858),
                   (0, 0, 1)])

# Left camera distortion coefficients: [k1, k2, p1, p2, k3]
left_distortion = np.array([[1.64160258e-02, 1.90313751e-02, 3.85843636e-05, -6.81027605e-04, -7.77682876e-02]])

# Right camera intrinsic matrix
right_camera_matrix = np.array([(428.37039364, 0, 335.14944239),
                   (0, 428.26149388, 253.70369704),
                   (0, 0, 1)])

# Right camera distortion coefficients: [k1, k2, p1, p2, k3]
right_distortion = np.array([[1.18624707e-02, 2.92395356e-02, 7.84349618e-04, -4.59924352e-05, -8.34482148e-02]])

# om = np.array([-0.00009, 0.02300, -0.00372])
# R = cv2.Rodrigues(om)[0]

# Rotation matrix between the two cameras
R = np.array([(9.99999401e-01, -1.08818183e-03, 1.21838861e-04),
                   (1.08825693e-03, 9.99999217e-01, -6.18081935e-04),
                   (-1.21166180e-04, 6.18214157e-04, 9.99999802e-01)])

# Translation vector between the two cameras (mm)
T = np.array([[-59.98598553], [-0.81926245], [0.12784464]])
doffs = 0.0

size = (640, 480)  # resolution of each camera

R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(left_camera_matrix, left_distortion,
                                                                  right_camera_matrix, right_distortion, size, R,
                                                                  T)

left_map1, left_map2 = cv2.initUndistortRectifyMap(left_camera_matrix, left_distortion, R1, P1, size, cv2.CV_16SC2)
right_map1, right_map2 = cv2.initUndistortRectifyMap(right_camera_matrix, right_distortion, R2, P2, size, cv2.CV_16SC2)

Next, perform the stereo rectification (undistortion and row alignment of the two views). The code is as follows:

    img1_rectified = cv2.remap(frame1, left_map1, left_map2, cv2.INTER_LINEAR,
                               borderMode=cv2.BORDER_CONSTANT)
    img2_rectified = cv2.remap(frame2, right_map1, right_map2, cv2.INTER_LINEAR,
                               borderMode=cv2.BORDER_CONSTANT)

This step can also be skipped: if your two cameras are already mounted parallel and well aligned, it makes little difference whether you do it or not.
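A quick way to verify the rectification is to stack the two rectified images side by side and draw a few horizontal lines; in a properly rectified pair, corresponding points lie on the same row. A minimal sketch, reusing img1_rectified and img2_rectified from the snippet above:

    # Visual check: corresponding points should share the same row after rectification
    canvas = np.hstack((img1_rectified, img2_rectified))
    for v in range(0, canvas.shape[0], 40):
        cv2.line(canvas, (0, v), (canvas.shape[1] - 1, v), (0, 255, 0), 1)
    cv2.imshow("rectify_check", canvas)
    cv2.waitKey(0)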

Once that is done, let's look at the BM algorithm:

    # BM
    numberOfDisparities = ((640 // 8) + 15) & -16  # 640 is the image width

    stereo = cv2.StereoBM_create(numDisparities=16*10, blockSize=7)  # create the stereo matcher
    stereo.setROI1(camera_configs.validPixROI1)
    stereo.setROI2(camera_configs.validPixROI2)
    stereo.setPreFilterCap(50)
    stereo.setBlockSize(9)
    stereo.setMinDisparity(0)
    stereo.setNumDisparities(numberOfDisparities)
    stereo.setTextureThreshold(10)
    stereo.setUniquenessRatio(15)
    stereo.setSpeckleWindowSize(200)
    stereo.setSpeckleRange(32)
    stereo.setDisp12MaxDiff(8)

    disparity = stereo.compute(img1_rectified, img2_rectified)  # compute the disparity map

    disp = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)  # normalize to 8 bits for display

    threeD = cv2.reprojectImageTo3D(disparity, camera_configs.Q, handleMissingValues=True)  # reproject to 3D coordinates
    threeD = threeD * 16

This is the part that computes the 3D coordinates. Everything set on stereo above is an algorithm parameter that can be tuned; experiment until you find a combination that works well. It is worth wiring the parameters to sliders, which makes tuning far more convenient; how to set up sliders for parameter tuning will be covered in another post.
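Until that post, here is a minimal sketch of live tuning with OpenCV trackbars (the window name, trackbar names and ranges are my own illustrative choices, not from the original code):

    # Hypothetical slider panel for live BM tuning
    cv2.namedWindow("BM params")
    cv2.createTrackbar("numDisp (x16)", "BM params", 5, 16, lambda v: None)
    cv2.createTrackbar("blockSize", "BM params", 9, 51, lambda v: None)
    cv2.createTrackbar("uniqueness", "BM params", 15, 50, lambda v: None)

    # Inside the main loop, read the sliders before stereo.compute()
    num_disp = max(cv2.getTrackbarPos("numDisp (x16)", "BM params"), 1) * 16
    block = cv2.getTrackbarPos("blockSize", "BM params")
    block = max(block | 1, 5)  # blockSize must be odd and at least 5
    stereo.setNumDisparities(num_disp)
    stereo.setBlockSize(block)
    stereo.setUniquenessRatio(cv2.getTrackbarPos("uniqueness", "BM params"))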

    cv2.setMouseCallback(WIN_NAME, onmouse_pick_points, threeD)

    cv2.imshow("left", frame1)
    cv2.imshow(WIN_NAME, disp)  # show the disparity map

With that, the results can be displayed; the coordinates are computed and shown in the left camera's coordinate system.

A mouse click supplies the pixel coordinates; the BM algorithm has already matched the corresponding points, and the parallel-view triangulation formulas then give the 3D coordinates. Note that the result must be multiplied by 16 to convert it into real-world coordinates.
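The factor of 16 comes from StereoBM returning fixed-point disparities (int16 values scaled by 16). An equivalent alternative, sketched below with the variable names from the snippets above, is to divide the disparity by 16 before reprojecting, in which case the multiplication afterwards is no longer needed:

    # StereoBM outputs int16 disparities scaled by 16 (4 fractional bits)
    disparity_real = disparity.astype(np.float32) / 16.0

    # Reprojecting the true disparity gives metric coordinates directly,
    # so the "threeD = threeD * 16" correction is not required here
    threeD = cv2.reprojectImageTo3D(disparity_real, camera_configs.Q, handleMissingValues=True)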

The mouse-click handler is shown below; x and y can also be supplied directly to compute the coordinates of a chosen pixel (see the lookup sketch after the function):

def onmouse_pick_points(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        threeD = param
        print('\nPixel coordinates: x = %d, y = %d' % (x, y))
        print("World coordinates (x, y, z):", threeD[y][x][0] / 1000.0, threeD[y][x][1] / 1000.0, threeD[y][x][2] / 1000.0, "m")

        distance = math.sqrt(threeD[y][x][0] ** 2 + threeD[y][x][1] ** 2 + threeD[y][x][2] ** 2)
        distance = distance / 1000.0  # mm -> m
        print("Distance:", distance, "m")
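If you would rather query a fixed pixel than click, the same lookup works directly; a small sketch (the image-centre pixel below is just an example):

x, y = 320, 240  # example pixel: the image centre
X, Y, Z = threeD[y][x]  # millimetres, in the left-camera coordinate system
print("Distance: %.3f m" % (math.sqrt(X ** 2 + Y ** 2 + Z ** 2) / 1000.0))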

Putting these pieces together gives the complete program. It works with still images as well as a live camera feed, and it runs fast. In actual measurements the accuracy feels acceptable, at least at short range. The results are not as good as the SGBM algorithm, but BM wins on computation speed.
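If you are willing to trade some speed for quality, the matcher can be swapped for SGBM with minimal changes; a sketch with illustrative starting parameters (not tuned for this rig), reusing numberOfDisparities and the rectified images from above:

# Optional: swap BM for SGBM (slower, usually cleaner disparity)
stereo = cv2.StereoSGBM_create(
    minDisparity=0,
    numDisparities=numberOfDisparities,
    blockSize=5,
    P1=8 * 1 * 5 ** 2,   # assumes single-channel (grayscale) input
    P2=32 * 1 * 5 ** 2,
    uniquenessRatio=15,
    speckleWindowSize=200,
    speckleRange=32,
    disp12MaxDiff=8,
)
disparity = stereo.compute(img1_rectified, img2_rectified)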

The complete code is as follows:

# -*- coding: utf-8 -*-

import numpy as np
import cv2
import camera_configs
import random
import math

cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 480)  # open the camera and set a 1280x480 side-by-side resolution


# Mouse callback: print the 3D coordinates and distance of the clicked pixel
def onmouse_pick_points(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        threeD = param
        print('\nPixel coordinates: x = %d, y = %d' % (x, y))
        # print("World coordinates:", threeD[y][x][0], threeD[y][x][1], threeD[y][x][2], "mm")
        print("World coordinates (x, y, z):", threeD[y][x][0] / 1000.0, threeD[y][x][1] / 1000.0, threeD[y][x][2] / 1000.0, "m")

        distance = math.sqrt(threeD[y][x][0] ** 2 + threeD[y][x][1] ** 2 + threeD[y][x][2] ** 2)
        distance = distance / 1000.0  # mm -> m
        print("Distance:", distance, "m")


WIN_NAME = 'Deep disp'
cv2.namedWindow(WIN_NAME, cv2.WINDOW_AUTOSIZE)

while True:
    ret, frame = cap.read()
    frame1 = frame[0:480, 0:640]
    frame2 = frame[0:480, 640:1280]  # split the side-by-side frame into left and right views

    imgL = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)  # convert BGR to grayscale
    imgR = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    # cv2.remap places pixels from one image at specified positions in another;
    # here it rebuilds undistorted, rectified images from the (MATLAB) calibration data
    img1_rectified = cv2.remap(imgL, camera_configs.left_map1, camera_configs.left_map2, cv2.INTER_LINEAR)
    img2_rectified = cv2.remap(imgR, camera_configs.right_map1, camera_configs.right_map2, cv2.INTER_LINEAR)

    imageL = cv2.cvtColor(img1_rectified, cv2.COLOR_GRAY2BGR)
    imageR = cv2.cvtColor(img2_rectified, cv2.COLOR_GRAY2BGR)

    # BM
    numberOfDisparities = ((640 // 8) + 15) & -16  # 640 is the image width

    stereo = cv2.StereoBM_create(numDisparities=16*10, blockSize=7)  # create the stereo matcher
    stereo.setROI1(camera_configs.validPixROI1)
    stereo.setROI2(camera_configs.validPixROI2)
    stereo.setPreFilterCap(50)
    stereo.setBlockSize(9)
    stereo.setMinDisparity(0)
    stereo.setNumDisparities(numberOfDisparities)
    stereo.setTextureThreshold(10)
    stereo.setUniquenessRatio(15)
    stereo.setSpeckleWindowSize(200)
    stereo.setSpeckleRange(32)
    stereo.setDisp12MaxDiff(8)

    disparity = stereo.compute(img1_rectified, img2_rectified)  # compute the disparity map

    disp = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)  # normalize to 8 bits for display

    threeD = cv2.reprojectImageTo3D(disparity, camera_configs.Q, handleMissingValues=True)  # reproject to 3D coordinates
    threeD = threeD * 16  # StereoBM disparities are scaled by 16

    # threeD[y][x] gives (X, Y, Z); x in [0, 640), y in [0, 480)
    cv2.setMouseCallback(WIN_NAME, onmouse_pick_points, threeD)

    cv2.imshow("left", frame1)
    cv2.imshow(WIN_NAME, disp)  # show the disparity map

    key = cv2.waitKey(1)
    if key == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
