Introduction to Image Processing, Computer Vision and Deep Learning
# Grayscale
grayImage = cv2.cvtColor(originalImage, cv2.COLOR_BGR2GRAY)
# Binary
(thresh, blackAndWhiteImage) = cv2.threshold(grayImage, 127, 255, cv2.THRESH_BINARY)
blur = cv2.GaussianBlur(gray,(kernel_size, kernel_size), 0)
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
image, contours, hierarchy = cv2.findContours(image, mode, method[, contours[, hierarchy[, offset]]])
cv2.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset ]]]]])
import cv2

# Read the coin image and convert it to grayscale
image = cv2.imread('./Datasets/Q1_Image/coin01.jpg')
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Binarize, smooth with a Gaussian kernel, then extract edges with Canny
(thresh, binary) = cv2.threshold(gray_image, 127, 255, cv2.THRESH_BINARY)
gaussian = cv2.GaussianBlur(binary, (11, 11), 0)
edge_image = cv2.Canny(gaussian, 127, 127)
# Find the external contours of the edge image (OpenCV 3.x returns three values)
edge_image, contours, hierarchy = cv2.findContours(edge_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
image_copy = image.copy()
cv2.drawContours(image_copy, contours, -1, (0, 0, 255), 2)
# image_copy[edge_image > 0.01 * edge_image.max()] = [0, 0, 255]
cv2.namedWindow('coin01')
cv2.imshow('coin01', image_copy)
cv2.waitKey(0)
# Each external contour corresponds to one coin
count = len(contours)
Projection relation: 2D image point = Intrinsic matrix x Extrinsic matrix x 3D world point (in homogeneous coordinates).
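As a quick check of that relation, here is a minimal sketch that projects a single 3D point by hand; the K, R, and t values are made-up placeholders, not the calibrated ones computed below.

import numpy as np

# Made-up intrinsic matrix (focal lengths and principal point are placeholders)
K = np.array([[2000.0, 0.0, 1024.0],
              [0.0, 2000.0, 1024.0],
              [0.0, 0.0, 1.0]])
# Made-up extrinsic parameters: identity rotation, camera 10 units away
R = np.eye(3)
t = np.array([[0.0], [0.0], [10.0]])
extrinsic = np.hstack((R, t))               # 3x4 [R | t]
X = np.array([[1.0], [2.0], [0.0], [1.0]])  # homogeneous 3D world point
x = K @ extrinsic @ X                       # homogeneous 2D image point
u, v = (x[:2] / x[2]).ravel()               # perspective divide -> pixel coordinates
print(u, v)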
# Grayscale
grayImage = cv2.cvtColor(chess_board_image, cv2.COLOR_BGR2GRAY)
# Enter the number of inside corners as (x, y) = (nx, ny)
ret, corners = cv2.findChessboardCorners(grayImage, (nx, ny), None)
cv2.drawChessboardCorners(chess_board_image, (nx, ny), corners, ret)
import cv2
import glob

chess_images = glob.glob('./Datasets/Q2_Image/*.bmp')
# Process every chessboard image in the folder
for i in range(len(chess_images)):
    # Read in the image
    chess_board_image = cv2.imread(chess_images[i])
    # Convert to grayscale
    gray = cv2.cvtColor(chess_board_image, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners (11 x 8 inner corners)
    ny = 8
    nx = 11
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    # If found, draw corners
    if ret == True:
        # Draw and display the corners
        cv2.drawChessboardCorners(chess_board_image, (nx, ny), corners, ret)
        result_name = 'board' + str(i+1) + '.bmp'
        cv2.imwrite(result_name, chess_board_image)
        cv2.namedWindow("%s" % (i+1))
        cv2.imshow("%s" % (i+1), chess_board_image)
        cv2.waitKey(0)
cv2.destroyAllWindows()
Find the intrinsic matrix:
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((11 * 8, 3), np.float32)
objp[:, :2] = np.mgrid[0:8, 0:11].T.reshape(-1, 2)
objpoints = []  # 3d point in real world space
imgpoints = []  # 2d points in image plane
for i in range(len(chess_images)):
    # Read in the image
    image = cv2.imread(chess_images[i])
    # Convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (8, 11), None)
    corners2 = cv2.cornerSubPix(gray, corners, (7, 7), (-1, -1), criteria)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
import cv2
import glob
import numpy as np

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ..., (7,10,0)
objp = np.zeros((11 * 8, 3), np.float32)
objp[:, :2] = np.mgrid[0:8, 0:11].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane
chess_images = glob.glob('./Datasets/Q2_Image/*.bmp')
for i in range(len(chess_images)):
    # Read in the image
    image = cv2.imread(chess_images[i])
    # Convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (8, 11), None)
    if ret == True:
        objpoints.append(objp)
        # Refine the corner locations to sub-pixel accuracy
        corners2 = cv2.cornerSubPix(gray, corners, (7, 7), (-1, -1), criteria)
        imgpoints.append(corners2)
# gray.shape[::-1] = (2048, 2048)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (2048, 2048), None, None)
print(mtx)
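For reference, mtx printed above is the 3x3 intrinsic matrix of the form [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]; a tiny sketch pulling out those entries (the variable names here are only for illustration):

fx, fy = mtx[0][0], mtx[1][1]   # focal lengths in pixels
cx, cy = mtx[0][2], mtx[1][2]   # principal point
print(fx, fy, cx, cy)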
Extrinsic matrix:
Bonus: stacking arrays vertically with np.vstack()
# gray.shape[::-1] = (2048, 2048)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (2048, 2048), None, None)
num = 1  # 1-based index of the image whose extrinsic matrix is requested
# cv2.Rodrigues returns (rotation matrix, Jacobian); R[0] is the 3x3 rotation
R = cv2.Rodrigues(rvecs[num-1])
# Extrinsic matrix = [R | t], built by horizontal concatenation
ext = np.hstack((R[0], tvecs[num-1]))
print(ext)
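Since the Bonus note above mentions np.vstack, here is a quick, self-contained sketch contrasting it with the np.hstack call used to build the 3x4 [R | t] matrix; the R and t values are placeholders.

import numpy as np

R = np.eye(3)                        # placeholder 3x3 rotation
t = np.array([[1.0], [2.0], [3.0]])  # placeholder 3x1 translation

ext = np.hstack((R, t))              # horizontal: 3x4 extrinsic [R | t]
print(ext.shape)                     # (3, 4)

stacked = np.vstack((R, t.T))        # vertical: stacks rows, here 4x3
print(stacked.shape)                 # (4, 3)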
Distortion Coefficients (in the order OpenCV returns them): k1, k2, p1, p2, k3
# gray.shape[::-1] = (2048, 2048)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (2048, 2048), None, None)
print(dist)
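As a usage note, the coefficients in dist can be fed straight back into cv2.undistort together with the intrinsic matrix; a minimal sketch, assuming image is one of the calibration images loaded above:

# Undistort one of the chessboard images with the estimated parameters
undistorted = cv2.undistort(image, mtx, dist, None, mtx)
cv2.imwrite('undistorted.bmp', undistorted)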
Reference:
imgpts, jac = cv2.projectPoints(axis, rvecs[i], tvecs[i], mtx, dist)
import cv2
import glob
import numpy as np

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ..., (7,10,0)
objp = np.zeros((11 * 8, 3), np.float32)
objp[:, :2] = np.mgrid[0:8, 0:11].T.reshape(-1, 2)
# 3D vertices of the tetrahedron to project onto the board (in board coordinates)
# axis = np.float32([[3, 3, -3], [1, 1, 0], [3, 5, 0], [5, 1, 0]]).reshape(-1, 3)
axis = np.float32([[5, 3, -3], [7, 1, 0], [3, 3, 0], [7, 5, 0]]).reshape(-1, 3)

def draw(image, imgpts):
    # Connect the four projected vertices in red to form the tetrahedron
    # (ravel + int conversion turns each projected point into pixel coordinates)
    pts = [tuple(p.ravel().astype(int)) for p in imgpts]
    image = cv2.line(image, pts[0], pts[1], (0, 0, 255), 5)
    image = cv2.line(image, pts[0], pts[2], (0, 0, 255), 5)
    image = cv2.line(image, pts[0], pts[3], (0, 0, 255), 5)
    image = cv2.line(image, pts[1], pts[2], (0, 0, 255), 5)
    image = cv2.line(image, pts[1], pts[3], (0, 0, 255), 5)
    image = cv2.line(image, pts[2], pts[3], (0, 0, 255), 5)
    return image

# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane
chess_images = glob.glob('./Datasets/Q3_Image/*.bmp')
for i in range(len(chess_images)):
    # Read in the image
    image = cv2.imread(chess_images[i])
    # Convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (8, 11), None)
    if ret == True:
        # objp = 8 * 11 object points (x, y, z)
        objpoints.append(objp)
        # corners2 = each object point on the 2D image (x, y)
        corners2 = cv2.cornerSubPix(gray, corners, (7, 7), (-1, -1), criteria)
        imgpoints.append(corners2)
        # gray.shape[::-1] = (2048, 2048)
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (2048, 2048), None, None)
        # project 3D points to image plane
        imgpts, jac = cv2.projectPoints(axis, rvecs[i], tvecs[i], mtx, dist)
        img = draw(image, imgpts)
        cv2.imwrite('%s_v.jpg' % i, img)
        img = cv2.resize(img, (1024, 1024), interpolation=cv2.INTER_AREA)
        cv2.namedWindow('img')
        cv2.imshow('img', img)
        cv2.waitKey(5)
Reference:
Original images (left & right):
Stereo Disparity map:
I ran into trouble with this one: disparity[y][x] always came out as 0, so I never got the real values out.
Treat the code below as a rough reference; it can certainly be improved (see the note after the code for one possible direction).
import cv2
import numpy as np

# Read the stereo pair as grayscale and compute the block-matching disparity
imgL = cv2.imread('./Datasets/Q4_Image/imgL.png', 0)
imgR = cv2.imread('./Datasets/Q4_Image/imgR.png', 0)
stereo = cv2.StereoBM_create(numDisparities=256, blockSize=25)
disparity = stereo.compute(imgL, imgR).astype(np.float32) / 16.0
disparity = cv2.resize(disparity, (1400, 950), interpolation=cv2.INTER_AREA)
# cv2.namedWindow('disparity')
# cv2.imshow('disparity', disparity)
# cv2.imwrite('disparity.jpg', disparity)

# Camera parameters given for this question
focal_len = 2826
baseline = 178
Cx = 123

# mouse callback function
def draw_circle(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        # print(x, ",", y)
        # Mark the clicked pixel
        cv2.rectangle(disparity, (x-3, y-3), (x+3, y+3), (0, 0, 255), -1)
        # depth = focal_len * baseline / |disparity - Cx|
        dist = disparity[y][x] - Cx
        depth = int(focal_len * baseline / abs(dist))
        # print("Disparity: " + str(disparity[y][x]) + " pixels")
        # print("Depth: " + str(depth) + " mm")
        # text = disparity.copy()
        cv2.rectangle(disparity, (1100, 850), (1390, 940), (255, 255, 255), -1)
        cv2.putText(disparity, "Disparity: " + str(int(disparity[y][x])) + " pixels", (1120, 890),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2, cv2.LINE_AA)
        cv2.putText(disparity, "Depth: " + str(depth) + " mm", (1120, 930),
                    cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 0), 2, cv2.LINE_AA)
        # image = np.hstack([disparity, text])
        cv2.imshow('image', disparity)

cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_circle)
while(1):
    cv2.imshow('image', disparity)
    if cv2.waitKey(20) & 0xFF == 27:
        break
cv2.waitKey(0)
cv2.destroyAllWindows()
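One guess at why the values looked wrong (not verified): cv2.imshow renders a float image on a 0-to-1 scale, so the raw disparity map mostly saturates, and StereoBM can leave unmatched pixels at non-positive values. A minimal sketch that normalizes the map for display, reusing the disparity array from above:

# Rescale the disparity map to 0-255 so it displays as a visible 8-bit image
disp_vis = cv2.normalize(disparity, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
cv2.imshow('disparity_normalized', disp_vis)
cv2.waitKey(0)

Inside the mouse callback, guarding with something like if disparity[y][x] > 0: before computing the depth would also skip pixels where block matching failed instead of reporting 0.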