---
title: Python-OpenCV hw2筆記
tags: OpenCV,Python
---
**Introduction to Image Processing, Computer Vision and Deep Learning**
**目錄**
[TOC]
---
## 1. Find Contour
### 1) Draw Contour
![](https://i.imgur.com/Bfl8MQV.png)
![](https://i.imgur.com/I8zfLYL.png)
#### Follow the steps:
1. RGB -> Grayscale -> Binary
```
# Grayscale
grayImage = cv2.cvtColor(originalImage, cv2.COLOR_BGR2GRAY)
# Binary
(thresh, blackAndWhiteImage) = cv2.threshold(grayImage, 127, 255, cv2.THRESH_BINARY)
```
2. Remember to use **Gaussian Blur** to remove the noise.
```
blur = cv2.GaussianBlur(gray,(kernel_size, kernel_size), 0)
```
3. Using some **edge detection functions** to get better results. (Ex: cv2.Canny)
```
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
```
4. use **cv2.findContours**
```
image, contours, hierarchy = cv2.findContours(image, mode, method[, contours[, hierarchy[, offset ]]])  # Note: OpenCV 4.x returns only (contours, hierarchy)
```
5. use **cv2.drawContours**
```
cv2.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset ]]]]])
```
#### Example code:
```python=
# Q1-1: detect and draw coin contours.
# Pipeline: BGR -> grayscale -> binary threshold -> Gaussian blur -> Canny -> contours.
image = cv2.imread('./Datasets/Q1_Image/coin01.jpg')
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
(thresh, binary) = cv2.threshold(gray_image, 127, 255, cv2.THRESH_BINARY)
# Blur the binary image to suppress noise before edge detection.
gaussian = cv2.GaussianBlur(binary, (11, 11), 0)
edge_image = cv2.Canny(gaussian, 127, 127)
# OpenCV 3.x returns (image, contours, hierarchy); 4.x returns (contours, hierarchy).
# Taking the last two elements works with both versions.
contours, hierarchy = cv2.findContours(edge_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
image_copy = image.copy()
# contourIdx=-1 draws every contour; red in BGR, 2 px thick.
cv2.drawContours(image_copy, contours, -1, (0, 0, 255), 2)
cv2.namedWindow('coin01')
cv2.imshow('coin01', image_copy)
```
### 2) Count Coins
![](https://i.imgur.com/tPOrnb2.png)
```python=
# Q1-2: each external contour found in Q1-1 corresponds to one coin.
count = len(contours)
```
## 2. Camera Calibration
### Relationship of `2D x Intrinsic x Extrinsic x 3D`
![](https://i.imgur.com/zcOvbAX.png)
### 1) Corner detection
![](https://i.imgur.com/JZIZboz.png)
#### Follow the steps:
1. Given 15 chessboard images
2. Read the images and convert to grayscale
```
# Grayscale
grayImage = cv2.cvtColor(chess_board_image, cv2.COLOR_BGR2GRAY)
```
3. Find the chessboard corners
```
#Enter the number of inside corners in (x, y) = (nx, ny)
ret, corners = cv2.findChessboardCorners(grayImage, (nx, ny), None)
```
4. Draw and display the corners
```
cv2.drawChessboardCorners(chess_board_image, (nx, ny), corners, ret)
```
#### Example code:
```python=
# Q2-1: find and draw the chessboard corners on every calibration image.
chess_images = glob.glob('./Datasets/Q2_Image/*.bmp')
# Inner-corner grid of the calibration chessboard: 11 columns x 8 rows.
nx = 11
ny = 8
for idx, path in enumerate(chess_images):
    chess_board_image = cv2.imread(path)
    gray = cv2.cvtColor(chess_board_image, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if ret:
        # Overlay the detected corner grid on the original image.
        cv2.drawChessboardCorners(chess_board_image, (nx, ny), corners, ret)
        result_name = 'board' + str(idx + 1) + '.bmp'
        cv2.imwrite(result_name, chess_board_image)
        cv2.namedWindow("%s" % (idx + 1))
        cv2.imshow("%s" % (idx + 1), chess_board_image)
        cv2.waitKey(0)
cv2.destroyAllWindows()
```
### 2) Find the intrinsic matrix
Find the intrinsic matrix: ![](https://i.imgur.com/Q8zWBEL.png)
#### Follow the steps:
1. termination criteria
```
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
```
2. prepare object points, one per inner corner of the 8×11 grid: (0,0,0), (1,0,0), (2,0,0) ...., (7,10,0)
```
objp = np.zeros((11 * 8, 3), np.float32)
objp[:, :2] = np.mgrid[0:8, 0:11].T.reshape(-1, 2)
```
3. Arrays to store object points and image points from all the images
```
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane
```
4. Select all images, convert to grayscale, and find the chessboard corners
```
for i in range(len(chess_images)):
# Read in the image
image = cv2.imread(chess_images[i])
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (8, 11), None)
```
5. Further optimization calculations on the corners
```
corners2 = cv2.cornerSubPix(gray, corners, (7, 7), (-1, -1), criteria)
```
6. use **cv2.calibrateCamera**
```
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
```
#### Example code:
```python=
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((11 * 8, 3), np.float32)
objp[:, :2] = np.mgrid[0:8, 0:11].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane
chess_images = glob.glob('./Datasets/Q2_Image/*.bmp')
# Select any index to grab an image from the list
for i in range(len(chess_images)):
# Read in the image
image = cv2.imread(chess_images[i])
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (8, 11), None)
if ret == True:
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray, corners, (7, 7), (-1, -1), criteria)
imgpoints.append(corners2)
# gray.shape[::-1] = (2048, 2048)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (2048, 2048), None, None)
print(mtx)
```
### 3) Find the extrinsic matrix
Extrinsic matrix:
![](https://i.imgur.com/fI0H1Xp.png)
#### Follow the steps:
1. Follow **2) Find the intrinsic matrix**
2. use **cv2.Rodrigues** 做旋轉向量和旋轉矩陣的轉換
3. use **np.hstack** 做陣列橫向合併
:::success
**Bonus:** 陣列縱向合併--np.vstack()
:::
#### Example code:
```python=
# gray.shape[::-1] = (2048, 2048)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (2048, 2048), None, None)
R = cv2.Rodrigues(rvecs[num-1])
ext = np.hstack((R[0], tvecs[num-1]))
print(ext)
```
### 4) Find the distortion matrix
Distortion Coefficients (in OpenCV's output order): k1, k2, p1, p2, k3
#### Follow the steps:
1. Follow **2) Find the intrinsic matrix**
#### Example code:
```python=
# gray.shape[::-1] = (2048, 2048)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (2048, 2048), None, None)
print(dist)
```
> 參考:
> 1. https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga687a1ab946686f0d85ae0363b5af1d7b
## 3. Augmented Reality
![](https://i.imgur.com/lZQOt1E.gif)
#### Follow the steps:
1. Follow **2) Find the intrinsic matrix**
2. prepare object points which you want to draw, like (3,3,-3), (1,1,0), (3,5,0), (5,1,0)
3. use **cv2.projectPoints** to project 3D points to image plane
```
imgpts, jac = cv2.projectPoints(axis, rvecs[i], tvecs[i], mtx, dist)
```
4. use **cv2.line** draw line
#### Example code:
```python=
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((11 * 8, 3), np.float32)
objp[:, :2] = np.mgrid[0:8, 0:11].T.reshape(-1, 2)
# axis = np.float32([[3, 3, -3], [1, 1, 0], [3, 5, 0], [5, 1, 0]]).reshape(-1, 3)
axis = np.float32([[5, 3, -3], [7, 1, 0], [3, 3, 0], [7, 5, 0]]).reshape(-1, 3)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane
chess_images = glob.glob('./Datasets/Q3_Image/*.bmp')
# Select any index to grab an image from the list
for i in range(len(chess_images)):
# Read in the image
image = cv2.imread(chess_images[i])
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (8, 11), None)
if ret == True:
objpoints.append(objp)
# objp = 8 * 11 objpoints (x, y, z)
corners2 = cv2.cornerSubPix(gray, corners, (7, 7), (-1, -1), criteria)
imgpoints.append(corners2)
# corner2 = each object point on 2D image (x, y)
# gray.shape[::-1] = (2048, 2048)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (2048, 2048), None, None)
# project 3D points to image plane
imgpts, jac = cv2.projectPoints(axis, rvecs[i], tvecs[i], mtx, dist)
def draw(image, imgpts):
image = cv2.line(image, tuple(imgpts[0].ravel()), tuple(imgpts[1].ravel()), (0, 0, 255), 5)
image = cv2.line(image, tuple(imgpts[0].ravel()), tuple(imgpts[2].ravel()), (0, 0, 255), 5)
image = cv2.line(image, tuple(imgpts[0].ravel()), tuple(imgpts[3].ravel()), (0, 0, 255), 5)
image = cv2.line(image, tuple(imgpts[1].ravel()), tuple(imgpts[2].ravel()), (0, 0, 255), 5)
image = cv2.line(image, tuple(imgpts[1].ravel()), tuple(imgpts[3].ravel()), (0, 0, 255), 5)
image = cv2.line(image, tuple(imgpts[2].ravel()), tuple(imgpts[3].ravel()), (0, 0, 255), 5)
return image
img = draw(image, imgpts)
cv2.imwrite('%s_v.jpg' % i, img)
img = cv2.resize(img, (1024, 1024), interpolation=cv2.INTER_AREA)
cv2.namedWindow('img')
cv2.imshow('img', img)
cv2.waitKey(5)
```
> 參考:
> 1. https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c
> 2. https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_pose/py_pose.html
## 4. Stereo Disparity Map
Original image(left&right):
![](https://i.imgur.com/mm9wgue.jpg)
![](https://i.imgur.com/nEKeM2d.jpg)
Stereo Disparity map:
![](https://i.imgur.com/RScQL13.png)
### 1) Compute disparity map
![](https://i.imgur.com/AOcjx3i.png)
#### Follow the steps:
1. open image left and right
2. use **cv2.StereoBM_create**, **stereo.compute**
### 2) Calculate the depth
:::danger
這題做的時候有問題,disparity[y][x]的值一直是0,真的值output不出來QQ
所以code的部分參考參考,可以修改更好。
:::
#### Follow the steps:
1. focal_len = 2826
baseline = 178
Cx = $c_x^{right} - c_x^{left}$ = 123
We know that: ![](https://i.imgur.com/EwoZ25t.png)
d(distance) = (your point) - Cx
Z(depth) = focal_length * baseline / d
2. use **cv2.setMouseCallback** to show the points which you clicked
3. use **cv2.rectangle** to draw the point
4. use **cv2.putText** to show disparity and depth
#### Example code:
```python=
imgL = cv2.imread('./Datasets/Q4_Image/imgL.png', 0)
imgR = cv2.imread('./Datasets/Q4_Image/imgR.png', 0)
stereo = cv2.StereoBM_create(numDisparities=256, blockSize=25)
disparity = stereo.compute(imgL, imgR).astype(np.float32) / 16.0
disparity = cv2.resize(disparity, (1400, 950), interpolation=cv2.INTER_AREA)
# cv2.namedWindow('disparity')
# cv2.imshow('disparity', disparity)
# cv2.imwrite('disparity.jpg', disparity)
focal_len = 2826
baseline = 178
Cx = 123
# mouse callback function
def draw_circle(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
# print(x, ",", y)
cv2.rectangle(disparity, (x-3, y-3), (x+3, y+3), (0, 0, 255), -1)
dist = disparity[y][x] - Cx
depth = int(focal_len * baseline / abs(dist))
# print("Disparity: " + str(disparity[x][y]) + " pixels")
# print("Depth: " + str(depth) + " mm")
# text = disparity.copy()
cv2.rectangle(disparity, (1100, 850), (1390, 940), (255, 255, 255), -1)
cv2.putText(disparity, "Disparity: " + str(int(disparity[y][x])) + " pixels", (1120, 890),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(disparity, "Depth: " + str(depth) + " mm", (1120, 930),
cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 0), 2, cv2.LINE_AA)
# image = np.hstack([disparity, text])
cv2.imshow('image', disparity)
while(1):
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_circle)
cv2.imshow('image', disparity)
if cv2.waitKey(20) & 0xFF == 27:
break
cv2.waitKey(0)
cv2.destroyAllWindows()
```