# Update version
```python=
import cv2
import numpy as np
import math
def find_obj(data):
    # Reference point (initial arm position)
x0 = 360
y0 = 240
    # Read the image (given as a relative path) and convert it to grayscale
img = cv2.imread(data)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)  # smooth with a (5, 5) kernel
i = 1
    # Threshold the grayscale image
ret, thresh = cv2.threshold(gray, 200, 255, 0)
#cv2.imshow("test", thresh)
    # Find contours
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
rows, cols = img.shape[:2]
    # Draw the contours
cv2.drawContours(img, contours, -1, (0, 255, 0), 1)
coordinate_list = []
    # Compute the centroid of each contour
for contour in contours:
        # Compute the contour moments
M = cv2.moments(contour)
if M["m00"] != 0:
area = cv2.contourArea(contour)
if area > 500 :
                # Centroid coordinates
center_x = int(M["m10"] / M["m00"])
center_y = int(M["m01"] / M["m00"])
deg = 0.5 * math.atan2((2 * M["mu11"]), (M["mu20"] - M["mu02"]))
                # Draw the centroid
cv2.circle(img, (center_x, center_y), 3, (255, 0, 0), -1)
#print(center_x, center_y)
length = 1000
x1 = int(center_x - length * math.cos(deg))
y1 = int(center_y - length * math.sin(deg))
x2 = int(center_x + length * math.cos(deg))
y2 = int(center_y + length * math.sin(deg))
cv2.line(img, (x1,y1), (x2,y2), (255,0,0), 1)
# [vx, vy, x, y] = cv2.fitLine(contour, cv2.DIST_L2, 0, 0.01, 0.01)
# lefty = int((-x * vy / vx)[0] + y)
# righty = int(((cols - x) * vy / vx)[0] + y)
# cv2.line(img, (cols - 1, righty),(0, lefty), (255, 0, 0), 1)
# a = math.sqrt((lefty-righty)**2+(0-(cols-1))**2)
# b = (-lefty+righty)/a
# c = ((cols-1))/a
                # Convert the angle to degrees
deg = -(np.arctan2(math.sin(deg), math.cos(deg)))* 180 / np.pi
deg = np.around(deg, decimals=2)
                degg = f"principal angle({i}) {deg}"
cv2.putText(img, degg, (center_x+20, center_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 125, 255), 1, cv2.LINE_AA)
#turn_deg = 90 + deg
if deg >= 0 :
deg = -90 + deg
elif deg < 0 :
deg = 90 + deg
                # Connect the centroid to the reference point
cv2.circle(img, (center_x, center_y), 2, (0, 0, 255), -1)
cv2.line(img, (center_x, center_y), (x0, y0), (0, 255, 0), 1)
                # Map image coordinates to arm offsets (image x -> arm y, image y -> arm x)
                distance_y = -center_x + x0
                distance_x = -center_y + y0
                # 5.3 pixels = 1 mm
                distance_y = np.around(distance_y / 5.3, decimals=2)
                distance_x = np.around(distance_x / 5.3, decimals=2)
i += 1
cv2.line(img, (x0, y0), (x0 - 50,y0), (0, 255, 0), 2)
cv2.line(img, (x0, y0-50), (x0, y0), (0, 0, 255), 2)
coordinate_list.append({"dis_x" : distance_x, "dis_y" : distance_y, "deg" : deg})
    # Show the annotated image
#print(np.around(deg, decimals=2))
#plt.imshow(img)
#plt.show()
cv2.imshow("image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
return coordinate_list
if __name__ == "__main__":
data = "images\er7-4.jpg"
coordinate = find_obj(data)
    # Print each detected object's offsets and rotation
    for dic in coordinate:
        x = dic["dis_x"]
        y = dic["dis_y"]
        deg = dic["deg"]
        print(x, y, deg)
```
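
The orientation in this version comes from the image moments: the centroid is (m10/m00, m01/m00) and the principal-axis angle is θ = ½·atan2(2·μ11, μ20 − μ02). As a quick sanity check of that formula on a synthetic rotated rectangle (illustrative only, not part of the project code):

```python
import cv2
import numpy as np
import math

# White rectangle rotated by 30 degrees on a black 720x480 canvas
canvas = np.zeros((480, 720), dtype=np.uint8)
box = cv2.boxPoints(((360, 240), (200, 60), 30.0))  # (center, (w, h), angle in degrees)
cv2.fillPoly(canvas, [box.astype(np.int32)], 255)

contours, _ = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
M = cv2.moments(contours[0])
cx = M["m10"] / M["m00"]  # centroid x
cy = M["m01"] / M["m00"]  # centroid y
theta = 0.5 * math.atan2(2 * M["mu11"], M["mu20"] - M["mu02"])  # principal axis, radians
print(round(cx), round(cy), round(math.degrees(theta), 1))  # expect roughly 360 240 30.0
```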
```python=
import cv2
import numpy as np
import math
def find_obj(data):
    # Reference point (initial arm position)
x0 = 360
y0 = 240
    # Read the image (given as a relative path) and convert it to grayscale
img = cv2.imread(data)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
i = 1
    # Threshold the grayscale image
ret, thresh = cv2.threshold(gray, 200, 255, 0)
#cv2.imshow("test", thresh)
    # Find contours
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
rows, cols = img.shape[:2]
    # Draw the contours
cv2.drawContours(img, contours, -1, (0, 255, 0), 1)
coordinate_list = []
    # Compute the centroid of each contour
for contour in contours:
        # Compute the contour moments
M = cv2.moments(contour)
if M["m00"] != 0:
            # Centroid coordinates
center_x = int(M["m10"] / M["m00"])
center_y = int(M["m01"] / M["m00"])
            th = math.atan2(M["m10"], M["m01"])  # note: unused below
            # Draw the centroid
cv2.circle(img, (center_x, center_y), 3, (255, 0, 0), -1)
#print(center_x, center_y)
[vx, vy, x, y] = cv2.fitLine(contour, cv2.DIST_L2, 0, 0.01, 0.01)
lefty = int((-x * vy / vx)[0] + y)
righty = int(((cols - x) * vy / vx)[0] + y)
cv2.line(img, (cols - 1, righty),(0, lefty), (255, 0, 0), 1)
a = math.sqrt((lefty-righty)**2+(0-(cols-1))**2)
b = (-lefty+righty)/a
c = ((cols-1))/a
            # Convert the angle to degrees
deg = -(np.arctan2(b, c)) * 180 / np.pi
deg = np.around(deg, decimals=2)
#turn_deg = 90 + deg
if deg >= 0 :
deg = -90 + deg
elif deg < 0 :
deg = 90 + deg
            degg = f"principal angle({i}) {deg}"
cv2.putText(img, degg, (center_x+20, center_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 125, 255), 1, cv2.LINE_AA)
            # Connect the centroid to the reference point
cv2.circle(img, (center_x, center_y), 2, (0, 0, 255), -1)
cv2.line(img, (center_x, center_y), (x0, y0), (0, 255, 0), 1)
            # Map image coordinates to arm offsets (image x -> arm y, image y -> arm x)
            distance_y = -center_x + x0
            distance_x = -center_y + y0
            # 5.3 pixels = 1 mm
            distance_y = distance_y / 5.3
            distance_x = distance_x / 5.3
#print(f"distance{i}(mm) = ({distance_x}, {distance_y})")
i += 1
cv2.line(img, (x0, y0), (x0 - 50,y0), (0, 255, 0), 2)
cv2.line(img, (x0, y0-50), (x0, y0), (0, 0, 255), 2)
coordinate_list.append({"dis_x" : distance_x, "dis_y" : distance_y, "deg" : deg})
    # Show the annotated image
#print(np.around(deg, decimals=2))
#plt.imshow(img)
#plt.show()
cv2.imshow("image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
return coordinate_list
if __name__ == "__main__":
data = "images\er7-1.jpg"
coordinate = find_obj(data)
#print(coordinate)
    # Print each detected object's offsets and rotation
    for dic in coordinate:
        x = dic["dis_x"]
        y = dic["dis_y"]
        deg = dic["deg"]
        print(x, y, deg)
```
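
This earlier variant estimates the orientation with `cv2.fitLine` and then reconstructs the slope from the intercepts at the left and right image borders. Since `fitLine` already returns a unit direction vector (vx, vy) and a point on the fitted line, the angle can also be read off directly; a minimal sketch with made-up contour points (not project data):

```python
import cv2
import numpy as np
import math

# Hypothetical contour in the (N, 1, 2) layout produced by cv2.findContours
contour = np.array([[[10, 12]], [[60, 38]], [[110, 62]], [[160, 88]]], dtype=np.int32)

# Least-squares line fit: (vx, vy) is a unit direction vector, (px, py) a point on the line
vx, vy, px, py = cv2.fitLine(contour, cv2.DIST_L2, 0, 0.01, 0.01)
vx, vy = float(vx[0]), float(vy[0])
if vx < 0:
    # fitLine's direction sign is arbitrary; flip so the vector points toward +x
    vx, vy = -vx, -vy
angle_deg = math.degrees(math.atan2(vy, vx))
print(round(angle_deg, 1))  # roughly 27 degrees for these points
```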

```python=
#!/usr/bin/env python
import rclpy
import cv2
import sys
import time
import numpy as np
# from . import image_sub
import math
sys.path.append('/home/robot/colcon_ws/install/tm_msgs/lib/python3.6/site-packages')
from tm_msgs.msg import *
from tm_msgs.srv import *
# arm client
def send_script(script):
arm_node = rclpy.create_node('arm')
arm_cli = arm_node.create_client(SendScript, 'send_script')
while not arm_cli.wait_for_service(timeout_sec=1.0):
        arm_node.get_logger().info('service not available, waiting again...')
move_cmd = SendScript.Request()
move_cmd.script = script
arm_cli.call_async(move_cmd)
arm_node.destroy_node()
# gripper client
def set_io(state):
gripper_node = rclpy.create_node('gripper')
gripper_cli = gripper_node.create_client(SetIO, 'set_io')
while not gripper_cli.wait_for_service(timeout_sec=1.0):
        gripper_node.get_logger().info('service not available, waiting again...')
io_cmd = SetIO.Request()
io_cmd.module = 1
io_cmd.type = 1
io_cmd.pin = 0
io_cmd.state = state
gripper_cli.call_async(io_cmd)
gripper_node.destroy_node()
def find_obj2(img):
    # Reference point (initial arm position)
    x0 = img.shape[1] // 2
    y0 = img.shape[0] // 2 + 420
    # Convert the image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)  # smooth with a (5, 5) kernel
i = 1
    # Threshold the grayscale image
ret, thresh = cv2.threshold(gray, 180, 255, 0)
kernel = np.ones((3,3),np.uint8)
thresh = cv2.erode(thresh, kernel,iterations=3)
cv2.imshow("test-gray",gray)
cv2.imshow("test-thresh",thresh)
    # Find contours
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# for i,contour in enumerate(contours):
# rect=cv2.minAreaRect(contour)
# center,(w,h),angle=rect
# if(w<h):
# angle+=90
# contours[i][]
rows, cols = img.shape[:2]
    # Draw the contours
# cv2.drawContours(img, contours, -1, (0, 255, 0), 1)
coordinate_list = []
# print(contours)
    # Compute the centroid of each contour
for contour in contours:
        # Fit a minimum-area rectangle to the contour
        rect = cv2.minAreaRect(contour)
        center, (w, h), angle = rect
        if w < h:
            angle += 90
print('center',type(center))
print(type(center[0]))
area = cv2.contourArea(contour)
if area > 500 :
            # Centroid coordinates
center_x = int(center[0])
center_y = int(center[1])
            deg = math.radians(angle)  # minAreaRect returns degrees; convert to radians for cos/sin below
            # Draw the centroid
cv2.circle(img, (center_x, center_y), 3, (255, 0, 0), -1)
print(center_x, center_y)
length = 1000
x1 = int(center_x - length * math.cos(deg))
y1 = int(center_y - length * math.sin(deg))
x2 = int(center_x + length * math.cos(deg))
y2 = int(center_y + length * math.sin(deg))
cv2.line(img, (x1,y1), (x2,y2), (255,0,0), 1)
            # Convert the angle to degrees
deg = -(np.arctan2(math.sin(deg), math.cos(deg)))* 180 / np.pi
deg = np.around(deg, decimals=2)
            degg = f"principal angle({i}) {deg}"
cv2.putText(img, degg, (center_x+20, center_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 125, 255), 1, cv2.LINE_AA)
#turn_deg = 180 + deg
if deg >= 0 :
deg = -180 + deg
elif deg < 0 :
deg = 180 + deg
            # Connect the centroid to the reference point
# cv2.circle(img, (center_x, center_y), 2, (0, 0, 255), -1)
# cv2.line(img, (center_x, center_y), (x0, y0), (0, 255, 0), 1)
            # Map image coordinates to arm offsets (image x -> arm y, image y -> arm x)
            distance_y = -center_x + x0
            distance_x = -center_y + y0
            # 5.3 pixels = 1 mm
            distance_y = np.around(distance_y / 5.3, decimals=2)
            distance_x = np.around(distance_x / 5.3, decimals=2)
i += 1
# cv2.line(img, (x0, y0), (x0 - 50,y0), (0, 255, 0), 2)
# cv2.line(img, (x0, y0-50), (x0, y0), (0, 0, 255), 2)
coordinate_list.append((distance_x, distance_y, deg))
    # Show the annotated image
#print(np.around(deg, decimals=2))
# plt.imshow(img)
#plt.show()
cv2.imshow("image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
return coordinate_list
def find_obj(img):
    # Reference point (initial arm position)
    x0 = img.shape[1] // 2
    y0 = img.shape[0] // 2 + 420
    # Convert the image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)  # smooth with a (5, 5) kernel
i = 1
    # Threshold the grayscale image
ret, thresh = cv2.threshold(gray, 180, 255, 0)
kernel = np.ones((3,3),np.uint8)
thresh = cv2.erode(thresh, kernel,iterations=3)
cv2.imshow("test-gray",gray)
cv2.imshow("test-thresh",thresh)
    # Find contours
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# for i,contour in enumerate(contours):
# rect=cv2.minAreaRect(contour)
# center,(w,h),angle=rect
# if(w<h):
# angle+=90
# contours[i][]
rows, cols = img.shape[:2]
    # Draw the contours
# cv2.drawContours(img, contours, -1, (0, 255, 0), 1)
coordinate_list = []
# print(contours)
    # Compute the centroid of each contour
for contour in contours:
        # Compute the contour moments
M = cv2.moments(contour)
if M["m00"] != 0:
area = cv2.contourArea(contour)
if area > 500 :
                # Centroid coordinates
center_x = int(M["m10"] / M["m00"])
center_y = int(M["m01"] / M["m00"])
deg = 0.5 * math.atan2((2 * M["mu11"]), (M["mu20"] - M["mu02"]))
                # Draw the centroid
cv2.circle(img, (center_x, center_y), 3, (255, 0, 0), -1)
print(center_x, center_y)
length = 1000
x1 = int(center_x - length * math.cos(deg))
y1 = int(center_y - length * math.sin(deg))
x2 = int(center_x + length * math.cos(deg))
y2 = int(center_y + length * math.sin(deg))
cv2.line(img, (x1,y1), (x2,y2), (255,0,0), 1)
                # Convert the angle to degrees
deg = -(np.arctan2(math.sin(deg), math.cos(deg)))* 180 / np.pi
deg = np.around(deg, decimals=2)
                degg = f"principal angle({i}) {deg}"
cv2.putText(img, degg, (center_x+20, center_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 125, 255), 1, cv2.LINE_AA)
#turn_deg = 180 + deg
if deg >= 0 :
deg = -180 + deg
elif deg < 0 :
deg = 180 + deg
                # Connect the centroid to the reference point
# cv2.circle(img, (center_x, center_y), 2, (0, 0, 255), -1)
# cv2.line(img, (center_x, center_y), (x0, y0), (0, 255, 0), 1)
                # Map image coordinates to arm offsets (image x -> arm y, image y -> arm x)
                distance_y = -center_x + x0
                distance_x = -center_y + y0
                # 5.3 pixels = 1 mm
                distance_y = np.around(distance_y / 5.3, decimals=2)
                distance_x = np.around(distance_x / 5.3, decimals=2)
i += 1
# cv2.line(img, (x0, y0), (x0 - 50,y0), (0, 255, 0), 2)
# cv2.line(img, (x0, y0-50), (x0, y0), (0, 0, 255), 2)
coordinate_list.append((distance_x, distance_y, deg))
    # Show the annotated image
#print(np.around(deg, decimals=2))
# plt.imshow(img)
#plt.show()
cv2.imshow("image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
return coordinate_list
def handZ(x,y,angle,z):
return (x,y,z,-180.00, 0.0,angle)
def nextPos(init_x, init_y, offset_x, offset_y, angle, next_z):
    temp = handZ(init_x + offset_x, init_y + offset_y, angle, next_z)
    return ",".join(str(v) for v in temp)
def main(args=None):
rclpy.init(args=args)
# node.main_script()
#--- move command by joint angle ---#
# script = 'PTP(\"JPP\",45,0,90,0,90,0,35,200,0,false)'
#--- move command by end effector's pose (x,y,z,a,b,c) ---#
# targetP1 = "398.97, -122.27, 748.26, -179.62, 0.25, 90.12"s
# Initial camera position for taking image (Please do not change the values)
# For right arm: targetP1 = "230.00, 230, 730, -180.00, 0.0, 135.00"
# For left arm: targetP1 = "350.00, 350, 730, -180.00, 0.0, 135.00"
target_init = "300.00, 100, 300, -180.00, 0.0, 90.00"
# targetP2 = "330.00, 150, 600, -180.00, 0.0, 90.00"
script_start = "PTP(\"CPP\","+target_init+",100,50,0,false)"
# script2 = "PTP(\"CPP\","+targetP2+",100,200,0,true)"
# moving
set_io(0.0)
send_script(script_start)
send_script("Vision_DoJob(job1)")
time.sleep(10)
cv2.waitKey(1)
# read img
img=cv2.imread("/home/robot/workspace2/team10_ws/src/send_script/send_script/img/data.png")
offset=find_obj2(img)
# input("test")
print(f"offset={offset}")
for i, pos in enumerate(offset):
send_script(script_start)
# z=300, up
next=nextPos(300.00, 100, pos[0], pos[1],pos[2], 300)
script_next = "PTP(\"CPP\","+next+",100,50,0,false)"
send_script(script_next)
# z=110, down
next=nextPos(300.00, 100, pos[0], pos[1],pos[2], 110)
script_next = "PTP(\"CPP\","+next+",100,50,0,false)"
send_script(script_next)
set_io(1.0)
# z=300, up
next=nextPos(300.00, 100, pos[0], pos[1],pos[2], 300)
script_next = "PTP(\"CPP\","+next+",100,50,0,false)"
send_script(script_next)
# to pile point
time.sleep(3)
target_final = "200.00, 300, 300, -180.00, 0.0, 90.00"
script_final = "PTP(\"CPP\","+target_final+",100,50,0,false)"
send_script(script_final)
target_final = "200.00, 300,"+ str(110+ 25*i)+", -180.00, 0.0, 90.00"
script_final = "PTP(\"CPP\","+target_final+",100,50,0,false)"
send_script(script_final)
set_io(0.0)
target_final = "200.00, 300, 300, -180.00, 0.0, 90.00"
script_final = "PTP(\"CPP\","+target_final+",100,50,0,false)"
send_script(script_final)
print(next)
# back to init place
set_io(0.0)
send_script(script_start)
# What does Vision_DoJob do? Try to use it...
# -------------------------------------------------
# send_script(script2)
# send_script("Vision_DoJob(job1)")
# cv2.waitKey(1)
#--------------------------------------------------
# set_io(1.0) # 1.0: close gripper, 0.0: open gripper
# set_io(0.0)
rclpy.shutdown()
if __name__ == '__main__':
main()
```
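
The node drives the arm with TMScript strings passed to `send_script`: each motion is a `PTP("CPP", x, y, z, rx, ry, rz, ...)` command whose pose part is assembled by `nextPos`/`handZ`. The sketch below is a hypothetical helper (an assumption about how the string building could be factored, not part of the node) that produces the same command string from one detected (dx, dy, deg) tuple:

```python
# Illustrative sketch only: ptp_script is a hypothetical helper, not part of the node above.
def ptp_script(x, y, z, rx=-180.0, ry=0.0, rz=90.0):
    pose = ",".join(str(v) for v in (x, y, z, rx, ry, rz))
    # the trailing "100,50,0,false" mirrors the arguments used throughout main()
    return "PTP(\"CPP\"," + pose + ",100,50,0,false)"

# Example: hover 300 mm above an object reported by find_obj2() at offset (dx, dy) mm
# from the initial pose (300, 100), with the gripper rotated to the detected angle.
dx, dy, deg = 42.5, -13.2, 31.7  # hypothetical detection result
print(ptp_script(300.0 + dx, 100.0 + dy, 300, rz=deg))
```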