# **LINE_Notify research versions (as of 8/26)**

###### tags: `藍柏婷`
###### tags: `2021/08/26`

## **All code versions (in chronological order)**

### **== LINE_Notify_0 ==**

```python=
import requests

def SendMessageToLineNotify(message, pic):
    Token = "WWjgIOXkPZq7RrRGwIUnru3ivMPwqc1g3EMT1Ryo7lx"   # your access token
    url = "https://notify-api.line.me/api/notify"           # the Notify API endpoint (won't load as a normal web page)
    # my_files = {'my_filename': open('C:/Users/藍柏婷/Desktop/竹女網自我介紹詞.docx', 'rb')}

    payload = {'message': message,        # text
               'imageThumbnail': pic,
               'imageFullsize': pic,      # image
               # imageThumbnail and imageFullsize are a pair of image URLs, one per size
               'stickerPackageId': 446,
               'stickerId': 1988          # sticker
               # stickerPackageId and stickerId form a paired sticker ID;
               # see the LINE sticker list: https://developers.line.biz/en/docs/messaging-api/sticker-list/#sticker-definitions
               }                          # the message to send

    header = {'Content-Type': 'application/x-www-form-urlencoded',
              # HTTP offers two ways to POST data: application/x-www-form-urlencoded and multipart/form-data.
              # Use multipart/form-data for binary (non-alphanumeric) data or very large payloads;
              # otherwise use application/x-www-form-urlencoded.
              'Authorization': 'Bearer ' + Token
              # grants access according to the user's role
              # do NOT delete the space in 'Bearer ', or the request will fail!!!
              }                           # custom request headers

    r = requests.post(url, headers=header, data=payload)   # send the text, image and sticker
    print(r.text)   # prints {"status":200,"message":"ok"} on success (.text shows the response body)

def main():
    message = '小雞好可愛'   # message text ("the chick is so cute")
    pic = 'https://imgcdn.cna.com.tw/www/WebPhotos/800/20210427/1920x1280_581754849593.jpg'   # image URL
    SendMessageToLineNotify(message, pic)

if __name__ == '__main__':   # keeps main() from running when this file is imported as a module
    main()
```

### **== LINE_Notify_1 ==**

```python=
import requests

send = {   # most online examples call this "payload"
    'message': "火災警示",   # "fire alert"
    'imageThumbnail': 'https://imgcdn.cna.com.tw/www/WebPhotos/800/20210427/1920x1280_581754849593.jpg',
    'imageFullsize': 'https://imgcdn.cna.com.tw/www/WebPhotos/800/20210427/1920x1280_581754849593.jpg',
}
header = {
    'Content-Type': 'application/x-www-form-urlencoded',
    'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"
}
r = requests.post("https://notify-api.line.me/api/notify", headers=header, data=send)
print(r.text)
```

### **== LINE_Notify_2 ==**

```python=
import requests
# from my-recognition import class_idx

def send_to_line(category='', pic=''):
    send = {
        'message': ("嗨陌生人你用到我們的pincode了 所以傳不到你那邊啦"),   # "hi stranger, you are using our token, so this will not reach you"
        'imageThumbnail': pic,
        'imageFullsize': pic,
    }
    header = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"
    }
    r = requests.post("https://notify-api.line.me/api/notify", headers=header, data=send)
    print(r.text)   # {"status":200,"message":"ok"}

danger = False
if danger != True:
    category = "microwave"
    pic = 'https://memeprod.sgp1.digitaloceanspaces.com/user-wtf/1622455521876.jpg'
    send_to_line(category, pic)
```

### **== LINE_Notify_3 ==**

```python=
import jetson.inference
import jetson.utils
import requests

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
# camera = jetson.utils.videoSource("csi://0")      # CSI camera
camera = jetson.utils.videoSource("/dev/video0")    # USB camera
display = jetson.utils.videoOutput("display://0")   # use 'my_video.mp4' for a file

def send_to_line(category='', pic=''):
    send = {
        'message': ("嗨陌生人你用到我們的pincode了 所以傳不到你那邊啦"),
        'imageThumbnail': pic,
        'imageFullsize': pic,
    }
    header = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"
    }
    r = requests.post("https://notify-api.line.me/api/notify", headers=header, data=send)
    print(r.text)   # {"status":200,"message":"ok"}

while True:
    img = camera.Capture()
    detections = net.Detect(img)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))

    danger = False
    if danger != True:
        category = "microwave"
        pic = 'https://memeprod.sgp1.digitaloceanspaces.com/user-wtf/1622455521876.jpg'
        send_to_line(category, pic)

    if not camera.IsStreaming() or not display.IsStreaming():
        break
```
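LINE_Notify_3 still sends a hard-coded category and a stock photo no matter what is detected. As a sketch of how the detected class could drive the message (my addition, not part of the original versions): jetson-inference detections expose a `ClassID` and a `Confidence`, and the network can translate the ID into a label. The `report_detections` helper name and the 0.6 threshold are assumptions.

```python=
# Sketch (assumption): turn detectNet detections into readable class names
# before deciding what to send. `net` and `detections` come from the loop
# above; MIN_CONFIDENCE is an invented threshold.
MIN_CONFIDENCE = 0.6

def report_detections(net, detections, min_conf=MIN_CONFIDENCE):
    """Return the class names of all detections above a confidence threshold."""
    names = []
    for d in detections:
        if d.Confidence >= min_conf:
            names.append(net.GetClassDesc(d.ClassID))   # e.g. "microwave", "book"
    return names

# possible use inside the while-loop:
#   categories = report_detections(net, detections)
#   if "microwave" in categories:
#       send_to_line("microwave", pic)
```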
### **== LINE_Notify_4 ==**

```python=
import cv2
import requests

def send_to_line(category, pic):
    # message: "Hi, this is a fire-alert notification. Warning: \"{category}\" is overheating."
    params = {'message': "\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)}
    files = {'imageFile': open(pic, 'rb')}
    headers = {'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"}
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params, files=files)
    print(r.text)

cap = cv2.VideoCapture(1)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)    # set frame width to 1920
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)   # set frame height to 1080
cv2.namedWindow('image_win', flags=cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)

img_count = 1
while(True):
    ret, frame = cap.read()
    cv2.imshow('image_win', frame)   # refresh the frame shown in the 'image_win' window;
                                     # do not remove this line, or no picture will be displayed
    key = cv2.waitKey(1)             # wait up to 1 ms for a key event
    if key == ord('q'):
        break
    elif key == ord('c'):            # if 'c' is pressed, save the current frame
        cv2.imwrite("{}.png".format(img_count), frame)   # write the frame out as <img_count>.png
        category = "microwave"
        pic = 'C:/Users/藍柏婷/Desktop/Python/{}.png'.format(img_count)
        send_to_line(category, pic)
        img_count += 1

cap.release()             # release the VideoCapture
cv2.destroyAllWindows()   # destroy all windows
```

### **== my-detection_2.py ==**

```python=
import jetson.inference
import jetson.utils
import requests
import cv2

def send_to_line(category='', pic=''):
    send = {
        'message': ("\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)),
        'imageThumbnail': pic,
        'imageFullsize': pic,
    }
    header = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"
    }
    r = requests.post("https://notify-api.line.me/api/notify", headers=header, data=send)
    print(r.text)   # {"status":200,"message":"ok"}

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.videoSource("/dev/video0")    # USB camera
display = jetson.utils.videoOutput("display://0")   # use 'my_video.mp4' for a file

i = 0
while True:
    img = camera.Capture()
    detections = net.Detect(img)
    if len(detections) >= 1:
        ret, frame = camera.read()    # NOTE: videoSource has no read() method (that is cv2.VideoCapture API), so this line fails
        cv2.imshow("frame", frame)
        category = "book"
        pic = 'https://imgcdn.cna.com.tw/www/WebPhotos/800/20210427/1920x1280_581754849593.jpg'
        send_to_line(category, pic)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    if not camera.IsStreaming() or not display.IsStreaming():
        break
```

### **== my-detection_3.py ==**

```python=
import jetson.inference
import jetson.utils
import requests

def send_to_line(category='', pic=''):
    send = {
        'message': ("\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)),
        'imageThumbnail': pic,
        'imageFullsize': pic,
    }
    header = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"
    }
    r = requests.post("https://notify-api.line.me/api/notify", headers=header, data=send)
    print(r.text)   # {"status":200,"message":"ok"}

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.videoSource("/dev/video0")    # USB camera
display = jetson.utils.videoOutput("display://0")   # use 'my_video.mp4' for a file

while True:
    img = camera.Capture()
    detections = net.Detect(img)
    if len(detections) >= 1:
        category = "book"
        pic = 'https://imgcdn.cna.com.tw/www/WebPhotos/800/20210427/1920x1280_581754849593.jpg'
        send_to_line(category, pic)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    if not camera.IsStreaming() or not display.IsStreaming():
        break
```
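my-detection_3.py still attaches the same stock photo URL; the versions below (LINE_Notify_5 and 6) try to save the live frame with OpenCV instead. One possible way to get a NumPy frame out of a jetson.utils cudaImage is sketched here. This is my addition, not project code, and it assumes the installed jetson-utils build exposes `cudaToNumpy` and that the captured frame is 8-bit RGB; the `save_capture` helper and its output path are made up for illustration.

```python=
import cv2
import jetson.utils

def save_capture(img, path="/home/iamai2021/Desktop/frame.png"):
    """Sketch: copy a jetson.utils cudaImage to host memory and save it with OpenCV."""
    jetson.utils.cudaDeviceSynchronize()           # wait until the GPU has finished writing the frame
    array = jetson.utils.cudaToNumpy(img)          # view the cudaImage as a NumPy array (RGB order)
    bgr = cv2.cvtColor(array, cv2.COLOR_RGB2BGR)   # OpenCV writes BGR, so swap the channels
    cv2.imwrite(path, bgr)                         # assumes an 8-bit frame; float formats would need scaling first
    return path

# possible use inside the detection loop:
#   if len(detections) >= 1:
#       send_to_line("microwave", save_capture(img))
```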
### **== LINE_Notify_5 ==**

```python=
import jetson.inference
import jetson.utils
import requests
import cv2

def send_to_line(category, pic):
    params = {'message': "\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)}
    files = {'imageFile': open(pic, 'rb')}
    headers = {'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"}
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params, files=files)
    print(r.text)

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.videoSource("/dev/video0")    # USB camera
display = jetson.utils.videoOutput("display://0")   # use 'my_video.mp4' for a file
# camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)    # set frame width to 1920
# camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)   # set frame height to 1080
# cv2.namedWindow('image_win', flags=cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)

while True:
    img = camera.Capture()
    detections = net.Detect(img)
    if len(detections) >= 1:
        img_count = 1
        # frame = camera.read()
        cv2.imshow('image_win', img)   # refresh the frame shown in the 'image_win' window;
                                       # do not remove this line, or no picture will be displayed
        key = cv2.waitKey(1)           # wait up to 1 ms for a key event
        if key == ord('q'):
            break
        elif key == ord('c'):          # if 'c' is pressed, save the current frame
            cv2.imwrite("{}.png".format(img_count), img)   # write the frame out as <img_count>.png
            category = "microwave"
            pic = '/home/iamai2021/Desktop/{}.png'.format(img_count)
            send_to_line(category, pic)
            img_count += 1
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    if not camera.IsStreaming() or not display.IsStreaming():
        break

# cap.release()             # release the VideoCapture
# cv2.destroyAllWindows()   # destroy all windows
```

### **== LINE_Notify_6 ==**

```python=
import jetson.inference
import jetson.utils
import requests
import cv2

def send_to_line(category, pic):
    params = {'message':"\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)}
    files = {'imageFile': open(pic, 'rb')}
    headers = {'Authorization':'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"}
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params, files=files)
    print(r.text)

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.videoSource("/dev/video0")    # USB camera
display = jetson.utils.videoOutput("display://0")   # use 'my_video.mp4' for a file
# camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)    # set frame width to 1920
# camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)   # set frame height to 1080
# cv2.namedWindow('image_win', flags=cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)

img_count = 1
while True:
    img = camera.Capture()
    detections = net.Detect(img)
    # frame = display.Render(img)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    if len(detections) >= 1:
        cv2.imwrite("{}.png".format(img_count), frame)   # write the frame out as <img_count>.png
                                                         # NOTE: `frame` is never defined in this version
        # frame = camera.read()
        # cv2.imshow('image_win', frame)   # refresh the frame shown in 'image_win';
        #                                  # do not remove this line, or no picture will be displayed
        # key = cv2.waitKey(1)             # wait up to 1 ms for a key event
        # if key == ord('q'):
        #     break
        # elif key == ord('c'):            # if 'c' is pressed, save the current frame
        #     cv2.imwrite("{}.png".format(img_count), frame)   # write the frame out as <img_count>.png
        category = "microwave"
        pic = '/home/iamai2021/Desktop/{}.png'.format(img_count)
        send_to_line(category, pic)
        img_count += 1
    if not camera.IsStreaming() or not display.IsStreaming():
        break
```
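Versions 4 through 6 write the frame to disk and then immediately reopen the file just to attach it. A possible shortcut (my addition, not used in the project code) is to encode the frame in memory with `cv2.imencode` and hand the bytes straight to requests, so no temporary file is needed. The `TOKEN` placeholder must be replaced with a real access token.

```python=
import cv2
import requests

TOKEN = "YOUR_LINE_NOTIFY_TOKEN"   # placeholder, substitute a real token

def send_frame_to_line(message, frame):
    """Sketch: POST a BGR NumPy frame to LINE Notify without writing it to disk."""
    ok, buf = cv2.imencode('.jpg', frame)   # JPEG-encode the frame in memory
    if not ok:
        return
    r = requests.post(
        "https://notify-api.line.me/api/notify",
        headers={'Authorization': 'Bearer ' + TOKEN},
        data={'message': message},
        files={'imageFile': ('frame.jpg', buf.tobytes(), 'image/jpeg')},
    )
    print(r.text)   # expect {"status":200,"message":"ok"}
```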
### **== LINE_Notify_7 ==**

```python=
import jetson.inference
import jetson.utils
import requests

def send_to_line(category, pic):
    params = {'message': "\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)}
    files = {'imageFile': open(pic, 'rb')}
    headers = {'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"}
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params, files=files)
    print(r.text)

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.videoSource("/dev/video0")
display = jetson.utils.videoOutput("file://my_image.jpg")

while True:
    img = camera.Capture()
    detections = net.Detect(img)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    if len(detections) >= 1:
        category = "microwave"
        pic = '/home/iamai2021/Desktop/my_image.jpg'
        send_to_line(category, pic)
    if not camera.IsStreaming() or not display.IsStreaming():
        break
```

## **Final version demo**

### **== LINE_Notify_7 ==**

```python=
import jetson.inference
import jetson.utils
import requests

def send_to_line(category, pic):
    params = {'message': "\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)}
    files = {'imageFile': open(pic, 'rb')}
    headers = {'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"}
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params, files=files)
    print(r.text)

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.videoSource("/dev/video0")
display = jetson.utils.videoOutput("file://my_image.jpg")

while True:
    img = camera.Capture()
    detections = net.Detect(img)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    if len(detections) >= 1:
        category = "microwave"
        pic = '/home/iamai2021/Desktop/my_image.jpg'
        send_to_line(category, pic)
    if not camera.IsStreaming() or not display.IsStreaming():
        break
```
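Note that this loop calls `send_to_line` on every frame that contains a detection, which is why the run log below shows several `{"status":200,"message":"ok"}` replies in quick succession. A simple cooldown is one way to throttle that. The sketch below is my addition, not part of the project code, and the 60-second window is an arbitrary choice.

```python=
import time

COOLDOWN_SECONDS = 60   # invented value; tune as needed
_last_sent = 0.0

def maybe_send(category, pic):
    """Sketch: forward to send_to_line() at most once per COOLDOWN_SECONDS."""
    global _last_sent
    now = time.time()
    if now - _last_sent >= COOLDOWN_SECONDS:
        send_to_line(category, pic)
        _last_sent = now

# in the detection loop, the direct call could become:
#   if len(detections) >= 1:
#       maybe_send("microwave", '/home/iamai2021/Desktop/my_image.jpg')
```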
### **== Run output ==**

:::success
**# Run output**
```python
iamai2021@iamai2021:~/Desktop$ python3 LINE_Notify_5.py
/usr/lib/python3/dist-packages/requests/__init__.py:80: RequestsDependencyWarning: urllib3 (1.26.6) or chardet (3.0.4) doesn't match a supported version!
  RequestsDependencyWarning)
jetson.inference -- detectNet loading build-in network 'ssd-mobilenet-v2'
detectNet -- loading detection network model from:
          -- model        networks/SSD-Mobilenet-v2/ssd_mobilenet_v2_coco.uff
          -- input_blob   'Input'
          -- output_blob  'NMS'
          -- output_count 'NMS_1'
          -- class_labels networks/SSD-Mobilenet-v2/ssd_coco_labels.txt
          -- threshold    0.500000
          -- batch_size   1

[TRT]    TensorRT version 7.1.3
[TRT]    loading NVIDIA plugins...
[TRT]    Registered plugin creator - ::GridAnchor_TRT version 1
[TRT]    Registered plugin creator - ::NMS_TRT version 1
[TRT]    Registered plugin creator - ::Reorg_TRT version 1
[TRT]    Registered plugin creator - ::Region_TRT version 1
[TRT]    Registered plugin creator - ::Clip_TRT version 1
[TRT]    Registered plugin creator - ::LReLU_TRT version 1
[TRT]    Registered plugin creator - ::PriorBox_TRT version 1
[TRT]    Registered plugin creator - ::Normalize_TRT version 1
[TRT]    Registered plugin creator - ::RPROI_TRT version 1
[TRT]    Registered plugin creator - ::BatchedNMS_TRT version 1
[TRT]    Could not register plugin creator - ::FlattenConcat_TRT version 1
[TRT]    Registered plugin creator - ::CropAndResize version 1
[TRT]    Registered plugin creator - ::DetectionLayer_TRT version 1
[TRT]    Registered plugin creator - ::Proposal version 1
[TRT]    Registered plugin creator - ::ProposalLayer_TRT version 1
[TRT]    Registered plugin creator - ::PyramidROIAlign_TRT version 1
[TRT]    Registered plugin creator - ::ResizeNearest_TRT version 1
[TRT]    Registered plugin creator - ::Split version 1
[TRT]    Registered plugin creator - ::SpecialSlice_TRT version 1
[TRT]    Registered plugin creator - ::InstanceNormalization_TRT version 1
[TRT]    detected model format - UFF (extension '.uff')
[TRT]    desired precision specified for GPU: FASTEST
[TRT]    requested fasted precision for device GPU without providing valid calibrator, disabling INT8
[TRT]    native precisions detected for GPU: FP32, FP16
[TRT]    selecting fastest native precision for GPU: FP16
[TRT]    attempting to open engine cache file /usr/local/bin/networks/SSD-Mobilenet-v2/ssd_mobilenet_v2_coco.uff.1.1.7103.GPU.FP16.engine
[TRT]    loading network plan from engine cache... /usr/local/bin/networks/SSD-Mobilenet-v2/ssd_mobilenet_v2_coco.uff.1.1.7103.GPU.FP16.engine
[TRT]    device GPU, loaded /usr/local/bin/networks/SSD-Mobilenet-v2/ssd_mobilenet_v2_coco.uff
[TRT]    Using an engine plan file across different models of devices is not recommended and is likely to affect performance or even cause errors.
[TRT]    Deserialize required 7629430 microseconds.
[TRT]
[TRT]    CUDA engine context initialized on device GPU:
[TRT]       -- layers       119
[TRT]       -- maxBatchSize 1
[TRT]       -- workspace    0
[TRT]       -- deviceMemory 35486720
[TRT]       -- bindings     3
[TRT]       binding 0
                -- index   0
                -- name    'Input'
                -- type    FP32
                -- in/out  INPUT
                -- # dims  3
                -- dim #0  3 (SPATIAL)
                -- dim #1  300 (SPATIAL)
                -- dim #2  300 (SPATIAL)
[TRT]       binding 1
                -- index   1
                -- name    'NMS'
                -- type    FP32
                -- in/out  OUTPUT
                -- # dims  3
                -- dim #0  1 (SPATIAL)
                -- dim #1  100 (SPATIAL)
                -- dim #2  7 (SPATIAL)
[TRT]       binding 2
                -- index   2
                -- name    'NMS_1'
                -- type    FP32
                -- in/out  OUTPUT
                -- # dims  3
                -- dim #0  1 (SPATIAL)
                -- dim #1  1 (SPATIAL)
                -- dim #2  1 (SPATIAL)
[TRT]
[TRT]    binding to input 0 Input  binding index:  0
[TRT]    binding to input 0 Input  dims (b=1 c=3 h=300 w=300) size=1080000
[TRT]    binding to output 0 NMS  binding index:  1
[TRT]    binding to output 0 NMS  dims (b=1 c=1 h=100 w=7) size=2800
[TRT]    binding to output 1 NMS_1  binding index:  2
[TRT]    binding to output 1 NMS_1  dims (b=1 c=1 h=1 w=1) size=4
[TRT]
[TRT]    device GPU, /usr/local/bin/networks/SSD-Mobilenet-v2/ssd_mobilenet_v2_coco.uff initialized.
[TRT]    W = 7  H = 100  C = 1
[TRT]    detectNet -- maximum bounding boxes: 100
[TRT]    detectNet -- loaded 91 class info entries
[TRT]    detectNet -- number of object classes: 91
[gstreamer] initialized gstreamer, version 1.14.5.0
[gstreamer] gstCamera -- attempting to create device v4l2:///dev/video0
[gstreamer] gstCamera -- found v4l2 device: UVC Camera (046d:0825)
[gstreamer] v4l2-proplist, device.path=(string)/dev/video0, udev-probed=(boolean)false, device.api=(string)v4l2, v4l2.device.driver=(string)uvcvideo, v4l2.device.card=(string)"UVC\ Camera\ \(046d:0825\)", v4l2.device.bus_info=(string)usb-70090000.xusb-2, v4l2.device.version=(uint)264649, v4l2.device.capabilities=(uint)2216689665, v4l2.device.device_caps=(uint)69206017;
[gstreamer] gstCamera -- found 38 caps for v4l2 device /dev/video0
[gstreamer] [0] video/x-raw, format=(string)YUY2, width=(int)1280, height=(int)960, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 15/2, 5/1 };
[gstreamer] [1] video/x-raw, format=(string)YUY2, width=(int)1280, height=(int)720, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 15/2, 5/1 };
[gstreamer] [2] video/x-raw, format=(string)YUY2, width=(int)1184, height=(int)656, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 10/1, 5/1 };
[gstreamer] [3] video/x-raw, format=(string)YUY2, width=(int)960, height=(int)720, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 10/1, 5/1 };
[gstreamer] [4] video/x-raw, format=(string)YUY2, width=(int)1024, height=(int)576, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 10/1, 5/1 };
[gstreamer] [5] video/x-raw, format=(string)YUY2, width=(int)960, height=(int)544, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 15/1, 10/1, 5/1 };
[gstreamer] [6] video/x-raw, format=(string)YUY2, width=(int)800, height=(int)600, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [7] video/x-raw, format=(string)YUY2, width=(int)864, height=(int)480, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [8] video/x-raw, format=(string)YUY2, width=(int)800, height=(int)448, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [9] video/x-raw, format=(string)YUY2, width=(int)752, height=(int)416, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [10] video/x-raw, format=(string)YUY2, width=(int)640, height=(int)480, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [11] video/x-raw, format=(string)YUY2, width=(int)640, height=(int)360, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [12] video/x-raw, format=(string)YUY2, width=(int)544, height=(int)288, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [13] video/x-raw, format=(string)YUY2, width=(int)432, height=(int)240, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [14] video/x-raw, format=(string)YUY2, width=(int)352, height=(int)288, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [15] video/x-raw, format=(string)YUY2, width=(int)320, height=(int)240, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [16] video/x-raw, format=(string)YUY2, width=(int)320, height=(int)176, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [17] video/x-raw, format=(string)YUY2, width=(int)176, height=(int)144, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [18] video/x-raw, format=(string)YUY2, width=(int)160, height=(int)120, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [19] image/jpeg, width=(int)1280, height=(int)960, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [20] image/jpeg, width=(int)1280, height=(int)720, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [21] image/jpeg, width=(int)1184, height=(int)656, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [22] image/jpeg, width=(int)960, height=(int)720, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [23] image/jpeg, width=(int)1024, height=(int)576, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [24] image/jpeg, width=(int)960, height=(int)544, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [25] image/jpeg, width=(int)800, height=(int)600, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [26] image/jpeg, width=(int)864, height=(int)480, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [27] image/jpeg, width=(int)800, height=(int)448, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [28] image/jpeg, width=(int)752, height=(int)416, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [29] image/jpeg, width=(int)640, height=(int)480, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [30] image/jpeg, width=(int)640, height=(int)360, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [31] image/jpeg, width=(int)544, height=(int)288, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [32] image/jpeg, width=(int)432, height=(int)240, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [33] image/jpeg, width=(int)352, height=(int)288, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [34] image/jpeg, width=(int)320, height=(int)240, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [35] image/jpeg, width=(int)320, height=(int)176, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [36] image/jpeg, width=(int)176, height=(int)144, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [37] image/jpeg, width=(int)160, height=(int)120, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] gstCamera -- selected device profile: codec=mjpeg format=unknown width=1280 height=720
[gstreamer] gstCamera pipeline string:
[gstreamer] v4l2src device=/dev/video0 ! image/jpeg, width=(int)1280, height=(int)720 ! jpegdec ! video/x-raw ! appsink name=mysink
[gstreamer] gstCamera successfully created device v4l2:///dev/video0
[video]  created gstCamera from v4l2:///dev/video0
------------------------------------------------
gstCamera video options:
------------------------------------------------
  -- URI: v4l2:///dev/video0
     - protocol:  v4l2
     - location:  /dev/video0
  -- deviceType: v4l2
  -- ioType:     input
  -- codec:      mjpeg
  -- width:      1280
  -- height:     720
  -- frameRate:  30.000000
  -- bitRate:    0
  -- numBuffers: 4
  -- zeroCopy:   true
  -- flipMethod: none
  -- loop:       0
  -- rtspLatency 2000
------------------------------------------------
[video]  created imageWriter from file://my_image.jpg
------------------------------------------------
imageWriter video options:
------------------------------------------------
  -- URI: file://my_image.jpg
     - protocol:  file
     - location:  my_image.jpg
     - extension: jpg
  -- deviceType: file
  -- ioType:     output
  -- codec:      unknown
  -- width:      0
  -- height:     0
  -- frameRate:  0.000000
  -- bitRate:    0
  -- numBuffers: 4
  -- zeroCopy:   true
  -- flipMethod: none
  -- loop:       0
  -- rtspLatency 2000
------------------------------------------------
[OpenGL] glDisplay -- X screen 0 resolution: 1920x1080
[OpenGL] glDisplay -- X window resolution: 1920x1080
[OpenGL] glDisplay -- display device initialized (1920x1080)
[video]  created glDisplay from display://0
------------------------------------------------
glDisplay video options:
------------------------------------------------
  -- URI: display://0
     - protocol:  display
     - location:  0
  -- deviceType: display
  -- ioType:     output
  -- codec:      raw
  -- width:      1920
  -- height:     1080
  -- frameRate:  0.000000
  -- bitRate:    0
  -- numBuffers: 4
  -- zeroCopy:   true
  -- flipMethod: none
  -- loop:       0
  -- rtspLatency 2000
------------------------------------------------
[gstreamer] opening gstCamera for streaming, transitioning pipeline to GST_STATE_PLAYING
[gstreamer] gstreamer changed state from NULL to READY ==> mysink
[gstreamer] gstreamer changed state from NULL to READY ==> capsfilter1
[gstreamer] gstreamer changed state from NULL to READY ==> jpegdec0
[gstreamer] gstreamer changed state from NULL to READY ==> capsfilter0
[gstreamer] gstreamer changed state from NULL to READY ==> v4l2src0
[gstreamer] gstreamer changed state from NULL to READY ==> pipeline0
[gstreamer] gstreamer changed state from READY to PAUSED ==> capsfilter1
[gstreamer] gstreamer changed state from READY to PAUSED ==> jpegdec0
[gstreamer] gstreamer changed state from READY to PAUSED ==> capsfilter0
[gstreamer] gstreamer stream status CREATE ==> src
[gstreamer] gstreamer changed state from READY to PAUSED ==> v4l2src0
[gstreamer] gstreamer changed state from READY to PAUSED ==> pipeline0
[gstreamer] gstreamer stream status ENTER ==> src
[gstreamer] gstreamer message new-clock ==> pipeline0
[gstreamer] gstreamer changed state from PAUSED to PLAYING ==> capsfilter1
[gstreamer] gstreamer changed state from PAUSED to PLAYING ==> jpegdec0
[gstreamer] gstreamer changed state from PAUSED to PLAYING ==> capsfilter0
[gstreamer] gstreamer changed state from PAUSED to PLAYING ==> v4l2src0
[gstreamer] gstreamer message stream-start ==> pipeline0
[gstreamer] gstCamera -- onPreroll
[gstreamer] gstCamera -- map buffer size was less than max size (1382400 vs 1382407)
[gstreamer] gstCamera recieve caps: video/x-raw, format=(string)I420, width=(int)1280, height=(int)720, interlace-mode=(string)progressive, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, pixel-aspect-ratio=(fraction)1/1, chroma-site=(string)mpeg2, colorimetry=(string)1:4:0:0, framerate=(fraction)30/1
[gstreamer] gstCamera -- recieved first frame, codec=mjpeg format=i420 width=1280 height=720 size=1382407
RingBuffer -- allocated 4 buffers (1382407 bytes each, 5529628 bytes total)
[gstreamer] gstreamer changed state from READY to PAUSED ==> mysink
[gstreamer] gstreamer message async-done ==> pipeline0
[gstreamer] gstreamer changed state from PAUSED to PLAYING ==> mysink
[gstreamer] gstreamer changed state from PAUSED to PLAYING ==> pipeline0
RingBuffer -- allocated 4 buffers (2764800 bytes each, 11059200 bytes total)
[OpenGL] glDisplay -- set the window size to 1280x720
[OpenGL] creating 1280x720 texture (GL_RGB8 format, 2764800 bytes)
[cuda] registered openGL texture for interop access (1280x720, GL_RGB8, 2764800 bytes)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[gstreamer] gstreamer message qos ==> v4l2src0
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
{"status":200,"message":"ok"}
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
{"status":200,"message":"ok"}
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
{"status":200,"message":"ok"}
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
{"status":200,"message":"ok"}
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[OpenGL] glDisplay -- the window has been closed
[image] saved 'my_image.jpg' (1280x720, 3 channels)
[gstreamer] gstCamera -- stopping pipeline, transitioning to GST_STATE_NULL
[gstreamer] gstCamera -- onPreroll
[gstreamer] gstCamera -- pipeline stopped
```
:::

### **== Actual result ==**

:::success
**# Actual result**
![](https://i.imgur.com/mhWNcqy.jpg)
![](https://i.imgur.com/9aPEr6K.jpg)
![](https://i.imgur.com/TkROUo0.jpg)
![](https://i.imgur.com/WRaAbXA.jpg)
:::

## **Conclusion**

:::info
**LINE Notify image and text delivery: DONE!**
:::

## **All reference sites**

### **LINE_Notify**
* https://engineering.linecorp.com/zh-hant/blog/using-line-notify-to-send-stickers-and-upload-images/
* https://blog.miniasp.com/post/2020/02/17/Go-Through-LINE-Notify-Without-Any-Code
* https://www.oxxostudio.tw/articles/201806/line-notify.html
* http://white5168.blogspot.com/2017/01/line-notify-4-line-notify.html
* https://vimsky.com/zh-tw/examples/usage/python-pil-image-thumbnail-method.html
* https://ithelp.ithome.com.tw/articles/10234576
* https://notify-bot.line.me/doc/en/
* https://eason851021.medium.com/line-notify-%E5%88%A9%E7%94%A8python%E5%82%B3%E9%80%81%E5%AE%A2%E8%A3%BD%E5%8C%96%E8%A8%8A%E6%81%AF-%E4%BB%A5%E5%90%89%E5%A8%83%E5%A8%83%E9%95%B7%E8%BC%A9%E5%9C%96%E7%82%BA%E4%BE%8B-2a50c6a5197b
* http://blog.3dgowl.com/arduino-esp32%E4%B8%89-%E4%BD%BF%E7%94%A8-esp32-cam-%E6%8B%8D%E7%85%A7%E4%B8%A6%E5%82%B3line%E9%80%9A%E7%9F%A5/
* http://cheng-min-i-taiwan.blogspot.com/2018/02/line-bot-notify-aspnet-vbnet.html
* https://www.rs-online.com/designspark/nvidia-jetson-nano-line-cn
* https://www.learncodewithmike.com/2020/06/python-line-notify.html
* https://ithelp.ithome.com.tw/questions/10201732

### **LINE_Robot**
* https://engineering.linecorp.com/zh-hant/blog/line-device-10/

### **Requests**
* https://docs.python-requests.org/en/master/
* https://blog.gtwang.org/programming/python-requests-module-tutorial/

### **Bearer Token**
* https://blog.yorkxin.org/posts/oauth2-6-bearer-token.html
* https://ithelp.ithome.com.tw/articles/10197166

### **Keras**
* https://keras.io/guides/functional_api/
* https://ithelp.ithome.com.tw/articles/10191627
* https://vimsky.com/zh-tw/examples/detail/python-method-keras.models.load_model.html
* https://github.com/tensorflow/tensorflow/issues/26813
* https://github.com/googlecreativelab/teachablemachine-community/blob/master/snippets/markdown/image/tensorflow/keras.md
* https://hoohoo.top/blog/several-ways-to-solve-the-no-module-named-kerasmodels-error/

### **Webhook**
* https://jp.cybozu.help/k/zh-tw/user/app_settings/set_webhook/webhook.html

### **Tensorflow**
* https://blog.csdn.net/blueheart20/article/details/78980736
* https://forums.developer.nvidia.com/t/official-tensorflow-for-jetson-nano/71770
* https://www.daimajiaoliu.com/daima/47991c649100407

### **shell**
* https://blog.csdn.net/weixin_37913042/article/details/103968504
* https://www.runoob.com/linux/linux-shell.html
* https://zhuanlan.zhihu.com/p/104729643

### **Parser/Argparse**
* http://inspiregate.com/programming/other/476-compiler-3-3.html
* https://docs.python.org/zh-tw/3/howto/argparse.html

### **WebDriver API**
* https://selenium-python.readthedocs.io/api.html

### **OpenCV**
* http://www.1zlab.com/wiki/python-opencv-tutorial/opencv-read-usb-camera-image/
* https://blog.gtwang.org/programming/opencv-webcam-video-capture-and-file-write-tutorial/
* https://blog.gtwang.org/programming/opencv-basic-image-read-and-write-tutorial/
* https://blog.cavedu.com/2020/02/06/jetson-nano-01-webcam/

### **jetson.utils**
* https://cloud.tencent.com/developer/article/1845702

### **Error messages**
* https://github.com/keras-team/keras-tuner/issues/317
* https://github.com/tensorflow/tensorflow/issues/26813
* https://hoohoo.top/blog/several-ways-to-solve-the-no-module-named-kerasmodels-error/
* https://github.com/pypa/virtualenv/issues/988

### **nvgstcapture-1.0**
* https://github.com/pauldotknopf/JetsonTX1Drivers/blob/master/nv_tegra/nv_sample_apps/nvgstcapture-1.0_README.txt

### **NVIDIA**
* https://developer.nvidia.com/embedded/learn/tutorials/first-picture-csi-usb-camera

### **jetson inference**
* https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-streaming.md