
LINE_Notify research version collection (~8/26)

tags: 藍柏婷
tags: 2021/08/26

## All code versions (ordered by time)

== LINE_Notify_0 ==

```python
import requests

def SendMessageToLineNotify(message, pic):
    Token = "WWjgIOXkPZq7RrRGwIUnru3ivMPwqc1g3EMT1Ryo7lx"  # your access token
    url = "https://notify-api.line.me/api/notify"  # the API URL (it won't load as a regular web page)
    # my_files = {'my_filename': open('C:/Users/藍柏婷/Desktop/竹女網自我介紹詞.docx', 'rb')}
    payload = {
        'message': message,        # text
        'imageThumbnail': pic,
        'imageFullsize': pic,      # image
        # imageThumbnail and imageFullsize are a paired set of image URLs, one per size
        'stickerPackageId': 446,
        'stickerId': 1988          # sticker
        # stickerPackageId and stickerId are the paired sticker IDs;
        # see the LINE Sticker List: https://developers.line.biz/en/docs/messaging-api/sticker-list/#sticker-definitions
    }  # the message to send
    header = {
        'Content-Type': 'application/x-www-form-urlencoded',
        # HTTP offers two ways to POST data: application/x-www-form-urlencoded and multipart/form-data.
        # Use multipart/form-data for binary (non-alphanumeric) data or very large payloads;
        # otherwise use application/x-www-form-urlencoded.
        'Authorization': 'Bearer ' + Token
        # grants permissions according to the user's role
        # do NOT delete the space inside 'Bearer '!!! Things will break!!!
    }  # custom request header
    r = requests.post(url, headers=header, data=payload)  # send the text, image, and sticker
    print(r.text)  # prints "{"status":200,"message":"ok"}" on success (.text shows the response body)

def main():
    message = '小雞好可愛'  # message text
    pic = 'https://imgcdn.cna.com.tw/www/WebPhotos/800/20210427/1920x1280_581754849593.jpg'  # image
    SendMessageToLineNotify(message, pic)

if __name__ == '__main__':  # keeps main() from running when this file is imported by another script
    main()
```
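The token in LINE_Notify_0 is hardcoded. As a small, hedged variation (not one of the versions above), the token could instead be read from an environment variable so it never sits in the notebook; `LINE_NOTIFY_TOKEN` is an assumed variable name.

```python
import os
import requests

def send_message_to_line_notify(message, pic):
    # assumed setup: export LINE_NOTIFY_TOKEN=<your token> before running
    token = os.environ.get("LINE_NOTIFY_TOKEN")
    if not token:
        raise RuntimeError("LINE_NOTIFY_TOKEN is not set")
    payload = {'message': message, 'imageThumbnail': pic, 'imageFullsize': pic}
    header = {'Authorization': 'Bearer ' + token}  # requests sets the form Content-Type for data= itself
    r = requests.post("https://notify-api.line.me/api/notify", headers=header, data=payload)
    print(r.text)
```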

== LINE_Notify_1 ==

```python
import requests

send = {  # most tutorials call this "payload"
    'message': "火災警示",
    'imageThumbnail': 'https://imgcdn.cna.com.tw/www/WebPhotos/800/20210427/1920x1280_581754849593.jpg',
    'imageFullsize': 'https://imgcdn.cna.com.tw/www/WebPhotos/800/20210427/1920x1280_581754849593.jpg',
}
header = {
    'Content-Type': 'application/x-www-form-urlencoded',
    'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"
}
r = requests.post("https://notify-api.line.me/api/notify", headers=header, data=send)
print(r.text)
```
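LINE_Notify_1 only prints `r.text`, so at a glance a bad token looks the same as a success. A minimal, hedged sketch of checking the HTTP status first (`YOUR_TOKEN` is a placeholder):

```python
import requests

send = {'message': "火災警示"}                        # same payload fields as LINE_Notify_1
header = {'Authorization': 'Bearer ' + "YOUR_TOKEN"}  # placeholder token

r = requests.post("https://notify-api.line.me/api/notify", headers=header, data=send)
if r.status_code != 200:
    # e.g. 401 means the token is invalid or has been revoked
    print("notify failed:", r.status_code, r.text)
else:
    print(r.text)  # {"status":200,"message":"ok"}
```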

== LINE_Notify_2 ==

```python
import requests
# from my-recognition import class_idx

def send_to_line(category='', pic=''):
    send = {
        'message': ("嗨陌生人你用到我們的pincode了 所以傳不到你那邊啦"),
        'imageThumbnail': pic,
        'imageFullsize': pic,
    }
    header = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"
    }
    r = requests.post("https://notify-api.line.me/api/notify", headers=header, data=send)
    print(r.text)  # {"status":200,"message":"ok"}

danger = False
if danger != True:
    category = "microwave"
    pic = 'https://memeprod.sgp1.digitaloceanspaces.com/user-wtf/1622455521876.jpg'
    send_to_line(category, pic)
```

== LINE_Notify_3 ==

```python
import jetson.inference
import jetson.utils
import requests

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
# camera = jetson.utils.videoSource("csi://0")     # CSI camera
camera = jetson.utils.videoSource("/dev/video0")   # USB camera
display = jetson.utils.videoOutput("display://0")  # use 'my_video.mp4' for a file

def send_to_line(category='', pic=''):
    send = {
        'message': ("嗨陌生人你用到我們的pincode了 所以傳不到你那邊啦"),
        'imageThumbnail': pic,
        'imageFullsize': pic,
    }
    header = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"
    }
    r = requests.post("https://notify-api.line.me/api/notify", headers=header, data=send)
    print(r.text)  # {"status":200,"message":"ok"}

while True:
    img = camera.Capture()
    detections = net.Detect(img)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    danger = False
    if danger != True:
        category = "microwave"
        pic = 'https://memeprod.sgp1.digitaloceanspaces.com/user-wtf/1622455521876.jpg'
        send_to_line(category, pic)
    if not camera.IsStreaming() or not display.IsStreaming():
        break
```

== LINE_Notify_4 ==

```python
import cv2
import requests

def send_to_line(category, pic):
    params = {'message': "\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)}
    files = {'imageFile': open(pic, 'rb')}
    headers = {'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"}
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params, files=files)
    print(r.text)

cap = cv2.VideoCapture(1)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)   # set the frame width to 1920
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)  # set the frame height to 1080
cv2.namedWindow('image_win', flags=cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)
img_count = 1
while True:
    ret, frame = cap.read()
    cv2.imshow('image_win', frame)  # refresh the image in the 'image_win' window
                                    # do not remove this, or no picture will be shown
    key = cv2.waitKey(1)            # wait up to 1 ms for a key event
    if key == ord('q'):
        break
    elif key == ord('c'):           # when 'c' is pressed, save the current frame
        cv2.imwrite("{}.png".format(img_count), frame)  # write the image as <img_count>.png
        category = "microwave"
        pic = 'C:/Users/藍柏婷/Desktop/Python/{}.png'.format(img_count)
        send_to_line(category, pic)
        img_count += 1
cap.release()            # release the VideoCapture
cv2.destroyAllWindows()  # destroy all windows
```
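A side note on LINE_Notify_4: `open(pic, 'rb')` leaves the file handle open after the request returns. A hedged variant of `send_to_line` that closes it with a context manager (same endpoint and fields; the token is a placeholder here):

```python
import requests

def send_to_line(category, pic, token="YOUR_TOKEN"):
    params = {'message': "\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)}
    headers = {'Authorization': 'Bearer ' + token}
    # the with-block closes the file even if requests.post() raises
    with open(pic, 'rb') as f:
        r = requests.post("https://notify-api.line.me/api/notify",
                          headers=headers, params=params, files={'imageFile': f})
    print(r.text)
```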

== my-detection_2.py ==

```python
import jetson.inference
import jetson.utils
import requests
import cv2

def send_to_line(category='', pic=''):
    send = {
        'message': ("\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)),
        'imageThumbnail': pic,
        'imageFullsize': pic,
    }
    header = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"
    }
    r = requests.post("https://notify-api.line.me/api/notify", headers=header, data=send)
    print(r.text)  # {"status":200,"message":"ok"}

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.videoSource("/dev/video0")   # USB camera
display = jetson.utils.videoOutput("display://0")  # use 'my_video.mp4' for a file

i = 0
while True:
    img = camera.Capture()
    detections = net.Detect(img)
    if len(detections) >= 1:
        # NOTE: jetson.utils.videoSource has no read() method (that is the OpenCV VideoCapture API),
        # so these two lines fail; they are dropped again in my-detection_3.py.
        ret, frame = camera.read()
        cv2.imshow("frame", frame)
        category = "book"
        pic = 'https://imgcdn.cna.com.tw/www/WebPhotos/800/20210427/1920x1280_581754849593.jpg'
        send_to_line(category, pic)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    if not camera.IsStreaming() or not display.IsStreaming():
        break
```

== my-detection_3.py ==

```python
import jetson.inference
import jetson.utils
import requests

def send_to_line(category='', pic=''):
    send = {
        'message': ("\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)),
        'imageThumbnail': pic,
        'imageFullsize': pic,
    }
    header = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"
    }
    r = requests.post("https://notify-api.line.me/api/notify", headers=header, data=send)
    print(r.text)  # {"status":200,"message":"ok"}

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.videoSource("/dev/video0")   # USB camera
display = jetson.utils.videoOutput("display://0")  # use 'my_video.mp4' for a file

while True:
    img = camera.Capture()
    detections = net.Detect(img)
    if len(detections) >= 1:
        category = "book"
        pic = 'https://imgcdn.cna.com.tw/www/WebPhotos/800/20210427/1920x1280_581754849593.jpg'
        send_to_line(category, pic)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    if not camera.IsStreaming() or not display.IsStreaming():
        break
```

== LINE_Notify_5 ==

```python
import jetson.inference
import jetson.utils
import requests
import cv2

def send_to_line(category, pic):
    params = {'message': "\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)}
    files = {'imageFile': open(pic, 'rb')}
    headers = {'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"}
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params, files=files)
    print(r.text)

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.videoSource("/dev/video0")   # USB camera
display = jetson.utils.videoOutput("display://0")  # use 'my_video.mp4' for a file
# camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)   # set the frame width to 1920
# camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)  # set the frame height to 1080
# cv2.namedWindow('image_win', flags=cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)

while True:
    img = camera.Capture()
    detections = net.Detect(img)
    if len(detections) >= 1:
        img_count = 1  # note: reset to 1 on every detection, so the saved file is always 1.png
        # frame = camera.read()
        cv2.imshow('image_win', img)  # refresh the image in the 'image_win' window
                                      # do not remove this, or no picture will be shown
        key = cv2.waitKey(1)          # wait up to 1 ms for a key event
        if key == ord('q'):
            break
        elif key == ord('c'):         # when 'c' is pressed, save the current frame
            cv2.imwrite("{}.png".format(img_count), img)  # write the image as <img_count>.png
            category = "microwave"
            pic = '/home/iamai2021/Desktop/{}.png'.format(img_count)
            send_to_line(category, pic)
            img_count += 1
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    if not camera.IsStreaming() or not display.IsStreaming():
        break

# cap.release()            # release the VideoCapture
# cv2.destroyAllWindows()  # destroy all windows
```

== LINE_Notify_6 ==

```python
import jetson.inference
import jetson.utils
import requests
import cv2

def send_to_line(category, pic):
    params = {'message': "\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)}
    files = {'imageFile': open(pic, 'rb')}
    headers = {'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"}
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params, files=files)
    print(r.text)

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.videoSource("/dev/video0")   # USB camera
display = jetson.utils.videoOutput("display://0")  # use 'my_video.mp4' for a file
# camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)   # set the frame width to 1920
# camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)  # set the frame height to 1080
# cv2.namedWindow('image_win', flags=cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)

img_count = 1
while True:
    img = camera.Capture()
    detections = net.Detect(img)
    # frame = display.Render(img)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    if len(detections) >= 1:
        # BUG: 'frame' is never assigned (the Render() line above that would set it is commented out),
        # so this call raises NameError; LINE_Notify_7 switches to videoOutput("file://...") instead.
        cv2.imwrite("{}.png".format(img_count), frame)  # write the image as <img_count>.png
        # frame = camera.read()
        # cv2.imshow('image_win', frame)  # refresh the image in the 'image_win' window
        #                                 # do not remove this, or no picture will be shown
        # key = cv2.waitKey(1)            # wait up to 1 ms for a key event
        # if key == ord('q'):
        #     break
        # elif key == ord('c'):           # when 'c' is pressed, save the current frame
        #     cv2.imwrite("{}.png".format(img_count), frame)  # write the image as <img_count>.png
        category = "microwave"
        pic = '/home/iamai2021/Desktop/{}.png'.format(img_count)
        send_to_line(category, pic)
        img_count += 1
    if not camera.IsStreaming() or not display.IsStreaming():
        break
```
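The `frame` problem above is what pushed the next version toward `videoOutput("file://...")`. An alternative sketch, assuming a jetson-utils build whose `cudaToNumpy()` accepts a `cudaImage` directly (not verified on this JetPack), would hand the captured frame to OpenCV instead; `save_frame` is a hypothetical helper:

```python
import cv2
import jetson.utils

def save_frame(img, img_count):
    """Write a cudaImage from camera.Capture() to <img_count>.png with OpenCV."""
    jetson.utils.cudaDeviceSynchronize()          # wait for the GPU to finish writing the frame
    array = jetson.utils.cudaToNumpy(img)         # map the CUDA image into a numpy array
    bgr = cv2.cvtColor(array, cv2.COLOR_RGB2BGR)  # videoSource frames are RGB, OpenCV expects BGR
    cv2.imwrite("{}.png".format(img_count), bgr)
```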

== LINE_Notify_7 ==

```python
import jetson.inference
import jetson.utils
import requests

def send_to_line(category, pic):
    params = {'message': "\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)}
    files = {'imageFile': open(pic, 'rb')}
    headers = {'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"}
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params, files=files)
    print(r.text)

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.videoSource("/dev/video0")
display = jetson.utils.videoOutput("file://my_image.jpg")  # each Render() call rewrites my_image.jpg

while True:
    img = camera.Capture()
    detections = net.Detect(img)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    if len(detections) >= 1:
        category = "microwave"
        pic = '/home/iamai2021/Desktop/my_image.jpg'
        send_to_line(category, pic)
    if not camera.IsStreaming() or not display.IsStreaming():
        break
```
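LINE_Notify_7 notifies on any detection and always reports the hardcoded category "microwave". A hedged refinement of the same loop (same `net`, `camera`, `display`, and `send_to_line` as above) would map each detection's `ClassID` to its label with `net.GetClassDesc()` and only notify for classes of interest; `TARGET_CLASSES` and its contents are assumptions, not part of the original:

```python
TARGET_CLASSES = {"microwave", "oven"}  # assumed set of COCO labels worth a fire alert

while True:
    img = camera.Capture()
    detections = net.Detect(img)
    display.Render(img)  # rewrites my_image.jpg
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    for detection in detections:
        label = net.GetClassDesc(detection.ClassID)  # numeric class ID -> COCO label
        if label in TARGET_CLASSES:
            send_to_line(label, '/home/iamai2021/Desktop/my_image.jpg')
            break  # one notification per frame is enough
    if not camera.IsStreaming() or not display.IsStreaming():
        break
```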

## Final version demo

== LINE_Notify_7 ==

```python
import jetson.inference
import jetson.utils
import requests

def send_to_line(category, pic):
    params = {'message': "\n嗨,你好\n這是 \"火災警示\" 通知\n請注意!!\n偵測到 \"{0}\" 溫度過高".format(category)}
    files = {'imageFile': open(pic, 'rb')}
    headers = {'Authorization': 'Bearer ' + "gOljwt8lLAnbpWSIXUk3RJP34Syw6NQADBtorl2GoCP"}
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params, files=files)
    print(r.text)

net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.videoSource("/dev/video0")
display = jetson.utils.videoOutput("file://my_image.jpg")

while True:
    img = camera.Capture()
    detections = net.Detect(img)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    if len(detections) >= 1:
        category = "microwave"
        pic = '/home/iamai2021/Desktop/my_image.jpg'
        send_to_line(category, pic)
    if not camera.IsStreaming() or not display.IsStreaming():
        break
```

== Run output ==

```
# Run output

iamai2021@iamai2021:~/Desktop$ python3 LINE_Notify_5.py
/usr/lib/python3/dist-packages/requests/__init__.py:80: RequestsDependencyWarning: urllib3 (1.26.6) or chardet (3.0.4) doesn't match a supported version!
  RequestsDependencyWarning)
jetson.inference -- detectNet loading build-in network 'ssd-mobilenet-v2'

detectNet -- loading detection network model from:
          -- model        networks/SSD-Mobilenet-v2/ssd_mobilenet_v2_coco.uff
          -- input_blob   'Input'
          -- output_blob  'NMS'
          -- output_count 'NMS_1'
          -- class_labels networks/SSD-Mobilenet-v2/ssd_coco_labels.txt
          -- threshold    0.500000
          -- batch_size   1

[TRT]    TensorRT version 7.1.3
[TRT]    loading NVIDIA plugins...
[TRT]    Registered plugin creator - ::GridAnchor_TRT version 1
[TRT]    Registered plugin creator - ::NMS_TRT version 1
[TRT]    Registered plugin creator - ::Reorg_TRT version 1
[TRT]    Registered plugin creator - ::Region_TRT version 1
[TRT]    Registered plugin creator - ::Clip_TRT version 1
[TRT]    Registered plugin creator - ::LReLU_TRT version 1
[TRT]    Registered plugin creator - ::PriorBox_TRT version 1
[TRT]    Registered plugin creator - ::Normalize_TRT version 1
[TRT]    Registered plugin creator - ::RPROI_TRT version 1
[TRT]    Registered plugin creator - ::BatchedNMS_TRT version 1
[TRT]    Could not register plugin creator -  ::FlattenConcat_TRT version 1
[TRT]    Registered plugin creator - ::CropAndResize version 1
[TRT]    Registered plugin creator - ::DetectionLayer_TRT version 1
[TRT]    Registered plugin creator - ::Proposal version 1
[TRT]    Registered plugin creator - ::ProposalLayer_TRT version 1
[TRT]    Registered plugin creator - ::PyramidROIAlign_TRT version 1
[TRT]    Registered plugin creator - ::ResizeNearest_TRT version 1
[TRT]    Registered plugin creator - ::Split version 1
[TRT]    Registered plugin creator - ::SpecialSlice_TRT version 1
[TRT]    Registered plugin creator - ::InstanceNormalization_TRT version 1
[TRT]    detected model format - UFF  (extension '.uff')
[TRT]    desired precision specified for GPU: FASTEST
[TRT]    requested fasted precision for device GPU without providing valid calibrator, disabling INT8
[TRT]    native precisions detected for GPU:  FP32, FP16
[TRT]    selecting fastest native precision for GPU:  FP16
[TRT]    attempting to open engine cache file /usr/local/bin/networks/SSD-Mobilenet-v2/ssd_mobilenet_v2_coco.uff.1.1.7103.GPU.FP16.engine
[TRT]    loading network plan from engine cache... /usr/local/bin/networks/SSD-Mobilenet-v2/ssd_mobilenet_v2_coco.uff.1.1.7103.GPU.FP16.engine
[TRT]    device GPU, loaded /usr/local/bin/networks/SSD-Mobilenet-v2/ssd_mobilenet_v2_coco.uff
[TRT]    Using an engine plan file across different models of devices is not recommended and is likely to affect performance or even cause errors.
[TRT]    Deserialize required 7629430 microseconds.
[TRT]    
[TRT]    CUDA engine context initialized on device GPU:
[TRT]       -- layers       119
[TRT]       -- maxBatchSize 1
[TRT]       -- workspace    0
[TRT]       -- deviceMemory 35486720
[TRT]       -- bindings     3
[TRT]       binding 0
                -- index   0
                -- name    'Input'
                -- type    FP32
                -- in/out  INPUT
                -- # dims  3
                -- dim #0  3 (SPATIAL)
                -- dim #1  300 (SPATIAL)
                -- dim #2  300 (SPATIAL)
[TRT]       binding 1
                -- index   1
                -- name    'NMS'
                -- type    FP32
                -- in/out  OUTPUT
                -- # dims  3
                -- dim #0  1 (SPATIAL)
                -- dim #1  100 (SPATIAL)
                -- dim #2  7 (SPATIAL)
[TRT]       binding 2
                -- index   2
                -- name    'NMS_1'
                -- type    FP32
                -- in/out  OUTPUT
                -- # dims  3
                -- dim #0  1 (SPATIAL)
                -- dim #1  1 (SPATIAL)
                -- dim #2  1 (SPATIAL)
[TRT]    
[TRT]    binding to input 0 Input  binding index:  0
[TRT]    binding to input 0 Input  dims (b=1 c=3 h=300 w=300) size=1080000
[TRT]    binding to output 0 NMS  binding index:  1
[TRT]    binding to output 0 NMS  dims (b=1 c=1 h=100 w=7) size=2800
[TRT]    binding to output 1 NMS_1  binding index:  2
[TRT]    binding to output 1 NMS_1  dims (b=1 c=1 h=1 w=1) size=4
[TRT]    
[TRT]    device GPU, /usr/local/bin/networks/SSD-Mobilenet-v2/ssd_mobilenet_v2_coco.uff initialized.
[TRT]    W = 7  H = 100  C = 1
[TRT]    detectNet -- maximum bounding boxes:  100
[TRT]    detectNet -- loaded 91 class info entries
[TRT]    detectNet -- number of object classes:  91
[gstreamer] initialized gstreamer, version 1.14.5.0
[gstreamer] gstCamera -- attempting to create device v4l2:///dev/video0
[gstreamer] gstCamera -- found v4l2 device: UVC Camera (046d:0825)
[gstreamer] v4l2-proplist, device.path=(string)/dev/video0, udev-probed=(boolean)false, device.api=(string)v4l2, v4l2.device.driver=(string)uvcvideo, v4l2.device.card=(string)"UVC\ Camera\ \(046d:0825\)", v4l2.device.bus_info=(string)usb-70090000.xusb-2, v4l2.device.version=(uint)264649, v4l2.device.capabilities=(uint)2216689665, v4l2.device.device_caps=(uint)69206017;
[gstreamer] gstCamera -- found 38 caps for v4l2 device /dev/video0
[gstreamer] [0] video/x-raw, format=(string)YUY2, width=(int)1280, height=(int)960, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 15/2, 5/1 };
[gstreamer] [1] video/x-raw, format=(string)YUY2, width=(int)1280, height=(int)720, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 15/2, 5/1 };
[gstreamer] [2] video/x-raw, format=(string)YUY2, width=(int)1184, height=(int)656, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 10/1, 5/1 };
[gstreamer] [3] video/x-raw, format=(string)YUY2, width=(int)960, height=(int)720, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 10/1, 5/1 };
[gstreamer] [4] video/x-raw, format=(string)YUY2, width=(int)1024, height=(int)576, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 10/1, 5/1 };
[gstreamer] [5] video/x-raw, format=(string)YUY2, width=(int)960, height=(int)544, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 15/1, 10/1, 5/1 };
[gstreamer] [6] video/x-raw, format=(string)YUY2, width=(int)800, height=(int)600, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [7] video/x-raw, format=(string)YUY2, width=(int)864, height=(int)480, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [8] video/x-raw, format=(string)YUY2, width=(int)800, height=(int)448, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [9] video/x-raw, format=(string)YUY2, width=(int)752, height=(int)416, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [10] video/x-raw, format=(string)YUY2, width=(int)640, height=(int)480, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [11] video/x-raw, format=(string)YUY2, width=(int)640, height=(int)360, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [12] video/x-raw, format=(string)YUY2, width=(int)544, height=(int)288, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [13] video/x-raw, format=(string)YUY2, width=(int)432, height=(int)240, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [14] video/x-raw, format=(string)YUY2, width=(int)352, height=(int)288, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [15] video/x-raw, format=(string)YUY2, width=(int)320, height=(int)240, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [16] video/x-raw, format=(string)YUY2, width=(int)320, height=(int)176, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [17] video/x-raw, format=(string)YUY2, width=(int)176, height=(int)144, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [18] video/x-raw, format=(string)YUY2, width=(int)160, height=(int)120, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [19] image/jpeg, width=(int)1280, height=(int)960, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [20] image/jpeg, width=(int)1280, height=(int)720, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [21] image/jpeg, width=(int)1184, height=(int)656, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [22] image/jpeg, width=(int)960, height=(int)720, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [23] image/jpeg, width=(int)1024, height=(int)576, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [24] image/jpeg, width=(int)960, height=(int)544, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [25] image/jpeg, width=(int)800, height=(int)600, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [26] image/jpeg, width=(int)864, height=(int)480, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [27] image/jpeg, width=(int)800, height=(int)448, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [28] image/jpeg, width=(int)752, height=(int)416, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [29] image/jpeg, width=(int)640, height=(int)480, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [30] image/jpeg, width=(int)640, height=(int)360, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [31] image/jpeg, width=(int)544, height=(int)288, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [32] image/jpeg, width=(int)432, height=(int)240, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [33] image/jpeg, width=(int)352, height=(int)288, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [34] image/jpeg, width=(int)320, height=(int)240, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [35] image/jpeg, width=(int)320, height=(int)176, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [36] image/jpeg, width=(int)176, height=(int)144, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] [37] image/jpeg, width=(int)160, height=(int)120, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction){ 30/1, 25/1, 20/1, 15/1, 10/1, 5/1 };
[gstreamer] gstCamera -- selected device profile:  codec=mjpeg format=unknown width=1280 height=720
[gstreamer] gstCamera pipeline string:
[gstreamer] v4l2src device=/dev/video0 ! image/jpeg, width=(int)1280, height=(int)720 ! jpegdec ! video/x-raw ! appsink name=mysink
[gstreamer] gstCamera successfully created device v4l2:///dev/video0
[video]  created gstCamera from v4l2:///dev/video0
------------------------------------------------
gstCamera video options:
------------------------------------------------
  -- URI: v4l2:///dev/video0
     - protocol:  v4l2
     - location:  /dev/video0
  -- deviceType: v4l2
  -- ioType:     input
  -- codec:      mjpeg
  -- width:      1280
  -- height:     720
  -- frameRate:  30.000000
  -- bitRate:    0
  -- numBuffers: 4
  -- zeroCopy:   true
  -- flipMethod: none
  -- loop:       0
  -- rtspLatency 2000
------------------------------------------------
[video]  created imageWriter from file://my_image.jpg
------------------------------------------------
imageWriter video options:
------------------------------------------------
  -- URI: file://my_image.jpg
     - protocol:  file
     - location:  my_image.jpg
     - extension: jpg
  -- deviceType: file
  -- ioType:     output
  -- codec:      unknown
  -- width:      0
  -- height:     0
  -- frameRate:  0.000000
  -- bitRate:    0
  -- numBuffers: 4
  -- zeroCopy:   true
  -- flipMethod: none
  -- loop:       0
  -- rtspLatency 2000
------------------------------------------------
[OpenGL] glDisplay -- X screen 0 resolution:  1920x1080
[OpenGL] glDisplay -- X window resolution:    1920x1080
[OpenGL] glDisplay -- display device initialized (1920x1080)
[video]  created glDisplay from display://0
------------------------------------------------
glDisplay video options:
------------------------------------------------
  -- URI: display://0
     - protocol:  display
     - location:  0
  -- deviceType: display
  -- ioType:     output
  -- codec:      raw
  -- width:      1920
  -- height:     1080
  -- frameRate:  0.000000
  -- bitRate:    0
  -- numBuffers: 4
  -- zeroCopy:   true
  -- flipMethod: none
  -- loop:       0
  -- rtspLatency 2000
------------------------------------------------
[gstreamer] opening gstCamera for streaming, transitioning pipeline to GST_STATE_PLAYING
[gstreamer] gstreamer changed state from NULL to READY ==> mysink
[gstreamer] gstreamer changed state from NULL to READY ==> capsfilter1
[gstreamer] gstreamer changed state from NULL to READY ==> jpegdec0
[gstreamer] gstreamer changed state from NULL to READY ==> capsfilter0
[gstreamer] gstreamer changed state from NULL to READY ==> v4l2src0
[gstreamer] gstreamer changed state from NULL to READY ==> pipeline0
[gstreamer] gstreamer changed state from READY to PAUSED ==> capsfilter1
[gstreamer] gstreamer changed state from READY to PAUSED ==> jpegdec0
[gstreamer] gstreamer changed state from READY to PAUSED ==> capsfilter0
[gstreamer] gstreamer stream status CREATE ==> src
[gstreamer] gstreamer changed state from READY to PAUSED ==> v4l2src0
[gstreamer] gstreamer changed state from READY to PAUSED ==> pipeline0
[gstreamer] gstreamer stream status ENTER ==> src
[gstreamer] gstreamer message new-clock ==> pipeline0
[gstreamer] gstreamer changed state from PAUSED to PLAYING ==> capsfilter1
[gstreamer] gstreamer changed state from PAUSED to PLAYING ==> jpegdec0
[gstreamer] gstreamer changed state from PAUSED to PLAYING ==> capsfilter0
[gstreamer] gstreamer changed state from PAUSED to PLAYING ==> v4l2src0
[gstreamer] gstreamer message stream-start ==> pipeline0
[gstreamer] gstCamera -- onPreroll
[gstreamer] gstCamera -- map buffer size was less than max size (1382400 vs 1382407)
[gstreamer] gstCamera recieve caps:  video/x-raw, format=(string)I420, width=(int)1280, height=(int)720, interlace-mode=(string)progressive, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, pixel-aspect-ratio=(fraction)1/1, chroma-site=(string)mpeg2, colorimetry=(string)1:4:0:0, framerate=(fraction)30/1
[gstreamer] gstCamera -- recieved first frame, codec=mjpeg format=i420 width=1280 height=720 size=1382407
RingBuffer -- allocated 4 buffers (1382407 bytes each, 5529628 bytes total)
[gstreamer] gstreamer changed state from READY to PAUSED ==> mysink
[gstreamer] gstreamer message async-done ==> pipeline0
[gstreamer] gstreamer changed state from PAUSED to PLAYING ==> mysink
[gstreamer] gstreamer changed state from PAUSED to PLAYING ==> pipeline0
RingBuffer -- allocated 4 buffers (2764800 bytes each, 11059200 bytes total)
[OpenGL] glDisplay -- set the window size to 1280x720
[OpenGL] creating 1280x720 texture (GL_RGB8 format, 2764800 bytes)
[cuda]   registered openGL texture for interop access (1280x720, GL_RGB8, 2764800 bytes)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[gstreamer] gstreamer message qos ==> v4l2src0
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
{"status":200,"message":"ok"}
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
{"status":200,"message":"ok"}
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
{"status":200,"message":"ok"}
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
{"status":200,"message":"ok"}
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[OpenGL] glDisplay -- the window has been closed
[image] saved 'my_image.jpg'  (1280x720, 3 channels)
[gstreamer] gstCamera -- stopping pipeline, transitioning to GST_STATE_NULL
[gstreamer] gstCamera -- onPreroll
[gstreamer] gstCamera -- pipeline stopped
```
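The log shows `my_image.jpg` being rewritten on every frame and `{"status":200,"message":"ok"}` coming back over and over, i.e. one notification per detection-bearing frame. Since a LINE Notify token only allows a limited number of calls per hour, a simple cooldown around `send_to_line` would help; the 30-second interval below is an assumed value, not something taken from these runs:

```python
import time

NOTIFY_COOLDOWN_S = 30.0  # assumed minimum gap between notifications
last_notify = 0.0

def maybe_notify(category, pic):
    """Call send_to_line() (as defined in LINE_Notify_7) at most once per NOTIFY_COOLDOWN_S seconds."""
    global last_notify
    now = time.monotonic()
    if now - last_notify >= NOTIFY_COOLDOWN_S:
        send_to_line(category, pic)
        last_notify = now
```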

== Actual results ==



## Conclusion

LINE Notify image and text transmission: DONE!

## All reference sites

LINE_Notify

LINE_Robot

Requests

Bearer Token

Keras

Webhook

Tensorflow

shell

Parser/Argparse

WebDriver API

OpenCV

jetson.utils

Error messages

nvgstcapture-1.0

NVIDIA

jetson inference