# 測試 deepstream-test
###### tags: `NVIDIA` `Deepstream` `勤美`
## 鏡頭串流介接
```
rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4
docker restart deepstream_6.0
gst-launch-1.0 playbin uri=rtsp://admin:SKY123456@192.168.111.87:7070
gst-launch-1.0 playbin uri=rtsp://192.168.50.28:9988
gst-launch-1.0 rtspsrc location=rtsp://admin:123456@192.168.51.85:7070 ! decodebin ! autovideosink
python3 run.py rtsp://admin:123456@192.168.51.85:7070 --codec h264
```
## 鏡頭資料
```
https://docs.google.com/spreadsheets/d/194LFHQd9zOpshwrMDRn48q52TS5Ak6_n/edit#gid=524533705
```
## 必要套件
```
apt-get update && \
apt-get install -y libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
libgstrtspserver-1.0-dev libx11-dev libjson-glib-dev
apt install gcc
```
# CUDA版本
```
nvcc -V
```
# 自動測試
```
export deepstreamver=deepstream-6.0
export nvccver=11.4
export deepstreamver=deepstream-5.0
export nvccver=10.2
export objcount=1
for i in {1..4}
do
echo "$i"
cd /opt/nvidia/deepstream/$deepstreamver/sources/apps/sample_apps/deepstream-test$i && \
CUDA_VER=$nvccver make
done
```
# 編譯對應版本
```
sudo CUDA_VER=11.4 make
# 或者
export CUDA_VER=11.1 && \
sudo make
```
# 無法顯示螢幕時 於容器內
```
export DISPLAY=:0
```
# 遠端 ssh 後 無法顯示螢幕時
```
xhost +
echo $DISPLAY
# 將得到的代號寫入 ssh 端
export DISPLAY=:1
```
# 找不到 libnvvpi.so.1 時
```
ln /opt/nvidia/deepstream/deepstream-6.0/lib/libnvvpi.so.1.1.12 /opt/nvidia/deepstream/deepstream-6.0/lib/libnvvpi.so.1
```
# No such file or directory #include <gst/gst.h>
```
apt install libjson-glib-dev
```
# gst/rtsp-server/rtsp-server.h: No such file or directory
```
apt install libgstrtspserver-1.0-dev
```
# set ver
```
export deepstreamver=deepstream-5.0
export nvccver=11.1
```
# 測試 test1
```
cd /opt/nvidia/deepstream/deepstream-6.0/sources/apps/sample_apps/deepstream-test1
CUDA_VER=11.4 make && ./deepstream-test1-app /opt/nvidia/deepstream/deepstream-6.0//samples/streams/sample_720p.h264
CUDA_VER=$nvccver make && \
./deepstream-test1-app /opt/nvidia/deepstream/$deepstreamver/samples/streams/sample_720p.h264
```
# 測試 test2
```
CUDA_VER=$nvccver make && \
./deepstream-test2-app /opt/nvidia/deepstream/$deepstreamver/samples/streams/sample_720p.h264
```
# 測試 test3
```
CUDA_VER=$nvccver make && \
./deepstream-test3-app rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4 rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4 rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4
```
# test 4
```
CUDA_VER=$nvccver make && \
./deepstream-test4-app -i /opt/nvidia/deepstream/$deepstreamver/samples/streams/sample_720p.h264 -p /opt/nvidia/deepstream/$deepstreamver/lib/libnvds_kafka_proto.so --conn-str="192.168.50.100;9092;OjDetect"
```
# test 5
```
CUDA_VER=$nvccver make && \
./deepstream-test5-app -c configs/test5_config_file_src_infer.txt -i /opt/nvidia/deepstream/$deepstreamver/samples/streams/sample_720p.h264 -p /opt/nvidia/deepstream/$deepstreamver/lib/libnvds_kafka_proto.so --conn-str="192.168.50.100;9092;run"
```
# 人流
model.sh 修改
```
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/tao/peoplenet/versions/pruned_v2.0/zip -O peoplenet.zip
```
攝影機資料紀錄 dstest5_msgconv_sample_config.txt
```
[sensor0]
enable=1
type=TESTTYPE
id=HWY_20_AND_LOCUST__EBA__4_11_2018_4_59_59_508_AM_UTC-07_00
location=45.293701447;-75.8303914499;48.1557479338
description=GOOD description
coordinate=5.2;10.1;11.2
```
```
./deepstream-test5-analytics -c config/test5_config_file_src_infer_tlt.txt
```
## 錯誤 32 | #include <cuda_runtime_api.h>
檢查編譯命令,應該是 CUDA_VER=11.4 make
對應 data['place']
## 必要修改 參考 https://github.com/NVIDIA-AI-IOT/deepstream-occupancy-analytics/issues/48
```
Hi @Pauli343,
As indicated in @Acedev003 comment (pdf slides, page 28), I edited the sources/includes/nvdsmeta_schema.h and added the lines below to the NvDsEventMsgMeta struct :
guint source_id;
guint occupancy;
guint lccum_cnt_entry;
guint lccum_cnt_exit;
} NvDsEventMsgMeta;
```
https://on24static.akamaized.net/event/25/37/22/4/rt/1/documents/resourceList1598389152860/dstltwebinarmonikav31598389151693.pdf
page 26~29
# docker
```
xhost +
sudo docker run -it --rm --net=host --runtime nvidia -e DISPLAY=$DISPLAY -w /opt/nvidia/deepstream/deepstream-6.1 -v /tmp/.X11-unix/:/tmp/.X11-unix nvcr.io/nvidia/deepstream-l4t:5.0.1-20.09-samples
/opt/nvidia/deepstream/deepstream/user_additional_install.sh
```
# 手動安裝TRT
```
wget https://ftpweb.intemotech.com/trt/nv-tensorrt-local-repo-ubuntu1804-8.5.1-cuda-10.2_1.0-1_amd64.deb
sudo dpkg -i nv-tensorrt-local-repo-ubuntu1804-8.5.1-cuda-10.2_1.0-1_amd64.deb
sudo apt update
sudo apt install tensorrt
```
# 自定義格式說明
https://github.com/bug-developer021/deepstream_python_nvdsanalytics_to_kafka/blob/main/README_CN.md#%E5%9C%A8nvdseventmsgmeta%E7%BB%93%E6%9E%84%E9%87%8C%E6%B7%BB%E5%8A%A0analytics-msg-meta
# 測試基礎輸出(未知錯誤)
```
cd /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps
git clone https://github.com/NVIDIA-AI-IOT/deepstream-occupancy-analytics.git
cd deepstream-occupancy-analytics
```
model.sh 修改
```
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/tao/peoplenet/versions/pruned_v2.0/zip -O peoplenet.zip
```
下載
```
cd config
./model.sh
```
git 轉為6.0
```
```
修改 /opt/nvidia/deepstream/deepstream/sources/includes/nvdsmeta_schema.h
```
guint source_id;
guint occupancy;
guint lccum_cnt_entry;
guint lccum_cnt_exit;
} NvDsEventMsgMeta;
```
編譯
```
CUDA_VER=11.4 make
```
修改 test5_config_file_src_infer_tlt.txt
```
msg-broker-conn-str=192.168.50.100;9092;quickstart-events_new
```
更換kafka傳送
```
mv /opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_msgconv.so /opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_msgconv.sotmp
cp /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-occupancy-analytics/bin/x86/libnvds_msgconv.so /opt/nvidia/deepstream/deepstream-6.1/lib/libnvds_msgconv.so
```
執行
```
./deepstream-test5-analytics -c config/test5_config_file_src_infer_tlt.txt
```
# 測試 編譯後執行
# 測試 減少多餘輸出(於 eventmsg_payload.cpp 的 generate_event_message 中)
## 關閉videoPath
```
// json_object_set_object_member (rootObj, "sensor", sensorObj);
// json_object_set_object_member (rootObj, "analyticsModule", analyticsObj);
// json_object_set_object_member (rootObj, "object", objectObj);
// if (meta->videoPath)
// json_object_set_string_member (rootObj, "videoPath", meta->videoPath);
// else
// json_object_set_string_member (rootObj, "videoPath", "");
cd /opt/nvidia/deepstream/deepstream/sources/libs/nvmsgconv && make && cp libnvds_msgconv.so /opt/nvidia/deepstream/deepstream/lib/libnvds_msgconv.so
```
# 加入一個自定義格式
```
json_object_set_string_member (analyticsObj, "version", dsObj->version.c_str());
json_object_set_string_member (analyticsObj, "testadddatatest", "testdata"); // add
```
# 備份所有修改
```
/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-occupancy-analytics/deepstream_nvdsanalytics_meta.cpp
/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-occupancy-analytics/deepstream_test5_app_main.c
/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-occupancy-analytics/config/model.sh
/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-occupancy-analytics/config/config_nvdsanalytics.txt
/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-occupancy-analytics/config/dstest5_msgconv_sample_config.txt
/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-occupancy-analytics/config/test5_config_file_src_infer_tlt.txt
/opt/nvidia/deepstream/deepstream-6.0/sources/libs/nvmsgconv/deepstream_schema/eventmsg_payload.cpp
/opt/nvidia/deepstream/deepstream/sources/includes/nvdsmeta_schema.h
```
# 還原備份
```
cd /opt/nvidia/deepstream/deepstream-6.0/sources/apps/sample_apps
wget https://ftpweb.intemotech.com/deepstream/deepstream6.zip
unzip deepstream6.zip
cd deepstream6
chmod +x run.sh
./run.sh
```
# 備份出現問題,這邊會順便重新嘗試 agx 6.1 版本
```
xhost +
sudo docker run -it --rm --net=host --runtime nvidia -e DISPLAY=$DISPLAY -v /tmp/.X11-unix/:/tmp/.X11-unix nvcr.io/nvidia/deepstream-l4t:6.1.1-triton
```
# 安裝
```
cd /opt/nvidia/deepstream/deepstream &&\
./user_additional_install.sh
```
# 測試
```
cd /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test1
CUDA_VER=11.4 make
./deepstream-test1-app /opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.h264
export DISPLAY=:1
```
# 測試
```
cd /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/
git clone https://github.com/NVIDIA-AI-IOT/deepstream-occupancy-analytics.git -b ds_6.0
cd deepstream-occupancy-analytics/
```
## 必要修改 參考 https://github.com/NVIDIA-AI-IOT/deepstream-occupancy-analytics/issues/48
```
Hi @Pauli343,
As indicated in @Acedev003 comment (pdf slides, page 28), I edited the /opt/nvidia/deepstream/deepstream/sources/includes/nvdsmeta_schema.h and added the lines below to the NvDsEventMsgMeta struct :
guint source_id;
guint occupancy;
guint lccum_cnt_entry;
guint lccum_cnt_exit;
} NvDsEventMsgMeta;
```
# 修改 /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-occupancy-analytics/Makefile
```
NVDS_VERSION:=6.0
to
NVDS_VERSION:=6.1
```
# 修改 /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-occupancy-analytics/config/test5_config_file_src_infer_tlt.txt
```
uri=file:///opt/nvidia/deepstream/deepstream-6.0/samples/streams/sample_1080p_h264.mp4
to
uri=file:///opt/nvidia/deepstream/deepstream-6.1/samples/streams/sample_1080p_h264.mp4
```
# 修改 /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-occupancy-analytics/config/model.sh
```
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/tlt_peoplenet/versions/pruned_v2.0/zip -O peoplenet.zip
to
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/tao/peoplenet/versions/pruned_v2.0/zip -O peoplenet.zip
```
# 修改 /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-occupancy-analytics/config/test5_config_file_src_infer_tlt.txt
```
msg-broker-conn-str=localhost;9092;quickstart-events
to
msg-broker-conn-str=192.168.50.100;9092;quickstart-events
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_kafka_proto.so
to
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream-6.1/lib/libnvds_kafka_proto.so
ll-lib-file=/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_nvmultiobjecttracker.so
to
ll-lib-file=/opt/nvidia/deepstream/deepstream-6.1/lib/libnvds_nvmultiobjecttracker.so
```
# 執行
```
CUDA_VER=11.4 make && \
./deepstream-test5-analytics -c config/test5_config_file_src_infer_tlt.txt
```
# 修改 /opt/nvidia/deepstream/deepstream-6.1/sources/libs/nvmsgconv/deepstream_schema/eventmsg_payload.cpp
```
// analytics object
analyticsObj = json_object_new ();
json_object_set_string_member (analyticsObj, "id", dsObj->id.c_str());
json_object_set_string_member (analyticsObj, "description", dsObj->desc.c_str());
json_object_set_string_member (analyticsObj, "source", dsObj->source.c_str());
json_object_set_string_member (analyticsObj, "version", dsObj->version.c_str());
return analyticsObj;
to
// analytics object
analyticsObj = json_object_new ();
// json_object_set_string_member (analyticsObj, "id", dsObj->id.c_str());
// json_object_set_string_member (analyticsObj, "description", dsObj->desc.c_str());
// json_object_set_string_member (analyticsObj, "source", dsObj->source.c_str());
// json_object_set_string_member (analyticsObj, "version", dsObj->version.c_str());
// json_object_set_string_member (analyticsObj, "testadddatatest", "testdata");
json_object_set_double_member (analyticsObj, "source_id", meta->source_id);
json_object_set_double_member (analyticsObj, "Entry", meta->lccum_cnt_entry);
json_object_set_double_member (analyticsObj, "Exit", meta->lccum_cnt_exit);
return analyticsObj;
// root object
rootObj = json_object_new ();
json_object_set_string_member (rootObj, "messageid", msgIdStr);
json_object_set_string_member (rootObj, "mdsversion", "1.0");
json_object_set_string_member (rootObj, "@timestamp", meta->ts);
json_object_set_object_member (rootObj, "place", placeObj);
json_object_set_object_member (rootObj, "sensor", sensorObj);
json_object_set_object_member (rootObj, "analyticsModule", analyticsObj);
json_object_set_object_member (rootObj, "object", objectObj);
json_object_set_object_member (rootObj, "event", eventObj);
if (meta->videoPath)
json_object_set_string_member (rootObj, "videoPath", meta->videoPath);
else
json_object_set_string_member (rootObj, "videoPath", "");
to
// root object
rootObj = json_object_new ();
// json_object_set_string_member (rootObj, "messageid", msgIdStr);
// json_object_set_string_member (rootObj, "mdsversion", "1.0");
json_object_set_string_member (rootObj, "@timestamp", meta->ts);
// json_object_set_object_member (rootObj, "place", placeObj);
// json_object_set_object_member (rootObj, "sensor", sensorObj);
json_object_set_object_member (rootObj, "analyticsModule", analyticsObj);
// json_object_set_object_member (rootObj, "object", objectObj);
// json_object_set_object_member (rootObj, "event", eventObj);
// if (meta->videoPath)
// json_object_set_string_member (rootObj, "videoPath", meta->videoPath);
// else
// json_object_set_string_member (rootObj, "videoPath", "");
```
# 編譯修改內容
```
cd /opt/nvidia/deepstream/deepstream/sources/libs/nvmsgconv && make && cp libnvds_msgconv.so /opt/nvidia/deepstream/deepstream/lib/libnvds_msgconv.so
```
# 重新編譯執行
```
CUDA_VER=11.4 make && \
./deepstream-test5-analytics -c config/test5_config_file_src_infer_tlt.txt
```
# 改為 RTSP
```
```
# 測試串流
```
cd /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test3 && \
CUDA_VER=11.4 make && \
./deepstream-test3-app rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4
./deepstream-test3-app rtsp://admin:123456@192.168.50.85:7070
```
# 還原
```
git clone https://github.com/NVIDIA-AI-IOT/deepstream-occupancy-analytics.git -b ds_6.0
wget https://ftpweb.intemotech.com/deepstream/deepstream6.1.1.zip
unzip deepstream6.1.1.zip
cd deepstream6.1.1
sudo ./run.sh
cd ../deepstream-occupancy-analytics/config
./model.sh
cd /opt/nvidia/deepstream/deepstream-6.1/sources/libs/nvmsgconv && make && cp libnvds_msgconv.so /opt/nvidia/deepstream/deepstream/lib/libnvds_msgconv.so
cd /opt/nvidia/deepstream/deepstream-6.1/sources/apps/sample_apps/deepstream-occupancy-analytics
CUDA_VER=11.4 make && \
./deepstream-test5-analytics -c config/test5_config_file_src_infer_tlt.txt
```
# 串流播放發生延遲時
```
[sourcex]
sync=0
[streammux]
live-source=1
```
# OOM 修復動作重新修改部分
## 主要修正檔案 /opt/nvidia/deepstream/deepstream/sources/libs/nvmsgconv/deepstream_schema/eventmsg_payload.cpp
gchar* generate_event_message (void *privData, NvDsEventMsgMeta *meta)
關閉大量無用訊息並且記錄人數,若總數不變則不回傳節省流量
```
cd /opt/nvidia/deepstream/deepstream/sources/libs/nvmsgconv/deepstream_schema/
rm eventmsg_payload.cpp
wget https://ftpweb.intemotech.com/deepstream/eventmsg_payload.cpp
```
# 全速運作
```
sudo nvpmodel -m 0
sudo /usr/bin/jetson_clocks
```
# 測試年齡性別
```
https://github.com/Ryoyo-NV/Gaze-Analysis-System/tree/d5e36feaf1c38ac12b1bf548fad10f4b2c15a579
https://github.com/ddasdkimo/Gaze-Analysis-System.git
```
## 執行
```
git clone https://github.com/ddasdkimo/Gaze-Analysis-System.git
cd Gaze-Analysis-System
```
## 修改 setup.sh
```
#!/bin/bash
# setup.sh for the Gaze-Analysis-System demo on Jetson (DeepStream 6.x).
# Installs system dependencies, builds pyds and the DeepStream custom
# libraries, downloads the TAO models from NGC and converts the gaze
# model to a TensorRT engine.
set -Eeu   # -E (errtrace) so the ERR trap also fires inside functions
trap catch ERR
trap finally EXIT

CUDA_VER=11.4
GAZE_DIR=$(pwd)
DEEPSTREAM_DIR=/opt/nvidia/deepstream/deepstream/
WORKSPACE_SIZE=2000000000
TAO_CONVERTER_URI=https://developer.nvidia.com/jp46-20210820t231431z-001zip
TEGRA_ID=33

# Called by the ERR trap on any failing command.
function catch {
  echo "Setup failed. Please check error messages."
}

# Called by the EXIT trap on every exit path.
function finally {
  echo "exit."
  cd "$GAZE_DIR"
}

# CHECK EXIST sudo COMMAND
# Wrapper: prepend sudo only when not already running as root.
sudo ()
{
  [[ $EUID = 0 ]] || set -- command sudo "$@"
  "$@"
}

# CHECK DEEPSTREAM INSTALLATION
echo "Checking DeepStream installation..."
if [ ! -f "$DEEPSTREAM_DIR/version" ]; then
  echo "Installing DeepStream SDK..."
  sudo apt install -y deepstream-6.0
fi
echo "done."
echo

# INSTALL DEPENDENCIES
echo "Installing dependencies..."
sudo apt update
# NOTE: a space is required before each trailing backslash; without it the
# continued lines are joined into a single bogus package name
# (e.g. "libgstreamer1.0-devlibgstreamer-plugins-base1.0-dev").
sudo apt install -y gstreamer1.0-tools gstreamer1.0-alsa gstreamer1.0-plugins-base gstreamer1.0-plugins-good \
  gstreamer1.0-plugins-bad gstreamer1.0-plugins-ugly gstreamer1.0-libav libgstreamer1.0-dev \
  libgstreamer-plugins-base1.0-dev libgstreamer-plugins-good1.0-dev libgstreamer-plugins-bad1.0-dev \
  python3-dev python-gi-dev python3-pip git libgirepository1.0-dev libcairo2-dev apt-transport-https \
  ca-certificates cmake libjpeg-dev unzip
pip3 install Pillow azure-iot-device
echo "done."
echo

# BUILD PYDS (DEEPSTREAM PYTHON BINDINGS)
echo "Checking pyds installation..."
# pip3 show exits non-zero when pyds is absent; temporarily relax -e/ERR.
set +e
trap - ERR
PYDS_AUTHOR=$(pip3 show pyds | grep -i author:)
set -e
trap catch ERR
if [[ "$PYDS_AUTHOR" != *NVIDIA* ]]; then
  echo "Building pyds..."
  cd "$GAZE_DIR/ds/lib"
  if [ ! -d deepstream_python_apps ]; then
    git clone https://github.com/NVIDIA-AI-IOT/deepstream_python_apps
  fi
  cd deepstream_python_apps/bindings/
  git submodule update --init
  mkdir -p build && cd build   # -p: do not fail when re-running setup
  cmake .. -DPYTHON_MAJOR_VERSION=3 -DPYTHON_MINOR_VERSION=8 -DPIP_PLATFORM=linux_aarch64 -DDS_PATH=$DEEPSTREAM_DIR
  make -j$(nproc)
  echo "Installing pyds..."
  pip3 install ./pyds-*.whl
  cd "$GAZE_DIR"
fi
echo "done."
echo

# BUILD DS CLASSIFIER CUSTOM PARSER
echo "Building classification custom parser library..."
cd "$GAZE_DIR/ds/lib/customparser"
env CUDA_VER=$CUDA_VER make
cp libcustomparser.so "$GAZE_DIR/ds/lib"
cd "$GAZE_DIR"
echo "done."
echo

# BUILD DS GAZE INFER PLUGIN
echo "Building Python wrapper library for gazeinfer..."
cd "$GAZE_DIR/ds/lib/gazeinfer"
if [ ! -d deepstream_tao_apps ]; then
  git clone https://github.com/NVIDIA-AI-IOT/deepstream_tao_apps
fi
env CUDA_VER=$CUDA_VER make
cp dscprobes.so "$GAZE_DIR/ds/lib"
echo "done."
echo

echo "Building gazeinfer library..."
cd deepstream_tao_apps/apps/tao_others/deepstream-gaze-app/gazeinfer_impl
env CUDA_VER=$CUDA_VER make
cp libnvds_gazeinfer.so "$GAZE_DIR/ds/lib"
cd "$GAZE_DIR"
echo "done."
echo

# DOWNLOAD INFERENCE MODEL FROM NGC
cd "$GAZE_DIR/model"
echo "Downloading face detection model..."
if [ ! -f face/facenet.etlt ]; then
  if [ "$TEGRA_ID" == 24 ] || [ "$TEGRA_ID" == 33 ]; then
    # fp16:TX2/TX1/Nano
    MODEL_URI=https://api.ngc.nvidia.com/v2/models/nvidia/tao/facenet/versions/deployable_v1.0/files/
    wget "$MODEL_URI/model.etlt" -O face/facenet.etlt
  else
    # int8:Xavier or later
    MODEL_URI=https://api.ngc.nvidia.com/v2/models/nvidia/tao/facenet/versions/pruned_quantized_v2.0.1/files/
    wget "$MODEL_URI/model.etlt" -O face/facenet.etlt
    wget "$MODEL_URI/int8_calibration.txt" -O face/facenet_cal.txt
  fi
fi
echo "done."
echo

echo "Downloading facial landmark model..."
if [ ! -f faciallandmarks/fpenet.etlt ]; then
  if [ "$TEGRA_ID" == 24 ] || [ "$TEGRA_ID" == 33 ]; then
    # fp16:TX2/TX1/Nano
    # FPENet model has different output layer names by either int8 or fp16 for now. (2022/05/01)
    # And FPENet postprocess codes in call_probes_from_py.cpp depend on the names.
    # So you need to use the model suitable for the codes.
    # Please see also the beginning part of ds/ds_pipeline.py.
    MODEL_URI=https://api.ngc.nvidia.com/v2/models/nvidia/tao/fpenet/versions/deployable_v1.0/files/
    wget "$MODEL_URI/model.etlt" -O faciallandmarks/fpenet.etlt
  else
    # int8:Xavier or later
    MODEL_URI=https://api.ngc.nvidia.com/v2/models/nvidia/tao/fpenet/versions/deployable_v3.0/files/
    wget "$MODEL_URI/model.etlt" -O faciallandmarks/fpenet.etlt
    wget "$MODEL_URI/int8_calibration.txt" -O faciallandmarks/fpenet_cal.txt
  fi
fi
echo "done."
echo

echo "Downloading gaze detection model..."
if [ ! -f gaze/gazenet_facegrid.etlt ]; then
  MODEL_URI=https://api.ngc.nvidia.com/v2/models/nvidia/tao/gazenet/versions/deployable_v1.0/files/
  wget "$MODEL_URI/model.etlt" -O gaze/gazenet_facegrid.etlt
fi
echo "done."
cd "$GAZE_DIR"
echo

# BUILD GAZE
cd "$GAZE_DIR/model/gaze"
if [ ! -f "$GAZE_DIR/model/gaze/tao-converter.zip" ]; then
  echo "Downloading tao-converter..."
  wget "$TAO_CONVERTER_URI" -O tao-converter.zip
  echo "done."
fi
unzip -jo tao-converter.zip '*/tao-converter'
if [ ! -f gazenet_facegrid_fp16_b8.engine ]; then
  echo "Building gaze model..."
  # tao-converter fails to convert when passed 4d(nchw) specs with multiple input model for now.
  # so give the additional dummy dimension(1xNxCxHxW) as a workaround. (@2022/04/28)
  ./tao-converter -k nvidia_tlt -p input_face_images:0,1x1x224x224,4x1x224x224,8x1x224x224 \
    -p input_left_images:0,1x1x1x224x224,1x4x1x224x224,1x8x1x224x224 \
    -p input_right_images:0,1x1x1x224x224,1x4x1x224x224,1x8x1x224x224 \
    -p input_facegrid:0,1x1x1x625x1,1x4x1x625x1,1x8x1x625x1 \
    -m 8 -t fp16 -w $WORKSPACE_SIZE -e gazenet_facegrid_fp16_b8.engine \
    gazenet_facegrid.etlt
  echo "done."
fi
cd "$GAZE_DIR"
echo

# CHANGE INFERENCE MODE
echo "Setting inference mode INT8 or FP16"
cd "$GAZE_DIR/ds"
chmod +x chinfmod.sh
if [ "$TEGRA_ID" == 24 ] || [ "$TEGRA_ID" == 33 ]; then
  # fp16:TX2/TX1/Nano
  ./chinfmod.sh -fp16
else
  # int8:Xavier or later
  ./chinfmod.sh -int8
fi
cd "$GAZE_DIR"
echo "done."
echo

echo "All done."
echo
```
## 修改 } catch (std::exception &e) {
```
} catch (std::exception &e) {
```
## 錯誤 urllib3 (1.26.13) or chardet (3.0.4) doesn't match a supported version!
```
pip3 install --upgrade requests
```
## ./deepstream-emotion-app: error while loading shared libraries: libnvcv_faciallandmarks.so: cannot open shared object file: No such file or directory
```
cd /opt/nvidia/deepstream/deepstream-6.1/sources/apps/sample_apps/Gaze-Analysis-System/ds/lib/gazeinfer/deepstream_tao_apps/apps/tao_others
export CUDA_VER=cuda version in the device
make
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nvidia/deepstream/deepstream/lib/cvcore_libs
```
## ImportError: libnvcv_faciallandmarks.so: cannot open shared object file: No such file or directory
```
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nvidia/deepstream/deepstream-6.1/lib/cvcore_libs
```
## 修改 /opt/nvidia/deepstream/deepstream-6.1/sources/apps/sample_apps/Gaze-Analysis-System/ds/ds_pipeline.py
```
INFER_FPENET_MODEL_TYPE=1
```
## 執行
```
python3 run.py /opt/nvidia/deepstream/deepstream-6.1/samples/streams/sample_720p.h264 --codec h264
export DISPLAY=:1
export DISPLAY=:0
python3 run.py 1.mp4 --codec h264
```