<h1>20200322 Jeff Liao</h1>
1) I used a semantic segmentation model and implemented it with OpenCV's "cv2.dnn.readNetFromModelOptimizer" on the CPU. Sometimes the data type of the model does not fit my device. Moreover, I used the Media SDK to open an ".h264" video file instead of an ".mp4".
```python
import cv2
import numpy as np
# BGR colour palette used to colourize each segmentation class id.
# NOTE(review): the table has 79 entries, so valid indices are 0..78
# (len(color_palette) - 1); any clamp applied to a class index must stay
# within that range.
color_palette = np.array([[0, 113, 188],
[216, 82, 24],
[236, 176, 31],
[125, 46, 141],
[118, 171, 47],
[76, 189, 237],
[161, 19, 46],
[76, 76, 76],
[153, 153, 153],
[255, 0, 0],
[255, 127, 0],
[190, 190, 0],
[0, 255, 0],
[0, 0, 255],
[170, 0, 255],
[84, 84, 0],
[84, 170, 0],
[84, 255, 0],
[170, 84, 0],
[170, 170, 0],
[170, 255, 0],
[255, 84, 0],
[255, 170, 0],
[255, 255, 0],
[0, 84, 127],
[0, 170, 127],
[0, 255, 127],
[84, 0, 127],
[84, 84, 127],
[84, 170, 127],
[84, 255, 127],
[170, 0, 127],
[170, 84, 127],
[170, 170, 127],
[170, 255, 127],
[255, 0, 127],
[255, 84, 127],
[255, 170, 127],
[255, 255, 127],
[0, 84, 255],
[0, 170, 255],
[0, 255, 255],
[84, 0, 255],
[84, 84, 255],
[84, 170, 255],
[84, 255, 255],
[170, 0, 255],
[170, 84, 255],
[170, 170, 255],
[170, 255, 255],
[255, 0, 255],
[255, 84, 255],
[255, 170, 255],
[42, 0, 0],
[84, 0, 0],
[127, 0, 0],
[170, 0, 0],
[212, 0, 0],
[255, 0, 0],
[0, 42, 0],
[0, 84, 0],
[0, 127, 0],
[0, 170, 0],
[0, 212, 0],
[0, 255, 0],
[0, 0, 42],
[0, 0, 84],
[0, 0, 127],
[0, 0, 170],
[0, 0, 212],
[0, 0, 255],
[0, 0, 0],
[36, 36, 36],
[72, 72, 72],
[109, 109, 109],
[145, 145, 145],
[182, 182, 182],
[218, 218, 218],
[255, 255, 255]], dtype=np.uint8)
# Load the FP16 road-segmentation IR model through OpenCV's Inference
# Engine backend and run it on the CPU.
net = cv2.dnn.readNetFromModelOptimizer(
    './road-segmentation-adas-fp16/road-segmentation-adas-fp16-0001.xml',
    './road-segmentation-adas-fp16/road-segmentation-adas-fp16-0001.bin')
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
#frame = cv2.imread('./highspeed.png')
# Open the raw .h264 stream through the Intel Media SDK backend.
cap = cv2.VideoCapture('./123.h264', cv2.CAP_INTEL_MFX)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # End of stream or decode failure: frame is None and would crash
        # blobFromImage, so stop cleanly instead.
        break
    # The model expects an 8-bit 672x384 blob; the network output is
    # (N, C, 384, 672).
    blob = cv2.dnn.blobFromImage(frame, size=(672, 384), ddepth=cv2.CV_8U)
    net.setInput(blob)
    out = net.forward()
    for data in out:
        # Single-channel output already carries class ids; multi-channel
        # output carries per-class scores, so take the argmax over channels.
        # Vectorized with NumPy: the previous per-pixel double loop was the
        # main source of the low frame rate.
        if data.shape[0] == 1:
            class_ids = data[0].astype(np.intp)
        else:
            class_ids = np.argmax(data, axis=0)
        # Clamp to the last palette entry. Must be len(color_palette) - 1
        # (78), not 79: the palette has 79 entries, so index 79 would raise
        # IndexError.
        class_ids = np.minimum(class_ids, len(color_palette) - 1)
        classes_map = color_palette[class_ids]  # (384, 672, 3) uint8 image
        cv2.imshow("fraom", classes_map)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
```

2) I used the semantic segmentation model and implemented it with the OpenVINO Inference Engine on the CPU. Because the visualization is done per frame, the frame rate is very low — about 3 seconds per frame. I'll use async mode to reduce the time of the procedure, or try another method to show the result.
```python
import os
import sys
import cv2
import numpy as np
from argparse import ArgumentParser
from openvino.inference_engine import IENetwork, IECore
from datetime import datetime
def add_parser():
    """Build the command-line parser for the semantic segmentation demo."""
    p = ArgumentParser(description='semantic segmentation demo', add_help=False)
    p.add_argument('-m', '--model')
    p.add_argument('-i', '--input_video')
    p.add_argument(
        '-l', '--cpu_extension',
        type=str,
        default=None,
        help='MKLDNN (CPU)-targeted custom layers.'
             'Absolute path to a shared library with the kernels impl.')
    p.add_argument('-d', '--device')
    return p
def run(model_xml, video, extension, dev):
    """Run the segmentation model on a video and display a colourized class map.

    model_xml: path to the IR .xml file; the .bin weights file is assumed
               to sit next to it with the same stem.
    video:     input video path (opened through the Intel Media SDK backend).
    extension: optional path to a CPU custom-layer extension library; may be
               None (the parser's default).
    dev:       inference device name, e.g. 'CPU'.
    """
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    ie = IECore()
    # Only register the custom-layer extension when one was actually
    # supplied: the argument defaults to None, and add_extension(None, ...)
    # fails.
    if extension:
        ie.add_extension(extension, dev)
    # Read IR
    net = IENetwork(model=model_xml, weights=model_bin)
    # Report unsupported layers instead of computing the list and silently
    # discarding it.
    supported_layers = ie.query_network(net, "CPU")
    not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    if not_supported_layers:
        print('Warning: layers not supported by the device plugin:',
              not_supported_layers)
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    # Loading model to the plugin
    exec_net = ie.load_network(network=net, device_name=dev)
    cap = cv2.VideoCapture(video, cv2.CAP_INTEL_MFX)
    win_name = "road"
    print ('[0]',datetime.now())
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # End of stream: frame is None and cv2.resize would crash.
            break
        frame = cv2.resize(frame, (w, h))
        frame = frame.transpose((2, 0, 1))  # HWC -> CHW as the IR expects
        print ('[1]',datetime.now())
        #start sync inference
        res = exec_net.infer(inputs={input_blob: frame})
        print ('[2]',datetime.now())
        # Processing output blob
        res = res[out_blob]
        print ('[3]',datetime.now())
        _, _, out_h, out_w = res.shape
        for data in res:
            # Vectorized colourization: single-channel output already holds
            # class ids, multi-channel output holds per-class scores (argmax
            # over channels). The previous per-pixel double loop was the
            # main cause of the ~3 s/frame runtime.
            if data.shape[0] == 1:
                class_ids = data[0].astype(np.intp)
            else:
                class_ids = np.argmax(data, axis=0)
            # Clamp to the last palette entry. Must be len(color_palette)-1
            # (78), not 79: the palette has only 79 entries.
            class_ids = np.minimum(class_ids, len(color_palette) - 1)
            classes_map = color_palette[class_ids]
            print ('[4]',datetime.now())
            cv2.imshow(win_name, classes_map)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
# BGR colour palette used to colourize each segmentation class id.
# Referenced as a module-level global from run(); it is defined after the
# function but before the __main__ entry point, so lookup succeeds at call
# time. NOTE(review): the table has 79 entries, so valid indices are 0..78
# (len(color_palette) - 1); any clamp applied to a class index must stay
# within that range.
color_palette = np.array([[0, 113, 188],
[216, 82, 24],
[236, 176, 31],
[125, 46, 141],
[118, 171, 47],
[76, 189, 237],
[161, 19, 46],
[76, 76, 76],
[153, 153, 153],
[255, 0, 0],
[255, 127, 0],
[190, 190, 0],
[0, 255, 0],
[0, 0, 255],
[170, 0, 255],
[84, 84, 0],
[84, 170, 0],
[84, 255, 0],
[170, 84, 0],
[170, 170, 0],
[170, 255, 0],
[255, 84, 0],
[255, 170, 0],
[255, 255, 0],
[0, 84, 127],
[0, 170, 127],
[0, 255, 127],
[84, 0, 127],
[84, 84, 127],
[84, 170, 127],
[84, 255, 127],
[170, 0, 127],
[170, 84, 127],
[170, 170, 127],
[170, 255, 127],
[255, 0, 127],
[255, 84, 127],
[255, 170, 127],
[255, 255, 127],
[0, 84, 255],
[0, 170, 255],
[0, 255, 255],
[84, 0, 255],
[84, 84, 255],
[84, 170, 255],
[84, 255, 255],
[170, 0, 255],
[170, 84, 255],
[170, 170, 255],
[170, 255, 255],
[255, 0, 255],
[255, 84, 255],
[255, 170, 255],
[42, 0, 0],
[84, 0, 0],
[127, 0, 0],
[170, 0, 0],
[212, 0, 0],
[255, 0, 0],
[0, 42, 0],
[0, 84, 0],
[0, 127, 0],
[0, 170, 0],
[0, 212, 0],
[0, 255, 0],
[0, 0, 42],
[0, 0, 84],
[0, 0, 127],
[0, 0, 170],
[0, 0, 212],
[0, 0, 255],
[0, 0, 0],
[36, 36, 36],
[72, 72, 72],
[109, 109, 109],
[145, 145, 145],
[182, 182, 182],
[218, 218, 218],
[255, 255, 255]], dtype=np.uint8)
# Script entry point. Use the canonical equality check: the original
# substring test ('__main__' in __name__) would also match any module whose
# name merely contains that text.
if __name__ == '__main__':
    args = add_parser().parse_args()
    run(args.model, args.input_video, args.cpu_extension, args.device)
```

<h2>Problems</h2>
* Found that the NCS2 can only be used over USB 3.x.