# Use POT to Quantize UNET Public Model
###### tags: `POT`
## Use the OpenVINO Docker Hub image
```
docker run -it -v ~/Downloads:/mnt -u root --rm openvino/ubuntu20_data_dev:latest
```
## Run Accuracy Checker and POT
Run the following steps inside the `ubuntu20_data_dev` container.
#### 1. Build the UNET model
Create `unet.py` in `/home/openvino` (the full script is listed in the Reference section below), then:
```
apt update
apt install git
cd /home/openvino
pip3 install keras
pip3 install git+https://github.com/karolzak/keras-unet
git clone https://github.com/karolzak/keras-unet.git
python3 unet.py   # Training the UNET model on the ISBI2015 dataset takes a while
```
#### 2. Convert the Keras-UNET model to IR
```
python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py --saved_model_dir unet --input_shape [1,256,256,1] --model_name unet_256 --output_dir openvino_models/public/unet_256/FP16 --data_type FP16
python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py --saved_model_dir unet --input_shape [1,256,256,1] --model_name unet_256 --output_dir openvino_models/public/unet_256/FP32 --data_type FP32
```
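The generated IR can be sanity-checked with the Inference Engine Python API before quantization. A minimal sketch, not part of the original flow, assuming the 2021-era `openvino.inference_engine` API available in this docker image:
```
from openvino.inference_engine import IECore

# Read the FP16 IR and print its input/output shapes. Model Optimizer
# should have transposed the NHWC Keras input [1,256,256,1] to NCHW [1,1,256,256].
ie = IECore()
net = ie.read_network(model='openvino_models/public/unet_256/FP16/unet_256.xml')
input_name = next(iter(net.input_info))
output_name = next(iter(net.outputs))
print('input :', input_name, net.input_info[input_name].input_data.shape)
print('output:', output_name, net.outputs[output_name].shape)
```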
#### 3. Use the POT Python API to quantize the UNET IR to INT8
`unet_segmentation_pot.py` is listed in the Reference section below. The quantized IR is written to `./optimized`.
```
python3 unet_segmentation_pot.py -m /home/openvino/openvino_models/public/unet_256/FP16/unet_256.xml -d /home/openvino/keras-unet/input/isbi2015/train/image --mask-dir /home/openvino/keras-unet/input/isbi2015/train/label --imageset-file /home/openvino/keras_unet_isbi2015_test_image_val.txt
```
#### 4. Copy the UNET FP16-INT8 IR
```
mkdir /home/openvino/openvino_models/public/unet_256/FP16-INT8/
cp -ar optimized/* /home/openvino/openvino_models/public/unet_256/FP16-INT8/
```
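To gauge how much quantization changed the model, the FP16 and FP16-INT8 IRs can be run on the same input and their raw sigmoid outputs compared. A minimal sketch, not part of the original flow; it mirrors the preprocessing of the POT data loader below (256x256 grayscale, no scaling) and uses image `0.png`, one of the IDs listed in `keras_unet_isbi2015_test_image_val.txt`:
```
import cv2
import numpy as np
from openvino.inference_engine import IECore

def infer_once(xml_path, image_path, device='CPU'):
    # Load an IR and push one grayscale 256x256 image through it.
    ie = IECore()
    net = ie.read_network(model=xml_path)
    exec_net = ie.load_network(net, device)
    input_name = next(iter(net.input_info))
    output_name = next(iter(net.outputs))
    gray = cv2.cvtColor(cv2.resize(cv2.imread(image_path), (256, 256)),
                        cv2.COLOR_BGR2GRAY)
    blob = gray[np.newaxis, np.newaxis, :, :].astype(np.float32)  # NCHW
    return exec_net.infer({input_name: blob})[output_name]

base = '/home/openvino/openvino_models/public/unet_256'
img = '/home/openvino/keras-unet/input/isbi2015/train/image/0.png'
out_fp16 = infer_once(base + '/FP16/unet_256.xml', img)
out_int8 = infer_once(base + '/FP16-INT8/unet_256.xml', img)
print('max abs diff:', np.abs(out_fp16 - out_int8).max())
```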
## Reference
Files under `/home/openvino` after the steps above:
```
drwxr-xr-x 9 root root 4096 Apr 21 06:58 keras-unet
-rw-r--r-- 1 root root 80 Apr 21 07:30 keras_unet_isbi2015_test_image_val.txt
drwxr-xr-x 2 root root 4096 Apr 21 07:31 optimized
-rw-r--r-- 1 root root 248456056 Apr 21 07:27 segm_model_v0.h5
drwxr-xr-x 4 root root 4096 Apr 21 07:27 unet
-rw-r--r-- 1 root root 3433 Apr 21 07:00 unet.py
-rw-r--r-- 1 root root 9103 Apr 21 07:30 unet_segmentation_pot.py
drwxr-xr-x 3 root root 4096 Apr 21 07:29 openvino_models
openvino_models/public/unet_256/FP16/unet_256.bin
openvino_models/public/unet_256/FP16/unet_256.mapping
openvino_models/public/unet_256/FP16/unet_256.xml
openvino_models/public/unet_256/FP32/unet_256.bin
openvino_models/public/unet_256/FP32/unet_256.mapping
openvino_models/public/unet_256/FP32/unet_256.xml
openvino_models/public/unet_256/FP16-INT8/unet_256.bin
openvino_models/public/unet_256/FP16-INT8/unet_256.mapping
openvino_models/public/unet_256/FP16-INT8/unet_256.xml
```
Note: Copy the `openvino_models` folder to `/mnt`. It will then be accessible in `~/Downloads` on the host and in `/mnt` inside the container.
### unet.py
```
import numpy as np
#import matplotlib.pyplot as plt
#%matplotlib inline
import glob
import os
import sys
from PIL import Image
masks = glob.glob("/home/openvino/keras-unet/input/isbi2015/train/label/*.png")
orgs = glob.glob("/home/openvino/keras-unet/input/isbi2015/train/image/*.png")
imgs_list = []
masks_list = []
for image, mask in zip(orgs, masks):
    imgs_list.append(np.array(Image.open(image).resize((256,256))))
    im = Image.open(mask).resize((256,256))
    #width, height = im.size # Get dimensions
    #left = (width - 388)/2
    #top = (height - 388)/2
    #right = (width + 388)/2
    #bottom = (height + 388)/2
    #im_cropped = im.crop((left, top, right, bottom))
    masks_list.append(np.array(im))
imgs_np = np.asarray(imgs_list)
masks_np = np.asarray(masks_list)
print(imgs_np.shape, masks_np.shape)
#from keras_unet.utils import plot_imgs
#
#plot_imgs(org_imgs=imgs_np, mask_imgs=masks_np, nm_img_to_plot=10, figsize=6)
print(imgs_np.max(), masks_np.max())
x = np.asarray(imgs_np, dtype=np.float32)/255
y = np.asarray(masks_np, dtype=np.float32)/255
print(x.max(), y.max())
print(x.shape, y.shape)
y = y.reshape(y.shape[0], y.shape[1], y.shape[2], 1)
print(x.shape, y.shape)
x = x.reshape(x.shape[0], x.shape[1], x.shape[2], 1)
print(x.shape, y.shape)
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.5, random_state=0)
print("x_train: ", x_train.shape)
print("y_train: ", y_train.shape)
print("x_val: ", x_val.shape)
print("y_val: ", y_val.shape)
from keras_unet.utils import get_augmented
train_gen = get_augmented(
    x_train, y_train, batch_size=2,
    data_gen_args=dict(
        rotation_range=15.,
        width_shift_range=0.05,
        height_shift_range=0.05,
        shear_range=50,
        zoom_range=0.2,
        horizontal_flip=True,
        vertical_flip=True,
        fill_mode='constant'
    ))
sample_batch = next(train_gen)
xx, yy = sample_batch
print(xx.shape, yy.shape)
#from keras_unet.utils import plot_imgs
#
#plot_imgs(org_imgs=xx, mask_imgs=yy, nm_img_to_plot=2, figsize=6)
from keras_unet.models import custom_unet
input_shape = x_train[0].shape
model = custom_unet(
    input_shape,
    use_batch_norm=False,
    num_classes=1,
    filters=64,
    dropout=0.2,
    output_activation='sigmoid'
)
model.summary()
from keras.callbacks import ModelCheckpoint
model_filename = 'segm_model_v0.h5'
callback_checkpoint = ModelCheckpoint(
    model_filename,
    verbose=1,
    monitor='val_loss',
    save_best_only=True,
)
from keras.optimizers import Adam, SGD
from keras_unet.metrics import iou, iou_thresholded
from keras_unet.losses import jaccard_distance
from tensorflow.keras import optimizers
sgd = optimizers.SGD(lr=0.01, momentum=0.99)
model.compile(
    #optimizer=Adam(),
    optimizer=sgd, #SGD(lr=0.01, momentum=0.99),
    loss='binary_crossentropy',
    #loss=jaccard_distance,
    metrics=[iou, iou_thresholded]
)
history = model.fit_generator(
    train_gen,
    steps_per_epoch=100,
    epochs=10,
    validation_data=(x_val, y_val),
    callbacks=[callback_checkpoint]
)
#from keras_unet.utils import plot_segm_history
#
#plot_segm_history(history)
model.load_weights(model_filename)
y_pred = model.predict(x_val)
#from keras_unet.utils import plot_imgs
#
#plot_imgs(org_imgs=x_val, mask_imgs=y_val, pred_imgs=y_pred, nm_img_to_plot=9)
model.save('unet')
```
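Before running Model Optimizer, the exported SavedModel can be reloaded to confirm it predicts sensible sigmoid masks. A minimal sketch, not in the original script; the image path follows the dataset layout used above:
```
import numpy as np
import tensorflow as tf
from PIL import Image

# Reload the SavedModel written by model.save('unet') and predict on one image.
model = tf.keras.models.load_model('unet', compile=False)
img = np.asarray(Image.open(
    '/home/openvino/keras-unet/input/isbi2015/train/image/0.png').resize((256, 256)),
    dtype=np.float32) / 255
pred = model.predict(img.reshape(1, 256, 256, 1))
print(pred.shape, float(pred.min()), float(pred.max()))  # (1, 256, 256, 1), values in [0, 1]
```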
### unet_segmentation_pot.py
```
#
# Copyright 2020-2021 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials,
# and your use of them is governed by the express license under which they
# were provided to you (End User License Agreement for the Intel(R) Software
# Development Products (Version October 2018)). Unless the License provides
# otherwise, you may not use, modify, copy, publish, distribute, disclose or
# transmit this software or the related documents without Intel's prior
# written permission.
#
# This software and the related documents are provided as is, with no
# express or implied warranties, other than those that are expressly
# stated in the License.
import os
import math
import cv2
import numpy as np
from addict import Dict
from compression.api import Metric, DataLoader
from compression.engines.ie_engine import IEEngine
from compression.graph import load_model, save_model
from compression.graph.model_utils import compress_model_weights
from compression.pipeline.initializer import create_pipeline
from compression.utils.logger import init_logger
from sample.utils.argument_parser import get_common_argparser
# Initialize the logger to print the quantization process in the console.
init_logger(level='INFO')
_SEGMENTATION_COLORS = (
    (0, 0, 0), (255, 255, 255)
)
# Custom DataLoader class implementation
class ISBI2015SegmentationLoader(DataLoader):
    # Required methods:
    def __init__(self, config):
        if not isinstance(config, Dict):
            config = Dict(config)
        super().__init__(config)
        self._image_size = config.image_size
        self._img_ids = self._read_img_ids(config)

    def __getitem__(self, index):
        """
        Returns annotation and image (and optionally image metadata) at the specified index.
        Possible formats:
        (img_id, img_annotation), image
        (img_id, img_annotation), image, image_metadata
        """
        if index >= len(self):
            raise IndexError
        mask_path = os.path.join(self.config.mask_dir, self._img_ids[index] + '.png')
        image_path = os.path.join(self.config.data_source, self._img_ids[index] + '.png')
        #print('mask_path:{}'.format(mask_path))
        #print('image_path:{}'.format(image_path))
        annotation = (index, self._read_and_preprocess_mask(mask_path))
        return annotation, self._read_and_preprocess_image(image_path)

    def __len__(self):
        """ Returns size of the dataset """
        return len(self._img_ids)

    # Methods specific to the current implementation
    @staticmethod
    def _read_img_ids(config):
        with open(config.imageset_file) as f:
            return f.read().splitlines()

    def _read_and_preprocess_image(self, image_path):
        image = cv2.imread(image_path)
        image = cv2.resize(image, (256, 256))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return np.expand_dims(image, axis=0)

    def _read_and_preprocess_mask(self, mask_path):
        # Encode the {black, white} mask pixels into {0, 1} class labels.
        mask = self._read_and_preprocess_image(mask_path).transpose(1, 2, 0)
        encoded_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
        for label, color in enumerate(_SEGMENTATION_COLORS):
            encoded_mask[np.where(np.all(mask == color, axis=-1))[:2]] = label
        return encoded_mask
# Custom implementation of Mean Intersection Over Union metric.
class MeanIOU(Metric):
    # Required methods
    def __init__(self, num_classes):
        self._classes_num = num_classes
        super().__init__()
        self._name = 'mean_iou'
        self._current_cm = []
        self._total_cm = np.zeros((self._classes_num, self._classes_num))

    @property
    def value(self):
        """ Returns metric value for the last model output.
        Possible format: {metric_name: [metric_values_per_image]}
        """
        return {self._name: [self._evaluate(cm) for cm in self._current_cm]}

    @property
    def avg_value(self):
        """ Returns average metric value for all model outputs.
        Possible format: {metric_name: metric_value}
        """
        return {self._name: self._evaluate(self._total_cm)}

    def update(self, output, target):
        """ Calculates and updates metric value
        :param output: model output
        :param target: annotations
        """
        #print('output:{}'.format(output))
        #print('target:{}'.format(target))
        if len(output) > 1:
            raise Exception('The Mean IOU metric cannot be calculated '
                            'for a model with multiple outputs')
        self._current_cm = []
        y_pred = output[0].flatten()
        y_true = target[0].flatten()
        valid_pixels = (y_true >= 0) & (y_true < self._classes_num) & \
                       (y_pred >= 0) & (y_pred < self._classes_num)
        y_true = y_true[valid_pixels]
        y_pred = y_pred[valid_pixels]
        #print('y_true:{}'.format(y_true))
        #print('y_pred:{}'.format(y_pred))
        current_cm = np.bincount((self._classes_num * y_true + y_pred).astype('int64'),
                                 minlength=self._classes_num ** 2)
        current_cm = current_cm.reshape(self._classes_num, self._classes_num)
        #print('current_cm:{}'.format(current_cm))
        self._current_cm.append(current_cm)
        self._total_cm += current_cm

    def reset(self):
        """ Resets metric """
        self._current_cm = []
        self._total_cm = np.zeros((self._classes_num, self._classes_num))

    def get_attributes(self):
        """
        Returns a dictionary of metric attributes {metric_name: {attribute_name: value}}.
        Required attributes: 'direction': 'higher-better' or 'higher-worse'
                             'type': metric type
        """
        return {self._name: {'direction': 'higher-better',
                             'type': 'mean_iou'}}

    # Methods specific to the current implementation
    @staticmethod
    def _evaluate(confusion_matrix):
        intersection = np.diagonal(confusion_matrix)
        union = confusion_matrix.sum(axis=1) + \
                confusion_matrix.sum(axis=0) - \
                intersection
        return np.nanmean(np.divide(intersection, union,
                                    out=np.full(intersection.shape, np.nan),
                                    where=union != 0))
def main():
    parser = get_common_argparser()
    parser.add_argument(
        '--mask-dir',
        help='Path to the directory with segmentation masks',
        required=True
    )
    parser.add_argument(
        '--imageset-file',
        help='Path to the ImageSet file',
        required=True
    )
    args = parser.parse_args()
    if not args.weights:
        args.weights = '{}.bin'.format(os.path.splitext(args.model)[0])

    model_config = Dict({
        'model_name': 'unet_256',
        'model': os.path.expanduser(args.model),
        'weights': os.path.expanduser(args.weights)
    })
    engine_config = Dict({
        'device': 'CPU',
        'stat_requests_number': 4,
        'eval_requests_number': 4
    })
    dataset_config = Dict({
        'data_source': os.path.expanduser(args.dataset),
        'mask_dir': os.path.expanduser(args.mask_dir),
        'imageset_file': os.path.expanduser(args.imageset_file),
        'image_size': 256
    })
    algorithms = [
        {
            'name': 'DefaultQuantization',
            'params': {
                'target_device': 'CPU',
                'preset': 'performance',
                'stat_subset_size': 30
            }
        }
    ]

    # Step 1: Load the model.
    model = load_model(model_config)

    # Step 2: Initialize the data loader.
    data_loader = ISBI2015SegmentationLoader(dataset_config)

    # Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric.
    metric = MeanIOU(num_classes=2)

    # Step 4: Initialize the engine for metric calculation and statistics collection.
    engine = IEEngine(config=engine_config,
                      data_loader=data_loader,
                      metric=metric)

    # Step 5: Create a pipeline of compression algorithms.
    pipeline = create_pipeline(algorithms, engine)

    # Step 6: Execute the pipeline.
    compressed_model = pipeline.run(model)

    # Step 7 (Optional): Compress model weights to quantized precision
    # in order to reduce the size of the final .bin file.
    compress_model_weights(compressed_model)

    # Step 8: Save the compressed model to the desired path.
    save_model(compressed_model, os.path.join(os.path.curdir, 'optimized'))

    # Step 9 (Optional): Evaluate the compressed model. Print the results.
    metric_results = pipeline.evaluate(compressed_model)
    if metric_results:
        for name, value in metric_results.items():
            print('{: <27s}: {}'.format(name, value))


if __name__ == '__main__':
    main()
```
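The script imports `get_common_argparser` from the POT sample utilities shipped in the docker image. If that module is not on `PYTHONPATH`, a minimal stand-in covering only the flags used here (`-m`, `-w`, `-d`) could look like the following; this is an assumption, not the toolkit's actual implementation:
```
import argparse

def get_common_argparser():
    # Hypothetical stand-in for sample.utils.argument_parser.get_common_argparser.
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', required=True,
                        help='Path to the model .xml file')
    parser.add_argument('-w', '--weights',
                        help='Path to the .bin file (derived from --model if omitted)')
    parser.add_argument('-d', '--dataset', required=True,
                        help='Path to the directory with input images')
    return parser
```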
### keras_unet_isbi2015_test_image_val.txt
```
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
```
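This file is simply the list of image IDs (file names without the `.png` extension) that `ISBI2015SegmentationLoader` reads. Assuming the 30 training images are named `0.png` through `29.png`, it can be generated with:
```
# Write IDs 0..29, one per line.
with open('keras_unet_isbi2015_test_image_val.txt', 'w') as f:
    f.write('\n'.join(str(i) for i in range(30)) + '\n')
```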
### UNET Training log
```
python3 unet.py
(30, 256, 256) (30, 256, 256)
255 255
1.0 1.0
(30, 256, 256) (30, 256, 256)
(30, 256, 256) (30, 256, 256, 1)
(30, 256, 256, 1) (30, 256, 256, 1)
x_train: (15, 256, 256, 1)
y_train: (15, 256, 256, 1)
x_val: (15, 256, 256, 1)
y_val: (15, 256, 256, 1)
2021-04-21 07:00:55.794678: W tensorflow/stream_executor/platform/default/dso_loader.cc:59] Could not load dynamic library 'libcudart.so.10.1'; dlerror: libcudart.so.10.1: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /opt/intel/openvino/opt/intel/mediasdk/lib64/:/opt/intel/openvino/data_processing/dl_streamer/lib:/opt/intel/openvino/data_processing/gstreamer/lib:/opt/intel/openvino/opencv/lib:/opt/intel/openvino/deployment_tools/ngraph/lib:/opt/intel/openvino/deployment_tools/inference_engine/external/tbb/lib::/opt/intel/openvino/deployment_tools/inference_engine/external/hddl/lib:/opt/intel/openvino/deployment_tools/inference_engine/external/omp/lib:/opt/intel/openvino/deployment_tools/inference_engine/external/gna/lib:/opt/intel/openvino/deployment_tools/inference_engine/external/mkltiny_lnx/lib:/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64
2021-04-21 07:00:55.794704: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
-----------------------------------------
keras-unet init: TF version is >= 2.0.0 - using `tf.keras` instead of `Keras`
-----------------------------------------
(2, 256, 256, 1) (2, 256, 256, 1)
2021-04-21 07:00:57.000998: W tensorflow/stream_executor/platform/default/dso_loader.cc:59] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /opt/intel/openvino/opt/intel/mediasdk/lib64/:/opt/intel/openvino/data_processing/dl_streamer/lib:/opt/intel/openvino/data_processing/gstreamer/lib:/opt/intel/openvino/opencv/lib:/opt/intel/openvino/deployment_tools/ngraph/lib:/opt/intel/openvino/deployment_tools/inference_engine/external/tbb/lib::/opt/intel/openvino/deployment_tools/inference_engine/external/hddl/lib:/opt/intel/openvino/deployment_tools/inference_engine/external/omp/lib:/opt/intel/openvino/deployment_tools/inference_engine/external/gna/lib:/opt/intel/openvino/deployment_tools/inference_engine/external/mkltiny_lnx/lib:/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64
2021-04-21 07:00:57.001020: W tensorflow/stream_executor/cuda/cuda_driver.cc:312] failed call to cuInit: UNKNOWN ERROR (303)
2021-04-21 07:00:57.001053: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:163] no NVIDIA GPU device is present: /dev/nvidia0 does not exist
2021-04-21 07:00:57.001186: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN)to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2021-04-21 07:00:57.023184: I tensorflow/core/platform/profile_utils/cpu_utils.cc:104] CPU Frequency: 3699850000 Hz
2021-04-21 07:00:57.023631: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x5194ab0 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2021-04-21 07:00:57.023646: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
Model: "functional_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 256, 256, 1) 0
__________________________________________________________________________________________________
conv2d (Conv2D) (None, 256, 256, 64) 640 input_1[0][0]
__________________________________________________________________________________________________
spatial_dropout2d (SpatialDropo (None, 256, 256, 64) 0 conv2d[0][0]
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 256, 256, 64) 36928 spatial_dropout2d[0][0]
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 128, 128, 64) 0 conv2d_1[0][0]
__________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 128, 128, 128 73856 max_pooling2d[0][0]
__________________________________________________________________________________________________
spatial_dropout2d_1 (SpatialDro (None, 128, 128, 128 0 conv2d_2[0][0]
__________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 128, 128, 128 147584 spatial_dropout2d_1[0][0]
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 64, 64, 128) 0 conv2d_3[0][0]
__________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 64, 64, 256) 295168 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
spatial_dropout2d_2 (SpatialDro (None, 64, 64, 256) 0 conv2d_4[0][0]
__________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 64, 64, 256) 590080 spatial_dropout2d_2[0][0]
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D) (None, 32, 32, 256) 0 conv2d_5[0][0]
__________________________________________________________________________________________________
conv2d_6 (Conv2D) (None, 32, 32, 512) 1180160 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
spatial_dropout2d_3 (SpatialDro (None, 32, 32, 512) 0 conv2d_6[0][0]
__________________________________________________________________________________________________
conv2d_7 (Conv2D) (None, 32, 32, 512) 2359808 spatial_dropout2d_3[0][0]
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D) (None, 16, 16, 512) 0 conv2d_7[0][0]
__________________________________________________________________________________________________
conv2d_8 (Conv2D) (None, 16, 16, 1024) 4719616 max_pooling2d_3[0][0]
__________________________________________________________________________________________________
spatial_dropout2d_4 (SpatialDro (None, 16, 16, 1024) 0 conv2d_8[0][0]
__________________________________________________________________________________________________
conv2d_9 (Conv2D) (None, 16, 16, 1024) 9438208 spatial_dropout2d_4[0][0]
__________________________________________________________________________________________________
conv2d_transpose (Conv2DTranspo (None, 32, 32, 512) 2097664 conv2d_9[0][0]
__________________________________________________________________________________________________
concatenate (Concatenate) (None, 32, 32, 1024) 0 conv2d_transpose[0][0]
conv2d_7[0][0]
__________________________________________________________________________________________________
conv2d_10 (Conv2D) (None, 32, 32, 512) 4719104 concatenate[0][0]
__________________________________________________________________________________________________
conv2d_11 (Conv2D) (None, 32, 32, 512) 2359808 conv2d_10[0][0]
__________________________________________________________________________________________________
conv2d_transpose_1 (Conv2DTrans (None, 64, 64, 256) 524544 conv2d_11[0][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 64, 64, 512) 0 conv2d_transpose_1[0][0]
conv2d_5[0][0]
__________________________________________________________________________________________________
conv2d_12 (Conv2D) (None, 64, 64, 256) 1179904 concatenate_1[0][0]
__________________________________________________________________________________________________
conv2d_13 (Conv2D) (None, 64, 64, 256) 590080 conv2d_12[0][0]
__________________________________________________________________________________________________
conv2d_transpose_2 (Conv2DTrans (None, 128, 128, 128 131200 conv2d_13[0][0]
__________________________________________________________________________________________________
concatenate_2 (Concatenate) (None, 128, 128, 256 0 conv2d_transpose_2[0][0]
conv2d_3[0][0]
__________________________________________________________________________________________________
conv2d_14 (Conv2D) (None, 128, 128, 128 295040 concatenate_2[0][0]
__________________________________________________________________________________________________
conv2d_15 (Conv2D) (None, 128, 128, 128 147584 conv2d_14[0][0]
__________________________________________________________________________________________________
conv2d_transpose_3 (Conv2DTrans (None, 256, 256, 64) 32832 conv2d_15[0][0]
__________________________________________________________________________________________________
concatenate_3 (Concatenate) (None, 256, 256, 128 0 conv2d_transpose_3[0][0]
conv2d_1[0][0]
__________________________________________________________________________________________________
conv2d_16 (Conv2D) (None, 256, 256, 64) 73792 concatenate_3[0][0]
__________________________________________________________________________________________________
conv2d_17 (Conv2D) (None, 256, 256, 64) 36928 conv2d_16[0][0]
__________________________________________________________________________________________________
conv2d_18 (Conv2D) (None, 256, 256, 1) 65 conv2d_17[0][0]
==================================================================================================
Total params: 31,030,593
Trainable params: 31,030,593
Non-trainable params: 0
__________________________________________________________________________________________________
WARNING:tensorflow:From unet.py:126: Model.fit_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
Please use Model.fit, which supports generators.
Epoch 1/10
100/100 [==============================] - ETA: 0s - loss: 0.4583 - iou: 0.6439 - iou_thresholded: 0.7505
100/100 [==============================] - 154s 2s/step - loss: 0.4583 - iou: 0.6439 - iou_thresholded: 0.7505 - val_loss: 0.3525 - val_iou: 0.7791 - val_iou_thresholded: 0.8186
Epoch 2/10
100/100 [==============================] - ETA: 0s - loss: 0.3928 - iou: 0.6990 - iou_thresholded: 0.7557
100/100 [==============================] - 153s 2s/step - loss: 0.3928 - iou: 0.6990 - iou_thresholded: 0.7557 - val_loss: 0.5997 - val_iou: 0.5307 - val_iou_thresholded: 0.5625
Epoch 3/10
100/100 [==============================] - ETA: 0s - loss: 0.4127 - iou: 0.6750 - iou_thresholded: 0.7316
100/100 [==============================] - 154s 2s/step - loss: 0.4127 - iou: 0.6750 - iou_thresholded: 0.7316 - val_loss: 0.4126 - val_iou: 0.6777 - val_iou_thresholded: 0.7406
Epoch 4/10
100/100 [==============================] - ETA: 0s - loss: 0.3461 - iou: 0.7134 - iou_thresholded: 0.7851
100/100 [==============================] - 153s 2s/step - loss: 0.3461 - iou: 0.7134 - iou_thresholded: 0.7851 - val_loss: 0.3555 - val_iou: 0.7059 - val_iou_thresholded: 0.8178
Epoch 5/10
100/100 [==============================] - ETA: 0s - loss: 0.3136 - iou: 0.7390 - iou_thresholded: 0.8053
100/100 [==============================] - 157s 2s/step - loss: 0.3136 - iou: 0.7390 - iou_thresholded: 0.8053 - val_loss: 0.2985 - val_iou: 0.7766 - val_iou_thresholded: 0.8464
Epoch 6/10
100/100 [==============================] - ETA: 0s - loss: 0.3013 - iou: 0.7434 - iou_thresholded: 0.8093
100/100 [==============================] - 156s 2s/step - loss: 0.3013 - iou: 0.7434 - iou_thresholded: 0.8093 - val_loss: 0.2977 - val_iou: 0.7593 - val_iou_thresholded: 0.8489
Epoch 7/10
100/100 [==============================] - ETA: 0s - loss: 0.2909 - iou: 0.7511 - iou_thresholded: 0.8182
100/100 [==============================] - 156s 2s/step - loss: 0.2909 - iou: 0.7511 - iou_thresholded: 0.8182 - val_loss: 0.2881 - val_iou: 0.7888 - val_iou_thresholded: 0.8527
Epoch 8/10
100/100 [==============================] - ETA: 0s - loss: 0.2902 - iou: 0.7518 - iou_thresholded: 0.8196
100/100 [==============================] - 156s 2s/step - loss: 0.2902 - iou: 0.7518 - iou_thresholded: 0.8196 - val_loss: 0.2843 - val_iou: 0.7822 - val_iou_thresholded: 0.8537
Epoch 9/10
100/100 [==============================] - ETA: 0s - loss: 0.2818 - iou: 0.7635 - iou_thresholded: 0.8276
100/100 [==============================] - 154s 2s/step - loss: 0.2818 - iou: 0.7635 - iou_thresholded: 0.8276 - val_loss: 0.2872 - val_iou: 0.7679 - val_iou_thresholded: 0.8536
Epoch 10/10
100/100 [==============================] - ETA: 0s - loss: 0.2713 - iou: 0.7664 - iou_thresholded: 0.8312
100/100 [==============================] - 156s 2s/step - loss: 0.2713 - iou: 0.7664 - iou_thresholded: 0.8312 - val_loss: 0.2765 - val_iou: 0.7794 - val_iou_thresholded: 0.8557
WARNING:tensorflow:From /usr/local/lib/python3.8/dist-packages/tensorflow/python/training/tracking/tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
This property should not be used in TensorFlow 2.0, as updates are applied automatically.
2021-04-21 07:27:07.565707: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.
WARNING:tensorflow:From /usr/local/lib/python3.8/dist-packages/tensorflow/python/training/tracking/tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.
Instructions for updating:
This property should not be used in TensorFlow 2.0, as updates are applied automatically.
```
### POT log
```
python3 unet_segmentation_pot.py -m /home/openvino/openvino_models/public/unet_256/FP16/unet_256.xml -d /home/openvino/keras-unet/input/isbi2015/train/image --mask-dir /home/openvino/keras-unet/input/isbi2015/train/label --imageset-file /home/openvino/keras_unet_isbi2015_test_image_val.txt
07:35:15 accuracy_checker WARNING: /usr/local/lib/python3.8/dist-packages/networkx/classes/graph.py:23: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3, and in 3.9 it will stop working
from collections import Mapping
07:35:15 accuracy_checker WARNING: /usr/local/lib/python3.8/dist-packages/networkx/classes/reportviews.py:95: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3, and in 3.9 it will stop working
from collections import Mapping, Set, Iterable
07:35:15 accuracy_checker WARNING: /opt/intel/openvino/deployment_tools/tools/post_training_optimization_toolkit/compression/algorithms/quantization/optimization/algorithm.py:41: UserWarning: Nevergrad package could not be imported. If you are planning to useany hyperparameter optimization algo, consider installing itusing pip. This implies advanced usage of the tool.Note that nevergrad is compatible only with Python 3.6+
warnings.warn(
07:35:15 accuracy_checker WARNING: /usr/local/lib/python3.8/dist-packages/past/builtins/misc.py:45: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses
from imp import reload
INFO:compression.statistics.collector:Start computing statistics for algorithms : DefaultQuantization
INFO:compression.statistics.collector:Computing statistics finished
INFO:compression.pipeline.pipeline:Start algorithm: DefaultQuantization
INFO:compression.algorithms.quantization.default.algorithm:Start computing statistics for algorithm : ActivationChannelAlignment
INFO:compression.algorithms.quantization.default.algorithm:Computing statistics finished
INFO:compression.algorithms.quantization.default.algorithm:Start computing statistics for algorithms : MinMaxQuantization,FastBiasCorrection
07:35:23 accuracy_checker WARNING: /opt/intel/openvino/deployment_tools/model_optimizer/mo/back/ie_ir_ver_2/emitter.py:243: DeprecationWarning: This method will be removed in future versions. Use 'list(elem)' or iteration over elem instead.
if len(element.attrib) == 0 and len(element.getchildren()) == 0:
INFO:compression.algorithms.quantization.default.algorithm:Computing statistics finished
INFO:compression.pipeline.pipeline:Finished: DefaultQuantization
===========================================================================
INFO:compression.pipeline.pipeline:Evaluation of generated model
INFO:compression.engines.ie_engine:Start inference of 30 images
INFO:compression.engines.ie_engine:3/30 batches are processed in 0.42s
INFO:compression.engines.ie_engine:6/30 batches are processed in 0.23s
INFO:compression.engines.ie_engine:9/30 batches are processed in 0.39s
INFO:compression.engines.ie_engine:12/30 batches are processed in 0.23s
INFO:compression.engines.ie_engine:15/30 batches are processed in 0.39s
INFO:compression.engines.ie_engine:18/30 batches are processed in 0.23s
INFO:compression.engines.ie_engine:21/30 batches are processed in 0.39s
INFO:compression.engines.ie_engine:24/30 batches are processed in 0.39s
INFO:compression.engines.ie_engine:27/30 batches are processed in 0.20s
INFO:compression.engines.ie_engine:30/30 batches are processed in 0.36s
INFO:compression.engines.ie_engine:Inference finished
mean_iou : 0.41817569033127566
```