# Chinese Ordering System
# Run with the container image on an Ubuntu 20.04.2 host (11th Gen Intel Core system)
```
docker pull chungyehwangai/chinese_ordering_system:0.7
docker run -it -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=$DISPLAY -v ~/Downloads:/mnt --device /dev/dri:/dev/dri --group-add=$(stat -c "%g" /dev/dri/render*) --rm chungyehwangai/chinese_ordering_system:0.7
cd /mnt
ln -s /home/openvino/chinese_ordering .
cd /mnt/chinese_ordering/Chinese.Ordering.System/
export MODEL_DIR=/mnt/chinese_ordering/Chinese.Ordering.System/ir
export VOCAB_DIR=/mnt/chinese_ordering/Chinese.Ordering.System/vocab/
export PARAGRAPH_FILE=mc_paragraph.txt
python3.7 ordering_system_demo_pipeline.py \
-m_mel ${MODEL_DIR}/mel.xml \
-m_mg ${MODEL_DIR}/melgan.xml \
-m_d ${MODEL_DIR}/decoder.xml \
-m_e ${MODEL_DIR}/encoder.xml \
-m_dp ${MODEL_DIR}/duration_predictor.xml \
-m_b ${MODEL_DIR}/bert.xml \
-m_a ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.xml \
-p mds09x_cn \
-para ${PARAGRAPH_FILE} \
-i audio/sample.wav \
-v_b ${VOCAB_DIR}/vocab_bert.txt \
-v_p ${VOCAB_DIR}/vocab_pinyin.txt \
-L ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.scorer
```
# Detailed steps
Repository: https://github.com/FengYen-Chang/Chinese.Ordering.System
### ASR (DeepSpeech)
```
git clone https://github.com/FengYen-Chang/Chinese.Ordering.System.git
cd Chinese.Ordering.System/
mkdir ir
python3.7 -m pip install ds-ctcdecoder==0.9.3
cd model-conversion/mozilla-deepspeech-0.9.3-zh-CN
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models-zh-CN.pbmm
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models-zh-CN.scorer
python3.7 pbmm_to_pb.py deepspeech-0.9.3-models-zh-CN.pbmm deepspeech-0.9.3-models-zh-CN.pb
python3.7 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py \
  --input_model deepspeech-0.9.3-models-zh-CN.pb \
  --freeze_placeholder_with_value="input_lengths->16" \
  --input=input_node,previous_state_h,previous_state_c \
  --input_shape=[1,16,19,26],[1,2048],[1,2048] \
  --disable_nhwc_to_nchw \
  --output=logits,cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd,cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd_1
mv deepspeech-0.9.3-models-zh-CN.xml ../../ir
mv deepspeech-0.9.3-models-zh-CN.bin ../../ir
mv deepspeech-0.9.3-models-zh-CN.mapping ../../ir
mv deepspeech-0.9.3-models-zh-CN.scorer ../../ir
cd ../../
python3.7 -m pip install -r requirements.txt
python3.7 speech_recognition_demo.py -i audio/sample.wav -p mds09x_cn -m ir/deepspeech-0.9.3-models-zh-CN.xml
```
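To sanity-check the converted ASR IR outside the demo script, it can be loaded with the OpenVINO 2021 Python API. This is only a minimal sketch that feeds a dummy 16-frame feature chunk and zeroed LSTM states; it assumes Model Optimizer kept the TensorFlow input names (input_node, previous_state_h, previous_state_c). The real MFCC extraction and CTC decoding are done by speech_recognition_demo.py.
```
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="ir/deepspeech-0.9.3-models-zh-CN.xml",
                      weights="ir/deepspeech-0.9.3-models-zh-CN.bin")
exec_net = ie.load_network(network=net, device_name="CPU")

# Dummy inputs matching the shapes passed to mo.py above:
# one chunk of 16 time steps x 19 context frames x 26 MFCC features,
# plus zeroed LSTM hidden/cell states for the first chunk.
features = np.zeros((1, 16, 19, 26), dtype=np.float32)
state_h = np.zeros((1, 2048), dtype=np.float32)
state_c = np.zeros((1, 2048), dtype=np.float32)

out = exec_net.infer({"input_node": features,
                      "previous_state_h": state_h,
                      "previous_state_c": state_c})
for name, blob in out.items():
    print(name, blob.shape)   # logits plus the two GatherNd state outputs
```
For an actual transcription, the two state outputs are fed back in as previous_state_h/previous_state_c for the next chunk, which is how the streaming demo processes a full audio file.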
### TTS
```
git clone https://github.com/FengYen-Chang/Chinese.Ordering.System.git
cd Chinese.Ordering.System
git submodule update --init ./extension/mandarin-tts
git submodule update --init ./extension/melgan
cd ./extension/mandarin-tts
pip install gdown
gdown https://drive.google.com/uc?id=11mBus5gn69_KwvNec9Zy9jjTs3LgHdx3
tar xf fastspeech2u_ckpt.tar.gz
sudo apt-get install ffmpeg
sudo apt-get install liblzma-dev
pip3 install -r requirements.txt
python3 export_onnx.py --model_file ./ckpt/hanzi/checkpoint_300000.pth.tar --text_file ./test.txt --channel 2 --duration_control 1.0 --output_dir ./output
cd onnx
pip3 install -r /opt/intel/openvino/deployment_tools/model_optimizer/requirements_onnx.txt
python3 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model mel.onnx
python3 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model decoder.onnx
python3 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model duration_predictor.onnx
python3 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model encoder.onnx
cd ../../../extension/melgan
python3 export_onnx.py
cd onnx
python3 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model melgan.onnx
```
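Optionally, the exported ONNX graphs can be sanity-checked with the onnx package (pulled in by requirements_onnx.txt) before running Model Optimizer; a minimal sketch:
```
import glob
import onnx

# Run this inside either onnx/ directory (mandarin-tts or melgan).
for path in sorted(glob.glob("*.onnx")):
    model = onnx.load(path)
    onnx.checker.check_model(model)   # raises if the exported graph is malformed
    print(path, "opset", model.opset_import[0].version)
```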
To test the converted models, run tts.py:
```
python3 tts.py \
  -m_mel /your_root/Chinese.Ordering.System/extension/mandarin-tts/onnx/mel.xml \
  -m_mg /your_root/Chinese.Ordering.System/extension/melgan/onnx/melgan.xml \
  -m_d /your_root/Chinese.Ordering.System/extension/mandarin-tts/onnx/decoder.xml \
  -m_e /your_root/Chinese.Ordering.System/extension/mandarin-tts/onnx/encoder.xml \
  -m_dp /your_root/Chinese.Ordering.System/extension/mandarin-tts/onnx/duration_predictor.xml \
  -i 一百五一百五一百五一百五一百五一百五一百五一百五一百五一百五
```
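tts.py chains the converted IRs together (roughly: encoder → duration predictor → decoder → mel → MelGAN vocoder). To see what each network expects before wiring them up yourself, you can list the inputs and outputs of every IR; a minimal sketch, assuming the .xml/.bin pairs are still in their onnx/ directories (adjust paths to your checkout):
```
from openvino.inference_engine import IECore

MODELS = {
    "encoder":            "extension/mandarin-tts/onnx/encoder.xml",
    "duration_predictor": "extension/mandarin-tts/onnx/duration_predictor.xml",
    "decoder":            "extension/mandarin-tts/onnx/decoder.xml",
    "mel":                "extension/mandarin-tts/onnx/mel.xml",
    "melgan":             "extension/melgan/onnx/melgan.xml",
}

ie = IECore()
for name, xml in MODELS.items():
    net = ie.read_network(model=xml, weights=xml.replace(".xml", ".bin"))
    print(name)
    for i, info in net.input_info.items():
        print("  input ", i, info.input_data.shape)
    for o, data in net.outputs.items():
        print("  output", o, data.shape)
```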
### BERT
```
git clone https://github.com/FengYen-Chang/Chinese.Ordering.System.git
cd Chinese.Ordering.System
git submodule update --init ./extension/cmrc2018/
cd menu-generator
python3 mc_menu_generator.py
cd ../extension/cmrc2018/baseline
apt update
apt install software-properties-common
add-apt-repository ppa:deadsnakes/ppa
apt update
apt install python3.7
python3.7 -m pip install tensorflow==1.15.0
python3.7 run_cmrc2018_drcd_baseline.py \
  --vocab_file=/mnt/roberta-wwm-base-distill/model/3layers_large/vocab.txt \
  --bert_config_file=/mnt/roberta-wwm-base-distill/model/3layers_large/bert_config.json \
  --init_checkpoint=/mnt/roberta-wwm-base-distill/model/3layers_large/bert_model.ckpt \
  --do_train=True \
  --train_file=../../../menu-generator/test_1.json \
  --do_predict=True \
  --predict_file=../../../menu-generator/test_1.json \
  --train_batch_size=32 \
  --num_train_epochs=40 \
  --max_seq_length=256 \
  --doc_stride=128 \
  --learning_rate=3e-5 \
  --save_checkpoints_steps=1000 \
  --output_dir=aa \
  --do_lower_case=False \
  --use_tpu=False
```
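run_cmrc2018_drcd_baseline.py expects the menu Q&A file (test_1.json from mc_menu_generator.py) in the CMRC2018/SQuAD-style layout. The sketch below only illustrates that layout with a hypothetical entry; the exact keys and values emitted by mc_menu_generator.py may differ, so check its real output:
```
import json

# Hypothetical single-entry dataset in CMRC2018 (SQuAD-like) form.
example = {
    "version": "v1.0",
    "data": [{
        "title": "menu",
        "id": "MENU_0",
        "paragraphs": [{
            "id": "MENU_0_P0",
            "context": "麦当劳目前的餐点有:大麦克价格为72元…",
            "qas": [{
                "id": "MENU_0_Q0",
                "question": "大麦克价格为多少元?",
                "answers": [{"text": "72", "answer_start": 16}],
            }],
        }],
    }],
}

with open("test_example.json", "w", encoding="utf-8") as f:
    json.dump(example, f, ensure_ascii=False, indent=2)
```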
### numpy 1.21.0 causes the error below; downgrading to numpy 1.18.1 (`python3.7 -m pip install numpy==1.18.1`) resolves it.
```
INFO:tensorflow:***** Running predictions *****
I0701 06:39:57.537319 140599964568448 run_cmrc2018_drcd_baseline.py:1257] ***** Running predictions *****
INFO:tensorflow: Num orig examples = 76
I0701 06:39:57.537443 140599964568448 run_cmrc2018_drcd_baseline.py:1258] Num orig examples = 76
INFO:tensorflow: Num split examples = 111
I0701 06:39:57.537497 140599964568448 run_cmrc2018_drcd_baseline.py:1259] Num split examples = 111
INFO:tensorflow: Batch size = 8
I0701 06:39:57.537549 140599964568448 run_cmrc2018_drcd_baseline.py:1260] Batch size = 8
INFO:tensorflow:Calling model_fn.
I0701 06:39:57.556351 140599964568448 estimator.py:1148] Calling model_fn.
INFO:tensorflow:Running infer on CPU
I0701 06:39:57.556496 140599964568448 tpu_estimator.py:3124] Running infer on CPU
INFO:tensorflow:*** Features ***
I0701 06:39:57.556693 140599964568448 run_cmrc2018_drcd_baseline.py:647] *** Features ***
INFO:tensorflow: name = input_ids, shape = (?, 256)
I0701 06:39:57.556771 140599964568448 run_cmrc2018_drcd_baseline.py:649] name = input_ids, shape = (?, 256)
INFO:tensorflow: name = input_mask, shape = (?, 256)
I0701 06:39:57.556828 140599964568448 run_cmrc2018_drcd_baseline.py:649] name = input_mask, shape = (?, 256)
INFO:tensorflow: name = input_span_mask, shape = (?, 256)
I0701 06:39:57.556879 140599964568448 run_cmrc2018_drcd_baseline.py:649] name = input_span_mask, shape = (?, 256)
INFO:tensorflow: name = segment_ids, shape = (?, 256)
I0701 06:39:57.556931 140599964568448 run_cmrc2018_drcd_baseline.py:649] name = segment_ids, shape = (?, 256)
INFO:tensorflow: name = unique_ids, shape = (?,)
I0701 06:39:57.556982 140599964568448 run_cmrc2018_drcd_baseline.py:649] name = unique_ids, shape = (?,)
ERROR:tensorflow:Error recorded from prediction_loop: Cannot convert a symbolic Tensor (bert/encoder/strided_slice:0) to a numpy array.
E0701 06:39:57.594310 140599964568448 error_handling.py:75] Error recorded from prediction_loop: Cannot convert a symbolic Tensor (bert/encoder/strided_slice:0) to a numpy array.
INFO:tensorflow:prediction_loop marked as finished
I0701 06:39:57.594428 140599964568448 error_handling.py:101] prediction_loop marked as finished
WARNING:tensorflow:Reraising captured error
W0701 06:39:57.594482 140599964568448 error_handling.py:135] Reraising captured error
Traceback (most recent call last):
File "run_cmrc2018_drcd_baseline.py", line 1366, in <module>
tf.app.run()
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/platform/app.py", line 40, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "/usr/local/lib/python3.7/dist-packages/absl/app.py", line 312, in run
_run_main(main, args)
File "/usr/local/lib/python3.7/dist-packages/absl/app.py", line 258, in _run_main
sys.exit(main(argv))
File "run_cmrc2018_drcd_baseline.py", line 1274, in main
predict_input_fn, yield_single_examples=True):
File "/usr/local/lib/python3.7/dist-packages/tensorflow_estimator/python/estimator/tpu/tpu_estimator.py", line 3078, in predict
rendezvous.raise_errors()
File "/usr/local/lib/python3.7/dist-packages/tensorflow_estimator/python/estimator/tpu/error_handling.py", line 136, in raise_errors
six.reraise(typ, value, traceback)
File "/usr/lib/python3/dist-packages/six.py", line 703, in reraise
raise value
File "/usr/local/lib/python3.7/dist-packages/tensorflow_estimator/python/estimator/tpu/tpu_estimator.py", line 3072, in predict
yield_single_examples=yield_single_examples):
File "/usr/local/lib/python3.7/dist-packages/tensorflow_estimator/python/estimator/estimator.py", line 622, in predict
features, None, ModeKeys.PREDICT, self.config)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_estimator/python/estimator/tpu/tpu_estimator.py", line 2857, in _call_model_fn
config)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_estimator/python/estimator/estimator.py", line 1149, in _call_model_fn
model_fn_results = self._model_fn(features=features, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_estimator/python/estimator/tpu/tpu_estimator.py", line 3126, in _model_fn
features, labels, is_export_mode=is_export_mode)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_estimator/python/estimator/tpu/tpu_estimator.py", line 1663, in call_without_tpu
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_estimator/python/estimator/tpu/tpu_estimator.py", line 1994, in _call_model_fn
estimator_spec = self._model_fn(features=features, **kwargs)
File "run_cmrc2018_drcd_baseline.py", line 666, in model_fn
use_one_hot_embeddings=use_one_hot_embeddings)
File "run_cmrc2018_drcd_baseline.py", line 601, in create_model
use_one_hot_embeddings=use_one_hot_embeddings)
File "/mnt/chinese_ordering/Chinese.Ordering.System/extension/cmrc2018/baseline/modeling.py", line 202, in __init__
input_ids, input_mask)
File "/mnt/chinese_ordering/Chinese.Ordering.System/extension/cmrc2018/baseline/modeling.py", line 552, in create_attention_mask_from_input_mask
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/array_ops.py", line 2560, in ones
output = _constant_if_small(one, shape, dtype, name)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/array_ops.py", line 2295, in _constant_if_small
if np.prod(shape) < 1000:
File "<__array_function__ internals>", line 6, in prod
File "/usr/local/lib/python3.7/dist-packages/numpy/core/fromnumeric.py", line 3052, in prod
keepdims=keepdims, initial=initial, where=where)
File "/usr/local/lib/python3.7/dist-packages/numpy/core/fromnumeric.py", line 86, in _wrapreduction
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/ops.py", line 736, in __array__
" array.".format(self.name))
NotImplementedError: Cannot convert a symbolic Tensor (bert/encoder/strided_slice:0) to a numpy array.
```
```
python3.7 export_pb.py \
  --vocab_file=/mnt/roberta-wwm-base-distill/model/3layers_large/vocab.txt \
  --bert_config_file=/mnt/roberta-wwm-base-distill/model/3layers_large/bert_config.json \
  --init_checkpoint=aa/model.ckpt-138 \
  --do_train=True \
  --train_file=../../../menu-generator/test_1.json \
  --do_predict=True \
  --predict_file=../../../menu-generator/test_1.json \
  --train_batch_size=32 \
  --num_train_epochs=40 \
  --max_seq_length=256 \
  --doc_stride=128 \
  --learning_rate=3e-5 \
  --save_checkpoints_steps=1000 \
  --output_dir=aa \
  --do_lower_case=False \
  --use_tpu=False
python3.7 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model=aa/inference_graph.pb \
  --input "IteratorGetNext:0{i32}[1 256],IteratorGetNext:1{i32}[1 256],IteratorGetNext:3{i32}[1 256]" \
  --disable_nhwc_to_nchw
# Rename the BERT IR and move it into the ir directory used by the pipeline demo
mv inference_graph.xml ../../../ir/bert.xml
mv inference_graph.bin ../../../ir/bert.bin
mv inference_graph.mapping ../../../ir/bert.mapping
```
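As with the ASR model, the resulting bert.xml can be smoke-tested with the OpenVINO Python API before it is used in the pipeline. The MO command above fixes all three inputs to shape [1, 256]; the sketch below does not assume particular input names and simply feeds int32 zeros to whatever inputs the IR reports (real inputs come from the BERT tokenizer, the paragraph, and the question text):
```
import numpy as np
from openvino.inference_engine import IECore

# Run from the repository root, after moving bert.xml/bert.bin into ir/ as above.
ie = IECore()
net = ie.read_network(model="ir/bert.xml", weights="ir/bert.bin")
exec_net = ie.load_network(network=net, device_name="CPU")

# Dummy int32 tensors of shape [1, 256] for every input (ids, mask, segments).
feed = {name: np.zeros(info.input_data.shape, dtype=np.int32)
        for name, info in net.input_info.items()}

out = exec_net.infer(feed)
for name, blob in out.items():
    print(name, blob.shape)   # typically the start/end logits used for answer span extraction
```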
For the ordering system pipeline demo, copy the remaining TTS IRs into the ir directory (the ASR and BERT IRs were moved there above) and install the remaining Python dependencies:
```
# From the repository root (Chinese.Ordering.System)
cp extension/mandarin-tts/onnx/*.xml extension/mandarin-tts/onnx/*.bin ir/
cp extension/melgan/onnx/melgan.xml extension/melgan/onnx/melgan.bin ir/
pip install pyyaml
pip install pycnnum
pip install ds-ctcdecoder==0.9.3
```
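pycnnum converts between Chinese numerals and Python numbers, presumably used by the pipeline for amounts such as the prices and the TTS test string 一百五 above. A minimal sanity check, assuming pycnnum's cn2num/num2cn helpers:
```
from pycnnum import cn2num, num2cn

print(cn2num("七十二"))   # Chinese numeral -> int, e.g. 72
print(num2cn(150))        # int -> Chinese numeral string
```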
```
export MODEL_DIR=/mnt/chinese_ordering/Chinese.Ordering.System/ir
export VOCAB_DIR=/mnt/chinese_ordering/Chinese.Ordering.System/vocab/
export PARAGRAPH_FILE=mc_paragraph.txt
python3.7 ordering_system_demo_pipeline.py \
-m_mel ${MODEL_DIR}/mel.xml \
-m_mg ${MODEL_DIR}/melgan.xml \
-m_d ${MODEL_DIR}/decoder.xml \
-m_e ${MODEL_DIR}/encoder.xml \
-m_dp ${MODEL_DIR}/duration_predictor.xml \
-m_b ${MODEL_DIR}/bert.xml \
-m_a ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.xml \
-p mds09x_cn \
-para ${PARAGRAPH_FILE} \
-i audio/sample.wav \
-v_b ${VOCAB_DIR}/vocab_bert.txt \
-v_p ${VOCAB_DIR}/vocab_pinyin.txt \
-L ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.scorer
```
Example paragraph (mc_paragraph.txt):
麦当劳目前的餐点有:大麦克价格为72元、双层牛肉吉事堡价格为62元、嫩煎鸡腿堡价格为82元、麦香鸡价格为44元、麦克鸡块(6块)价格为60元、麦克鸡块(10块)价格为100元、劲辣鸡腿堡价格为72元、麦脆(2块)价格为110元、麦脆鸡翅(2块)价格为90元、黄金起司猪排堡价格为52元、麦香鱼价格为44元、烟熏鸡肉长堡价格为74元、姜烧猪肉长堡价格为74元、BLT 安格斯黑牛堡价格为109元、BLT 辣脆鸡腿堡价格为109元、BLT 嫩煎鸡腿堡价格为109元、蕈菇安格斯黑牛堡价格为119元、凯萨脆鸡沙拉价格为99元和义式烤鸡沙拉价格为99元。
The content of mc_paragraph.txt (a McDonald's menu of item names and prices) is taken from the BERT training data.

Full shell history from the container session, for reference (TTS and BERT model conversion):
```
root@93805c56e6de:/mnt/chinese_ordering/Chinese.Ordering.System# history
1 cd
2 apt update
3 apt install git
4 cd /mnt/
5 mkdir chinese_ordering
6 cd chinese_ordering/
7 git clone https://github.com/FengYen-Chang/Chinese.Ordering.System.git
8 cd Chinese.Ordering.System/
9 ls
10 git submodule update --init ./extension/mandarin-tts
11 git submodule update --init ./extension/melgan
12 cd ./extension/mandarin-tts
13 pip install gdown
14 gdown https://drive.google.com/uc?id=11mBus5gn69_KwvNec9Zy9jjTs3LgHdx3
15 tar xf fastspeech2u_ckpt.tar.gz
16 sudo apt-get install ffmpeg
17 apt-get install ffmpeg
18 ls
19 pip3 install -r requirements.txt
20 apt-get install liblzma-dev
21 pip3 install -r requirements.txt
22 python3 export_onnx.py --model_file ./ckpt/hanzi/checkpoint_300000.pth.tar --text_file ./test.txt --channel 2 --duration_control 1.0 --output_dir ./output
23 ls
24 cd onnx/
25 ls
26 python3 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model mel.onnx
27 pip3 install /opt/intel/openvino/deployment_tools/model_optimizer/requirements_onnx.txt
28 pip3 install -r /opt/intel/openvino/deployment_tools/model_optimizer/requirements_onnx.txt
29 python3 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model mel.onnx
30 python3 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model decoder.onnx
31 ls
32 python3 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model duration_predictor.onnx
33 ls
34 python3 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model encoder.onnx
35 cd ../../../extension/melga
36 cd ../../../extension/melgan/
37 ls
38 python3 export_onnx.py
39 cd onnx/
40 ls
41 python3 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model melgan.onnx
42 ls
43 cd ../../
44 ls
45 cd ../
46 ls
47 python3 tts.py -m_mel /mnt/chinese_ordering/Chinese.Ordering.System/extension/mandarin-tts/onnx/mel.xml -m_mg /mnt/chinese_ordering/Chinese.Ordering.System/extension/melgan/onnx/melgan.xml -m_d /mnt/chinese_ordering/Chinese.Ordering.System/extension/mandarin-tts/onnx/decoder.xml -m_e /mnt/chinese_ordering/Chinese.Ordering.System/extension/mandarin-tts/onnx/encoder.xml -m_dp /mnt/chinese_ordering/Chinese.Ordering.System/extension/mandarin-tts/onnx/duration_predictor.xml -i 一百五一百五一百五一百五一百五一百五一百五一百五一百五一百五
48 git submodule update --init ./extension/cmrc2018/
49 cd menu-generato
50 ls
51 cd menu-generator/
52 python mc_menu_generator.py
53 python3 mc_menu_generator.py
54 cd ../extension/cmrc2018/baseline/
55 pip install tensorflow==1.15.0
56 pip3 install tensorflow==1.15.0
57 python3 -m pip install tensorflow==1.15.0
58 pip3 list | grep tensor
59 pip3 install tensorflow==1.15
60 python3 -m venv
61 cd
62 python3 -m venv -h
63 apt install software-properties-common
64 add-apt-repository ppa:deadsnakes/ppa
65 apt update
66 apt install python3.7
67 which python
68 which python3
69 which python3.7
70 ls -l /usr/bin/python3
71 python3.7
72 python3.7 -m pip install tensorflow==1.15.0
73 cd /mnt/chinese_ordering/Chinese.Ordering.System/
74 ls
75 cd extension/
76 ls
77 cd Chinese.BERT.OpenVINO/
78 ls
79 ls ../cmrc2018/
80 cd ../../
81 find | grep robeta
82 find | grep roberta
83 git submodule update --init ./extension/Chinese.BERT.OpenVINO
84 history
```
Shell history for BERT + DeepSpeech:
```
1 cd /mnt/chinese_ordering/Chinese.Ordering.System/
2 ls
3 cd extension/cmrc2018/
4 cd ba
5 ls
6 cd baseline/
7 python3.7 run_cmrc2018_drcd_baseline.py --vocab_file=/mnt/roberta-wwm-base-distill/model/3layers_large/vocab.txt --bert_config_file=/mnt/roberta-wwm-base-distill/model/3layers_large/bert_config.json --init_checkpoint=/mnt/roberta-wwm-base-distill/model/3layers_large/bert_model.ckpt --do_train=True --train_file=../../../menu-generator/test_1.json --do_predict=True --predict_file=../../../menu-generator/test_1.json --train_batch_size=32 --num_train_epochs=40 --max_seq_length=256 --doc_stride=128 --learning_rate=3e-5 --save_checkpoints_steps=1000 --output_dir=aa --do_lower_case=False --use_tpu=False
8 ls aa
9 ls
10 python3.7 -m pip list | grep numpy
11 python3.7 export_pb.py --vocab_file=/mnt/roberta-wwm-base-distill/model/3layers_large/vocab.txt --bert_config_file=/mnt/roberta-wwm-base-distill/model/3layers_large/bert_config.json --init_checkpoint=aa/model.ckpt-138 --do_train=True --train_file=../../../menu-generator/test_1.json --do_predict=True --predict_file=../../../menu-generator/test_1.json --train_batch_size=32 --num_train_epochs=40 --max_seq_length=256 --doc_stride=128 --learning_rate=3e-5 --save_checkpoints_steps=1000 --output_dir=aa --do_lower_case=False --use_tpu=False
12 ls
13 python3.7 --version
14 python3.7 -m pip list
15 python3.7 -m pip install numpy==1.18.1
16 python3.7 export_pb.py --vocab_file=/mnt/roberta-wwm-base-distill/model/3layers_large/vocab.txt --bert_config_file=/mnt/roberta-wwm-base-distill/model/3layers_large/bert_config.json --init_checkpoint=aa/model.ckpt-138 --do_train=True --train_file=../../../menu-generator/test_1.json --do_predict=True --predict_file=../../../menu-generator/test_1.json --train_batch_size=32 --num_train_epochs=40 --max_seq_length=256 --doc_stride=128 --learning_rate=3e-5 --save_checkpoints_steps=1000 --output_dir=aa --do_lower_case=False --use_tpu=False
17 ls
18 ls aa
19 python3.7 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model=aa/inference_graph.pb --input "IteratorGetNext:0{i32}[1 256],IteratorGetNext:1{i32}[1 256],IteratorGetNext:3{i32}[1 256]" --disable_nhwc_to_nchw
20 cat /opt/intel/openvino_2021/deployment_tools/model_optimizer/requirements.txt
21 python3.7 -m pip install networkx>=1.11
22 ls
23 rm '=1.11'
24 python3.7 -m pip install 'networkx>=1.11'
25 python3.7 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model=aa/inference_graph.pb --input "IteratorGetNext:0{i32}[1 256],IteratorGetNext:1{i32}[1 256],IteratorGetNext:3{i32}[1 256]" --disable_nhwc_to_nchw
26 python3.7 -m pip install 'defusedxml>=0.5.0'
27 python3.7 /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py --input_model=aa/inference_graph.pb --input "IteratorGetNext:0{i32}[1 256],IteratorGetNext:1{i32}[1 256],IteratorGetNext:3{i32}[1 256]" --disable_nhwc_to_nchw
28 cd ../../
29 ls
30 mkdir ir
31 cd ir
32 cp ../cmrc2018/baseline/inference_graph.* .
33 ls
34 mkdir bert
35 mv inference_graph.* bert/
36 ls
37 mv ir ../
38 cd ../
39 mv ir ../
40 cd ../ir/
41 ls e
42 ls ../extension/
43 ls ../extension/mandarin-tts/
44 ls ../extension/mandarin-tts/onnx/
45 ls ../extension/mandarin-tts/onnx/*xml .
46 cp ../extension/mandarin-tts/onnx/*xml .
47 cp ../extension/mandarin-tts/onnx/*bin .
48 cp ../extension/mandarin-tts/onnx/*mappings .
49 cp ../extension/mandarin-tts/onnx/*mapping .
50 ls
51 cp ../extension/melgan/onnx/melgan.* .
52 ls
53 rm melgan.onnx
54 ls
55 cd ../
56 ls
57 python3.7 -m pip install ds-ctcdecoder==0.9.3
58 cd cd model-conversion/mozilla-deepspeech-0.9.3-zh-CN
59 cd model-conversion/mozilla-deepspeech-0.9.3-zh-CN
60 wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models-zh-CN.pbmm
61 apt install wget
62 wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models-zh-CN.pbmm
63 python3.7 pbmm_to_pb.py deepspeech-0.9.3-models-zh-CN.pbmm deepspeech-0.9.3-models-zh-CN.pb
64 python3.7 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py --input_model deepspeech-0.9.3-models-zh-CN.pb --freeze_placeholder_with_value="input_lengths->16" --input=input_node,previous_state_h,previous_state_c --input_shape=[1,16,19,26],[1,2048],[1,2048] --disable_nhwc_to_nchw --output=logits,cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd,cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd_1
65 python3.7 -m pip install "test-generator==0.1.1"
66 ls ../
67 ls ../../
68 ls ../../ir/
69 mv deepspeech-0.9.3-models-zh-CN.xml ../../ir
70 mv deepspeech-0.9.3-models-zh-CN.bin ../../ir
71 mv deepspeech-0.9.3-models-zh-CN.mapping ../../ir
72 cd ../../
73 python3 speech_recognition_demo.py -i audio/sample.wav -p mds09x_cn -m ir/deepspeech-0.9.3-models-zh-CN.xml
74 python3.7 -m pip install pyyaml
75 python3 speech_recognition_demo.py -i audio/sample.wav -p mds09x_cn -m ir/deepspeech-0.9.3-models-zh-CN.xml
76 python3.7 speech_recognition_demo.py -i audio/sample.wav -p mds09x_cn -m ir/deepspeech-0.9.3-models-zh-CN.xml
77 python3.7 -m pip install tqdm
78 python3.7 speech_recognition_demo.py -i audio/sample.wav -p mds09x_cn -m ir/deepspeech-0.9.3-models-zh-CN.xml
79 cat requirements.txt
80 python3.7 -m pip install -r requirements.txt
81 python3.7 speech_recognition_demo.py -i audio/sample.wav -p mds09x_cn -m ir/deepspeech-0.9.3-models-zh-CN.xml
82 cd ir/
83 ls
84 mv bert/inference_graph.xml bert.xml
85 mv bert/inference_graph.bin bert.bin
86 mv bert/inference_graph.mapping bert.mapping
87 rm -rf bert
88 export MODEL_DIR=/mnt/chinese_ordering/Chinese.Ordering.System/ir
89 ls ../../../roberta-wwm-base-distill
90 ls ../../../roberta-wwm-base-distill/model/
91 ls ../../../roberta-wwm-base-distill/model/3layers_large/
92 vi ../../../roberta-wwm-base-distill/model/3layers_large/vocab.txt
93 cp ../../../roberta-wwm-base-distill/model/3layers_large/vocab.txt bert_vocab.txt
94 ls ../extension/mandarin-tts/
95 cd ../
96 ls vocab/
97 export VOCAB_DIR=/mnt/chinese_ordering/Chinese.Ordering.System/vocab/
98 find | grep mc_pa
99 ls
100 export PARAGRAPH_FILE=mc_paragraph.txt
101 ls
102 python ordering_system_demo_pipeline.py -m_mel ${MODEL_DIR}/mel.xml -m_mg ${MODEL_DIR}/melgan.xml -m_d ${MODEL_DIR}/decoder.xml -m_e ${MODEL_DIR}/encoder.xml -m_dp ${MODEL_DIR}/duration_predictor.xml -m_b ${MODEL_DIR}/bert.xml -m_a ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.xml -p mds09x_cn -para ${PARAGRAPH_FILE} -i audio/sample.wav -v_b ${VOCAB_DIR}/vocab_bert.txt -v_p ${VOCAB_DIR}/vocab_pinyin.txt -L ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.scorer
103 python3.7 ordering_system_demo_pipeline.py -m_mel ${MODEL_DIR}/mel.xml -m_mg ${MODEL_DIR}/melgan.xml -m_d ${MODEL_DIR}/decoder.xml -m_e ${MODEL_DIR}/encoder.xml -m_dp ${MODEL_DIR}/duration_predictor.xml -m_b ${MODEL_DIR}/bert.xml -m_a ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.xml -p mds09x_cn -para ${PARAGRAPH_FILE} -i audio/sample.wav -v_b ${VOCAB_DIR}/vocab_bert.txt -v_p ${VOCAB_DIR}/vocab_pinyin.txt -L ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.scorer
104 python3 ordering_system_demo_pipeline.py -m_mel ${MODEL_DIR}/mel.xml -m_mg ${MODEL_DIR}/melgan.xml -m_d ${MODEL_DIR}/decoder.xml -m_e ${MODEL_DIR}/encoder.xml -m_dp ${MODEL_DIR}/duration_predictor.xml -m_b ${MODEL_DIR}/bert.xml -m_a ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.xml -p mds09x_cn -para ${PARAGRAPH_FILE} -i audio/sample.wav -v_b ${VOCAB_DIR}/vocab_bert.txt -v_p ${VOCAB_DIR}/vocab_pinyin.txt -L ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.scorer
105 pip3 install yaml
106 pip3 install -r requirements.txt
107 python3 ordering_system_demo_pipeline.py -m_mel ${MODEL_DIR}/mel.xml -m_mg ${MODEL_DIR}/melgan.xml -m_d ${MODEL_DIR}/decoder.xml -m_e ${MODEL_DIR}/encoder.xml -m_dp ${MODEL_DIR}/duration_predictor.xml -m_b ${MODEL_DIR}/bert.xml -m_a ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.xml -p mds09x_cn -para ${PARAGRAPH_FILE} -i audio/sample.wav -v_b ${VOCAB_DIR}/vocab_bert.txt -v_p ${VOCAB_DIR}/vocab_pinyin.txt -L ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.scorer
108 ls ir/
109 findls
110 ls
111 ls model-conversion/
112 ls model-conversion/mozilla-deepspeech-0.9.3-zh-CN/
113 find model-conversion/
114 cd ir
115 wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models-zh-CN.scorer
116 cd ../
117 python3 ordering_system_demo_pipeline.py -m_mel ${MODEL_DIR}/mel.xml -m_mg ${MODEL_DIR}/melgan.xml -m_d ${MODEL_DIR}/decoder.xml -m_e ${MODEL_DIR}/encoder.xml -m_dp ${MODEL_DIR}/duration_predictor.xml -m_b ${MODEL_DIR}/bert.xml -m_a ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.xml -p mds09x_cn -para ${PARAGRAPH_FILE} -i audio/sample.wav -v_b ${VOCAB_DIR}/vocab_bert.txt -v_p ${VOCAB_DIR}/vocab_pinyin.txt -L ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.scorer
118 ls
119 ./tts.wav
120 clear
121 history
122 history | grep erport
123 history | grep export
124 clear
125 python ordering_system_demo_pipeline.py -m_mel ${MODEL_DIR}/mel.xml -m_mg ${MODEL_DIR}/melgan.xml -m_d ${MODEL_DIR}/decoder.xml -m_e ${MODEL_DIR}/encoder.xml -m_dp ${MODEL_DIR}/duration_predictor.xml -m_b ${MODEL_DIR}/bert.xml -m_a ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.xml -p mds09x_cn -para ${PARAGRAPH_FILE} -i audio/sample.wav -v_b ${VOCAB_DIR}/vocab_bert.txt -v_p ${VOCAB_DIR}/vocab_pinyin.txt -L ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.scorer
126 clear
127 clearpython ordering_system_demo_pipeline.py -m_mel ${MODEL_DIR}/mel.xml -m_mg ${MODEL_DIR}/melgan.xml -m_d ${MODEL_DIR}/decoder.xml -m_e ${MODEL_DIR}/encoder.xml -m_dp ${MODEL_DIR}/duration_predictor.xml -m_b ${MODEL_DIR}/bert.xml -m_a ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.xml -p mds09x_cn -para ${PARAGRAPH_FILE} -i audio/sample.wav -v_b ${VOCAB_DIR}/vocab_bert.txt -v_p ${VOCAB_DIR}/vocab_pinyin.txt \
128 python3.7 ordering_system_demo_pipeline.py -m_mel ${MODEL_DIR}/mel.xml -m_mg ${MODEL_DIR}/melgan.xml -m_d ${MODEL_DIR}/decoder.xml -m_e ${MODEL_DIR}/encoder.xml -m_dp ${MODEL_DIR}/duration_predictor.xml -m_b ${MODEL_DIR}/bert.xml -m_a ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.xml -p mds09x_cn -para ${PARAGRAPH_FILE} -i audio/sample.wav -v_b ${VOCAB_DIR}/vocab_bert.txt -v_p ${VOCAB_DIR}/vocab_pinyin.txt -L ${MODEL_DIR}/deepspeech-0.9.3-models-zh-CN.scorer
129 history
root@56be06b27ef8:/mnt/chinese_ordering/Chinese.Ordering.System#
```