# Ken0001-train
###### tags: `note`
```python=
from argparse import ArgumentParser
import read_data as rd
import os
from sklearn.model_selection import train_test_split
from ML_DenseNet import mldensenet
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import ReduceLROnPlateau, LearningRateScheduler
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix, precision_score, recall_score
```
## argparse
```python=
# Command-line interface: dataset path, model name, epoch count, batch size.
parser = ArgumentParser()
parser.add_argument("-d", "--dataset", help="Dataset", dest="dataset", default="none")
parser.add_argument("-m", "--model", help="Model", dest="model", default="none")
# Defaults are plain ints: argparse only funnels *string* defaults through
# `type`, so quoting them ("90", "16") worked by accident — int literals
# are the correct, equivalent form.
parser.add_argument("-e", "--epoch", help="Epoch", dest="epoch", type=int, default=90)
parser.add_argument("-b", "--batch_size", help="Batch size", dest="batch_size", type=int, default=16)
args = parser.parse_args()
print("|-------------Training info-------------")
print("|-Dataset: ", args.dataset)
print("|-Model: ", args.model)
print("|-Epoch: ", args.epoch)
print("|-Batch_size:", args.batch_size)
path = args.dataset
# Single-label defaults; overridden below if the dataset is multi-label.
ml = False
activation = "softmax"
loss = "categorical_crossentropy"
# Check single-label or multi-label: a "multi" entry under <dataset>/train
# switches to sigmoid + binary cross-entropy.
if "multi" in os.listdir(path + "/train"):
    ml = True
    activation = "sigmoid"
    loss = "binary_crossentropy"
print("|-Activation:", activation)
print("|-Loss: ", loss)
print("|---------------------------------------")
```
* ArgumentParser
* name or flags: 參數的名稱,可以用縮寫,但全名要有。例如:--target, -t
* help: 參數的說明
* dest: 當 parse_args() 剖析完後的參數屬性。若無指定此項,則取最長選項名稱去掉開頭的 `--`,並把其餘的 `-` 換成 `_`(例如 `--batch_size` → `batch_size`)
* default: 預設的參數值
* type: 參數值的型態
* ml = multi-label
## Read data
```python=
# Load the train/test splits from disk, then carve a validation set out of
# the training data. NOTE(review): rd.read_dataset's return layout is
# assumed to be (images, labels) arrays — confirm against read_data.py.
train_glob = path + "/train/*"
test_glob = path + "/test/*"
print("> Loading training data")
x_train, y_train = rd.read_dataset(train_glob)
print("> Loading testing data")
x_test, y_test = rd.read_dataset(test_glob)
# Hold out 20% of the training images (shuffled) for validation.
x_train, x_val, y_train, y_val = train_test_split(
    x_train, y_train, test_size=0.2, shuffle=True)
print("Train:", x_train.shape, y_train.shape)
print("Val:", x_val.shape, y_val.shape)
print("Test:", x_test.shape, y_test.shape)
print("Done!")
```
* x = image
* train
* test
* val
* y = label
* train
* test
* val
## Prepare model
```python=
# Build and compile the DenseNet classifier.
img_shape = (224, 224, 3)  # H x W x C fed to mldensenet
num_class = 5
model = mldensenet(img_shape, num_class, mltype=3, finalAct=activation)
# `lr` is a deprecated alias in tf.keras optimizers — use `learning_rate`.
opt = SGD(learning_rate=0.01, decay=0.0001, momentum=0.9, nesterov=True)
# Both accuracies are tracked because the script supports single-label
# (categorical) and multi-label (binary) datasets.
model.compile(loss=loss,
              optimizer=opt,
              metrics=["binary_accuracy", "categorical_accuracy"])
# Halve the LR after 10 stagnant epochs of val_loss, floor at 1e-5.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10,
                              mode='auto', cooldown=3, min_lr=0.00001)
def lr_scheduler(epoch, lr):
    """Step-decay schedule: multiply the learning rate by 0.1 every 30 epochs.

    The original version also tested ``epoch % 60``, but every multiple of
    60 is a multiple of 30 and had already returned — that branch was
    unreachable dead code and is removed.

    Args:
        epoch: 0-based epoch index supplied by LearningRateScheduler.
        lr: the current learning rate.

    Returns:
        The learning rate to use for this epoch.
    """
    decay_rate = 0.1
    # `and epoch` skips epoch 0 (0 % 30 == 0, but no decay at the start).
    if epoch % 30 == 0 and epoch:
        return lr * decay_rate
    return lr
# Callbacks wired into model.fit() below.
callbacks = [
    reduce_lr
    # Alternative: hard step-decay schedule instead of plateau-based decay.
    #LearningRateScheduler(lr_scheduler, verbose=1)
]
```
* opt = 優化器
* metrics = 評估方法
## Training Model
```python=
# Train with the held-out validation split; ReduceLROnPlateau may shrink
# the learning rate along the way.
train_history = model.fit(x_train, y_train,
                          batch_size=args.batch_size,
                          epochs=args.epoch,
                          verbose=1,
                          callbacks=callbacks,
                          shuffle=True,
                          validation_data=(x_val, y_val))
# model.save() does not create missing directories — ensure it exists so a
# full training run cannot die at the very last step.
os.makedirs("./model", exist_ok=True)
model.save("./model/ml_densenet.h5")
```
## Evaluate
```python=
# Pull the training curves out of fit()'s history for the plotting section.
hist = train_history.history
acc = hist['categorical_accuracy']
val_acc = hist['val_categorical_accuracy']
# Rebinds the earlier `loss` (loss-function name); plot-only from here on.
loss = hist['loss']
val_loss = hist['val_loss']
p_epochs = range(1, len(acc) + 1)
# Score the model on the untouched test split.
scores = model.evaluate(x_test, y_test, verbose=0)
print('\nTesting result:')
print('Test loss:', scores[0])
# NOTE(review): scores[1] is the first compiled metric (binary_accuracy).
print('Test accuracy:', scores[1])
```
## Plot
```python=
# Plot training/validation accuracy and loss curves on one figure.
plt.plot(p_epochs, acc, 'b', label='Training accuracy')
plt.plot(p_epochs, val_acc, 'g', label='Validation accuracy')
plt.plot(p_epochs, loss, 'r', label='Training loss')
plt.plot(p_epochs, val_loss, 'y', label='Validation loss')
plt.title('Training and Validation')
plt.xlabel('Epoch')
# Without an explicit legend() call the label= strings above never appear.
plt.legend()
plt.show()
```