# TensorFlow == 1.15
## Declaration
```python=
tf.constant() #create a constant
tf.Variable() #create a variable
x = tf.constant([[1, 2],[3, 4]], name='x')
y = tf.Variable([[5, 6],[7, 8]], name='y')
```
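Constants and Variables are only nodes in the graph; to see their values you evaluate them inside a session, and Variables must be initialized first. A minimal sketch reusing the `x` and `y` defined above:
```python=
import tensorflow as tf
x = tf.constant([[1, 2], [3, 4]], name='x')
y = tf.Variable([[5, 6], [7, 8]], name='y')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer()) #needed because y is a Variable
    print(sess.run(x)) #[[1 2] [3 4]]
    print(sess.run(y)) #[[5 6] [7 8]]
```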
## Placeholder
```python=
w1= tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2= tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
x_placeholder1 = tf.placeholder(tf.float32, shape=(3, 2), name="input") #a 3x2 array will be fed in later
a = tf.matmul(x_placeholder1, w1)
y = tf.matmul(a, w2)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer()) #these two lines always go together
    print('y is:')
    print(sess.run(y, feed_dict={x_placeholder1: [[0.7,0.9],[0.1,0.4],[0.5,0.8]]})) #feeding anything that is not a 3x2 array raises an error
```
## Arithmetic operations
```python=
tf.add(x, y, name)      #addition
tf.subtract(x, y, name) #subtraction
tf.multiply(x, y, name) #multiplication (element-wise)
tf.divide(x, y, name)   #division
tf.mod(x, y, name)      #modulo (remainder)
tf.negative(x)          #negation
tf.sqrt(x, name)        #square root
tf.abs(x, name)         #absolute value
tf_sum = x + y
tf_sub = x - y
tf_mul = x * y #element-wise multiplication, not matrix multiplication
tf_div = x / y
tf_mod = x % y
tf_neg = -x
```
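The Python operators and the `tf.*` functions build the same element-wise ops; they are only evaluated inside a session. A short runnable sketch (the tensor values are illustrative):
```python=
import tensorflow as tf
x = tf.constant([[1.0, 4.0], [9.0, 16.0]])
y = tf.constant([[5.0, 6.0], [7.0, 8.0]])
ops = {
    'add': tf.add(x, y),           #same as x + y
    'multiply': tf.multiply(x, y), #element-wise, same as x * y
    'sqrt': tf.sqrt(x),
    'negative': tf.negative(x),    #same as -x
}
with tf.Session() as sess:
    for name, op in ops.items():
        print(name)
        print(sess.run(op))
```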
## Matrix multiplication
```python=
import tensorflow as tf
matrix1 = tf.constant([[1, 2],[3, 4]], name='matrix1', dtype=tf.float32)
matrix2 = tf.constant([[5, 6],[7, 8]], name='matrix2', dtype=tf.float32)
product = tf.matmul(matrix1, matrix2) #matrix multiplication
inv = tf.matrix_inverse(matrix1)
trans = tf.matrix_transpose(matrix1)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer()) #harmless here (only constants); it initializes any Variables that have not been initialized yet
    print("product: {}\n".format(sess.run(product)))
    print("inv: {}\n".format(sess.run(inv)))
    print("trans: {}\n".format(sess.run(trans)))
    print(sess.run(matrix1))
```
## Common functions
```python=
tf.zeros()             #constant tensor filled with 0
tf.ones()              #constant tensor filled with 1
tf.fill()              #constant tensor filled with a given value
tf.range()             #constant tensor holding the sequence (start, limit, delta)
tf.random_normal()     #tensor sampled from a normal distribution
tf.random_uniform()    #tensor sampled from a uniform distribution
tf.reshape()           #reshape a tensor
tf.eye()               #identity matrix
tf.diag()              #diagonal matrix
tf.matrix_transpose()  #matrix transpose
tf.matmul()            #matrix multiplication
```
## NumPy & TensorFlow
```python=
import numpy as np
import tensorflow as tf
print("NumPy:")
print(np.zeros((2, 2)))
print(np.ones((2, 2)))
print(np.full((2, 2), 5))
print(np.arange(1, 9, 2).reshape(2, 2))
print(np.random.normal(size=(2, 2)))
print(np.random.uniform(size=(2, 2)))
print("\n")
print("TensorFlow:")
zeros = tf.zeros((2, 2))
ones = tf.ones((2, 2))
fills = tf.fill((2, 2), 5)
ranges = tf.reshape(tf.range(1, 9, 2), (2, 2))
normals = tf.random_normal((2, 2))
uniforms = tf.random_uniform((2, 2))
initializations = [zeros, ones, fills, ranges, normals, uniforms, ]
with tf.Session() as sess:
    for i in initializations:
        print(sess.run(i))
```
```python=
import numpy as np
import tensorflow as tf
print("NumPy:")
print(np.eye(2))
print(np.diag(np.arange(4)))
print(np.ones((2, 3)).T)
print(np.dot(np.arange(4).reshape(2, 2), np.arange(4).reshape(2, 2)))
print("\n")
print("TensorFlow:")
eye = tf.eye(2)
diag = tf.diag(tf.range(4))
transpose = tf.matrix_transpose(tf.ones((2, 3)))
x = tf.reshape(tf.range(4), (2, 2))
multiply = tf.matmul(x, x)
matrice = [eye, diag, transpose, multiply]
with tf.Session() as sess:
    for mat in matrice:
        print(sess.run(mat))
```
## Save and Restore Model
The TensorFlow Saver can save and restore a model or its variables for later use. Saving produces these three files:
1. model.ckpt.meta: stores the computation graph
2. model.ckpt: stores the variables in the graph
3. checkpoint: records the latest model
```python=
import tensorflow as tf
tf.reset_default_graph()
v1 = tf.Variable(tf.constant(1.0, shape=[1]), name='v1')
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name='v2')
result = tf.add(v1, v2, name="result")
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, "Saved_model/model.ckpt")
    print(sess.run(result))
```
If you only want to restore the variables, you must rebuild the same computation graph and then load the previously saved values.
```python=
import tensorflow as tf
tf.reset_default_graph()
v1 = tf.Variable(tf.constant(1.0, shape=[1]), name='v1')
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name='v2')
result = v1 + v2
saver = tf.train.Saver() #the Saver is also used to restore the model
with tf.Session() as sess:
    saver.restore(sess, "Saved_model/model.ckpt")
    print(sess.run(result))
```
Alternatively, you can restore both the previous computation graph and its variables with the following code:
```python=
import tensorflow as tf
tf.reset_default_graph()
saver = tf.train.import_meta_graph("Saved_model/model.ckpt.meta")
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, "Saved_model/model.ckpt")
    print(sess.run("result:0"))
```
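The fetch string "result:0" means output 0 of the op named result. An equivalent sketch that looks the tensor up explicitly (using the same checkpoint files as above):
```python=
import tensorflow as tf
tf.reset_default_graph()
saver = tf.train.import_meta_graph("Saved_model/model.ckpt.meta")
result = tf.get_default_graph().get_tensor_by_name("result:0") #look up the tensor by name
with tf.Session() as sess:
    saver.restore(sess, "Saved_model/model.ckpt")
    print(sess.run(result))
```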
## Optimizer
```python=
import tensorflow as tf
tf.reset_default_graph()
learning_rate = 0.01
a = tf.constant(2.0)
b = tf.constant(1.0)
c = tf.constant(3.0)
x = tf.Variable(tf.constant(1.0), name='x')
y = a*x*x + b*x + c
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(y)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(y)
# optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(y)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        optimizer_ = sess.run(optimizer)
    x_ = sess.run(x)    
    print('y has its minimum value at x = {}'.format(x_))
```
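For this quadratic y = 2x² + x + 3 the analytic minimum is at x = -b/(2a) = -0.25, so the gradient-descent loop above should converge to roughly that value. A quick sanity check in plain Python:
```python=
a, b = 2.0, 1.0
x_min = -b / (2 * a) #vertex of the parabola y = a*x^2 + b*x + c
print(x_min)         #-0.25, which the optimizer above should approach
```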
## Loss function
#### MSE
```python=
import tensorflow as tf
predict = tf.constant([-0.5, 1, 2], name='predict')
labels = tf.constant([1.0, 0.0, 0.0], name='labels')
MSE = tf.losses.mean_squared_error(predictions=predict, labels=labels) 
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    cost1_ =  sess.run(MSE)
    print('mean square is\n {} \n'.format(cost1_))
```
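tf.losses.mean_squared_error averages the squared element-wise differences, so here it returns ((-0.5-1)² + 1² + 2²)/3 ≈ 2.4167. A hand-rolled check with NumPy for comparison:
```python=
import numpy as np
predict = np.array([-0.5, 1.0, 2.0])
labels = np.array([1.0, 0.0, 0.0])
mse = np.mean((predict - labels) ** 2) #same formula as tf.losses.mean_squared_error
print(mse) #~2.4167
```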
#### Cross-Entropy
```python=
import tensorflow as tf
predict = tf.constant([-0.5, 1, 2], name='predict')
labels = tf.constant([1.0, 0.0, 0.0], name='labels')
cost1 = tf.nn.softmax_cross_entropy_with_logits(logits=predict , labels=labels)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    cost1_ =  sess.run(cost1)
    print('softmax with cross entropy is\n {} \n'.format(cost1_))
```
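softmax_cross_entropy_with_logits first turns the logits into a probability distribution with softmax and then computes the cross-entropy against the labels. An equivalent NumPy computation for comparison:
```python=
import numpy as np
logits = np.array([-0.5, 1.0, 2.0])
labels = np.array([1.0, 0.0, 0.0])
softmax = np.exp(logits) / np.sum(np.exp(logits)) #convert logits to probabilities
cross_entropy = -np.sum(labels * np.log(softmax)) #cross-entropy against the label distribution
print(cross_entropy) #should match the TensorFlow result
```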
## Neural Network
#### DNN
```python=
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Parameters
learning_rate = 0.001
training_epochs =15
batch_size = 100
display_step = 1
# Network Parameters
n_hidden_1 = 128 # 1st layer number of features
n_hidden_2 = 64 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Create model
def multilayer_perceptron(x, weights, biases):
  
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    out_1 = tf.nn.relu(layer_1)
    layer_2 = tf.add(tf.matmul(out_1, weights['h2']), biases['b2'])
    out_2 = tf.nn.relu(layer_2)
  
    out_layer = tf.matmul(out_2, weights['out']) + biases['out']
    return out_layer
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = multilayer_perceptron(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) #loss function
#-------------------- L2 regularization (optional) -------------------------
# beta = 0.01
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)\
#                       +beta*tf.nn.l2_loss(weights['h1'])
#                       +beta*tf.nn.l2_loss(weights['h2'])
#                       +beta*tf.nn.l2_loss( weights['out'])
#                      )
#---------------------------------------------------------------
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) #optimizer
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) #compare the predicted class (argmax of pred) with the true class
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) #tf.cast converts booleans to floats, then take the mean
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size) #read one mini-batch
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch {}, cost= {}".format(epoch+1,avg_cost))
    print("Optimization Finished!")
    accuracy_ = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
    print("Accuracy: {}".format(accuracy_))
```
#### DNN with dropout
```python=
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
# Network Parameters
n_hidden_1 = 128 # 1st layer number of features
n_hidden_2 = 64 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# add dropout
keep_prob = tf.placeholder(tf.float32)
# Create model
def multilayer_perceptron(x, weights, biases):
  
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    out_1 = tf.nn.relu(layer_1)
    layer_2 = tf.add(tf.matmul(out_1, weights['h2']), biases['b2'])
    out_2 = tf.nn.relu(layer_2)
    
    out_layer = tf.matmul(out_2, weights['out']) + biases['out']
    return out_layer
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = multilayer_perceptron(x, weights, biases)
pred_drop = tf.nn.dropout(pred, keep_prob)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred_drop, labels=y))#loss function
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)#optimizer
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) #compare the predicted class (argmax of pred) with the true class
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) #tf.cast converts booleans to floats, then take the mean
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size) #read one mini-batch
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch {}, cost= {}".format(epoch+1,avg_cost))
    print("Optimization Finished!")
    accuracy_ = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0}) #keep_prob must be 1.0 at evaluation time
    print("Accuracy: {}".format(accuracy_))
```
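In the code above dropout is applied to the final logits (pred_drop). Dropout is more commonly placed on the hidden-layer activations instead; a minimal sketch of that variant, reusing tf, weights, biases, and the keep_prob placeholder defined above (the function name is only illustrative):
```python=
def multilayer_perceptron_dropout(x, weights, biases, keep_prob):
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    out_1 = tf.nn.dropout(tf.nn.relu(layer_1), keep_prob) #dropout after the first hidden layer
    layer_2 = tf.add(tf.matmul(out_1, weights['h2']), biases['b2'])
    out_2 = tf.nn.dropout(tf.nn.relu(layer_2), keep_prob) #dropout after the second hidden layer
    return tf.matmul(out_2, weights['out']) + biases['out']
```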
#### DNN with TensorBoard
```python=
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
tf.reset_default_graph()
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Parameters
learning_rate = 0.001
training_epochs = 15 #number of training epochs
batch_size = 100 #images per batch
display_step = 1 #print progress every display_step epochs
# Network Parameters
n_hidden_1 = 128 # 1st layer number of features
n_hidden_2 = 64 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
x = tf.placeholder("float", [None, n_input]) 
y = tf.placeholder("float", [None, n_classes])  
# Create model
def multilayer_perceptron(x, weights, biases):
  
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    out_1 = tf.nn.relu(layer_1)
    tf.summary.histogram("relu1", out_1) #Tensorboard
    
    layer_2 = tf.add(tf.matmul(out_1, weights['h2']), biases['b2'])
    out_2 = tf.nn.relu(layer_2)
    tf.summary.histogram("relu2", out_2)
    
    out_layer = tf.matmul(out_2, weights['out']) + biases['out']
    return out_layer
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
with tf.name_scope('DNN_Model'):
    pred = multilayer_perceptron(x, weights, biases)
with tf.name_scope('Cost'):
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) #loss function
with tf.name_scope('Optimizer'):
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) #optimizer
with tf.name_scope('Accuracy'):
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) #compare the predicted class (argmax of pred) with the true class
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) #tf.cast converts booleans to floats, then take the mean
# Create a summary to monitor cost tensor
tf.summary.scalar("loss", cost)
# Create a summary to monitor accuracy tensor
tf.summary.scalar("accuracy", accuracy)
# Create summaries to visualize weights
for var in tf.trainable_variables():
    tf.summary.histogram(var.name.replace(':','_'), var)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
    
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summary_writer = tf.summary.FileWriter('./tensorboard_data', graph=tf.get_default_graph())
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size) #read one mini-batch
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c, summary = sess.run([optimizer, cost, merged_summary_op], feed_dict={x: batch_x, y: batch_y})
            # Write logs at every iteration
            summary_writer.add_summary(summary, epoch * total_batch + i)
            
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch {}, cost= {}".format(epoch+1,avg_cost))
    acc_ = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
    print("Optimization Finished!")
    print("Accuracy: {}".format(acc_))
```
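After training, the logs written to ./tensorboard_data can be inspected by running `tensorboard --logdir=./tensorboard_data` in a terminal and opening the printed URL (by default http://localhost:6006) in a browser, where the graph, the loss/accuracy scalars, and the weight histograms appear.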
#### Convolution & Max-pooling usage
```python=
%matplotlib inline
import cv2
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
temp = np.array([ [-1, -1, -1],
                  [-1,  8, -1],
                  [-1, -1, -1] ], dtype='float32')
# change kernel to 4D tensor
kernel = tf.reshape(tf.Variable(temp), [3, 3, 1, 1]) #reshape to [height, width, in_channels, out_channels]
print(kernel) 
raw_image = cv2.imread('test_img.jpg', 0) #Using 0 to read image in grayscale mode 
_, threshold_image = cv2.threshold(raw_image, 0, 255, cv2.THRESH_OTSU) 
#cv2.threshold returns two values; the second one is the binarized (thresholded) grayscale image.
#With cv2.THRESH_OTSU and thresh=0, the optimal threshold is computed automatically and returned as the first value.
threshold_image= threshold_image.astype('float32')
# change image to 4D tensor
x_img = tf.reshape(threshold_image, [-1, threshold_image.shape[0], threshold_image.shape[1], 1]) #[batch size, height, width, channels]
y_conv = tf.nn.conv2d(x_img, kernel, strides=[1, 1, 1, 1], padding='SAME') #strides = [1, horizontal stride, vertical stride, 1]; the first and last entries are always 1
#--------------------Maxpooling---------------------------------
# ksize = [1, width, height, 1]
# strides = [1, horizontal strides, vertical strides, 1]
#y_maxpool = tf.nn.max_pool(x_img, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#---------------------------------------------------------------
with tf.Session() as sess:
    sess.run( tf.global_variables_initializer() )
    
#--------------------Maxpooling---------------------------------    
    #result = sess.run(y_maxpool)
    
    #result_img = np.reshape(result, [14, 14]) #assumes a 28x28 input; adjust to the pooled output size of your image
#---------------------------------------------------------------    
    result = sess.run(y_conv)
    result_img = np.reshape(result, [threshold_image.shape[0], threshold_image.shape[1]])
    
    plt.imshow(threshold_image, cmap='gray')
    plt.show()
    
    plt.imshow(result_img, cmap='gray')
    plt.show()
```
#### CNN
```python=
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
def weight_variable(shape):
    initial_value = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial_value)
def bias_variable(shape):
    initial_value = tf.constant(0.1, shape=shape)
    return tf.Variable(initial_value)
def conv2d(x, W):
    # strides = [1, horizontal strides, vertical strides, 1], padding = SAME mean add paddings on input data
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    # ksize = [1, width, height, 1]
    # strides = [1, horizontal strides, vertical strides, 1]
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
W_conv1 = weight_variable([5, 5, 1, 32]) #5x5 kernel, 1 input channel (grayscale), 32 output feature maps
b_conv1 = bias_variable([32])
x = tf.placeholder(tf.float32, [None, 784]) #784 = 28*28; change this if the images have a different size (e.g. 100*100)
y_ = tf.placeholder(tf.float32, [None, 10]) #number of classes to recognize
# Reshape our data as 4-D tensorflow. Remind that a batch of images is a 4-D tensor
x_image = tf.reshape(x, [-1, 28, 28, 1]) #[batch size, height, width, channels]
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64]) #5x5 kernel, 32 input channels (must match the previous layer), 64 output feature maps
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7 * 7 * 64, 1024]) #flattened shape of h_pool2; two 2x2 max-poolings reduce 28x28 to 7x7
b_fc1 = bias_variable([1024])
# Flatten previous layer result and feed them into fully connected layer
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# Define cost
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
# Optimization 
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Calculate accuracy 
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1)) #compare the predicted class (argmax) with the true class
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #tf.cast converts booleans to floats, then take the mean
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        batch = mnist.train.next_batch(100)
        
        train_step_ = sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) #dropout 50%
        if i % 100 == 0:
            train_accuracy = sess.run(accuracy, feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0}) #disable dropout when measuring accuracy
            print('step {}, training accuracy {}'.format(i, train_accuracy))
            
    test_accuracy_ = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})    
    print('test accuracy {}'.format(test_accuracy_))
```