## Neural Network Classification with Machine Learning

#### Purpose
Generate a scatter plot in which the points of each color are uniformly distributed within a specific region, then use machine learning to find the boundary between the points of the two labels.

Required libraries:

```python=
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.model_selection import train_test_split
```

The rough shape chosen for the point distribution is a set of concentric circles. To make the points uniformly distributed over each ring, the radius is drawn as `sqrt(uniform(r_min², r_max²))`: sampling the *squared* radius uniformly compensates for the ring area growing linearly with the radius, which a plain uniform radius would not do. First define:

```python=
def generate_circle_points(r_min, r_max, num_points):
    # Generate coordinates from a uniform angle and an area-uniform radius
    angles = np.random.uniform(0, 2 * np.pi, num_points)
    radii = np.sqrt(np.random.uniform(r_min**2, r_max**2, num_points))
    x = radii * np.cos(angles)
    y = radii * np.sin(angles)
    return x, y
```

Next, draw the scatter plot:

```python=
# Generate the data points
N = 200
R0 = 0
R1 = 2
R2 = 1
R3 = 3
x_o, y_o = generate_circle_points(R0, R1, N)  # N points with radius between 0 and 2
x_x, y_x = generate_circle_points(R2, R3, N)  # N points with radius between 1 and 3

# Draw the scatter plot
plt.figure(figsize=(8, 8))
plt.scatter(x_o, y_o, c='blue', marker='o', label='Positive O')    # radius 0 to 2: blue, labeled "Positive O"
plt.scatter(x_x, y_x, c='orange', marker='o', label='Negative O')  # radius 1 to 3: orange, labeled "Negative O"

# Assemble and label the data points:
# X vstacks the column-stacked [x_o, y_o] and the column-stacked [x_x, y_x];
# y is [N ones, N minus ones]
X = np.vstack((np.column_stack((x_o, y_o)), np.column_stack((x_x, y_x))))
y = np.array([1] * N + [-1] * N)  # 1 means o, -1 means x

plt.xlabel('X')
plt.ylabel('Y')
plt.title('Scatter Plot with Different Classes')
plt.legend()
plt.grid(True)
plt.axis('equal')
plt.show()
```

The result:

![image](https://hackmd.io/_uploads/Hk364PUVT.png)
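The area-uniform sampling above can also be sanity-checked numerically. Below is a minimal sketch, not part of the original write-up: for a uniform annulus the mean radius has the closed form `E[r] = 2(r_max³ − r_min³) / (3(r_max² − r_min²))`, so the empirical mean of a large sample should match it.

```python=
# Sanity check (assumed helper, not in the original): compare the empirical
# mean radius of generate_circle_points with the analytic value.
r_min, r_max = 1.0, 3.0
x_s, y_s = generate_circle_points(r_min, r_max, 100000)
r_s = np.sqrt(x_s**2 + y_s**2)

expected = 2 * (r_max**3 - r_min**3) / (3 * (r_max**2 - r_min**2))
print("empirical mean radius:", r_s.mean())  # should be close to the value below
print("analytic mean radius: ", expected)    # 2.1667 for radii in [1, 3]
```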
Define a function that uses the model to predict on the training set and the test set and plots the results:

```python=
def plot_predictions(X_train, y_train, X_test, y_test, model):
    # Predict on the training set
    y_pred_train_continuous = model.predict(X_train)
    y_pred_train = np.where(y_pred_train_continuous.flatten() >= 0, 1, -1)

    # Predict on the test set
    y_pred_test_continuous = model.predict(X_test)
    y_pred_test = np.where(y_pred_test_continuous.flatten() >= 0, 1, -1)

    plt.figure(figsize=(8, 8))

    # Training set - correct predictions
    # correct positives: blue 'o'; correct negatives: orange 'o'
    correct_train = (y_train == y_pred_train)
    oc = (y_train == 1) & correct_train
    xc = (y_train == -1) & correct_train
    plt.scatter(X_train[oc, 0], X_train[oc, 1], c='blue', marker='o', label='Train Correct (Positive)')
    plt.scatter(X_train[xc, 0], X_train[xc, 1], c='orange', marker='o', label='Train Correct (Negative)')

    # Training set - incorrect predictions
    # incorrect positives: blue 'x'; incorrect negatives: orange 'x'
    incorrect_train = (y_train != y_pred_train)
    oi = (y_train == 1) & incorrect_train
    xi = (y_train == -1) & incorrect_train
    plt.scatter(X_train[oi, 0], X_train[oi, 1], c='blue', marker='x', label='Train Incorrect (Positive)')
    plt.scatter(X_train[xi, 0], X_train[xi, 1], c='orange', marker='x', label='Train Incorrect (Negative)')

    # Test set - correct predictions
    # correct positives: cyan 'o'; correct negatives: pink 'o'
    correct_test = (y_test == y_pred_test)
    oc = (y_test == 1) & correct_test
    xc = (y_test == -1) & correct_test
    plt.scatter(X_test[oc, 0], X_test[oc, 1], c='cyan', marker='o', label='Test Correct (Positive)')
    plt.scatter(X_test[xc, 0], X_test[xc, 1], c='pink', marker='o', label='Test Correct (Negative)')

    # Test set - incorrect predictions
    # incorrect positives: cyan 'x'; incorrect negatives: pink 'x'
    incorrect_test = (y_test != y_pred_test)
    oi = (y_test == 1) & incorrect_test
    xi = (y_test == -1) & incorrect_test
    plt.scatter(X_test[oi, 0], X_test[oi, 1], c='cyan', marker='x', label='Test Incorrect (Positive)')
    plt.scatter(X_test[xi, 0], X_test[xi, 1], c='pink', marker='x', label='Test Incorrect (Negative)')

    plt.xlabel('X Coordinate')
    plt.ylabel('Y Coordinate')
    plt.title('Model Predictions')
    plt.legend()
    plt.grid(True)
    plt.show()  # display the distribution plot with the prediction results
```

Split the data and build the network:

```python=
# Split the dataset; hold out 20% of the points as the test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Build the neural network model
N1 = 3
model = Sequential()
model.add(Dense(N1, input_dim=2, activation='tanh'))  # first layer: N1 neurons, tanh activation
model.add(Dense(2, activation='tanh'))                # second layer: 2 neurons, tanh
model.add(Dense(1, activation='tanh'))                # third layer: 1 neuron, tanh

# Compile the model
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
```

Now start training:

```python=
# Train the model and record the training history
# 100 epochs with a batch size of 5
# verbose=0 suppresses the per-epoch progress output
# validation_data supplies the validation features and labels
history = model.fit(X_train, y_train, epochs=100, batch_size=5, verbose=0,
                    validation_data=(X_test, y_test))

# Get the loss values
loss_values = history.history['loss']
```

<font color="#f00">**The loss function is the metric used in machine learning to measure the gap between a model's predictions and the actual targets. The loss value is the concrete number given by that metric: it tells you how well or how poorly the model predicted the training data in one training iteration.**

**In deep learning, the loss is a number computed from the model's predicted outputs and the actual labels. The goal is to make it as small as possible, since a smaller loss means the predictions are closer to the actual labels. In general, the smaller the loss, the better the model performs.**</font>

**`model.fit` runs many training iterations over the training data and computes the loss once per iteration. These loss values are stored in `history.history['loss']` and are used below to plot how the loss evolves.**

```python=
# Plot the loss curve
plt.figure(figsize=(8, 4))
plt.plot(loss_values, label='Training Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss Function During Training')
plt.legend()
plt.grid(True)
plt.show()

# Use plot_predictions to draw the correctly and incorrectly
# predicted points of the training and test sets
plot_predictions(X_train, y_train, X_test, y_test, model)

# Get the weights and biases of the first layer
weights, biases = model.layers[0].get_weights()
```

![image](https://hackmd.io/_uploads/S1TjOZPVa.png)
![image](https://hackmd.io/_uploads/Hk02_bP4p.png)

Next, generate more random points and classify them with the trained model. Each first-layer neuron computes `tanh(a*x + b*y + c)`, so the line `a*x + b*y + c = 0` is where its activation changes sign; the loop below rearranges that equation to draw the N1 boundary lines.

```python=
# Generate more random points and predict on them
Np = 200  # number of extra points; not defined in the original, assumed here
x_a, y_a = generate_circle_points(R0, R3, Np)
additional_X = np.column_stack((x_a, y_a))
y_pred_additional = model.predict(additional_X)
y_pred_additional = np.where(y_pred_additional.flatten() >= 0, 1, -1)

# Plot the original data and the prediction results
plt.figure(figsize=(8, 8))
plt.scatter(x_o, y_o, c='blue', marker='o', label='Class Positive')
plt.scatter(x_x, y_x, c='orange', marker='o', label='Class Negative')
plt.scatter(additional_X[y_pred_additional == 1, 0], additional_X[y_pred_additional == 1, 1],
            c='blue', marker='o', label='Predicted Positive')
plt.scatter(additional_X[y_pred_additional == -1, 0], additional_X[y_pred_additional == -1, 1],
            c='orange', marker='o', label='Predicted Negative')

# Draw the N1 boundary lines of the first layer
x_values = np.linspace(-2, 2, 100)
y_values = np.linspace(-2, 2, 100)
for i in range(N1):
    a, b = weights[:, i]
    c = biases[i]
    # Solve a*x + b*y + c = 0 for whichever coordinate is better conditioned
    if abs(a) > abs(b):
        x_line = (-b * y_values - c) / a
        plt.plot(x_line, y_values, linestyle='--', label=f'Neuron {i+1} Boundary')
    else:
        y_line = (-a * x_values - c) / b
        plt.plot(x_values, y_line, linestyle='--', label=f'Neuron {i+1} Boundary')

plt.xlabel('X')
plt.ylabel('Y')
plt.title('Additional Points Classification with Neuron Boundaries')
plt.legend()
plt.grid(True)
plt.axis('equal')
plt.show()
```

The results:

![image](https://hackmd.io/_uploads/r1UPClv4a.png)
![image](https://hackmd.io/_uploads/rJI_RevNa.png)
![image](https://hackmd.io/_uploads/SJJc0gvVa.png)
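The dashed lines show only the first-layer neurons. To see the decision region of the whole network, one option is to predict on a dense grid and draw the zero-level contour of the output. This is a hedged sketch of that idea, not part of the original write-up; the grid extent and plot styling are arbitrary choices.

```python=
# Sketch (not in the original): visualize the network's decision region.
# The model outputs tanh values in (-1, 1); the 0-level contour is the
# learned decision boundary.
gx, gy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
grid = np.column_stack((gx.ravel(), gy.ravel()))
zz = model.predict(grid, verbose=0).reshape(gx.shape)

plt.figure(figsize=(8, 8))
plt.contourf(gx, gy, zz, levels=[-1, 0, 1], colors=['orange', 'blue'], alpha=0.2)
plt.contour(gx, gy, zz, levels=[0], colors='black')  # decision boundary
plt.scatter(x_o, y_o, c='blue', marker='o', label='Positive O')
plt.scatter(x_x, y_x, c='orange', marker='o', label='Negative O')
plt.legend()
plt.grid(True)
plt.axis('equal')
plt.show()
```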
Finally, count all correct and incorrect predictions on the training set and compute the accuracy:

```python=
y_pred_train_continuous = model.predict(X_train)
y_pred_train = np.where(y_pred_train_continuous.flatten() >= 0, 1, -1)

TP = sum((y_pred_train == 1) & (y_train == 1))    # true positive: actually 1, predicted 1
TN = sum((y_pred_train == -1) & (y_train == -1))  # true negative: actually -1, predicted -1
FP = sum((y_pred_train == 1) & (y_train == -1))   # false positive: actually -1, predicted 1
FN = sum((y_pred_train == -1) & (y_train == 1))   # false negative: actually 1, predicted -1

print("In training set:")
TPR = TP / (TP + FN)  # true positive rate (recall)
TNR = TN / (TN + FP)  # true negative rate
FPR = 1 - TNR         # false positive rate
FNR = 1 - TPR         # false negative rate
PPV = TP / (TP + FP)  # positive predictive value (precision)
NPV = TN / (TN + FN)  # negative predictive value
F1 = 2 * PPV * TPR / (PPV + TPR)
ACC = (TP + TN) / (TP + TN + FP + FN)
print("TP=", TP, "TN=", TN, "FP=", FP, "FN=", FN)
print("Accuracy=", ACC)
```

![image](https://hackmd.io/_uploads/rkBkfZwNT.png)
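The printout above covers only the training set. As a follow-up sketch (not in the original), the same arithmetic can be wrapped in a small helper and reused on the held-out test set, with scikit-learn's `accuracy_score` as an independent cross-check:

```python=
# Sketch (assumed helper, not in the original): reuse the metric arithmetic
# above on any dataset, and cross-check the result with scikit-learn.
from sklearn.metrics import accuracy_score

def report_accuracy(X, y, model, name):
    # Threshold the tanh output at 0 to recover the +1/-1 labels
    y_pred = np.where(model.predict(X, verbose=0).flatten() >= 0, 1, -1)
    TP = np.sum((y_pred == 1) & (y == 1))
    TN = np.sum((y_pred == -1) & (y == -1))
    FP = np.sum((y_pred == 1) & (y == -1))
    FN = np.sum((y_pred == -1) & (y == 1))
    ACC = (TP + TN) / (TP + TN + FP + FN)
    print(f"In {name} set: TP={TP} TN={TN} FP={FP} FN={FN} Accuracy={ACC:.3f}")
    print("  cross-check:", accuracy_score(y, y_pred))  # should equal ACC

report_accuracy(X_train, y_train, model, "training")
report_accuracy(X_test, y_test, model, "test")
```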