5109029036_劉建昌_HW4
### 資料前處理
```python=
# Load the bank-marketing dataset (';'-separated CSV) and keep only the
# columns used as model inputs/target.
import pandas as pd

data = pd.read_csv("D:\\Ml_Python\\bank.csv", delimiter=";", header='infer')

# Drop the columns we do not use for modelling.
final = data.drop(['balance', 'day', 'housing', 'campaign', 'pdays',
                   'previous', 'poutcome', 'contact', 'month', 'duration'],
                  axis=1)

# Encode every categorical column to integer codes.
# NOTE: use one LabelEncoder per column — reusing a single instance
# overwrites its classes_ mapping on each fit_transform, so the fitted
# encoders could never be used to inverse-transform later.
from sklearn.preprocessing import LabelEncoder

for col in ['y', 'job', 'default', 'loan', 'education', 'marital']:
    final[col] = LabelEncoder().fit_transform(final[col])

# Split into feature matrix X and target y (select the target column
# directly instead of dropping every feature column).
X = final.drop(['y'], axis=1)
y = final['y']
```
### SVM
```python=
# --- Linear SVM ---
import numpy as np
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score, train_test_split

# SVMs are sensitive to feature scale: rescale every feature to [0, 1].
Mnscaler = preprocessing.MinMaxScaler()
X = Mnscaler.fit_transform(X)

# Flatten the target to a 1-d array (sklearn warns on column vectors).
# The original called y.values.ravel('C') but discarded the result.
y = np.ravel(y)

# BUG FIX: the original used X_train/X_test/y_train/y_test here although
# they were only created later in the file — create the hold-out split
# first (same 80/20 split and seed as the TensorFlow section).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.20, random_state=1234)

svc = SVC(kernel='linear')
cross_val_score(svc, X, y, scoring='accuracy', cv=10).mean()

svc.fit(X_train, y_train)
svc.score(X_test, y_test)

# Effect of the regularisation parameter C on test accuracy.
svc = SVC(C=0.01, kernel='linear')
svc.fit(X_train, y_train)
svc.score(X_test, y_test)
# Out[8]: 0.8729281767955801

svc = SVC(C=10, kernel='linear')
svc.fit(X_train, y_train)
svc.score(X_test, y_test)
# Out[10]: 0.8729281767955801
svc.support_vectors_.shape

# --- Non-linear SVM ---
# Custom kernel: dot(x1, x2) + 1 (inhomogeneous linear kernel).
# Defined for the exercise; not passed to any estimator below.
def custom_kernel(x1, x2):
    """Return np.dot(x1, x2) + 1 with singleton dimensions squeezed."""
    return np.squeeze(np.dot(x1, x2) + 1)

kernel = custom_kernel

# Baseline linear classifiers for comparison.
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
cross_val_score(lr, X, y, scoring='accuracy', cv=10).mean()
# Out[11]: 0.8847603977417025

lsvc = SVC(kernel='linear')
cross_val_score(lsvc, X, y, scoring='accuracy', cv=10).mean()
# Out[8]: 0.8847603977417025

# Non-linear SVMs: grid-search kernel and C with 10-fold CV,
# parallelised across all CPU cores.
import multiprocessing
from sklearn.model_selection import GridSearchCV

param_grid = [{
    'kernel': ['linear', 'rbf', 'poly', 'sigmoid'],
    'C': [0.1, 0.2, 0.4, 0.5, 1.0, 1.5, 1.8, 2.0, 2.5, 3.0],
}]
gs = GridSearchCV(estimator=SVC(), param_grid=param_grid,
                  scoring='accuracy', cv=10,
                  n_jobs=multiprocessing.cpu_count())
gs.fit(X, y)
gs.best_estimator_  # inspect the best hyper-parameters found
gs.best_score_
# Out[14]: 0.8847603977417025
```
# TensorFlow
```python=
# sklearn also provides neural networks (MLPClassifier) — baseline first.
import numpy as np
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.20, random_state=1234)
# Flatten targets to 1-d so sklearn/Keras do not warn about column vectors.
y_train = np.ravel(y_train)
y_test = np.ravel(y_test)

from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(hidden_layer_sizes=(50,), activation='tanh',
                    solver='sgd', learning_rate_init=0.1, max_iter=10000)
mlp.fit(X_train, y_train)
mlp.score(X_test, y_test)
# Out[35]: 0.8729281767955801

# Keras Sequential API: 6 inputs -> 50 tanh units -> 1 sigmoid output,
# binary cross-entropy loss with RMSprop.
from tensorflow.keras import Sequential, Input, Model
from tensorflow.keras.layers import Dense
import matplotlib.pyplot as plt

model = Sequential()
model.add(Dense(50, activation='tanh', input_shape=(6,)))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='RMSprop', loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, y_train, epochs=20)
test_los, test_acc = model.evaluate(X_test, y_test)
# NOTE: this second fit CONTINUES training the already-fitted model for
# 1000 more epochs; `history` covers only these 1000 epochs.
history = model.fit(X_train, y_train, epochs=1000)

plt.ylabel('loss')
plt.xlabel('epoch')
plt.plot(history.history['loss'])

# Same network rebuilt with the Keras Functional API.
visible = Input(shape=(6,))
hidden = Dense(50, activation='tanh')(visible)
output = Dense(1, activation='sigmoid')(hidden)
model = Model(inputs=visible, outputs=output)

from tensorflow.keras.utils import plot_model
plot_model(model, to_file='MLP.png')  # save an architecture diagram

model.compile(optimizer='RMSprop', loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=1000)
test_los, test_acc = model.evaluate(X_test, y_test)
print('loss:', test_los, ',acc:', test_acc)

plt.ylabel('loss')
plt.xlabel('epoch')
plt.plot(history.history['loss'])