# 資通電0815(https://bit.ly/3iGy5BP)
* download
* [https://support.microsoft.com/zh-tw/help/2977003/the-latest-supported-visual-c-downloads](https://support.microsoft.com/zh-tw/help/2977003/the-latest-supported-visual-c-downloads)
* Check env
* open cmd
```
python
quit()
pip list
(should see pip, setuptools)
```
* for home (windows 10)
```
@echo off
pushd "%~dp0"
dir /b %SystemRoot%\servicing\Packages\Microsoft-Windows-GroupPolicy-ClientExtensions-Package~3*.mum >List.txt
dir /b %SystemRoot%\servicing\Packages\Microsoft-Windows-GroupPolicy-ClientTools-Package~3*.mum >>List.txt
for /f %%i in ('findstr /i . List.txt 2^>nul') do dism /online /norestart /add-package:"%SystemRoot%\servicing\Packages\%%i"
pause
```
* https://www.jetbrains.com/
## prepare env
* `python -m pip install --upgrade pip`
* `pip install -U setuptools`
* `pip install virtualenv virtualenvwrapper virtualenvwrapper-win`
* `mkvirtualenv PYKT_0815`
* `pip install --upgrade tensorflow`
* `workon`
* `workon PYKT_0815`
* `deactivate`
## conda env
* `conda create -n PYKT_0815 python=3.7`
* `conda activate PYKT_0815`
* for mac
* `https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-2.1.0-cp37-cp37m-macosx_10_9_x86_64.whl`
* for pc
* `https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow_cpu-2.3.0-cp37-cp37m-win_amd64.whl`
* `pip install --upgrade YOUR_URL`
* `conda info --envs`
* `conda activate PYKT_0815`
* `conda info --envs`
## check
```
python
>>>
import tensorflow as tf
print(tf.__version__)
print(tf.constant("hello tensorflow"))
...
...
tf.Tensor(b'hello tensorflow', shape=(), dtype=string)
```
## install remaining
```
pip install numpy scipy scikit-learn matplotlib pandas ipython jupyter pillow jupyterlab keras
```
## for python
```
C:\Users\Admin\Envs\PYKT_0815\Scripts\python.exe
```
## enable larger VM heap
```
-Xms2048m
-Xmx2048m
```
## demo1.py
```
import os, sys
import tensorflow as tf
import sklearn

# Environment sanity check: show the working directory, the active
# interpreter, and the importable tensorflow / sklearn versions.
for info in (os.getcwd(), sys.executable, tf.__version__, sklearn.__version__):
    print(info)
### Kite
* [https://www.kite.com/download/](https://www.kite.com/download/)
### CUDA
* https://developer.nvidia.com/cuda-toolkit-archive
* https://developer.nvidia.com/cudnn
* https://docs.nvidia.com/deploy/cuda-compatibility/index.html
### you can use google colab, too
* https://colab.research.google.com/notebooks/intro.ipynb
* https://scipy.org/
* https://www.sympy.org/en/index.html
* `1/(1+exp(-x))`
* https://www.tenlong.com.tw/products/9789865021924?list_name=srh
* https://cs.nyu.edu/~mohri/mlbook/
## lab1 (cont'd)
```
import os, sys
import tensorflow
import sklearn
import matplotlib
import pandas as pd
print(os.getcwd())
print(sys.executable)
print(tensorflow.__version__)
print(sklearn.__version__)
print(matplotlib.__version__)
print(pd.__version__)
print(tensorflow.constant("demo"))
```
## lab2
```
import matplotlib.pyplot as plt
import numpy as np

# Draw the straight line y = a*x + b over [-10, 10) together with the
# coordinate axes.  (PyCharm tip from the course: Ctrl+Alt+L reformats.)
slope, intercept = 3, 5
x = np.arange(-10, 10, 0.1)
plt.plot(x, slope * x + intercept, label=f"y={slope}x+{intercept}")
plt.legend(loc=2)  # upper-left corner
plt.axhline(0, color='black')
plt.axvline(0, color='black')
plt.show()
```
## lab3
```
import matplotlib.pyplot as plt
import numpy as np
b = np.linspace(5, -5, 10)
a = 3
x = np.arange(-5, 5, 0.1)
for b1 in b:
y = a * x + b1
plt.plot(x, y, label=f"y={a}x+{b1:.1f}")
plt.legend(loc=2)
plt.axhline(0, color='black')
plt.axvline(0, color='black')
plt.show()
```
## lab4
```
import matplotlib.pyplot as plt
import numpy as np
b = 5
a = np.linspace(3, -1, 10)
x = np.arange(-5, 5, 0.1)
for a1 in a:
y = a1 * x + b
plt.plot(x, y, label=f"y={a1:.1f}x+{b}")
plt.legend(loc=2)
plt.axhline(0, color='black')
plt.axvline(0, color='black')
plt.show()
```
## lab5
```
import matplotlib.pyplot as plt
from sklearn import linear_model
regression1 = linear_model.LinearRegression()
features = [[1], [2], [3], [4]]
labels = [1, 4, 15, 18]
plt.scatter(features, labels, c='green')
plt.show()
regression1.fit(features, labels)
print(f'coef={regression1.coef_}')
print(f"intercept={regression1.intercept_}")
range1 = [0, 4]
plt.plot(range1, regression1.coef_ * range1 + regression1.intercept_, c='gray')
plt.scatter(features, labels, c='green')
plt.show()
```
## lab6
```
import matplotlib.pyplot as plt
from sklearn import linear_model
features = [[0, 1], [1, 3], [2, 8]]
labels = [1, 4, 5.5]
regression1 = linear_model.LinearRegression()
regression1.fit(features, labels)
print(f"coef={regression1.coef_}")
print(f"intercept={regression1.intercept_}")
# y= a1x1+a2x2
print(f"a1={regression1.coef_[0]}, a2={regression1.coef_[1]}")
newpoints = [[0.2, 0.8], [0.5, 0.5],
[0.8, 0.8], [2, 1], [10, 14]]
guess = regression1.predict(newpoints)
print(guess)
mapping = [1.9, 3.25, 4.3, 9., 34.5]
print(regression1.score(newpoints, mapping))
real = [2, 4, 5, 10, 35]
print(regression1.score(newpoints, real))
```
## lab7
```
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
data1 = datasets.make_regression(100, 1, noise=1)
print(type(data1))
print(data1[0].shape, data1[1].shape)
print(np.array([1, 3, 5, 7]).shape)
print(np.array([[1], [3], [5], [6]]).shape)
plt.scatter(data1[0], data1[1], c='red', marker='^')
plt.show()
regression1 = linear_model.LinearRegression()
regression1.fit(data1[0], data1[1])
print(f'coef ={regression1.coef_}, intercept={regression1.intercept_}')
print(f"regression score = {regression1.score(data1[0], data1[1])}")
range1 = [-3, 3]
plt.plot(range1, regression1.coef_ * range1 + regression1.intercept_, c='blue')
plt.scatter(data1[0], data1[1],c='red', marker='^')
plt.show()
```
## lab8
```
from sklearn import datasets
data1 = datasets.make_regression(10, 6, noise=5)
print(data1[0].shape)
print(data1[1].shape)
regressionX = data1[0]
r1 = sorted(regressionX, key=lambda t: t[0])
r2 = sorted(regressionX, key=lambda t: t[1])
r3 = sorted(regressionX, key=lambda t: t[2])
r4 = sorted(regressionX, key=lambda t: t[3])
r5 = sorted(regressionX, key=lambda t: t[4])
r6 = sorted(regressionX, key=lambda t: t[5])
print("finished")
```
## lab9
```
from sklearn import datasets
import matplotlib.pyplot as plt
data1 = datasets.make_regression(10, 6, noise=5)
for i in range(0, data1[0].shape[1]):
x = data1[0][:, i]
y = data1[1]
plt.title(f"#{i} variable")
plt.scatter(x, y)
plt.show()
```
## lab10
```
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
x = np.array([5, 15, 25, 35, 45, 55, 65]).reshape((-1, 1))
y = np.array([15, 11, 2, 8, 25, 32, 42])
plt.plot(x, y, 'r--')
plt.scatter(x, y)
plt.show()
regression1 = LinearRegression()
regression1.fit(x, y)
x_seq = np.array(np.arange(5, 55, 0.1)).reshape(-1, 1)
plt.plot(x, y, 'r--')
plt.scatter(x, y)
plt.plot(x, regression1.coef_ * x + regression1.intercept_, 'g-')
plt.show()
print(f"1st order linear regression score={regression1.score(x, y)}")
transformer = PolynomialFeatures(degree=2, include_bias=False)
transformer.fit(x)
x_ = transformer.transform(x)
print(f"x shape={x.shape}, x_ shape={x_.shape}")
print(x)
print(x_)
regression2 = LinearRegression().fit(x_, y)
print(f"2nd order linear regression score={regression2.score(x_, y)}")
```
# https://bit.ly/3gkXI9Y
# lab11
```python
import numpy as np

# Array creation: a 1x2 block of zeros and a 2x1 block of ones.
a = np.zeros((1, 2))
b = np.ones((2, 1))
print(a)
print(b)

# Transposing yields a view with swapped axes; .view() shares data too.
c = np.zeros((10, 2))
d = c.transpose()
e = d.view()
print(c.shape, d.shape, e.shape)

# Reshaping: -1 lets numpy infer one dimension from the element count (20).
f = d.reshape((5, 4))
g = d.reshape((20,))      # 1-dim vector
h = d.reshape((20, -1))   # (20, 1): 20 rows, 1 col -- still 2-dim
i = d.reshape((-1, 20))   # (1, 20): 1 row, 20 cols -- still 2-dim
print(f.shape, g.shape, h.shape, i.shape)
```
## lab12
```python
import numpy as np
# a: the original 2x2 array.
a = np.array([[1, 2], [3, 4]])
# b: a view -- shares a's data buffer but has its own shape metadata.
b = a.view()
# c: a plain alias -- the very same object as a.
c = a
# d: a deep copy -- independent data.
d = a.copy()
print(a, b, c, d, sep='\n')
# Mutating a is visible through the view (b) and the alias (c), not the copy (d).
a[0][0] = 100
print("stage2", a, b, c, d, sep='\n')
# Reshaping the view changes only b's shape; a, c, d keep theirs.
b.shape = (4, -1)
print("stage3", a, b, c, d, sep='\n')
# c IS a, so assigning c.shape reshapes a as well.
c.shape = (1, 4)
print("stage4", a, b, c, d, sep='\n')
```
## lab13
```python
import pandas as pd

# Build a small enrollment table: one row per location, one column per
# language.  Column lists give the same frame as the original
# dict-of-dicts construction (index 0..2).
df = pd.DataFrame({
    'Location': ['Taipei', 'HsinChu', 'Taichung'],
    'Java': [5, 10, 15],
    'Python': [2, 4, 6],
})
print(df)
```
## lab14
```python
import pandas as pd

# Same table as lab13: rows are locations, columns are language counts.
df = pd.DataFrame({
    'Location': ['Taipei', 'HsinChu', 'Taichung'],
    'Java': [5, 10, 15],
    'Python': [2, 4, 6],
})
print(df)

# melt() reshapes wide -> long: one (Location, variable, value) row per
# original cell of the selected value_vars.
print(pd.melt(df, id_vars=['Location'],
              value_vars=['Java']))
print(pd.melt(df, id_vars=['Location'],
              value_vars=['Java', 'Python']))
print('by default, select all')
print(pd.melt(df, id_vars=['Location']))
```
## lab15
```
from sklearn import linear_model, datasets
import numpy as np

# Load the built-in diabetes regression dataset.
diabetes = datasets.load_diabetes()
print(type(diabetes))
print(dir(diabetes))
print(diabetes.DESCR)
print(diabetes.data.shape)    # feature matrix
print(diabetes.target.shape)  # label vector

# Hold out the last 50 samples for testing; the negative index expresses
# the split as "last N" rather than a hard-coded boundary.
dataForTest = -50
data_train = diabetes.data[:dataForTest]
target_train = diabetes.target[:dataForTest]
print(f"features dim={data_train.shape}")
print(f"label dim={target_train.shape}")
data_test = diabetes.data[dataForTest:]
target_test = diabetes.target[dataForTest:]
print(f"testing feature dim={data_test.shape}")
print(f"testing label dim={target_test.shape}")

# Fix: LinearRegression(normalize=True) raises TypeError on
# scikit-learn >= 1.2 (the parameter was removed).  If normalization is
# wanted, chain StandardScaler + LinearRegression in a Pipeline instead.
regression1 = linear_model.LinearRegression()
regression1.fit(data_train, target_train)
print(regression1.coef_)
print(regression1.intercept_)
for i, v in enumerate(regression1.coef_):
    print('feature:%0d, score:%.5f' % (i, v))
print("score=%.2f" % regression1.score(data_test, target_test))

# Per-sample predictions against the held-out labels.
for i in range(dataForTest, 0):
    dataArray = np.array(data_test[i]).reshape(1, -1)
    print("predict={:.1f},actual={}".format(
        regression1.predict(dataArray)[0],
        target_test[i]
    ))

# Fix: the square root of the MSE is the RMSE, not the mean absolute
# error -- the variable is renamed accordingly.
mean_square_error = np.mean((regression1.predict(data_test) - target_test) ** 2)
root_mean_square_error = mean_square_error ** 0.5
print(mean_square_error)
print(root_mean_square_error)
```
```
git --version
```
```
https://git-scm.com/
```
```
http://github.com/
```
## lab15 (feature importance)
```
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot
x, y = make_regression(n_samples=100, n_features=10,
n_informative=5)
model1 = LinearRegression()
model1.fit(x, y)
# print(x)
importance = model1.coef_
print(importance)
for i, v in enumerate(importance):
print("feature:%0d, score:%.5f" % (i, v))
pyplot.bar([x for x in range(len(importance))], importance)
pyplot.show()
```
## lab16
```
import matplotlib.pyplot as plt
from sklearn import datasets
iris = datasets.load_iris()
print(type(iris), dir(iris))
features = iris.feature_names
X = iris.data
species = iris.target
print(features)
counter = 1
for i in range(0, 4):
for j in range(i + 1, 4):
plt.figure(counter, figsize=(8, 6))
counter += 1
xData = X[:, i]
yData = X[:, j]
x_min, x_max = xData.min() - 0.5, xData.max() + 0.5
y_min, y_max = yData.min() - 0.5, yData.max() + 0.5
plt.clf()
plt.scatter(xData, yData, c=species, cmap=plt.cm.Paired)
plt.xlabel(features[i])
plt.ylabel(features[j])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.show()
```
## lab17
```
import matplotlib.pyplot as plt
import numpy as np

# Plot the logistic (sigmoid) function g(z) = 1 / (1 + e^-z).
x = np.arange(-10, 10, 0.1)
f = 1 / (1 + np.exp(-x))
# Fix: xlabel expects a label string, not the data array itself.
plt.xlabel('z')
plt.ylabel('g(z)')
plt.plot(x, f)
plt.grid()
plt.show()
```
## lab18
```
import matplotlib.pyplot as plt
import numpy as np
W = [0.5, 1.0, 2.0]
L = ['w=0.5', 'w=1.0', 'w=2.0']
x = np.arange(-10, 10, 0.1)
for w, l in zip(W, L):
f = 1 / (1 + np.exp(-(w * x + 0)))
plt.plot(x, f, label=l)
plt.legend(loc=2)
plt.show()
```
## lab19
```
import matplotlib.pyplot as plt
import numpy as np
w = 3.0
B = [-8, 0, 8]
L = ['b=-8', 'b=0', 'b=8']
x = np.arange(-10, 10, 0.1)
for b, l in zip(B, L):
f = 1 / (1 + np.exp(-(w * x + b)))
plt.plot(x, f, label=l)
plt.legend(loc=2)
plt.show()
```
## lab20
```
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt

# Binary problem: is the flower Iris-Virginica, judged by petal width alone?
iris = datasets.load_iris()
print(list(iris.keys()))
print(iris.feature_names[3])
X = iris["data"][:, 3:]  # petal width only
print(iris.target_names[2])
# Fix: np.int was removed in NumPy 1.24 -- use the builtin int instead.
y = (iris["target"] == 2).astype(int)

logistic_regression1 = LogisticRegression()
logistic_regression1.fit(X, y)
print(logistic_regression1.coef_, logistic_regression1.intercept_)

# Evaluate the fitted model on a fine grid of petal widths.
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_prob = logistic_regression1.predict_proba(X_new)

# Re-draw the same sigmoid directly from the learned parameters to show
# that predict_proba is just 1 / (1 + exp(-(a*x + b))).
a = logistic_regression1.coef_[0]
b = logistic_regression1.intercept_
my_plot = 1 / (1 + np.exp(-(a * X_new + b)))

plt.plot(X, y, "g.")
plt.plot(X_new, y_prob[:, 1], "r-", label="Iris_Virginica")
plt.plot(X_new, y_prob[:, 0], "b--", label='Not Iris_Virginica')
plt.plot(X_new, my_plot, label="formula")
plt.grid()
plt.legend()
plt.xlabel("petal width")
plt.ylabel("probability")
plt.show()
```
## lab21
```
import sklearn.datasets as datasets
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
iris = datasets.load_iris()
data = iris.data
target = iris.target
logisticRegression1 = LogisticRegression()
scores = model_selection.cross_val_score(logisticRegression1,
data,
target,
cv=5)
print(scores)
print(scores.mean(), scores.std())
```
## lab22_svm
```
import numpy as np
from sklearn.svm import SVC
X = np.array([[-1, -1], [-2, -1], [-3, -2],
[1, 1], [2, 1], [3, 2]])
y = np.array([1, 1, 1, 2, 2, 2])
classifier1 = SVC()
classifier1.fit(X, y)
print(classifier1)
print("predict=", classifier1.predict([[1, 0], [0, 1],
[0.5, 0.5], [0.5, -0.5],
[-1, 0], [0, -1]]))
```
## lab23
```
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, svm
from sklearn.decomposition import PCA
iris = datasets.load_iris()
pca = PCA(n_components=2)
data = pca.fit(iris.data).transform(iris.data)
print(data.shape)
dataMax = data.max(axis=0) + 1
dataMin = data.min(axis=0) - 1
n = 1000
X, Y = np.meshgrid(np.linspace(dataMin[0], dataMax[0], n),
np.linspace(dataMin[1], dataMax[1], n))
# C=1, 10
# kernel='linear' # 0.9667,9733
# kernel='rbf' # 0.96, 9667
# kernel='poly' # 0.9467, 0.96
# kernel='sigmoid' # 0.86, 0.82
svc = svm.SVC(C=10, kernel='sigmoid')
svc.fit(data, iris.target)
Z = svc.predict(np.c_[X.ravel(), Y.ravel()])
plt.contour(X, Y, Z.reshape(X.shape), colors='#000000')
for c, s in zip([0, 1, 2], ['o', '^', '*']):
d = data[iris.target == c]
plt.scatter(d[:, 0], d[:, 1], c='k', marker=s)
print(f"score={svc.score(data, iris.target):.4f}")
plt.show()
```
# lab24
```
import sklearn.datasets as datasets
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn import svm
iris = datasets.load_iris()
data = iris.data
target = iris.target
logisticRegression1 = LogisticRegression()
svc1 = svm.SVC()
estimators = [logisticRegression1, svc1]
for e in estimators:
scores = model_selection.cross_val_score(e,
data,
target,
cv=5)
print(e)
print(scores)
print(scores.mean(), scores.std())
```
## download graphviz
```
https://www2.graphviz.org/Packages/stable/windows/10/msbuild/Release/Win32/
```
### rename to graphviz
```
under C:\graphviz\bin>
execute gvgen
set path
```
C:\temp_phw\PYKT_4days
```
git status
```
```
git remote add origin https://github.com/markho/PYKT0822_4days.git
git push -u origin master
```
### lab25
```
from sklearn import tree
from matplotlib import pyplot
X = [[0, 0], [1, 1]]
Y = [0, 1]
classifier1 = tree.DecisionTreeClassifier()
classifier1.fit(X, Y)
print(classifier1.predict([[2, 2],
[-2, -2], [2, -2], [-2, 2]]))
tree.plot_tree(classifier1)
pyplot.show()
```
## demo26
```
import os
from subprocess import check_call
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.tree import export_graphviz

# XOR-style toy data: class 1 when the two features differ.
X = [[0, 0], [1, 1], [0, 1], [1, 0]]
Y = [0, 0, 1, 1]

# Scatter the training points, coloured and shaped by class.
# Fix: the original while-loop used `type` as a variable, shadowing the
# builtin; zip over points and labels instead of manual indexing.
colors = ['red', 'green']
marker = ['o', 'd']
for point, label in zip(X, Y):
    plt.scatter(point[0], point[1], c=colors[label], marker=marker[label])
plt.show()

classifier1 = tree.DecisionTreeClassifier()
classifier1.fit(X, Y)
print(classifier1.tree_)

# Fix: create the output directory instead of requiring a manual mkdir.
os.makedirs('output', exist_ok=True)
export_graphviz(classifier1, out_file='output/lab26.dot',
                filled=True, rounded=True,
                special_characters=True)
# Render the dot file with graphviz; -Tpng / -Tpdf / -Tsvg pick the format.
check_call(['dot', '-Tpng', 'output/lab26.dot',
            '-o', 'output/lab26.png'])
check_call(['dot', '-Tpdf', 'output/lab26.dot',
            '-o', 'output/lab26.pdf'])
check_call(['dot', '-Tsvg', 'output/lab26.dot',
            '-o', 'output/lab26.svg'])
```
```
dot -Tpng output/lab26.dot -o output/test.png
```
```
jupyter-notebook
```
```python
import pandas as pd
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn import datasets
iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
y = iris.target
df.head()
```
```python
from sklearn import tree
import graphviz as gv
tree1 = DecisionTreeClassifier()
tree1.fit(df, y)
dot_data = tree.export_graphviz(tree1, feature_names=iris.feature_names,
out_file=None, filled=True, rounded=True,
special_characters=True)
graph_output = gv.Source(dot_data)
graph_output
#dot_data
```
```
tree.plot_tree(tree1)
```
## lab27
```
from sklearn.cluster import KMeans
import numpy as np
X = np.array([[1, 0], [0, 1], [1, 2], [1, 4], [1, 8],
[4, 2], [4, 4], [4, 0], [4, 6], [4, 7]])
kmeans = KMeans(n_clusters=2).fit(X)
print(kmeans.labels_)
print(kmeans.predict([[0, 0], [5, 5], [5, 0], [0, 5]]))
print(kmeans.cluster_centers_)
print(kmeans.inertia_)
```
## lab28
```
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
X = np.r_[np.random.randn(50, 2) + [2, 2],
np.random.randn(50, 2) + [0, -2],
np.random.randn(50, 2) + [-2, 2]]
[plt.scatter(e[0], e[1], c='black', s=7) for e in X]
k = 3
C_x = np.random.uniform(np.min(X[:, 0]),
np.max(X[:, 0]), size=k)
C_y = np.random.uniform(np.min(X[:, 1]),
np.max(X[:, 1]), size=k)
C = np.array(list(zip(C_x, C_y)), dtype=np.float32)
plt.scatter(C_x, C_y, marker='*', s=200, c='#0599FF')
plt.show()
def dist(a, b, ax=1):
return np.linalg.norm(a - b, axis=ax)
C_old = np.zeros(C.shape)
clusters = np.zeros(len(X))
delta = dist(C, C_old, None)
print(f"delta={delta}")
def plot_kmean(current_cluster, delta):
colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
fig, ax = plt.subplots()
for index1 in range(k):
pts = np.array([X[j] for j in range(len(X)) if current_cluster[j] == index1])
ax.scatter(pts[:, 0], pts[:, 1], s=7, c=colors[index1])
ax.scatter(C[:, 0], C[:, 1], marker='*', s=200, c='#0599FF')
plt.title(f'delta={delta:.4f}')
plt.show()
while delta != 0:
for i in range(len(X)):
distances = dist(X[i], C)
cluster = np.argmin(distances)
clusters[i] = cluster
C_old = deepcopy(C)
for i in range(k):
points = [X[j] for j in range(len(X)) if clusters[j] == i]
C[i] = np.mean(points, axis=0)
delta = dist(C, C_old, None)
plot_kmean(clusters, delta)
```
## lab29
```
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
X = np.r_[np.random.randn(2000, 2) + [2, 2],
np.random.randn(2000, 2) + [0, -2],
np.random.randn(2000, 2) + [-2, 2]]
kmeans = KMeans(n_clusters=3,
n_init=10,
max_iter=300)
kmeans.fit(X)
print(kmeans.cluster_centers_)
print(kmeans.inertia_)
colors = ['c', 'm', 'y', 'k']
markers = ['o', 'v', '*', 'x']
for i in range(3):
dataX = X[kmeans.labels_ == i]
plt.scatter(dataX[:, 0], dataX[:, 1],
c=colors[i], marker=markers[i])
print(f'group{i} has {dataX.size}')
plt.scatter(kmeans.cluster_centers_[:, 0],
kmeans.cluster_centers_[:, 1],
marker='*',
s=200,
c='#0599FF')
plt.show()
```
## lab30
```
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
X = np.r_[np.random.randn(2000, 2) + [2, 2],
np.random.randn(2000, 2) + [0, -2],
np.random.randn(2000, 2) + [-2, 2]]
interias = []
for k in range(1, 10):
kmeans = KMeans(n_clusters=k)
kmeans.fit(X)
interias.append(kmeans.inertia_)
print(interias)
plt.plot(range(1,10), interias)
plt.show()
```
## Introduction to Algorithms
## lab31
```
import numpy as np
from sklearn.neighbors import NearestNeighbors
X = np.array([[-1, -1], [-2, -1], [-3, -2],
[1, 1], [2, 1], [3, 2]])
shortestNeighbors = NearestNeighbors(n_neighbors=2,
algorithm='auto').fit(X)
distances, indices = \
shortestNeighbors.kneighbors(X, return_distance=True)
print(distances)
print(indices)
print(shortestNeighbors.kneighbors_graph(X).toarray())
```
## lab32
```
import numpy as np
from sklearn.naive_bayes import GaussianNB
X = np.array([[-1, -1], [-2, -1], [-3, -2],
[1, 1], [2, 1], [3, 2]])
Y = [1, 1, 1, 2, 2, 2]
classifier1 = GaussianNB()
classifier1.fit(X, Y)
print(classifier1.predict([[-0.8, -0.8], [2.1, 2.5]]))
classifier2 = GaussianNB()
classifier2.partial_fit(X, Y, np.unique(Y))
print(classifier2.predict([[-0.4, -0.4]]))
classifier2.partial_fit([[-0.5, -0.5]], [2])
print(classifier2.predict([[-0.4, -4]]))
```
## lab33
```python=3.6
import matplotlib.pyplot as plt
import numpy as np
from sklearn.naive_bayes import GaussianNB

# Six 2-D training points; the active label vector interleaves the two
# classes, which makes the Gaussian NB decision regions non-trivial.
X = np.array([[-1, -1], [-2, -1], [-3, -2],
              [1, 1], [2, 1], [3, 2]])
# Y = np.array([1, 1, 1, 2, 2, 2])
# Y = np.array([1, 2, 2, 1, 2, 2])
Y = np.array([1, 1, 2, 1, 1, 2])

# Dense grid covering the plotting window; h is the grid step.
x_min, x_max = -4, 4
y_min, y_max = -4, 4
h = .025
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

classifier1 = GaussianNB()
classifier1.fit(X, Y)

# Predict every grid point, then reshape back to the grid so pcolormesh
# can draw the decision regions.
Z = classifier1.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.pcolormesh(xx, yy, Z)

# Split the training points by label for coloured scatter plots.
# Fix: dropped the dead `index = 0` initialisation before the for-loop.
XB, YB, XR, YR = [], [], [], []
for index in range(0, len(Y)):
    if Y[index] == 1:
        XB.append(X[index, 0])
        YB.append(X[index, 1])
    elif Y[index] == 2:
        XR.append(X[index, 0])
        YR.append(X[index, 1])
plt.scatter(XB, YB, color='b', label='BLUE')
plt.scatter(XR, YR, color='r', label='RED')
plt.legend()
# Fix: removed the duplicate plt.show() -- the second call was a no-op.
plt.show()
```
## lab34
```python=3.6
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
iris = datasets.load_iris()
X = iris.data
species = iris.target
fig = plt.figure(1, figsize=(8, 8))
ax = Axes3D(fig, elev=150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2],
c=species, cmap=plt.cm.Paired)
plt.show()
```
## lab35
```
from numpy import array
from sklearn.decomposition import PCA
A = array([[1, 2, 3], [3, 4, 5], [5, 6, 7], [7, 8, 9]])
print(A)
pca = PCA(2)
pca.fit(A)
print("components:", pca.components_)
print("variance", pca.explained_variance_)
B = pca.transform(A)
print(B)
```
## lab36
```python=3.6
from numpy import array, cov, mean
from numpy.linalg import eig

# Hand-rolled PCA: centre the data, take the covariance matrix of the
# features, eigendecompose it, and project onto the eigenvectors.
A = array([[1, 2, 3], [3, 4, 5], [5, 6, 7], [7, 8, 9]])
print(A)

# Column means (per-feature mean) -- used to centre the data.
M = mean(A.T, axis=1)
print(M)
# Grand mean of all entries (scalar), shown for comparison.
M2 = mean(A.T)
print(M2)
# Row means (per-sample mean), shown for comparison.
M3 = mean(A, axis=1)
print(M3)

# Centre the data so each column has zero mean.
C = A - M
print(C)
# Covariance matrix of the (centred) features.
V = cov(C.T)
print(V)
# Eigendecomposition: columns of `vectors` are the principal axes.
values, vectors = eig(V)
print("vectors", vectors)
print("values", values)
# Project the centred data onto the principal axes.
# Fix: corrected the typo "proect" in the printed label.
P = vectors.T.dot(C.T)
print("project", P.T)
```
## lab37
```
import tensorflow as tf
hello1 = tf.constant("Hello, Tensorflow!")
print(hello1)
```
## lab38
```
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
hello1 = tf.constant('Hello Tensorflow!')
session1 = tf.compat.v1.Session()
print(hello1)
print(session1.run(hello1))
```
## lab39
```
import tensorflow as tf
import numpy as np
print(tf.__version__)
a = np.array([5, 3, 8])
b = np.array([3, -1, 2])
c = np.add(a, b)
print(c)
a2 = tf.constant([5, 3, 8])
b2 = tf.constant([3, -1, 2])
c2 = tf.add(a2, b2)
c3 = tf.add(a, b)
print(c2)
print(c3)
```
## lab40
```
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
a = tf.compat.v1.placeholder(dtype=tf.int32, shape=(None,))
b = tf.compat.v1.placeholder(dtype=tf.int32, shape=(None,))
c = tf.add(a, b)
with tf.compat.v1.Session() as session:
result = session.run(c, feed_dict={
a: [3, 4, 5],
b: [-1, -2, -3]
})
print(result)
```
## lab41
```
import tensorflow as tf
@tf.function
def add(p, q):
return tf.add(p, q)
print(add([3, 4, 5], [-1, -2, -3]).numpy())
```
## lab42
```
import tensorflow as tf
from datetime import datetime
@tf.function
def computeArea(sides):
a = sides[:, 0]
b = sides[:, 1]
c = sides[:, 2]
s = (tf.add(tf.add(a, b), c)) / 2
areaSquare = s * (s - a) * (s - b) * (s - c)
return areaSquare ** 0.5
stamp = datetime.now().strftime("%Y%m%d-%H%M")
logdir = 'logs/%s' % stamp
writer = tf.summary.create_file_writer(logdir)
tf.summary.trace_on(graph=True, profiler=True)
print(computeArea(tf.constant([[5.0, 3.0, 4.0],
[6.0, 6.0, 6.0]])))
with writer.as_default():
tf.summary.trace_export(name='lab42',
step=0,
profiler_outdir=logdir)
tf.summary.trace_off()
```
```
cd C:\temp_phw\PYKT_4days\logs
tensorboard --logdir <run-dir, e.g. 20200829-1215>
```
```
http://localhost:6006/#graphs&run=.
```
## lab43
```
import tensorflow as tf
vectors = [3, -1, 2.4, 5.9, 0.0001, 0.51, 0.49, -0.0008]
result1 = tf.nn.relu(vectors)
print("after apply relu, result1=", result1)
result2 = tf.nn.sigmoid(vectors)
print("after apply relu, result2=", result2)
```
## lab44
get
https://www.kaggle.com/uciml/pima-indians-diabetes-database
```
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
import os
dataset1 = np.loadtxt("data/diabetes.csv",
delimiter=",",
skiprows=1)
print(dataset1.shape)
inputList = dataset1[:, 0:8]
resultList = dataset1[:, 8]
model = Sequential()
model.add(Dense(14, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
model.summary()
model.fit(inputList, resultList, epochs=200, batch_size=20,
validation_split=0.2)
scores = model.evaluate(inputList, resultList)
print(model.metrics_names)
print(scores)
print("{}:{}".format(model.metrics_names[0], scores[0]))
print("{}:{}".format(model.metrics_names[1], scores[1]))
```
## demo45
```
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
import os
from keras.models import save_model, load_model
MODEL_LOC = 'models/lab45'
dataset1 = np.loadtxt("data/diabetes.csv",
delimiter=",",
skiprows=1)
print(dataset1.shape)
inputList = dataset1[:, 0:8]
resultList = dataset1[:, 8]
def createModel():
# global model
model = Sequential()
model.add(Dense(14, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
model.summary()
return model
model = createModel()
model.fit(inputList, resultList, epochs=200, batch_size=20)
save_model(model, MODEL_LOC)
scores = model.evaluate(inputList, resultList)
print(model.metrics_names)
print(scores)
print("{}:{}".format(model.metrics_names[0], scores[0]))
print("{}:{}".format(model.metrics_names[1], scores[1]))
model2 = createModel()
scores2 = model2.evaluate(inputList, resultList)
print("{}:{}".format(model2.metrics_names[0], scores2[0]))
print("{}:{}".format(model2.metrics_names[1], scores2[1]))
model3 = load_model(MODEL_LOC)
scores3 = model3.evaluate(inputList, resultList)
print("{}:{}".format(model3.metrics_names[0], scores3[0]))
print("{}:{}".format(model3.metrics_names[1], scores3[1]))
```
## lab44(cont'd)
```
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
import os
import matplotlib.pyplot as plt
dataset1 = np.loadtxt("data/diabetes.csv",
delimiter=",",
skiprows=1)
print(dataset1.shape)
inputList = dataset1[:, 0:8]
resultList = dataset1[:, 8]
model = Sequential()
model.add(Dense(14, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
model.summary()
history = model.fit(inputList, resultList, epochs=200, batch_size=20,
validation_split=0.1)
scores = model.evaluate(inputList, resultList)
print(model.metrics_names)
print(scores)
print("{}:{}".format(model.metrics_names[0], scores[0]))
print("{}:{}".format(model.metrics_names[1], scores[1]))
plt.plot(history.history['val_loss'])
plt.plot(history.history['loss'])
plt.legend(['val_loss','loss'])
plt.show()
plt.plot(history.history['val_accuracy'])
plt.plot(history.history['accuracy'])
plt.legend(['val_accuracy','accuracy'])
plt.show()
```
## lab46
```
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
dataset1 = np.loadtxt("data/diabetes.csv",
delimiter=",",
skiprows=1)
print(dataset1.shape)
inputList = dataset1[:, 0:8]
resultList = dataset1[:, 8]
feature_train, feature_test, label_train, label_test \
= train_test_split(inputList, resultList, test_size=0.2, stratify=resultList)
for data in [resultList, label_train, label_test]:
classes, counts = np.unique(data, return_counts=True)
for cl, co in zip(classes, counts):
print(f"{int(cl)}==>{co / sum(counts)}")
model = Sequential()
model.add(Dense(14, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
model.summary()
history = model.fit(feature_train, label_train, epochs=200, batch_size=20,
validation_data=(feature_test, label_test))
plt.plot(history.history['val_loss'])
plt.plot(history.history['loss'])
plt.legend(['val_loss', 'loss'])
plt.show()
plt.plot(history.history['val_accuracy'])
plt.plot(history.history['accuracy'])
plt.legend(['val_accuracy', 'accuracy'])
plt.show()
```
## lab47
```
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
dataset1 = np.loadtxt("data/diabetes.csv",
delimiter=",",
skiprows=1)
print(dataset1.shape)
inputList = dataset1[:, 0:8]
resultList = dataset1[:, 8]
fiveFold = StratifiedKFold(n_splits=5, shuffle=True)
totalScore = []
for train, test in fiveFold.split(inputList, resultList):
model = Sequential()
model.add(Dense(14, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
model.summary()
history = model.fit(inputList[train], resultList[train],
epochs=200, batch_size=20,
validation_split=0.1,verbose=0)
scores = model.evaluate(inputList[test], resultList[test])
print(model.metrics_names)
print(scores)
print("{}:{}".format(model.metrics_names[0], scores[0]))
print("{}:{}".format(model.metrics_names[1], scores[1]))
totalScore.append(scores[1] * 100)
print(totalScore)
print(np.mean(totalScore), np.std(totalScore))
```
## lab48
```
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
import os
import matplotlib.pyplot as plt
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score
dataset1 = np.loadtxt("data/diabetes.csv",
delimiter=",",
skiprows=1)
print(dataset1.shape)
inputList = dataset1[:, 0:8]
resultList = dataset1[:, 8]
def createModel():
# global model
model = Sequential()
model.add(Dense(14, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
model.summary()
return model
model2 = KerasClassifier(build_fn=createModel,
epochs=200, batch_size=20, verbose=0)
fiveFold = StratifiedKFold(n_splits=5, shuffle=True)
result = cross_val_score(model2, inputList, resultList, cv=fiveFold)
print(result)
print(result.mean(), result.std())
```
## lab49
```
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
import os
import matplotlib.pyplot as plt
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import \
    StratifiedKFold, cross_val_score, GridSearchCV

# Load the diabetes CSV (8 features + binary label); skip header row.
dataset1 = np.loadtxt("data/diabetes.csv",
                      delimiter=",",
                      skiprows=1)
print(dataset1.shape)
inputList = dataset1[:, 0:8]
resultList = dataset1[:, 8]

def createModel(optimizer='adam', init='uniform'):
    """Build the MLP; `optimizer` and `init` are grid-searched hyper-parameters."""
    model = Sequential()
    model.add(Dense(14, kernel_initializer=init,
                    input_dim=8, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    model.summary()
    return model

model2 = KerasClassifier(build_fn=createModel, verbose=0)
# Search space: 3 optimizers x 2 initializers x 3 epochs x 3 batch sizes.
optimizers = ['adam', 'rmsprop', 'sgd']
inits = ['normal', 'uniform']
epochs = [50, 100, 150]
batches = [5, 10, 15]
param_grid = dict(optimizer=optimizers,
                  epochs=epochs,
                  batch_size=batches,
                  init=inits)
grid = GridSearchCV(estimator=model2, param_grid=param_grid)
grid_result = grid.fit(inputList, resultList)
```
## jupyter-notebook
```
Lab50
```
```
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
import os
import matplotlib.pyplot as plt
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import \
    StratifiedKFold, cross_val_score, GridSearchCV

# Load the diabetes CSV (8 features + binary label); skip header row.
dataset1 = np.loadtxt("data/diabetes.csv",
                      delimiter=",",
                      skiprows=1)
print(dataset1.shape)
inputList = dataset1[:, 0:8]
resultList = dataset1[:, 8]

def createModel(optimizer='adam', init='uniform'):
    """Build the MLP; `optimizer` and `init` are grid-searched hyper-parameters."""
    model = Sequential()
    model.add(Dense(14, kernel_initializer=init,
                    input_dim=8, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    model.summary()
    return model

model2 = KerasClassifier(build_fn=createModel, verbose=0)
# Search space: 3 optimizers x 2 initializers x 3 epochs x 3 batch sizes.
optimizers = ['adam', 'rmsprop', 'sgd']
inits = ['normal', 'uniform']
epochs = [50, 100, 150]
batches = [5, 10, 15]
param_grid = dict(optimizer=optimizers,
                  epochs=epochs,
                  batch_size=batches,
                  init=inits)
grid = GridSearchCV(estimator=model2, param_grid=param_grid)
grid_result = grid.fit(inputList, resultList)
```
## https://archive.ics.uci.edu/ml/datasets/iris
## lab52
```
from pandas import read_csv
import numpy as np
# UCI iris dataset: no header row, 4 numeric features + species name per row.
df1 = read_csv("./data/iris.data", header=None)
dataset = df1.values
features = dataset[:, 0:4].astype(float)  # sepal/petal measurements
labels = dataset[:, 4]                    # species name strings
print(features.shape, labels.shape)
# Show the class names and how many samples each has.
print(np.unique(labels, return_counts=True))
```
## 50 result
```
# Best mean cross-validated accuracy found by the grid search.
grid_result.best_score_
```
```
# Hyper-parameter combination that achieved the best score.
grid_result.best_params_
```
```
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
# Report mean/std CV accuracy for every hyper-parameter combination tried.
for mean, std, param in zip(means, stds, params):
    print("%f(%f with %r)"%(mean, std, param))
```
## lab52
```
from pandas import read_csv
import numpy as np
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
from keras import Sequential
from keras.layers import Dense

# UCI iris dataset: 4 numeric features + species name per row, no header.
df1 = read_csv("./data/iris.data", header=None)
dataset = df1.values
features = dataset[:, 0:4].astype(float)
labels = dataset[:, 4]
print(features.shape, labels.shape)
print(np.unique(labels, return_counts=True))
# Species strings -> integer ids -> one-hot rows.
encoder = LabelEncoder()
encoder.fit(labels)
encoded_Y = encoder.transform(labels)
print(np.unique(encoded_Y, return_counts=True))
dummy_y = np_utils.to_categorical(encoded_Y)
print(dummy_y[:10], dummy_y[50:60], dummy_y[100:110])

def baseline_model():
    """4-8-3 softmax classifier for the three iris species."""
    model = Sequential()
    model.add(Dense(8, input_dim=4, activation='relu'))
    model.add(Dense(3, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model

m1 = baseline_model()
print(m1.summary())
```
## lab53
```
# NOTE: the original cell imported tensorflow twice; one import removed.
import tensorflow as tf
import numpy as np

scores = [3.0, 1.0, 2.0]

def my_softmax(x):
    """Softmax: exponentiate, then normalise so the outputs sum to 1."""
    ax = np.array(x)
    return np.exp(ax) / np.sum(np.exp(ax), axis=0)

def normal_ratio(x):
    """Plain proportional normalisation, for contrast with softmax."""
    ax = np.array(x)
    return ax / np.sum(ax, axis=0)

print(my_softmax(scores), normal_ratio(scores))
# Cross-check against TensorFlow's built-in softmax.
print(tf.nn.softmax(scores).numpy())
```
## lab52
```
from pandas import read_csv
import numpy as np
from keras.utils import np_utils
from sklearn.model_selection import KFold, cross_val_score
from sklearn.preprocessing import LabelEncoder
from keras import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier

# UCI iris dataset: 4 numeric features + species name per row, no header.
df1 = read_csv("./data/iris.data", header=None)
dataset = df1.values
features = dataset[:, 0:4].astype(float)
labels = dataset[:, 4]
print(features.shape, labels.shape)
print(np.unique(labels, return_counts=True))
# Species strings -> integer ids -> one-hot rows.
encoder = LabelEncoder()
encoder.fit(labels)
encoded_Y = encoder.transform(labels)
print(np.unique(encoded_Y, return_counts=True))
dummy_y = np_utils.to_categorical(encoded_Y)
print(dummy_y[:10], dummy_y[50:60], dummy_y[100:110])

def baseline_model():
    """4-8-3 softmax classifier for the three iris species."""
    model = Sequential()
    model.add(Dense(8, input_dim=4, activation='relu'))
    model.add(Dense(3, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model

# 3-fold cross-validation over the sklearn-wrapped Keras model.
estimator = KerasClassifier(build_fn=baseline_model,
                            epochs=200,
                            batch_size=10,
                            verbose=1)
kfold = KFold(n_splits=3, shuffle=True)
results = cross_val_score(estimator, features,
                          dummy_y, cv=kfold)
print(results)
```
## lab54
```
import numpy
from keras.datasets import imdb
from matplotlib import pyplot as plt
# IMDB reviews as word-index sequences with 0/1 sentiment labels.
(X_train, y_train), (X_test, y_test) = imdb.load_data()
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# Pool train and test to inspect the whole corpus.
X = numpy.concatenate((X_train, X_test), axis=0)
y = numpy.concatenate((y_train, y_test), axis=0)
print(numpy.unique(y, return_counts=True))
# Vocabulary size: number of distinct word indices across all reviews.
print(len((numpy.unique(numpy.hstack(X)))))
# Review-length statistics, then a boxplot and histogram side by side.
lengths = [len(x) for x in X]
print(numpy.mean(lengths), numpy.std(lengths))
plt.subplot(121)
plt.boxplot(lengths)
plt.subplot(122)
plt.hist(lengths)
plt.show()
```
## lab55
```
import numpy as np
from keras.datasets import imdb

# Keep only the 10,000 most frequent words.
(train_data, train_labels), (test_data, test_labels) = \
    imdb.load_data(num_words=10000)
print(train_data[0])
print(max([max(sequence) for sequence in train_data]))
word_to_digit_index = imdb.get_word_index()
print(type(word_to_digit_index))

# Invert word->index so encoded reviews can be decoded back to text.
reverse_index = dict([(v, k) for k, v
                      in word_to_digit_index.items()])

def decodeIMDB(x):
    """Decode training review `x` to words; indices 0-2 are reserved, hence i - 3."""
    return ' '.join([reverse_index.get(i - 3, '?')
                     for i in train_data[x]])

# Show the first five labels with their decoded reviews.
for i in range(5):
    print(train_labels[i])
    print(decodeIMDB(i))
```
# Sep-05
## lab55 (cont'd)
```
import numpy as np
from keras import models, layers
from keras.datasets import imdb

# Keep only the 10,000 most frequent words.
(train_data, train_labels), (test_data, test_labels) = \
    imdb.load_data(num_words=10000)
print(train_data[0])
print(max([max(sequence) for sequence in train_data]))
word_to_digit_index = imdb.get_word_index()
print(type(word_to_digit_index))
# Invert word->index so encoded reviews can be decoded back to text.
reverse_index = dict([(v, k) for k, v
                      in word_to_digit_index.items()])

def decodeIMDB(x):
    """Decode training review `x` to words; indices 0-2 are reserved, hence i - 3."""
    return ' '.join([reverse_index.get(i - 3, '?')
                     for i in train_data[x]])

for i in range(5):
    print(train_labels[i])
    print(decodeIMDB(i))

# 05-Sep-2020
def vectorize_sequence(sequences, dimension=10000):
    """Multi-hot encode: row i gets 1.0 at every word index present in sequences[i]."""
    results = np.zeros((len(sequences), dimension))
    for i, seq in enumerate(sequences):
        results[i, seq] = 1
    return results

x_train = vectorize_sequence(train_data)
x_test = vectorize_sequence(test_data)
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')

# 10000 -> 32 -> 16 -> 1 sigmoid sentiment classifier.
model = models.Sequential()
model.add(layers.Dense(32, activation='relu',
                       input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
print(model.summary())
history = model.fit(x_train, y_train,
                    epochs=30,
                    batch_size=500,
                    validation_data=(x_test, y_test))

import matplotlib.pyplot as plt
history_dict = history.history
```
## lab56
```
y = 1

def calculate(x):
    """Add 1e-7 one million times, then subtract the nominal total 0.1.

    Demonstrates accumulated binary floating-point rounding error:
    the result is very close to, but not exactly, x.
    """
    for i in range(0, 1000000):
        x += 0.0000001
    x -= 0.1
    return x

print(f"result={calculate(y):.6f}")
```
## lab57_mnist
```python=
import tensorflow as tf
import numpy as np
import keras
import matplotlib.pyplot as plt

(train_images, train_labels), (test_images, test_labels) = keras.datasets.mnist.load_data()

def plot_image(image, label):
    """Show one 28x28 digit with `label` as the plot title."""
    plt.imshow(image.reshape(28, 28), cmap='binary')
    plt.title(label)
    plt.show()

# Flatten each 28x28 image into a 784-vector and scale pixels to [0, 1].
FLATTEN_DIM = 28 * 28
TRAINING_SIZE = len(train_images)
TEST_SIZE = len(test_images)
trainImages = np.reshape(train_images, (TRAINING_SIZE, FLATTEN_DIM))
testImages = np.reshape(test_images, (TEST_SIZE, FLATTEN_DIM))
print(type(trainImages[0]))
trainImages = trainImages.astype(np.float32)
testImages = testImages.astype(np.float32)
trainImages /= 255
testImages /= 255
NUM_DIGITS = 10
print(train_labels[:10])
# BUG FIX: one-hot encode the LABELS, not the image tensors (the original
# passed trainImages/testImages here; the later repeat of this lab confirms
# the intended targets are train_labels/test_labels).
trainLabels = keras.utils.to_categorical(train_labels, NUM_DIGITS)
testLabels = keras.utils.to_categorical(test_labels, NUM_DIGITS)
```
## lab58_1_hot_encoding
```python=
from tensorflow.keras import utils

origs = [4, 7, 13, 5, 8]
NUM_DIGITS = 20
# One-hot encode each value into a length-20 vector and show the mapping.
for o in origs:
    converted = utils.to_categorical(o, NUM_DIGITS)
    print(f"{o}==>{converted}")
```
## lab57(cont'd)
```python=
import tensorflow as tf
import numpy as np
import keras
import matplotlib.pyplot as plt
from keras import Sequential
from keras.layers import Dense
from keras.callbacks import TensorBoard

(train_images, train_labels), (test_images, test_labels) = keras.datasets.mnist.load_data()

def plot_image(image, label):
    """Show one 28x28 digit with `label` as the plot title."""
    plt.imshow(image.reshape(28, 28), cmap='binary')
    plt.title(label)
    plt.show()

# Flatten each 28x28 image into a 784-vector and scale pixels to [0, 1].
FLATTEN_DIM = 28 * 28
TRAINING_SIZE = len(train_images)
TEST_SIZE = len(test_images)
trainImages = np.reshape(train_images, (TRAINING_SIZE, FLATTEN_DIM))
testImages = np.reshape(test_images, (TEST_SIZE, FLATTEN_DIM))
print(type(trainImages[0]))
trainImages = trainImages.astype(np.float32)
testImages = testImages.astype(np.float32)
trainImages /= 255
testImages /= 255
# One-hot encode the digit labels.
NUM_DIGITS = 10
print(train_labels[:10])
trainLabels = keras.utils.to_categorical(train_labels, NUM_DIGITS)
testLabels = keras.utils.to_categorical(test_labels, NUM_DIGITS)

# 784 -> 128 ReLU -> 10-way softmax classifier; log training to TensorBoard.
model = Sequential()
model.add(Dense(128, activation=tf.nn.relu,
                input_shape=(FLATTEN_DIM,)))
model.add(Dense(10, activation=tf.nn.softmax))
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
print(model.summary())
tbCallbacks = TensorBoard(log_dir='logs',
                          histogram_freq=0,
                          write_graph=True,
                          write_images=True)
model.fit(trainImages, trainLabels, epochs=20,
          callbacks=[tbCallbacks])
# NOTE(review): predict_classes was removed in newer Keras versions;
# equivalent there: np.argmax(model.predict(testImages), axis=1).
predictLabels = model.predict_classes(testImages)
print("result=", predictLabels[:10])
loss, accuracy = model.evaluate(testImages, testLabels)
print("loss={}, accuracy={}".format(loss, accuracy))
```
```
tensorboard --logdir=logs
```
```
# Re-train with per-epoch validation on the test set (metrics go to TensorBoard).
model.fit(trainImages, trainLabels, epochs=20,
callbacks=[tbCallbacks],
validation_data=(testImages, testLabels))
```
```
jupyter-notebook
```
```
import tensorflow as tf
import numpy as np
import keras
import matplotlib.pyplot as plt
from keras import Sequential
from keras.layers import Dense
from keras.callbacks import TensorBoard

(train_images, train_labels), (test_images, test_labels) = keras.datasets.mnist.load_data()

def plot_image(image, label):
    """Show one 28x28 digit with `label` as the plot title."""
    plt.imshow(image.reshape(28, 28), cmap='binary')
    plt.title(label)
    plt.show()

# Flatten each 28x28 image into a 784-vector and scale pixels to [0, 1].
FLATTEN_DIM = 28 * 28
TRAINING_SIZE = len(train_images)
TEST_SIZE = len(test_images)
trainImages = np.reshape(train_images, (TRAINING_SIZE, FLATTEN_DIM))
testImages = np.reshape(test_images, (TEST_SIZE, FLATTEN_DIM))
print(type(trainImages[0]))
trainImages = trainImages.astype(np.float32)
testImages = testImages.astype(np.float32)
trainImages /= 255
testImages /= 255
# One-hot encode the digit labels.
NUM_DIGITS = 10
print(train_labels[:10])
trainLabels = keras.utils.to_categorical(train_labels, NUM_DIGITS)
testLabels = keras.utils.to_categorical(test_labels, NUM_DIGITS)
```
```
# 784 -> 128 ReLU -> 10-way softmax classifier; log training to TensorBoard.
model = Sequential()
model.add(Dense(128, activation=tf.nn.relu,
input_shape=(FLATTEN_DIM,)))
model.add(Dense(10, activation=tf.nn.softmax))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
print(model.summary())
tbCallbacks = TensorBoard(log_dir='logs',
histogram_freq=0,
write_graph=True,
write_images=True)
# Train with per-epoch validation on the held-out test set.
model.fit(trainImages, trainLabels, epochs=20,
callbacks=[tbCallbacks],
validation_data=(testImages, testLabels))
```
```python=
# NOTE(review): predict_classes was removed in newer Keras versions;
# equivalent there: np.argmax(model.predict(testImages), axis=1).
predictLabels = model.predict_classes(testImages)
print("first 10 result={}".format(predictLabels[:10]))
```
```python=
def plotTestImage(index):
    """Show test image `index` titled with its true and predicted digit."""
    plt.title("the image marked as %d, predict as %d"%
              (test_labels[index], predictLabels[index]))
    plt.imshow(test_images[index], cmap='binary')
    return plt

plotTestImage(5)
```
```python=
# Keep the History object so accuracy curves can be plotted afterwards.
trainHistory = model.fit(trainImages, trainLabels, epochs=10, validation_split=0.1)
```
```python
import matplotlib.pyplot as plt
# Training vs. validation accuracy per epoch.
plt.plot(trainHistory.history['accuracy'], color='red')
plt.plot(trainHistory.history['val_accuracy'], color='blue')
plt.legend(['train', 'validation'])
```
```python=
import pandas as pd
# Confusion matrix: rows = true digit, columns = predicted digit.
pd.crosstab(test_labels, predictLabels, rownames=['label'], colnames=['predict'])
```
```python=
# Side-by-side true/predicted labels for manual inspection.
measure1 = pd.DataFrame({'label':test_labels, 'predict':predictLabels})
measure1[:20]
```
```python=
def plotTestImage(index):
    """Show test image `index` titled with its true and predicted digit."""
    plt.title("the image marked as %d, predict as %d"%
              (test_labels[index], predictLabels[index]))
    plt.imshow(test_images[index], cmap='binary')
    plt.show()

# NOTE(review): `indexs` is not defined in this cell — presumably the rows
# selected from `measure1` where label != predict in an earlier cell; confirm.
for i in indexs.index:
    print(i)
    plotTestImage(i)
```
## lab59_mnist2
```python=
import tensorflow as tf
import keras
import numpy as np
from matplotlib import pyplot as plt

# Fashion-MNIST: 28x28 grayscale images of 10 clothing categories.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) \
    = fashion_mnist.load_data()
class_names = ['T-shirt', 'Trouser', 'Pullover', 'Dress',
               'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure()
plt.imshow(train_images[1])
plt.colorbar()
plt.show()
# Scale pixel values from [0, 255] to [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0
# Preview the first 25 training images with their class names.
plt.figure(figsize=(12, 9))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
```
```python=
import tensorflow as tf
import keras
from keras import layers
import numpy as np
from matplotlib import pyplot as plt

# Fashion-MNIST: 28x28 grayscale images of 10 clothing categories.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) \
    = fashion_mnist.load_data()
class_names = ['T-shirt', 'Trouser', 'Pullover', 'Dress',
               'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure()
plt.imshow(train_images[1])
plt.colorbar()
plt.show()
# Scale pixel values from [0, 255] to [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0
# Preview the first 16 training images with their class names.
plt.figure(figsize=(12, 9))
for i in range(16):
    plt.subplot(4, 4, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
print(train_images.shape)
print(test_images.shape)
print(train_labels.shape)
print(test_labels.shape)
# Flatten -> 128 ReLU -> 10-way softmax classifier.
model = keras.Sequential([
    layers.Flatten(input_shape=(28, 28)),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax')
])
print(model.summary())
```
```
import tensorflow as tf
import keras
from keras import layers
import numpy as np
from matplotlib import pyplot as plt

# Fashion-MNIST: 28x28 grayscale images of 10 clothing categories.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) \
    = fashion_mnist.load_data()
class_names = ['T-shirt', 'Trouser', 'Pullover', 'Dress',
               'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure()
plt.imshow(train_images[1])
plt.colorbar()
plt.show()
# Scale pixel values from [0, 255] to [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0
# Preview the first 16 training images with their class names.
plt.figure(figsize=(12, 9))
for i in range(16):
    plt.subplot(4, 4, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
print(train_images.shape)
print(test_images.shape)
print(train_labels.shape)
print(test_labels.shape)
# Flatten -> 128 ReLU -> 10-way softmax; sparse loss takes integer labels.
model = keras.Sequential([
    layers.Flatten(input_shape=(28, 28)),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax')
])
print(model.summary())
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=30)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print("\nTest accuracy:", test_acc)
```
```
import tensorflow as tf
import keras
from keras import layers
import numpy as np
from matplotlib import pyplot as plt

# Fashion-MNIST: 28x28 grayscale images of 10 clothing categories.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) \
    = fashion_mnist.load_data()
class_names = ['T-shirt', 'Trouser', 'Pullover', 'Dress',
               'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure()
plt.imshow(train_images[1])
plt.colorbar()
plt.show()
# Scale pixel values from [0, 255] to [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(12, 9))
for i in range(16):
    plt.subplot(4, 4, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
print(train_images.shape)
print(test_images.shape)
print(train_labels.shape)
print(test_labels.shape)
model = keras.Sequential([
    layers.Flatten(input_shape=(28, 28)),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax')
])
print(model.summary())
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=30)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print("\nTest accuracy:", test_acc)
predictions = model.predict(test_images)
# 5x3 grid of (image, placeholder) pairs; right-hand cells filled in later.
num_rows = 5
num_cols = 3
num_images = num_rows * num_cols

def plot_image(i, predictions_array, true_label, img):
    """Show test image i (first version: prediction rendering added later)."""
    true_label, img = true_label[i], img[i]
    plt.imshow(img, cmap=plt.cm.binary)

plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
    plot_image(i, predictions[i], test_labels, test_images)
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
    pass
plt.show()
```
```
import tensorflow as tf
import keras
from keras import layers
import numpy as np
from matplotlib import pyplot as plt

# Fashion-MNIST: 28x28 grayscale images of 10 clothing categories.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) \
    = fashion_mnist.load_data()
class_names = ['T-shirt', 'Trouser', 'Pullover', 'Dress',
               'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure()
plt.imshow(train_images[1])
plt.colorbar()
plt.show()
# Scale pixel values from [0, 255] to [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(12, 9))
for i in range(16):
    plt.subplot(4, 4, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
print(train_images.shape)
print(test_images.shape)
print(train_labels.shape)
print(test_labels.shape)
model = keras.Sequential([
    layers.Flatten(input_shape=(28, 28)),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax')
])
print(model.summary())
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=30)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print("\nTest accuracy:", test_acc)
predictions = model.predict(test_images)
num_rows = 5
num_cols = 3
num_images = num_rows * num_cols

def plot_image(i, predictions_array, true_label, img):
    """Show test image i; xlabel = predicted class, confidence %, (true class)."""
    true_label, img = true_label[i], img[i]
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    prediction_label = np.argmax(predictions_array)
    if prediction_label == true_label:
        color = 'blue'   # correct prediction
    else:
        color = 'red'    # wrong prediction
    plt.xlabel("{}{:2.0f}({})".format(class_names[prediction_label],
                                      100 * np.max(predictions_array),
                                      class_names[true_label]),
               color=color)

plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
    plot_image(i, predictions[i], test_labels, test_images)
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
    pass
plt.show()
```
```
import tensorflow as tf
import keras
from keras import layers
import numpy as np
from matplotlib import pyplot as plt

# Fashion-MNIST: 28x28 grayscale images of 10 clothing categories.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) \
    = fashion_mnist.load_data()
class_names = ['T-shirt', 'Trouser', 'Pullover', 'Dress',
               'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure()
plt.imshow(train_images[1])
plt.colorbar()
plt.show()
# Scale pixel values from [0, 255] to [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(12, 9))
for i in range(16):
    plt.subplot(4, 4, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
print(train_images.shape)
print(test_images.shape)
print(train_labels.shape)
print(test_labels.shape)
model = keras.Sequential([
    layers.Flatten(input_shape=(28, 28)),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax')
])
print(model.summary())
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=30)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print("\nTest accuracy:", test_acc)
predictions = model.predict(test_images)
num_rows = 5
num_cols = 3
num_images = num_rows * num_cols

def plot_image(i, predictions_array, true_label, img):
    """Show test image i; xlabel = predicted class, confidence %, (true class)."""
    true_label, img = true_label[i], img[i]
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    prediction_label = np.argmax(predictions_array)
    if prediction_label == true_label:
        color = 'blue'   # correct prediction
    else:
        color = 'red'    # wrong prediction
    plt.xlabel("{}{:2.0f}({})".format(class_names[prediction_label],
                                      100 * np.max(predictions_array),
                                      class_names[true_label]),
               color=color)

def plot_value_array(i, predictions_array, true_label):
    """Bar chart of the 10 class probabilities; predicted bar red, true bar blue."""
    true_label = true_label[i]
    plt.xticks(range(10))
    plt.yticks([])
    thisPlot = plt.bar(range(10), predictions_array, color="#888888")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    thisPlot[predicted_label].set_color('red')
    thisPlot[true_label].set_color('blue')

# Image + probability bars, side by side, for the first 15 test samples.
plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
    plot_image(i, predictions[i], test_labels, test_images)
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
    plot_value_array(i, predictions[i], test_labels)
plt.show()
```
## lab60_generate_bmi
```
import random

def calculateBMI(h, w):
    """Classify BMI from height `h` (cm) and weight `w` (kg)."""
    b = w / ((h / 100) ** 2)
    if b < 18.5:
        return 'thin'
    elif b < 25:
        return 'normal'
    else:
        return 'fat'

# Generate 300,000 random (height, weight) rows with their BMI class.
# NOTE(review): weight range 40-60 kg against heights up to 200 cm skews the
# distribution heavily toward 'thin' — confirm the intended weight range.
with open('data/bmi.csv', 'w', encoding='UTF-8') as file1:
    file1.write('height,weight,label\n')
    category = {'thin': 0, 'normal': 0, 'fat': 0}
    for i in range(300000):
        currentHeight = random.randint(110, 200)
        currentWeight = random.randint(40, 60)
        label = calculateBMI(currentHeight, currentWeight)
        category[label] += 1
        file1.write("%d,%d,%s\n" % (currentHeight, currentWeight, label))
    print("distribution=%s" % str(category))
print("generate OK")
```
## lab62
```
import pandas as pd
import keras
from sklearn.preprocessing import LabelBinarizer
from keras import callbacks
# Load the generated BMI data and scale both features to roughly [0, 1].
csv = pd.read_csv("data/bmi.csv")
csv['height'] = csv['height'] / 200
csv['weight'] = csv['weight'] / 100
print(csv.head(n=20))
# One-hot encode the thin/normal/fat string labels.
encoder = LabelBinarizer()
transformedLabel = encoder.fit_transform(csv['label'])
print(csv['label'][:20])
print(transformedLabel[:20])
# Hold out the last 50,000 rows for testing.
TEST_START = 250000
test_csv = csv[TEST_START:]
test_pat = test_csv[['weight', 'height']]
test_ans = transformedLabel[TEST_START:]
train_csv = csv[:TEST_START]
train_pat = train_csv[["weight", 'height']]
train_ans = transformedLabel[:TEST_START]
print(test_pat.shape)
print(test_ans.shape)
print(train_pat.shape)
print(train_ans.shape)
```
```
import pandas as pd
import keras
from sklearn.preprocessing import LabelBinarizer
from keras import callbacks
# Load the generated BMI data and scale both features to roughly [0, 1].
csv = pd.read_csv("data/bmi.csv")
csv['height'] = csv['height'] / 200
csv['weight'] = csv['weight'] / 100
print(csv.head(n=20))
# One-hot encode the thin/normal/fat string labels.
encoder = LabelBinarizer()
transformedLabel = encoder.fit_transform(csv['label'])
print(csv['label'][:20])
print(transformedLabel[:20])
# Hold out the last 50,000 rows for testing.
TEST_START = 250000
test_csv = csv[TEST_START:]
test_pat = test_csv[['weight', 'height']]
test_ans = transformedLabel[TEST_START:]
train_csv = csv[:TEST_START]
train_pat = train_csv[["weight", 'height']]
train_ans = transformedLabel[:TEST_START]
print(test_pat.shape)
print(test_ans.shape)
print(train_pat.shape)
print(train_ans.shape)
# Local aliases into the keras namespace.
Sequential = keras.models.Sequential
Dense = keras.layers.Dense
backend = keras.backend
# 2 -> 10 -> 10 -> 3 softmax classifier over (weight, height).
model = Sequential()
model.add(Dense(10, activation='relu', input_shape=(2,)))
model.add(Dense(10, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd',
metrics=['accuracy'])
tensorboard = callbacks.TensorBoard(log_dir='logs', histogram_freq=1)
print(model.summary())
model.fit(train_pat, train_ans, batch_size=100, epochs=50,
verbose=1, validation_data=(test_pat, test_ans),
callbacks=[tensorboard])
score = model.evaluate(test_pat, test_ans, verbose=0)
print("score[0]={}, score[1]={}".format(score[0], score[1]))
```
```
tensorboard --logdir=logs
```
## lab57
```
import tensorflow as tf
import numpy as np
import keras
import matplotlib.pyplot as plt
from keras import Sequential
from keras.layers import Dense
from keras.callbacks import TensorBoard

(train_images, train_labels), (test_images, test_labels) = keras.datasets.mnist.load_data()

def plot_image(image, label):
    """Show one 28x28 digit with `label` as the plot title."""
    plt.imshow(image.reshape(28, 28), cmap='binary')
    plt.title(label)
    plt.show()

# Flatten each 28x28 image into a 784-vector and scale pixels to [0, 1].
FLATTEN_DIM = 28 * 28
TRAINING_SIZE = len(train_images)
TEST_SIZE = len(test_images)
trainImages = np.reshape(train_images, (TRAINING_SIZE, FLATTEN_DIM))
testImages = np.reshape(test_images, (TEST_SIZE, FLATTEN_DIM))
print(type(trainImages[0]))
trainImages = trainImages.astype(np.float32)
testImages = testImages.astype(np.float32)
trainImages /= 255
testImages /= 255
# One-hot encode the digit labels.
NUM_DIGITS = 10
print(train_labels[:10])
trainLabels = keras.utils.to_categorical(train_labels, NUM_DIGITS)
testLabels = keras.utils.to_categorical(test_labels, NUM_DIGITS)

# 784 -> 128 ReLU -> 10-way softmax classifier; log training to TensorBoard.
model = Sequential()
model.add(Dense(128, activation=tf.nn.relu,
                input_shape=(FLATTEN_DIM,)))
model.add(Dense(10, activation=tf.nn.softmax))
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
print(model.summary())
tbCallbacks = TensorBoard(log_dir='logs',
                          histogram_freq=0,
                          write_graph=True,
                          write_images=True)
model.fit(trainImages, trainLabels, epochs=20,
          callbacks=[tbCallbacks],
          validation_data=(testImages, testLabels))
# NOTE(review): predict_classes/predict_proba were removed in newer Keras;
# equivalents: np.argmax(model.predict(x), axis=1) and model.predict(x).
predictLabels = model.predict_classes(testImages)
print("class result=", predictLabels[:10])
predicts = model.predict(testImages)
print("predict result=", predicts[:10])
predictProbabilities = model.predict_proba(testImages)
print("probability result=",predictProbabilities[:10])
loss, accuracy = model.evaluate(testImages, testLabels)
print("loss={}, accuracy={}".format(loss, accuracy))
```
## lab72_boston
```
from keras import models
from keras import layers
from keras.datasets import boston_housing

(train_data, train_target), (test_data, test_target) = boston_housing.load_data()
# Standardise features using TRAINING statistics only (no test-set leakage).
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
print(train_data.shape, test_data.shape)
test_data -= mean
test_data /= std

def build_model():
    """64-64-1 regression MLP; linear output for the house-price target."""
    m = models.Sequential()
    m.add(layers.Dense(64, activation='relu',
                       input_shape=(train_data.shape[1],)))
    m.add(layers.Dense(64, activation='relu'))
    m.add(layers.Dense(1))
    return m

model = build_model()
model.compile(optimizer='adam', loss="mse", metrics=['mae'])
model.fit(train_data, train_target, validation_split=0.1,
          epochs=100, batch_size=5, verbose=1)
```
```
from keras import models
from keras import layers
from keras.datasets import boston_housing

(train_data, train_target), (test_data, test_target) = boston_housing.load_data()
# Standardise features using TRAINING statistics only (no test-set leakage).
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
print(train_data.shape, test_data.shape)
test_data -= mean
test_data /= std

def build_model():
    """64-64-1 regression MLP; linear output for the house-price target."""
    m = models.Sequential()
    m.add(layers.Dense(64, activation='relu',
                       input_shape=(train_data.shape[1],)))
    m.add(layers.Dense(64, activation='relu'))
    m.add(layers.Dense(1))
    return m

model = build_model()
model.compile(optimizer='adam', loss="mse", metrics=['mae'])
model.fit(train_data, train_target, validation_split=0.1,
          epochs=100, batch_size=5, verbose=1)
# Compare each test prediction with its true price.
for (i, j) in zip(test_data, test_target):
    predict = model.predict(i.reshape(1, -1))
    print("predict as:{:.1f}, real is:{}, diff={:.1f}".format(predict[0][0], j, predict[0][0] - j))
```
## lab63
```
from keras import layers, models
from keras.datasets import mnist
from keras.utils import to_categorical
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
input_shape=(28, 28, 1)))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation="softmax"))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
metrics=['accuracy'])
print(model.summary())
```
```
from keras import layers, models
from keras.datasets import mnist
from keras.utils import to_categorical
# CNN for MNIST: three 3x3 conv layers with 2x2 max-pooling between them.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
input_shape=(28, 28, 1)))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation="softmax"))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
metrics=['accuracy'])
print(model.summary())
# Reshape to (N, 28, 28, 1), scale to [0, 1], one-hot encode the labels.
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1))
test_images = test_images.astype('float32') / 255
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
model.fit(train_images, train_labels, epochs=10,
batch_size=50, validation_data=(test_images, test_labels))
loss, acc = model.evaluate(test_images, test_labels)
print("loss={}, acc={}".format(loss, acc))
```
```
from keras import layers, models
from keras.datasets import mnist
from keras.utils import to_categorical
# Variant: two 5x5 conv layers (larger receptive field, one conv fewer).
model = models.Sequential()
model.add(layers.Conv2D(32, (5, 5), activation='relu',
input_shape=(28, 28, 1)))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(32, (5, 5), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation="softmax"))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
metrics=['accuracy'])
print(model.summary())
# Reshape to (N, 28, 28, 1), scale to [0, 1], one-hot encode the labels.
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1))
test_images = test_images.astype('float32') / 255
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
model.fit(train_images, train_labels, epochs=10,
batch_size=50, validation_data=(test_images, test_labels))
loss, acc = model.evaluate(test_images, test_labels)
print("loss={}, acc={}".format(loss, acc))
```
```
from keras import layers, models
from keras.datasets import mnist
from keras.utils import to_categorical
# Variant: two 5x5 conv layers plus 10% dropout before the dense head.
model = models.Sequential()
model.add(layers.Conv2D(32, (5, 5), activation='relu',
input_shape=(28, 28, 1)))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(32, (5, 5), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Dropout(0.1))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation="softmax"))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
metrics=['accuracy'])
print(model.summary())
# Reshape to (N, 28, 28, 1), scale to [0, 1], one-hot encode the labels.
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1))
test_images = test_images.astype('float32') / 255
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
model.fit(train_images, train_labels, epochs=10,
batch_size=50, validation_data=(test_images, test_labels))
loss, acc = model.evaluate(test_images, test_labels)
print("loss={}, acc={}".format(loss, acc))
```
```
https://github.com/pjreddie/darknet
```