# 410621225葉映辰 深度學習作業
# 問題:
八位元(1或0)數列中,若"1"的出現次數為偶數輸出1,反之輸出0。
請用深度學習辨識輸入的八位元(1或0)數列輸出的結果。
# 解題思路:
## 第一部分.深度學習用的資料庫之搭建
```python
def iqwe(x):
    """Return 1 when x has an even number of set bits, 0 otherwise.

    Uses Kernighan's trick (x &= x-1 clears the lowest set bit); the
    parity accumulator starts at 1 so that an even popcount yields 1,
    as the assignment requires.
    """
    parity = 1
    while x:
        x &= x - 1  # drop the lowest set bit
        parity ^= 1
    return parity
# Design matrix: row n is the 8-bit big-endian binary expansion of n
# (bit 7 of n lands in column 0), stored as float64 0.0/1.0 values.
zeros = np.zeros((256, 8))
for n in range(256):
    for col in range(8):
        zeros[n, col] = (n >> (7 - col)) & 1
# Target column vector: one[i, 0] is the parity label for pattern i,
# row-aligned with the design matrix `zeros` above.
one = np.array([[iqwe(k)] for k in range(256)], dtype=float)
```
### 測試用題目:
用迴圈產生0到255的十進位數,並將其轉成八位元二進位數列。
### 測試用答案:
呼叫子程式計算每組數列中"1"出現次數的奇偶性,以輸出1或0。
## 第二部分. 模型製作
```python
class Linear:
    """Fully connected layer computing y = x @ W + b."""

    def __init__(self, m, n):
        # Gaussian-initialised parameters (W drawn before b, so seeded
        # runs reproduce the original initialisation order exactly).
        self.W = np.random.randn(m, n)
        self.b = np.random.randn(1, n)
        # Gradients populated by backward().
        self.dW = None
        self.db = None

    def forward(self, x):
        # Cache the input; backward() needs it for dW.
        self.x = x
        return x @ self.W + self.b

    def backward(self, dout):
        self.dW = self.x.T @ dout
        self.db = dout.sum(axis=0)
        return dout @ self.W.T
class ReLU:
    """Rectified linear unit; caches the clipped-entry mask for backprop."""

    def __init__(self):
        pass

    def forward(self, x):
        # Remember which entries were clipped so backward can zero them.
        self.mask = (x <= 0)
        # Fix: the original did `out = x; out[out <= 0] = 0`, which mutated
        # the caller's array in place. np.maximum allocates a fresh array.
        return np.maximum(x, 0)

    def backward(self, dout):
        # Fix: copy before masking so the caller's gradient buffer is
        # not clobbered as a side effect.
        dx = dout.copy()
        dx[self.mask] = 0
        return dx
class Sigmoid:
    """Elementwise logistic activation: sigma(x) = 1 / (1 + e^-x)."""

    def __init__(self):  # fix: was misspelled __int__, so it never ran
        pass

    def forward(self, x):
        out = 1 / (1 + np.exp(-x))
        self.o = out  # cached for the derivative in backward()
        return out

    def backward(self, dout):
        # d(sigma)/dx = sigma * (1 - sigma)
        return dout * self.o * (1 - self.o)
class Loss:
    """Sum-of-squared-errors loss over a batch."""

    def __init__(self):  # fix: was misspelled __inint__, so it never ran
        pass

    def forward(self, y, ybar):
        # Fix: also cache y; the original backward() read the *global* y,
        # which only worked by accident inside the training script.
        self.y = y
        self.ybar = ybar
        return np.sum((y - ybar) ** 2)

    def backward(self, dout):
        # d/d(ybar) of sum((y - ybar)^2) = -2 (y - ybar).
        # `dout` is accepted for interface compatibility but, as in the
        # original, is not folded into the gradient.
        return -(2 * (self.y - self.ybar))
class twoLayer:
    """Two-layer MLP: Linear(m,n) -> ReLU -> Linear(n,o) -> Sigmoid,
    trained with sum-of-squares loss and classical momentum SGD."""

    def __init__(self, m, n, o):
        self.linear0 = Linear(m, n)
        self.relu0 = ReLU()
        self.linear = Linear(n, o)
        self.sigmoid = Sigmoid()
        self.loss = Loss()
        # Previous update steps (momentum "velocity"), one per parameter.
        self.last_dW = 0
        self.last_db = 0
        self.last_dW0 = 0
        self.last_db0 = 0

    def forward(self, x):
        x = self.linear0.forward(x)
        x = self.relu0.forward(x)
        x = self.linear.forward(x)
        self.ybar = self.sigmoid.forward(x)
        return self.ybar

    def backward(self, y):
        # Stores the scalar loss in self.L and fills every layer's grads.
        self.L = self.loss.forward(y, self.ybar)
        g = self.loss.backward(1)
        g = self.sigmoid.backward(g)
        g = self.linear.backward(g)
        g = self.relu0.backward(g)
        g = self.linear0.backward(g)

    def update(self, eta, alpha):
        # Classical momentum: step = -eta * grad + alpha * previous_step.
        # Fix: the original stored +eta*grad as the "last" step and *added*
        # alpha times it, which pushed the weights back up the gradient
        # (anti-momentum) instead of accelerating the descent.
        step_W = -eta * self.linear.dW + alpha * self.last_dW
        step_b = -eta * self.linear.db + alpha * self.last_db
        self.linear.W = self.linear.W + step_W
        self.linear.b = self.linear.b + step_b
        self.last_dW = step_W
        self.last_db = step_b
        step_W0 = -eta * self.linear0.dW + alpha * self.last_dW0
        step_b0 = -eta * self.linear0.db + alpha * self.last_db0
        self.linear0.W = self.linear0.W + step_W0
        self.linear0.b = self.linear0.b + step_b0
        self.last_dW0 = step_W0
        self.last_db0 = step_b0
# Train on all 256 eight-bit patterns; report progress every chk_epochs.
# (Removed: dead module-level `last_dW, last_db` — momentum state lives on
# the model — and the leftover commented-out XOR fixture.)
X = zeros
y = one
model = twoLayer(8, 128, 1)
max_epochs, chk_epochs = 100000, 10000
eta, alpha = 0.04, 0.02  # learning rate, momentum coefficient
for e in range(max_epochs):
    model.forward(X)
    model.backward(y)
    model.update(eta, alpha)
    if (e + 1) % chk_epochs == 0:
        print(model.ybar.T)
        print('Epoch %3d: loss=%.6f' % (e + 1, model.L))
        print("")
```


