# Note: alibi AnchorImage on the Fashion-MNIST CNN and the HateSymbolDetection model
```python
import matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Input
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from alibi.explainers import AnchorImage
from HateSymbolDetection import *
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
print('x_train shape:', x_train.shape, 'y_train shape:', y_train.shape)
idx = 0
plt.imshow(x_train[idx]);
plt.savefig("mnist_data.png")
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
x_train = np.reshape(x_train, x_train.shape + (1,))
x_test = np.reshape(x_test, x_test.shape + (1,))
print('x_train shape:', x_train.shape, 'x_test shape:', x_test.shape)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print('y_train shape:', y_train.shape, 'y_test shape:', y_test.shape)
"""
def model():
    x_in = Input(shape=(28, 28, 1))
    x = Conv2D(filters=64, kernel_size=2, padding='same', activation='relu')(x_in)
    x = MaxPooling2D(pool_size=2)(x)
    x = Dropout(0.3)(x)
    x = Conv2D(filters=32, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling2D(pool_size=2)(x)
    x = Dropout(0.3)(x)
    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.5)(x)
    x_out = Dense(10, activation='softmax')(x)
    cnn = Model(inputs=x_in, outputs=x_out)
    cnn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return cnn
cnn = model()
cnn.summary()
cnn.fit(x_train, y_train, batch_size=64, epochs=3)
# Evaluate the model on test set
score = cnn.evaluate(x_test, y_test, verbose=0)
print('Test accuracy: ', score[1])
"""
hatesymboldetection = HateSymbolDetection()
hatesymboldetection.load()
#x = ["doc/atomwaffen.jpg"]
#x = ["doc/sample2.jpeg"]
#x1 = x_train[0]
x1 = ["01e0c4be.png"]  # predict() only accepts file names, not image arrays
result = hatesymboldetection.predict(x1)
#result = hatesymboldetection.predict(x)
print("-----hate symbol detection results: ", result)
def superpixel(image, size=(4, 7)):  # image is a file name here, so it has no shape attribute
    #segments = np.zeros([640, 640])
    segments = np.zeros([28, 28])
    #segments = np.zeros([image.shape[0], image.shape[1]])
    row_idx, col_idx = np.where(segments == 0)
    for i, j in zip(row_idx, col_idx):
        segments[i, j] = int((28/size[1]) * (i//size[0]) + j//size[1])
        #segments[i, j] = int((640/size[1]) * (i//size[0]) + j//size[1])
        #segments[i, j] = int((image.shape[1]/size[1]) * (i//size[0]) + j//size[1])
    return segments
"""
segments = superpixel(x_train[idx])
plt.imshow(segments);
"""
segments = superpixel(x1)
plt.imshow(segments);
plt.savefig("atomwaffen_superpixel")
"""
predict_fn = lambda x: cnn.predict(x)
image_shape = x_train[idx].shape
explainer = AnchorImage(predict_fn, image_shape, segmentation_fn=superpixel)
i = 1
image = x_test[i]
plt.imshow(image[:,:,0]);
cnn.predict(image.reshape(1, 28, 28, 1)).argmax()
explanation = explainer.explain(image, threshold=.95, p_sample=.8, seed=0)
plt.imshow(explanation.anchor[:,:,0]);
plt.savefig("test.png")
"""
predict_fn = lambda x: hatesymboldetection.predict(x)
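# NOTE: AnchorImage calls the predictor with batches of image arrays
# (shape (N, 28, 28, 1) here), but HateSymbolDetection.predict() expects a
# list of file names (see x1 above), so this predict_fn is not compatible
# with explainer.explain() as-is; see the adapter sketch after this block.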
image_shape = x_train[idx].shape
#print('=======image_shape: ', image_shape) #(28, 28, 1)
#image_shape = (640, 640)
#image_shape = x.shape
#explainer = AnchorImage(predict_fn, image_shape, segmentation_fn=superpixel)
#explainer = AnchorImage(predict_fn, 640,640, segmentation_fn=superpixel)
explainer = AnchorImage(predict_fn, image_shape, segmentation_fn=superpixel)
print('*************')
i = 1
image = x_test[i]
print('=====image.shape: ', image.shape) #(28, 28, 1)
#plt.imshow(image[:,:,0]);
#cnn.predict(image.reshape(1, 28, 28, 1)).argmax()
explanation = explainer.explain(image, threshold=.95, p_sample=.8, seed=0)
#explanation = explainer.explain(x1, threshold=.95, p_sample=.8, seed=0)
#plt.imshow(explanation.anchor[:,:,0]);
#plt.savefig("atomwaffen_test.png")
```
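As the comments in the first block suggest, `HateSymbolDetection.predict()` only accepts file names, while alibi's `AnchorImage` calls its predictor with batches of image arrays, so the two do not fit together directly. Below is a minimal sketch of a bridging predictor, assuming the objects from the block above (`hatesymboldetection`, `image_shape`, `superpixel`) are still in scope. The detector's exact return format is not shown in this note, so the `np.array(...)` stacking at the end is an assumption and may need adjusting.

```python
import os
import tempfile

import matplotlib.pyplot as plt
import numpy as np

def detector_predict_fn(images):
    """Adapt the file-name-based detector to AnchorImage's array-based predictor contract."""
    scores = []
    for img in images:
        # write each perturbed array to a temporary PNG so predict() can read it by file name
        fd, path = tempfile.mkstemp(suffix=".png")
        os.close(fd)
        try:
            plt.imsave(path, np.squeeze(img), cmap='gray')
            scores.append(hatesymboldetection.predict([path]))
        finally:
            os.remove(path)
    # assumption: one row of scores per image; adjust to the detector's actual output format
    return np.array(scores)

# explainer = AnchorImage(detector_predict_fn, image_shape, segmentation_fn=superpixel)
```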
```python
import matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Input
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from alibi.explainers import AnchorImage
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
print('x_train shape:', x_train.shape, 'y_train shape:', y_train.shape)
idx = 0
plt.imshow(x_train[idx]);
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
x_train = np.reshape(x_train, x_train.shape + (1,))
x_test = np.reshape(x_test, x_test.shape + (1,))
print('x_train shape:', x_train.shape, 'x_test shape:', x_test.shape)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print('y_train shape:', y_train.shape, 'y_test shape:', y_test.shape)
def model():
    x_in = Input(shape=(28, 28, 1))
    x = Conv2D(filters=64, kernel_size=2, padding='same', activation='relu')(x_in)
    x = MaxPooling2D(pool_size=2)(x)
    x = Dropout(0.3)(x)
    x = Conv2D(filters=32, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling2D(pool_size=2)(x)
    x = Dropout(0.3)(x)
    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.5)(x)
    x_out = Dense(10, activation='softmax')(x)
    cnn = Model(inputs=x_in, outputs=x_out)
    cnn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return cnn
cnn = model()
cnn.summary()
cnn.fit(x_train, y_train, batch_size=64, epochs=1)
# Evaluate the model on test set
score = cnn.evaluate(x_test, y_test, verbose=0)
print('Test accuracy: ', score[1])
def superpixel(image, size=(4, 7)):
    segments = np.zeros([image.shape[0], image.shape[1]])
    row_idx, col_idx = np.where(segments == 0)
    for i, j in zip(row_idx, col_idx):
        segments[i, j] = int((image.shape[1]/size[1]) * (i//size[0]) + j//size[1])
    return segments
segments = superpixel(x_train[idx])
plt.imshow(segments);
predict_fn = lambda x: cnn.predict(x)
image_shape = x_train[idx].shape
explainer = AnchorImage(predict_fn, image_shape, segmentation_fn=superpixel)
i = 1
image = x_test[i]
plt.imshow(image[:,:,0]);
tmp = cnn.predict(image.reshape(1, 28, 28, 1)).argmax()
print('tmp: ', tmp)
explanation = explainer.explain(image, threshold=.95, p_sample=.8, seed=0)
plt.imshow(explanation.anchor[:,:,0]);
plt.savefig("test.png")
```
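The second block is the standard Fashion-MNIST AnchorImage example and runs end to end. Beyond the anchor overlay saved above, the explanation object also carries the precision, coverage, and the segmentation that was used; the attribute names in the sketch below follow alibi's documented explanation fields, but check the installed version if they differ.

```python
# Optional follow-up, assuming `explanation` from the block above is in scope.
print('precision:', explanation.precision)  # fraction of perturbed samples that keep the prediction
print('coverage:', explanation.coverage)    # fraction of the input space the anchor applies to
plt.imshow(explanation.segments);           # superpixel segmentation used by the explainer
plt.savefig("test_segments.png")
```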