import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
# Display matplotlib figures inline in the notebook instead of opening a separate window
%matplotlib inline
np.random.seed(2)
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils.np_utils import to_categorical # convert labels to one-hot encoding
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
Using TensorFlow backend.
# Load the data
train = pd.read_csv(r'/home/cd/kaggle-master/datasets/getting-started/digit-recognizer/input/train.csv')
test = pd.read_csv(r'/home/cd/kaggle-master/datasets/getting-started/digit-recognizer/input/test.csv')
# Separate the labels (first column) from the 784 pixel columns
X_train = train.values[:, 1:]
Y_train = train.values[:, 0]
test = test.values
# Normalization
X_train = X_train / 255.0
test = test / 255.0
# Reshape the flat 784-pixel vectors into 28x28x1 images (height, width, channels)
X_train = X_train.reshape(-1, 28, 28, 1)
test = test.reshape(-1, 28, 28, 1)
# One-hot encode the labels (e.g. 2 -> [0,0,1,0,0,0,0,0,0,0])
Y_train = to_categorical(Y_train, num_classes=10)
random_seed = 2
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed)
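# A quick sanity check (not part of the original output): the Kaggle digit-recognizer train.csv
# has 42000 labelled rows, so a 10% validation split should give roughly 37800/4200 images.
print(X_train.shape, X_val.shape, Y_train.shape, Y_val.shape)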
# CNN architecture: [Conv2D*2 -> MaxPool2D -> Dropout]*2 -> Flatten -> Dense -> Dropout -> Dense(softmax)
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same',
                 activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same',
                 activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same',
                 activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same',
                 activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
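# Optional check (not in the original notebook): print the layer output shapes and parameter
# counts to confirm the architecture before compiling.
model.summary()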
# Define the optimizer
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# Compile the model
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
epochs = 30
batch_size = 86
# Set a learning rate annealer
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
# Data augmentation to artificially expand the training set
datagen = ImageDataGenerator(
        featurewise_center=False,             # do not set the input mean to 0 over the dataset (per feature)
        samplewise_center=False,              # do not set each sample mean to 0
        featurewise_std_normalization=False,  # do not divide inputs by the std of the dataset (per feature)
        samplewise_std_normalization=False,   # do not divide each input by its own std
        zca_whitening=False,                  # do not apply ZCA whitening
        rotation_range=10,                    # randomly rotate images by up to 10 degrees
        zoom_range=0.1,                       # randomly zoom images by up to 10%
        width_shift_range=0.1,                # randomly shift images horizontally, up to 10% of the width
        height_shift_range=0.1,               # randomly shift images vertically, up to 10% of the height
        horizontal_flip=False,                # no random horizontal flips (they would distort digits)
        vertical_flip=False)                  # no random vertical flips (they would distort digits)
datagen.fit(X_train)
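# A minimal sketch (illustrative only, not in the original notebook): preview a few augmented
# digits produced by datagen to sanity-check the rotation/zoom/shift settings above.
aug_batch, _ = next(datagen.flow(X_train, Y_train, batch_size=9))
fig, axes = plt.subplots(3, 3, figsize=(6, 6))
for img, ax in zip(aug_batch, axes.flat):
    ax.imshow(img.reshape(28, 28), cmap='gray')
    ax.axis('off')
plt.show()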
import datetime
starttime = datetime.datetime.now()
# Fit the model on batches generated in real time by datagen
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                              epochs=epochs, validation_data=(X_val, Y_val),
                              verbose=2, steps_per_epoch=X_train.shape[0] // batch_size,
                              callbacks=[learning_rate_reduction])
endtime = datetime.datetime.now()
print((endtime - starttime).seconds)
Epoch 1/30
- 10s - loss: 0.0352 - acc: 0.9900 - val_loss: 0.0174 - val_acc: 0.9943
Epoch 2/30
- 10s - loss: 0.0322 - acc: 0.9908 - val_loss: 0.0176 - val_acc: 0.9952
Epoch 3/30
- 10s - loss: 0.0353 - acc: 0.9895 - val_loss: 0.0177 - val_acc: 0.9950
Epoch 4/30
- 9s - loss: 0.0320 - acc: 0.9908 - val_loss: 0.0172 - val_acc: 0.9950
Epoch 5/30
- 9s - loss: 0.0350 - acc: 0.9903 - val_loss: 0.0173 - val_acc: 0.9952
Epoch 6/30
Epoch 00006: reducing learning rate to 1e-05.
- 10s - loss: 0.0346 - acc: 0.9897 - val_loss: 0.0170 - val_acc: 0.9948
Epoch 7/30
- 10s - loss: 0.0350 - acc: 0.9897 - val_loss: 0.0171 - val_acc: 0.9948
Epoch 8/30
- 10s - loss: 0.0338 - acc: 0.9907 - val_loss: 0.0170 - val_acc: 0.9950
Epoch 9/30
- 10s - loss: 0.0355 - acc: 0.9896 - val_loss: 0.0172 - val_acc: 0.9948
Epoch 10/30
- 9s - loss: 0.0335 - acc: 0.9907 - val_loss: 0.0174 - val_acc: 0.9948
Epoch 11/30
- 9s - loss: 0.0318 - acc: 0.9903 - val_loss: 0.0171 - val_acc: 0.9948
Epoch 12/30
- 10s - loss: 0.0348 - acc: 0.9902 - val_loss: 0.0173 - val_acc: 0.9948
Epoch 13/30
- 10s - loss: 0.0337 - acc: 0.9902 - val_loss: 0.0170 - val_acc: 0.9950
Epoch 14/30
- 10s - loss: 0.0344 - acc: 0.9902 - val_loss: 0.0172 - val_acc: 0.9948
Epoch 15/30
- 9s - loss: 0.0339 - acc: 0.9900 - val_loss: 0.0171 - val_acc: 0.9950
Epoch 16/30
- 10s - loss: 0.0338 - acc: 0.9904 - val_loss: 0.0168 - val_acc: 0.9948
Epoch 17/30
- 10s - loss: 0.0342 - acc: 0.9902 - val_loss: 0.0166 - val_acc: 0.9950
Epoch 18/30
- 10s - loss: 0.0358 - acc: 0.9903 - val_loss: 0.0169 - val_acc: 0.9950
Epoch 19/30
- 9s - loss: 0.0339 - acc: 0.9903 - val_loss: 0.0166 - val_acc: 0.9950
Epoch 20/30
- 10s - loss: 0.0356 - acc: 0.9903 - val_loss: 0.0166 - val_acc: 0.9950
Epoch 21/30
- 10s - loss: 0.0350 - acc: 0.9900 - val_loss: 0.0165 - val_acc: 0.9952
Epoch 22/30
- 10s - loss: 0.0350 - acc: 0.9899 - val_loss: 0.0169 - val_acc: 0.9950
Epoch 23/30
- 10s - loss: 0.0353 - acc: 0.9898 - val_loss: 0.0171 - val_acc: 0.9948
Epoch 24/30
- 9s - loss: 0.0325 - acc: 0.9904 - val_loss: 0.0167 - val_acc: 0.9948
Epoch 25/30
- 10s - loss: 0.0359 - acc: 0.9892 - val_loss: 0.0168 - val_acc: 0.9948
Epoch 26/30
- 10s - loss: 0.0349 - acc: 0.9901 - val_loss: 0.0163 - val_acc: 0.9948
Epoch 27/30
- 10s - loss: 0.0328 - acc: 0.9908 - val_loss: 0.0166 - val_acc: 0.9948
Epoch 28/30
- 10s - loss: 0.0331 - acc: 0.9910 - val_loss: 0.0166 - val_acc: 0.9952
Epoch 29/30
- 10s - loss: 0.0343 - acc: 0.9903 - val_loss: 0.0173 - val_acc: 0.9943
Epoch 30/30
- 9s - loss: 0.0346 - acc: 0.9902 - val_loss: 0.0166 - val_acc: 0.9948
287
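# A minimal sketch (not part of the original run): plot the training curves recorded in `history`.
# The key names ('acc'/'val_acc') follow the old Keras History format seen in the log above.
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
ax[0].plot(history.history['loss'], label='train loss')
ax[0].plot(history.history['val_loss'], label='val loss')
ax[0].set_title('Loss')
ax[0].legend()
ax[1].plot(history.history['acc'], label='train acc')
ax[1].plot(history.history['val_acc'], label='val acc')
ax[1].set_title('Accuracy')
ax[1].legend()
plt.show()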
# Predict on the test set and keep the most probable class for each image
results = model.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
# Build the submission file expected by Kaggle (ImageId starts at 1)
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("datasets/getting-started/digit-recognizer/ouput/Result_keras_CNN.csv", index=False)
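# A minimal sketch (assumption: run in the same session, after training): confusion matrix on the
# held-out validation set, using the confusion_matrix and seaborn imports from the top of the notebook.
Y_val_pred = np.argmax(model.predict(X_val), axis=1)
Y_val_true = np.argmax(Y_val, axis=1)
cm = confusion_matrix(Y_val_true, Y_val_pred)
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.show()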