Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

My model #14

Closed
RParedesPalacios opened this issue Mar 28, 2020 · 0 comments
Closed

My model #14

RParedesPalacios opened this issue Mar 28, 2020 · 0 comments

Comments

@RParedesPalacios
Copy link
Contributor

RParedesPalacios commented Mar 28, 2020

Perhaps other practitioners can get ideas from my model. This is for the 2 class problem C vs N however I think that it would get good results with the other problems.

# NOTE(review): the paste lost the double underscores (GitHub markdown
# renders __x__ as bold) — the module is __future__, not "future".
from __future__ import print_function

import os
import sys

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

import keras
from keras.callbacks import LearningRateScheduler as LRS
from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import GaussianNoise as GN
from keras.layers.normalization import BatchNormalization as BN
from keras.models import Sequential
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from sklearn.utils import class_weight

# ---- Load data ----
# Labels are one-hot with 2 columns (the C vs N binary problem).
y_train = np.load('tr2_Y.npy')
y_test = np.load('val2_Y.npy')

print(y_train.shape)
print(y_test.shape)

# Per-class sample counts from the one-hot columns.
print(sum(y_train[:, 0]))
print(sum(y_train[:, 1]))

x_train = np.load('tr2_X.npy')
x_test = np.load('val2_X.npy')

print(x_train.shape)

print(x_test.shape)

# Integer class index per sample, recovered from the one-hot labels.
result = np.argmax(y_train, axis=1)
print(result.shape)

# 'balanced' weights compensate the class imbalance during training.
class_weights = class_weight.compute_class_weight('balanced', np.unique(result), result)

print(class_weights)

# Cast before scaling: in-place `x /= 255` raises a TypeError when the
# arrays were saved with an integer dtype (true division cannot be done
# in place on int arrays).
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255

num_classes = 2

# ---- Data-augmentation generator ----
# Mild geometric jitter only; horizontal flips are explicitly disabled.
augmentation_options = dict(
    width_shift_range=0.2,
    height_shift_range=0.2,
    rotation_range=20,
    zoom_range=[0.9, 1.1],
    horizontal_flip=False,
)
datagen = ImageDataGenerator(**augmentation_options)

# Bottleneck block: CONV + BN + MAXPOOL.
def CBGN(model, filters, size):
    """Append a bottleneck block to *model* and return it.

    Three conv stages (1x1 -> size x size -> 1x1 with 4*filters),
    each followed by BatchNorm + ReLU, then a 2x2 max-pool.
    """
    for n_filters, kernel in ((filters, 1), (filters, size), (4 * filters, 1)):
        model.add(Conv2D(n_filters, (kernel, kernel), padding='same'))
        model.add(BN())
        model.add(Activation('relu'))

    model.add(MaxPooling2D(pool_size=(2, 2)))

    return model

# ---- Network topology ----
model = Sequential()

# Stem: strided 7x7 conv halves the spatial resolution up front.
model.add(Conv2D(32, (7, 7), padding='same', strides=(2, 2),
                 input_shape=x_train.shape[1:]))

# Stack of bottleneck blocks with growing width.
for width in (32, 64, 128, 256, 256):
    model = CBGN(model, width, 3)

# Head: global pooling (rather than Flatten) keeps the parameter count low.
model.add(GlobalMaxPooling2D())
model.add(Dense(64))
model.add(Activation('relu'))

model.add(Dense(num_classes))
model.add(Activation('softmax'))

model.summary()

# ---- Optimizer and compilation ----
# Plain SGD; the effective learning rate is driven by the LR-scheduler
# callback during fit, so no rate is set here.
opt = SGD()

model.compile(
    loss='categorical_crossentropy',
    optimizer=opt,
    metrics=['accuracy'],
)

# ---- Learning-rate schedule ----
def scheduler(epoch):
    """Step schedule: 1e-3 until epoch 200, 1e-4 until 250, 1e-5 after."""
    if epoch < 200:
        return 0.001
    if epoch < 250:
        return 0.0001
    return 0.00001

# Callback that applies scheduler() at the start of every epoch.
set_lr = LRS(scheduler)

# ---- Training, stage 1 ----
batch_size = 16  # sized for 2 x Titan 12Gb
epochs = 100

# Pre-training pass: no data augmentation and no class weighting yet.
history = model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    callbacks=[set_lr],
    validation_data=(x_test, y_test),
)

# ---- Training, stage 2: data augmentation + class weighting ----
epochs = 300

# Keras expects class_weight as a {class_index: weight} dict;
# sklearn's compute_class_weight returns a plain array, so convert it.
class_weight_dict = dict(enumerate(class_weights))

history = model.fit_generator(
    datagen.flow(x_train, y_train, batch_size=batch_size),
    # Integer ceil-division so every sample is covered each epoch,
    # instead of passing a float to steps_per_epoch.
    steps_per_epoch=-(-len(x_train) // batch_size),
    epochs=epochs,
    validation_data=(x_test, y_test),
    callbacks=[set_lr],
    class_weight=class_weight_dict,
    verbose=1)

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant