Added the final code file #3

Open · wants to merge 1 commit into master
162 changes: 162 additions & 0 deletions main_code.py
@@ -0,0 +1,162 @@
# -*- coding: utf-8 -*-
"""IE_PROJECT.ipynb

Automatically generated by Colaboratory.

Original file is located at
https://colab.research.google.com/drive/1R8YipvIIF4_mZOYzJSR6UTEj7xHCiUtz
"""

import numpy as np
import matplotlib.pyplot as plt

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import (Conv2D, MaxPooling2D, Activation, Dropout,
                          Flatten, Dense, BatchNormalization)
from keras.utils import to_categorical

(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_set = X_test  # keep an unscaled copy of the test images for inspection
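
# matplotlib is imported above but never used in the original file; as a small
# optional sketch (not in the original PR), it can sanity-check the raw data:
plt.imshow(X_set[0], cmap="gray")
plt.title(f"label: {y_test[0]}")
plt.show()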


# Scale pixel values to [0, 1] and add a single channel dimension
X_train = X_train.reshape(-1, 28, 28, 1) / 255.0
X_test = X_test.reshape(-1, 28, 28, 1) / 255.0

# One-hot encode the integer labels
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

import torch
from torchvision import models

# Load the pretrained torchvision AlexNet to use as a layer-by-layer reference
alexnet = models.alexnet(pretrained=True)
print(alexnet)
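
# As a small optional check (not in the original PR), the reference model's
# parameter count can be compared against model.summary() further below:
n_params = sum(p.numel() for p in alexnet.parameters())
print(f"torchvision AlexNet parameters: {n_params:,}")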

# My implementation of AlexNet using Keras. The numbered comments below mirror
# the layer indices printed for the torchvision model; kernel sizes are scaled
# down to suit the 28x28 MNIST inputs.
model = Sequential()

# (0)
model.add(Conv2D(filters=96, input_shape=(28, 28, 1), kernel_size=(3, 3), strides=(4, 4), padding='valid'))

# (1)
model.add(Activation('relu'))

# (2)
model.add(MaxPooling2D(pool_size=(1,1), strides=(2,2), padding='valid'))

# Normalising
model.add(BatchNormalization())

# (3)
model.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='valid'))

# (4)
model.add(Activation('relu'))


# (5)
model.add(MaxPooling2D(pool_size=(1,1), strides=(2,2), padding='valid'))

# Normalising
model.add(BatchNormalization())

# (6)
model.add(Conv2D(filters=384, kernel_size=(1,1), strides=(1,1), padding='valid'))

# (7)
model.add(Activation('relu'))


# Normalising
model.add(BatchNormalization())

# (8)
model.add(Conv2D(filters=384, kernel_size=(1,1), strides=(1,1), padding='valid'))

# (9)
model.add(Activation('relu'))


# Normalising
model.add(BatchNormalization())

# (10)
model.add(Conv2D(filters=256, kernel_size=(1,1), strides=(1,1), padding='valid'))

# (11)
model.add(Activation('relu'))

# (12)
model.add(MaxPooling2D(pool_size=(1,1), strides=(2,2), padding='valid'))


# Normalising
model.add(BatchNormalization())


# Flatten in place of AdaptiveAvgPool2d, then the fully connected classifier
model.add(Flatten())
model.add(Dense(4096))

model.add(Activation('relu'))

# (0)
model.add(Dropout(0.5))

# Normalising
model.add(BatchNormalization())
# (1)
model.add(Dense(4096))

# (2)
model.add(Activation('relu'))

# (3)
model.add(Dropout(0.5))

# Normalising
model.add(BatchNormalization())

# (4)
model.add(Dense(1000))
# (5)
model.add(Activation('relu'))

# Normalising
model.add(BatchNormalization())

# (6)
model.add(Dense(10))
model.add(Activation('softmax'))

# AlexNet model ends here

model.summary()

# Compile step
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])


# Fitting step
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=200, verbose=2)


"""
Output

Train on 60000 samples, validate on 10000 samples
Epoch 1/3
- 355s - loss: 0.8524 - acc: 0.7469 - val_loss: 0.6196 - val_acc: 0.8106
Epoch 2/3
- 350s - loss: 0.5407 - acc: 0.8253 - val_loss: 0.5186 - val_acc: 0.8397
Epoch 3/3
- 351s - loss: 0.4828 - acc: 0.8433 - val_loss: 0.4859 - val_acc: 0.8416

I've included the output here because a full training run takes around 16-17 minutes.
"""