Commit

Add files via upload
RishalAggarwal authored Mar 14, 2018
1 parent 41bea34 commit 194f780
Showing 11 changed files with 272 additions and 0 deletions.
@@ -0,0 +1,76 @@
import os
import pandas as pd
import numpy as np
import skimage
from skimage import transform
import nibabel

ssdata_dir='path_to_skull_stripped_images'
aldata_dir='path_to_images_of_patients_with_alzheimers'
nldata_dir='path_to_images_of_normal_control'
mcidata_dir='path_to_images_of_patients_with_mild_cognitive_impairment'
labels_df=pd.read_csv('path_to_dataset_datasheet',index_col=2) #indexed by the patient id

#labeling and object formation

for file in os.listdir(ssdata_dir):
    netdata=[] #will be used for numpy object
    try:
        img = nibabel.load(os.path.join(ssdata_dir, file)) # loading the image
        img = img.get_data() # accessing image array
        img = skimage.transform.resize(img, (106, 106, 120)) #resizing the image to dimensions (106, 106, 120)
        id = file.partition('.') #accessing patient id numbers
        id = id[0].partition('_2') #id[0] is the patient id, id[1] is the '_2' suffix (if present)
        label = labels_df.get_value(id[0], 'Screen.Diagnosis') #getting the label
        if np.unique(label == 'NL'):
            labelar = np.array([1, 0, 0])
            netdata.append([img, labelar]) #one hot encoding and saving numpy object
            np.save(os.path.join(nldata_dir, id[0] + id[1]), netdata)
        elif np.unique(label == 'AD'):
            labelar = np.array([0, 1, 0])
            netdata.append([img, labelar])
            np.save(os.path.join(aldata_dir, id[0] + id[1]), netdata)
        elif np.unique(label == 'MCI'):
            labelar = np.array([0, 0, 1])
            netdata.append([img, labelar])
            np.save(os.path.join(mcidata_dir, id[0] + id[1]), netdata)
    except Exception: #skip scans that fail to load or have no matching label
        continue

#normalisation

totalnum=[] #voxel count of each image
mean=[] #mean intensity of each image
nummax=[] #maximum intensity of each image
for file in os.listdir(aldata_dir):
    img = np.load(os.path.join(aldata_dir, file))
    mean.append(np.mean(img[0][0]))
    totalnum.append(img[0][0].shape[0]*img[0][0].shape[1]*img[0][0].shape[2])
    nummax.append(np.max(img[0][0]))
for file in os.listdir(nldata_dir):
    img = np.load(os.path.join(nldata_dir, file))
    mean.append(np.mean(img[0][0]))
    totalnum.append(img[0][0].shape[0]*img[0][0].shape[1]*img[0][0].shape[2])
    nummax.append(np.max(img[0][0]))
for file in os.listdir(mcidata_dir):
    img = np.load(os.path.join(mcidata_dir, file))
    mean.append(np.mean(img[0][0]))
    totalnum.append(img[0][0].shape[0]*img[0][0].shape[1]*img[0][0].shape[2])
    nummax.append(np.max(img[0][0]))
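
# pooled statistics for the whole dataset: the weighted mean computed below is
#   mean_total = sum_i(mean_i * n_i) / sum_i(n_i)
# where n_i is the voxel count of scan i; every scan was resized to (106, 106, 120),
# so the weights are equal and this reduces to a plain average of the per-scan means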
nummean=np.vdot(mean,totalnum)/np.sum(totalnum) #mean value for the full dataset
nummax=np.max(nummax) #max value for the full dataset

for file in os.listdir(aldata_dir):
    img = np.load(os.path.join(aldata_dir, file))
    img[0][0] = (img[0][0] - nummean) / nummax #normalisation: (x - mean) / max
    np.save(os.path.join(aldata_dir, file), img)
for file in os.listdir(nldata_dir):
    img = np.load(os.path.join(nldata_dir, file))
    img[0][0] = (img[0][0] - nummean) / nummax
    np.save(os.path.join(nldata_dir, file), img)
for file in os.listdir(mcidata_dir):
    img = np.load(os.path.join(mcidata_dir, file))
    img[0][0] = (img[0][0] - nummean) / nummax
    np.save(os.path.join(mcidata_dir, file), img)
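
A quick sanity check on the saved objects (a minimal sketch, assuming the placeholder directories above have been filled in): each .npy file holds a one-element list containing the resized image and its one-hot label. Newer NumPy releases need allow_pickle=True to read such object arrays.

import os
import numpy as np

nldata_dir = 'path_to_images_of_normal_control' #same placeholder as above
sample_file = os.listdir(nldata_dir)[0] #any saved normal-control scan
sample = np.load(os.path.join(nldata_dir, sample_file), allow_pickle=True)
img, label = sample[0][0], sample[0][1]
print(img.shape) #expected (106, 106, 120)
print(label) #expected [1 0 0] for a normal-control scan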


@@ -0,0 +1,46 @@
import nibabel
import matplotlib.pyplot as plt
import os
import numpy as np

data_dir='path_to_image_folder' #image directory
img=nibabel.load(os.path.join(data_dir,'image.name')) #loading the image
img_data=img.get_data()

hist,bins=np.histogram(img_data[img_data!=0],bins='auto',density=True) #histogram of the non-zero voxels as a probability density (density=True); the black background is ignored
bins=0.5*(bins[1:]+bins[:-1]) #taking midpoints of bins

t1=0 #threshold1 index
t2=0 #threshold2 index

currvar=0 #between-class variance; we have to maximise this value
u=np.zeros(3) #means of the three distributions
w=np.zeros(3) #weights of the three distributions

uT=np.vdot(bins,hist)/np.sum(hist) #mean of the full histogram
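
# between-class variance for three classes (what the nested loops below maximise):
#   sigma_B^2 = w0*(u0 - uT)^2 + w1*(u1 - uT)^2 + w2*(u2 - uT)^2
# the threshold pair (i, j) giving the largest value is kept as (t2, t1)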

for i in range(1, int(len(hist)/2)):
    w[0] = np.sum(hist[:i])/np.sum(hist)
    u[0] = np.vdot(bins[:i], hist[:i])/np.sum(hist[:i])
    for j in range(i+1, len(hist)):
        w[1] = np.sum(hist[i:j])/np.sum(hist)
        u[1] = np.vdot(bins[i:j], hist[i:j])/np.sum(hist[i:j])
        w[2] = np.sum(hist[j:])/np.sum(hist)
        u[2] = np.vdot(bins[j:], hist[j:])/np.sum(hist[j:])
        maxvar = np.vdot(w, np.power((u - uT), 2)) #between-class variance (see formula above)
        if maxvar > currvar: #maximising the between-class variance
            currvar = maxvar
            print(currvar)
            t2 = i
            t1 = j

plt.bar(bins,hist,width=1)
plt.axvline(bins[t1],c='r') #plotting histogram with the two thresholds,red vertical line is threshold1 and green vertical line is threshold2
plt.axvline(bins[t2],c='g')
plt.show()

threshold1=bins[t1]
threshold2=bins[t2]
print('threshold1 = ' + str(threshold1))
print('threshold2 = ' + str(threshold2))
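
For comparison, scikit-image 0.16 and later ship a multi-level Otsu implementation. A minimal cross-check of the hand-rolled search above (assuming such a version is installed) could look like this:

from skimage.filters import threshold_multiotsu

thresholds = threshold_multiotsu(img_data[img_data != 0], classes=3) #two thresholds for three classes
print(thresholds)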

@@ -0,0 +1,14 @@
import os
import nipype
import nipype.interfaces.fsl as fsl

data_dir='path_to_raw_data' #path to raw image directory
ssdata_dir='output_data_path' #path to skull stripped image directory

for file in os.listdir(data_dir):
    try:
        mybet = fsl.BET(in_file=os.path.join(data_dir, file),
                        out_file=os.path.join(ssdata_dir, file + '_2.nii'),
                        frac=0.2) #fractional intensity threshold of 0.2
        mybet.run() #executing the brain extraction
        print(file + ' is skull stripped')
    except Exception:
        print(file + ' is not skull stripped')
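
As a quick sanity check (a small sketch; the file names are hypothetical), the shell command that nipype assembles for a given BET call can be inspected without running it:

check = fsl.BET(in_file='subject01.nii', out_file='subject01_brain.nii', frac=0.2)
print(check.cmdline) #roughly: bet subject01.nii subject01_brain.nii -f 0.20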
@@ -0,0 +1,38 @@
import nibabel
import matplotlib.pyplot as plt #package imports
import os

def process_key(event):
    fig = event.canvas.figure
    ax = fig.axes[0]
    if event.key == 'a':
        previous_slice(ax)
    elif event.key == 'q':
        next_slice(ax)
    fig.canvas.draw()

def multi_slice_viewer(volume):
    fig, ax = plt.subplots()
    ax.volume = volume
    ax.index = 60
    ax.imshow(volume[ax.index], cmap='gray')
    fig.canvas.mpl_connect('key_press_event', process_key)

def previous_slice(ax):
    """Go to the previous slice."""
    volume = ax.volume
    ax.index = (ax.index - 1) % volume.shape[0] # wrap around using %
    ax.images[0].set_array(volume[ax.index])

def next_slice(ax):
    """Go to the next slice."""
    volume = ax.volume
    ax.index = (ax.index + 1) % volume.shape[0]
    ax.images[0].set_array(volume[ax.index])

data_dir='path_to_image_folder' #image directory
img=nibabel.load(os.path.join(data_dir,'image.name')) #loading the image
img_data=img.get_data() #accessing image array
multi_slice_viewer(img_data)
plt.show()
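
To scroll along a different anatomical axis, the volume can be transposed before it is handed to the viewer (a small sketch; which array axis maps to which anatomical plane depends on the scan's orientation):

multi_slice_viewer(img_data.transpose(2, 0, 1)) #slice along what was the third axis
plt.show()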

49 changes: 49 additions & 0 deletions 3D Convolutional Network for Alzheimer's Detection/test cnn.py
@@ -0,0 +1,49 @@
import os
import tensorflow as tf
import tflearn
import numpy as np

data_dir='path_to_test_data'

datanp=[] #images
truenp=[] #labels

for file in os.listdir(data_dir):
    data = np.load(os.path.join(data_dir, file))
    datanp.append(data[0][0])
    truenp.append(data[0][1])

datanp = np.asarray(datanp)
datanp = np.expand_dims(datanp, axis=4) #add a singleton channel dimension expected by the 3D conv layers
truenp = np.asarray(truenp)
sh = datanp.shape #(num_samples, 106, 106, 120, 1)

tf.reset_default_graph()

net = tflearn.input_data(shape=[None, sh[1], sh[2], sh[3], sh[4]])
net = tflearn.conv_3d(net, 16, 5, strides=2, activation='leaky_relu', padding='VALID', weights_init='xavier', regularizer='L2', weight_decay=0.01) #16 filters of size 5
net = tflearn.max_pool_3d(net, kernel_size=3, strides=2, padding='VALID')
net = tflearn.conv_3d(net, 32, 3, strides=2, padding='VALID', weights_init='xavier', regularizer='L2', weight_decay=0.01) #32 filters of size 3
net = tflearn.normalization.batch_normalization(net)
net = tflearn.activations.leaky_relu(net)
net = tflearn.max_pool_3d(net, kernel_size=2, strides=2, padding='VALID')
net = tflearn.dropout(net, 0.5) #keep probability 0.5
net = tflearn.fully_connected(net, 1024, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.1, beta=0.1)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.6) #keep probability 0.6
net = tflearn.fully_connected(net, 512, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.2, beta=0.2)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.7) #keep probability 0.7
net = tflearn.fully_connected(net, 128, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.4, beta=0.4)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.7)
net = tflearn.fully_connected(net, 3, weights_init='xavier', regularizer='L2') #three output classes: NL, AD, MCI
net = tflearn.normalization.batch_normalization(net, gamma=1.3, beta=0.3)
net = tflearn.activations.softmax(net)
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy')
model = tflearn.DNN(net, checkpoint_path='drive/model/model.tfl.ckpt', max_checkpoints=3) #model definition

ckpt='path_to_latest_checkpoint'
model.load(ckpt) #loading checkpoints

print(model.evaluate(datanp, truenp)) #evaluate returns the test-set accuracy
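
If per-scan predictions are wanted in addition to the single accuracy figure, a small sketch (the class order follows the one-hot encoding used during labelling: NL, AD, MCI):

pred = np.argmax(model.predict(datanp), axis=1) #predicted class index per scan
true = np.argmax(truenp, axis=1) #ground-truth class index per scan
print(np.mean(pred == true)) #should agree with the accuracy from evaluate()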
49 changes: 49 additions & 0 deletions 3D Convolutional Network for Alzheimer's Detection/training cnn.py
@@ -0,0 +1,49 @@
import os
import tensorflow as tf
import tflearn
import numpy as np

data_dir='path_to_training_data'

datanp=[] #images
truenp=[] #labels

for file in os.listdir(data_dir):
    data = np.load(os.path.join(data_dir, file))
    datanp.append(data[0][0])
    truenp.append(data[0][1])

datanp = np.asarray(datanp)
datanp = np.expand_dims(datanp, axis=4) #add a singleton channel dimension expected by the 3D conv layers
truenp = np.asarray(truenp)
sh = datanp.shape #(num_samples, 106, 106, 120, 1)

tf.reset_default_graph()

net = tflearn.input_data(shape=[None, sh[1], sh[2], sh[3], sh[4]])
net = tflearn.conv_3d(net, 16, 5, strides=2, activation='leaky_relu', padding='VALID', weights_init='xavier', regularizer='L2', weight_decay=0.01) #16 filters of size 5
net = tflearn.max_pool_3d(net, kernel_size=3, strides=2, padding='VALID')
net = tflearn.conv_3d(net, 32, 3, strides=2, padding='VALID', weights_init='xavier', regularizer='L2', weight_decay=0.01) #32 filters of size 3
net = tflearn.normalization.batch_normalization(net)
net = tflearn.activations.leaky_relu(net)
net = tflearn.max_pool_3d(net, kernel_size=2, strides=2, padding='VALID')
net = tflearn.dropout(net, 0.5) #keep probability 0.5
net = tflearn.fully_connected(net, 1024, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.1, beta=0.1)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.6) #keep probability 0.6
net = tflearn.fully_connected(net, 512, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.2, beta=0.2)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.7) #keep probability 0.7
net = tflearn.fully_connected(net, 128, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.4, beta=0.4)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.7)
net = tflearn.fully_connected(net, 3, weights_init='xavier', regularizer='L2') #three output classes: NL, AD, MCI
net = tflearn.normalization.batch_normalization(net, gamma=1.3, beta=0.3)
net = tflearn.activations.softmax(net)
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy')
model = tflearn.DNN(net, checkpoint_path='drive/model/model.tfl.ckpt', max_checkpoints=3) #model definition

ckpt='path_to_latest_checkpoint'
model.load(ckpt) #loading checkpoints

model.fit(datanp, truenp, batch_size = 8, show_metric=True) #training with batch size of 8
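
Once training finishes, the weights can be persisted with tflearn's save method (a sketch; the output path is a placeholder):

model.save('path_to_saved_model/model.tfl') #writes the trained weights to disk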
