diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..6c723cd3b --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 zhixuhao + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/data.py b/data.py index 956497d3f..e7322de56 100644 --- a/data.py +++ b/data.py @@ -5,22 +5,7 @@ import glob import skimage.io as io import skimage.transform as trans - -Sky = [128,128,128] -Building = [128,0,0] -Pole = [192,192,128] -Road = [128,64,128] -Pavement = [60,40,222] -Tree = [128,128,0] -SignSymbol = [192,128,128] -Fence = [64,64,128] -Car = [64,0,128] -Pedestrian = [64,64,0] -Bicyclist = [0,128,192] -Unlabelled = [0,0,0] - -COLOR_DICT = np.array([Sky, Building, Pole, Road, Pavement, - Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled]) +import matplotlib.pyplot as plt def adjustData(img,mask,flag_multi_class,num_class): @@ -45,7 +30,7 @@ def adjustData(img,mask,flag_multi_class,num_class): -def trainGenerator(batch_size,train_path,image_folder,mask_folder,aug_dict,image_color_mode = "grayscale", +def dataGenerator(batch_size,train_path,image_folder,mask_folder,aug_dict,image_color_mode = "grayscale", mask_color_mode = "grayscale",image_save_prefix = "image",mask_save_prefix = "mask", flag_multi_class = False,num_class = 2,save_to_dir = None,target_size = (256,256),seed = 1): ''' @@ -109,16 +94,20 @@ def geneTrainNpy(image_path,mask_path,flag_multi_class = False,num_class = 2,ima return image_arr,mask_arr -def labelVisualize(num_class,color_dict,img): - img = img[:,:,0] if len(img.shape) == 3 else img - img_out = np.zeros(img.shape + (3,)) - for i in range(num_class): - img_out[img == i,:] = color_dict[i] - return img_out / 255 -def saveResult(save_path,npyfile,flag_multi_class = False,num_class = 2): - for i,item in enumerate(npyfile): - img = labelVisualize(num_class,COLOR_DICT,item) if flag_multi_class else item[:,:,0] - io.imsave(os.path.join(save_path,"%d_predict.png"%i),img) \ No newline at end of file +def saveResult(save_path,pred_im_array): + for i,item in enumerate(pred_im_array): + img = item[:,:,0] + io.imsave(os.path.join(save_path,f"{i}_predict.png"),img) + + +def plot_metrics(history_obj): + plt.figure(1) + 
plt.plot(history_obj.history['loss']) + plt.plot(history_obj.history['acc']) + plt.title('model performance') + plt.xlabel('epoch') + plt.legend(['loss', 'accuracy'], loc='upper left') + plt.show() \ No newline at end of file diff --git a/dataPrepare.ipynb b/dataPrepare.ipynb index 9639938b8..59fc85855 100644 --- a/dataPrepare.ipynb +++ b/dataPrepare.ipynb @@ -59,7 +59,7 @@ " zoom_range=0.05,\n", " horizontal_flip=True,\n", " fill_mode='nearest')\n", - "myGenerator = trainGenerator(20,'data/membrane/train','image','label',data_gen_args,save_to_dir = \"data/train/aug\")" + "myGenerator = trainGenerator(20,'data/membrane/train','image','label',data_gen_args,save_to_dir = \"data/membrane/train/aug\")" ] }, { @@ -75,7 +75,7 @@ "metadata": {}, "outputs": [], "source": [ - "#you will see 60 transformed images and their masks in data/train/aug\n", + "#you will see 60 transformed images and their masks in data/membrane/train/aug\n", "num_batch = 3\n", "for i,batch in enumerate(myGenerator):\n", " if(i >= num_batch):\n", diff --git a/folder_structure_script.py b/folder_structure_script.py new file mode 100644 index 000000000..62d26afd7 --- /dev/null +++ b/folder_structure_script.py @@ -0,0 +1,27 @@ +#Script that copies files whose names contain 'nolabels' or 'IAR' from the current folder into two separate destination folders, one per image type. +#Known bug: a single run does not sort both file types, because the first matching branch breaks out of the loop. To copy the 'IAR' files instead (lines 25-27), +#comment out lines 22-24 and change the 'elif' on line 25 to an 'if'. 
+ +import shutil +from glob import glob +import ipdb + +extensions = glob('*.jpg') +string_match_1 = 'nolabels' +string_match_2 = 'IAR' + +PATH_1 = 'C:/Users/maran/Desktop/Files Dissertation/training_data/selected_good_days_grayscale_unlabeled_2' +PATH_2 = 'C:/Users/maran/Desktop/Files Dissertation/training_data/selected_good_days_IARsignal_predicted_2' + +list_good_days = 'list_good_days.txt' +with open(list_good_days, 'r') as handle: + for line in handle: #152 + tokens = line.split() + for filename in extensions: + if (tokens[0]+'_'+tokens[1]+'_'+tokens[2]) in filename: + if string_match_1 in filename: + shutil.copy2(filename, PATH_1) + break + elif string_match_2 in filename: + shutil.copy2(filename, PATH_2) + break diff --git a/main.py b/main.py index f6041b875..0d47108e7 100644 --- a/main.py +++ b/main.py @@ -1,9 +1,8 @@ from model import * from data import * - -#os.environ["CUDA_VISIBLE_DEVICES"] = "0" - - + +BATCH_SIZE = 2 + data_gen_args = dict(rotation_range=0.2, width_shift_range=0.05, height_shift_range=0.05, @@ -11,12 +10,19 @@ zoom_range=0.05, horizontal_flip=True, fill_mode='nearest') -myGene = trainGenerator(2,'data/membrane/train','image','label',data_gen_args,save_to_dir = None) + +myGene = dataGenerator(BATCH_SIZE,'data/train','image','label',data_gen_args,save_to_dir = None) model = unet() model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',verbose=1, save_best_only=True) -model.fit_generator(myGene,steps_per_epoch=300,epochs=1,callbacks=[model_checkpoint]) +history = model.fit_generator(myGene,steps_per_epoch=1,epochs=3,callbacks=[model_checkpoint]) + +figure = plot_metrics(history) + +testGene = testGenerator("data/test") + +# NOTE(review): this evaluates on the training generator (myGene), not a held-out validation set +loss, acc = model.evaluate_generator(myGene, steps=10) -testGene = testGenerator("data/membrane/test") -results = model.predict_generator(testGene,30,verbose=1) -saveResult("data/membrane/test",results) \ No newline at end of file +results = model.predict_generator(testGene,10,verbose=1) 
+saveResult("data/test",results) \ No newline at end of file diff --git a/model.py b/model.py index 0ebd5ab19..e6d0e7282 100644 --- a/model.py +++ b/model.py @@ -31,28 +31,28 @@ def unet(pretrained_weights = None,input_size = (256,256,1)): drop5 = Dropout(0.5)(conv5) up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5)) - merge6 = merge([drop4,up6], mode = 'concat', concat_axis = 3) + merge6 = concatenate([drop4,up6], axis = 3) conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6) conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6) up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6)) - merge7 = merge([conv3,up7], mode = 'concat', concat_axis = 3) + merge7 = concatenate([conv3,up7], axis = 3) conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7) conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7) up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7)) - merge8 = merge([conv2,up8], mode = 'concat', concat_axis = 3) + merge8 = concatenate([conv2,up8], axis = 3) conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8) conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8) up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8)) - merge9 = merge([conv1,up9], mode = 'concat', concat_axis = 3) + merge9 = concatenate([conv1,up9], axis = 3) conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9) conv9 = Conv2D(64, 3, activation = 'relu', padding = 
'same', kernel_initializer = 'he_normal')(conv9) conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9) conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9) - model = Model(input = inputs, output = conv10) + model = Model(inputs = inputs, outputs = conv10) model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])