I'm using code from this github, following this tutorial.
There are few changes that I did, because I'm training the CNN on my data. But, probably there is a problem with the changes I performed in 'create_lmdb.py'
file. the difference between the two databases are:
first: I'm training my network with 32x32 images. second: my database includes only grayscale images. but - I train my network for binary classification too.
After the modifications, this is my file:
import os
import glob
import random
import numpy as np
import cv2
import caffe
from caffe.proto import caffe_pb2
import lmdb
#Size of images
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
def transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):
#Histogram Equalization
img = cv2.equalizeHist(img)
#img[:, :, 1] = cv2.equalizeHist(img[:, :, 1]) not a RGB
#img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])
#Image Resizing
img = cv2.resize(img, (img_width, img_height), interpolation = cv2.INTER_CUBIC) # make sure all the images are at the same size
return img
def make_datum(img, label):
#image is numpy.ndarray format. BGR instead of RGB
return caffe_pb2.Datum(
channels=1, #not an RGB image
width=IMAGE_WIDTH,
height=IMAGE_HEIGHT,
label=label,
data=img.tostring())
train_lmdb = '/home/roishik/Desktop/Thesis/Code/cafe_cnn/first/input/train_lmdb'
validation_lmdb = '/home/roishik/Desktop/Thesis/Code/cafe_cnn/first/input/validation_lmdb'
os.system('rm -rf ' + train_lmdb)
os.system('rm -rf ' + validation_lmdb)
train_data = [img for img in glob.glob("../input/train/*png")]
test_data = [img for img in glob.glob("../input/test1/*png")]
#Shuffle train_data
random.shuffle(train_data)
print 'Creating train_lmdb'
in_db = lmdb.open(train_lmdb, map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
for in_idx, img_path in enumerate(train_data):
if in_idx % 6 == 0:
continue
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
img = transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
if 'cat' in img_path:
label = 0
else:
label = 1
datum = make_datum(img, label)
in_txn.put('{:0>5d}'.format(in_idx), datum.SerializeToString())
print '{:0>5d}'.format(in_idx) + ':' + img_path
in_db.close()
print '\nCreating validation_lmdb'
in_db = lmdb.open(validation_lmdb, map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
for in_idx, img_path in enumerate(train_data):
if in_idx % 6 != 0:
continue
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
img = transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
prec=int(img_path[(img_path.index('prec_')+5):(img_path.index('prec_')+8)])
if prec>50:
label = 1
else:
label = 0
datum = make_datum(img, label)
in_txn.put('{:0>5d}'.format(in_idx), datum.SerializeToString())
print '{:0>5d}'.format(in_idx) + ':' + img_path
in_db.close()
print '\nFinished processing all images'
But I think that according to the training results: the .mdb output file is corrupted (maybe empty or something - even its weights 47MB).
Can anyone see something wrong with this file? or, alternatively, give me a link to a good tutorial about building lmdb files?
Really appreciate your help! Thanks