I am working in computer vision, and I am trying to convert Microsoft Coco dataset into png images so that I can use them directly on Caffe.
I modified some function of their API so that I have the correct segmentation colors (each class id has (id,id,id) color in rgb, and background is (0,0,0)). Here is the modified function:
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
if datasetType == 'instances':
ax = plt.gca()
polygons = []
color = []
# sort annotations from biggest to smallest to avoid occlusions
anns.sort(key=lambda x: x['area'], reverse=True)
for ann in anns:
pixelvalue = ann['category_id']/255.0
c = [pixelvalue, pixelvalue, pixelvalue]
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly, True,alpha=1))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
color_mask = c
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(1,1,1,1), linewidths=1, alpha=1)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print ann['caption']
This code is called in my python script below:
from pycocotools.coco import COCO
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
dataDir='..'
dataType='train2014'
annFile='%s/annotations/instance`enter code here`s_%s.json'%(dataDir,dataType)
# initialize COCO api for instance annotations
coco=COCO(annFile)
#display COCO categories and supercategories
cats = coco.loadCats(coco.getCatIds())
# get all images
catIds = coco.getCatIds();
imgIds = coco.getImgIds()
imgs = coco.loadImgs(imgIds)
imgs.sort(key=lambda x: x['id'])
for img in imgs:
# load and display image
I = io.imread('%s/images/%s/%s'%(dataDir,dataType,img['file_name']))
# 0 all values
I[:] = 0
axes = plt.gca()
axes.set_frame_on(False)
axes.set_xticks([]); axes.set_yticks([])
plt.axis('off')
plt.imshow(I)
# load and display instance annotations
annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
anns = coco.loadAnns(annIds)
coco.showAnns(anns)
path='vocstyle_cocoimages/'+str(img['file_name'])[:-4]+'.png'
plt.savefig(path, bbox_inches='tight', pad_inches=0)
plt.clf()
So here is my problem here: When I do this, I obtain a save png with a different size from the original one. I would like to have exactly the same dimensions, since it is important for evaluation: any misplaced pixel will lower the accuracy in the deep learning phase.
I haven't found a proper way to save the "image" part of the figure stripped of any other thing in the figure. Let's say I have a 640*480 image input, I would like to have the same size as an ouput.
Thank you for reading