How to put my dataset in a .pkl file in the exact same format as mnist.pkl

Posted 2019-05-20 23:15

I'm trying to make a dataset of images in the same format as mnist.pkl.

I have used https://github.com/dmitriy-serdyuk/cats_vs_dogs/blob/master/cats_vs_dogs/make_dataset.py as a reference.

This is what I have so far:

import os
import time
import cPickle

import numpy
import numpy as np
import tables
from PIL import Image

path = '/home/dell/thesis/neon/Images'

def PIL2array(img):
    # convert a single-band PIL image to a uint8 array of shape (height, width, 1)
    return numpy.array(img.getdata(),
                       numpy.uint8).reshape(img.size[1], img.size[0], 1)

def main():
    # recursively traverse the folder and collect all .jpg file names
    fileList = [os.path.join(dirpath, f)
                for dirpath, dirnames, files in os.walk(path)
                for f in files if f.endswith('.jpg')]

    print "Preparing your pickle files. Pls wait..."

    # pickle every image array as an individual <name>-prot0.pkl file
    t0 = time.time()
    for file_ in fileList:
        print file_
        img = Image.open(file_)
        arr = PIL2array(img)
        cPickle.dump(arr, open(file_ + "-prot0" + ".pkl", "wb"), protocol=0)

    t1 = time.time()
    total = t1 - t0
    print "P(h)ickling execution time: %.2f sec" % total

    # recursively traverse the folder and collect the pickle file names
    pklList = [os.path.join(dirpath, f)
               for dirpath, dirnames, files in os.walk(path)
               for f in files if f.endswith('.pkl')]

    all_files = []
    for file_ in pklList:
        all_files += [file_]

    # shuffle and split 60/20/20 into train/valid/test
    train_share = 0.6
    valid_share = 0.2
    seed = 1
    n_train = int(len(all_files) * train_share)
    n_valid = int(len(all_files) * valid_share)
    rng = np.random.RandomState(seed)
    rng.shuffle(all_files)
    train = all_files[:n_train]
    valid = all_files[n_train:(n_train + n_valid)]
    test = all_files[(n_train + n_valid):]
    save_path = os.path.join(dirpath, '../datasets.pkl')

    with open(save_path, 'w') as fout:
        cPickle.dump((train, valid, test), fout)

    # aggregate everything into one compressed HDF5 file
    filters = tables.Filters(complib='blosc', complevel=5)
    hdf5_file = 'dataset.h5'
    full_path = os.path.join(dirpath, hdf5_file)
    h5file = tables.open_file(full_path, mode='w',
                              title='pics',
                              filters=filters)
    save_path = os.path.join(dirpath, '../datasets.pkl')
    with open(save_path, 'r') as fin:
        files = cPickle.load(fin)

    # --- this is the section where I am facing problems ---
    for subfiles, subset in zip(files, ['train', 'valid', 'test']):
        group = h5file.create_group(h5file.root, subset, subset)
        X = h5file.create_vlarray(group, 'X', atom=tables.UInt8Atom(),
                                  title='Data values',
                                  expectedrows=len(subfiles),
                                  filters=filters)
        y = h5file.create_carray(group, 'y', atom=tables.UInt8Atom(),
                                 title='Data targets',
                                 shape=(len(subfiles),), filters=filters)
        s = h5file.create_carray(group, 's', atom=tables.UInt32Atom(),
                                 title='Data shapes',
                                 shape=(len(subfiles), 3), filters=filters)
        # --- end of the marked section ---

        for i, file_ in enumerate(subfiles):
            full_path = os.path.join(dirpath, file_)
            with open(full_path, 'r') as fin:
                image, label = cPickle.load(fin)

            X.append(image.flatten())
            y[i] = label
            s[i] = np.array(image.shape)
            if i % 50 == 0:
                print '.. aggregated', i, 'from', subset
                h5file.flush()
    h5file.flush()

if __name__ == '__main__':
    main()

I have marked the section where I'm facing problems with comments in the code above. I keep getting the following error:

in main: image, label = cPickle.load(fin)
ValueError: too many values to unpack

Can anyone help me out here?
