Keras DataGenerator with Multi-workers: Accelerating Training Data Loading (Reading from a Single Dataset Folder)
Description
When the training set is large, multiple workers are needed to speed up data reading. If the dataset has already been balanced beforehand and all the data required for training is stored in a single folder, the code in this post can be used to accelerate data loading. If you need to read from several dataset folders of unequal length at the same time, see the companion post "Keras DataGenerator with Multi-workers: Accelerating Training Data Loading (Reading Simultaneously from Multiple Data Folders of Unequal Length)".
Code
1. DataGenerator Module
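The class below takes a collate_fn argument whose default, default_collate_fn, is not shown in this post. As a minimal sketch (an assumption about its behavior, not the original implementation), such a helper could stack the per-sample image and label lists returned by Dataset_Test into batch arrays; it has to be defined before the class so that the default argument resolves:

import numpy as np

def default_collate_fn(samples):
    # Hypothetical helper (not shown in the original post): each sample is
    # [data_storage_list, label_storage_list] as returned by Dataset_Test;
    # stack them into (batch, ...) arrays for Keras.
    batch_data = np.stack([s[0][0] for s in samples]).astype('float32')
    batch_label = np.stack([s[1][0] for s in samples]).astype('float32')
    return batch_data, batch_label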
import numpy as np
import tensorflow as tf
from concurrent.futures import ThreadPoolExecutor

class DataGenerator_MultiWorkers(tf.keras.utils.Sequence):
    'Generates data for Keras'

    def __init__(self, list_IDs, label_IDs, batch_size, imgSize=(64, 64), n_channels=3,
                 n_classes=4, num_workers=8, shuffle=False, transform=None,
                 epoch_idx=None, collate_fn=default_collate_fn):
        'Initialization'
        self.imgSize = imgSize
        self.batch_size = batch_size
        self.label_IDs = label_IDs
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.n_classes = n_classes
        self.transform = transform
        self.epoch_idx = epoch_idx
        self.num_workers = num_workers
        self.Dataset_Test = Dataset_Test(self.list_IDs, self.label_IDs)
        self.collate_fn = collate_fn
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(len(self.indexes) / self.batch_size)

    def __getitem__(self, index):
        'Generate one batch of data; index is the step index and is not necessarily received in order'
        # Sample indexes that make up this batch
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        samples = []
        # Read the samples of this batch in parallel with a thread pool
        with ThreadPoolExecutor(max_workers=self.num_workers) as executor:
            for sample in executor.map(lambda i: self.Dataset_Test[i], indexes):
                samples.append(sample)
        # Stack the individual samples into batch arrays
        batch_data, batch_label = self.collate_fn(samples)
        return tuple(({'data': batch_data}, [batch_label]))

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        # Shuffle only the index array; shuffling list_IDs in place would break
        # the pairing with label_IDs held by Dataset_Test
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)
2. Dataset Module
import nibabel
import numpy as np

class Dataset_Test(object):
    def __init__(self, list_IDs, label_IDs, dtype='float32'):
        self.dtype = dtype
        self.list_IDs = list_IDs
        self.label_IDs = label_IDs

    def __getitem__(self, index):
        data_storage_list = []
        label_storage_list = []
        # Load one NIfTI image and zero-pad its first two spatial dimensions
        img = np.pad(nibabel.load(self.list_IDs[index]).get_fdata(),
                     ((9, 10), (9, 10), (0, 0)), 'constant')
        data_storage_list.append(img)
        # Load the matching label volume
        label_storage_list.append(nibabel.load(self.label_IDs[index]).get_fdata())
        return [data_storage_list, label_storage_list]
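For completeness, a hedged usage sketch (the folder layout, file pattern, batch size, and model are placeholders, not taken from the post): collect the image and label paths from the single dataset folder, build the generator, and pass it directly to model.fit. Because the parallel reading happens inside __getitem__ via the thread pool, no extra Keras workers are needed; note that the model's input layer must be named 'data' to match the dictionary the generator returns.

import glob
import os

# Hypothetical single-folder layout, for illustration only
image_dir = '/path/to/dataset/images'
label_dir = '/path/to/dataset/labels'
list_IDs = sorted(glob.glob(os.path.join(image_dir, '*.nii.gz')))
label_IDs = sorted(glob.glob(os.path.join(label_dir, '*.nii.gz')))

train_gen = DataGenerator_MultiWorkers(list_IDs, label_IDs, batch_size=8,
                                       num_workers=8, shuffle=True)

# model is any tf.keras.Model with an input layer named 'data'
# model.fit(train_gen, epochs=10)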