I am reading data with a DataLoader from a custom Dataset object backed by numpy memmaps. Everything works fine as long as I read the data without shuffling, but as soon as I set shuffle=True, the runtime crashes.
I tried to implement the shuffling mechanism inside the Dataset class instead, using a permutation vector and setting shuffle=False in the DataLoader, but the problem persists. I also noticed that, when shuffling, the __getitem__() function of the Dataset object is called n times, where n is the batch_size.
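For reference, this is roughly the permutation-based workaround I mean, written here as a standalone wrapper (a minimal sketch, not my exact code; the ShuffledView name is made up for illustration):

import numpy as np
from torch.utils.data import Dataset

class ShuffledView(Dataset):
    """Wraps a dataset and shuffles indices internally, so the DataLoader can keep shuffle=False."""
    def __init__(self, base):
        self.base = base
        # One fixed random permutation, drawn once per instance.
        self.perm = np.random.permutation(len(base))

    def __len__(self):
        return len(self.base)

    def __getitem__(self, index):
        # Map the sequential index the DataLoader asks for onto a shuffled one.
        return self.base[self.perm[index]]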
Here is the Dataset code:
import os

import numpy as np
import torch
import torch.utils.data as data
from torch.utils.data import Dataset

class CustomDataset(Dataset):
    num_pattern = 60112
    base_folder = 'dataset'

    def __init__(self, root):
        self.root = os.path.expanduser(root)
        # Memory-mapped arrays: nothing is loaded up front, pages are faulted in on access.
        self.output_ = np.memmap('{0}/output'.format(root), dtype='int64', mode='r', shape=(60112, 62))
        self.out_len = np.memmap('{0}/output-lengths'.format(root), dtype='int32', mode='r', shape=(60112,))
        self.input_ = np.memmap('{0}/input'.format(root), dtype='float32', mode='r', shape=(60112, 512, 1024))
        self.in_len = np.memmap('{0}/input-lengths'.format(root), dtype='int32', mode='r', shape=(60112,))

    def __len__(self):
        return self.num_pattern

    def __getitem__(self, index):
        return ((self.in_len[index], torch.from_numpy(self.input_[index])),
                (self.out_len[index], torch.from_numpy(self.output_[index])))

if __name__ == '__main__':
    dataset = CustomDataset(root='/content/')
    data_loader = data.DataLoader(dataset, batch_size=32, shuffle=False, num_workers=1)
    for i, batch in enumerate(data_loader, 0):  # `batch` avoids shadowing the `data` module
        # training
        pass
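For completeness, a quick way to exercise the same random access pattern without any DataLoader workers is to index the dataset directly in the main process (a sketch; the cap of 100 indices is arbitrary):

import numpy as np

dataset = CustomDataset(root='/content/')
# Touch a random subset of indices with no multiprocessing involved,
# to separate plain memmap random access from DataLoader worker issues.
for index in np.random.permutation(len(dataset))[:100]:
    (in_len, x), (out_len, y) = dataset[int(index)]
    assert x.shape == (512, 1024) and y.shape == (62,)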
The error stack, from a run with batch_size=4 and shuffle=True, is as follows:
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in _try_get_batch(self, timeout)
510 try:
--> 511 data = self.data_queue.get(timeout=timeout)
512 return (True, data)
/usr/lib/python3.6/multiprocessing/queues.py in get(self, block, timeout)
103 timeout = deadline - time.monotonic()
--> 104 if not self._poll(timeout):
105 raise Empty
/usr/lib/python3.6/multiprocessing/connection.py in poll(self, timeout)
256 self._check_readable()
--> 257 return self._poll(timeout)
258
/usr/lib/python3.6/multiprocessing/connection.py in _poll(self, timeout)
413 def _poll(self, timeout):
--> 414 r = wait([self], timeout)
415 return bool(r)
/usr/lib/python3.6/multiprocessing/connection.py in wait(object_list, timeout)
910 while True:
--> 911 ready = selector.select(timeout)
912 if ready:
/usr/lib/python3.6/selectors.py in select(self, timeout)
375 try:
--> 376 fd_event_list = self._poll.poll(timeout)
377 except InterruptedError:
/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/signal_handling.py in handler(signum, frame)
62 # Python can still get and update the process status successfully.
---> 63 _error_if_any_worker_fails()
64 if previous_handler is not None:
RuntimeError: DataLoader worker (pid 3978) is killed by signal: Bus error.
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
<ipython-input-8-b407a8532808> in <module>()
5 data_loader = data.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=1)
6
----> 7 for i, data in enumerate(data_loader, 0):
8 print(i)
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in __next__(self)
574 while True:
575 assert (not self.shutdown and self.batches_outstanding > 0)
--> 576 idx, batch = self._get_batch()
577 self.batches_outstanding -= 1
578 if idx != self.rcvd_idx:
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in _get_batch(self)
551 else:
552 while True:
--> 553 success, data = self._try_get_batch()
554 if success:
555 return data
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in _try_get_batch(self, timeout)
517 if not all(w.is_alive() for w in self.workers):
518 pids_str = ', '.join(str(w.pid) for w in self.workers if not w.is_alive())
--> 519 raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str))
520 if isinstance(e, queue.Empty):
521 return (False, None)
RuntimeError: DataLoader worker (pid(s) 3978) exited unexpectedly