I found a workaround using torch.utils.data.Dataset, but the data has to be prepared with dask beforehand so that each partition holds exactly one user and is stored as its own parquet file, which can then be read one at a time later on. In the code below, the labels and the data are stored separately for a multivariate time series classification problem (but it can easily be adapted to other tasks):
import dask.dataframe as dd
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
# Break the huge file down into one partition per user
raw_ddf = dd.read_parquet("data.parquet")  # lazily read the huge file with dask
raw_ddf = raw_ddf.set_index("userid")  # set userid as the index (set_index sorts the data)
userids = raw_ddf.index.unique().compute().values.tolist()  # sorted list of user ids
# N divisions yield N-1 partitions, so repeat the last id once so that
# every user (including the last) gets its own partition
new_ddf = raw_ddf.repartition(divisions=userids + [userids[-1]])
new_ddf.to_parquet("my_folder")  # saves each partition as its own parquet file within "my_folder"
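As a quick sanity check (a minimal sketch, assuming the preprocessing above has just run), you can verify that the number of partitions equals the number of users and that a given partition really contains a single user:

assert new_ddf.npartitions == len(userids)  # one partition per user
first_user = new_ddf.get_partition(0).compute()  # materialize only the first partition
assert first_user.index.nunique() == 1  # exactly one userid in this partition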
# Use dask to read the per-user partitions back in
train_ddf = dd.read_parquet("my_folder/*.parquet") # read all files
# Read the labels file
labels_df = pd.read_csv("label.csv")
y_labels = np.array(labels_df["class"])  # must be in the same order as the partitions (sorted by userid)
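One caveat: __getitem__ below pairs partition idx with labels[idx], so the label order must match the sorted partition order. If label.csv is not already sorted by user, a realignment along these lines could help (this sketch assumes label.csv has a userid column, which is my guess and is not shown in the original):

# Hypothetical realignment: assumes a "userid" column in label.csv
labels_df = labels_df.set_index("userid").loc[userids]  # reorder rows to match partition order
y_labels = np.array(labels_df["class"])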
# Define the Dataset class
class UsersDataset(Dataset):
    def __init__(self, dask_df, labels):
        self.dask_df = dask_df
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        X_df = self.dask_df.get_partition(idx).compute()  # load only this user's partition
        X = X_df.to_numpy()  # np.row_stack([X_df]) also works, but is deprecated in NumPy 2.0
        X_tensor = torch.tensor(X, dtype=torch.float32)
        y = self.labels[idx]
        y_tensor = torch.tensor(y, dtype=torch.long)
        return (X_tensor, y_tensor)
# Create a Dataset object
user_dataset = UsersDataset(dask_df=train_ddf, labels=y_labels)
# Create a DataLoader object
dataloader = DataLoader(user_dataset, batch_size=4, shuffle=True, num_workers=0)
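One thing to watch: the default collate function can only stack the X tensors if every user has the same number of rows. If the sequence lengths differ across users, a padding collate_fn could be passed in instead (pad_collate is a hypothetical helper, a sketch assuming variable-length series):

from torch.nn.utils.rnn import pad_sequence

def pad_collate(batch):
    # batch is a list of (X_tensor, y_tensor) pairs with possibly different lengths
    xs, ys = zip(*batch)
    xs_padded = pad_sequence(xs, batch_first=True)  # shape: (batch, max_len, n_features)
    return xs_padded, torch.stack(ys)

padded_loader = DataLoader(user_dataset, batch_size=4, shuffle=True, collate_fn=pad_collate)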
# Print output of the first batch to ensure it works
for i_batch, sample_batched in enumerate(dataloader):
    print("Batch number ", i_batch)
    print(sample_batched[0])  # print X
    print(sample_batched[1])  # print y
    # stop after the first batch
    if i_batch == 0:
        break
I would like to know how to adapt my approach so the data can be read with >= 2 workers without producing duplicate entries. Any insight into this is greatly appreciated.
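For what it's worth, my understanding is that duplicated samples with multiple workers are an IterableDataset problem, where every worker runs a copy of the same iterator. With a map-style Dataset like the one above, the DataLoader's sampler hands each index to exactly one worker, so raising num_workers should not create duplicates. A minimal sketch of what I would try (same objects as above):

dataloader = DataLoader(
    user_dataset,
    batch_size=4,
    shuffle=True,
    num_workers=2,  # the sampler splits indices across workers, so no index is read twice per epoch
)

If the dask graph turns out not to pickle cleanly into the worker processes, an alternative would be to open the per-user parquet file directly inside __getitem__ (e.g. with pd.read_parquet) instead of holding a dask dataframe in the dataset.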