我不确定是否有内置选项，但显然实现起来并不复杂：
import concurrent
import concurrent.futures
import csv
import os
import tempfile
from typing import List
def chunks(lst: List, n: int):
    """Yield successive chunks of at most *n* items from *lst*.

    The caller's list is never mutated; each chunk is a fresh slice.
    The final chunk may be shorter than *n*.

    Args:
        lst: The list to split.
        n: Maximum chunk size; must be a positive integer.

    Raises:
        ValueError: if *n* < 1 (the original slicing loop would spin
            forever in that case, yielding empty chunks).
    """
    if n < 1:
        raise ValueError(f"chunk size must be >= 1, got {n}")
    # Stride through the list in steps of n; slicing past the end is safe.
    for start in range(0, len(lst), n):
        yield lst[start:start + n]
def write_csv(csv_file_path: str, columns: List[str], rows: List[List]) -> None:
    """Write one CSV file: a header row of *columns* followed by *rows*.

    Args:
        csv_file_path: Destination path; an existing file is overwritten.
        columns: Column names, written as the first row.
        rows: Data rows, one list per CSV record.
    """
    # newline="" is required by the csv module: without it the writer's
    # \r\n record terminators get doubled on Windows, producing blank rows.
    with open(csv_file_path, 'w', newline='', encoding='utf-8') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(columns)
        csv_writer.writerows(rows)
def write_csv_parallel(base_csv_file_path: str, columns: List[str], rows: List[List],
                       csv_max_rows: int) -> List[str]:
    """Split *rows* into chunks of at most *csv_max_rows* rows and write
    each chunk to its own CSV file concurrently.

    Files are named "<base_csv_file_path>.1", ".2", ... in chunk order,
    each with the same *columns* header.

    Args:
        base_csv_file_path: Prefix for the generated file names.
        columns: Header row shared by every output file.
        rows: All data rows to distribute across the files.
        csv_max_rows: Maximum number of data rows per file.

    Returns:
        The list of file paths that were written.  (The original version
        returned None despite its List[str] annotation, which crashed the
        caller joining the result.)
    """
    csv_writing_args = [(f"{base_csv_file_path}.{idx + 1}", columns, chunk_of_rows)
                        for idx, chunk_of_rows in enumerate(chunks(rows, csv_max_rows))]
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        # Drain the map iterator: Executor.map is lazy, and any exception
        # raised in a worker is only re-raised when its result is consumed.
        list(executor.map(lambda args: write_csv(*args), csv_writing_args))
    return [path for path, _, _ in csv_writing_args]
if __name__ == "__main__":
columns = ["A", "B", "C"]
rows = [
["a1", "b1", "c1"],
["a2", "b2", "c2"],
["a3", "b3", "c3"],
["a4", "b4", "c4"],
["a5", "b5", "c5"],
["a6", "b6", "c6"],
["a7", "b7", "c7"],
["a8", "b8", "c8"],
["a9", "b9", "c9"],
["a10", "b10", "c10"]
]
base_csv_file_path = "/tmp/test_file.csv"
csv_file_paths = write_csv_parallel(base_csv_file_path, columns, rows, csv_max_rows=3)
print("data was written into the following files: \n" + "\n".join(csv_file_paths))