The windowed approach requires moving all of the data into a single partition, which, as you pointed out in your post, your dataset is too large for. To get around this, I adapted that approach slightly: it first builds an offset dictionary for each partition and then computes the cumulative sum within each partition. This produces the cumulative totals while reshuffling the data as little as possible:
First, let's generate some test data:
data = sc.parallelize([('a',1,25.0),('b',2,25.0),('c',3,50.0)]).toDF(['id','Count','Percent'])
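For reference, data.show() renders this as:

+---+-----+-------+
| id|Count|Percent|
+---+-----+-------+
|  a|    1|   25.0|
|  b|    2|   25.0|
|  c|    3|   50.0|
+---+-----+-------+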
These are the helper methods I adapted (see the original code here):
from collections import defaultdict
from pyspark.sql import Row
def cumulative_sum_for_each_group_per_partition(partition_index, event_stream):
    # First pass: total up "Count" and "Percent" within this partition.
    cumulative_sum = defaultdict(float)
    for event in event_stream:
        cumulative_sum["Count"] += event["Count"]
        cumulative_sum["Percent"] += event["Percent"]
    # Emit one (group, (partition_index, partition_total)) pair per group.
    for grp, grp_sum in cumulative_sum.items():
        yield (grp, (partition_index, grp_sum))
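# Purely illustrative: treating the whole collected test set as partition 0
# (an assumed single-partition layout), the first pass yields one
# (group, (partition_index, partition_total)) pair per group:
#
#   list(cumulative_sum_for_each_group_per_partition(0, data.collect()))
#   -> [('Count', (0, 6.0)), ('Percent', (0, 100.0))]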
def compute_offsets_per_group_factory(num_partitions):
    def _mapper(partial_sum_stream):
        # For one group, give each partition the running total of all
        # partitions that come before it.
        per_partition_cumulative_sum = dict(partial_sum_stream)
        cumulative_sum = 0
        offset = {}
        for partition_index in range(num_partitions):
            offset[partition_index] = cumulative_sum
            cumulative_sum += per_partition_cumulative_sum.get(partition_index, 0)
        return offset
    return _mapper
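# Purely illustrative, with hypothetical per-partition totals: given three
# partitions whose totals for some group are 1.0, 2.0 and 3.0, each
# partition is offset by the sum of everything before it:
#
#   compute_offsets_per_group_factory(3)([(0, 1.0), (1, 2.0), (2, 3.0)])
#   -> {0: 0, 1: 1.0, 2: 3.0}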
def compute_cumulative_sum_per_group_factory(global_offset):
    def _mapper(partition_index, event_stream):
        # Second pass: a running sum within the partition, shifted by the
        # broadcast offset covering all earlier partitions.
        local_cumulative_sum = defaultdict(float)
        for event in event_stream:
            local_cumulative_sum["Count"] += event["Count"]
            count_cumulative_sum = local_cumulative_sum["Count"] + global_offset.value["Count"][partition_index]
            local_cumulative_sum["Percent"] += event["Percent"]
            percentage_cumulative_sum = local_cumulative_sum["Percent"] + global_offset.value["Percent"][partition_index]
            yield Row(CCount=count_cumulative_sum, CPercent=percentage_cumulative_sum, **event.asDict())
    return _mapper
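# The broadcast value is a nested dict: group -> partition index -> offset.
# For example, if rows a and b sat in partition 0 and row c in partition 1
# (an assumed split, for illustration), global_offset.value would be
#
#   {'Count': {0: 0, 1: 3.0}, 'Percent': {0: 0, 1: 50.0}}
#
# so row c would get CCount = 3.0 + 3.0 = 6.0 and CPercent = 50.0 + 50.0 = 100.0.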
def compute_cumulative_sum(points_rdd):
    # First pass to compute the cumulative offset dictionary
    compute_offsets_per_group = compute_offsets_per_group_factory(points_rdd.getNumPartitions())
    offsets_per_group = points_rdd.\
        mapPartitionsWithIndex(cumulative_sum_for_each_group_per_partition, preservesPartitioning=True).\
        groupByKey().mapValues(compute_offsets_per_group).\
        collectAsMap()
    # Second pass to compute the cumulative sum using the offset dictionary
    sc = points_rdd.context
    compute_cumulative_sum_per_group = compute_cumulative_sum_per_group_factory(sc.broadcast(offsets_per_group))
    return points_rdd.\
        mapPartitionsWithIndex(compute_cumulative_sum_per_group, preservesPartitioning=True)
Using these helper methods on the test data:
compute_cumulative_sum(data.rdd).toDF().show()
gives:
+------+--------+-----+-------+---+
|CCount|CPercent|Count|Percent| id|
+------+--------+-----+-------+---+
| 1.0| 25.0| 1| 25.0| a|
| 3.0| 50.0| 2| 25.0| b|
| 6.0| 100.0| 3| 50.0| c|
+------+--------+-----+-------+---+
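Because the offsets are keyed by partition index, the same call works when the data spans several partitions. As a quick sanity check (a sketch relying on sc.parallelize splitting the input list into contiguous, order-preserving slices), you can force the test data into two partitions and expect the same cumulative totals:

data2 = sc.parallelize([('a',1,25.0),('b',2,25.0),('c',3,50.0)], 2).toDF(['id','Count','Percent'])
compute_cumulative_sum(data2.rdd).toDF().show()  # same CCount/CPercent values as above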