Tested with the sample data below; I'm assuming the timestamps are in epoch format -
[key1, ..., ..., 1557678233]
[key1, ..., ..., 1557678234]
[key2, ..., ..., 1557678235]
[key2, ..., ..., 1557678240]
[key2, ..., ..., 1557678271]
[key3, ..., ..., 1557678635]
[key3, ..., ..., 1557678636]
[key3, ..., ..., 1557678637]
[key3, ..., ..., 1557678638]
[key3, ..., ..., 1557678999]
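For reference, a minimal sketch of how the test `rdd` used below could be built in the shell; filling the "..." placeholder columns with literal strings is my assumption, not part of the original answer:
scala> import org.apache.spark.sql.Row
scala> val rdd = spark.sparkContext.parallelize(Seq(
     |   Row("key1", "...", "...", "1557678233"),
     |   Row("key1", "...", "...", "1557678234"),
     |   Row("key2", "...", "...", "1557678235"),
     |   Row("key2", "...", "...", "1557678240"),
     |   Row("key2", "...", "...", "1557678271"),
     |   Row("key3", "...", "...", "1557678635"),
     |   Row("key3", "...", "...", "1557678636"),
     |   Row("key3", "...", "...", "1557678637"),
     |   Row("key3", "...", "...", "1557678638"),
     |   Row("key3", "...", "...", "1557678999")))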
//-- create a UDF that returns whether the records for a key should be processed or rejected
scala> spark.udf.register("recordStatusUDF", (ts: String) => {
     |   val ts_array = ts.split(",", -1)
     |   // process only if all timestamps for the key fall within a 300-second window
     |   if ((ts_array.max.trim.toLong - ts_array.min.trim.toLong) <= 300) {
     |     "process"
     |   } else {
     |     "reject"
     |   }
     | })
res83: org.apache.spark.sql.expressions.UserDefinedFunction = UserDefinedFunction(<function1>,StringType,Some(List(StringType)))
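Note that `max` and `min` here compare the split strings lexicographically, which matches numeric order as long as all epoch values have the same number of digits (true for 10-digit epoch seconds). As a quick sanity check, the registered UDF can be called directly in SQL; this call is illustrative, not from the original answer:
scala> spark.sql("select recordStatusUDF('1557678233,1557678271') as action_ind").show()
The spread here is 38 seconds, so it should print process.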
//-- create the schema
scala> import org.apache.spark.sql.types._
scala> val schema = StructType(Seq(StructField("key", StringType, true), StructField("col2", StringType, true), StructField("col3", StringType, true), StructField("epoch_ts", StringType, true)))
schema: org.apache.spark.sql.types.StructType = StructType(StructField(key,StringType,true), StructField(col2,StringType,true), StructField(col3,StringType,true), StructField(epoch_ts,StringType,true))
//-- create the dataframe and expose it as a temp view
scala> spark.createDataFrame(rdd, schema).createOrReplaceTempView("kafka_messages")
scala> spark.sql(s""" select x.key, recordStatusUDF(x.ts) as action_ind from ( select key, concat_ws(",", collect_list(epoch_ts)) as ts from kafka_messages group by key)x """).createOrReplaceTempView("action")
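For each key, collect_list gathers all of its epoch values and concat_ws joins them into a single comma-separated string, which the UDF then splits and evaluates. With the sample data, key1 spans 1 second and key2 spans 36 seconds, so both are marked "process", while key3 spans 364 seconds (1557678999 - 1557678635) and is marked "reject". The intermediate view can be inspected with:
scala> spark.sql("select * from action").show(false)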
scala> val result = spark.sql(s""" select km.* from kafka_messages km inner join action ac on km.key = ac.key and ac.action_ind = "process" """)
result: org.apache.spark.sql.DataFrame = [key: string, col2: string ... 2 more fields]
scala> result.show(false)
+----+----+----+-----------+
|key |col2|col3|epoch_ts |
+----+----+----+-----------+
|key1| ...| ...| 1557678233|
|key1| ...| ...| 1557678234|
|key2| ...| ...| 1557678235|
|key2| ...| ...| 1557678240|
|key2| ...| ...| 1557678271|
+----+----+----+-----------+
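If you also need the rejected records (not shown in the original answer), the same join with the opposite flag returns them; for the sample data this would be the key3 rows:
scala> val rejected = spark.sql(s""" select km.* from kafka_messages km inner join action ac on km.key = ac.key and ac.action_ind = "reject" """)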
You can apply the above code to each RDD (micro-batch of Kafka messages); a sketch of that wiring follows. Hope this helps.
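A rough sketch of the per-batch wiring, assuming a DStream[ConsumerRecord[String, String]] named stream from spark-streaming-kafka and comma-separated message values (both assumptions about your setup):
stream.foreachRDD { batch =>
  // parse each Kafka message value into the 4-column Row expected by the schema
  val rowRdd = batch.map { rec =>
    val f = rec.value.split(",", -1)
    Row(f(0).trim, f(1).trim, f(2).trim, f(3).trim)
  }
  spark.createDataFrame(rowRdd, schema).createOrReplaceTempView("kafka_messages")
  // ...then run the two SQL statements above on this micro-batch
}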