I have a trigger that executes a Lambda function for every transaction received on Kinesis. The producer sends multiple transactions via the PutRecordsRequest() method. The Lambda function is as follows:
var AWS = require('aws-sdk');
var firehose = new AWS.Firehose();
var fhStreamName = "transactions";

function writeToS3(jsonString, firehoseStreamName) {
    console.log("Writing to S3 : " + jsonString);

    // Prepare the record for the postings Firehose stream...
    var params = {
        DeliveryStreamName: firehoseStreamName,
        Record: {
            Data: jsonString
        }
    };

    // Store the data!
    firehose.putRecord(params, function(err, data) {
        if (err) {
            // This needs to be fired back to Kinesis in the future...
            console.log(err, err.stack);
        } else {
            console.log(data);
        }
    });
}

function processEvent(data) {
    // Decode the base64 payload, since this is all that we need
    var buf = Buffer.from(data, "base64");

    // Convert it to an actual readable string
    return buf.toString("utf8");
}

exports.handler = function(event, context) {
    var result = "";

    // Loop over the records and concatenate the decoded payloads...
    for (var i = 0; i < event.Records.length; i++) {
        result = result + processEvent(event.Records[i].kinesis.data);
    }

    writeToS3(result, fhStreamName);
    context.done();
};
However, when the transactions are written to S3, they are not written as a JSON array. Here is an example:
{
"userName" : "val1",
"betID" : "val2",
"anotherID" : val3
}{
"userName" : "val4",
"anotherID" : "val5",
"productID" : val6,
}
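As far as I understand, the JSON SerDe reads one JSON object per line rather than a JSON array, so one variation I have been considering is separating the decoded records with newlines before handing the batch to Firehose. A minimal sketch of that change to the handler above (assuming each Kinesis record is itself a complete JSON object):

exports.handler = function(event, context) {
    var lines = [];

    for (var i = 0; i < event.Records.length; i++) {
        // Each decoded record is assumed to be one complete JSON object
        lines.push(processEvent(event.Records[i].kinesis.data));
    }

    // Join with newlines so every object lands on its own line in S3
    writeToS3(lines.join("\n") + "\n", fhStreamName);
    context.done();
};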
Can data in this format be loaded directly into Athena or Redshift, or does it have to be a valid array? From what I can see at http://docs.aws.amazon.com/redshift/latest/dg/copy-usage_notes-copy-from-json.html, it should still be possible to load it into Redshift.
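For reference, the COPY I would expect to run based on that page looks roughly like the following (the target table name and IAM role are placeholders, not my actual setup):

COPY transactions
FROM 's3://asgaard-data/data/'
IAM_ROLE 'arn:aws:iam::123456789012:role/RedshiftCopyRole'
FORMAT AS JSON 'auto';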
Here are the properties used when creating the table in Athena...
ROW FORMAT SERDE 'org.openx.data.jsonserde.JsonSerDe'
WITH SERDEPROPERTIES (
'serialization.format' = '1'
) LOCATION 's3://asgaard-data/data/'
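To make that fragment concrete, a complete statement built around those properties would look roughly like this (the column list and types are only reconstructed from the sample records above, so treat them as guesses):

CREATE EXTERNAL TABLE transactions (
    userName  string,
    betID     string,
    anotherID string,
    productID string
)
ROW FORMAT SERDE 'org.openx.data.jsonserde.JsonSerDe'
WITH SERDEPROPERTIES (
    'serialization.format' = '1'
)
LOCATION 's3://asgaard-data/data/';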
How should I load this data so that it can be queried?