有这段代码:
function query(url, dbName, collection, filter, requestId) {
MongoClient.connect(url, {native_parser:true, authSource:'admin'}, function(err, client) {
if (err) {
throw err;
}
const db = client.db(dbName);
var stream = db.collection(collection).find(filter, {fields:{_id: 0}}).stream();
var fileName = '/opt/' + requestId + '.txt';
var writer = fs.createWriteStream(fileName);
writer.write('[\n');
stream.on('end', function(){
writer.write('\n]');
});
stream.pipe(es.map(function (doc, next) {
doc = JSON.stringify(doc);
next(null, doc);
})).pipe(es.join(',\n')).pipe(writer).on('close', function(){
sftp.put(fileName, '/opt/' + requestId + '.txt')
.then(logger.info('Done uploading the file via SFTP'));
mqttClient.publish('response', 'The CSV for requestId has been uploaded FTP');
});
});
}
问题是当查询返回大量文档时,该函数将失败
/node_modules/map-stream/index.js:103
throw err
^
RangeError: Maximum call stack size exceeded
at Stream.ondata (internal/streams/legacy.js:14:18)
at emitOne (events.js:116:13)
at Stream.emit (events.js:211:7)
at Stream.<anonymous> (/node_modules/event-stream/index.js:298:12)
at Stream.stream.write (/node_modules/through/index.js:26:11)
at Stream.ondata (internal/streams/legacy.js:16:26)
at emitOne (events.js:116:13)
at Stream.emit (events.js:211:7)
at queueData (/node_modules/map-stream/index.js:43:21)
at next (/node_modules/map-stream/index.js:71:7)
at /node_modules/map-stream/index.js:85:7
at /opt/subscriber.js:84:7
at wrappedMapper (/node_modules/map-stream/index.js:84:19)
at Stream.stream.write (/node_modules/map-stream/index.js:96:21)
at Cursor.ondata (_stream_readable.js:639:20)
at emitOne (events.js:116:13)
这个函数所做的是获取一个过滤器,基于过滤器运行一个 mongodb 查询,并将结果文档写入一个文件,然后进行 ftp-ed。
该功能在next(null, doc);
关于如何改进代码而不必增加调用堆栈大小的任何建议?