我有一个用 Node 编写的非常简单的日志记录应用程序,它将数据存储在 MongoDB 中。它可以正常运行一段时间(大约一个小时),然后连接数激增,我的所有写入都会失败一阵子,之后连接才断开(可能是超时了)。我知道调用 db.close()
很重要,但 Node 基于事件的异步特性使得确定在哪里调用它并不简单。
在我的请求处理程序中,我有:
//val is an object previously defined
// Fire-and-forget write of the raw event record.
database.writeRecord(val)
// Then branch on whether a profile already exists for this cookie.
// NOTE(review): each of these database.* calls opens its OWN connection
// (see the handlers below) — three opens per request under load.
database.userExists(val['cookie'], function(c){//returns 1 if user exists, 0 if not
if(c==0)
database.createNewUserProfile(val['cookie'],'campaign',val, {cid:cid, event:event});
else
database.attachEventToUserProfile(val['cookie'], 'campaign',val, {cid:cid, event:event});
});
然后,在我的数据库处理程序中,我具有以下功能(请原谅此复制/粘贴的长度):
// Writes a single event record to beacon.testcollection.
// NOTE(review): this opens a brand-new Db connection on EVERY call; under
// sustained traffic this is exactly what makes the connection count spike.
// Prefer one shared, pooled Db opened at startup (see the answer below).
//
// object_passbyreference - event data; shallow-copied because the insert runs
//                          asynchronously and the caller may mutate its object.
// additionalattributes   - optional extra fields merged into the record.
//                          (The original body referenced this name without
//                          declaring it anywhere — an accidental global at
//                          best, a ReferenceError at worst; it is now a
//                          proper optional parameter.)
function writeRecord(object_passbyreference, additionalattributes){
    //make a shallow copy so the caller's object is never shared
    var value = {};
    for (var attr in object_passbyreference) {
        value[attr] = object_passbyreference[attr];
    }
    if (additionalattributes != undefined) {//is there anything else i want to throw in this record?
        for (var attr in additionalattributes) {
            value[attr] = additionalattributes[attr];
        }
    }
    // 'var' keeps these local — the original assigned them as implicit globals.
    var dbname = 'beacon';
    var collectionname = 'testcollection';
    var db = new Db(dbname, new Server(host, port, {}), {native_parser:false});
    db.open(function(err, db) {
        if (db == null) {
            console.log(' db is undefined');
            return;
        }
        db.collection(collectionname, function(err, collection) {
            if (err) {
                // Previously a failed lookup crashed on a null collection and
                // LEAKED the connection — close it on this path too.
                console.log('collection ERROR '+JSON.stringify(err));
                db.close();
                return;
            }
            collection.insert(value, function(err, doc){
                if (err) {
                    console.log('Insert ERROR!!!!! Is the database running? '+JSON.stringify(err));
                }
                db.close();
            }); // collection.insert
        }); //db.collection
    }); // db.open
}
// Looks up a user profile by cookie id and invokes `callback` with the number
// of matching documents (0 = no such user, >=1 = user exists).
// NOTE(review): per the original contract, callback fires ONLY on success —
// on a query error the caller is never notified (TODO confirm callers can
// tolerate that). The fix here is that the connection is now closed on every
// path instead of leaking when the collection lookup fails.
function userExists(cookieid, callback){
    var db = new Db('beacon', new Server(host, port, {}), {native_parser:false});
    db.open(function(err, db) {
        if (db == null) {
            console.log(' db is undefined - profile');
            return;
        }
        db.collection('userprofiles', function(err, collection) {
            if (err) {
                // Previously this path crashed on a null collection and the
                // connection was never closed.
                console.log(' ERROR opening userprofiles: '+JSON.stringify(err));
                db.close();
                return;
            }
            collection.find({cookie:cookieid}).toArray(function(e, results) {
                if (e) {
                    console.log (' ERROR IN saveUserProfileEvent error:'+JSON.stringify(e)+" cookie:"+JSON.stringify({cookie:cookieid}));
                }
                else {//the query went alright, callback the count ...
                    callback(results.length);//returns the size of the result set
                }
                db.close();
            });
        });
    });
}
// Inserts a brand-new user profile document keyed by cookie id.
//
// cookieid                 - the user's cookie (stored top-level, stripped
//                            from the copied event).
// eventtype                - bucket name ('campaign' or 'survey' per callers).
// object_passbyreference   - event data; shallow-copied, minus 'cookie'.
// additionalattributes     - optional extra fields merged into the event.
function createNewUserProfile(cookieid, eventtype, object_passbyreference, additionalattributes){
    //make a shallow copy, clone it (minus the cookie, which lives top-level)
    var object = {};
    for (var attr in object_passbyreference) {
        if (attr != 'cookie') {
            object[attr] = object_passbyreference[attr];
        }
    }
    if (additionalattributes != undefined) {//is there anything else i want to throw in this record?
        for (var attr in additionalattributes) {
            object[attr] = additionalattributes[attr];
        }
    }
    var recordtosave = {
        cookie: cookieid,
        items: 1, //keep this here, maybe just for fun — a running count of total entries for this user
        campaign: [],
        survey: []
    };
    // Assignment order preserved from the original: the event bucket first,
    // demographics afterwards.
    recordtosave[eventtype] = [object];
    recordtosave['demographics'] = {
        age: functionobj.getAge(),
        gender: functionobj.getGender(),
        income: functionobj.getIncome()
    };
    var db = new Db('beacon', new Server(host, port, {}), {native_parser:false});
    db.open(function(err, db) {
        if (db == null) {
            console.log(' db is undefined - profile');
            return;
        }
        db.collection('userprofiles', function(err, collection) {
            if (err) {
                // Previously crashed on a null collection and leaked the connection.
                console.log(' ERROR opening userprofiles: '+JSON.stringify(err));
                db.close();
                return;
            }
            collection.insert(recordtosave, function(err, doc){
                if (err) {
                    console.log(' ERROR Adding new user '+cookieid);
                }
                else {
                    console.log('Added new user '+cookieid);
                }
                db.close();
            });
        });
    });
}
// Appends an event to an existing user profile ($push into the eventtype
// bucket) and bumps the 'items' counter.
//
// BUG FIXED: `eventtopush` was an IMPLICIT GLOBAL. With concurrent requests
// in flight, two handlers shared the same object and could push the wrong
// event into the wrong profile — 'var' makes it per-call.
function attachEventToUserProfile(cookieid, eventtype, object_passbyreference, additionalattributes){
    // Shallow copy of the event, minus the cookie (used only as the query key).
    var object = {};
    for (var attr in object_passbyreference) {
        if (attr != 'cookie') {
            object[attr] = object_passbyreference[attr];
        }
    }
    if (additionalattributes != undefined) {//is there anything else i want to throw in this record?
        for (var attr in additionalattributes) {
            object[attr] = additionalattributes[attr];
        }
    }
    var db = new Db('beacon', new Server(host, port, {}), {native_parser:false});
    db.open(function(err, db) {
        if (db == null) {
            console.log(' db is undefined - profile');
            return;
        }
        db.collection('userprofiles', function(err, collection) {
            if (err) {
                // Previously crashed on a null collection and leaked the connection.
                console.log(' ERROR opening userprofiles: '+JSON.stringify(err));
                db.close();
                return;
            }
            var eventtopush = {}; // 'var' — was an implicit global (race under load)
            eventtopush[eventtype] = object;
            collection.update({cookie: cookieid}, {$inc: { 'items' : 1 }, $push: eventtopush}, function(e, doc){
                if (e) {
                    console.log(' ERROR Updating user '+cookieid);
                }
                else {
                    console.log('Updated user '+cookieid);
                }
                db.close();
            });
        });
    });
}
我所有的功能都是这样的:
// Schematic shape of every DB helper above (pseudocode, not runnable):
// a fresh connection is opened on EVERY call and closed in the innermost
// callback — the per-call open is what drives the connection spikes.
function f(param, callback){
db=new db;
db.open(...
db.collection(....
db.find/insert(....
callback()
db.close()
}
这样写对吗?我应该把 db.close() 放在最内层回调的末尾,对吗?这意味着它会在一切完成后才执行?这是在 Node 中进行 mongo 查询的正确方式吗?
查看 mongostat,我看到我的连接非常低且平坦,然后,在一段时间内,它们飙升至 ~800,我的写入开始失败(注意,时间间隔为 15 秒):
72 55 38 0 0 332 1.02g 62m 0 90k 307k 13 RTR 21:01:26
68 52 37 0 0 316 1.03g 62m 0 85k 250k 12 RTR 21:01:41
81 65 50 0 0 368 1.02g 62m 0 101k 280k 5 RTR 21:01:56
70 54 39 0 0 322 1.02g 62m 0 87k 306k 4 RTR 21:02:11
insert query update delete getmore command vsize res faults netIn netOut conn repl time
73 52 36 0 0 334 1.09g 63m 0 88k 235k 79 RTR 21:02:26
73 55 39 0 0 346 1.15g 64m 0 91k 250k 148 RTR 21:02:41
70 59 42 0 0 328 1.02g 62m 0 90k 269k 26 RTR 21:02:56
73 58 43 0 0 346 1.02g 62m 0 93k 303k 3 RTR 21:03:11
71 55 41 0 0 337 1.02g 62m 0 90k 243k 16 RTR 21:03:26
69 55 40 0 0 329 1.02g 62m 0 88k 322k 8 RTR 21:03:41
70 56 42 0 0 338 1.02g 62m 0 91k 261k 5 RTR 21:03:56
67 52 38 0 0 316 1.02g 62m 0 85k 261k 3 RTR 21:04:11
64 50 36 0 0 306 1.02g 62m 0 82k 243k 25 RTR 21:04:26
61 48 37 0 0 294 1.04g 62m 0 78k 259k 32 RTR 21:04:41
insert query update delete getmore command vsize res faults netIn netOut conn repl time
68 53 37 0 0 314 1.02g 62m 0 85k 241k 7 RTR 21:04:56
65 53 41 0 0 319 1.02g 62m 0 85k 229k 3 RTR 21:05:11
71 55 40 0 0 337 1.02g 62m 0 91k 274k 14 RTR 21:05:26
63 47 35 0 0 297 1.07g 63m 0 79k 220k 63 RTR 21:05:41
64 52 37 0 0 298 1.02g 62m 0 81k 230k 7 RTR 21:05:56
67 53 39 0 0 322 1.02g 62m 0 87k 230k 4 RTR 21:06:11
68 53 38 0 0 321 1.02g 62m 0 86k 273k 3 RTR 21:06:26
70 56 41 0 0 336 1.02g 62m 0 90k 275k 3 RTR 21:06:41
64 51 38 0 0 310 1.02g 62m 0 82k 220k 4 RTR 21:06:56
66 51 38 0 0 315 1.02g 62m 0 84k 290k 21 RTR 21:07:11
insert query update delete getmore command vsize res faults netIn netOut conn repl time
62 50 38 0 0 299 1.02g 62m 0 80k 240k 5 RTR 21:07:26
65 49 36 0 0 308 1.02g 62m 0 82k 235k 36 RTR 21:07:41
68 51 36 0 0 298 1.02g 62m 0 81k 255k 6 RTR 21:07:56
69 53 37 0 0 319 1.02g 62m 0 85k 235k 5 RTR 21:08:11
66 52 37 0 0 312 1.02g 62m 0 84k 236k 6 RTR 21:08:26
65 51 36 0 0 305 1.02g 62m 0 82k 214k 5 RTR 21:08:41
68 53 38 0 0 320 1.02g 62m 0 85k 265k 9 RTR 21:08:56
70 53 36 0 0 319 1.02g 62m 0 85k 226k 19 RTR 21:09:11
69 52 37 0 0 312 1.02g 62m 0 84k 270k 4 RTR 21:09:26
67 51 36 0 0 311 1.04g 63m 0 83k 256k 19 RTR 21:09:41
insert query update delete getmore command vsize res faults netIn netOut conn repl time
70 54 40 0 0 325 1.02g 62m 0 88k 288k 4 RTR 21:09:56
69 52 36 0 0 317 1.03g 62m 0 85k 272k 14 RTR 21:10:11
67 53 39 0 0 321 1.03g 62m 0 86k 253k 13 RTR 21:10:26
70 55 39 0 0 330 1.03g 62m 0 88k 264k 12 RTR 21:10:41
70 54 39 0 0 332 1.03g 63m 0 89k 264k 30 RTR 21:10:56
69 52 36 0 0 319 1.07g 63m 0 85k 288k 73 RTR 21:11:11
61 43 32 0 0 291 1.19g 66m 0 75k 250k 208 RTR 21:11:26
62 42 30 0 0 285 1.32g 68m 0 74k 252k 321 RTR 21:11:41
66 52 38 0 0 315 1.33g 69m 0 84k 274k 341 RTR 21:11:56
68 42 30 0 0 302 1.51g 72m 0 78k 217k 500 RTR 21:12:11
insert query update delete getmore command vsize res faults netIn netOut conn repl time
53 27 20 0 0 256 1.78g 77m 0 62k 175k 780 RTR 21:12:26
22 19 15 0 0 244 1.77g 78m 0 58k 158k 771 RTR 21:12:41
21 17 11 0 0 245 1.75g 77m 0 59k 139k 755 RTR 21:12:56
7 4 3 0 0 218 1.77g 78m 0 49k 103k 772 RTR 21:13:11
2 1 0 0 0 214 1.77g 77m 0 47k 85k 773 RTR 21:13:26
0 0 0 0 0 208 1.77g 77m 0 45k 79k 773 RTR 21:13:41
61 60 41 0 0 323 1.49g 73m 0 91k 338k 489 RTR 21:13:56
79 73 50 0 0 358 1.16g 65m 0 104k 328k 153 RTR 21:14:11
74 50 32 0 0 315 1.22g 66m 0 84k 241k 220 RTR 21:14:26
72 51 35 0 0 303 1.19g 66m 0 83k 290k 189 RTR 21:14:41
insert query update delete getmore command vsize res faults netIn netOut conn repl time
74 52 35 0 0 310 1.19g 66m 0 84k 224k 180 RTR 21:14:56
80 58 39 0 0 337 1.14g 65m 0 92k 344k 127 RTR 21:15:11
81 48 30 0 0 314 1.22g 67m 0 83k 242k 215 RTR 21:15:26
84 58 37 0 0 332 1.15g 65m 0 91k 275k 152 RTR 21:15:41
82 58 39 0 0 328 1.1g 64m 0 90k 331k 83 RTR 21:15:56
78 53 36 0 0 317 1.08g 64m 0 86k 271k 89 RTR 21:16:11
82 47 32 0 0 314 1.19g 66m 0 83k 257k 200 RTR 21:16:26
80 52 37 0 0 318 1.24g 67m 0 85k 304k 234 RTR 21:16:41
21:11:26左右,连接量迅速上升,流量没有明显增加。您之前可以看到,连接低且稳定的时间很长。
有人可以在这里指出我正确的方向吗?
答案更新
在下面的帮助下,我得到了解决方案。基本上,把 app.listen(port)
放进 db.open()
的回调里面。然后,同样在 main.js 中,把 db 的引用传给路由处理程序,再一路传到实际执行数据库调用的地方——也就是获取集合并进行查询的地方。之后就不要再调用 db.close()
了。
主.js:
// main.js — open ONE pooled connection at startup and only start accepting
// HTTP traffic once it is ready; the pool is shared by all requests and is
// never closed per request.
var mongodb = require('mongodb'); // 'var' — the original leaked an implicit global
var serverOptions = {
'auto_reconnect': true,
'poolSize': 50 // up to 50 sockets shared across all requests
};
var serv = new mongodb.Server('localhost', 27017, serverOptions);
var db = new mongodb.Db('beacon', serv);
// Route handlers receive the shared db handle and pass it down.
app.get('/log', function(req,res){ log.request(req,res, db) });//writes events
db.open(function (error, db) {
if (error) {
// Previously swallowed: a failed open still started the HTTP server.
console.log('db.open failed: ' + JSON.stringify(error));
return;
}
if (!module.parent) {
app.listen(8888);
console.log("Express server listening on port %d", app.address().port)
}
});
日志.js:
// log.js — route handler. Receives the SHARED db handle opened once in
// main.js and hands it down to the database layer; no per-request open/close.
// NOTE(review): `val` is built from req elsewhere (schematic — the comment
// below stands in for that code).
function request(req,res, db){
//build val object
database.writeRecord(db, val)
}
数据库.js:
// database.js — inserts one record using the long-lived db handle passed down
// from the route handler. The connection pool lives in main.js, so this
// function must NOT call db.close().
//
// db    - the shared, already-open Db instance.
// value - the document to insert.
function writeRecord(db, value){
    var collectionname = 'testwritespeed'; // 'var' — was an implicit global
    db.collection(collectionname, function(err, collection) {
        if (err) {
            // Previously ignored: a failed lookup crashed on a null collection.
            console.log('collection ERROR '+JSON.stringify(err));
            return;
        }
        collection.insert(value, function(err, doc){
            if (err) {
                console.log('Insert ERROR!!!!! Is the database running? '+JSON.stringify(err));
            }
            console.log(doc)
            //don't do db.close() !!!!!
        }) // collection.insert
    }); //db.collection
}