I'm not very familiar with Node.js internals, but from what I know, the "Maximum call stack size exceeded" error occurs when you make too many function calls.
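
For example, unbounded recursion is the textbook way to trigger it:

// No base case, so each call adds a stack frame until V8 gives up.
function recurse(n) {
  return recurse(n + 1);
}
recurse(0); // RangeError: Maximum call stack size exceeded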
I'm building a spider that follows links, and after a random number of crawled URLs I start getting these errors. Node doesn't give you a stack trace when it happens, but I'm fairly sure I don't have any recursion bugs.
I'm using request to fetch URLs and cheerio to parse the fetched HTML and detect new links. The stack overflow always happens inside cheerio. When I swap cheerio for htmlparser2 the error goes away. htmlparser2 is much lighter, since it just emits an event for each opening tag instead of parsing the whole document and building a tree.
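
For reference, the replacement looks roughly like this (reconstructed from memory, so treat it as a sketch); it just listens for opening <a> tags and never builds a tree:

var htmlparser = require('htmlparser2');

// Streaming link extraction: one event per opening tag, no DOM tree.
function extractLinks(html) {
  var hrefs = [];
  var parser = new htmlparser.Parser({
    onopentag: function(name, attribs) {
      if(name === 'a' && attribs.href) hrefs.push(attribs.href);
    }
  });
  parser.write(html);
  parser.end();
  return hrefs;
}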
My theory is that cheerio is eating up the entire stack, but I'm not sure that's even possible?
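
As far as I understand, cheerio.load() runs synchronously, so a try/catch around it should at least surface the RangeError and whatever partial stack V8 attaches to it. Something like this sketch (parseLinks is an illustrative helper, not my real code):

var cheerio = require('cheerio');

// Wrap the synchronous parse so the RangeError is catchable and loggable.
function parseLinks(html) {
  var $, hrefs = [];
  try {
    $ = cheerio.load(html);
  } catch(e) {
    console.error('cheerio blew up:', e.stack || e.message);
    return hrefs;
  }
  $('a').each(function(){
    if(this.attribs.href) hrefs.push(this.attribs.href);
  });
  return hrefs;
}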
Here's a simplified version of my code (for reading only, it won't run):
var _       = require('underscore');
var fs      = require('fs');
var urllib  = require('url');
var request = require('request');
var cheerio = require('cheerio');
var mongo   = "This is a global connection to mongodb.";
var maxConc = 7;
var crawler = {
  concurrent: 0,
  queue:      [],
  fetched:    {},
  fetch: function(url) {
    var self = this;
    self.concurrent += 1;
    self.fetched[url] = 0;
    request.get(url, { timeout: 10000, pool: { maxSockets: maxConc } }, function(err, response, body){
      self.concurrent  -= 1;
      self.fetched[url] = 1;
      if(err || !body) return; // skip failed fetches so we don't hand undefined to the parser
      self.extract(url, body);
    });
  },
  extract: function(referrer, data) {
    var self = this;
    var urls = [];
    mongo.pages.insert({ _id: referrer, html: data, time: +(new Date) });
    /**
     * THE ERROR HAPPENS HERE, AFTER A RANDOM NUMBER OF FETCHED PAGES
    **/
    cheerio.load(data)('a').each(function(){
      var href = urllib.resolve(referrer, this.attribs.href); // resolve relative urls against the referring page
      // Save the href only if it hasn't been fetched, it's not already in the queue and it's not already on this page
      if(href && !_.has(self.fetched, href) && !_.contains(self.queue, href) && !_.contains(urls, href))
        urls.push(href);
    });
    // Check the database to see if we already visited some urls.
    mongo.pages.find({ _id: { $in: urls } }, { _id: 1 }).toArray(function(err, results){
      if(err) results = [];
      else    results = _.pluck(results, '_id');
      urls = urls.filter(function(url){ return !_.contains(results, url); });
      self.push(urls);
    });
  },
  push: function(urls) {
    Array.prototype.push.apply( this.queue, urls );
    var url, self = this;
    // Check concurrency before shifting, otherwise URLs get popped off the queue and silently dropped.
    while(self.concurrent < maxConc && (url = self.queue.shift())) {
      self.fetch( url );
    }
  }
};
crawler.fetch( 'http://some.test.url.com/' );