
I'm experimenting with scraping data from an e-commerce site using Node.js. I use Request to retrieve the page's DOM and Cheerio to do server-side DOM selection.

const cheerio = require('cheerio');
const request = require('request');

// takes a URL, scrapes the page, and returns an object with the data
let scrapePage = (url) => {

    return new Promise((resolve, reject) => {

        request(url, (error, resp, body) => {

            if(error){
                reject(error);
            }

            let $ = cheerio.load(body); 
            let $url = url;
            let $price = $('#rt-mainbody > div > div.details > div.Data > div:nth-child(4) > div.description').text();

            let obj = {
                url: $url,
                price: $price
            }

            resolve(obj);

        });

    });

};

// Runs scrapePage in a loop
// There is a variable called arrayOfURLs defined elsewhere that contains 100s of URLs

for( let i = 0; i < arrayOfURLs.length; i++){
    scrapePage(arrayOfURLs[i])
        .then((obj) => {
            //write to a file
        })
        .catch((error) => {
        })
}

The problem is that the server I'm sending requests to sometimes sends back blank data, I'm assuming because I'm sending too many requests without any pause. Due to the async nature of JS, I'm having a hard time figuring out how to add an effective delay between each iteration of the loop. Just adding a setTimeout in a synchronous way isn't enough, because setTimeout is itself asynchronous, and I'm running this on a server so there's no Window object.
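To illustrate what I mean (just a sketch, not a fix): setTimeout does exist as a global in Node as well, and you can wrap it in a promise, but that alone doesn't pause the loop, because every iteration still fires immediately and only the callbacks get deferred:

const delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

for (let i = 0; i < arrayOfURLs.length; i++) {
    // All of these timers start at the same time, so all the requests
    // still go out together roughly one second from now.
    delay(1000).then(() => scrapePage(arrayOfURLs[i]));
}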

EDIT

The code above is a simplified version of what I'm working with. The full code is as follows:

app.js

const fs = require('fs');
const path = 'urls.txt';
const path2 = 'results.txt';
const scraper = require('./scraper');

let scrapePage = (url) => {
    scraper.scrapePage(url)
        .then((obj) => {
            // console.log('obj from the scraper with Promises was received');
            // console.log(obj);
            // console.log('writing obj to a file');
            fs.appendFile(path2, JSON.stringify(obj) + ', ', (error) => {
                if(error){
                    console.log(error);
                } else {
                    // console.log('Successfully wrote to ' + path2);
                }
            })
        })
        .catch((error) => {
            console.log('There was an error scraping obj: ');
            console.log(error);
        })  
}

fs.readFile(path, 'utf8', (err, data) => {

  if (err){
    throw err;
  }

  var urlArray = JSON.parse(data);

  // this returns an Unexpected Identifier error    
  // const results = await Promise.all(urlArray.map(scrapePage));

  // this returns an Unexpected Token Function error
  // async function scrapePages(){
  //    const results = await Promise.all(urlArray.map(scrapePage));
  // };

});

scraper.js

const request = require('request');
const cheerio = require('cheerio');

exports.scrapePage = (url) => {
    return new Promise((resolve, reject) => {
        request(url, (error, resp, body) => {
            if(error){
                reject(error);
            }

            let $ = cheerio.load(body); 
            let $url = url;

            let $price = $('#rt-mainbody > div > div.details > div.itemData > div:nth-child(4) > div.description').text();

            let obj = {
                url: $url,
                price: $price
            }

            resolve(obj);

        })
    })
}

3 Answers


It looks to me like you aren't waiting for your promises to resolve before sending the server response. You could eliminate the for loop entirely using async/await, e.g.:

const results = await Promise.all(arrayOfURLs.map(scrapePage));
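Note that a bare await like this won't parse at the top level (that's the "Unexpected Identifier" error in the edit above); it has to live inside an async function, and the "Unexpected token function" error suggests a Node version older than 7.6, which is when async/await landed. A minimal sketch of the wrapped version, assuming urlArray has already been parsed from the file:

async function scrapePages(urlArray) {
    // await is only legal inside an async function
    const results = await Promise.all(urlArray.map(scrapePage));
    return results;
}

scrapePages(urlArray)
    .then((results) => console.log(results.length + ' pages scraped'))
    .catch(console.error);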
answered 2017-12-18T01:52:44.663
const cheerio = require('cheerio');
const request = require('request');

let scrapePage = (url) => {

    return new Promise((resolve, reject) => {

        request(url, (error, resp, body) => {

            if (error) {
                reject(error);
                return;
            }

            if (!body) {
                reject('Empty Body');
                return;
            }

            let $ = cheerio.load(body);

            let $url = url;
            let $price = $('#rt-mainbody > div > div.details > div.Data > div:nth-child(4) > div.description').text();

            let obj = {
                url: $url,
                price: $price
            }

            resolve(obj);

        });

    });
};

function processUrl(url){
    scrapePage(url)
        .then((obj) => {
            // write to a file
            if (arrayOfURLs.length)
                processUrl(arrayOfURLs.pop());
        })
        .catch((error) => {
            arrayOfURLs.unshift(url); // re-queue the URL that failed
            if (arrayOfURLs.length) // could go in a finally block instead
                processUrl(arrayOfURLs.pop());
        });
}
processUrl(arrayOfURLs.pop());

Here we use the arrayOfURLs array as a queue: if we get an error or a blank page, we push that URL back onto the array and try it again later. This way we can process each URL sequentially, one at a time.
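If you also want an explicit pause between requests (the original goal), you could add a delay before each recursive step. A sketch along the same lines, assuming a one-second gap is enough for the server:

const delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

function processUrlWithDelay(url) {
    scrapePage(url)
        .then((obj) => {
            // write to a file, then wait before taking the next URL
            return delay(1000);
        })
        .catch((error) => {
            arrayOfURLs.unshift(url); // re-queue the URL that failed
            return delay(1000);
        })
        .then(() => {
            if (arrayOfURLs.length)
                processUrlWithDelay(arrayOfURLs.pop());
        });
}
processUrlWithDelay(arrayOfURLs.pop());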

answered 2017-12-18T06:07:04.980

If you want no more than x active connections at a time, you can use throttle. Or, if you want to start no more than x per second, you can use throttlePeriod.

If even one request fails, Promise.all will never call your resolve handler, so you can catch any errors and resolve with a Fail object instead:

const Fail = function(details){ this.details = details; };
const max10 = throttle(10)(scrapePage); // max 10 active connections
// const twoPerSecond = throttlePeriod(2, 1000)(scrapePage); // start no more than 2 per second
Promise.all(
  arrayOfURLs.map(
    url =>
      max10(url)
      .catch(err => new Fail([err, url]))
  )
)
.then(
  results => {
    const successes =
      results.filter(
        result => (result && result.constructor) !== Fail
      );
    const failed =
      results.filter(
        result => (result && result.constructor) === Fail
      );
  }
);
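The answer doesn't show where throttle and throttlePeriod come from; they read like a small helper library the author assumes. As a hypothetical stand-in, a concurrency limiter with the same throttle(max)(fn) shape could look like this:

// Hypothetical stand-in for the assumed throttle helper:
// throttle(max)(fn) returns a version of fn that keeps at most
// `max` calls in flight at once and queues the rest.
const throttle = (max) => (fn) => {
    let active = 0;
    const queue = [];
    const next = () => {
        if (active >= max || queue.length === 0) return;
        active++;
        const { args, resolve, reject } = queue.shift();
        Promise.resolve(fn(...args))
            .then(resolve, reject)
            .then(() => { active--; next(); });
    };
    return (...args) =>
        new Promise((resolve, reject) => {
            queue.push({ args, resolve, reject });
            next();
        });
};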
answered 2017-12-18T03:41:31.137