3

I am building a content scraper for a tshirt website.

The goal is to enter a website through only one hardcoded url: http://shirts4mike.com

I will then find all the product pages for each tshirt, and then create a object with it's details. Then add it to an array.

When the array is full of the tshirts, I'll work through the array and log it into a CSV file.

Right now, I am having some trouble with the timing of the requests/responses and the function calls.

How can I make sure that I call the NEXT function on the right time? I understand that it's not working because of it's async nature.

How can I call secondScrape, lastScraper and convertJson2Csv at the right time so that the variables they're working with are not undefined?

I tried to use something such as response.end() but this is not working.

I'm assuming I NEED to use promises to make this work properly? and to be legible?

Any ideas? My code is below:

//Modules being used:
var cheerio = require('cheerio');
var request = require('request');
var moment = require('moment');

//hardcoded url
var url = 'http://shirts4mike.com/';

//url for tshirt pages
var urlSet = new Set();

var remainder;
var tshirtArray;


// Load front page of shirts4mike
request(url, function(error, response, html) {
    if(!error && response.statusCode == 200){
        var $ = cheerio.load(html);

    //iterate over links with 'shirt'
        $("a[href*=shirt]").each(function(){
            var a = $(this).attr('href');

            //create new link
            var scrapeLink = url + a;

            //for each new link, go in and find out if there is a submit button. 
            //If there, add it to the set
            request(scrapeLink, function(error,response, html){
                if(!error && response.statusCode == 200) {
                    var $ = cheerio.load(html);

                    //if page has a submit it must be a product page
                    if($('[type=submit]').length !== 0){

                        //add page to set
                        urlSet.add(scrapeLink);

                    } else if(remainder === undefined) {
                        //if not a product page, add it to remainder so it another scrape can be performed.
                        remainder = scrapeLink;                     
                    }
                }
            });
        });     
    }
    //call second scrape for remainder
    secondScrape();
});


function secondScrape() {
    request(remainder, function(error, response, html) {
        if(!error && response.statusCode == 200){
            var $ = cheerio.load(html);

            $("a[href*=shirt]").each(function(){
                var a = $(this).attr('href');

                //create new link
                var scrapeLink = url + a;

                request(scrapeLink, function(error,response, html){
                    if(!error && response.statusCode == 200){

                        var $ = cheerio.load(html);

                        //collect remaining product pages and add to set
                        if($('[type=submit]').length !== 0){
                            urlSet.add(scrapeLink);
                        }
                    }
                });
            });     
        }
    });
    console.log(urlSet);
    //call lastScraper so we can grab data from the set (product pages)
    lastScraper();
};



function lastScraper(){
    //scrape set, product pages
    for(var i = 0; i < urlSet.length; i++){
        var url = urlSet[i];

        request(url, function(error, response, html){
            if(!error && response.statusCode == 200){
                var $ = cheerio.load(html);

                //grab data and store as variables
                var price = $('.price').text();
                var img = $('.shirt-picture').find("img").attr("src");
                var title = $('body').find(".shirt-details > h1").text().slice(4);

                var tshirtObject = {};
                //add values into tshirt object

                tshirtObject.price = price;
                tshirtObject.img = img;
                tshirtObject.title = title;
                tshirtObject.url = url;
                tshirtObject.date = moment().format('MMMM Do YYYY, h:mm:ss a');

                //add the object into the array of tshirts
                tshirtArray.push(tshirtObject); 
            }
        });
    }
    //call function to iterate through tshirt objects in array in order to convert to JSON, then into CSV to be logged
    convertJson2Csv();
};
Quentin
  • 914,110
  • 126
  • 1,211
  • 1,335
bloppit
  • 621
  • 8
  • 22
  • A simple conversion to promises will still leave you with an inefficient process, in which pages are revisited. A good paradigm, would allow each page to be visited a maximum of once. – Roamer-1888 Sep 23 '16 at 03:40

4 Answers4

0

There is a npm module called request-promise.

simply:

var rp = require("request-promise");

and anywhere you are making a request you can switch with request-promise.

for instance:

rp(url)
.then(function(value){
  //do whatever
})
.catch(function(err){
  console.log(err)
})
Sunil Kumar
  • 3,142
  • 1
  • 19
  • 33
Shams Ali
  • 1
  • 2
0

You can use waterfall method of async module which can give you a smooth way to resolve this issue.

I just try to do your code with this module

Hope this will work for you

Format of waterfall

async.waterfall([
  function(callback) {
    callback(null, previousvalue);
  },
  function(previousvalue, callback) {}
], function(err, result) { //Final callback

});

var async = require('async');
var cheerio = require('cheerio');
var request = require('request');
var moment = require('moment');

//hardcoded url
var url = 'http://shirts4mike.com/';

//url for tshirt pages
var urlSet = new Set();

var remainder;
var tshirtArray = [];


async.waterfall([
  function(callback) {
    // Load front page of shirts4mike
    request(url, function(error, response, html) {
      if (!error && response.statusCode == 200) {
        var $ = cheerio.load(html);

        //iterate over links with 'shirt'
        $("a[href*=shirt]").each(function() {
          var a = $(this).attr('href');

          //create new link
          var scrapeLink = url + a;

          //for each new link, go in and find out if there is a submit button. 
          //If there, add it to the set
          request(scrapeLink, function(error, response, html) {
            if (!error && response.statusCode == 200) {
              var $ = cheerio.load(html);

              //if page has a submit it must be a product page
              if ($('[type=submit]').length !== 0) {

                //add page to set
                urlSet.add(scrapeLink);
                callback(null, true);

              } else if (remainder === undefined) {
                //if not a product page, add it to remainder so it another scrape can be performed.
                remainder = scrapeLink;
                callback(nul, true);
              }
            }
          });
        });
      }
      //call second scrape for remainder
      // secondScrape();
    });
  },
  function(previousvalue, callback) {
    request(remainder, function(error, response, html) {
      if (!error && response.statusCode == 200) {
        var $ = cheerio.load(html);

        $("a[href*=shirt]").each(function() {
          var a = $(this).attr('href');

          //create new link
          var scrapeLink = url + a;

          request(scrapeLink, function(error, response, html) {
            if (!error && response.statusCode == 200) {

              var $ = cheerio.load(html);

              //collect remaining product pages and add to set
              if ($('[type=submit]').length !== 0) {
                urlSet.add(scrapeLink);
              }
              callback(null, true);
            }
          });
        });
      }
    });
    console.log(urlSet);
    //call lastScraper so we can grab data from the set (product pages)
  },
  function(previousvalue, callback) {
    //scrape set, product pages
    for (var i = 0; i < urlSet.length; i++) {
      var url = urlSet[i];

      request(url, function(error, response, html) {
        if (!error && response.statusCode == 200) {
          var $ = cheerio.load(html);

          //grab data and store as variables
          var price = $('.price').text();
          var img = $('.shirt-picture').find("img").attr("src");
          var title = $('body').find(".shirt-details > h1").text().slice(4);

          var tshirtObject = {};
          //add values into tshirt object

          tshirtObject.price = price;
          tshirtObject.img = img;
          tshirtObject.title = title;
          tshirtObject.url = url;
          tshirtObject.date = moment().format('MMMM Do YYYY, h:mm:ss a');

          //add the object into the array of tshirts
          tshirtArray.push(tshirtObject);
        }
      });
    }
  }
], function(err, result) {
  //call function to iterate through tshirt objects in array in order to convert to JSON, then into CSV to be logged
  convertJson2Csv();
});
abdulbarik
  • 6,101
  • 5
  • 38
  • 59
0

You can use this example to convert the rest of your code sample.

promise = new Promise((resolve, reject) => ( 
    request("http://shirts4mike.com/", 
    (err, response, html) => (response.statusCode == 200 ? resolve(html): reject(err))
)));


promise.then(html => {
    var $ = cheerio.load(html);
    // continue
});
Oluwafemi Sule
  • 36,144
  • 1
  • 56
  • 81
0

You correctly identify promises as a way ahead to solving your timing issues.

In order to have promises available, you need to promisify request (or adopt a HTTP lib, whose methods return promises).

You could just fix the timing issues with promises, but you could also take the opportunity to improve the overall paradigm. Instead of discrete functions for virtually identical first/second/third stages, you can write a single function that calls itself recursively. Written correctly, this will ensure that each page in the target site is visited a maximum of once; revisits should be avoided on grounds of overall performance, and loading of the target server.

//Modules being used:
var Promise = require('path/to/bluebird');
var cheerio = require('cheerio');
var moment = require('moment');

// Promisify `request` to make `request.getAsync()` available.
// Ref: http://stackoverflow.com/questions/28308131/how-do-you-properly-promisify-request
var request = Promise.promisify(require('request'));
Promise.promisifyAll(request);

//hardcoded url
var url = 'http://shirts4mike.com/';

var urlSet = new Set();
var tshirtArray = [];

var maxLevels = 3; // limit the recursion to this number of levels.

function scrapePage(url_, levelCounter) {
    // Bale out if :
    //   a) the target url_ has been visited already,
    //   b) maxLevels has been reached.
    if(urlSet.has(url_) || levelCounter >= maxLevels) {
        return Promise.resolve();
    }
    urlSet.add(url_);

    return request.getAsync(url_).then(function(response, html) {
        var $;
        if(response.statusCode !== 200) {
            throw new Error('statusCode was not 200'); // will be caught below
        }
        $ = cheerio.load(html);
        if($('[type=submit]').length > 0) {
            // yay, it's a product page.
            tshirtArray.push({
                price: $('.price').text(),
                img: $('.shirt-picture').find("img").attr("src"),
                title: $('body').find(".shirt-details > h1").text().slice(4),
                url: url_,
                date: moment().format('MMMM Do YYYY, h:mm:ss a')
            });
        }
        // find any shirt links on page represented by $, visit each link in turn, and scrape.
        return Promise.all($("a[href*=shirt]").map(function(link) {
            return scrapePage(link.href, levelCounter + 1);
        }).get());
    }).catch(function(e) {
        // ensure "success" even if scraping threw an error.
        console.log(e);
        return null;
    });
}

scrapePage(url, 0).then(convertJson2Csv);

As you can see, a recursive solution :

  • avoids repetition of code,
  • will drill down as many levels as you wish - determined by the variable maxLevels.

Note: This is still not a good solution. There's an implicit assumption here, as in the original code, that all shirt pages are reachable from the site's home page, via "shirt" links alone. If shirts were reachable via eg "clothing" > "shirts", then the code above won't find any shirts.

Roamer-1888
  • 19,138
  • 5
  • 33
  • 44