
What should I change in my code to keep Scrapy from retrieving the same items during a deep crawl into multiple pages?

Right now, Scrapy performs crawling and scraping like this:

Visit Page-A >> ScrapeItem1 & Extract_link_to_Page-B >> Visit Page-B >> ScrapeItem2 & Extract_links_to_Pages-C-D-E >> ScrapeItems2-3-4-5 from Pages-C-D-E

The code looks like this:

    def category_page(self, response):
        next_page = response.xpath('')  # keep the SelectorList; extract_first() is called below

        for item in self.parse_attr(response):
            yield item

        if next_page:
            path = next_page.extract_first()
            nextpage = response.urljoin(path)
            yield scrapy.Request(nextpage, callback=self.category_page)

    def parse_attr(self, response):
        item = TradeItem()
        item['NameOfCompany'] = response.xpath('').extract_first().strip()
        item['Country'] = response.xpath('').extract_first().strip()
        item['TrustPt'] = response.xpath('').extract_first().strip()
        company_page = response.xpath('').extract_first()

        if company_page:
            company_page = response.urljoin(company_page)
            request = scrapy.Request(company_page, callback=self.company_data)
            request.meta['item'] = item
            yield request
        else:
            yield item

    def company_data(self, response):
        item = response.meta['item']
        item['Address'] = response.xpath('').extract()[1]
        product_page = response.xpath('').extract()[1]
        sell_page = response.xpath('').extract()[2]
        trust_page = response.xpath('').extract()[4]       

        if sell_page:
            sell_page = response.urljoin(sell_page)
            request = scrapy.Request(sell_page, callback=self.sell_data)
            request.meta['item3'] = item
            yield request
        if product_page:
            product_page = response.urljoin(product_page)
            request = scrapy.Request(product_page, callback=self.product_data)
            request.meta['item2'] = item
            yield request
        if trust_page:
            trust_page = response.urljoin(trust_page)
            request = scrapy.Request(trust_page, callback=self.trust_data)
            request.meta['item4'] = item
            yield request

        yield item

    def product_data(self, response):
        item = response.meta['item2']
        item['SoldProducts'] = response.xpath('').extract()
        yield item

    def sell_data(self, response):
        item = response.meta['item3']
        item['SellOffers'] = response.xpath('').extract()
        yield item

    def trust_data(self, response):
        item = response.meta['item4']
        item['TrustData'] = response.xpath('').extract()
        yield item

The problem is that items are repeated, because each callback yields the item in a PARTIALLY scraped state. So I get entries like this:

Step1:

{'Address': u'',
 'Country': u'',
 'NameOfCompany': u'',
 'TrustPoints': u''}

Step2:

{'Address': u'',
 'Country': u'',
 'NameOfCompany': u'',
 'SellOffers': [],
 'TrustPoints': u''}

Step3:

{'Address': u'',
 'Country': u'',
 'NameOfCompany': u'',
 'SellOffers': [],
 'SoldProducts': [u' '],
 'TrustData': [u''],
 'TrustPoints': u''}

Each step repeats the values from the previous one. I know that this is caused by Scrapy visiting the URLs multiple times; there is some error in my logic which I cannot fully grasp.
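
In other words, a minimal plain-Python sketch of the effect (not the real spider, just the yield pattern): the same mutable item is yielded once per callback, so every partially filled snapshot is exported as its own entry.

    # Minimal reproduction of the duplication, outside Scrapy:
    # one shared dict is yielded once per "callback", so each
    # partial snapshot becomes a separate exported item.
    def fake_crawl():
        item = {}                      # shared item, like response.meta['item']
        item['NameOfCompany'] = u''    # filled by company_data
        yield dict(item)               # Step 1: company_data yields the item early
        item['SellOffers'] = []        # filled by sell_data
        yield dict(item)               # Step 2: sell_data yields it again
        item['SoldProducts'] = [u' ']  # filled by product_data
        yield dict(item)               # Step 3: product_data yields it yet again

    for snapshot in fake_crawl():
        print(snapshot)                # three entries, each repeating the last one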

I understand that this is related to passing multiple requests in the correct order and saving the output from all of them into one item, not four separate ones. How do I achieve that? I already use response.meta['item'], and this works fine when only one request is passed, but it fails on more consecutive requests. I need those requests to work in a 'chain-like' fashion. – Alex16237 Feb 24 '19 at 15:40

1 Answer


Problem solved.

Corresponding answer:

https://stackoverflow.com/a/16177544/11008259

Here is the code, corrected for my case:

    def parse_attr(self, response):
        company_page = response.xpath('').extract_first()

        company_page = response.urljoin(company_page)
        request = scrapy.Request(company_page, callback=self.company_data)
        yield request

    def company_data(self, response):
        item = TradekeyItem()
        item['Address'] = response.xpath('').extract()[1]
        item['NameOfCompany'] = response.xpath('').extract()[1]

        product_page = response.xpath('').extract()[1]

        product_page = response.urljoin(product_page)
        # meta={'item': item} already attaches the item; a separate
        # request.meta['item'] = item assignment would be redundant.
        request = scrapy.Request(product_page, callback=self.product_data, meta={'item': item})
        return request

    def product_data(self, response):
        item = response.meta['item']
        item['SoldProducts'] = response.xpath('').extract()
        sell_page = response.xpath('').extract()[2]
        sell_page = response.urljoin(sell_page)
        request = scrapy.Request(sell_page, callback=self.sell_data, meta={'item': item})
        return request

    def sell_data(self, response):
        item = response.meta['item']
        item['SellOffers'] = response.xpath('').extract()
        trust_page = response.xpath('').extract()[4]
        trust_page = response.urljoin(trust_page)
        request = scrapy.Request(trust_page, callback=self.trust_data, meta={'item': item})
        return request

    def trust_data(self, response):
        item = response.meta['item']
        item ['TrustData'] = response.xpath('")]//text()').extract()
        yield item

We establish a chain between the requests by not yielding the item at each step, but only at the last step. Each callback returns a request for the next one, so the item is yielded exactly once, after all of the callbacks have run.
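
For reference, a self-contained sketch of the same pattern, with a hypothetical spider name, URL, and XPaths standing in for the real ones:

    import scrapy

    class ChainSpider(scrapy.Spider):
        # Hypothetical spider illustrating the chain: each callback fills part
        # of the item and forwards it via meta; only the last callback yields it.
        name = 'chain_example'
        start_urls = ['https://example.com/companies']  # placeholder URL

        def parse(self, response):
            # Placeholder selector: one request per company page.
            for href in response.xpath('//a[@class="company"]/@href').extract():
                yield scrapy.Request(response.urljoin(href), callback=self.company_data)

        def company_data(self, response):
            item = {'Address': response.xpath('//address/text()').extract_first()}
            product_href = response.xpath('//a[@class="products"]/@href').extract_first()
            # Forward the half-filled item instead of yielding it here.
            yield scrapy.Request(response.urljoin(product_href),
                                 callback=self.product_data, meta={'item': item})

        def product_data(self, response):
            item = response.meta['item']
            item['SoldProducts'] = response.xpath('//li[@class="product"]/text()').extract()
            yield item  # end of the chain: the item is complete and yielded exactly once

On Scrapy 1.7 and later, the same hand-off is usually written with cb_kwargs instead of meta, e.g. scrapy.Request(url, callback=self.product_data, cb_kwargs={'item': item}), with the callback receiving item as a keyword argument; meta still works, but is mainly intended for data consumed by middlewares.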
