I'm using Scrapy to crawl data from a website. This is my code in the file spider.py, inside the spiders folder of my Scrapy project:
class ThumbSpider(scrapy.Spider):
    """Follow thumbnail links from listing pages and scrape article text.

    CSS selectors for the thumbnail links come from a user-supplied JSON
    file; each matched link is followed and its article body is appended
    to ``result/page_content.txt`` as one JSON document per line.
    """

    # Loaded once at class-definition time; the path is resolved relative
    # to the current working directory, so run from the project root.
    userInput = readInputData('input/user_input.json')
    name = 'thumb'

    def __init__(self, *args, **kwargs):
        super(ThumbSpider, self).__init__(*args, **kwargs)
        # Fall back to an empty list: kwargs.get() returns None when the
        # spider is started without start_urls, and iterating None in
        # start_requests() would raise TypeError.
        self.start_urls = kwargs.get('start_urls') or []

    def start_requests(self):
        """Issue one request per configured start URL."""
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Follow every thumbnail link matched by the user's selectors."""
        for cssThumb in self.userInput['cssThumb']:  # browse each cssThumb which user provides
            links = response.css('{0}::attr(href)'.format(cssThumb)).getall()
            for link in links:
                # Links may be relative; resolve against the page URL.
                yield scrapy.Request(url=response.urljoin(link),
                                     callback=self.parse_details)

    def parse_details(self, response):
        """Extract the article body text and persist it as one JSON line."""
        data = response.css('div.vnnews-text-post p span::text').getall()
        with open('result/page_content.txt', 'a') as outfile:
            # Write JSON Lines: a bare json.dump per call would concatenate
            # documents with no separator, making the file unparseable.
            json.dump(data, outfile)
            outfile.write('\n')
        # Scrapy callbacks must yield a Request, an item/dict, or None;
        # yielding a bare list is dropped with an ERROR in the log.
        yield {'content': data}
I call the `ThumbSpider` class
in the file main.py and run that file in the terminal:
import json
import os
import modules.misc as msc
from scrapy.crawler import CrawlerProcess
from week_7.spiders.spider import NaviSpider, ThumbSpider
# Run the spider programmatically (instead of the `scrapy crawl` CLI).
# Guarded so importing this module does not start a crawl as a side effect.
if __name__ == "__main__":
    process2 = CrawlerProcess()
    # start_urls is forwarded to ThumbSpider.__init__ via **kwargs.
    process2.crawl(ThumbSpider,
                   start_urls=['https://vietnamnews.vn/politics-laws',
                               'https://vietnamnews.vn/society'])
    # Starts the Twisted reactor; blocks until all crawling is finished.
    process2.start()
My program doesn't get anything from the 2 URLs. But when I uncomment `start_urls = ['https://vietnamnews.vn/politics-laws', 'https://vietnamnews.vn/society']`,
delete the `__init__`
and `start_requests`
methods in the `ThumbSpider` class,
and in main.py change `process2.crawl(ThumbSpider, start_urls=msc.getUserChoices())`
into `process2.crawl(ThumbSpider)`,
it works well. I don't know what is happening. Can anyone help me? Thank you so much.