I'm still working through my first few Scrapy projects and I came across a website with infinite scroll, where the requested URL is the same every time. I have tried to look for solutions, but all the material I've read involves URLs with some distinguishing feature (a page number, query text, etc.). How do I go about extracting all the names that come up on https://www.baincapital.com/people? I have figured out my selectors, but the spider only returns the initially visible info. Any help will be appreciated.
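Note: from what I've read, infinite-scroll pages usually fetch their extra content through a background XHR request rather than a new page URL, so before reaching for a headless browser it can be worth watching the browser's Network tab while scrolling and replicating that request directly. Below is a minimal sketch of that approach; the endpoint, the page parameter, and the JSON keys are all hypothetical and would have to be read out of the Network tab for this particular site:

import json
import scrapy

class PeopleApiSpider(scrapy.Spider):
    name = 'people_api'
    # Hypothetical paged endpoint; the real one must come from the Network tab
    api_url = 'https://www.baincapital.com/people/ajax?page={}'

    def start_requests(self):
        yield scrapy.Request(self.api_url.format(0), callback=self.parse,
                             cb_kwargs={'page': 0})

    def parse(self, response, page):
        data = json.loads(response.text)
        for person in data.get('people', []):   # hypothetical JSON key
            yield {'name': person.get('name')}
        if data.get('has_more'):                # hypothetical JSON key
            yield scrapy.Request(self.api_url.format(page + 1),
                                 callback=self.parse,
                                 cb_kwargs={'page': page + 1})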
My code so far:
import scrapy
from scrapy_splash import SplashRequest

class BainPeople(scrapy.Spider):
    name = 'BainPeop'
    start_urls = ['https://www.baincapital.com/people']

    def start_requests(self):
        for url in self.start_urls:
            # wait=3 only gives Splash time to render the initial view;
            # it does not trigger any scrolling
            yield SplashRequest(url=url, callback=self.parse, args={'wait': 3})

    def parse(self, response):
        name = response.css('h4 span::text').extract()
        links = response.css('div.col-xs-6.col-sm-4.col-md-6.col-lg-3.grid.staff a::attr(href)').extract()
        yield {'name': name, 'links': links}
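The problem with this version is that args={'wait': 3} only gives Splash time to render the initial viewport; nothing ever scrolls, so the lazy-loaded cards never appear. Splash can run a Lua script through its execute endpoint to do the scrolling server-side. A sketch, with the round count and waits as guesses that would need tuning (scrapy-splash's magic response, on by default, maps the returned html key onto response.body, so the parse selectors above should keep working):

scroll_script = """
function main(splash)
    assert(splash:go(splash.args.url))
    assert(splash:wait(3))
    for _ = 1, 20 do
        splash:runjs("window.scrollTo(0, document.body.scrollHeight)")
        assert(splash:wait(2))
    end
    return {html = splash:html()}
end
"""

def start_requests(self):
    for url in self.start_urls:
        yield SplashRequest(url, callback=self.parse,
                            endpoint='execute',
                            args={'lua_source': scroll_script})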
Updated code:
import scrapy
from selenium import webdriver

class BainpeopleSpider(scrapy.Spider):
    name = 'bainpeople'
    # allowed_domains takes bare domains, not full URLs
    allowed_domains = ['baincapital.com']
    start_urls = ['http://www.baincapital.com/people/']

    def parse(self, response):
        driver = webdriver.Chrome(executable_path='C:/Users/uchit.madhok/Downloads/chromedriver_win32/chromedriver')
        driver.get('http://www.baincapital.com/people/')
        # find_elements_* returns a list of WebElements, so text and
        # attributes have to be read off each element individually
        name = [el.text for el in driver.find_elements_by_css_selector('h4 span')]
        links = [el.get_attribute('href') for el in driver.find_elements_by_css_selector('div.col-xs-6.col-sm-4.col-md-6.col-lg-3.grid.staff a')]
        yield {
            'name': name,
            'links': links
        }
        driver.quit()  # quit() releases the whole browser session
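Even with the Selenium bugs fixed, this version still only sees the initially loaded cards, because nothing ever scrolls the page. The usual Selenium pattern for infinite scroll is to jump to the bottom in a loop until the document height stops growing. A sketch of that helper; the pause and the round cap are assumptions to tune against the site's load speed:

import time

def scroll_to_end(driver, pause=3, max_rounds=40):
    # Keep scrolling to the bottom until the page height stops growing
    last_height = driver.execute_script("return document.body.scrollHeight")
    for _ in range(max_rounds):
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(pause)  # give the lazy-loaded cards time to render
        new_height = driver.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            break  # nothing new loaded; assume the end of the list
        last_height = new_height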
Final Code:
import scrapy
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time

class BainpeopleSpider(scrapy.Spider):
    name = 'bainpeople'
    allowed_domains = ['baincapital.com']
    start_urls = ['http://www.baincapital.com/people/']

    def parse(self, response):
        browser = webdriver.Chrome(executable_path='C:/Users/uchit.madhok/Downloads/chromedriver_win32/chromedriver')
        browser.get('http://www.baincapital.com/people/')

        # Jump to the bottom of the page repeatedly so the infinite
        # scroll keeps loading new cards; 30 rounds with an 8-second
        # pause was enough to load the full staff list
        elm = browser.find_element_by_tag_name('html')
        for _ in range(30):
            elm.send_keys(Keys.END)
            time.sleep(8)
            elm.send_keys(Keys.HOME)

        links = [el.get_attribute('href')
                 for el in browser.find_elements_by_css_selector('div.col-xs-6.col-sm-4.col-md-6.col-lg-3.grid.staff a')]
        browser.quit()  # release the browser before following the links

        for link in links:
            yield response.follow(link, callback=self.parse_detail)

    def parse_detail(self, response):
        name = response.css('h1.pageTitle::text').extract()
        # The three __location divs hold title, team and location, in that order
        title = response.css('div.__location::text')[0].extract()
        team = response.css('div.__location::text')[1].extract()
        location = response.css('div.__location::text')[2].extract()
        about = response.css('div.field-item.even p::text').extract()
        sector = response.css('ul.focus_link a::text').extract()
        yield {
            'name': name,
            'title': title,
            'team': team,
            'location': location,
            'about': about,
            'sector': sector
        }
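A closing note on the design: Chrome is launched inside parse, so the list page is effectively fetched twice (once by Scrapy, once by Selenium), and only the detail pages go through Scrapy directly via response.follow. If the rendered list page itself ever needs Scrapy's selectors, one option is to wrap Selenium's page_source in an HtmlResponse. A sketch, where browser is assumed to be a webdriver instance that has already finished scrolling:

from scrapy.http import HtmlResponse

def selector_response(browser):
    # Wrap the Selenium-rendered DOM so Scrapy CSS selectors can run on it;
    # 'browser' is assumed to be a webdriver that has finished scrolling
    return HtmlResponse(url=browser.current_url,
                        body=browser.page_source,
                        encoding='utf-8')

# e.g. selector_response(browser).css('h4 span::text').getall()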