I have a Scrapy Splash scraper with Lua script.
The Lua script currently only initiates scrolling on a page to load more results on the search page.
From the search page I navigate to detail pages which I scrape.
However, on the detail page the photo carousel is not present in the DOM yet; it is loaded dynamically only after the user clicks the #showphotos element.
After clicking that element the following photocarousel HTML is loaded:
<div id="slider">
<div class="slider-inner">
<div class="item active">
<img src="https://www.example.com/images/1.jpg">
</div>
<div class="item">
<img src="https://www.example.com/images/2.jpg">
</div>
</div>
</div>
I already checked a couple of related answers (links lost), but none of them solved this problem.
So I tried writing some script:
# Lua script for Splash's /execute endpoint: click the #showphotos element,
# wait for the carousel markup to be injected, then return a sanity count
# plus the rendered HTML.
# NOTE: Lua tables are 1-indexed — select_all(...)[0] is nil, so calling
# mouse_click() on it would fail; the first match is [1].
click_script = """
function main(splash, args)
    local btn = splash:select_all('#showphotos')[1]
    btn:mouse_click()
    assert(splash:wait(0.5))
    return {
        num = #splash:select_all('#slider div.slider-inner'),
        html = splash:html()
    }
end
"""
As I'm very new to Splash and Lua I don't know where to add this code or where to call it from.
I've created a test detail page (link lost) that reproduces the issue.
My current code:
myscraper.py
import json
import re
import scrapy
import time
from scrapy_splash import SplashRequest
from scrapy.selector import Selector
from scrapy.http import HtmlResponse
from myresults.items import MyResultItem
class Spider(scrapy.Spider):
    """Scrape search results rendered via Splash, then follow each detail
    page through Splash as well, clicking #showphotos so the dynamically
    loaded photo carousel is present in the returned HTML.
    """
    name = 'myscraper'
    allowed_domains = ['example.com']
    start_urls = ['https://www.example.com/results']

    # Lua: scroll to the bottom of the results page repeatedly until no
    # new ".object-adres" elements appear, then return the full HTML.
    scroll_script = """
    function main(splash, args)
        local object_count = 0
        splash:go(splash.args.url)
        splash:wait(0.5)
        local get_object_count = splash:jsfunc([[
            function (){
                var objects = document.getElementsByClassName("object-adres");
                return objects.length;
            }
        ]])
        -- 'local' keeps this out of Lua's global namespace
        local temp_object_count = get_object_count()
        while object_count ~= temp_object_count do
            splash:evaljs('window.scrollTo(0, document.body.scrollHeight);')
            splash:wait(0.5)
            object_count = temp_object_count
            temp_object_count = get_object_count()
        end
        return splash:html()
    end
    """

    # Lua: open a detail page, click #showphotos (if present) so the photo
    # carousel is injected into the DOM, then return the rendered HTML.
    click_script = """
    function main(splash, args)
        splash:go(splash.args.url)
        splash:wait(0.5)
        local btn = splash:select('#showphotos')
        if btn then
            btn:mouse_click()
            assert(splash:wait(0.5))
        end
        return splash:html()
    end
    """

    def start_requests(self):
        """Kick off the crawl with a Splash-rendered, fully-scrolled
        results page, handled by :meth:`parse`."""
        yield SplashRequest(
            self.start_urls[0], self.parse,
            endpoint='execute',
            args={'lua_source': self.scroll_script},
        )

    def parse(self, response):
        """Yield one Splash request per detail-page link on the results page.

        The detail pages must also render through Splash (with the click
        script) — a plain scrapy.Request would never execute the click, so
        the carousel would be missing from the DOM.
        """
        for link in response.css('a.adreslink::attr(href)').getall():
            # urljoin handles both absolute and relative hrefs.
            yield SplashRequest(
                response.urljoin(link), self.parse_object,
                endpoint='execute',
                args={'lua_source': self.click_script},
            )

    def parse_object(self, response):
        """Build a MyResultItem from a Splash-rendered detail page."""
        item = MyResultItem()
        item['url'] = response.url
        # Carousel <img> tags exist only because click_script clicked
        # #showphotos before the HTML was returned.
        item['photos'] = response.css(
            '#slider div.item img::attr(src)').getall()
        yield item
items.py
import scrapy
class MyResultItem(scrapy.Item):
    """Item holding the data scraped from one detail page.

    Renamed from ``RentalItem``: the spider does
    ``from myresults.items import MyResultItem``, so the old class name
    made that import fail with ImportError.
    """
    id = scrapy.Field()      # detail-page identifier
    photos = scrapy.Field()  # list of carousel image URLs
    url = scrapy.Field()     # detail-page URL