Up until three days ago I was able to scrape the target site, but then it started showing the error below. When I looked at the site's source code, I could not see any changes, and Scrapy still returns a 200 response. I am using a proxy and a user agent; I changed both, but the result is the same: I keep getting a JSON decode error.
Error:
File "/usr/lib/python3.8/json/decoder.py", line 355, in raw_decode raise JSONDecodeError("Expecting value", s, err.value) from None json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
My code:
import scrapy
import json
import datetime
import bs4
import time
import logging
from requests.models import PreparedRequest

from hepsibura_spider.items import HepsiburaSpiderItem
from scrapy.crawler import CrawlerProcess


class HepsiburaSpider(scrapy.Spider):
    name = 'hepsibura'
    # allowed_domains = ['www.hepsibura.com']
    handle_httpstatus_list = [301]

    def start_requests(self):
        urls = [
            'https://www.hepsiburada.com/monitor-bilgisayarlar-c-116465?filtreler=satici:Hepsiburada;?_random_number={rn}#tabIndex=0',
        ]
        for url in urls:
            params = []
            # keep the base url in meta so the pagination request can be rebuilt
            main_url, parameters = url.split('&', 1) if '&' in url else (url, None)
            parameters = parameters.split(':') if parameters else []
            for parameter in parameters:
                key, value = parameter.split('=')
                params.append((key.strip(), value.strip()))
            # params.append(('main_url', main_url))
            if 'sayfa' not in dict(params):
                params.append(('sayfa', '1'))
            yield scrapy.Request(
                url=url.format(rn=time.time()),
                callback=self.parse_json,
                meta={
                    'main_url': main_url,
                    'params': dict(params),
                },
                headers={
                    'Cache-Control': 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0',
                    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_10) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.5134.152 Safari/537.36',
                },
            )

    def parse_json(self, response):
        if response.status == 301:
            logging.log(logging.INFO, 'Finished scraping')
            return
        current_url = response.request.url.split('&')[0].strip()
        parameters = response.meta.get('params')
        soup = bs4.BeautifulSoup(response.text, 'lxml')
        scripts = soup.select('script')
        data_script = ''
        for script in scripts:
            # print(script.text)
            if 'window.MORIA.PRODUCTLIST = {' in str(script):
                print('Found the data')
                data_script = str(script)
                break
        # strip the javascript wrapper so that only the JSON state remains
        data_script = (
            data_script
            .replace('<script type="text/javascript">', '')
            .replace('window.MORIA = window.MORIA || {};', '')
            .replace('window.MORIA.PRODUCTLIST = {', '')
            .replace("'STATE': ", '')
            .replace('</script>', '')[:-4]
        )
        json_data = json.loads(data_script)
        products = json_data['data']['products']
        for product in products:
            item = HepsiburaSpiderItem()
            item['rowid'] = hash(str(datetime.datetime.now()) + str(product['productId']))
            item['date'] = str(datetime.datetime.now())
            item['listing_id'] = product['variantList'][0]['listing']['listingId']
            item['product_id'] = product['variantList'][0]['sku'].lower()
            item['product_name'] = product['variantList'][0]['name']
            item['price'] = float(product['variantList'][0]['listing']['priceInfo']['price'])
            item['url'] = 'https://www.hepsiburada.com' + product['variantList'][0]['url']
            item['merchantName'] = product['variantList'][0]['listing']['merchantName'].lower()
            yield item
        # request the next page with the incremented 'sayfa' parameter
        parameters['sayfa'] = int(parameters['sayfa']) + 1
        req = PreparedRequest()
        req.prepare_url(current_url, parameters)
        yield scrapy.Request(
            url=req.url,
            callback=self.parse_json,
            meta={
                'params': parameters,
            },
            headers={
                'Cache-Control': 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0',
                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_10) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.5134.152 Safari/537.36',
            },
        )


if __name__ == '__main__':
    process = CrawlerProcess()
    process.crawl(HepsiburaSpider)
    process.start()
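As a side note, "Expecting value: line 1 column 1 (char 0)" means the string handed to json.loads is empty or does not start with JSON at all. Wrapping the loads call in parse_json like this (a debugging sketch only, not part of the fix) would show what the replace chain actually produces:

try:
    json_data = json.loads(data_script)
except json.JSONDecodeError as exc:
    # dump the start of whatever reached the decoder, so the mismatch
    # with the expected JSON state is visible in the crawl log
    self.logger.error('Payload is not JSON (%s); it starts with: %r',
                      exc, data_script[:200])
    return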
After digging through the raw response, I found the cause: the site changed its JSON format. Every request now generates a unique ID:

window.MORIA.PRODUCTLIST = Object.assign(window.MORIA.PRODUCTLIST || {}, {
    '60cada8e-57dd-466e-f7af-62efca4fa8a8': {
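Because of that random key, the fixed replace chain in my spider no longer lines up with the script contents. The best I can think of is something along these lines: match past the random key with a regular expression and cut out the first balanced {...} block. This is only an untested sketch; it assumes the object under the UUID still starts with the same 'STATE' key the old code stripped, and that the state itself is plain double-quoted JSON as before:

import json
import re

def extract_productlist_state(script_text):
    # skip past the random uuid key; the 'STATE' key following it is an
    # assumption carried over from the old format, not confirmed
    m = re.search(r"'[0-9a-f\-]{36}':\s*\{\s*'STATE':\s*", script_text)
    if not m:
        return None
    return json.loads(first_balanced_block(script_text[m.end():]))

def first_balanced_block(s):
    # return the first balanced {...} block, ignoring braces that occur
    # inside double-quoted strings
    depth, in_string, escaped = 0, False, False
    for i, ch in enumerate(s):
        if escaped:
            escaped = False
        elif ch == '\\':
            escaped = True
        elif ch == '"':
            in_string = not in_string
        elif not in_string:
            if ch == '{':
                depth += 1
            elif ch == '}':
                depth -= 1
                if depth == 0:
                    return s[:i + 1]
    return s

parse_json would then call json_data = extract_productlist_state(str(script)) and read json_data['data']['products'] as before, if the inner layout is unchanged.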
How can I bypass this?
Thank you.