I wish to scrape the star ratings from this URL using Selenium: https://seedly.sg/reviews/p2p-lending/funding-societies
The code mostly runs fine, but a TimeoutException is thrown. I plan to solve this by using wait.until(EC.presence_of_element_located(...)), but I have no idea how to combine it with row.find_elements_by_xpath. How can I do this?
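Something like the sketch below is roughly what I have in mind. I am not sure whether presence_of_all_elements_located is the right condition here, or how to scope the wait to a single row, so treat the condition and the scoping as guesses:

##Rough sketch of what I'm imagining (condition and scoping are guesses, not tested)
wait = WebDriverWait(browser, 15)
##wait until the rating columns exist on the page, then search inside each one
rating_rows = wait.until(EC.presence_of_all_elements_located(
    (By.XPATH, '//div[contains(@class,"qr0ren-7 euifNX")]')))
for row in rating_rows:
    ##can I wait here as well, or do I just call find_elements on the row?
    stars = row.find_elements_by_xpath('.//span/span/span[contains(@style,"width:100%")]')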
##These are basic setups
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from time import sleep
import pandas as pd
'''Create new instance of Chrome in Incognito mode'''
##Adding the incognito argument to our webdriver
option = webdriver.ChromeOptions()
option.add_argument("--incognito")
##create a new instance of Chrome
browser = webdriver.Chrome('/Users/w97802/chromedriver')
'''Scrape Basic Info'''
from parsel import Selector
url = 'https://seedly.sg/reviews/p2p-lending/funding-societies'
browser.get(url)
selector = Selector(text=browser.page_source)
####################################################################
##This is the code to get stars ratings
'''Count stars for all pages'''
star_count_list = []
for i in range(0, 16):
    sleep(2)
    ratingcolumn = browser.find_elements_by_xpath('//div[contains(@class,"qr0ren-7 euifNX")]')
    for row in ratingcolumn:
        star_count = 0
        try:
            stars = row.find_elements_by_xpath('.//span/span/span[contains(@style,"width:100%")]')
            sleep(20)
        except TimeoutException:
            pass
        for targets in stars:
            star_count += 1
        star_count_list.append(star_count)

    '''Automation of getting to the next page'''
    sleep(10)
    WebDriverWait(browser, 15).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="__next"]/div/div[2]/div/div/div[2]/div[3]/ul/div/div/ul/li[11]'))).click()
    sleep(8)
    print("going to the next page")
'''Print Stars Result'''
for i, e in enumerate(star_count_list, start=1):
    print('\n \n \n ' + str(i) + '. \n', e)