I'm trying to scrape data from the ScienceDirect website. I want to automate the scraping by opening the journal issues one after another, so I build a list of XPaths and loop over them. When I run the loop, the first issue opens fine, but none of the elements after it can be accessed. The same approach worked for me on another website, just not on this one.
I'd also like to know whether there is a better way to access these elements than this approach (I've sketched one idea after the code below).
#Importing libraries
import os
import json
import requests
import pandas as pd
from bs4 import BeautifulSoup
from time import sleep
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
#initializing the Chrome webdriver (Selenium 4 style; Selenium 3 took executable_path directly)
driver = webdriver.Chrome(service=Service(r"C:/selenium/chromedriver.exe"))
#website to be accessed
driver.get("https://www.sciencedirect.com/journal/journal-of-corporate-finance/issues")
#generating the list of XPaths to be accessed one after the other
issues = []
for i in range(0, 20):
    for j in range(1, 7):
        issues.append(f'//*[@id="0-accordion-panel-{i}"]/section/div[{j}]/a')
#looping to access one issue after the other
for xpath in issues:
    try:
        issue_link = driver.find_element(By.XPATH, xpath)
        issue_link.click()
        sleep(4)
        driver.back()  # reloads the issues page; the accordion may re-render here
    except NoSuchElementException:
        print("no more issues", xpath)