I'm trying to extract some data (e.g. dealer name, address, phone # and email ID) from the page https://www.mahindrausa.com/map-hours-directions-tractors-utvs-farming-equipment--dealership--locate-a-dealer using Python with the Selenium library, but I can't extract the text using the find_element_by_xpath command.
Whenever I run the program below, it gives me blank text along with an error, and I'm not sure what I'm doing wrong. Here is the error:
NoSuchElementException: no such element: Unable to locate element: {"method":"xpath","selector":"//*[@id="locationsAR"]/div/ul/li[1]/a[2]"}
(Session info: chrome=83.0.4103.116)
Can someone please help? Here is my code:
from selenium import webdriver
import pandas as pd
data={}
abvr=['AL','AR','AZ','CA','CO','CT','DE','FL','GA','IA','ID','IL','IN','KS','KY','LA','MA','MD','ME','MI','MN','MO','MS','MT','NC','ND','NH','NJ','NM','NV','NY','OH','OK','OR','PA','SC','SD','TN','TX','UT','VA','VT','WA','WI','WV','WY']
df=pd.DataFrame(columns=['Name','Address 1','Address 2','Phone#','Email'])
path=r"C:\Program Files\chromedriver.exe"
driver=webdriver.Chrome(path)
driver.get("https://www.mahindrausa.com/map-hours-directions-tractors-utvs-farming-equipment--dealership--locate-a-dealer")
a=driver.find_elements_by_class_name("locations-list")
for s in abvr:
name="locations"+s
for n in a:
for k in n.find_elements_by_class_name("state-location"):
count=1
names= '//*[@id=\"'+name+'\"]/div/ul/li['+str(count)+']/h4'
address1='//*[@id=\"'+name+'\"]/div/ul/li['+str(count)+']/span[1]/span[1]'
address2='//*[@id=\"'+name+'\"]/div/ul/li['+str(count)+']/span[1]/span[2]'
phone='//*[@id=\"'+name+'\"]/div/ul/li['+str(count)+']/span[2]/a'
email='//*[@id=\"'+name+'\"]/div/ul/li['+str(count)+']/a[2]'
data['Name']=k.find_element_by_xpath(names).text
data['Address 1']=k.find_element_by_xpath(address1).text
data['Address 2']=k.find_element_by_xpath(address2).text
data['Phone#']=k.find_element_by_xpath(phone).text
data['Email']=k.find_element_by_xpath(email).text
df=df.append(data,ignore_index=True)
count=+1
driver.quit()
print(df)
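
In case it helps, this is the direction I was thinking of restructuring toward: wait for the locations list to load, grab each li under a state's container, and read the child nodes with XPaths relative to that li instead of absolute ones. This is an untested sketch, and it assumes the div ids (locationsAL, locationsAR, ...) and the li/h4/span layout from my XPaths above actually match the page markup, so please tell me if the approach itself is wrong:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
import pandas as pd

def grab(parent, rel_xpath):
    # Return the text of a child node, or '' when that field is absent.
    try:
        return parent.find_element(By.XPATH, rel_xpath).text
    except NoSuchElementException:
        return ''

rows = []
driver = webdriver.Chrome(r"C:\Program Files\chromedriver.exe")
driver.get("https://www.mahindrausa.com/map-hours-directions-tractors-utvs-farming-equipment--dealership--locate-a-dealer")

# Wait until at least one state block is present before touching the DOM.
WebDriverWait(driver, 20).until(
    EC.presence_of_element_located((By.CLASS_NAME, "locations-list"))
)

for s in ['AL', 'AR', 'AZ']:  # extend with the full abvr list from above
    # Each dealer is assumed to be one <li> under the state's container div.
    items = driver.find_elements(By.XPATH, '//*[@id="locations' + s + '"]/div/ul/li')
    for li in items:
        rows.append({
            'Name': grab(li, './h4'),
            'Address 1': grab(li, './span[1]/span[1]'),
            'Address 2': grab(li, './span[1]/span[2]'),
            'Phone#': grab(li, './span[2]/a'),
            'Email': grab(li, './a[2]'),
        })

driver.quit()
df = pd.DataFrame(rows, columns=['Name', 'Address 1', 'Address 2', 'Phone#', 'Email'])
print(df)

One thing I'm not sure about: Selenium's .text only returns visible text, so if a state's section is collapsed on the page I might still get blanks and may need to click each state header first or read get_attribute("textContent") instead.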