I am trying to create a script for a web scraper using Selenium on Google Chrome. However, I am receiving this error:
raise MaxRetryError(_pool, url, error or ResponseError(cause))
urllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='127.0.0.1', port=53051): Max retries exceeded with url: /session/54df0082f43bf3d90fa9623057c3cf53/elements (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object>: Failed to establish a new connection: [Errno 61] Connection refused'))
This is my code so far, as it relates to Selenium and the WebDriver:
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# Launch Chrome in incognito mode (optional).
option = webdriver.ChromeOptions()
option.add_argument("--incognito")

# Create a new Chrome instance.
# Selenium 4 removed the `executable_path=` and `chrome_options=` keywords;
# the driver path now goes through a Service object and options via `options=`.
# NOTE(review): the MaxRetryError / "Connection refused" in the question is
# the classic symptom of a chromedriver binary that does not match the
# installed Chrome version (or of sending commands after quit()) — verify
# that 'chromedriver-2' matches your Chrome version.
browser = webdriver.Chrome(
    service=Service('/Users/emmanuelyamoah/Downloads/chromedriver-2'),
    options=option,
)

try:
    # Go to the desired website.
    browser.get("https://github.com/sonyemman")

    # Wait up to 20 seconds for the page to load.
    # Assumption: if the avatar image is visible, the whole page is
    # effectively loaded, because it is among the last things rendered.
    timeout = 20
    WebDriverWait(browser, timeout).until(
        EC.visibility_of_element_located(
            (By.XPATH, "//img[@class='avatar width-full rounded-2']")
        )
    )
except TimeoutException:
    print("Timed out waiting for page to load")
finally:
    # Always shut down the browser and its driver process, whether the
    # wait succeeded, timed out, or raised — otherwise chromedriver leaks.
    browser.quit()