Recently I started working with multiple tabs in Selenium and I've run into a strange problem. When I execute this code:
WebDriverWait(driver, 10).until(EC.number_of_windows_to_be(2))
driver.switch_to.window(driver.window_handles[-1])
time.sleep(1)
url_in_page_source = eu.look_for_url_in_page_source(
    page_html=driver.page_source,
    left_url_delimiter='placeholder',
    right_url_delimiter='placeholder'
)
driver.close()
driver.switch_to.window(driver.window_handles[0])
# time.sleep(10)  # <--- this fixes the error
return url_in_page_source
Immediately after the return statement, when I try to visit the extracted URL with driver.get(), I get this error:
Message: no such window: target window already closed
from unknown error: web view not found
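For context, here's a simplified sketch of the calling code (get_url_from_new_tab is a made-up name for the function containing the snippet above, and the start page is a placeholder, not my real one):

from selenium import webdriver

driver = webdriver.Chrome()
driver.get('https://example.com')  # placeholder start page

# ... a click here opens the second tab ...

urls = get_url_from_new_tab(driver)  # the function ending in the return above
driver.get(urls[0])                  # <--- fails with "no such window"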
However, I found out that adding a simple time.sleep(10) just before the return statement fixes the issue. What is even stranger: when I tried lowering the wait time to anything below 10 seconds, the error still occurred. I have no idea why this happens; maybe I'm doing something wrong. I will be very grateful for any help and explanations.
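One thing I'm considering instead of the sleep (untested, and assuming the handle count is the right thing to wait on) is to wait explicitly until the closed tab is really gone before switching back. A minimal sketch of what I mean:

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver.close()
# wait until only the original window remains before switching back to it
WebDriverWait(driver, 10).until(EC.number_of_windows_to_be(1))
driver.switch_to.window(driver.window_handles[0])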
Edit:
Here's the source code of eu.look_for_url_in_page_source(), as per @JeffC's request:
def look_for_url_in_page_source(page_html, left_url_delimiter, right_url_delimiter):
    print('Processing URL with f:look_for_url_in_page_source()')
    # extracts multiple URLs from page_source
    extracted_urls = []
    while True:
        # check if delimiters are present in page_html
        find_left_url_delimiter = page_html.find(left_url_delimiter)
        find_right_url_delimiter = page_html.find(right_url_delimiter)
        if find_left_url_delimiter == -1 or find_right_url_delimiter == -1:
            if len(extracted_urls) > 0:
                return extracted_urls
            print('f:look_for_url_in_page_source() was not able to get any text.')
            return False
        left_url_delimiter_pos = find_left_url_delimiter + len(left_url_delimiter)
        right_url_delimiter_pos = page_html[left_url_delimiter_pos:].find(right_url_delimiter) + left_url_delimiter_pos
        extracted_url = page_html[left_url_delimiter_pos:right_url_delimiter_pos].strip()
        extracted_urls.append(extracted_url)
        page_html = page_html[right_url_delimiter_pos:]
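For illustration, this is how I expect the helper to behave; the HTML snippet and delimiters below are made up, not my real 'placeholder' values:

sample_html = 'junk [[URL: https://a.example/1 ]] junk [[URL: https://a.example/2 ]] junk'
urls = look_for_url_in_page_source(
    page_html=sample_html,
    left_url_delimiter='[[URL:',
    right_url_delimiter=']]'
)
print(urls)  # ['https://a.example/1', 'https://a.example/2']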