I would try an approach with Selenium, since YT renders these pages with JS and I don't think it's possible to scrape the URLs with requests and bs4.
You could use something like this:
import time

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
# Module-level WebDriver shared by all the step functions below.
driver = webdriver.Firefox()
# driver = webdriver.Chrome() # If you would prefer to use Chrome
# Accumulator for the scraped video URLs (intended to be filled by find_videos).
video_urls = []
def accept_cookies():
    """Click YouTube's cookie-consent button if it is present.

    Returns:
        True when the button was found and clicked, False when it is
        not (yet) in the DOM.

    NOTE(review): the absolute XPath is brittle and will break whenever
    YouTube changes its consent-page layout — verify it still matches.
    """
    try:
        consent_button = driver.find_element(
            By.XPATH,
            "/html/body/c-wiz/div/div/div/div[2]/div[1]/div[3]/div[1]/form[1]/div/div/button/span",
        )
        consent_button.click()
    except NoSuchElementException:
        return False
    return True
def find_videos():
    """Collect the video URLs from the channel page (stub).

    Returns:
        True once the URL-collection code ran without a lookup error,
        False when the expected elements are not (yet) in the DOM.
    """
    # Removed the leftover debug print("test") from the draft.
    try:
        # TODO: locate the video anchors and collect their hrefs, e.g.
        #   for anchor in driver.find_elements(By.ID, "video-title-link"):
        #       video_urls.append(anchor.get_attribute("href"))
        return True
    except NoSuchElementException:
        return False
def activate_game():
    """Click the first element carrying the CSS class "btn".

    Returns:
        True when the element was found and clicked, False otherwise.
    (Currently not dispatched by activate_scraping — add it as an
    extra step if needed.)
    """
    try:
        button = driver.find_element(By.CLASS_NAME, "btn")
        button.click()
    except NoSuchElementException:
        return False
    return True
def activate_scraping():
driver.get("https://www.youtube.com/@NetworkChuck/videos")
step = 0
tries = 0
while step < 2:
if tries <= 5: # 5 tries to accomplish the task
tries += 1
success = False
match step:
case 0:
success = accept_cookies()
case 1:
success = find_videos()
if success:
step += 1
tries = 0
else:
driver.implicitly_wait(2) # wait 2 secs before retrying the current step
else:
return False
assert "No results found." not in driver.page_source
driver.close()
return True
activate_scraping()
Note that I wrote the code so that it retries finding each element, so it doesn't crash if your connection is slow; you can also easily add more steps.
You still need to copy the links but I think if you dive a little bit into the selenium docs you can manage to do that.
https://selenium-python.readthedocs.io/locating-elements.html