I am currently trying to retrieve the associated match links, which are hrefs, from this page. I cannot seem to find them straight off the bat using Selenium/BeautifulSoup. I understand they might come from an API, but I can't figure out how to find them under the section with class `mls-l-module mls-l-module--match-list`.
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from time import sleep, time
import pandas as pd
import warnings
import numpy as np
from datetime import datetime
import json
from bs4 import BeautifulSoup
warnings.filterwarnings('ignore')

# Schedule page for the MLS regular season; match links are rendered by
# JavaScript, so the page must be loaded in a real browser before parsing.
base_url = 'https://www.mlssoccer.com/schedule/scores#competition=mls-regular-season&club=all&date=2023-02-20'

# Collected match hrefs end up here.
urls = []

option = Options()
# Selenium 4 deprecated the `Options.headless` attribute; headless mode is
# now enabled with an argument. Leave it commented out to keep the browser
# visible (the original behavior, headless=False).
# option.add_argument('--headless=new')

# Selenium 4 removed the positional executable-path argument from
# webdriver.Chrome(path, ...). With Selenium >= 4.6, Selenium Manager
# locates/downloads the matching chromedriver automatically, so no path
# is needed at all.
driver = webdriver.Chrome(options=option)
driver.get(base_url)

# Dismiss the cookie pop-up if it appears. The absolute XPath is brittle
# (any layout change breaks it — NOTE(review): prefer a stable attribute
# selector once confirmed against the live page), and the banner is not
# guaranteed to show, so a missing banner must not kill the run.
try:
    WebDriverWait(driver, 15).until(
        EC.element_to_be_clickable(
            (By.XPATH, '/html/body/div[3]/div[2]/div/div[1]/div/div[2]/div/button[2]')
        )
    ).click()
except TimeoutException:
    pass  # no cookie banner this session — continue

# Wait until the JS-rendered match list exists, then hand the rendered DOM
# to BeautifulSoup and harvest every anchor inside the match-list section.
WebDriverWait(driver, 15).until(
    EC.presence_of_element_located(
        (By.CSS_SELECTOR, 'section.mls-l-module--match-list')
    )
)
soup = BeautifulSoup(driver.page_source, 'html.parser')
for anchor in soup.select('section.mls-l-module--match-list a[href]'):
    urls.append(anchor['href'])
The output is expected to be a list of URLs from this page; I will then loop to the next page and collect all href links for matches. Perhaps using Selenium to render the page for BeautifulSoup is a better option.