
I have a question about how to write my crawling code so that it uses multiprocessing in Python. The picture below shows the behaviour I have in mind. The problem is that the spawned processes can't accept the URL list. Please let me know the best solution you can think of.

[image: diagram of the intended multiprocessing flow]

import csv
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from multiprocessing import Pool

start_time = time.time()

driver = webdriver.Chrome(executable_path='chromedriver')

# Login
driver.get('https://quasarzone.com/login?nextUrl=https://quasarzone.com/')
driver.find_element_by_name("login_id").send_keys("ID")
driver.find_element_by_name("password").send_keys("PW")
driver.find_element_by_xpath('//*[@id="frm"]/div/div[1]/p/a').click()
time.sleep(0.1)


all_urls = []
for i in range(1, 201):
    all_urls.append('https://quasarzone.com/bbs/qf_cmr?page={}'.format(i))


result = []


def next_page(urls):
    driver.get(urls)
    res = driver.page_source
    soup = BeautifulSoup(res, "html.parser", from_encoding='utf-8')
    data_name = soup.select('td:nth-child(4) > div > div')
    data_date = soup.select('td:nth-child(6) > span')
    data_title = soup.select('td:nth-child(3) > p > a')
    data_view = soup.select('td:nth-child(5) > span')

    for name, date, title, view in zip(data_name, data_date, data_title, data_view):
        result.append([name.get_text(), date.get_text(), title.get_text(), view.get_text()])


# Problem point!!
if __name__ == '__main__':
    with Pool(processes=4) as pool:
        pool.map(next_page, all_urls)
        pool.join()


f = open('crawling_review_quasarzone.csv', 'w', newline='', encoding='utf-8')
csv_writer = csv.writer(f)

header = ['name', 'date', 'title', 'view']
csv_writer.writerow(header)

for i in result:
    csv_writer.writerow(i)
f.close()

end_time = time.time()
spend_time = end_time - start_time
t = open('spending_time.txt', 'w')
t.write('total spending time: {} sec'.format(spend_time))
t.close()

driver.quit()
– Ian
  • Does this answer your question? [Python execute script using multiple browsers Selenium](https://stackoverflow.com/questions/68341133/python-execute-script-using-multiple-browsers-selenium) – Booboo Aug 17 '21 at 11:51

2 Answers


I solved it myself. However, I don't think this is the best approach; maybe I could combine multithreading with multiprocessing. Anyway, here is the code I ended up with.

import csv
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from concurrent.futures import ProcessPoolExecutor


board_name = 'cmr'

start_time = time.time()

options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument("disable-gpu")
options.add_argument("disable-infobars")
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(executable_path='chromedriver', options=options)


driver.get('https://quasarzone.com/login?nextUrl=https://quasarzone.com/')
driver.find_element_by_name("login_id").send_keys("id")
driver.find_element_by_name("password").send_keys("pw")
driver.find_element_by_xpath('//*[@id="frm"]/div/div[1]/p/a').click()
time.sleep(0.1)


def next_page(pages):
    result = []
    for i in pages:
        driver.get('https://quasarzone.com/bbs/qf_{}?page={}'.format(board_name, i))
        time.sleep(5)

        res = driver.page_source
        soup = BeautifulSoup(res, "html.parser")
        data_name = soup.select('td:nth-child(4) > div > div')
        data_date = soup.select('td:nth-child(6) > span')
        data_title = soup.select('td:nth-child(3) > p > a')
        data_view = soup.select('td:nth-child(5) > span')

        for name, date, title, view in zip(data_name, data_date, data_title, data_view):
            result.append([name.get_text(), date.get_text(), title.get_text(), view.get_text()])

    f = open('quasarzone_{}.csv'.format(board_name), 'w', newline='', encoding='utf-8')
    csv_writer = csv.writer(f)

    header = ['name', 'date', 'title', 'view']
    csv_writer.writerow(header)

    for i in result:
        csv_writer.writerow(i)
    f.close()


def multiProcessing():
    page_threshold = 100
    number_process = 4
    pool = ProcessPoolExecutor(max_workers=number_process)

    process = []
    for i in range(number_process+1):
        p = range(page_threshold * i, page_threshold * (i+1))
        process.append(p)
    pool.map(next_page, process)


if __name__ == '__main__':
    multiProcessing()


end_time = time.time()
spend_time = end_time - start_time

t = open('spending_time_{}.txt'.format(board_name), 'w')
t.write('total spending time of {}: {:.2f} sec'.format(board_name, spend_time))
t.close()
– Ian
  • This is *not* the best case. First, this is something for which multithreading is a better fit. Although, here, you are creating only a single, reusable selenium session per pool process, which is good, there is no mechanism for quitting those sessions after you have processed all the pages. I suspect that you are ending up with some chromium processes in the background that never terminate (a sketch of one such cleanup mechanism follows these comments). See my "duplicate" comment to your question. – Booboo Aug 17 '21 at 12:02
  • It appears that function `multiProcessing` is rewriting the same output csv file over and over again overlaying previous data with new data and it is doing this in parallel. Both things seem wrong. Am I missing something? – Booboo Aug 17 '21 at 12:29
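Regarding the first comment above, here is a minimal sketch of one way to guarantee the Selenium sessions are quit while keeping the multiprocessing approach: create the driver inside the worker function and quit it in a finally block. Since each worker handles a single page range, this still amounts to roughly one browser session per process. The name scrape_page_range is a hypothetical stand-in for next_page, and the login step would also have to move into the worker.

from bs4 import BeautifulSoup
from selenium import webdriver

def scrape_page_range(pages):
    # Each worker process builds its own driver; module-level globals from
    # the parent are not reliably available inside pool workers.
    options = webdriver.ChromeOptions()
    options.add_argument('headless')
    driver = webdriver.Chrome(executable_path='chromedriver', options=options)
    try:
        # ... log in here, as in the code above ...
        rows = []
        for page in pages:
            driver.get('https://quasarzone.com/bbs/qf_cmr?page={}'.format(page))
            soup = BeautifulSoup(driver.page_source, 'html.parser')
            # ... collect rows with the same td:nth-child selectors as above ...
        return rows
    finally:
        driver.quit()  # always runs, so no chromedriver/chromium processes are left behind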

The following is how I would use a threading pool that "quits" the drivers when all the pages have been processed. You could create a larger thread pool where each thread processes a smaller range of pages for greater concurrency.

What I don't understand is that your function next_page seems to be rewriting the same csv file over and over again, clobbering the previous contents, and it is doing so in parallel with other processes, which is bound to cause erroneous results. Switching to threading, you either need to write separate files, or serialize the writing to a single file by using a threading.Lock to enforce the serialization and opening the file in append mode, with only the main thread writing out the header row. Alternatively, have each submitted task return its rows to the main thread for writing.
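A minimal sketch of the locking option (the names write_lock, write_header, and append_rows are illustrative and do not appear in the answer's code): the main thread writes the header once, and each task appends its rows while holding a threading.Lock.

import csv
import threading

board_name = 'cmr'                       # as in the answer's code
csv_path = 'quasarzone_{}.csv'.format(board_name)
write_lock = threading.Lock()

def write_header():
    # Main thread only: create the file and write the header row once.
    with open(csv_path, 'w', newline='', encoding='utf-8') as f:
        csv.writer(f).writerow(['name', 'date', 'title', 'view'])

def append_rows(rows):
    # Called from worker threads: take the lock, then append this task's rows.
    with write_lock:
        with open(csv_path, 'a', newline='', encoding='utf-8') as f:
            csv.writer(f).writerows(rows)

The no-lock alternative is to have next_pages simply return its result list and let the main thread iterate over pool.map(next_pages, pages_list), writing all the returned rows to a single file itself.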

I have also made other changes to the source to conform more closely to the PEP 8 Style Guide and to rename some of the variables and functions so that they better reflect what they represent. I also corrected what I perceived to be some logic errors, so look carefully at all the lines of code to make sure I didn't "over-correct" something. Finally, I corrected some minor English usage errors. Note that I could not run the code since I did not have the user id and password.

import csv
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import UnexpectedAlertPresentException

from concurrent.futures import ThreadPoolExecutor
import threading

class Driver:
    def __init__(self):
        options = webdriver.ChromeOptions()
        options.add_argument('headless')
        options.add_argument("disable-gpu")
        options.add_argument("disable-infobars")
        options.add_argument("--disable-extensions")
        self.driver = webdriver.Chrome(executable_path='chromedriver', options=options)

    def __del__(self):
        self.driver.quit() # clean up driver when we are cleaned up

thread_local = threading.local()

def create_driver():
    the_driver = getattr(thread_local, 'the_driver', None)
    if the_driver is None:
        the_driver = Driver()
        setattr(thread_local, 'the_driver', the_driver)
        # Special Initialization to login:
        driver = the_driver.driver
        driver.get('https://quasarzone.com/login?nextUrl=https://quasarzone.com/')
        driver.find_element_by_name("login_id").send_keys("id")
        driver.find_element_by_name("password").send_keys("pw")
        driver.find_element_by_xpath('//*[@id="frm"]/div/div[1]/p/a').click()
        # The following should be replaced by driver.implicitly_wait(3)
        # followed by a find for some element on the successfully-logged-in page:
        #time.sleep(0.1)
        try:
            driver.implicitly_wait(3)
            driver.find_elements_by_class_name('banner-area')
        except UnexpectedAlertPresentException:
            s = 'Invalid login credentials.'
            print(s)
            raise Exception(s)
    return the_driver.driver

board_name = 'cmr'

def next_pages(pages):
    driver = create_driver()
    result = []
    for page in pages:
        driver.get('https://quasarzone.com/bbs/qf_{}?page={}'.format(board_name, page))
        # What does the following accomplish?
        #time.sleep(5)

        res = driver.page_source
        soup = BeautifulSoup(res, "html.parser")
        data_name = soup.select('td:nth-child(4) > div > div')
        data_date = soup.select('td:nth-child(6) > span')
        data_title = soup.select('td:nth-child(3) > p > a')
        data_view = soup.select('td:nth-child(5) > span')

        for name, date, title, view in zip(data_name, data_date, data_title, data_view):
            result.append([name.get_text(), date.get_text(), title.get_text(), view.get_text()])

    # The following is questionable:
    with open('quasarzone_{}.csv'.format(board_name), 'w', newline='', encoding='utf-8') as f:
        csv_writer = csv.writer(f)

        header = ['name', 'date', 'title', 'view']
        csv_writer.writerow(header)

        for row in result:
            csv_writer.writerow(row)

def process_pages():
    start_time = time.time()

    page_threshold = 100
    number_threads = 4
    # or, for example, page_threshold = 50; number_threads = 8
    pages_list = [range(page_threshold * i, page_threshold * (i+1)) for i in range(number_threads)]
    with ThreadPoolExecutor(max_workers=number_threads) as pool:
        pool.map(next_pages, pages_list)
    # Using the preceding "with" context manager results in an implicit call to pool.shutdown(True)
    # at this point to wait for all the submitted tasks to complete. Alternatively,
    # the return value from `pool.map` could be iterated to ensure completion of
    # all submitted tasks, e.g. list(pool.map(...))

    end_time = time.time()
    elapsed_time = end_time - start_time

    with open('elapsed_time_{}.txt'.format(board_name), 'w') as t:
        t.write('Total elapsed time of {}: {:.2f} sec'.format(board_name, elapsed_time))

process_pages()

# Quit the selenium drivers:
del thread_local
import gc
gc.collect() # a little extra insurance
– Booboo