import lxml.html
import mechanize, cookielib
import multiprocessing
browser = None
def download(i):
    # Fetch a page through the shared global browser and parse it.
    link = 'http://www.google.com'
    response = browser.open(link)
    tree = lxml.html.parse(response)
    print tree
    return 0
if __name__ == '__main__':
    browser = mechanize.Browser()
    cookie_jar = cookielib.LWPCookieJar()
    browser.set_cookiejar(cookie_jar)
    browser.set_handle_equiv(True)
    browser.set_handle_gzip(True)
    browser.set_handle_redirect(True)
    browser.set_handle_referer(False)  # initially this was on, but it is probably better off
    browser.set_handle_robots(False)
    browser.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    browser.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:2.0.1) Gecko/20100101 Ubuntu/11.04 maverick Firefox/4.0.1')]
    pool = multiprocessing.Pool(None)
    tasks = range(8)
    r = pool.map_async(download, tasks)
    r.wait()  # wait for the results
If I remove the multiprocessing part, it works. If I don't use the browser inside the download function, it also works. However, it seems that multiprocessing + mechanize simply do not work together. How can I fix this? The problem does not happen under Linux.
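My current guess is that on Windows, multiprocessing spawns fresh worker processes that re-import the module, so the global browser is still None inside download (on Linux, fork lets the workers inherit the already-configured browser). Below is a minimal sketch of a workaround I'm considering, where each worker builds its own browser through the pool's initializer hook; init_worker is just a name I made up, and the snippet reuses the imports and download function from above:

def init_worker():
    # Give every spawned worker its own Browser, since globals assigned
    # under the __main__ guard are not inherited by children on Windows.
    global browser
    browser = mechanize.Browser()
    browser.set_cookiejar(cookielib.LWPCookieJar())
    browser.set_handle_robots(False)

if __name__ == '__main__':
    pool = multiprocessing.Pool(None, initializer=init_worker)
    r = pool.map_async(download, range(8))
    r.wait()

Is creating one mechanize.Browser per process the right approach, or is there a way to share a single browser (and its cookie jar) across the pool?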