I wrote the following Python code to crawl images from the website www.style.com:
import urllib2, urllib, random, threading
from bs4 import BeautifulSoup
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class Images(threading.Thread):
    def __init__(self, lock, src):
        threading.Thread.__init__(self)
        self.src = src
        self.lock = lock

    def run(self):
        self.lock.acquire()
        urllib.urlretrieve(self.src, './img/' + str(random.choice(range(9999))))
        print self.src + 'get'
        self.lock.release()

def imgGreb():
    lock = threading.Lock()
    site_url = "http://www.style.com"
    html = urllib2.urlopen(site_url).read()
    soup = BeautifulSoup(html)
    img = soup.findAll(['img'])
    for i in img:
        print i.get('src')
        Images(lock, i.get('src')).start()

if __name__ == '__main__':
    imgGreb()
But I got this error:
IOError: [Errno 2] No such file or directory: '/images/homepage-2013-october/header/logo.png'
How can it be solved?
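My guess is that the src BeautifulSoup returns is a relative path like /images/..., so urlretrieve treats it as a local file instead of a URL. Would joining it with the site URL be the right fix? Something like this is what I have in mind (an untested sketch; the filename logo.png is just for illustration and the exact path from the error may no longer exist on the site):

import urlparse, urllib

site_url = "http://www.style.com"
src = "/images/homepage-2013-october/header/logo.png"  # relative src taken from the error message

# urljoin turns the relative src into an absolute URL against the site
full_url = urlparse.urljoin(site_url, src)
print full_url  # http://www.style.com/images/homepage-2013-october/header/logo.png
urllib.urlretrieve(full_url, './img/logo.png')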
Also, can this recursively find all the images on the website? I mean images that are not on the homepage.
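For the recursive part, is something along these lines the right idea? This is only a sketch of what I mean (untested; the depth limit and the www.style.com domain check are assumptions of mine):

import urllib2, urlparse
from bs4 import BeautifulSoup

def crawl_images(page_url, visited, depth=1):
    # stop at a small depth and skip pages already seen
    if depth < 0 or page_url in visited:
        return
    visited.add(page_url)
    soup = BeautifulSoup(urllib2.urlopen(page_url).read())
    # print every image on this page as an absolute URL
    for img in soup.findAll('img'):
        src = img.get('src')
        if src:
            print urlparse.urljoin(page_url, src)
    # follow links that stay on the same domain
    for a in soup.findAll('a'):
        href = a.get('href')
        if href:
            link = urlparse.urljoin(page_url, href)
            if urlparse.urlparse(link).netloc == 'www.style.com':
                crawl_images(link, visited, depth - 1)

visited = set()
crawl_images("http://www.style.com", visited, depth=1)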
Thanks!