I'm trying to scrape image results from Google Images, but I keep getting duplicates. It downloads about 200 files, but only 60 or so are unique images. How do I get more unique images and eliminate the duplicates?
Here's my code:
import json
import os
import time
import requests
from PIL import Image
from StringIO import StringIO
from requests.exceptions import ConnectionError
import string
import urllib
import random
def go(query, path):
BASE_PATH = os.path.join(path, query)
if not os.path.exists(BASE_PATH):
os.makedirs(BASE_PATH)
resultitem = 0
file_save_dir = BASE_PATH
filename_length = 10
filename_charset = string.ascii_letters + string.digits
ipaddress = '163.118.75.137'
url = 'https://ajax.googleapis.com/ajax/services/search/images?'\
'v=1.0&q=' + query + '&start=%d'
while(resultitem < 60):
response = requests.get(url % resultitem)
results = json.loads(response.text)
for result in results['responseData']['results']:
print result['unescapedUrl']
filename = ''.join(random.choice(filename_charset)
for s in range(filename_length))
urllib.urlretrieve (result['unescapedUrl'],
os.path.join(file_save_dir, filename + '.png'))
resultitem = resultitem + 1 # or + 8 Duplicates?
def main():
    """Entry point: download sample images for a demo query."""
    query = 'angry human face'
    destination = 'myDirectory'
    go(query, destination)
# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()