I'm working on a web crawler that should follow only internal links, using requests and bs4.
I have a rough working version below, but I'm not sure how to properly check whether a link has already been crawled (one idea I had is sketched just after the code).
import re
import time
import requests
import argparse
from bs4 import BeautifulSoup

internal_links = set()

def crawler(new_link):
    html = requests.get(new_link).text
    soup = BeautifulSoup(html, "html.parser")
    for link in soup.find_all('a', attrs={'href': re.compile("^http://")}):
        if "href" in link.attrs:
            print(link)
            if link.attrs["href"] not in internal_links:
                new_link = link.attrs["href"]
                print(new_link)
                internal_links.add(new_link)
                print("All links found so far, ", internal_links)
                time.sleep(6)
                crawler(new_link)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('url', help='Pass the website url you wish to crawl')
    args = parser.parse_args()
    url = args.url
    # Check full url has been passed, otherwise requests will throw an error later
    try:
        crawler(url)
    except:
        if url[0:4] != 'http':
            print('Please try again and pass the full url eg http://example.com')

if __name__ == '__main__':
    main()
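For the "already crawled?" check specifically, one idea I had (a rough sketch, not tested against the rest of my code) is to drop the recursion, keep an explicit queue of pages to visit, and test set membership before fetching rather than after, so a page is never requested twice no matter how many times it is linked:

import re
import time
from collections import deque
import requests
from bs4 import BeautifulSoup

def crawl(start_url):
    visited = set()
    queue = deque([start_url])
    while queue:
        url = queue.popleft()
        # This is the actual "already crawled?" check, done before
        # the request is made, so duplicates cost nothing.
        if url in visited:
            continue
        visited.add(url)
        html = requests.get(url).text
        soup = BeautifulSoup(html, "html.parser")
        # Same absolute-link filter as my current version, just to keep
        # the visited-set logic separate from the relative-link problem.
        for link in soup.find_all('a', attrs={'href': re.compile("^http://")}):
            queue.append(link.attrs["href"])
        time.sleep(6)
    return visited

The point would be that duplicates can be appended to the queue freely, because they are filtered out exactly once, at fetch time. Is that a reasonable pattern, or is there a more standard way?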
These are the last few lines of the output from my current version:
All links found so far, {'http://quotes.toscrape.com/tableful', 'http://quotes.toscrape.com', 'http://quotes.toscrape.com/js', 'http://quotes.toscrape.com/scroll', 'http://quotes.toscrape.com/login', 'http://books.toscrape.com', 'http://quotes.toscrape.com/'}
<a href="http://quotes.toscrape.com/search.aspx">ViewState</a>
http://quotes.toscrape.com/search.aspx
All links found so far, {'http://quotes.toscrape.com/tableful', 'http://quotes.toscrape.com', 'http://quotes.toscrape.com/js', 'http://quotes.toscrape.com/search.aspx', 'http://quotes.toscrape.com/scroll', 'http://quotes.toscrape.com/login', 'http://books.toscrape.com', 'http://quotes.toscrape.com/'}
<a href="http://quotes.toscrape.com/random">Random</a>
http://quotes.toscrape.com/random
All links found so far, {'http://quotes.toscrape.com/tableful', 'http://quotes.toscrape.com', 'http://quotes.toscrape.com/js', 'http://quotes.toscrape.com/search.aspx', 'http://quotes.toscrape.com/scroll', 'http://quotes.toscrape.com/random', 'http://quotes.toscrape.com/login', 'http://books.toscrape.com', 'http://quotes.toscrape.com/'}
So it is working, but only up to a certain point, and then it doesn't seem to follow the links any further.
I'm fairly sure it's because of this line:
for link in soup.find_all('a', attrs={'href': re.compile("^http://")}):
as that only finds links that start with http://, and on a lot of the internal pages the links are relative, without a scheme or domain. But when I try it like this:
for link in soup.find_all('a'):
the program runs very briefly and then ends:
http://books.toscrape.com
{'href': 'http://books.toscrape.com'}
http://books.toscrape.com
All links found so far, {'http://books.toscrape.com'}
index.html
{'href': 'index.html'}
index.html
All links found so far, {'index.html', 'http://books.toscrape.com'}
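I suspect the fix for the relative links is to resolve each href against the URL of the page it was found on, e.g. with urllib.parse.urljoin, and then decide whether a link is internal by comparing domains instead of matching ^http://. A small demo of what I mean (the URLs are just examples from the sites above):

from urllib.parse import urljoin, urlparse

page = "http://quotes.toscrape.com/tag/books/"  # page the href was found on

# Relative hrefs get resolved against the current page's URL...
print(urljoin(page, "index.html"))  # http://quotes.toscrape.com/tag/books/index.html
print(urljoin(page, "/login"))      # http://quotes.toscrape.com/login
# ...while absolute hrefs pass through unchanged.
print(urljoin(page, "http://books.toscrape.com"))  # http://books.toscrape.com

# "Internal" would then mean: same netloc as the start URL.
start_netloc = urlparse("http://quotes.toscrape.com").netloc
print(urlparse(urljoin(page, "/login")).netloc == start_netloc)                     # True
print(urlparse(urljoin(page, "http://books.toscrape.com")).netloc == start_netloc)  # False

Does that sound like the right approach, or is there something built into requests/bs4 that already handles this?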