From the Scrapy documentation: https://doc.scrapy.org/en/latest/topics/practices.html#running-multiple-spiders-in-the-same-process
from scrapy.crawler import CrawlerProcess
from .spiders import Spider1, Spider2

process = CrawlerProcess()
process.crawl(Spider1)
process.crawl(Spider2)
process.start()  # the script will block here until all crawling jobs are finished
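If your spiders live inside a Scrapy project, you will probably also want the process to pick up your project settings so that pipelines and middlewares apply; a minimal variant, assuming the script runs from inside the project:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from .spiders import Spider1, Spider2

# get_project_settings() reads your project's settings.py
process = CrawlerProcess(get_project_settings())
process.crawl(Spider1)
process.crawl(Spider2)
process.start()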
EDIT: If you wish to run multiple spiders in batches (two at a time, for example), you can do the following:
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging

configure_logging()

spiders = [Spider1, Spider2, Spider3, Spider4]

@defer.inlineCallbacks
def join_spiders(spiders):
    """Set up a new runner and wait for the given spiders to finish."""
    runner = CrawlerRunner()

    # Add each spider to the current runner
    for spider in spiders:
        runner.crawl(spider)

    # join() returns a Deferred that fires once every crawl in this runner has finished
    yield runner.join()

@defer.inlineCallbacks
def crawl(group_by=2):
    # Run the spiders in batches of `group_by`, waiting for each batch to finish
    # before starting the next one
    for i in range(0, len(spiders), group_by):
        yield join_spiders(spiders[i:i + group_by])

    # When all the spiders have finished, stop the Twisted reactor
    reactor.stop()

crawl()
reactor.run()  # the script will block here until the last crawl call is finished
I haven't tested all of this though, let me know if it works!
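For a quick sanity check, two throwaway spiders along these lines should work as Spider1 and Spider2 (the names and URL here are just placeholders):

import scrapy

class Spider1(scrapy.Spider):
    name = "spider1"
    start_urls = ["https://example.com"]

    def parse(self, response):
        # Just grab the page title to prove the spider ran
        yield {"title": response.css("title::text").get()}

class Spider2(Spider1):
    name = "spider2"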