I've been searching Stack Overflow for a couple of hours now and still haven't been able to find a suitable answer for what I am trying to do. I want to use Selenium to click through an initial login page, then hand the resulting cookies to Scrapy so it can crawl the database. So far I keep getting redirected to the initial login page.
I based grabbing the cookies and putting them into the request on this answer: scrapy authentication login with cookies
import time

import scrapy
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from selenium import webdriver

class HooversTest(scrapy.Spider):
    name = "hooversTest"
    # bare domain only; including the scheme here makes the
    # offsite middleware filter out every request
    allowed_domains = ["subscriber.hoovers.com"]
    login_page = "http://subscriber.hoovers.com/H/home/index.html"
    start_urls = ["http://subscriber.hoovers.com/H/company360/overview.html?companyId=99566395",
                  "http://subscriber.hoovers.com/H/company360/overview.html?companyId=10723000000000"]
    def login(self, response):
        return Request(url=self.login_page,
                       cookies=self.get_cookies(), callback=self.after_login)
    def get_cookies(self):
        # click through the interstitial page in a real browser,
        # then harvest the session cookies it was issued
        self.driver = webdriver.Firefox()
        self.driver.get("http://www.mergentonline.com/Hoovers/continue.php?status=sucess")
        elem = self.driver.find_element_by_name("Continue")
        elem.click()
        time.sleep(15)
        cookies = self.driver.get_cookies()
        #reduce(lambda r, d: r.update(d) or r, cookies, {})
        self.driver.close()
        return cookies
    def parse(self, response):
        return Request(url="http://subscriber.hoovers.com/H/company360/overview.html?companyId=99566395",
                       cookies=self.get_cookies(), callback=self.after_login)
    def after_login(self, response):
        hxs = HtmlXPathSelector(response)
        print hxs.select('//title').extract()
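For what it's worth, Selenium's get_cookies() returns a list of dicts with fields like name, value, domain, secure and expiry, while Scrapy's Request(cookies=...) also accepts a plain name-to-value dict. My understanding is that the commented-out reduce line was reaching for that conversion; a minimal sketch of what I mean (selenium_to_scrapy is just a name I made up):

def selenium_to_scrapy(cookies):
    # keep only name/value pairs; drop the extra Selenium fields
    # (secure, httpOnly, expiry, ...)
    return {c['name']: c['value'] for c in cookies}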
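I also suspect the redirects happen because the default start_requests fetches the start_urls without any cookies, so the very first requests hit the login wall. An untested sketch of an override I could add to the spider above:

    def start_requests(self):
        # grab the Selenium cookies once and attach them to every
        # start URL so the first fetch is already authenticated
        cookies = self.get_cookies()
        for url in self.start_urls:
            yield Request(url, cookies=cookies, callback=self.after_login)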