I know how to get to the page of results I want to scrape via selenium, but I can't figure out how to actually scrape the page of results. I've tried with mechanize, too, but that didn't get me much further. Here's where I am now:
import re
import urllib2
import csv
import os
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup, SoupStrainer
import datetime
import time
import smtplib
import atexit
import signal
import json
import os
import gspread
import sys
import gc
script_path = os.path.dirname(os.path.realpath(__file__))
driver = webdriver.PhantomJS(executable_path="/usr/bin/phantomjs", service_args=['--ignore-ssl-errors=true', '--ssl-protocol=any'])
#launches headless browser, completes proper search in Casenet
def main():
driver.get('https://www.courts.mo.gov/casenet/cases/nameSearch.do')
if 'Service Unavailable' in driver.page_source:
log('Casenet website seems to be down. Receiving "service unavailable"')
driver.quit()
gc.collect()
return False
court = Select(driver.find_element_by_id('courtId'))
court.select_by_visible_text('All Participating Courts')
case_enter = driver.find_element_by_id('inputVO.lastName')
case_enter.send_keys('Wakefield & Associates')
driver.find_element_by_id('findButton').click()
time.sleep(1)
number_of_pages = 204
for i in range(number_of_pages):
output_trs = []
party = (driver.find_element_by_class_name('outerTable'))
output_trs.append(party)
print output_trs
main()
Eventually the idea is to store the parties, case numbers and filing dates as strings in a .csv. When I print output_trs now, I get:
selenium.webdriver.remote.webelement.WebElement (session="c4e7b9e0-7a3b-11e8-83f2-b9030062270d", element=":wdc:1530125781332")
Appreciate any help.