Hi, I would like to automate downloading a file from a site that has a form login. When I use a browser, I can see a cookie in the request HTTP headers, and this cookie seems to be required for the request to be authorised; without it I end up with a 401 error. Sending the request twice doesn't help either, because the first response doesn't contain the required cookie. Is it feasible to obtain the cookie from a request HTTP header using Python? (A sketch of what I mean follows the two URLs below.)
Url to login: https://services.geoplace.co.uk/login
Url to download required file: https://services.geoplace.co.uk/api/downloadMatrix/getFile?fileName=30001_81s3.zip&fileType=LEVEL_3&fileVersion=May-2020&sfAccountId=xxx
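In principle a requests.Session should capture whatever Set-Cookie headers the login response carries and replay them automatically on the next request. Here is a minimal sketch of that approach; the form field names ('username' and 'password') and the idea that the form POSTs straight back to /login are assumptions, not something confirmed against the site:

import requests

# minimal sketch: a Session persists cookies between requests
session = requests.Session()

# assumption: the login form POSTs 'username'/'password' to /login
login = session.post(
    'https://services.geoplace.co.uk/login',
    data={'username': 'myusername', 'password': 'mypassword'},
)
print(login.status_code)

# whatever Set-Cookie headers came back are now stored here
print(session.cookies.get_dict())

# the stored cookies are sent automatically on the next request
download_url = ('https://services.geoplace.co.uk/api/downloadMatrix/getFile'
                '?fileName=30001_81s3.zip&fileType=LEVEL_3'
                '&fileVersion=May-2020&sfAccountId=xxx')
resp = session.get(download_url)
print(resp.status_code)  # 401 here would mean the cookie alone is not enough

That said, here is the mechanize-based attempt so far: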
import mechanize
import cookielib  # renamed http.cookiejar in Python 3
# Browser
br = mechanize.Browser()
# Cookie Jar
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
# Browser options
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-agent', 'Chrome')]
# The site we will navigate into, handling its session
br.open('https://services.geoplace.co.uk/login')
# View available forms
for f in br.forms():
    print "Form: " + str(f)
# Select the login form (the first form on the page)
br.select_form(nr=0)
# User credentials
br.form['username'] = 'myusername'
br.form['password'] = 'mypassword'
# Login
response = br.submit()
br.open('https://services.geoplace.co.uk')
request = br.request
print request.header_items()
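# the Cookie header mechanize attached to the last request, if any
# (get_header comes from the underlying urllib2-style Request; that it is
# available here is an assumption about the mechanize version in use)
print request.get_header('Cookie')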
# if successful we have some cookies now; cj (attached above) already holds
# them, so there is no need to reach into mechanize's private handlers
# convert cookies into a dict usable by requests
cookie_dict = {}
for c in cj:
    cookie_dict[c.name] = c.value
print cookie_dict
# request the file with the authenticated session (URL kept on one line)
response = br.open('https://services.geoplace.co.uk/api/downloadMatrix/getFile?fileName=30001_81s3.zip&fileType=LEVEL_3&fileVersion=May-2020&sfAccountId=xxx')
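At this point nothing is written to disk yet. Assuming the last br.open() succeeded, a minimal sketch for saving the payload, and for replaying cookie_dict with requests (the local file name simply mirrors the fileName query parameter):

# write the downloaded bytes to disk
with open('30001_81s3.zip', 'wb') as out:
    out.write(response.read())

# alternatively, replay the extracted cookies with requests
# (sketch only; assumes the cookies are all the authorisation needed)
import requests
r = requests.get(
    'https://services.geoplace.co.uk/api/downloadMatrix/getFile',
    params={
        'fileName': '30001_81s3.zip',
        'fileType': 'LEVEL_3',
        'fileVersion': 'May-2020',
        'sfAccountId': 'xxx',
    },
    cookies=cookie_dict,
)
with open('30001_81s3.zip', 'wb') as out:
    out.write(r.content)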