Below is my code, which writes the scraped data row by row (there are around 900 pages, each with 10 rows and 5 fields per row). Is there any way to make this faster? Currently it takes about 80 minutes to export the data to CSV. Is there a way to make parallel requests to the pages and make this code more efficient?
import requests
from urllib3.exceptions import InsecureRequestWarning
import csv
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from bs4 import BeautifulSoup as bs
f = csv.writer(open('GEM.csv', 'w', newline=''))
f.writerow(['Bidnumber', 'Items', 'Quantitiy', 'Department', 'Enddate'])
def scrap_bid_data():
    page_no = 1
    while page_no < 910:
        print('Hold on creating URL to fetch data...')
        url = 'https://bidplus.gem.gov.in/bidlists?bidlists&page_no=' + str(page_no)
        print('URL created: ' + url)
        scraped_data = requests.get(url, verify=False)
        soup_data = bs(scraped_data.text, 'lxml')
        extracted_data = soup_data.find('div', {'id': 'pagi_content'})
        if len(extracted_data) == 0:
            break
        else:
            # Every second child of the results container is a bid entry.
            for idx in range(len(extracted_data)):
                if (idx % 2 == 1):
                    bid_data = extracted_data.contents[idx].text.strip().split('\n')
                    bidno = bid_data[0].split(":")[-1]
                    items = bid_data[5].split(":")[-1]
                    qnty = int(bid_data[6].split(':')[1].strip())
                    dept = (bid_data[10] + bid_data[12].strip()).split(":")[-1]
                    edate = bid_data[17].split("End Date:")[-1]
                    f.writerow([bidno, items, qnty, dept, edate])
        page_no = page_no + 1

scrap_bid_data()
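
For reference, here is a rough sketch of the kind of parallel approach I have in mind, using concurrent.futures.ThreadPoolExecutor from the standard library: each worker fetches and parses one page, and the main thread writes the rows to the CSV. The worker count of 10 and the fetch-and-parse split per page are just my assumptions, not something I have tested against the site.

import csv
import requests
from concurrent.futures import ThreadPoolExecutor
from urllib3.exceptions import InsecureRequestWarning
from bs4 import BeautifulSoup as bs

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

def fetch_page(page_no):
    # Fetch one listing page and return the parsed rows for that page.
    url = 'https://bidplus.gem.gov.in/bidlists?bidlists&page_no=' + str(page_no)
    resp = requests.get(url, verify=False)
    soup = bs(resp.text, 'lxml')
    container = soup.find('div', {'id': 'pagi_content'})
    rows = []
    if container is None:
        return rows
    # Same parsing logic as the sequential version above.
    for idx in range(len(container.contents)):
        if idx % 2 == 1:
            bid_data = container.contents[idx].text.strip().split('\n')
            bidno = bid_data[0].split(":")[-1]
            items = bid_data[5].split(":")[-1]
            qnty = int(bid_data[6].split(':')[1].strip())
            dept = (bid_data[10] + bid_data[12].strip()).split(":")[-1]
            edate = bid_data[17].split("End Date:")[-1]
            rows.append([bidno, items, qnty, dept, edate])
    return rows

if __name__ == '__main__':
    with open('GEM.csv', 'w', newline='') as out:
        writer = csv.writer(out)
        writer.writerow(['Bidnumber', 'Items', 'Quantitiy', 'Department', 'Enddate'])
        # 10 worker threads fetch pages concurrently (assumed count);
        # pool.map returns the per-page results in page order.
        with ThreadPoolExecutor(max_workers=10) as pool:
            for page_rows in pool.map(fetch_page, range(1, 910)):
                writer.writerows(page_rows)

Writing only from the main thread keeps the CSV output safe without any locking, but I am not sure how many concurrent requests the site will tolerate, so the worker count may need tuning.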