Excuse the title. This is just a very strange error I am getting. When I try to pull two xls files and one csv file into a dataframe I get this error:
FileNotFoundError: [Errno 2] No such file or directory: '.\\pa-dirty-price-crawler\\goldman_folder\\Trade_Detail_GSCO_GREAT_AJAX_OPERATING_PARTNERSHIP_L.P._COB08_Apr_2020_19_1586111993954.xls'
That error occurs when I do this:
nomura = get_nomura_df_by_date(start_date)
jpm = get_jpm_df_by_date(start_date)
gs = get_goldman_df_by_date(start_date)
Now if I comment out either nomura or jpm then there is no error at all. In other words if I do this
# nomura = get_nomura_df_by_date(start_date)
jpm = get_jpm_df_by_date(start_date)
gs = get_goldman_df_by_date(start_date)
Then I receive no error at all which is weird. The xls and csv files are all in separate folders in my workspace, i.e., I have one folder for goldman, one for nomura, and one for jpm.
Here is the entire code:
import win32com.client
import os, zipfile
import pandas as pd
import pyodbc
import sql_utils as sql
import datetime as dt
import time
import xlrd
output_file = r"C:\Users\morgan.weiss\workspace\pa-dirty-price-crawler\output\All_Bond_Repos.xlsx"
def get_jpm_zip():
Outlook = win32com.client.Dispatch("Outlook.Application")
olNs = Outlook.GetNamespace("MAPI")
Inbox = olNs.GetDefaultFolder(6)
Filter = ("@SQL=" + chr(34) + "urn:schemas:httpmail:subject" +
chr(34) + " Like '%JPMS Statement%' AND " +
chr(34) + "urn:schemas:httpmail:hasattachment" +
chr(34) + "=1")
Items = Inbox.Items.Restrict(Filter)
dates = []
for a in Items:
dates.append(a.senton.date())
for Item in Items:
for attachment in Item.Attachments:
attachment.SaveAsFile(r"C:\\Users\\morgan.weiss\\workspace\\pa-dirty-price-crawler\\jpm_folder\\" + attachment.FileName)
return dates
def get_nomura_csv():
Outlook = win32com.client.Dispatch("Outlook.Application")
olNs = Outlook.GetNamespace("MAPI")
Inbox = olNs.GetDefaultFolder(6)
Filter = ("@SQL=" + chr(34) + "urn:schemas:httpmail:subject" +
chr(34) + " Like '%Nomura (NSI) Repo%' AND " +
chr(34) + "urn:schemas:httpmail:hasattachment" +
chr(34) + "=1")
Items = Inbox.Items.Restrict(Filter)
dates = []
for a in Items:
dates.append(a.senton.date())
dates_attachment_map = {}
i = 0
for Item in Items:
for attachment in Item.Attachments:
dates_attachment_map[dates[i]] = attachment.FileName
i+=1
attachment.SaveAsFile(r"C:\\Users\\morgan.weiss\\workspace\\pa-dirty-price-crawler\\nomura_folder\\" + attachment.FileName)
return dates_attachment_map
def get_goldman_csv():
Outlook = win32com.client.Dispatch("Outlook.Application")
olNs = Outlook.GetNamespace("MAPI")
Inbox = olNs.GetDefaultFolder(6)
Filter = ("@SQL=" + chr(34) + "urn:schemas:httpmail:subject" +
chr(34) + " Like '%Repo Margin Summary from GOLDMAN SACHS%' AND " +
chr(34) + "urn:schemas:httpmail:hasattachment" +
chr(34) + "=1")
Items = Inbox.Items.Restrict(Filter)
dates = []
for a in Items:
dates.append(a.senton.date())
dates_attachment_map = {}
i = 0
for Item in Items:
for attachment in Item.Attachments:
if 'Trade_Detail_GSCO_GREAT_AJAX' in attachment.FileName:
dates_attachment_map[dates[i]] = attachment.FileName
i+=1
attachment.SaveAsFile(r"C:\\Users\\morgan.weiss\\workspace\\pa-dirty-price-crawler\\goldman_folder\\" + attachment.FileName)
return dates_attachment_map
def unzip_jpm_files():
jpm = get_jpm_zip()
dir_name = r'C:\Users\morgan.weiss\workspace\pa-dirty-price-crawler\jpm_folder'
extension = ".zip"
os.chdir(dir_name) # change directory from working dir to dir with files
jpm_map = {}
i = 0
for item in os.listdir(dir_name): # loop through items in dir
if item.endswith(extension): # check for ".zip" extension
file_name = os.path.abspath(item) # get full path of files
zip_ref = zipfile.ZipFile(file_name) # create zipfile object
zip_ref.extractall(dir_name, pwd=b'qpsqpwsr') # extract file to dir
zip_ref.close() # close file
os.remove(file_name) # delete zipped file
for item in os.listdir(dir_name):
if item.endswith(".xls"):
file_name = os.path.abspath(item)
jpm_map[jpm[i]] = file_name
i+=1
return jpm_map
def get_jpm_data_frames(path_name):
wb = xlrd.open_workbook(path_name, logfile=open(os.devnull, 'w'))
df = pd.read_excel(wb,skiprows=4,engine='xlrd')
return df
def get_nomura_data_frames(file_name):
dir_name = '.\\pa-dirty-price-crawler\\nomura_folder'
path_name = os.path.join(dir_name, file_name)
df = pd.read_csv(path_name)
return df
def get_gs_data_frames(file_name):
dir_name = '.\\pa-dirty-price-crawler\\goldman_folder'
path_name = os.path.join(dir_name, file_name)
print(os.path.isfile(path_name))
df = pd.read_excel(path_name,skiprows=9, nrows=12)
return df
def get_data_from_sql():
cnxn = pyodbc.connect(sql.connection_string)
df = pd.read_sql(sql.get_bond_repos,con=cnxn)
return df
def compare_dates(mail_date, date_time):
return mail_date.year == date_time.year and mail_date.month == date_time.month and mail_date.day == date_time.day
def get_jpm_df_by_date(date):
jpm_map = unzip_jpm_files()
time.sleep(10)
jpm_df = pd.DataFrame()
jpm_df = get_jpm_data_frames(jpm_map[date])
return jpm_df
def get_nomura_df_by_date(date):
nomura_map = get_nomura_csv()
time.sleep(10)
nomura_df = pd.DataFrame()
nomura_df = get_nomura_data_frames(nomura_map[date])
return nomura_df
def get_goldman_df_by_date(date):
goldman_map = get_goldman_csv()
time.sleep(10)
goldman_df = pd.DataFrame()
goldman_df = get_gs_data_frames(goldman_map[date])
return goldman_df
def edit_isin(df):
df['ISIN'] = df['ISIN'].apply(lambda x: x[2:])
df['ISIN'] = df['ISIN'].apply(lambda x: x[:-1])
return df
x = '2020-04-09'
start_date = dt.datetime.strptime(x, "%Y-%m-%d")
start_date = start_date.date()
df = get_data_from_sql()
# nomura = get_nomura_df_by_date(start_date)
gs = get_goldman_df_by_date(start_date)
jpm = get_jpm_df_by_date(start_date)
# gs = edit_isin(gs)
# df = df.set_index('Cusip').join(gs.set_index('ISIN'))
# print(df.head())
# df.to_excel(output_file, index=False)
Note that I took out my name and put "usr_name" in the post. I am baffled by this error and I have no idea why it's breaking.
EDIT:
I am starting to identify the issue. When I just call the function to get the zip folders then the code runs fine, the issue occurs when I unzip the folders when I am trying to get all the dataframes. Perhaps that may add some details that you guys could point out for a solution.