0

I need to perform pandas DataFrame operations on multiple files:

df1 = pd.read_csv("~/pathtofile/sample1.csv")
some_df=pd.read_csv("~/pathtofile/metainfo.csv")
df1.sort_values('col2')
df1 = df1[df1.col5 != 'N']
df1['new_col'] = df1['col3'] - df1['col2'] + 1
f = lambda row: '{col1}:{col2}-{col3}({col4})'.format(**row)
df1.astype(str).apply(f,1)
df4 = df1.assign(Unique=df1.astype(str).apply(f,1))
# print(df4)
##merge columns 
df44 = df4.merge(some_df, left_on='genes', right_on='name', suffixes=('','_1'))
df44 = df44.rename(columns={'id':'id_new'}).drop(['name_1'], axis=1)
# print(df44)
df44['some_col'] = df44['some_col'] + ':E' + 
df44.groupby('some_col').cumcount().add(1).astype(str).str.zfill(3)
print(df44)
##drop unwanted columns adapted from http://stackoverflow.com/questions/13411544/delete-column-from-pandas-dataframe
df4 = df44
df4.drop(df4.columns[[3,7,9,11,12,13]], axis=1, inplace=True)

df4 = df4[['col0', 'col1', 'col2', 'col4', 'col5', 'col6', 'col8']]
df4
df4.to_csv('foo.csv', index=False)

The code above handles just one file. A few questions: 1) I have ~15 files on which I need to perform this set of commands — how can I apply it to all 15 files? 2) How do I write the results to 15 different CSVs? 3) How can I merge certain columns from all 15 DataFrames into a matrix (for example, just merging 3 DataFrames)?

sample1 = pd.DataFrame.set_index(df4,['col1'])["col4"]
sample2 = pd.DataFrame.set_index(df5,['col1'])["col4"]
sample3 = pd.DataFrame.set_index(df6, ['col1'])["col4"]
concat =  pd.concat([sample1,sample2,sample3], axis=1).fillna(0)
# print(concat)
concat.reset_index(level=0, inplace=True)
concat.columns = ["newcol0", "col1", "col2", "col3"]
concat.to_csv('bar.csv', index=False)

Is there a better way to do this than copy-pasting it 15 times?

1 Answer

1

Well I just quickly put this together for the aforementioned code. I would suggest learning how to write scripts and generalize things. I didn't clean up the code or take out redundancies, I will leave that up to you. This should work from the command line if the code you posted works.

import sys
import pandas as pd

def load_df(input_file):
    df = pd.DataFrame(pd.read_csv(input_file))
    return df

def perform_operations(df):
    df.sort_values('col2')
    df = df[df.col5 != 'N']
    df['new_col'] = df['col3'] - df['col2'] + 1
    f = lambda row: '{col1}:{col2}-{col3}({col4})'.format(**row)
    df.astype(str).apply(f,1)
    df4 = df.assign(Unique=df.astype(str).apply(f,1))
    return df4

def merge_stuff(df, df1):
    df44 = df.merge(df1, left_on='genes', right_on='name', suffixes=('','_1'))
    df44 = df44.rename(columns={'id':'id_new'}).drop(['name_1'], axis=1)
    return df44


def group_and_drop(df):
    df['some_col'] = df['some_col'] + ':E' + 
    df.groupby('some_col').cumcount().add(1).astype(str).str.zfill(3)
    df4 = df
    df4.drop(df4.columns[[3,7,9,11,12,13]], axis=1, inplace=True)
    return df4

def write_out_csv(df):
    df = df[['col0', 'col1', 'col2', 'col4', 'col5', 'col6', 'col8']]
    df.to_csv('foo.csv', index=False)


def main():
    file_1 = sys.argv[1]
    file_2 = sys.argv[2]
    df = load_df(file_1)
    df1 = load_df(file_2)
    df4 = perform_operations(df)
    df44 = merge_stuff(df4, df1)
    grouped = group_and_drop(df44)
    write_out_csv(grouped)

if __name__ == '__main__':
    main() 
gold_cy
  • 13,648
  • 3
  • 23
  • 45