
I'm totally new to Python (and especially to parallel computing), and I recently got a task to count all words, count the unique words, and find the top 10 most frequent words in a given dataset (it contains 3 columns and 10k rows) in a parallel manner.

Since I'm completely new, I decided to take the easiest route and used the pandarallel package. The problem is that both code versions return results for each chunk separately, but not the totals for the whole dataset... How could I solve this problem?
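For reference, this is roughly the whole-dataset calculation I'm trying to reproduce in parallel: a minimal, non-parallel sketch using collections.Counter. The file path is the one from my scripts, and I'm assuming the text columns can simply be joined per row and split on spaces.

import pandas as pd
from collections import Counter

df = pd.read_csv("C:\\Users\\el ruchenzo\\Downloads\\covid_abstracts.csv")

# join the columns of each row into one lowercased string
text = df.astype(str).apply(' '.join, axis=1).str.lower()

# count every space-separated word across the whole dataset
counts = Counter(word for row in text for word in row.split())

print('Total words:', sum(counts.values()))
print('Unique words:', len(counts))
print('Top 10:', counts.most_common(10))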

1ST CODE VERSION WITH 3 DEFINED SEPARATE FUNCTIONS:

#Version 1

# Please, calculate how many words are unique and count each word in the data set. The separation of the word in a sentence should be done by a space character. What are the top 10 most frequent words? Please lowercase all words.

import pandas as pd
import numpy as np
from multiprocessing import Pool, cpu_count
from datetime import datetime
#from collections import Counter
import re
from time import time, perf_counter
from pandarallel import pandarallel
import matplotlib.pyplot as plt


print("Number of cpu : ", cpu_count()) #4 cpus

start = time()
# #data = pd.read_csv("C:\\Users\\el ruchenzo\\Downloads\\covid_abstracts.csv", chunksize=1000) # nrows doesn't return the dataset as a TextFileReader object
# #data = [chunk for chunk in chunks_reader] # here chunk_reader = data, but this step slows everything down; each chunk is a DataFrame
#

pandarallel.initialize(nb_workers=cpu_count())

df_iterator = pd.read_csv("C:\\Users\\el ruchenzo\\Downloads\\covid_abstracts.csv")#,
                         # chunksize=10000) # when set to 1000 it returns results per chunk instead of in total;
                                            # also, chunksize=10000 runs faster than chunksize=1000

#df = pd.concat([chunk for chunk in df_iterator])

def transform_data(data):
    #transformed_data = data.apply(' '.join, axis=1)
    # comb_data = comb_data.str.replace("  ", " ")
    transformed_data = data.replace(r'\s+', ' ', regex=True)
    transformed_data = transformed_data.str.lower()
    return transformed_data

def count_words(data):
    total_words = sum(map(len, data))
    unique_words = data.nunique()
    print(f'Dataset contains {total_words:,} words in total and {unique_words:,} of them are unique (those unique "words" are URLs to the articles).')

def bring_top10_most_frequent_words(data):
    freq_dictionary = data.str.split(" ", expand=True).stack().value_counts().reset_index()
    freq_dictionary.columns = ['Word', 'Frequency']
    freq_dictionary['Frequency'] = freq_dictionary.apply(lambda x: "{:,}".format(x['Frequency']), axis=1)
    freq_dictionary.sort_values('Frequency')
    print(f'Top 10 of most frequent words and their frequencies are listed below:\n {freq_dictionary.head(10)}')

for i, df_chunk in enumerate(df_iterator):
    pandarallel.initialize(nb_workers=cpu_count())
    df_iterator.apply(transform_data)
    df_iterator.apply(count_words)
    df_iterator.apply(bring_top10_most_frequent_words)
    #count_words(transform_data(df_chunk))
    #bring_top10_most_frequent_words(transform_data(df_chunk)) # 5.79 s, but it computes each chunk separately (chunksize=1000)

# for func in [transform_data, count_words, bring_top10_most_frequent_words]:
#     df_iterator.apply(func)
#df_iterator.apply(transform_data, count_words, bring_top10_most_frequent_words)

print(time() - start, 'seconds')

2ND CODE VERSION WITH 1 DEFINED FUNCTION:

import pandas as pd
import numpy as np
from multiprocessing import Pool, cpu_count
from datetime import datetime
#from collections import Counter
import re
from time import time, perf_counter
from pandarallel import pandarallel
import matplotlib.pyplot as plt


#print("Number of cpu : ", cpu_count()) #4 cpus

# #data = pd.read_csv("C:\\Users\\el ruchenzo\\Downloads\\covid_abstracts.csv", chunksize=1000) # nrows doesn't return the dataset as a TextFileReader object
# #data = [chunk for chunk in chunks_reader] # here chunk_reader = data, but this step slows everything down; each chunk is a DataFrame
#

# pandarallel.initialize(nb_workers=cpu_count())

#data = pd.read_csv("C:\\Users\\el ruchenzo\\Downloads\\covid_abstracts.csv")#,
                         # chunksize=10000) # when set to 1000 it returns results per chunk instead of in total;
                                            # also, chunksize=10000 runs faster than chunksize=1000

#df = pd.concat([chunk for chunk in df_iterator])

def func(data):
    #transformed_data = data.apply(' '.join, axis=1)
    # comb_data = comb_data.str.replace("  ", " ")
    transformed_data = data.replace(r'\s+', ' ', regex=True)
    transformed_data = transformed_data.str.lower()
    total_words = sum(map(len, transformed_data))
    unique_words = transformed_data.nunique()
    freq_dictionary = transformed_data.str.split(" ", expand=True).stack().value_counts().reset_index()
    freq_dictionary.columns = ['Word', 'Frequency']
    freq_dictionary['Frequency'] = freq_dictionary.apply(lambda x: "{:,}".format(x['Frequency']), axis=1)
    freq_dictionary.sort_values('Frequency')
    print(f'Dataset contains {total_words:,} words in total and {unique_words:,} of them are unique (those unique "words" are URLs to the articles).'
          f'\nTop 10 of most frequent words and their frequencies are listed below:\n {freq_dictionary.head(10)}')

# for i, df_chunk in enumerate(df_iterator):
#     count_words(transform_data(df_chunk))
#     bring_top10_most_frequent_words(transform_data(df_chunk)) # 5.79 s, but it computes each chunk separately (chunksize=1000)

# for func in [transform_data, count_words, bring_top10_most_frequent_words]:
#     df_iterator[['title', 'abstract']].apply(func)
#df_iterator.apply(transform_data, count_words, bring_top10_most_frequent_words)


start = time()

if __name__ == '__main__':
    pandarallel.initialize() #nb_workers=cpu_count()
    df = pd.read_csv("C:\\Users\\el ruchenzo\\Downloads\\covid_abstracts.csv")
    cpus_max = cpu_count()
    timings = {}
    for c in range(1, cpus_max + 1):
        start_time = perf_counter()
        df.apply(func)
        timings[c] = perf_counter() - start_time
        plt.plot(timings.keys(), timings.values())
        plt.title('Processing time vs number of CPUs')
        plt.ylabel('Processing time (seconds)')
        plt.xlabel('Number of CPUs')
        plt.show()

print(time() - start, 'seconds')