I'm currently trying to use threads to get the count of every word in a file in a parallel manner, but at the moment my code gets slower when I add even just one extra thread. I feel like the time should decrease as the threads increase until I bottleneck my CPU, and only then should the times get slower again. I don't understand why it's not acting parallel.
Here is the code:
import thread
import threading
import time
import sys
class CountWords(threading.Thread):
    """Worker thread that counts occurrences of each word in its own slice.

    Each worker receives its own (counts-dict, word-list) pair, so the
    workers never share data and the lock is never actually needed; it is
    kept only for interface compatibility.

    NOTE(review): because of CPython's GIL, this CPU-bound counting never
    runs Python bytecode in parallel across threads -- that is why adding
    threads makes the program slower, not faster.  Use multiprocessing
    for true parallelism on CPU-bound work.
    """

    def __init__(self, lock, pair):
        """pair is (counts_dict, word_list); counts are written in place.

        (The parameter was renamed from `tuple`, which shadowed the builtin.)
        """
        threading.Thread.__init__(self)
        self.lock = lock
        self.list = pair[1]
        self.dit = pair[0]

    def run(self):
        """Tally every word of self.list into self.dit."""
        for word in self.list:
            # dict.get avoids the original's `word in d.keys()`, which is
            # an O(n) list scan on Python 2.
            self.dit[word] = self.dit.get(word, 0) + 1
def getWordsFromFile(numThreads, fileName):
    """Read fileName and deal its words round-robin into numThreads lists.

    numThreads may be an int or a numeric string (it comes from sys.argv).
    Returns a list of int(numThreads) word lists of near-equal length.
    Also prints the number of buckets, as the original did.
    """
    buckets = [[] for _ in range(int(numThreads))]
    print(len(buckets))
    # splitlines() (rather than readlines()) strips the trailing "\n"s;
    # `with` guarantees the file is closed (the original leaked the handle).
    with open(fileName, "r") as f:
        words = [w for line in f.read().splitlines() for w in line.split(" ")]
    # Deal the words out round-robin so every bucket gets a fair share.
    for i, word in enumerate(words):
        buckets[i % len(buckets)].append(word)
    return buckets
def make1d(nested):
    """Flatten a list of lists one level into a single list.

    (The parameter was renamed from `list`, which shadowed the builtin.)
    """
    return [item for sub in nested for item in sub]
def printDict(dit):
    """Print the dictionary as sorted "key : count" lines.

    str.format keeps the output identical under Python 2 and 3
    (the original `print key, ":", dit[key]` is Python-2-only syntax).
    """
    for key in sorted(dit):  # iterating a dict yields its keys
        print("{0} : {1}".format(key, dit[key]))
if __name__ == "__main__":
    # Usage: script.py <numThreads> <fileName>
    print("Starting now")
    start = int(round(time.time() * 1000))

    lock = threading.Lock()
    args = sys.argv
    numThreads = args[1]
    fileName = args[2]

    # One private dict per thread, so the workers never contend on data.
    ditList = [{} for _ in range(int(numThreads))]
    wordLists = getWordsFromFile(numThreads, fileName)
    print("got words from file")

    threadList = [CountWords(lock, pair) for pair in zip(ditList, wordLists)]
    for t in threadList:
        t.start()
    # join() returns immediately for an already-finished thread, so the
    # old `isAlive()` guard (the method was removed in Python 3.9) is
    # unnecessary -- always join every worker.
    for t in threadList:
        t.join()

    # NOTE(review): CPython's GIL serializes this CPU-bound counting, so
    # adding threads only adds scheduling overhead; multiprocessing would
    # give real speedup here.
    fin = int(round(time.time() * 1000)) - start
    print("with {0} threads counting the words took : {1} ms".format(
        numThreads, fin))