import os
import re
import math
from math import log10
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer

python_file_root = './presidential_debates'

def getidf(token):
    # Document frequency: count how many files contain the token at least once.
    document_occurance = 0
    for filename in os.listdir(python_file_root):
        with open(os.path.join(python_file_root, filename), "r") as file:
            for line in file:
                if re.search(r'\b' + token + r'\b', line):
                    document_occurance = document_occurance + 1
                    break
    if document_occurance != 0:
        # idf = log10(N / df), with N = 30 documents in the corpus.
        return log10(30 / document_occurance)
    return -1

def normalize(filename, token):
    # Euclidean length of the document's tf-idf vector; each line of the
    # preprocessed file holds one stemmed token.
    counts = dict()
    square = []
    with open(os.path.join(python_file_root, filename), "r") as file:
        for line in file:
            if line in counts:
                counts[line] += 1
            else:
                counts[line] = 1
    for key, value in counts.items():
        tf = 1 + log10(value)
        idf = getidf(key.rstrip())
        square.append((tf * idf) * (tf * idf))
    return math.sqrt(sum(square))

def getweight(filename, token):
    # Normalized tf-idf weight of the token in the given document.
    hit_count1 = 0
    idft = getidf(token)
    with open(os.path.join(python_file_root, filename), "r") as file:
        for line in file:
            if re.search(r'\b' + token + r'\b', line):
                hit_count1 = hit_count1 + 1
    if hit_count1 == 0:
        return 0
    tf = 1 + log10(hit_count1)
    initial = idft * tf
    if initial <= 0:
        return 0
    normalize_fact = normalize(filename, token)
    return initial / normalize_fact

# Preprocess every file in place: lowercase, tokenize, drop stopwords,
# stem, and rewrite the file with one token per line.
for filename in os.listdir(python_file_root):
    with open(os.path.join(python_file_root, filename), "r") as file:
        doc = file.read().lower()
    tokenizer = RegexpTokenizer(r'[a-zA-Z]+')
    tokens = tokenizer.tokenize(doc)
    stoplist = stopwords.words('english')
    stop_removed = [word for word in tokens if word not in stoplist]
    with open(os.path.join(python_file_root, filename), "w") as f:
        for item in stop_removed:
            stemmer = PorterStemmer()
            f.write("%s\n" % stemmer.stem(item))
print("\nIDF\n")
print("%.12f" % getidf("health"))
print("%.12f" % getidf("agenda"))
print("%.12f" % getidf("vector"))
print("%.12f" % getidf("reason"))
print("%.12f" % getidf("hispan"))
print("%.12f" % getidf("hispanic"))
print("\n")
print("%.12f" % getweight("2012-10-03.txt","health"))
print("%.12f" % getweight("1960-10-21.txt","reason"))
print("%.12f" % getweight("1976-10-22.txt","agenda"))
print("%.12f" % getweight("2012-10-16.txt","hispan"))
print("%.12f" % getweight("2012-10-16.txt","hispanic"))

I have 30 txt files, and I have written a program that computes the idf values and the normalized tf-idf vectors. I am getting the correct values, but the getweight function takes more than 15 minutes to produce its output. Can anyone suggest a few ways to optimize it? I do not want to use any non-standard Python packages.

  • I believe this might be better suited for https://codereview.stackexchange.com/. – Arnav Borborah Oct 01 '18 at 14:41
  • Not a problem, but you can change `hit_count1 = hit_count1 + 1` to just `hit_count1 += 1`. – Jacobr365 Oct 01 '18 at 14:45
  • You need to first profile your script and determine where it's spending the significant portions of its time. This can be done with a standard Python library. See [How can you profile a script?](https://stackoverflow.com/questions/582336/how-can-you-profile-a-script) – martineau Oct 01 '18 at 15:00

1 Answer


Why do you create a new PorterStemmer for every word?
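
For example, a minimal sketch of moving that out of the loop; the names python_file_root, filename, and stop_removed are taken from the preprocessing loop in the question, so this is not a drop-in replacement:

from nltk.stem.porter import PorterStemmer

stemmer = PorterStemmer()  # create one stemmer and reuse it for every word
with open(os.path.join(python_file_root, filename), "w") as f:
    for item in stop_removed:
        f.write("%s\n" % stemmer.stem(item))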

Apart from this obvious issue, try profiling your code. NLTK has a reputation for being really slow, so it may well not be your fault. If you profile, you'll know.
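
For example, a minimal sketch using the standard-library cProfile module on one of the slow calls (getweight and its arguments come from your script):

import cProfile

# Sort by cumulative time to see which functions dominate the runtime.
cProfile.run('getweight("2012-10-03.txt", "health")', sort="cumulative")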

– Has QUIT--Anony-Mousse