#Reading files with txt extension
def get_sentences():
for root, dirs, files in os.walk("/Users/Documents/test1"):
for file in files:
if file.endswith(".txt"):
x_ = codecs.open(os.path.join(root,file),"r", "utf-8-sig")
for lines in x_.readlines():
yield lines
formoreprocessing = get_sentences()
#Tokenizing sentences of the text files
from nltk.tokenize import sent_tokenize
for i in formoreprocessing:
raw_docs = sent_tokenize(i)
tokenized_docs = [sent_tokenize(i) for sent in raw_docs]
'''Removing Stop Words'''
stopword_removed_sentences = []
from nltk.corpus import stopwords
stopset = set(stopwords.words("English"))
def strip_stopwords(sentence):
return ' '.join(word for word in sentence.split() if word not in stopset)
stopword_removed_sentences = (strip_stopwords(sentence) for sentence in raw_docs)
print(stopword_removed_sentences)
The above code is not printing what it is supposed to be. Instead it is throwing: at 0x1193417d8> as output. What is the mistake here? I am using python 3.5.