I am trying to parse several hundred sentences into their syntax trees, and I need to do that fast. The problem is that if I use NLTK, I have to define a grammar, and I can't know the grammar in advance; all I know is that the text will be English. I tried using this statistical parser, and it works great for my purposes, but the speed could be a lot better. Is there a way to use NLTK parsing without a grammar? In the snippet below I am using a processing pool to do the work in "parallel", but the speed still leaves a lot to be desired.
import pickle
import re
from stat_parser.parser import Parser
from multiprocessing import Pool
import HTMLParser


def multy(a):
    # parser is created in the parent before the Pool is built, so the
    # forked workers inherit it as a module-level global.
    global parser
    # Pull out the first sentence-like chunk; fall back to the whole line.
    lst = re.findall(r'(\S.+?[.!?])(?=\s+|$)', a[1])
    if len(lst) == 0:
        lst.append(a[1])
    try:
        ssd = parser.norm_parse(lst[0])
    except Exception:
        ssd = ['NNP', 'nothing']
    # Each worker appends its pickled result, wrapped in markers, to the
    # same output file.
    with open('/var/www/html/internal', 'a') as f:
        f.write("[[ss")
        pickle.dump([a[0], ssd], f)
        f.write("ss]]")


if __name__ == '__main__':
    parser = Parser()
    with open('/var/www/html/interface') as f:
        data = f.read()
    data = data.split("\n")
    # One worker process per input line.
    p = Pool(len(data))
    Totalis_dict = dict()
    listed = list()
    h = HTMLParser.HTMLParser()
    # Truncate the output file before the workers start appending to it.
    with open('/var/www/html/internal', 'w') as f:
        f.write("")
    for ind, each in enumerate(data):
        # Strip non-ASCII characters and unescape HTML entities.
        listed.append([str(ind), h.unescape(re.sub('[^\x00-\x7F]+', '', each))])
    p.map(multy, listed)
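
For reference, here is a stripped-down sketch of the same pipeline as I understand it, with a fixed-size pool (one worker per CPU instead of one per input line), each worker building its own Parser in an initializer, and all results collected in the parent so only one process writes the output file. It is just a sketch of the approach, not tested at scale: it assumes stat_parser's Parser and its norm_parse method exactly as used above, and it leaves out the HTML unescaping and non-ASCII stripping for brevity.

import re
import pickle
from multiprocessing import Pool, cpu_count

from stat_parser.parser import Parser

parser = None


def init_worker():
    # Each worker process builds its own Parser once at startup, instead of
    # relying on a parser inherited from the parent at fork time.
    global parser
    parser = Parser()


def parse_line(item):
    ind, text = item
    # Same sentence extraction as above; fall back to the whole line.
    sentences = re.findall(r'(\S.+?[.!?])(?=\s+|$)', text) or [text]
    try:
        tree = parser.norm_parse(sentences[0])
    except Exception:
        tree = ['NNP', 'nothing']
    return ind, tree


if __name__ == '__main__':
    with open('/var/www/html/interface') as f:
        lines = f.read().split('\n')
    items = [(str(i), line) for i, line in enumerate(lines)]
    pool = Pool(cpu_count(), initializer=init_worker)
    results = pool.map(parse_line, items)
    # Single writer: the parent serializes all results after the pool finishes,
    # so workers never contend for the output file.
    with open('/var/www/html/internal', 'w') as f:
        for ind, tree in results:
            f.write('[[ss')
            pickle.dump([ind, tree], f)
            f.write('ss]]')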