I have been trying to understand the logic used by the beam-search algorithm in automatic speech recognition for the decoding part. The papers I've tried to follow are First-Pass Large Vocabulary Continuous Speech Recognition using Bi-Directional Recurrent DNNs, Lexicon-Free Conversational Speech Recognition with Neural Networks and Towards End-to-End Speech Recognition with Recurrent Neural Networks. The problem is that the idea behind the algorithm is not so easy to follow and there are a lot of typos in the pseudo-code provided in the papers. Also, this implementation from the second paper is incredible hard to follow and this one, from the last paper mentioned, doesn't includes a Language Model.
This is my implementation in Python, which fails because of some missing probabilities:
class BeamSearch(object):
"""
Decoder for audio to text.
From: https://arxiv.org/pdf/1408.2873.pdf (hardcoded)
"""
def __init__(self, alphabet='" abcdefghijklmnopqrstuvwxyz'):
# blank symbol plus alphabet
self.alphabet = '-' + alphabet
# index of each char
self.char_to_index = {c: i for i, c in enumerate(self.alphabet)}
def decode(self, probs, k=100):
"""
Decoder.
:param probs: matrix of size Windows X AlphaLength
:param k: beam size
:returns: most probable prefix in A_prev
"""
# List of prefixs, initialized with empty char
A_prev = ['']
# Probability of a prefix at windows time t to ending in blank
p_b = {('', 0): 1.0}
# Probability of a prefix at windows time t to not ending in blank
p_nb = {('', 0): 0.0}
# for each time window t
for t in range(1, probs.shape[0] + 1):
A_new = []
# for each prefix
for s in Z:
for c in self.alphabet:
if c == '-':
p_b[(s, t)] = probs[t-1][self.char_to_index[self.blank]] *\
(p_b[(s, t-1)] +\
p_nb[(s, t-1)])
A_new.append(s)
else:
s_new = s + c
# repeated chars
if len(s) > 0 and c == s[-1]:
p_nb[(s_new, t)] = probs[t-1][self.char_to_index[c]] *\
p_b[(s, t-1)]
p_nb[(s, t)] = probs[t-1][self.char_to_index[c]] *\
p_b[(s, t-1)]
# spaces
elif c == ' ':
p_nb[(s_new, t)] = probs[t-1][self.char_to_index[c]] *\
(p_b[(s, t-1)] +\
p_nb[(s, t-1)])
else:
p_nb[(s_new, t)] = probs[t-1][self.char_to_index[c]] *\
(p_b[(s, t-1)] +\
p_nb[(s, t-1)])
p_nb[(s, t)] = probs[t-1][self.char_to_index[c]] *\
(p_b[(s, t-1)] +\
p_nb[(s, t-1)])
if s_new not in A_prev:
p_b[(s_new, t)] = probs[t-1][self.char_to_index[self.blank]] *\
(p_b[(s, t-1)] +\
p_nb[(s, t-1)])
p_nb[(s_new, t)] = probs[t-1][self.char_to_index[c]] *\
p_nb[(s, t-1)]
A_new.append(s_new)
A = A_new
s_probs = map(lambda x: (x, (p_b[(x, t)] + p_nb[(x, t)])*len(x)), A_new)
xs = sorted(s_probs, key=lambda x: x[1], reverse=True)[:k]
Z, best_probs = zip(*xs)
return Z[0], best_probs[0]
Any help will be really appreciated.