I am new to this field and am trying to re-run an example of LSTM code copied from the internet. The reported accuracy of the LSTM model is always 0.2, but the predicted output is essentially correct, which suggests the accuracy should be 1. Could anyone tell me why?
"""Fit a tiny LSTM to echo a short 0..1 ramp sequence (a regression demo).

Answer to the question in the header: 'accuracy' is a CLASSIFICATION metric.
On a regression target it compares float predictions to float targets for
exact equality, so the reported value (e.g. 0.2) is meaningless even when
the predictions look correct. Track the MSE loss or MAE instead.
"""
from numpy import array
from keras.models import Sequential
# BUG FIX: Dense and LSTM are layers, not models — importing them from
# keras.models raises an ImportError.
from keras.layers import Dense, LSTM

# Training data: `length` evenly spaced values in [0, 1).
length = 5
seq = array([i / float(length) for i in range(length)])
print(seq)
# LSTM input must be (samples, timesteps, features); targets are (samples, 1).
X = seq.reshape(length, 1, 1)
y = seq.reshape(length, 1)

# define LSTM configuration
n_neurons = length
# Only `length` samples exist, so a batch size of 1000 was silently clipped
# to 5 anyway — state the real batch size explicitly.
n_batch = length
n_epoch = 1000

# create LSTM
model = Sequential()
model.add(LSTM(n_neurons, input_shape=(1, 1)))
model.add(Dense(1))
# BUG FIX: replaced metrics=['accuracy'] (meaningless for regression — see
# module docstring) with mean absolute error, a proper regression metric.
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae'])

# train LSTM
model.fit(X, y, epochs=n_epoch, batch_size=n_batch)  # , verbose=2)

# evaluate() returns [loss, mae] because of the metrics list above.
# (The original line was also missing its closing parenthesis — SyntaxError.)
train_loss, train_mae = model.evaluate(X, y)
print('Training set MAE:', train_mae)

result = model.predict(X, batch_size=n_batch, verbose=0)
for value in result:
    # Each row of `result` is a length-1 array; index it so the %-format
    # receives a scalar (formatting a 1-element ndarray is deprecated).
    print('%.1f' % value[0])