I built a CNN autoencoder in Keras. My data (not included here) consist of 2000 samples of 501 points each; I split them into 1500 samples for training and 500 for testing. I want to save the decoder part of the trained model. This is my code:
from tensorflow.keras.layers import Input, Dense, BatchNormalization, Flatten, Lambda, Activation, Conv1D, MaxPooling1D, UpSampling1D, Reshape
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers
import sys
import matplotlib.pyplot as plt
import numpy as np
import copy
# read data (not shown): data should hold the 2000 samples of 501 points each
data = ...  # placeholder for the real data loading
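# (for a self-contained test, random values of the same shape could stand in here:
#  data = np.random.rand(2000, 501).astype('float32') -- purely a placeholder, not my real data)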
# shuffle
import random
random.seed(4)
random.shuffle(data)
# split train/test
X_train = data[:1500]
X_test = data[1500:]
# reshaping for CNN
X_training = np.reshape(X_train, [1500, 501, 1])
X_testing = np.reshape(X_test, [500, 501, 1])
# normalize input
X_mean = X_training.mean()
X_training -= X_mean
X_std = X_training.std()
X_training /= X_std
X_testing -= X_mean
X_testing /= X_std
### MODEL ###
# ENCODER
input_sig = Input(batch_shape=(None,501,1))
x = Conv1D(256,3, activation='tanh', padding='valid')(input_sig)
x1 = MaxPooling1D(2)(x)
x2 = Conv1D(32,3, activation='tanh', padding='valid')(x1)
x3 = MaxPooling1D(2)(x2)
flat = Flatten()(x3)
encoded = Dense(32,activation = 'tanh')(flat)
# DECODER
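# note: the decoder branch below takes x3 (the output of the second pooling layer) as its input, not the 'encoded' bottleneck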
x2_ = Conv1D(32, 3, activation='tanh', padding='valid')(x3)
x1_ = UpSampling1D(2)(x2_)
x_ = Conv1D(256, 3, activation='tanh', padding='valid')(x1_)
upsamp = UpSampling1D(2)(x_)
flat_dec = Flatten()(upsamp)
decoded = Dense(501)(flat_dec)
decoded = Reshape((501,1))(decoded)
autoencoder = Model(input_sig, decoded)
autoencoder.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
### TRAINING ###
epochs = 50
batch_size = 100
validation_split = 0.2
# train the model
history = autoencoder.fit(x=X_training, y=X_training,
                          epochs=epochs,
                          batch_size=batch_size,
                          validation_split=validation_split)
# build a standalone decoder model (this is the line that raises the error below)
decoder = Model(inputs=encoded, outputs=decoded, name='decoder')
# save decoder
decoder.save('decoder.hdf5')
This is the warning and the error I get:
W1013 12:08:17.131777 140693540189952 network.py:1619] Model inputs must come from `tf.keras.Input` (thus holding past layer metadata), they cannot be the output of a previous non-Input layer. Here, a tensor specified as input to "decoder" was not an Input tensor, it was generated by layer dense.
Note that input tensors are instantiated via `tensor = tf.keras.Input(shape)`.
The tensor that caused the issue was: dense/Tanh:0
Traceback (most recent call last):
File "Autoenc_CNN_ISOTROPIC_oscillations.py", line 191, in <module>
decoder = Model(inputs=encoded, outputs=decoded, name='decoder')
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/training.py", line 122, in __init__
super(Model, self).__init__(*args, **kwargs)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/network.py", line 138, in __init__
self._init_graph_network(*args, **kwargs)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/training/tracking/base.py", line 456, in _method_wrapper
result = method(self, *args, **kwargs)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/network.py", line 284, in _init_graph_network
self.inputs, self.outputs)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/network.py", line 1814, in _map_graph_network
str(layers_with_complete_input))
ValueError: Graph disconnected: cannot obtain value for tensor Tensor("input_1:0", shape=(None, 501, 1), dtype=float32) at layer "input_1". The following previous layers were accessed without issue: []
How should I adjust the line
decoder = Model(inputs=encoded, outputs=decoded, name='decoder')
so that the inputs are valid and I can save the trained decoder?
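From the warning I gather that a standalone Keras model needs its own tf.keras.Input tensor. Below is a rough sketch of what I picture (the shape (123, 32) is my estimate of the shape of x3, where the decoder branch starts), but I don't see how to connect such an input to the layers that were already trained inside autoencoder:
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

# sketch only: a fresh input with the shape the decoder branch expects
decoder_input = Input(shape=(123, 32))  # (123, 32) is my estimate of x3's shape
# ... somehow apply the trained decoder layers to decoder_input ...
# decoder = Model(inputs=decoder_input, outputs=..., name='decoder')
# decoder.save('decoder.hdf5')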