I am trying to implement a neural network model in Keras, but I am running into a dimensionality issue. According to the model architecture, the last (fully connected) layer should output a single value per sample, but instead I am getting 2-D data (one value per timestep) out of it.
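As far as I can tell, Dense only maps the last axis of its input, so when it is applied to a 3-D tensor the time axis is kept. A minimal standalone sketch (not from the paper) that shows this behavior:

from tensorflow.keras import layers, Input

# Dense acts on the feature axis only, so a 3-D input stays 3-D.
t = Input(shape=(250, 512))      # (batch, timesteps, features)
print(layers.Dense(1)(t).shape)  # -> (None, 250, 1), not (None, 1)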
I am trying to implement Figure 4 from the paper. My implementation is as follows:
import numpy as np
from tensorflow.keras import layers, Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
def get_model(vocabulary_size, embedding_dim, input_length, summary=True):
    inputs = layers.Input(shape=(input_length,))
    x = layers.Embedding(vocabulary_size, embedding_dim)(inputs)

    # Four parallel branches that differ only in their convolution kernel size.
    branches = []
    for kernel_size in (3, 5, 7, 9):
        branch = layers.Conv1D(128, kernel_size=kernel_size, padding='same',
                               kernel_regularizer=l2(0.01), activation='relu')(x)
        branch = layers.MaxPool1D(pool_size=2)(branch)
        branch = layers.Dropout(0.5)(branch)
        branch = layers.BatchNormalization()(branch)
        # return_sequences=True keeps the time axis, so each branch stays 3-D:
        # (batch, input_length // 2, 128)
        branch = layers.LSTM(128, return_sequences=True)(branch)
        branches.append(branch)

    concat = layers.concatenate(branches, name='Concatenate')
    outputs = layers.Dense(1, activation='sigmoid')(concat)

    model = Model(inputs, outputs)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=1e-3),
                  metrics=['accuracy'])
    if summary:
        model.summary()
    return model
EMBEDDING_DIM = 32
VOCABULARY_SIZE = 5000
SEQ_LENGTH = 500

my_model = get_model(VOCABULARY_SIZE, EMBEDDING_DIM, SEQ_LENGTH)
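Checking the model's output shape confirms the extra time axis (the 500 input timesteps, halved to 250 by the pooling layers):

print(my_model.output_shape)  # -> (None, 250, 1), but I expected (None, 1)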