I want my neural network's output to be either 0 or 1, not probabilities between 0 and 1.
To that end I designed a step function for the output layer: it should just round off the output of the previous (softmax) layer, i.e. convert the probabilities into 0s and 1s.
My custom function is not giving the expected results. Kindly help.
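To make the expected behavior concrete, here is the thresholding I have in mind, sketched in NumPy (the values are illustrative):

import numpy as np

probs = np.array([0.3, 0.7])       # example softmax output
hard = (probs > 0.5).astype(int)   # step at 0.5 -> array([0, 1])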
My code is:
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout
from keras import backend as K

# Custom activation function: round the previous layer's output to 0/1
@tf.custom_gradient
def custom_activation(x):
    print("tensor ", x)
    ones = tf.ones(tf.shape(x), dtype=x.dtype.base_dtype)
    zeros = tf.zeros(tf.shape(x), dtype=x.dtype.base_dtype)

    def grad(dy):
        # Straight-through gradient: pass the upstream gradient unchanged
        return dy

    print(" INSIDE ACTIVATION FUNCTION ")
    return K.switch(x > .5, ones, zeros), grad
model = Sequential()
model.add(Dense(32, input_dim=a, activation='relu'))  # a = number of input features
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(2, activation='softmax'))
model.add(Activation(custom_activation, name='custom_activation'))  # output layer
### Compile the model
model.compile(loss="binary_crossentropy", optimizer='adam', metrics=["accuracy"])
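For reference, here is a minimal standalone check of the activation's forward pass, assuming TensorFlow 2.x where eager execution lets the function be called directly (the sample values are illustrative):

# Sanity-check custom_activation on a hand-made tensor
sample = tf.constant([[0.3, 0.7], [0.9, 0.1]])
print(custom_activation(sample))  # expect [[0., 1.], [1., 0.]]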