I tried batch normalization on the toy set [[1, 2], [5, 4]]. Normalizing along axis=0 (per column, using the sample standard deviation, ddof=1), we should get
#[[-1/sqrt(2), -1/sqrt(2)], [1/sqrt(2), 1/sqrt(2)]]
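As a sanity check on that hand computation, here is a minimal NumPy sketch (assuming per-column normalization; note that batch norm itself uses the biased variance, ddof=0, which would give [[-1, -1], [1, 1]] instead):

import numpy as np

X = np.array([[1., 2.], [5., 4.]])
mean = X.mean(axis=0)                 # per-column mean: [3., 3.]
std_sample = X.std(axis=0, ddof=1)    # sample std: [2.828, 1.414]
std_biased = X.std(axis=0, ddof=0)    # biased std: [2., 1.]
print((X - mean) / std_sample)        # [[-0.7071, -0.7071], [0.7071, 0.7071]]
print((X - mean) / std_biased)        # [[-1., -1.], [1., 1.]]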
However, BatchNormalization(axis=0) and BatchNormalization(axis=1) both give a result I did not expect.
import tensorflow as tf
from tensorflow import keras

X = tf.constant([[1, 2], [5, 4]], dtype=tf.float32)
layer = keras.layers.BatchNormalization(axis=0)  # axis belongs on the layer
hidden = layer(X)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initializer takes no arguments
    print(sess.run(layer.trainable_weights))
    print(sess.run(hidden))
#results
#[array([1., 1.], dtype=float32), array([0., 0.], dtype=float32)]
#[[0.9995004 4.997502 ]
# [1.9990008 3.9980016]]
X = tf.constant([[1, 2], [5, 4]], dtype=tf.float32)
layer = keras.layers.BatchNormalization(axis=1)  # same as the default axis=-1 for 2D input
hidden = layer(X)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(layer.trainable_weights))
    print(sess.run(hidden))
#results
#[array([1., 1.], dtype=float32), array([0., 0.], dtype=float32)]
#[[0.9995004 4.997502 ]
# [1.9990008 3.9980016]]
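Notably, every printed value is just an input value scaled by about 0.9995 = 1/sqrt(1 + 0.001). That is exactly gamma * (x - moving_mean) / sqrt(moving_variance + epsilon) + beta with moving_mean = 0, moving_variance = 1 and epsilon = 0.001 (the tf.keras default, if I read the docs correctly), as if the layer were running in inference mode on its freshly initialized moving statistics. A quick check:

import numpy as np

eps = 1e-3                               # default epsilon of BatchNormalization
print(1.0 / np.sqrt(1.0 + eps))          # 0.99950037..., matches the output above
print(np.array([[1., 2.], [5., 4.]]) / np.sqrt(1.0 + eps))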
gamma = 1 and beta = 0, as trainable_weights shows, so the scale and shift do not explain the output. How does this layer work, then?
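If the reading above is right, the layer defaults to inference mode when called this way, and passing training=True should switch it to the batch statistics. Also, as far as I understand, the Keras axis argument names the feature axis that is kept, not the axis averaged over, so axis=1 (the default for 2D input) is the one that normalizes over the batch. A minimal sketch of what I would try with the axis=1 layer above (the training argument is my assumption about what controls this):

hidden_train = layer(X, training=True)   # assumption: forces use of batch statistics
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(hidden_train))
    # expected: approximately [[-1, -1], [1, 1]] (batch norm uses the biased variance)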