I need to implement an activation function that does not exist in TensorFlow. How should I do it? I have already seen this link, How to make a custom activation function with only Python in Tensorflow?, but I still don't know how to implement the new type of activation function shown in the picture: relu, leaky_relu, and a new type of relu.
Asked
Active
Viewed 2,736 times
1 Answers
2
I think this approach could serve you. I have only used functions built from TensorFlow ops, so that TensorFlow itself manages the backpropagation.
If you used plain Python functions you would have to program both the forward and the backward pass yourself. The problem is that you would then have to save the masks of the piecewise function in a "cache" (personally I do not know how that is done, and it would be interesting to know).
import numpy as np
import tensorflow as tf
def new_relu(x, k=0.2):
    """Piecewise-linear activation built entirely from TensorFlow ops.

    Computes, elementwise:
        x        for x >= 0
        k * x    for -1 <= x < 0
        0        for x < -1

    Because only tf.* ops are used, TensorFlow derives the gradient
    automatically -- no custom backward pass is needed.

    Args:
        x: Input tensor (float32).
        k: Slope of the [-1, 0) segment. A Python float, or a tensor
           of the same shape as x (see the tensor-valued example below).

    Returns:
        A tensor with the same shape as x.
    """
    # 0/1 region masks; multiplying by a mask selects that region's branch.
    part_1 = tf.to_float(tf.math.less_equal(0.0, x))  # x >= 0
    part_2 = tf.to_float(tf.math.logical_and(
        tf.math.less_equal(-1.0, x), tf.math.less(x, 0.0)))  # -1 <= x < 0
    # The x < -1 region contributes 0, so its mask is not needed.
    # (The original computed a part_3 mask and multiplied it by zero.)
    return part_1 * x + part_2 * x * k
def new_relu_test():
    """Numerically verify the gradient of new_relu on random data."""
    # Large magnitudes exercise every branch of the piecewise function.
    sample = tf.random_normal([10]) * 10000
    activated = new_relu(sample)
    with tf.Session():
        # Compares the analytic gradient against a finite-difference estimate.
        grad_err = tf.test.compute_gradient_error(sample, [10], activated, [10])
        print(grad_err)
# Demo: new_relu plugged straight in as a dense layer's activation.
inputs = tf.placeholder(shape=[None, 3], dtype=tf.float32)
dense_out = tf.layers.dense(inputs, 3, activation=new_relu)
EDIT: If you want the second parameter to be a tensor too, it must be the same size as the input.
import numpy as np
import tensorflow as tf
def new_relu(x, k=0.2):
    """Piecewise-linear activation built entirely from TensorFlow ops.

    Computes, elementwise:
        x        for x >= 0
        k * x    for -1 <= x < 0
        0        for x < -1

    Because only tf.* ops are used, TensorFlow derives the gradient
    automatically -- no custom backward pass is needed.

    Args:
        x: Input tensor (float32).
        k: Slope of the [-1, 0) segment. A Python float, or a tensor
           of the same shape as x (see the tensor-valued example below).

    Returns:
        A tensor with the same shape as x.
    """
    # 0/1 region masks; multiplying by a mask selects that region's branch.
    part_1 = tf.to_float(tf.math.less_equal(0.0, x))  # x >= 0
    part_2 = tf.to_float(tf.math.logical_and(
        tf.math.less_equal(-1.0, x), tf.math.less(x, 0.0)))  # -1 <= x < 0
    # The x < -1 region contributes 0, so its mask is not needed.
    # (The original computed a part_3 mask and multiplied it by zero.)
    return part_1 * x + part_2 * x * k
def new_relu_test():
    """Numerically verify the gradient of new_relu on random data."""
    # Large magnitudes exercise every branch of the piecewise function.
    sample = tf.random_normal([10]) * 10000
    activated = new_relu(sample)
    with tf.Session():
        # Compares the analytic gradient against a finite-difference estimate.
        grad_err = tf.test.compute_gradient_error(sample, [10], activated, [10])
        print(grad_err)
# Demo: pass a per-example tensor as new_relu's slope argument k.
features = tf.placeholder(shape=[None, 3], dtype=tf.float32)
slopes = tf.placeholder(shape=[None], dtype=tf.float32)
hidden_a = tf.layers.dense(features, 3)
hidden_b = tf.layers.dense(features, 3)
head = tf.layers.dense(hidden_b, 1, activation=None)
# Expand [batch] -> [batch, 1] and tile to [batch, 3] so the slope
# tensor matches the input's shape exactly, as new_relu requires.
slope_matrix = tf.tile(tf.expand_dims(slopes, -1), [1, 3])
activated = new_relu(features, slope_matrix)
with tf.Session() as sess:
    sess.run(tf.initializers.global_variables())
    sess.run(activated,
             feed_dict={features: np.random.rand(100, 3),
                        slopes: np.random.rand(100)})
# Guard the script entry point so importing this module does not
# immediately run the gradient check.
if __name__ == "__main__":
    new_relu_test()
EDIT 2:
Using conv2d
import numpy as np
import tensorflow as tf
def new_relu(x, k=0.2):
    """Piecewise-linear activation built entirely from TensorFlow ops.

    Computes, elementwise:
        x        for x >= 0
        k * x    for -1 <= x < 0
        0        for x < -1

    Because only tf.* ops are used, TensorFlow derives the gradient
    automatically -- no custom backward pass is needed.

    Args:
        x: Input tensor (float32).
        k: Slope of the [-1, 0) segment. A Python float, or a tensor
           of the same shape as x.

    Returns:
        A tensor with the same shape as x.
    """
    # 0/1 region masks; multiplying by a mask selects that region's branch.
    part_1 = tf.to_float(tf.math.less_equal(0.0, x))  # x >= 0
    part_2 = tf.to_float(tf.math.logical_and(
        tf.math.less_equal(-1.0, x), tf.math.less(x, 0.0)))  # -1 <= x < 0
    # The x < -1 region contributes 0, so its mask is not needed.
    # (The original computed a part_3 mask and multiplied it by zero.)
    return part_1 * x + part_2 * x * k
def new_relu_test():
    """Numerically verify the gradient of new_relu on random data."""
    # Large magnitudes exercise every branch of the piecewise function.
    sample = tf.random_normal([10]) * 10000
    activated = new_relu(sample)
    with tf.Session():
        # Compares the analytic gradient against a finite-difference estimate.
        grad_err = tf.test.compute_gradient_error(sample, [10], activated, [10])
        print(grad_err)
# Demo: new_relu as the activation after a conv2d layer.
images = tf.placeholder(shape=[None, 28, 28, 3], dtype=tf.float32)
kernel = tf.get_variable(
    "weight", [3, 3, 3, 32],
    initializer=tf.truncated_normal_initializer(stddev=0.1))
bias = tf.get_variable(
    "bias", [32], initializer=tf.constant_initializer(0.0))
feature_map = tf.nn.conv2d(images, kernel, strides=[1, 1, 1, 1], padding='SAME')
conv_act = new_relu(tf.nn.bias_add(feature_map, bias))
with tf.Session() as sess:
    sess.run(tf.initializers.global_variables())
    sess.run(conv_act, feed_dict={images: np.random.rand(100, 28, 28, 3)})
# Guard the script entry point so importing this module does not
# immediately run the gradient check.
if __name__ == "__main__":
    new_relu_test()

Adria Ciurana
- 904
- 1
- 9
- 19
-
Thank you, but there is another problem. The input of relu is a tensor, like this: conv1_weights = ...... conv1_biases = ....... conv1 = ...... relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases)). The parameter of relu1 is a tensor, but new_relu takes a value. How can I modify it so that new_relu works the same way as relu1? – YFye Jan 19 '19 at 15:13
-
It has worked for me using your code; I have updated the answer so you can see it. – Adria Ciurana Jan 19 '19 at 15:47
-
I think I understand now, but I still have a problem. I replaced tf.nn.relu with new_relu and it runs, but I did not add this code: with tf.Session() as sess: sess.run(tf.initia.....) sess.run(relu1, feed_dict = .....). I have added another answer — please see if that is OK. – YFye Jan 20 '19 at 02:56