
I want to assign a different activation function to each neuron in a linear layer, but I don't want to lose the gradients.

Here is what I do:

import torch
import torch.nn as nn

class DataNet(nn.Module):
    def __init__(self):
        super(DataNet, self).__init__()
        self.fc1 = nn.Linear(2, 5)
        self.fc2 = nn.Linear(5, 5)
        self.fc3 = nn.Linear(5, 5)
        self.fc4 = nn.Linear(5, 1)
        self.z_f = self.z_function

    def z_function(self, x):
        # clamp the final output to [0, 1]
        x = torch.clamp(x, min=0, max=1)
        return x

    def activation_functions(self):
        # one activation function per neuron of a 5-unit layer
        af_list = [
            torch.sin,
            torch.exp,
            torch.tanh,
            torch.square,
            torch.arccos,
        ]
        return af_list

    def a_f_s(self, layer):
        # assign_neuron_specific_activation_function:
        # split the layer output into one column per neuron
        neuron_list = []
        for i in range(5):
            neuron_list.append(layer[:, i:i + 1])

        act_func_list = self.activation_functions()

        # apply each activation function to its own column
        # (despite the name, this is a generator expression, not a tuple)
        tuple_of_activated_parts = (
            torch.tensor(a(l), requires_grad=True)
            for l, a in zip(neuron_list, act_func_list)
        )
        out = torch.cat(tuple_of_activated_parts, dim=1)

        return out

    def forward(self, x):
        x = self.fc1(x)
        x = self.a_f_s(x)
        x = self.fc2(x)
        x = self.a_f_s(x)
        x = self.fc3(x)
        x = self.a_f_s(x)
        x = self.fc4(x)
        x = self.z_f(x)
        return x

But I got the following error:

TypeError: cat() received an invalid combination of arguments - got (generator, dim=int), but expected one of:
 * (tuple of Tensors tensors, int dim, *, Tensor out)
 * (tuple of Tensors tensors, name dim, *, Tensor out)
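
Reading the message, torch.cat apparently expects a tuple (or list) of tensors rather than a generator. A minimal sketch with throwaway tensors (unrelated to my model) that shows the difference:

import torch

parts = (torch.sin(t) for t in (torch.ones(1, 2), torch.zeros(1, 2)))  # generator expression
# torch.cat(parts, dim=1)             # raises the TypeError above
out = torch.cat(tuple(parts), dim=1)  # a materialized tuple concatenates fine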

Also, I tried the following line to concatenate:

out = torch.stack(tuple_of_activated_parts, dim=1).squeeze(0)

And got the following error:

stack(): argument 'tensors' (position 1) must be tuple of Tensors, not generator
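
Again the generator seems to be the problem: torch.stack also wants a sequence of tensors. A tiny check with dummy tensors:

import torch

parts = (torch.ones(1, 2) for _ in range(5))    # generator expression
# torch.stack(parts, dim=1)                     # raises the TypeError above
out = torch.stack(tuple(parts), dim=1)          # stacking a tuple works; shape (1, 5, 2)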

What should I do?
