I have the following NN model in PyTorch. The forward method does not use dropout, but there is a Dropout instance defined in the class, and it shows up when the model is printed after instantiation. Is the model actually using dropout or not? Here is the code:
import torch
import torch.nn as nn

n_input_dim = train_ds[0][0].shape[0]  # input dimension of one sample
n_hidden1, n_hidden2, n_hidden3, n_hidden4, n_output = [1048, 524, 262, 131, 1]
class NN(nn.Module):
    def __init__(self):
        super(NN, self).__init__()
        self.layer1 = nn.Linear(n_input_dim, n_hidden1)
        self.layer2 = nn.Linear(n_hidden1, n_hidden2)
        self.layer3 = nn.Linear(n_hidden2, n_hidden3)
        self.layer4 = nn.Linear(n_hidden3, n_hidden4)
        self.layer_out = nn.Linear(n_hidden4, n_output)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.5)  # defined here, but never called in forward

    def forward(self, inputs):
        x = self.relu(self.layer1(inputs))
        x = self.relu(self.layer2(x))
        x = self.relu(self.layer3(x))
        x = self.relu(self.layer4(x))
        x = self.layer_out(x)
        return x
learning_rate = 0.01
epochs = 300
loss_func = nn.L1Loss()
model = NN()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
print(model)

The output is:
NN(
  (layer1): Linear(in_features=1125, out_features=1048, bias=True)
  (layer2): Linear(in_features=1048, out_features=524, bias=True)
  (layer3): Linear(in_features=524, out_features=262, bias=True)
  (layer4): Linear(in_features=262, out_features=131, bias=True)
  (layer_out): Linear(in_features=131, out_features=1, bias=True)
  (relu): ReLU()
  (dropout): Dropout(p=0.5, inplace=False)
)
I also tried adding dropout calls inside the forward method (roughly as in the sketch below), but print(model) produced exactly the same output.
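For reference, this is a minimal sketch of what that attempt looked like; the exact placement of the dropout calls (after each hidden activation) is illustrative. It replaces the forward method of the NN class above:

# Sketch of the modified forward; dropout placement after each hidden
# activation is illustrative. This replaces forward in the NN class above.
def forward(self, inputs):
    x = self.dropout(self.relu(self.layer1(inputs)))
    x = self.dropout(self.relu(self.layer2(x)))
    x = self.dropout(self.relu(self.layer3(x)))
    x = self.relu(self.layer4(x))  # no dropout right before the output layer
    x = self.layer_out(x)
    return x

Even with this version, print(model) listed the same modules as before.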