How to add dropout layers automatically to a neural network in PyTorch

I have a neural network in PyTorch and build each layer automatically via the following structure:

import numpy as np
import torch
import torch.nn as nn

class FCN(nn.Module):
    ## Neural Network
    def __init__(self, layers):
        super().__init__()  # call __init__ from parent class
        self.activation = nn.Tanh()
        self.loss_function = nn.MSELoss(reduction='mean')
        # Initialise the network as a list of layers using nn.ModuleList
        self.linears = nn.ModuleList([nn.Linear(layers[i], layers[i+1]) for i in range(len(layers)-1)])
        self.iter = 0
        # Xavier normal initialization
        for i in range(len(layers)-1):
            nn.init.xavier_normal_(self.linears[i].weight.data, gain=1.0)
            nn.init.zeros_(self.linears[i].bias.data)

    # forward pass
    def forward(self, x):
        if not torch.is_tensor(x):
            x = torch.from_numpy(x)
        a = x.float()
        for i in range(len(self.linears) - 1):
            z = self.linears[i](a)
            a = self.activation(z)
        a = self.linears[-1](a)
        return a

The following code then builds the network for me:

layers = np.array([2, 50, 50, 1])
model = FCN(layers)

Now I am wondering how I can automatically add dropout layers to the network. I tried the following change to the network structure, but it only gives me one dropout layer at the end:

self.linears = nn.ModuleList([nn.Linear(layers[i], layers[i+1]) for i in range(len(layers)-1) + nn.Dropout(p=0.5)]

I very much appreciate any help in this regard.

Solution:

If you really can add a dropout layer by "adding it" with + as you do (I haven't seen that, but if it works, that's dope!), you should just move the + nn.Dropout(p=0.5) to before the for in the comprehension, i.e.

self.linears = nn.ModuleList([nn.Linear(layers[i], layers[i+1]) + nn.Dropout(p=0.5) for i in range(len(layers)-1)])

EDIT

As expected, you can't add modules together like that; nn.Module does not support the + operator.
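
For reference, a quick check (my own snippet, not from the original post) shows why combining the layers that way fails:

import torch.nn as nn

# Adding two modules is not supported, since nn.Module does not define +
nn.Linear(2, 2) + nn.Dropout(p=0.5)
# TypeError: unsupported operand type(s) for +: 'Linear' and 'Dropout'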

What you can do instead is build a list of dropout layers in the same way you build the linear layers, and then apply them in your forward pass.

Below is an example; it might need to be tweaked to match your inputs, etc.

class FCN(nn.Module):
    ## Neural Network
    def __init__(self, layers):
        super().__init__()
        self.activation = nn.Tanh()
        self.loss_function = nn.MSELoss(reduction='mean')
        # Initialise the network as a list of layers using nn.ModuleList
        self.linears = nn.ModuleList([nn.Linear(layers[i], layers[i+1]) for i in range(len(layers)-1)])
        # One dropout layer per hidden layer; use nn.ModuleList rather than a plain
        # Python list so the dropouts are registered as submodules and respond to
        # model.train() / model.eval()
        self.dropout_layers = nn.ModuleList([nn.Dropout(p=0.5) for _ in range(len(layers)-2)])
        self.iter = 0
        # Xavier normal initialization
        for i in range(len(layers)-1):
            nn.init.xavier_normal_(self.linears[i].weight.data, gain=1.0)
            nn.init.zeros_(self.linears[i].bias.data)

    # forward must be defined on the class, not nested inside __init__
    def forward(self, x):
        if not torch.is_tensor(x):
            x = torch.from_numpy(x)
        a = x.float()
        # activation + dropout on the hidden layers only
        for linear, dropout in zip(self.linears[:-1], self.dropout_layers):
            a = self.activation(linear(a))
            a = dropout(a)
        return self.linears[-1](a)
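
A minimal usage sketch (my own addition, reusing the layer sizes and a hypothetical input from the question): dropout is only active in training mode, so switch between model.train() and model.eval() accordingly.

layers = np.array([2, 50, 50, 1])
model = FCN(layers)

x = np.random.rand(10, 2)   # 10 samples with 2 input features (hypothetical data)

model.train()               # dropout active: repeated calls give different outputs
out_train = model(x)

model.eval()                # dropout disabled: output is deterministic
out_eval = model(x)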
