From 382912b087af409cc20628c711261c6bd3f99836 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Wed, 23 Dec 2020 21:03:45 +0800
Subject: Modify activation functions after conv or trans-conv in auto-encoder

1. Make activation functions inplace ops
2. Change Leaky ReLU to ReLU in the decoder
---
 models/auto_encoder.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/models/auto_encoder.py b/models/auto_encoder.py
index bb4a377..1be878f 100644
--- a/models/auto_encoder.py
+++ b/models/auto_encoder.py
@@ -36,12 +36,12 @@ class Encoder(nn.Module):
         self.batch_norm_fc = nn.BatchNorm1d(self.em_dim)
 
     def forward(self, x):
-        x = F.leaky_relu(self.batch_norm1(self.conv1(x)), 0.2)
+        x = F.leaky_relu(self.batch_norm1(self.conv1(x)), 0.2, inplace=True)
         x = self.max_pool1(x)
-        x = F.leaky_relu(self.batch_norm2(self.conv2(x)), 0.2)
+        x = F.leaky_relu(self.batch_norm2(self.conv2(x)), 0.2, inplace=True)
         x = self.max_pool2(x)
-        x = F.leaky_relu(self.batch_norm3(self.conv3(x)), 0.2)
-        x = F.leaky_relu(self.batch_norm4(self.conv4(x)), 0.2)
+        x = F.leaky_relu(self.batch_norm3(self.conv3(x)), 0.2, inplace=True)
+        x = F.leaky_relu(self.batch_norm4(self.conv4(x)), 0.2, inplace=True)
         x = self.max_pool3(x)
         x = x.view(-1, (64 * 8) * 2 * 4)
         embedding = self.batch_norm_fc(self.fc(x))
@@ -76,11 +76,11 @@ class Decoder(nn.Module):
 
     def forward(self, fa, fgs, fgd):
         x = torch.cat((fa, fgs, fgd), dim=1).view(-1, self.em_dim)
-        x = F.leaky_relu(self.batch_norm_fc(self.fc(x)), 0.2)
+        x = F.relu(self.batch_norm_fc(self.fc(x)), True)
         x = x.view(-1, 64 * 8, 4, 2)
-        x = F.leaky_relu(self.batch_norm1(self.trans_conv1(x)), 0.2)
-        x = F.leaky_relu(self.batch_norm2(self.trans_conv2(x)), 0.2)
-        x = F.leaky_relu(self.batch_norm3(self.trans_conv3(x)), 0.2)
+        x = F.relu(self.batch_norm1(self.trans_conv1(x)), True)
+        x = F.relu(self.batch_norm2(self.trans_conv2(x)), True)
+        x = F.relu(self.batch_norm3(self.trans_conv3(x)), True)
         x = F.sigmoid(self.trans_conv4(x))
         return x
--
cgit v1.2.3
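
Note on the decoder lines above: in F.relu(x, True) the positional True is the inplace flag, i.e. the same as inplace=True. What follows is a minimal, hypothetical PyTorch sketch of the trans-conv -> batch-norm -> inplace activation pattern this commit standardizes on; the module name, channel count, and shapes are illustrative only and do not come from models/auto_encoder.py.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class TinyDecoderBlock(nn.Module):
        # Hypothetical block, not part of the patched repository; it only
        # illustrates the trans-conv -> batch-norm -> inplace ReLU pattern.
        def __init__(self, channels=16):
            super().__init__()
            self.trans_conv = nn.ConvTranspose2d(channels, channels, 3, padding=1)
            self.batch_norm = nn.BatchNorm2d(channels)

        def forward(self, x):
            # inplace=True overwrites the batch-norm output buffer instead of
            # allocating a fresh tensor, trimming activation memory.
            return F.relu(self.batch_norm(self.trans_conv(x)), inplace=True)

    x = torch.randn(2, 16, 8, 8)
    print(TinyDecoderBlock()(x).shape)  # torch.Size([2, 16, 8, 8])

An inplace activation directly after a batch-norm layer is typically safe because no other op reads that intermediate tensor; if an overwritten tensor were still needed for gradient computation, autograd would raise a RuntimeError at backward time.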