diff options
-rw-r--r-- | models/auto_encoder.py | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/models/auto_encoder.py b/models/auto_encoder.py index bb4a377..1be878f 100644 --- a/models/auto_encoder.py +++ b/models/auto_encoder.py @@ -36,12 +36,12 @@ class Encoder(nn.Module): self.batch_norm_fc = nn.BatchNorm1d(self.em_dim) def forward(self, x): - x = F.leaky_relu(self.batch_norm1(self.conv1(x)), 0.2) + x = F.leaky_relu(self.batch_norm1(self.conv1(x)), 0.2, inplace=True) x = self.max_pool1(x) - x = F.leaky_relu(self.batch_norm2(self.conv2(x)), 0.2) + x = F.leaky_relu(self.batch_norm2(self.conv2(x)), 0.2, inplace=True) x = self.max_pool2(x) - x = F.leaky_relu(self.batch_norm3(self.conv3(x)), 0.2) - x = F.leaky_relu(self.batch_norm4(self.conv4(x)), 0.2) + x = F.leaky_relu(self.batch_norm3(self.conv3(x)), 0.2, inplace=True) + x = F.leaky_relu(self.batch_norm4(self.conv4(x)), 0.2, inplace=True) x = self.max_pool3(x) x = x.view(-1, (64 * 8) * 2 * 4) embedding = self.batch_norm_fc(self.fc(x)) @@ -76,11 +76,11 @@ class Decoder(nn.Module): def forward(self, fa, fgs, fgd): x = torch.cat((fa, fgs, fgd), dim=1).view(-1, self.em_dim) - x = F.leaky_relu(self.batch_norm_fc(self.fc(x)), 0.2) + x = F.relu(self.batch_norm_fc(self.fc(x)), True) x = x.view(-1, 64 * 8, 4, 2) - x = F.leaky_relu(self.batch_norm1(self.trans_conv1(x)), 0.2) - x = F.leaky_relu(self.batch_norm2(self.trans_conv2(x)), 0.2) - x = F.leaky_relu(self.batch_norm3(self.trans_conv3(x)), 0.2) + x = F.relu(self.batch_norm1(self.trans_conv1(x)), True) + x = F.relu(self.batch_norm2(self.trans_conv2(x)), True) + x = F.relu(self.batch_norm3(self.trans_conv3(x)), True) x = F.sigmoid(self.trans_conv4(x)) return x |