author     Jordan Gong <jordan.gong@protonmail.com>    2021-01-21 21:50:28 +0800
committer  Jordan Gong <jordan.gong@protonmail.com>    2021-01-21 21:50:28 +0800
commit     59ccfd7718babe94fac549fcfbfa22bb311f0bd8
tree       7c28ac5617509d01fcc85d265c634a9cf9f39c51
parent     2345f662ea39569de411eb45925127b617a9c27f
Bug fixes
1. Turn off autograd while decoding canonical and pose features
2. Change default batch size to (4, 8)
-rw-r--r--  config.py              |  2 +-
-rw-r--r--  models/auto_encoder.py | 19 ++++++++++---------
2 files changed, 11 insertions(+), 10 deletions(-)
@@ -33,7 +33,7 @@ config: Configuration = {
     # Batch size (pr, k)
     # `pr` denotes number of persons
     # `k` denotes number of sequences per person
-    'batch_size': (2, 2),
+    'batch_size': (4, 8),
     # Number of workers of Dataloader
     'num_workers': 4,
     # Faster data transfer from RAM to GPU if enabled
diff --git a/models/auto_encoder.py b/models/auto_encoder.py
index 5e7558b..36be868 100644
--- a/models/auto_encoder.py
+++ b/models/auto_encoder.py
@@ -132,15 +132,16 @@ class AutoEncoder(nn.Module):
         # x_c1_t2 is the frame for later module
         (f_a_c1_t2, f_c_c1_t2, f_p_c1_t2) = self.encoder(x_c1_t2)

-        # Decode canonical features for HPM
-        x_c_c1_t2 = self.decoder(
-            torch.zeros_like(f_a_c1_t2), f_c_c1_t2, torch.zeros_like(f_p_c1_t2),
-            no_trans_conv=True
-        )
-        # Decode pose features for Part Net
-        x_p_c1_t2 = self.decoder(
-            torch.zeros_like(f_a_c1_t2), torch.zeros_like(f_c_c1_t2), f_p_c1_t2
-        )
+        with torch.no_grad():
+            # Decode canonical features for HPM
+            x_c_c1_t2 = self.decoder(
+                torch.zeros_like(f_a_c1_t2), f_c_c1_t2, torch.zeros_like(f_p_c1_t2),
+                no_trans_conv=True
+            )
+            # Decode pose features for Part Net
+            x_p_c1_t2 = self.decoder(
+                torch.zeros_like(f_a_c1_t2), torch.zeros_like(f_c_c1_t2), f_p_c1_t2
+            )

         if self.training:
             # t1 is random time step, c2 is another condition
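A minimal, hypothetical sketch (not part of this commit) of the behavioural difference the torch.no_grad() block above introduces: tensors computed inside the block record no autograd history, so the two auxiliary decodings are excluded from backpropagation. The Linear layer and tensor shape below are placeholders, not the repository's real encoder/decoder.

import torch
import torch.nn as nn

decoder = nn.Linear(64, 64)       # placeholder for the project's decoder
f_c = torch.randn(32, 64)         # e.g. batch_size (4, 8) -> 4 * 8 = 32 sequences

with torch.no_grad():
    x_no_grad = decoder(f_c)      # no computation graph is recorded here

x_with_grad = decoder(f_c)        # ordinary call for comparison

print(x_no_grad.requires_grad)    # False -> contributes no gradients
print(x_with_grad.requires_grad)  # True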