| author | Jordan Gong <jordan.gong@protonmail.com> | 2021-01-12 11:29:02 +0800 |
|---|---|---|
| committer | Jordan Gong <jordan.gong@protonmail.com> | 2021-01-12 11:29:02 +0800 |
| commit | 966d4431c037b0c4641aa2a5fc22f05be064b331 (patch) | |
| tree | 0239ba89d31857a7f086acf627fc1bbf167855a9 /models/rgb_part_net.py | |
| parent | 7825f978f198e56958703f0d08f7ccbd8cef49ca (diff) | |
| parent | 36cf502afe9b93efe31c244030270b0a62e644b8 (diff) | |
Merge branch 'master' into python3.8
# Conflicts:
# models/model.py
Diffstat (limited to 'models/rgb_part_net.py')
-rw-r--r-- | models/rgb_part_net.py | 15 |
1 file changed, 8 insertions, 7 deletions
```diff
diff --git a/models/rgb_part_net.py b/models/rgb_part_net.py
index 39cbed6..95a3f2e 100644
--- a/models/rgb_part_net.py
+++ b/models/rgb_part_net.py
@@ -52,10 +52,12 @@ class RGBPartNet(nn.Module):
     def fc(self, x):
         return x @ self.fc_mat
 
-    def forward(self, x_c1, x_c2, y=None):
+    def forward(self, x_c1, x_c2=None, y=None):
         # Step 0: Swap batch_size and time dimensions for next step
         # n, t, c, h, w
-        x_c1, x_c2 = x_c1.transpose(0, 1), x_c2.transpose(0, 1)
+        x_c1 = x_c1.transpose(0, 1)
+        if self.training:
+            x_c2 = x_c2.transpose(0, 1)
 
         # Step 1: Disentanglement
         # t, n, c, h, w
@@ -83,9 +85,9 @@ class RGBPartNet(nn.Module):
             loss = torch.sum(torch.stack(losses))
             return loss, [loss.item() for loss in losses]
         else:
-            return x
+            return x.unsqueeze(1).view(-1)
 
-    def _disentangle(self, x_c1, x_c2, y):
+    def _disentangle(self, x_c1, x_c2=None, y=None):
         num_frames = len(x_c1)
         # Decoded canonical features and Pose images
         x_c_c1, x_p_c1 = [], []
@@ -95,7 +97,7 @@ class RGBPartNet(nn.Module):
             xrecon_loss, cano_cons_loss = [], []
             for t2 in range(num_frames):
                 t1 = random.randrange(num_frames)
-                output = self.ae(x_c1[t1], x_c1[t2], x_c2[t2], y)
+                output = self.ae(x_c1[t2], x_c1[t1], x_c2[t2], y)
                 (x_c1_t2, f_p_t2, losses) = output
 
                 # Decoded features or image
@@ -128,8 +130,7 @@ class RGBPartNet(nn.Module):
 
         else:  # evaluating
             for t2 in range(num_frames):
-                t1 = random.randrange(num_frames)
-                x_c1_t2 = self.ae(x_c1[t1], x_c1[t2], x_c2[t2])
+                x_c1_t2 = self.ae(x_c1[t2])
                 # Decoded features or image
                 (x_c_c1_t2, x_p_c1_t2) = x_c1_t2
                 # Canonical Features for HPM
```
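The merged change makes `x_c2` optional: the condition-2 clip (and the label `y`) are only consumed while `self.training` is true, so evaluation can run on a single clip. Below is a minimal, self-contained sketch of that gating pattern; `DualModeNet`, its return values, and the tensor sizes are hypothetical stand-ins for illustration, not part of this repository.

```python
import torch
import torch.nn as nn


class DualModeNet(nn.Module):
    """Hypothetical analogue of the RGBPartNet change: the second
    input is consumed only when the module is in training mode."""

    def forward(self, x_c1, x_c2=None, y=None):
        # Swap batch and time dims: (n, t, c, h, w) -> (t, n, c, h, w)
        x_c1 = x_c1.transpose(0, 1)
        if self.training:
            # x_c2 is required (and transposed) only during training
            x_c2 = x_c2.transpose(0, 1)
            return x_c1, x_c2
        return x_c1


net = DualModeNet()
clip = torch.randn(2, 4, 3, 64, 32)  # n, t, c, h, w (made-up sizes)

net.train()                    # self.training == True
out = net(clip, clip.clone())  # both conditions supplied

net.eval()                     # self.training == False
out = net(clip)                # x_c2 defaults to None and is never touched
```

Relying on `nn.Module.train()`/`eval()` to flip `self.training` keeps both call signatures in one `forward`, which is why the merged code can drop `x_c2` and `y` entirely at inference time.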