summaryrefslogtreecommitdiff
path: root/models/rgb_part_net.py
diff options
context:
space:
mode:
Diffstat (limited to 'models/rgb_part_net.py')
-rw-r--r--  models/rgb_part_net.py | 128
1 files changed, 77 insertions, 51 deletions
diff --git a/models/rgb_part_net.py b/models/rgb_part_net.py
index 0ff8251..ba5a00e 100644
--- a/models/rgb_part_net.py
+++ b/models/rgb_part_net.py
@@ -2,6 +2,7 @@ import random
import torch
import torch.nn as nn
+import torch.nn.functional as F
from models import AutoEncoder, HorizontalPyramidMatching, PartNet
@@ -36,53 +37,16 @@ class RGBPartNet(nn.Module):
hpm_use_avg_pool, hpm_use_max_pool
)
- self.mse_loss = nn.MSELoss()
-
# TODO Weight inti here
- def pose_sim_loss(self, f_p_c1: torch.Tensor,
- f_p_c2: torch.Tensor) -> torch.Tensor:
- f_p_c1_mean = f_p_c1.mean(dim=0)
- f_p_c2_mean = f_p_c2.mean(dim=0)
- return self.mse_loss(f_p_c1_mean, f_p_c2_mean)
-
- def forward(self, x_c1, x_c2, y):
+ def forward(self, x_c1, x_c2, y=None):
# Step 0: Swap batch_size and time dimensions for next step
# n, t, c, h, w
x_c1, x_c2 = x_c1.transpose(0, 1), x_c2.transpose(0, 1)
# Step 1: Disentanglement
# t, n, c, h, w
- num_frames = len(x_c1)
- # Decoded canonical features and Pose images
- x_c_c1, x_p_c1 = [], []
- # Features required to calculate losses
- f_p_c1, f_p_c2 = [], []
- xrecon_loss, cano_cons_loss = torch.zeros(1), torch.zeros(1)
- for t2 in range(num_frames):
- t1 = random.randrange(num_frames)
- output = self.ae(x_c1[t1], x_c1[t2], x_c2[t2], y)
- (x_c1_t2, f_p_t2, losses) = output
-
- # Decoded features or image
- (x_c_c1_t2, x_p_c1_t2) = x_c1_t2
- # Canonical Features for HPM
- x_c_c1.append(x_c_c1_t2)
- # Pose image for Part Net
- x_p_c1.append(x_p_c1_t2)
-
- # Losses per time step
- # Used in pose similarity loss
- (f_p_c1_t2, f_p_c2_t2) = f_p_t2
- f_p_c1.append(f_p_c1_t2)
- f_p_c2.append(f_p_c2_t2)
- # Cross reconstruction loss and canonical loss
- (xrecon_loss_t2, cano_cons_loss_t2) = losses
- xrecon_loss += xrecon_loss_t2
- cano_cons_loss += cano_cons_loss_t2
-
- x_c_c1 = torch.stack(x_c_c1)
- x_p_c1 = torch.stack(x_p_c1)
+ ((x_c_c1, x_p_c1), losses) = self._disentangle(x_c1, x_c2, y)
# Step 2.a: HPM & Static Gait Feature Aggregation
# t, n, c, h, w
@@ -97,15 +61,77 @@ class RGBPartNet(nn.Module):
# p, n, c
# Step 3: Cat feature map together and calculate losses
- x = torch.cat([x_c, x_p])
- # Losses
- f_p_c1 = torch.stack(f_p_c1)
- f_p_c2 = torch.stack(f_p_c2)
- pose_sim_loss = self.pose_sim_loss(f_p_c1, f_p_c2)
- cano_cons_loss /= num_frames
- # TODO Implement Batch All triplet loss function
- batch_all_triplet_loss = 0
- loss = (xrecon_loss + pose_sim_loss + cano_cons_loss
- + batch_all_triplet_loss)
-
- return x, loss
+ x = torch.cat((x_c, x_p))
+
+ if self.training:
+ # TODO Implement Batch All triplet loss function
+ batch_all_triplet_loss = torch.tensor(0.)
+ print(*losses, batch_all_triplet_loss)
+ loss = torch.sum(torch.stack((*losses, batch_all_triplet_loss)))
+ return loss
+ else:
+ return x
+
+ def _disentangle(self, x_c1, x_c2, y):
+ num_frames = len(x_c1)
+ # Decoded canonical features and Pose images
+ x_c_c1, x_p_c1 = [], []
+ if self.training:
+ # Features required to calculate losses
+ f_p_c1, f_p_c2 = [], []
+ xrecon_loss, cano_cons_loss = [], []
+ for t2 in range(num_frames):
+ t1 = random.randrange(num_frames)
+ output = self.ae(x_c1[t1], x_c1[t2], x_c2[t2], y)
+ (x_c1_t2, f_p_t2, losses) = output
+
+ # Decoded features or image
+ (x_c_c1_t2, x_p_c1_t2) = x_c1_t2
+ # Canonical Features for HPM
+ x_c_c1.append(x_c_c1_t2)
+ # Pose image for Part Net
+ x_p_c1.append(x_p_c1_t2)
+
+ # Losses per time step
+ # Used in pose similarity loss
+ (f_p_c1_t2, f_p_c2_t2) = f_p_t2
+ f_p_c1.append(f_p_c1_t2)
+ f_p_c2.append(f_p_c2_t2)
+ # Cross reconstruction loss and canonical loss
+ (xrecon_loss_t2, cano_cons_loss_t2) = losses
+ xrecon_loss.append(xrecon_loss_t2)
+ cano_cons_loss.append(cano_cons_loss_t2)
+
+ x_c_c1 = torch.stack(x_c_c1)
+ x_p_c1 = torch.stack(x_p_c1)
+
+ # Losses
+ xrecon_loss = torch.sum(torch.stack(xrecon_loss))
+ pose_sim_loss = self._pose_sim_loss(f_p_c1, f_p_c2)
+ cano_cons_loss = torch.mean(torch.stack(cano_cons_loss))
+
+ return ((x_c_c1, x_p_c1),
+ (xrecon_loss, pose_sim_loss, cano_cons_loss))
+
+ else: # evaluating
+ for t2 in range(num_frames):
+ t1 = random.randrange(num_frames)
+ x_c1_t2 = self.ae(x_c1[t1], x_c1[t2], x_c2[t2])
+ # Decoded features or image
+ (x_c_c1_t2, x_p_c1_t2) = x_c1_t2
+ # Canonical Features for HPM
+ x_c_c1.append(x_c_c1_t2)
+ # Pose image for Part Net
+ x_p_c1.append(x_p_c1_t2)
+
+ x_c_c1 = torch.stack(x_c_c1)
+ x_p_c1 = torch.stack(x_p_c1)
+
+ return (x_c_c1, x_p_c1), None
+
+ @staticmethod
+ def _pose_sim_loss(f_p_c1: list[torch.Tensor],
+ f_p_c2: list[torch.Tensor]) -> torch.Tensor:
+ f_p_c1_mean = torch.stack(f_p_c1).mean(dim=0)
+ f_p_c2_mean = torch.stack(f_p_c2).mean(dim=0)
+ return F.mse_loss(f_p_c1_mean, f_p_c2_mean)