-rw-r--r--  models/auto_encoder.py  15
-rw-r--r--  models/model.py         42
-rw-r--r--  models/rgb_part_net.py   8
-rw-r--r--  requirements.txt         2
4 files changed, 36 insertions(+), 31 deletions(-)
diff --git a/models/auto_encoder.py b/models/auto_encoder.py
index dbd1da0..023b462 100644
--- a/models/auto_encoder.py
+++ b/models/auto_encoder.py
@@ -153,27 +153,18 @@ class AutoEncoder(nn.Module):
x_c1_t2_pred_ = self.decoder(f_a_c1_t1_, f_c_c1_t1_, f_p_c1_t2_)
x_c1_t2_pred = x_c1_t2_pred_.view(n, t, c, h, w)
- xrecon_loss = torch.stack([
- F.mse_loss(x_c1_t2[:, i, :, :, :], x_c1_t2_pred[:, i, :, :, :])
- for i in range(t)
- ]).sum()
-
f_c_c1_t1 = f_c_c1_t1_.view(n, t, -1)
f_c_c1_t2 = f_c_c1_t2_.view(n, t, -1)
f_c_c2_t2 = f_c_c2_t2_.view(n, t, -1)
- cano_cons_loss = torch.stack([
- F.mse_loss(f_c_c1_t1[:, i, :], f_c_c1_t2[:, i, :])
- + F.mse_loss(f_c_c1_t2[:, i, :], f_c_c2_t2[:, i, :])
- for i in range(t)
- ]).mean()
f_p_c1_t2 = f_p_c1_t2_.view(n, t, -1)
f_p_c2_t2 = f_p_c2_t2_.view(n, t, -1)
- pose_sim_loss = F.mse_loss(f_p_c1_t2.mean(1), f_p_c2_t2.mean(1))
return (
(f_a_c1_t2_, f_c_c1_t2_, f_p_c1_t2_),
- torch.stack((xrecon_loss, cano_cons_loss, pose_sim_loss * 10))
+ (x_c1_t2_pred,
+ (f_c_c1_t1, f_c_c1_t2, f_c_c2_t2),
+ (f_p_c1_t2, f_p_c2_t2))
)
else: # evaluating
return f_c_c1_t2_, f_p_c1_t2_
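With this change, AutoEncoder.forward no longer reduces anything to scalar losses during training; it returns the raw tensors the losses are computed from, alongside the disentangled features. A minimal sketch of the resulting calling convention (constructor arguments, clip shapes, and the frame-shuffling setup are illustrative assumptions, not part of the commit):

import torch
from models.auto_encoder import AutoEncoder

n, t, c, h, w = 2, 30, 3, 64, 32             # assumed clip shape
x_c1_t2 = torch.rand(n, t, c, h, w)          # condition 1, time t2
x_c1_t1 = x_c1_t2[:, torch.randperm(t)]      # frame-shuffled copy, as upstream
x_c2_t2 = torch.rand(n, t, c, h, w)          # condition 2, time t2

ae = AutoEncoder()                           # assumes default construction
ae.train()
(f_a_, f_c_, f_p_), feature_for_loss = ae(x_c1_t2, x_c1_t1, x_c2_t2)
x_c1_t2_pred, cano_feats, pose_feats = feature_for_loss
f_c_c1_t1, f_c_c1_t2, f_c_c2_t2 = cano_feats  # each viewed as (n, t, -1)
f_p_c1_t2, f_p_c2_t2 = pose_feats

ae.eval()
f_c_, f_p_ = ae(x_c1_t2)                     # evaluation path is unchanged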
diff --git a/models/model.py b/models/model.py
index 22996fe..2a74c8c 100644
--- a/models/model.py
+++ b/models/model.py
@@ -172,6 +172,7 @@ class Model:
triplet_is_hard, triplet_is_mean, None
)
+ num_sampled_frames = dataset_config.get('num_sampled_frames', 30)
num_pairs = (self.pr*self.k-1) * (self.pr*self.k) // 2
num_pos_pairs = (self.k*(self.k-1)//2) * self.pr
@@ -230,18 +231,31 @@ class Model:
# forward + backward + optimize
x_c1 = batch_c1['clip'].to(self.device)
x_c2 = batch_c2['clip'].to(self.device)
- embedding, ae_losses, images = self.rgb_pn(x_c1, x_c2)
+ embedding, images, feature_for_loss = self.rgb_pn(x_c1, x_c2)
+ x_c1_pred = feature_for_loss[0]
+ xrecon_loss = torch.stack([
+ F.mse_loss(x_c1_pred[:, i, :, :, :], x_c1[:, i, :, :, :])
+ for i in range(num_sampled_frames)
+ ]).sum()
+ f_c_c1_t1, f_c_c1_t2, f_c_c2_t2 = feature_for_loss[1]
+ cano_cons_loss = torch.stack([
+ F.mse_loss(f_c_c1_t1[:, i, :], f_c_c1_t2[:, i, :])
+ + F.mse_loss(f_c_c1_t2[:, i, :], f_c_c2_t2[:, i, :])
+ for i in range(num_sampled_frames)
+ ]).mean()
+ f_p_c1_t2, f_p_c2_t2 = feature_for_loss[2]
+ pose_sim_loss = F.mse_loss(
+ f_p_c1_t2.mean(1), f_p_c2_t2.mean(1)
+ ) * 10
y = batch_c1['label'].to(self.device)
# Duplicate labels for each part
y = y.repeat(self.rgb_pn.module.num_total_parts, 1)
embedding = embedding.transpose(0, 1)
- trip_loss, dist, num_non_zero = self.triplet_loss(embedding, y)
- losses = torch.cat((
- ae_losses.view(-1, 3).mean(0),
- torch.stack((
- trip_loss[:self.rgb_pn.module.hpm_num_parts].mean(),
- trip_loss[self.rgb_pn.module.hpm_num_parts:].mean()
- ))
+ triplet_loss, dist, num_non_zero = self.triplet_loss(embedding, y)
+ hpm_loss = triplet_loss[:self.rgb_pn.module.hpm_num_parts].mean()
+ pn_loss = triplet_loss[self.rgb_pn.module.hpm_num_parts:].mean()
+ losses = torch.stack((
+ xrecon_loss, cano_cons_loss, pose_sim_loss, hpm_loss, pn_loss
))
loss = losses.sum()
loss.backward()
@@ -251,13 +265,13 @@ class Model:
running_loss += losses.detach()
# Write losses to TensorBoard
self.writer.add_scalar('Loss/all', loss, self.curr_iter)
- self.writer.add_scalars('Loss/disentanglement', dict(zip((
- 'Cross reconstruction loss', 'Canonical consistency loss',
- 'Pose similarity loss'
- ), ae_losses)), self.curr_iter)
+ self.writer.add_scalars('Loss/disentanglement', {
+ 'Cross reconstruction loss': xrecon_loss,
+ 'Canonical consistency loss': cano_cons_loss,
+ 'Pose similarity loss': pose_sim_loss
+ }, self.curr_iter)
self.writer.add_scalars('Loss/triplet loss', {
- 'HPM': losses[3],
- 'PartNet': losses[4]
+ 'HPM': hpm_loss, 'PartNet': pn_loss
}, self.curr_iter)
# Non-zero losses in batch
if num_non_zero is not None:
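The three disentanglement losses that used to live inside AutoEncoder are rebuilt here from the returned tensors. Factored out as a free-standing helper, the computation reads as follows (a hypothetical sketch, not part of the commit; shapes follow the diff, with clips as (n, t, c, h, w) and features as (n, t, d)):

import torch
import torch.nn.functional as F

def disentanglement_losses(x_c1, x_c1_pred, cano_feats, pose_feats, t):
    f_c_c1_t1, f_c_c1_t2, f_c_c2_t2 = cano_feats
    f_p_c1_t2, f_p_c2_t2 = pose_feats
    # Cross reconstruction: per-frame MSE, summed over all t frames
    xrecon_loss = torch.stack([
        F.mse_loss(x_c1_pred[:, i], x_c1[:, i])
        for i in range(t)
    ]).sum()
    # Canonical consistency: two per-frame MSE terms, averaged over frames
    cano_cons_loss = torch.stack([
        F.mse_loss(f_c_c1_t1[:, i], f_c_c1_t2[:, i])
        + F.mse_loss(f_c_c1_t2[:, i], f_c_c2_t2[:, i])
        for i in range(t)
    ]).mean()
    # Pose similarity: MSE between temporally averaged pose features,
    # scaled by 10 (the weight previously applied inside AutoEncoder)
    pose_sim_loss = F.mse_loss(f_p_c1_t2.mean(1), f_p_c2_t2.mean(1)) * 10
    return xrecon_loss, cano_cons_loss, pose_sim_loss

One consequence of the move is that the frame count now comes from the num_sampled_frames config entry (defaulting to 30) rather than from the tensor itself; the two must agree for the loops above to cover every frame.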
diff --git a/models/rgb_part_net.py b/models/rgb_part_net.py
index 1cda91c..2853571 100644
--- a/models/rgb_part_net.py
+++ b/models/rgb_part_net.py
@@ -52,7 +52,7 @@ class RGBPartNet(nn.Module):
def forward(self, x_c1, x_c2=None):
# Step 1: Disentanglement
# n, t, c, h, w
- ((x_c, x_p), ae_losses, images) = self._disentangle(x_c1, x_c2)
+ ((x_c, x_p), images, f_loss) = self._disentangle(x_c1, x_c2)
# Step 2.a: Static Gait Feature Aggregation & HPM
# n, c, h, w
@@ -69,7 +69,7 @@ class RGBPartNet(nn.Module):
x = self.fc(x)
if self.training:
- return x.transpose(0, 1), ae_losses, images
+ return x.transpose(0, 1), images, f_loss
else:
return x.unsqueeze(1).view(-1)
@@ -78,7 +78,7 @@ class RGBPartNet(nn.Module):
device = x_c1_t2.device
if self.training:
x_c1_t1 = x_c1_t2[:, torch.randperm(t), :, :, :]
- ((f_a_, f_c_, f_p_), losses) = self.ae(x_c1_t2, x_c1_t1, x_c2_t2)
+ (f_a_, f_c_, f_p_), f_loss = self.ae(x_c1_t2, x_c1_t1, x_c2_t2)
# Decode features
x_c = self._decode_cano_feature(f_c_, n, t, device)
x_p_ = self._decode_pose_feature(f_p_, n, t, device)
@@ -95,7 +95,7 @@ class RGBPartNet(nn.Module):
i_p_ = torch.sigmoid(self.ae.decoder.trans_conv4(i_p_))
i_p = i_p_.view(n, t, c, h, w)
- return (x_c, x_p), losses, (i_a, i_c, i_p)
+ return (x_c, x_p), (i_a, i_c, i_p), f_loss
else: # evaluating
f_c_, f_p_ = self.ae(x_c1_t2)
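End to end, the reordered outputs thread through RGBPartNet the same way. A short sketch of both modes (construction and input clips are again illustrative assumptions):

import torch
from models.rgb_part_net import RGBPartNet

rgb_pn = RGBPartNet()                        # assumes default construction
x_c1 = torch.rand(2, 30, 3, 64, 32)          # assumed (n, t, c, h, w) clips
x_c2 = torch.rand(2, 30, 3, 64, 32)

rgb_pn.train()
embedding, images, feature_for_loss = rgb_pn(x_c1, x_c2)
i_a, i_c, i_p = images                       # appearance / canonical / pose

rgb_pn.eval()
with torch.no_grad():
    feature = rgb_pn(x_c1)                   # flat embedding, no extras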
diff --git a/requirements.txt b/requirements.txt
index 926a587..de81280 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-torch~=1.7.1
+torch~=1.8.0
torchvision~=0.8.0a0+ecf4e9c
numpy~=1.19.4
tqdm~=4.58.0