diff options
author | Jordan Gong <jordan.gong@protonmail.com> | 2021-03-05 20:10:06 +0800 |
---|---|---|
committer | Jordan Gong <jordan.gong@protonmail.com> | 2021-03-05 20:10:06 +0800 |
commit | e1cf9890578fccba7542dff8a96391bd5aefdf7d (patch) | |
tree | c08781e075b791b74fde38939ab2a51990b70b09 /models/auto_encoder.py | |
parent | 6db53397468a3fd6bf6fbb323ac514a98cc4f3cb (diff) | |
parent | 228b8cbdb067e159942adbb7892373b53593e87f (diff) |
Merge branch 'data_parallel' into data_parallel_py3.8
Diffstat (limited to 'models/auto_encoder.py')
-rw-r--r-- | models/auto_encoder.py | 15 |
1 file changed, 3 insertions, 12 deletions
diff --git a/models/auto_encoder.py b/models/auto_encoder.py index dbd1da0..023b462 100644 --- a/models/auto_encoder.py +++ b/models/auto_encoder.py @@ -153,27 +153,18 @@ class AutoEncoder(nn.Module): x_c1_t2_pred_ = self.decoder(f_a_c1_t1_, f_c_c1_t1_, f_p_c1_t2_) x_c1_t2_pred = x_c1_t2_pred_.view(n, t, c, h, w) - xrecon_loss = torch.stack([ - F.mse_loss(x_c1_t2[:, i, :, :, :], x_c1_t2_pred[:, i, :, :, :]) - for i in range(t) - ]).sum() - f_c_c1_t1 = f_c_c1_t1_.view(n, t, -1) f_c_c1_t2 = f_c_c1_t2_.view(n, t, -1) f_c_c2_t2 = f_c_c2_t2_.view(n, t, -1) - cano_cons_loss = torch.stack([ - F.mse_loss(f_c_c1_t1[:, i, :], f_c_c1_t2[:, i, :]) - + F.mse_loss(f_c_c1_t2[:, i, :], f_c_c2_t2[:, i, :]) - for i in range(t) - ]).mean() f_p_c1_t2 = f_p_c1_t2_.view(n, t, -1) f_p_c2_t2 = f_p_c2_t2_.view(n, t, -1) - pose_sim_loss = F.mse_loss(f_p_c1_t2.mean(1), f_p_c2_t2.mean(1)) return ( (f_a_c1_t2_, f_c_c1_t2_, f_p_c1_t2_), - torch.stack((xrecon_loss, cano_cons_loss, pose_sim_loss * 10)) + (x_c1_t2_pred, + (f_c_c1_t1, f_c_c1_t2, f_c_c2_t2), + (f_p_c1_t2, f_p_c2_t2)) ) else: # evaluating return f_c_c1_t2_, f_p_c1_t2_ |