| author | Jordan Gong <jordan.gong@protonmail.com> | 2021-03-12 13:56:17 +0800 |
|---|---|---|
| committer | Jordan Gong <jordan.gong@protonmail.com> | 2021-03-12 13:56:17 +0800 |
| commit | c74df416b00f837ba051f3947be92f76e7afbd88 (patch) | |
| tree | 02983df94008bbb427c2066c5f619e0ffdefe1c5 /models/auto_encoder.py | |
| parent | 1b8d1614168ce6590c5e029c7f1007ac9b17048c (diff) | |
Code refactoring
1. Separate FCs and triplet losses for HPM and PartNet
2. Remove FC-equivalent 1x1 conv layers in HPM
3. Support adjustable learning rate schedulers
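Point 3 above is not part of this file's diff, but the idea is common enough to sketch: an "adjustable" scheduler usually means the scheduler class and its arguments come from configuration instead of being hard-coded in the training loop. A minimal, hypothetical sketch (the `scheduler_config` dict and its keys are illustrative, not this repository's actual config format):

```python
import torch
from torch import nn, optim

# Hypothetical config: class name and kwargs are placeholders, not taken
# from this repository.
scheduler_config = {'class': 'StepLR', 'kwargs': {'step_size': 500, 'gamma': 0.9}}

model = nn.Linear(10, 10)                              # stand-in module
optimizer = optim.Adam(model.parameters(), lr=1e-4)

# Resolve the scheduler class by name so it can be swapped via config.
scheduler_cls = getattr(optim.lr_scheduler, scheduler_config['class'])
scheduler = scheduler_cls(optimizer, **scheduler_config['kwargs'])

for step in range(3):                                  # toy training loop
    optimizer.zero_grad()
    loss = model(torch.randn(4, 10)).sum()
    loss.backward()
    optimizer.step()
    scheduler.step()                                   # advance the LR schedule
```

Resolving the class by name keeps the training loop unchanged when switching, say, between `StepLR` and `ExponentialLR`; only the config changes.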
Diffstat (limited to 'models/auto_encoder.py')
-rw-r--r-- | models/auto_encoder.py | 2 |
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/models/auto_encoder.py b/models/auto_encoder.py
index e6a3e60..4fece69 100644
--- a/models/auto_encoder.py
+++ b/models/auto_encoder.py
@@ -171,7 +171,7 @@ class AutoEncoder(nn.Module):
             return (
                 (f_a_c1_t2_, f_c_c1_t2_, f_p_c1_t2_),
-                torch.stack((xrecon_loss, cano_cons_loss, pose_sim_loss * 10))
+                (xrecon_loss, cano_cons_loss, pose_sim_loss * 10)
             )
         else:  # evaluating
             return f_c_c1_t2_, f_p_c1_t2_
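For context on what the changed return means for callers: with a plain tuple instead of a `torch.stack`-ed tensor, the training code can weight, stack, or sum the three loss terms itself. The sketch below is illustrative only; `ToyAutoEncoder` is a placeholder that merely mimics the new return shape, not the repository's actual model or training loop.

```python
import torch
from torch import nn

# Placeholder module that mimics the changed return shape:
# (features, (xrecon_loss, cano_cons_loss, pose_sim_loss * 10)).
class ToyAutoEncoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(8, 8)

    def forward(self, x):
        y = self.layer(x)
        xrecon_loss = (y - x).pow(2).mean()
        cano_cons_loss = y.abs().mean()
        pose_sim_loss = y.std()
        return (y, y, y), (xrecon_loss, cano_cons_loss, pose_sim_loss * 10)

model = ToyAutoEncoder()
features, losses = model(torch.randn(4, 8))

# The caller is now free to stack the terms (as the module did before)
# or simply reduce them directly.
total_loss = torch.stack(losses).sum()
total_loss.backward()
```

Moving the `torch.stack` out of the module keeps the forward pass free of a reduction decision that really belongs to the training loop.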