 config.py              | 12 ++++++------
 models/model.py        |  3 ++-
 models/rgb_part_net.py |  5 +++--
 3 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/config.py b/config.py
index 88ad371..03f2f0d 100644
--- a/config.py
+++ b/config.py
@@ -37,7 +37,7 @@ config: Configuration = {
         # Batch size (pr, k)
         # `pr` denotes number of persons
         # `k` denotes number of sequences per person
-        'batch_size': (4, 8),
+        'batch_size': (4, 6),
         # Number of workers of Dataloader
         'num_workers': 4,
         # Faster data transfer from RAM to GPU if enabled
@@ -64,7 +64,7 @@ config: Configuration = {
             # Embedding dimension for each part
             'embedding_dims': 256,
             # Triplet loss margins for HPM and PartNet
-            'triplet_margins': (0.2, 0.2),
+            'triplet_margins': (1.5, 1.5),
         },
         'optimizer': {
             # Global parameters
@@ -83,15 +83,15 @@ config: Configuration = {
             # 'amsgrad': False,
             # Local parameters (override global ones)
-            'auto_encoder': {
-                'weight_decay': 0.001
-            },
+            # 'auto_encoder': {
+            #     'weight_decay': 0.001
+            # },
         },
         'scheduler': {
             # Period of learning rate decay
             'step_size': 500,
             # Multiplicative factor of decay
-            'gamma': 0.9,
+            'gamma': 1,
         }
     },
     # Model metadata
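
The config changes shrink each PK batch from 4 x 8 = 32 to 4 x 6 = 24 sequences, raise both triplet margins from 0.2 to 1.5, drop the auto-encoder's local weight decay, and set gamma to 1, which turns the step scheduler into a no-op. A minimal sketch of how these values would be wired up, assuming a plain torch.nn.TripletMarginLoss and a placeholder network instead of the repository's own triplet loss and RGBPartNet:

    import torch
    import torch.nn as nn
    import torch.optim as optim

    pr, k = 4, 6                    # persons per batch, sequences per person
    model = nn.Linear(128, 256)     # placeholder for RGBPartNet
    criterion = nn.TripletMarginLoss(margin=1.5)
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    # gamma=1 multiplies the learning rate by 1 every 500 steps,
    # i.e. the rate stays constant.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=500, gamma=1)

    x = torch.randn(pr * k, 128)    # one PK batch of 24 sequences
    emb = model(x)
    # Anchor/positive/negative mining is done by the real trainer; an
    # arbitrary three-way split stands in for it here.
    loss = criterion(emb[:8], emb[8:16], emb[16:24])
    loss.backward()
    optimizer.step()
    scheduler.step()

With margin 1.5 a triplet keeps contributing loss until its negative is at least 1.5 farther from the anchor than its positive, so many more triplets stay active early in training than with 0.2.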
diff --git a/models/model.py b/models/model.py
index a42a5c6..11ec2f6 100644
--- a/models/model.py
+++ b/models/model.py
@@ -314,7 +314,8 @@ class Model:
         )
         # Init models
-        model_hp = self.hp.get('model', {})
+        model_hp: dict = self.hp.get('model', {}).copy()
+        model_hp.pop('triplet_margins', None)
         self.rgb_pn = RGBPartNet(self.in_channels, self.in_size, **model_hp)
         # Try to accelerate computation using CUDA or others
         self.rgb_pn = self.rgb_pn.to(self.device)
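
The model.py change is the usual copy-then-pop pattern: 'triplet_margins' is a loss hyperparameter, not a constructor argument of RGBPartNet, so expanding the unfiltered dict with **model_hp would raise a TypeError for an unexpected keyword. Copying first keeps self.hp intact for anything that reads the config later. A small sketch with hypothetical names:

    # Hypothetical module that, like RGBPartNet, does not accept
    # 'triplet_margins' in its constructor.
    class PartNet:
        def __init__(self, in_channels, embedding_dims=256):
            self.in_channels = in_channels
            self.embedding_dims = embedding_dims

    hp = {'embedding_dims': 256, 'triplet_margins': (1.5, 1.5)}

    model_hp = hp.copy()                   # leave the shared config untouched
    model_hp.pop('triplet_margins', None)  # consumed by the loss instead
    net = PartNet(3, **model_hp)           # no unexpected-keyword TypeError

Popping without the copy would silently mutate the shared hyperparameter dict, which matters whenever the same dict is reused, e.g. for checkpoints or logging.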
diff --git a/models/rgb_part_net.py b/models/rgb_part_net.py
index fc1406c..4d7ba7f 100644
--- a/models/rgb_part_net.py
+++ b/models/rgb_part_net.py
@@ -76,8 +76,8 @@ class RGBPartNet(nn.Module):
     def _disentangle(self, x_c1_t2, x_c2_t2=None):
         n, t, c, h, w = x_c1_t2.size()
         device = x_c1_t2.device
-        x_c1_t1 = x_c1_t2[:, torch.randperm(t), :, :, :]
         if self.training:
+            x_c1_t1 = x_c1_t2[:, torch.randperm(t), :, :, :]
             ((f_a_, f_c_, f_p_), losses) = self.ae(x_c1_t2, x_c1_t1, x_c2_t2)
             # Decode features
             x_c = self._decode_cano_feature(f_c_, n, t, device)
@@ -100,7 +100,8 @@ class RGBPartNet(nn.Module):
         else:  # evaluating
             f_c_, f_p_ = self.ae(x_c1_t2)
             x_c = self._decode_cano_feature(f_c_, n, t, device)
-            x_p = self._decode_pose_feature(f_p_, n, t, c, h, w, device)
+            x_p_ = self._decode_pose_feature(f_p_, n, t, c, h, w, device)
+            x_p = x_p_.view(n, t, self.pn_in_channels, self.h // 4, self.w // 4)
             return (x_c, x_p), None, None
 
     def _decode_appr_feature(self, f_a_, n, t, device):
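
Both rgb_part_net.py changes are training/evaluation plumbing: the frame-shuffled clip x_c1_t1 exists only to give the auto-encoder a second sample of the same condition during training, so building it is now skipped at evaluation time, and the evaluation branch reshapes the decoded pose features back into a per-frame layout. A sketch of the two ideas, assuming frames sit on dimension 1 and that the decoder returns a flattened (n*t, c', h/4, w/4) batch (the channel count below is hypothetical):

    import torch

    n, t, c, h, w = 2, 8, 3, 64, 32
    x_c1_t2 = torch.randn(n, t, c, h, w)

    # Training only: the same clip with its frames permuted acts as a
    # second "time" sample of condition c1.
    x_c1_t1 = x_c1_t2[:, torch.randperm(t)]

    # Evaluation: restore the (n, t, ...) layout expected downstream.
    c_ = 128                                  # stand-in for pn_in_channels
    x_p_ = torch.randn(n * t, c_, h // 4, w // 4)
    x_p = x_p_.view(n, t, c_, h // 4, w // 4)

Guarding the randperm behind self.training also avoids copying the whole clip on every inference call.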