From c52fdc2748e272a5195303299a9739291be32281 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Sun, 21 Feb 2021 19:00:30 +0800
Subject: Remove FConv blocks

---
 config.py | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

(limited to 'config.py')

diff --git a/config.py b/config.py
index 424bf5b..88ad371 100644
--- a/config.py
+++ b/config.py
@@ -49,22 +49,14 @@ config: Configuration = {
         # Auto-encoder feature channels coefficient
         'ae_feature_channels': 64,
         # Appearance, canonical and pose feature dimensions
-        'f_a_c_p_dims': (128, 128, 64),
+        'f_a_c_p_dims': (192, 192, 96),
         # Use 1x1 convolution in dimensionality reduction
         'hpm_use_1x1conv': False,
         # HPM pyramid scales, of which sum is number of parts
         'hpm_scales': (1, 2, 4),
         # Global pooling method
         'hpm_use_avg_pool': True,
-        'hpm_use_max_pool': False,
-        # FConv feature channels coefficient
-        'fpfe_feature_channels': 32,
-        # FConv blocks kernel sizes
-        'fpfe_kernel_sizes': ((5, 3), (3, 3), (3, 3)),
-        # FConv blocks paddings
-        'fpfe_paddings': ((2, 1), (1, 1), (1, 1)),
-        # FConv blocks halving
-        'fpfe_halving': (0, 2, 3),
+        'hpm_use_max_pool': True,
         # Attention squeeze ratio
         'tfa_squeeze_ratio': 4,
         # Number of parts after Part Net
--
cgit v1.2.3

From 9001f7e13d8985b220bd218d8de716bc586dbdcf Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Fri, 26 Feb 2021 20:17:03 +0800
Subject: Update default config

---
 config.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'config.py')

diff --git a/config.py b/config.py
index 88ad371..03f2f0d 100644
--- a/config.py
+++ b/config.py
@@ -37,7 +37,7 @@ config: Configuration = {
         # Batch size (pr, k)
         # `pr` denotes number of persons
         # `k` denotes number of sequences per person
-        'batch_size': (4, 8),
+        'batch_size': (4, 6),
         # Number of workers of Dataloader
         'num_workers': 4,
         # Faster data transfer from RAM to GPU if enabled
@@ -64,7 +64,7 @@ config: Configuration = {
         # Embedding dimension for each part
         'embedding_dims': 256,
         # Triplet loss margins for HPM and PartNet
-        'triplet_margins': (0.2, 0.2),
+        'triplet_margins': (1.5, 1.5),
     },
     'optimizer': {
         # Global parameters
@@ -83,15 +83,15 @@ config: Configuration = {
         # 'amsgrad': False,
 
         # Local parameters (override global ones)
-        'auto_encoder': {
-            'weight_decay': 0.001
-        },
+        # 'auto_encoder': {
+        #     'weight_decay': 0.001
+        # },
     },
     'scheduler': {
         # Period of learning rate decay
         'step_size': 500,
         # Multiplicative factor of decay
-        'gamma': 0.9,
+        'gamma': 1,
     }
 },
 # Model metadata
--
cgit v1.2.3

From 46391257ff50848efa1aa251ab3f15dc8b7a2d2c Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Sat, 27 Feb 2021 22:14:21 +0800
Subject: Implement Batch Hard triplet loss and soft margin

---
 config.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'config.py')

diff --git a/config.py b/config.py
index 03f2f0d..f76cea5 100644
--- a/config.py
+++ b/config.py
@@ -63,8 +63,10 @@ config: Configuration = {
         'tfa_num_parts': 16,
         # Embedding dimension for each part
         'embedding_dims': 256,
-        # Triplet loss margins for HPM and PartNet
-        'triplet_margins': (1.5, 1.5),
+        # Batch Hard or Batch All
+        'triplet_is_hard': True,
+        # Triplet loss margins for HPM and PartNet, None for soft margin
+        'triplet_margins': None,
     },
     'optimizer': {
         # Global parameters
--
cgit v1.2.3
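
The two options added by the commit above switch the losses to Batch Hard mining with an optional soft margin (`triplet_margins: None`). The loss module itself is not part of this log, so the snippet below is only a minimal sketch of the idea; the function name, tensor shapes, and the Euclidean distance choice are assumptions, not the repository's actual API.

import torch
import torch.nn.functional as F


def batch_hard_triplet_loss(embeddings, labels, margin=None):
    """Batch Hard triplet loss; margin=None selects the soft-margin variant.

    embeddings: (n, d) float tensor; labels: (n,) integer tensor.
    Returns per-anchor losses so the reduction can be chosen separately.
    """
    dist = torch.cdist(embeddings, embeddings)             # pairwise distances
    positive = labels.unsqueeze(0) == labels.unsqueeze(1)  # same-identity mask
    # Hardest positive: the farthest sample sharing the anchor's identity
    hardest_pos = (dist * positive).max(dim=1).values
    # Hardest negative: the closest sample with a different identity
    hardest_neg = dist.masked_fill(positive, float('inf')).min(dim=1).values
    if margin is None:
        return F.softplus(hardest_pos - hardest_neg)       # soft margin
    return F.relu(hardest_pos - hardest_neg + margin)      # hinge with margin
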
From c96a6c88fa63d62ec62807abf957c9a8df307b43 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Sun, 28 Feb 2021 22:13:43 +0800
Subject: Modify default parameters

1. Change ReLU to Leaky ReLU in decoder
2. Add 8-scale-pyramid in HPM
---
 config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'config.py')

diff --git a/config.py b/config.py
index f76cea5..9072982 100644
--- a/config.py
+++ b/config.py
@@ -53,7 +53,7 @@ config: Configuration = {
         # Use 1x1 convolution in dimensionality reduction
         'hpm_use_1x1conv': False,
         # HPM pyramid scales, of which sum is number of parts
-        'hpm_scales': (1, 2, 4),
+        'hpm_scales': (1, 2, 4, 8),
         # Global pooling method
         'hpm_use_avg_pool': True,
         'hpm_use_max_pool': True,
--
cgit v1.2.3

From fed5e6a9b35fda8306147e9ce772dfbf3142a061 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Sun, 28 Feb 2021 23:11:05 +0800
Subject: Implement sum of loss default in [1]
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[1] A. Hermans, L. Beyer, and B. Leibe, “In defense of the triplet loss
    for person re-identification,” arXiv preprint arXiv:1703.07737, 2017.
---
 config.py | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'config.py')

diff --git a/config.py b/config.py
index 9072982..4c108e2 100644
--- a/config.py
+++ b/config.py
@@ -65,6 +65,8 @@ config: Configuration = {
         'embedding_dims': 256,
         # Batch Hard or Batch All
         'triplet_is_hard': True,
+        # Use non-zero mean or sum
+        'triplet_is_mean': True,
         # Triplet loss margins for HPM and PartNet, None for soft margin
         'triplet_margins': None,
     },
--
cgit v1.2.3
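
The `triplet_is_mean` flag added above chooses between averaging over the non-zero (still-violating) terms and plain summation over all terms, the default analysed in [1]. Continuing the assumptions of the earlier sketch (per-anchor losses as a 1-D tensor), the reduction could look like the following; the function name is hypothetical.

import torch


def reduce_triplet_losses(losses: torch.Tensor, is_mean: bool) -> torch.Tensor:
    """Reduce per-anchor triplet losses to a scalar."""
    if is_mean:
        # Non-zero mean: average only over anchors still violating the margin
        num_active = (losses > 0).sum().clamp(min=1)
        return losses.sum() / num_active
    # Plain sum over all terms
    return losses.sum()
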
From 6002b2d2017912f90e8917e6e8b71b78ce58e7c2 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Mon, 1 Mar 2021 18:20:38 +0800
Subject: New scheduler and new config

---
 config.py | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

(limited to 'config.py')

diff --git a/config.py b/config.py
index 4c108e2..e70c2bd 100644
--- a/config.py
+++ b/config.py
@@ -72,8 +72,6 @@ config: Configuration = {
     },
     'optimizer': {
         # Global parameters
-        # Iteration start to optimize non-disentangling parts
-        # 'start_iter': 0,
         # Initial learning rate of Adam Optimizer
         'lr': 1e-4,
         # Coefficients used for computing running averages of
@@ -87,15 +85,15 @@ config: Configuration = {
         # 'amsgrad': False,
 
         # Local parameters (override global ones)
-        # 'auto_encoder': {
-        #     'weight_decay': 0.001
-        # },
+        'auto_encoder': {
+            'weight_decay': 0.001
+        },
     },
     'scheduler': {
-        # Period of learning rate decay
-        'step_size': 500,
-        # Multiplicative factor of decay
-        'gamma': 1,
+        # Step start to decay
+        'start_step': 15_000,
+        # Multiplicative factor of decay in the end
+        'final_gamma': 0.001,
     }
 },
 # Model metadata
@@ -109,6 +107,6 @@ config: Configuration = {
     # Restoration iteration (multiple models, e.g. nm, bg and cl)
     'restore_iters': (0, 0, 0),
     # Total iteration for training (multiple models)
-    'total_iters': (80_000, 80_000, 80_000),
+    'total_iters': (25_000, 25_000, 25_000),
 },
 }
--
cgit v1.2.3

From 0527c5b657c7b4fdfd7d57bf9bc5334eac480731 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Wed, 3 Mar 2021 10:04:38 +0800
Subject: Add L2 penalty to global

---
 config.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'config.py')

diff --git a/config.py b/config.py
index e70c2bd..d6de788 100644
--- a/config.py
+++ b/config.py
@@ -80,14 +80,14 @@ config: Configuration = {
         # Term added to the denominator
         # 'eps': 1e-8,
         # Weight decay (L2 penalty)
-        # 'weight_decay': 0,
+        'weight_decay': 0.001,
         # Use AMSGrad or not
         # 'amsgrad': False,
 
         # Local parameters (override global ones)
-        'auto_encoder': {
-            'weight_decay': 0.001
-        },
+        # 'auto_encoder': {
+        #     'weight_decay': 0.001
+        # },
     },
     'scheduler': {
         # Step start to decay
--
cgit v1.2.3
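
The optimizer section's split between "Global parameters" and "Local parameters (override global ones)" maps naturally onto PyTorch parameter groups, where group-level settings override the optimizer-wide defaults. The sketch below only illustrates that mechanism; the sub-module names are stand-ins, not the repository's actual attributes.

import torch
from torch import nn

# Placeholder model whose sub-module names mirror the config keys (assumed)
model = nn.ModuleDict({
    'auto_encoder': nn.Linear(128, 128),
    'hpm': nn.Linear(128, 256),
    'part_net': nn.Linear(128, 256),
})

global_hp = {'lr': 1e-4, 'weight_decay': 0.001}   # global parameters
local_hp = {
    # 'auto_encoder': {'weight_decay': 0.001},    # example local override
}

# One parameter group per sub-module; group-level keys override the defaults
param_groups = [
    {'params': module.parameters(), **local_hp.get(name, {})}
    for name, module in model.named_children()
]
optimizer = torch.optim.Adam(param_groups, **global_hp)
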
From c74df416b00f837ba051f3947be92f76e7afbd88 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Fri, 12 Mar 2021 13:56:17 +0800
Subject: Code refactoring

1. Separate FCs and triplet losses for HPM and PartNet
2. Remove FC-equivalent 1x1 conv layers in HPM
3. Support adjustable learning rate schedulers
---
 config.py | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

(limited to 'config.py')

diff --git a/config.py b/config.py
index d6de788..8abeba3 100644
--- a/config.py
+++ b/config.py
@@ -50,19 +50,17 @@ config: Configuration = {
         'ae_feature_channels': 64,
         # Appearance, canonical and pose feature dimensions
         'f_a_c_p_dims': (192, 192, 96),
-        # Use 1x1 convolution in dimensionality reduction
-        'hpm_use_1x1conv': False,
         # HPM pyramid scales, of which sum is number of parts
         'hpm_scales': (1, 2, 4, 8),
         # Global pooling method
         'hpm_use_avg_pool': True,
         'hpm_use_max_pool': True,
-        # Attention squeeze ratio
-        'tfa_squeeze_ratio': 4,
         # Number of parts after Part Net
         'tfa_num_parts': 16,
-        # Embedding dimension for each part
-        'embedding_dims': 256,
+        # Attention squeeze ratio
+        'tfa_squeeze_ratio': 4,
+        # Embedding dimensions for each part
+        'embedding_dims': (256, 256),
         # Batch Hard or Batch All
         'triplet_is_hard': True,
@@ -91,9 +89,14 @@ config: Configuration = {
     },
     'scheduler': {
         # Step start to decay
-        'start_step': 15_000,
+        'start_step': 500,
         # Multiplicative factor of decay in the end
-        'final_gamma': 0.001,
+        'final_gamma': 0.01,
+
+        # Local parameters (override global ones)
+        'hpm': {
+            'final_gamma': 0.001
+        }
     }
 },
 # Model metadata
@@ -109,6 +112,6 @@ config: Configuration = {
     # Restoration iteration (multiple models, e.g. nm, bg and cl)
     'restore_iters': (0, 0, 0),
     # Total iteration for training (multiple models)
-    'total_iters': (25_000, 25_000, 25_000),
+    'total_iters': (30_000, 40_000, 60_000),
 },
 }
--
cgit v1.2.3

From da922be042d96338a3f207386e410b6746d046f5 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Sun, 14 Mar 2021 21:07:28 +0800
Subject: Bug fix when transforming and new config

---
 config.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'config.py')

diff --git a/config.py b/config.py
index 8abeba3..c928067 100644
--- a/config.py
+++ b/config.py
@@ -94,9 +94,9 @@ config: Configuration = {
         'final_gamma': 0.01,
 
         # Local parameters (override global ones)
-        'hpm': {
-            'final_gamma': 0.001
-        }
+        # 'hpm': {
+        #     'final_gamma': 0.001
+        # }
     }
 },
 # Model metadata
--
cgit v1.2.3

From b6e5972b64cc61fc967cf3d098fc629d781adce4 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Mon, 22 Mar 2021 19:32:16 +0800
Subject: Add embedding visualization and validate on testing set

---
 config.py | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'config.py')

diff --git a/config.py b/config.py
index c928067..3d98263 100644
--- a/config.py
+++ b/config.py
@@ -19,6 +19,8 @@ config: Configuration = {
         'root_dir': 'data/CASIA-B-MRCNN-V2/SEG',
         # The number of subjects for training
         'train_size': 74,
+        # The number of subjects for validating (Part of testing set)
+        'val_size': 10,
         # Number of sampled frames per sequence (Training only)
         'num_sampled_frames': 30,
         # Truncate clips longer than `truncate_threshold`
--
cgit v1.2.3

From 5a063855dbecb8f1a86ad25d9e61a9c8b63312b3 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Thu, 25 Mar 2021 12:23:23 +0800
Subject: Bug fixes and refactoring

1. Correct trained model signature
2. Move `val_size` to system config
---
 config.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'config.py')

diff --git a/config.py b/config.py
index 3d98263..66eab98 100644
--- a/config.py
+++ b/config.py
@@ -9,7 +9,9 @@ config: Configuration = {
         # Directory used in training or testing for temporary storage
         'save_dir': 'runs',
         # Recorde disentangled image or not
-        'image_log_on': False
+        'image_log_on': False,
+        # The number of subjects for validating (Part of testing set)
+        'val_size': 10,
     },
     # Dataset settings
     'dataset': {
@@ -19,8 +21,6 @@ config: Configuration = {
         'root_dir': 'data/CASIA-B-MRCNN-V2/SEG',
         # The number of subjects for training
         'train_size': 74,
-        # The number of subjects for validating (Part of testing set)
-        'val_size': 10,
         # Number of sampled frames per sequence (Training only)
         'num_sampled_frames': 30,
         # Truncate clips longer than `truncate_threshold`
--
cgit v1.2.3
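
Several of the commits above replace the scheduler's `step_size`/`gamma` pair with `start_step` and `final_gamma`. One plausible realization, sketched below with torch.optim.lr_scheduler.LambdaLR, holds the learning rate constant until `start_step` and then decays it exponentially so that it reaches `final_gamma` times the initial value at the last iteration; the exact decay curve used by the repository is not shown in this log, so treat this as an assumption. Per-module overrides such as the 'hpm' block would correspond to passing one such lambda per parameter group.

import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR

start_step, final_gamma = 500, 0.01   # values from the config above
total_steps = 30_000                  # first entry of 'total_iters'

model = nn.Linear(128, 256)           # placeholder module
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)


def lr_lambda(step: int) -> float:
    """Multiplier on the base lr: 1.0 until start_step, then exponential
    decay reaching final_gamma at total_steps."""
    if step <= start_step:
        return 1.0
    progress = (step - start_step) / (total_steps - start_step)
    return final_gamma ** min(progress, 1.0)


scheduler = LambdaLR(optimizer, lr_lambda)
# Typical usage: call scheduler.step() once per training iteration,
# after optimizer.step().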