-rw-r--r--  config.py              | 71
-rw-r--r--  models/model.py        | 16
-rw-r--r--  test/model.py          | 29
-rw-r--r--  utils/configuration.py |  0
4 files changed, 74 insertions(+), 42 deletions(-)
diff --git a/config.py b/config.py
index cbe5e07..cd36cc5 100644
--- a/config.py
+++ b/config.py
@@ -39,35 +39,48 @@ config = {
},
# Hyperparameter tuning
'hyperparameter': {
- # Auto-encoder feature channels coefficient
- 'ae_feature_channels': 64,
- # Appearance, canonical and pose feature dimensions
- 'f_a_c_p_dims': (128, 128, 64),
- # HPM pyramid scales, whose sum is the number of parts
- 'hpm_scales': (1, 2, 4),
- # Global pooling method
- 'hpm_use_avg_pool': True,
- 'hpm_use_max_pool': True,
- # FConv feature channels coefficient
- 'fpfe_feature_channels': 32,
- # FConv blocks kernel sizes
- 'fpfe_kernel_sizes': ((5, 3), (3, 3), (3, 3)),
- # FConv blocks paddings
- 'fpfe_paddings': ((2, 1), (1, 1), (1, 1)),
- # FConv blocks halving
- 'fpfe_halving': (0, 2, 3),
- # Attention squeeze ratio
- 'tfa_squeeze_ratio': 4,
- # Number of parts after Part Net
- 'tfa_num_parts': 16,
- # Embedding dimension for each part
- 'embedding_dims': 256,
- # Triplet loss margin
- 'triplet_margin': 0.2,
- # Initial learning rate of Adam Optimizer
- 'lr': 1e-4,
- # Betas of Adam Optimizer
- 'betas': (0.9, 0.999),
+ 'model': {
+ # Auto-encoder feature channels coefficient
+ 'ae_feature_channels': 64,
+ # Appearance, canonical and pose feature dimensions
+ 'f_a_c_p_dims': (128, 128, 64),
+ # HPM pyramid scales, whose sum is the number of parts
+ 'hpm_scales': (1, 2, 4),
+ # Global pooling method
+ 'hpm_use_avg_pool': True,
+ 'hpm_use_max_pool': True,
+ # FConv feature channels coefficient
+ 'fpfe_feature_channels': 32,
+ # FConv blocks kernel sizes
+ 'fpfe_kernel_sizes': ((5, 3), (3, 3), (3, 3)),
+ # FConv blocks paddings
+ 'fpfe_paddings': ((2, 1), (1, 1), (1, 1)),
+ # FConv blocks halving
+ 'fpfe_halving': (0, 2, 3),
+ # Attention squeeze ratio
+ 'tfa_squeeze_ratio': 4,
+ # Number of parts after Part Net
+ 'tfa_num_parts': 16,
+ # Embedding dimension for each part
+ 'embedding_dims': 256,
+ # Triplet loss margin
+ 'triplet_margin': 0.2,
+ },
+ 'optimizer': {
+ # Initial learning rate of Adam Optimizer
+ 'lr': 1e-4,
+ # Coefficients used for computing running averages of
+ # gradient and its square
+ 'betas': (0.9, 0.999),
+ # Weight decay (L2 penalty)
+ 'weight_decay': 0.001,
+ },
+ 'scheduler': {
+ # Period of learning rate decay
+ 'step_size': 500,
+ # Multiplicative factor of decay
+ 'gamma': 0.9,
+ }
},
# Model metadata
'model': {
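
The new 'optimizer' and 'scheduler' sub-dicts mirror the keyword arguments of
torch.optim.Adam (lr, betas, weight_decay) and torch.optim.lr_scheduler.StepLR
(step_size, gamma), so each can be splatted straight into the matching
constructor. A minimal sketch of that wiring, using an nn.Linear stand-in for
the real network:

    from torch import nn, optim

    from config import config

    hp = config['hyperparameter']
    net = nn.Linear(4, 2)  # stand-in for RGBPartNet
    # -> Adam(params, lr=1e-4, betas=(0.9, 0.999), weight_decay=0.001)
    optimizer = optim.Adam(net.parameters(), **hp['optimizer'])
    # -> StepLR(optimizer, step_size=500, gamma=0.9)
    scheduler = optim.lr_scheduler.StepLR(optimizer, **hp['scheduler'])
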
diff --git a/models/model.py b/models/model.py
index 7e4edaf..617966f 100644
--- a/models/model.py
+++ b/models/model.py
@@ -108,11 +108,12 @@ class Model:
dataset = self._parse_dataset_config(dataset_config)
dataloader = self._parse_dataloader_config(dataset, dataloader_config)
# Prepare the model, optimizer and scheduler
- hp = self.hp.copy()
- lr, betas = hp.pop('lr', 1e-4), hp.pop('betas', (0.9, 0.999))
- self.rgb_pn = RGBPartNet(self.train_size, self.in_channels, **hp)
- self.optimizer = optim.Adam(self.rgb_pn.parameters(), lr, betas)
- self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, 500, 0.9)
+ model_hp = self.hp.get('model', {})
+ optim_hp = self.hp.get('optimizer', {})
+ sched_hp = self.hp.get('scheduler', {})
+ self.rgb_pn = RGBPartNet(self.train_size, self.in_channels, **model_hp)
+ self.optimizer = optim.Adam(self.rgb_pn.parameters(), **optim_hp)
+ self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, **sched_hp)
self.writer = SummaryWriter(self._log_name)
# Try to accelerate computation using CUDA or other devices
self._accelerate()
@@ -197,9 +198,8 @@ class Model:
iter_, dataset_config, dataset_selectors
)
# Init models
- hp = self.hp.copy()
- hp.pop('lr'), hp.pop('betas')
- self.rgb_pn = RGBPartNet(ae_in_channels=self.in_channels, **hp)
+ model_hp = self.hp.get('model', {})
+ self.rgb_pn = RGBPartNet(ae_in_channels=self.in_channels, **model_hp)
# Try to accelerate computation using CUDA or other devices
self._accelerate()
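
Unlike the old copy()/pop() approach, dict.get with a default leaves self.hp
untouched and tolerates a missing section. One caveat, sketched with plain
dicts rather than project code: Adam has defaults for every keyword, but
StepLR's step_size does not, so an absent 'scheduler' section still fails at
construction time instead of falling back silently.

    hp = {'model': {'embedding_dims': 256}}  # no optimizer/scheduler sections
    model_hp = hp.get('model', {})      # {'embedding_dims': 256}
    optim_hp = hp.get('optimizer', {})  # {} -> Adam falls back to its defaults
    sched_hp = hp.get('scheduler', {})  # {} -> StepLR(optimizer, **sched_hp)
    # would raise TypeError: step_size has no default
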
diff --git a/test/model.py b/test/model.py
index f679908..5d60475 100644
--- a/test/model.py
+++ b/test/model.py
@@ -2,20 +2,39 @@ import os
from config import config
from models.model import Model
+from utils.dataset import ClipConditions
conf = config
+os.chdir('..')
def test_default_signature():
- os.chdir('..')
model = Model(conf['system'], conf['model'], conf['hyperparameter'])
casiab = model._parse_dataset_config(conf['dataset'])
model._parse_dataloader_config(casiab, conf['dataloader'])
assert model._log_name == os.path.join(
'runs', 'logs', 'RGB-GaitPart_80000_64_128_128_64_1_2_4_True_True_32_5_'
'3_3_3_3_3_2_1_1_1_1_1_0_2_3_4_16_256_0.2_0.0001_0.9_'
- '0.999_CASIA-B_74_30_15_3_64_32_8_16')
+ '0.999_0.001_500_0.9_CASIA-B_74_30_15_3_64_32_8_16')
assert model._signature == ('RGB-GaitPart_80000_0_64_128_128_64_1_2_4_True_'
- 'True_32_5_3_3_3_3_3_2_1_1_1_1_1_0_2_3_4_16_256_'
- '0.2_0.0001_0.9_0.999_CASIA-B_74_30_15_3_64_32_'
- '8_16')
+ 'True_32_5_3_3_3_3_3_2_1_1_1_1_1_0_2_3_4_16_'
+ '256_0.2_0.0001_0.9_0.999_0.001_500_0.9_CASIA-B'
+ '_74_30_15_3_64_32_8_16')
+
+
+def test_default_signature_with_selector():
+ model = Model(conf['system'], conf['model'], conf['hyperparameter'])
+ casiab = model._parse_dataset_config(dict(
+ **conf['dataset'],
+ **{'selector': {'conditions': ClipConditions({r'nm-0\d', r'bg-0\d'})}}
+ ))
+ model._parse_dataloader_config(casiab, conf['dataloader'])
+ assert model._log_name == os.path.join(
+ 'runs', 'logs', 'RGB-GaitPart_80000_64_128_128_64_1_2_4_True_True_32_5_'
+ '3_3_3_3_3_2_1_1_1_1_1_0_2_3_4_16_256_0.2_0.0001_0.9_'
+ '0.999_0.001_500_0.9_CASIA-B_74_30_15_3_64_32_bg-0\\d_'
+ 'nm-0\\d_8_16')
+ assert model._signature == ('RGB-GaitPart_80000_0_64_128_128_64_1_2_4_True_'
+ 'True_32_5_3_3_3_3_3_2_1_1_1_1_1_0_2_3_4_16_'
+ '256_0.2_0.0001_0.9_0.999_0.001_500_0.9_CASIA-B'
+ '_74_30_15_3_64_32_bg-0\\d_nm-0\\d_8_16')
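
The expected strings gain '0.001_500_0.9' between '0.999' and 'CASIA-B': the
flattened weight_decay, step_size and gamma added above. The signature builder
itself is not part of this diff, so the helper below is only a hypothetical
illustration of that flattening:

    def flatten(obj):
        # Depth-first stringification of nested dicts and tuples
        # (hypothetical; the real _signature builder may differ).
        if isinstance(obj, dict):
            return [s for v in obj.values() for s in flatten(v)]
        if isinstance(obj, (tuple, list)):
            return [s for v in obj for s in flatten(v)]
        return [str(obj)]

    print('_'.join(flatten({
        'optimizer': {'lr': 1e-4, 'betas': (0.9, 0.999), 'weight_decay': 0.001},
        'scheduler': {'step_size': 500, 'gamma': 0.9},
    })))  # -> 0.0001_0.9_0.999_0.001_500_0.9
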
diff --git a/utils/configuration.py b/utils/configuration.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/utils/configuration.py