| author    | Jordan Gong <jordan.gong@protonmail.com> | 2021-03-25 12:23:35 +0800 |
|-----------|------------------------------------------|---------------------------|
| committer | Jordan Gong <jordan.gong@protonmail.com> | 2021-03-25 12:23:35 +0800 |
| commit    | f065f227b0bcdac61db9240f7df6ea2b748a89b7 (patch) | |
| tree      | 9ad619fcf1205b3ab438840f4ded1d1455e39ff3 | |
| parent    | 89d702fb2adb588864be489e3d53ffa2746a52af (diff) | |
| parent    | 5a063855dbecb8f1a86ad25d9e61a9c8b63312b3 (diff) | |
Merge branch 'master' into python3.8
| -rw-r--r-- | config.py              | 6 |
| -rw-r--r-- | models/model.py        | 8 |
| -rw-r--r-- | utils/configuration.py | 2 |
3 files changed, 8 insertions, 8 deletions
```diff
diff --git a/config.py b/config.py
--- a/config.py
+++ b/config.py
@@ -9,7 +9,9 @@ config: Configuration = {
         # Directory used in training or testing for temporary storage
         'save_dir': 'runs',
         # Recorde disentangled image or not
-        'image_log_on': False
+        'image_log_on': False,
+        # The number of subjects for validating (Part of testing set)
+        'val_size': 10,
     },
     # Dataset settings
     'dataset': {
@@ -19,8 +21,6 @@ config: Configuration = {
         'root_dir': 'data/CASIA-B-MRCNN-V2/SEG',
         # The number of subjects for training
         'train_size': 74,
-        # The number of subjects for validating (Part of testing set)
-        'val_size': 10,
         # Number of sampled frames per sequence (Training only)
         'num_sampled_frames': 30,
         # Truncate clips longer than `truncate_threshold`
diff --git a/models/model.py b/models/model.py
index be2ddcb..b2947a0 100644
--- a/models/model.py
+++ b/models/model.py
@@ -79,6 +79,7 @@ class Model:
         self.scheduler: Optional[optim.lr_scheduler.StepLR] = None
         self.writer: Optional[SummaryWriter] = None
         self.image_log_on = system_config.get('image_log_on', False)
+        self.val_size = system_config.get('val_size', 10)
 
         self.CASIAB_GALLERY_SELECTOR = {
             'selector': {'conditions': ClipConditions({r'nm-0[1-4]'})}
@@ -147,13 +148,12 @@ class Model:
         self.is_train = True
         # Validation dataset
         # (the first `val_size` subjects from evaluation set)
-        val_size = dataset_config.pop('val_size', 10)
         val_dataset_config = copy.deepcopy(dataset_config)
         train_size = dataset_config.get('train_size', 74)
-        val_dataset_config['train_size'] = train_size + val_size
+        val_dataset_config['train_size'] = train_size + self.val_size
         val_dataset_config['selector']['classes'] = ClipClasses({
             str(c).zfill(3)
-            for c in range(train_size + 1, train_size + val_size + 1)
+            for c in range(train_size + 1, train_size + self.val_size + 1)
         })
         val_dataset = self._parse_dataset_config(val_dataset_config)
         val_dataloader = iter(self._parse_dataloader_config(
@@ -569,7 +569,7 @@ class Model:
         for (iter_, total_iter, (condition, selector)) in zip(
                 iters, self.total_iters, dataset_selectors.items()
         ):
-            self.curr_iter = iter_
+            self.curr_iter = iter_ - 1
             self.total_iter = total_iter
             self._dataset_sig = self._make_signature(
                 dict(**dataset_config, **selector),
diff --git a/utils/configuration.py b/utils/configuration.py
index 959791b..8dcae07 100644
--- a/utils/configuration.py
+++ b/utils/configuration.py
@@ -8,13 +8,13 @@ class SystemConfiguration(TypedDict):
     CUDA_VISIBLE_DEVICES: str
     save_dir: str
     image_log_on: bool
+    val_size: int
 
 
 class DatasetConfiguration(TypedDict):
     name: str
     root_dir: str
     train_size: int
-    val_size: int
     num_sampled_frames: int
     truncate_threshold: int
     discard_threshold: int
```
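For context on what the relocated option controls (this note is not part of the commit): the validation split is taken as the first `val_size` subjects immediately after the `train_size` training subjects, formatted as three-digit CASIA-B labels. A minimal standalone sketch of that selection, using the default values visible in the diff (`train_size = 74`, `val_size = 10`):

```python
# Standalone sketch (not repository code): how `train_size` and `val_size`
# determine the validation classes after this change.
train_size = 74  # 'train_size' default from the 'dataset' section
val_size = 10    # 'val_size', now read from the system config

# The first `val_size` subjects of the evaluation set, i.e. the subjects
# right after the training split, zero-padded to three digits like CASIA-B labels.
val_classes = {
    str(c).zfill(3)
    for c in range(train_size + 1, train_size + val_size + 1)
}
print(sorted(val_classes))  # ['075', '076', ..., '084']
```

One apparent effect of moving the key out of the dataset configuration is that `val_size` no longer enters the dataset signature built from `dict(**dataset_config, **selector)`; that reading is inferred from the diff rather than stated in the commit message.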