From 7b5ff0a8dd2406af74776f31b1e2afabf9fadacd Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Fri, 22 Jan 2021 18:12:41 +0800
Subject: Handle unexpected restore iter

1. Skip finished models before loading them
2. Raise an error when the restore iter is greater than the total iter
---
 models/model.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/models/model.py b/models/model.py
index 3b54363..1a62bae 100644
--- a/models/model.py
+++ b/models/model.py
@@ -116,6 +116,13 @@ class Model:
                 self.curr_iters, self.total_iters, dataset_selectors.items()
         ):
             print(f'Training model {condition} ...')
+            # Skip finished model
+            if curr_iter == total_iter:
+                continue
+            # Check invalid restore iter
+            elif curr_iter > total_iter:
+                raise ValueError("Restore iter '{}' should be less than "
+                                 "total iter '{}'".format(curr_iter, total_iter))
             self.curr_iter = curr_iter
             self.total_iter = total_iter
             self.fit(
--
cgit v1.2.3


From f3321b6782aff20df2c029239cc0f8b9a59ffde7 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Sat, 23 Jan 2021 00:16:58 +0800
Subject: Evaluation bug fixes and code review

1. Return full cached clip in evaluation
2. Add multi-iter checkpoint support in evaluation
3. Remove duplicated code in the transform loops
---
 eval.py          |  2 +-
 models/model.py  | 53 +++++++++++++++++++++++++----------------------------
 utils/dataset.py |  3 +++
 3 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/eval.py b/eval.py
index 7b68220..ec48066 100644
--- a/eval.py
+++ b/eval.py
@@ -14,7 +14,7 @@ dataset_selectors = {
     'cl': {'conditions': ClipConditions({r'nm-0\d', r'cl-0\d'})},
 }
 
-accuracy = model.predict_all(config['model']['total_iter'], config['dataset'],
+accuracy = model.predict_all(config['model']['total_iters'], config['dataset'],
                              dataset_selectors, config['dataloader'])
 rank = 5
 np.set_printoptions(formatter={'float': '{:5.2f}'.format})
diff --git a/models/model.py b/models/model.py
index 1a62bae..6b799ad 100644
--- a/models/model.py
+++ b/models/model.py
@@ -220,7 +220,7 @@ class Model:
 
     def predict_all(
             self,
-            iter_: int,
+            iters: tuple[int],
             dataset_config: DatasetConfiguration,
             dataset_selectors: dict[
                 str, dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
@@ -234,7 +234,7 @@ class Model:
         )
         # Get pretrained models at iter_
         checkpoints = self._load_pretrained(
-            iter_, dataset_config, dataset_selectors
+            iters, dataset_config, dataset_selectors
        )
         # Init models
         model_hp = self.hp.get('model', {})
@@ -250,37 +250,32 @@ class Model:
             self.rgb_pn.load_state_dict(checkpoint['model_state_dict'])
             for sample in tqdm(gallery_dataloader,
                                desc='Transforming gallery', unit='clips'):
-                label = sample.pop('label').item()
-                clip = sample.pop('clip').to(self.device)
-                feature = self.rgb_pn(clip).detach()
-                gallery_samples.append({
-                    **{'label': label},
-                    **sample,
-                    **{'feature': feature}
-                })
+                gallery_samples.append(self._get_eval_sample(sample))
         gallery_samples = default_collate(gallery_samples)
 
         # Probe
         for (condition, dataloader) in probe_dataloaders.items():
             checkpoint = torch.load(checkpoints[condition])
             self.rgb_pn.load_state_dict(checkpoint['model_state_dict'])
-            probe_samples[condition] = []
+            probe_samples_c = []
             for sample in tqdm(dataloader,
                                desc=f'Transforming probe {condition}',
                                unit='clips'):
-                label = sample.pop('label').item()
-                clip = sample.pop('clip').to(self.device)
-                feature = self.rgb_pn(clip).detach()
-                probe_samples[condition].append({
-                    **{'label': label},
-                    **sample,
-                    **{'feature': feature}
-                })
-        for (k, v) in probe_samples.items():
-            probe_samples[k] = default_collate(v)
+                probe_samples_c.append(self._get_eval_sample(sample))
+            probe_samples[condition] = default_collate(probe_samples_c)
 
         return self._evaluate(gallery_samples, probe_samples)
 
+    def _get_eval_sample(self, sample: dict[str, Union[list, torch.Tensor]]):
+        label = sample.pop('label').item()
+        clip = sample.pop('clip').to(self.device)
+        feature = self.rgb_pn(clip).detach()
+        return {
+            **{'label': label},
+            **sample,
+            **{'feature': feature}
+        }
+
     def _evaluate(
             self,
             gallery_samples: dict[str, Union[list[str], torch.Tensor]],
@@ -331,20 +326,22 @@ class Model:
 
     def _load_pretrained(
             self,
-            iter_: int,
+            iters: tuple[int],
             dataset_config: DatasetConfiguration,
             dataset_selectors: dict[
                 str, dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
             ]
     ) -> dict[str, str]:
         checkpoints = {}
-        self.curr_iter = iter_
-        for (k, v) in dataset_selectors.items():
+        for (iter_, (condition, selector)) in zip(
+                iters, dataset_selectors.items()
+        ):
+            self.curr_iter = self.total_iter = iter_
             self._dataset_sig = self._make_signature(
-                dict(**dataset_config, **v),
+                dict(**dataset_config, **selector),
                 popped_keys=['root_dir', 'cache_on']
             )
-            checkpoints[k] = self._checkpoint_name
+            checkpoints[condition] = self._checkpoint_name
         return checkpoints
 
     def _split_gallery_probe(
@@ -372,10 +369,10 @@ class Model:
                 for (condition, dataset) in probe_datasets.items()
             }
             probe_dataloaders = {
-                condtion: self._parse_dataloader_config(
+                condition: self._parse_dataloader_config(
                     dataset, dataloader_config
                 )
-                for (condtion, dataset) in probe_datasets.items()
+                for (condition, dataset) in probe_datasets.items()
             }
         elif dataset_name == 'FVG':
             # TODO
diff --git a/utils/dataset.py b/utils/dataset.py
index cd8b0f1..bbd42c3 100644
--- a/utils/dataset.py
+++ b/utils/dataset.py
@@ -201,6 +201,9 @@ class CASIAB(data.Dataset):
                 self._cached_clips[clip_name] = clip
             else:  # Load cache
                 cached_clip = self._cached_clips[clip_name]
+            # Return full clips while evaluating
+            if not self._is_train:
+                return cached_clip
             cached_clip_frame_names \
                 = self._cached_clips_frame_names[clip_path]
             # Index the original clip via sampled frame names
--
cgit v1.2.3


From a040400d7caa267d4bfbe8e5520568806f92b3d4 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Sat, 23 Jan 2021 00:43:20 +0800
Subject: Type hint fixes

---
 models/model.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/models/model.py b/models/model.py
index 8992914..1dd195e 100644
--- a/models/model.py
+++ b/models/model.py
@@ -220,7 +220,7 @@ class Model:
 
     def predict_all(
             self,
-            iters: tuple[int],
+            iters: Tuple[int],
             dataset_config: DatasetConfiguration,
             dataset_selectors: Dict[
                 str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
@@ -266,7 +266,7 @@ class Model:
 
         return self._evaluate(gallery_samples, probe_samples)
 
-    def _get_eval_sample(self, sample: dict[str, Union[list, torch.Tensor]]):
+    def _get_eval_sample(self, sample: Dict[str, Union[List, torch.Tensor]]):
         label = sample.pop('label').item()
         clip = sample.pop('clip').to(self.device)
         feature = self.rgb_pn(clip).detach()
@@ -326,7 +326,7 @@ class Model:
 
     def _load_pretrained(
             self,
-            iters: tuple[int],
+            iters: Tuple[int],
             dataset_config: DatasetConfiguration,
             dataset_selectors: Dict[
                 str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
--
cgit v1.2.3
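
A minimal, self-contained sketch of the restore-iter guard added in the first
patch. Everything here is invented for illustration: the tuples stand in for
self.curr_iters / self.total_iters from the config, and the real loop resumes
training via self.fit() where the trailing comment sits.

    # Hypothetical per-condition restore and total iteration counts
    curr_iters = (80000, 80000, 64000)
    total_iters = (80000, 80000, 80000)
    conditions = ('nm', 'bg', 'cl')

    for (curr_iter, total_iter, condition) in zip(
            curr_iters, total_iters, conditions
    ):
        print(f'Training model {condition} ...')
        # Skip finished model: its checkpoint already reached the total iter
        if curr_iter == total_iter:
            continue
        # Reject a restore iteration beyond the configured total
        elif curr_iter > total_iter:
            raise ValueError("Restore iter '{}' should be less than "
                             "total iter '{}'".format(curr_iter, total_iter))
        # ... training would resume from curr_iter here ...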
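
And a hedged sketch of the per-condition checkpoint pairing that
_load_pretrained now performs, zipping one restore iteration with each dataset
selector (this is how predict_all consumes the config['model']['total_iters']
tuple passed in eval.py). The checkpoint-name scheme below is a placeholder,
not the project's real _make_signature/_checkpoint_name logic.

    from typing import Dict, Tuple

    def load_pretrained_sketch(
            iters: Tuple[int, ...],
            dataset_selectors: Dict[str, dict]
    ) -> Dict[str, str]:
        checkpoints = {}
        # Pair each condition with its own checkpoint iteration, positionally
        for (iter_, (condition, selector)) in zip(
                iters, dataset_selectors.items()
        ):
            # Placeholder name derivation, for illustration only
            checkpoints[condition] = f'checkpoint_{condition}_{iter_}.pt'
        return checkpoints

    print(load_pretrained_sketch(
        (80000, 80000, 80000),
        {'nm': {}, 'bg': {}, 'cl': {}}
    ))

Note the pairing is positional: dicts preserve insertion order in Python 3.7+,
so the iters tuple must be ordered like the selector dict.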