Diffstat (limited to 'models')
-rw-r--r-- | models/model.py | 31
1 file changed, 14 insertions(+), 17 deletions(-)
diff --git a/models/model.py b/models/model.py
index 75e478e..a3b6d3a 100644
--- a/models/model.py
+++ b/models/model.py
@@ -13,9 +13,6 @@
 from torch.utils.tensorboard import SummaryWriter
 from tqdm import tqdm
 
 from models.rgb_part_net import RGBPartNet
-from utils.configuration import DataloaderConfiguration, \
-    HyperparameterConfiguration, DatasetConfiguration, ModelConfiguration, \
-    SystemConfiguration
 from utils.dataset import CASIAB, ClipConditions, ClipViews, ClipClasses
 from utils.sampler import TripletSampler
@@ -23,9 +20,9 @@ from utils.sampler import TripletSampler
 class Model:
     def __init__(
             self,
-            system_config: SystemConfiguration,
-            model_config: ModelConfiguration,
-            hyperparameter_config: HyperparameterConfiguration
+            system_config: Dict,
+            model_config: Dict,
+            hyperparameter_config: Dict
     ):
         self.disable_acc = system_config.get('disable_acc', False)
         if self.disable_acc:
@@ -107,11 +104,11 @@ class Model:
 
     def fit_all(
             self,
-            dataset_config: DatasetConfiguration,
+            dataset_config: Dict,
             dataset_selectors: Dict[
                 str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
             ],
-            dataloader_config: DataloaderConfiguration,
+            dataloader_config: Dict,
     ):
         for (curr_iter, total_iter, (condition, selector)) in zip(
                 self.curr_iters, self.total_iters, dataset_selectors.items()
@@ -133,8 +130,8 @@ class Model:
 
     def fit(
             self,
-            dataset_config: DatasetConfiguration,
-            dataloader_config: DataloaderConfiguration,
+            dataset_config: Dict,
+            dataloader_config: Dict,
     ):
         self.is_train = True
         dataset = self._parse_dataset_config(dataset_config)
@@ -264,11 +261,11 @@ class Model:
     def predict_all(
             self,
             iters: Tuple[int],
-            dataset_config: DatasetConfiguration,
+            dataset_config: Dict,
             dataset_selectors: Dict[
                 str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
             ],
-            dataloader_config: DataloaderConfiguration,
+            dataloader_config: Dict,
     ) -> Dict[str, torch.Tensor]:
         # Transform data to features
         gallery_samples, probe_samples = self.transform(
@@ -387,7 +384,7 @@ class Model:
     def _load_pretrained(
             self,
             iters: Tuple[int],
-            dataset_config: DatasetConfiguration,
+            dataset_config: Dict,
             dataset_selectors: Dict[
                 str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
             ]
@@ -406,8 +403,8 @@ class Model:
 
     def _split_gallery_probe(
             self,
-            dataset_config: DatasetConfiguration,
-            dataloader_config: DataloaderConfiguration,
+            dataset_config: Dict,
+            dataloader_config: Dict,
     ) -> Tuple[DataLoader, Dict[str, DataLoader]]:
         dataset_name = dataset_config.get('name', 'CASIA-B')
         if dataset_name == 'CASIA-B':
@@ -457,7 +454,7 @@ class Model:
 
     def _parse_dataset_config(
             self,
-            dataset_config: DatasetConfiguration
+            dataset_config: Dict
     ) -> Union[CASIAB]:
         self.in_channels = dataset_config.get('num_input_channels', 3)
         self.in_size = dataset_config.get('frame_size', (64, 48))
@@ -477,7 +474,7 @@ class Model:
     def _parse_dataloader_config(
             self,
             dataset: Union[CASIAB],
-            dataloader_config: DataloaderConfiguration
+            dataloader_config: Dict
     ) -> DataLoader:
         config: Dict = dataloader_config.copy()
         (self.pr, self.k) = config.pop('batch_size', (8, 16))
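
After this commit, callers pass plain dictionaries instead of the removed utils.configuration classes. Below is a minimal, hypothetical call-site sketch: the key names and defaults (disable_acc, name, num_input_channels, frame_size, batch_size) are taken from the .get()/.pop() calls visible in the diff, while the concrete values, the empty model/hyperparameter dicts, and the fit() invocation are illustrative assumptions, not part of this commit.

    from models.model import Model

    # Hypothetical usage; key names mirror the .get()/.pop() defaults in the diff.
    system_config = {'disable_acc': False}  # .get('disable_acc', False)
    model_config = {}                       # assumed: unset keys fall back to Model's defaults
    hyperparameter_config = {}              # assumed: same fallback behavior

    dataset_config = {
        'name': 'CASIA-B',         # .get('name', 'CASIA-B')
        'num_input_channels': 3,   # .get('num_input_channels', 3)
        'frame_size': (64, 48),    # .get('frame_size', (64, 48))
    }
    dataloader_config = {
        'batch_size': (8, 16),     # (pr, k) pair popped in _parse_dataloader_config
    }

    model = Model(system_config, model_config, hyperparameter_config)
    model.fit(dataset_config, dataloader_config)

The trade-off: dropping the typed configuration classes removes the utils.configuration dependency and loosens the signatures to bare Dict, while the .get(key, default) fallbacks keep every option optional at the call site.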