-rw-r--r--  config.py              |  4
-rw-r--r--  models/model.py        | 31
-rw-r--r--  utils/configuration.py | 74
-rw-r--r--  utils/misc.py          |  5
4 files changed, 17 insertions(+), 97 deletions(-)
diff --git a/config.py b/config.py
index 8a8d93a..cd36cc5 100644
--- a/config.py
+++ b/config.py
@@ -1,6 +1,4 @@
-from utils.configuration import Configuration
-
-config: Configuration = {
+config = {
'system': {
# Disable accelerator
'disable_acc': False,
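
This commit drops the TypedDict-based configuration schema (deleted in full from utils/configuration.py below) in favour of plain dict/Dict annotations. The annotation removed here only affected static type checking; at runtime the config dict is built and read exactly as before. A minimal sketch, with placeholder values and key names taken from the SystemConfiguration TypedDict deleted later in this commit:

config = {
    'system': {
        'disable_acc': False,          # disable the accelerator (run on CPU)
        'CUDA_VISIBLE_DEVICES': '0',   # placeholder device string
        'save_dir': 'runs',            # placeholder output directory
    },
    # ... 'dataset', 'dataloader', 'hyperparameter' and 'model' sections follow
}

# Consumers still read keys the same way, just without checker support
save_dir = config['system'].get('save_dir', 'runs')
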
diff --git a/models/model.py b/models/model.py
index d2e09fc..617966f 100644
--- a/models/model.py
+++ b/models/model.py
@@ -13,9 +13,6 @@ from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from models.rgb_part_net import RGBPartNet
-from utils.configuration import DataloaderConfiguration, \
- HyperparameterConfiguration, DatasetConfiguration, ModelConfiguration, \
- SystemConfiguration
from utils.dataset import CASIAB, ClipConditions, ClipViews, ClipClasses
from utils.sampler import TripletSampler
@@ -23,9 +20,9 @@ from utils.sampler import TripletSampler
class Model:
def __init__(
self,
- system_config: SystemConfiguration,
- model_config: ModelConfiguration,
- hyperparameter_config: HyperparameterConfiguration
+ system_config: Dict,
+ model_config: Dict,
+ hyperparameter_config: Dict
):
self.disable_acc = system_config['disable_acc']
if self.disable_acc:
@@ -89,11 +86,11 @@ class Model:
def fit_all(
self,
- dataset_config: DatasetConfiguration,
+ dataset_config: Dict,
dataset_selectors: Dict[
str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
],
- dataloader_config: DataloaderConfiguration,
+ dataloader_config: Dict,
):
for (condition, selector) in dataset_selectors.items():
print(f'Training model {condition} ...')
@@ -104,8 +101,8 @@ class Model:
def fit(
self,
- dataset_config: DatasetConfiguration,
- dataloader_config: DataloaderConfiguration,
+ dataset_config: Dict,
+ dataloader_config: Dict,
):
self.is_train = True
dataset = self._parse_dataset_config(dataset_config)
@@ -185,11 +182,11 @@ class Model:
def predict_all(
self,
iter_: int,
- dataset_config: DatasetConfiguration,
+ dataset_config: Dict,
dataset_selectors: Dict[
str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
],
- dataloader_config: DataloaderConfiguration,
+ dataloader_config: Dict,
) -> Dict[str, torch.Tensor]:
self.is_train = False
# Split gallery and probe dataset
@@ -296,7 +293,7 @@ class Model:
def _load_pretrained(
self,
iter_: int,
- dataset_config: DatasetConfiguration,
+ dataset_config: Dict,
dataset_selectors: Dict[
str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
]
@@ -313,8 +310,8 @@ class Model:
def _split_gallery_probe(
self,
- dataset_config: DatasetConfiguration,
- dataloader_config: DataloaderConfiguration,
+ dataset_config: Dict,
+ dataloader_config: Dict,
) -> Tuple[DataLoader, Dict[str, DataLoader]]:
dataset_name = dataset_config.get('name', 'CASIA-B')
if dataset_name == 'CASIA-B':
@@ -364,7 +361,7 @@ class Model:
def _parse_dataset_config(
self,
- dataset_config: DatasetConfiguration
+ dataset_config: Dict
) -> Union[CASIAB]:
self.train_size = dataset_config.get('train_size', 74)
self.in_channels = dataset_config.get('num_input_channels', 3)
@@ -385,7 +382,7 @@ class Model:
def _parse_dataloader_config(
self,
dataset: Union[CASIAB],
- dataloader_config: DataloaderConfiguration
+ dataloader_config: Dict
) -> DataLoader:
config: Dict = dataloader_config.copy()
(self.pr, self.k) = config.pop('batch_size')
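
For orientation, a sketch of how the constructor and fit_all above would now be fed from the top-level config; the section names come from the Configuration TypedDict deleted below, and the call site itself is assumed, not part of this diff:

from config import config
from models.model import Model

model = Model(
    system_config=config['system'],
    model_config=config['model'],
    hyperparameter_config=config['hyperparameter'],
)
model.fit_all(
    dataset_config=config['dataset'],
    dataset_selectors={},              # placeholder; real selectors map condition names to clip filters
    dataloader_config=config['dataloader'],
)
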
diff --git a/utils/configuration.py b/utils/configuration.py
index 71584c0..e69de29 100644
--- a/utils/configuration.py
+++ b/utils/configuration.py
@@ -1,74 +0,0 @@
-from typing import TypedDict, Optional, Union, Tuple, Dict
-
-from utils.dataset import ClipClasses, ClipConditions, ClipViews
-
-
-class SystemConfiguration(TypedDict):
- disable_acc: bool
- CUDA_VISIBLE_DEVICES: str
- save_dir: str
-
-
-class DatasetConfiguration(TypedDict):
- name: str
- root_dir: str
- train_size: int
- num_sampled_frames: int
- discard_threshold: int
- selector: Optional[Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]]
- num_input_channels: int
- frame_size: Tuple[int, int]
- cache_on: bool
-
-
-class DataloaderConfiguration(TypedDict):
- batch_size: Tuple[int, int]
- num_workers: int
- pin_memory: bool
-
-
-class ModelHPConfiguration(TypedDict):
- ae_feature_channels: int
- f_a_c_p_dims: Tuple[int, int, int]
- hpm_scales: Tuple[int, ...]
- hpm_use_avg_pool: bool
- hpm_use_max_pool: bool
- fpfe_feature_channels: int
- fpfe_kernel_sizes: Tuple[Tuple, ...]
- fpfe_paddings: Tuple[Tuple, ...]
- fpfe_halving: Tuple[int, ...]
- tfa_squeeze_ratio: int
- tfa_num_parts: int
- embedding_dims: int
- triplet_margin: float
-
-
-class OptimizerHPConfiguration(TypedDict):
- lr: int
- betas: Tuple[float, float]
- weight_decay: float
-
-
-class SchedulerHPConfiguration(TypedDict):
- step_size: int
- gamma: float
-
-
-class HyperparameterConfiguration(TypedDict):
- model: ModelHPConfiguration
- optimizer: OptimizerHPConfiguration
- scheduler: SchedulerHPConfiguration
-
-
-class ModelConfiguration(TypedDict):
- name: str
- restore_iter: int
- total_iter: int
-
-
-class Configuration(TypedDict):
- system: SystemConfiguration
- dataset: DatasetConfiguration
- dataloader: DataloaderConfiguration
- hyperparameter: HyperparameterConfiguration
- model: ModelConfiguration
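
What the deleted module provided, shown as a minimal sketch (not part of the commit): a TypedDict lets a static checker such as mypy verify key names and value types, which the plain Dict annotations the commit switches to do not:

from typing import TypedDict, Dict

class SystemConfiguration(TypedDict):
    disable_acc: bool
    CUDA_VISIBLE_DEVICES: str
    save_dir: str

def typed(cfg: SystemConfiguration) -> str:
    return cfg['save_dirr']    # typo: flagged by the type checker

def untyped(cfg: Dict) -> str:
    return cfg['save_dirr']    # same typo passes checking, raises KeyError at runtime
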
diff --git a/utils/misc.py b/utils/misc.py
index b850830..c047618 100644
--- a/utils/misc.py
+++ b/utils/misc.py
@@ -1,9 +1,8 @@
import os
+from typing import Dict
-from utils.configuration import SystemConfiguration
-
-def set_visible_cuda(config: SystemConfiguration):
+def set_visible_cuda(config: Dict):
"""Set environment variable CUDA device(s)"""
CUDA_VISIBLE_DEVICES = config.get('CUDA_VISIBLE_DEVICES', None)
if CUDA_VISIBLE_DEVICES:
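
A usage sketch for the helper above (the call site is assumed, not shown in this diff): it takes the 'system' section of the config and, per its docstring, sets the CUDA_VISIBLE_DEVICES environment variable when one is configured:

from config import config
from utils.misc import set_visible_cuda

set_visible_cuda(config['system'])   # e.g. exports CUDA_VISIBLE_DEVICES='0' if configured
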