-rw-r--r--  config.py                4
-rw-r--r--  models/model.py         31
-rw-r--r--  utils/configuration.py  59
-rw-r--r--  utils/misc.py            5
4 files changed, 17 insertions, 82 deletions
diff --git a/config.py b/config.py
index 47ded38..cbe5e07 100644
--- a/config.py
+++ b/config.py
@@ -1,6 +1,4 @@
-from utils.configuration import Configuration
-
-config: Configuration = {
+config = {
'system': {
# Disable accelerator
'disable_acc': False,
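
Note: with the Configuration annotation gone, config is just a nested plain dict; a minimal access sketch (illustrative only, using the keys visible in the hunk above):

    from config import config

    system_config = config['system']
    assert system_config['disable_acc'] is False  # key shown in the hunk above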
diff --git a/models/model.py b/models/model.py
index 1154d7f..7cf6ed0 100644
--- a/models/model.py
+++ b/models/model.py
@@ -13,9 +13,6 @@ from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from models.rgb_part_net import RGBPartNet
-from utils.configuration import DataloaderConfiguration, \
- HyperparameterConfiguration, DatasetConfiguration, ModelConfiguration, \
- SystemConfiguration
from utils.dataset import CASIAB, ClipConditions, ClipViews, ClipClasses
from utils.sampler import TripletSampler
@@ -23,9 +20,9 @@ from utils.sampler import TripletSampler
class Model:
def __init__(
self,
- system_config: SystemConfiguration,
- model_config: ModelConfiguration,
- hyperparameter_config: HyperparameterConfiguration
+ system_config: Dict,
+ model_config: Dict,
+ hyperparameter_config: Dict
):
self.disable_acc = system_config['disable_acc']
if self.disable_acc:
@@ -89,11 +86,11 @@ class Model:
def fit_all(
self,
- dataset_config: DatasetConfiguration,
+ dataset_config: Dict,
dataset_selectors: Dict[
str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
],
- dataloader_config: DataloaderConfiguration,
+ dataloader_config: Dict,
):
for (condition, selector) in dataset_selectors.items():
print(f'Training model {condition} ...')
@@ -104,8 +101,8 @@ class Model:
def fit(
self,
- dataset_config: DatasetConfiguration,
- dataloader_config: DataloaderConfiguration,
+ dataset_config: Dict,
+ dataloader_config: Dict,
):
self.is_train = True
dataset = self._parse_dataset_config(dataset_config)
@@ -184,11 +181,11 @@ class Model:
def predict_all(
self,
iter_: int,
-            dataset_config: DatasetConfiguration,
+            dataset_config: Dict,
dataset_selectors: Dict[
str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
],
-            dataloader_config: DataloaderConfiguration,
+            dataloader_config: Dict,
) -> Dict[str, torch.Tensor]:
self.is_train = False
# Split gallery and probe dataset
@@ -296,7 +293,7 @@ class Model:
def _load_pretrained(
self,
iter_: int,
- dataset_config: DatasetConfiguration,
+ dataset_config: Dict,
dataset_selectors: Dict[
str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
]
@@ -313,8 +310,8 @@ class Model:
def _split_gallery_probe(
self,
- dataset_config: DatasetConfiguration,
- dataloader_config: DataloaderConfiguration,
+ dataset_config: Dict,
+ dataloader_config: Dict,
    ) -> Tuple[DataLoader, Dict[str, DataLoader]]:
dataset_name = dataset_config.get('name', 'CASIA-B')
if dataset_name == 'CASIA-B':
@@ -364,7 +361,7 @@ class Model:
def _parse_dataset_config(
self,
- dataset_config: DatasetConfiguration
+ dataset_config: Dict
) -> Union[CASIAB]:
self.train_size = dataset_config.get('train_size', 74)
self.in_channels = dataset_config.get('num_input_channels', 3)
@@ -385,7 +382,7 @@ class Model:
def _parse_dataloader_config(
self,
dataset: Union[CASIAB],
- dataloader_config: DataloaderConfiguration
+ dataloader_config: Dict
) -> DataLoader:
config: Dict = dataloader_config.copy()
(self.pr, self.k) = config.pop('batch_size')
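
Note: the methods above now receive plain dicts; as a reference for the expected shape, a sketch of a dataloader config (field names mirror the DataloaderConfiguration TypedDict removed below; the values are placeholders, not taken from this commit):

    dataloader_config = {
        'batch_size': (4, 6),   # unpacked as (self.pr, self.k) in _parse_dataloader_config
        'num_workers': 4,
        'pin_memory': True,
    }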
diff --git a/utils/configuration.py b/utils/configuration.py
deleted file mode 100644
index 455abe8..0000000
--- a/utils/configuration.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from typing import TypedDict, Optional, Union, Tuple, Dict
-
-from utils.dataset import ClipClasses, ClipConditions, ClipViews
-
-
-class SystemConfiguration(TypedDict):
- disable_acc: bool
- CUDA_VISIBLE_DEVICES: str
- save_dir: str
-
-
-class DatasetConfiguration(TypedDict):
- name: str
- root_dir: str
- train_size: int
- num_sampled_frames: int
- discard_threshold: int
- selector: Optional[Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]]
- num_input_channels: int
- frame_size: Tuple[int, int]
- cache_on: bool
-
-
-class DataloaderConfiguration(TypedDict):
- batch_size: Tuple[int, int]
- num_workers: int
- pin_memory: bool
-
-
-class HyperparameterConfiguration(TypedDict):
- ae_feature_channels: int
- f_a_c_p_dims: Tuple[int, int, int]
- hpm_scales: Tuple[int, ...]
- hpm_use_avg_pool: bool
- hpm_use_max_pool: bool
- fpfe_feature_channels: int
- fpfe_kernel_sizes: Tuple[Tuple, ...]
- fpfe_paddings: Tuple[Tuple, ...]
- fpfe_halving: Tuple[int, ...]
- tfa_squeeze_ratio: int
- tfa_num_parts: int
- embedding_dims: int
- triplet_margin: float
- lr: int
- betas: Tuple[float, float]
-
-
-class ModelConfiguration(TypedDict):
- name: str
- restore_iter: int
- total_iter: int
-
-
-class Configuration(TypedDict):
- system: SystemConfiguration
- dataset: DatasetConfiguration
- dataloader: DataloaderConfiguration
- hyperparameter: HyperparameterConfiguration
- model: ModelConfiguration
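
Note: the deleted TypedDict classes only affected static checking; a minimal sketch of the pattern being dropped (field set copied from the SystemConfiguration above, literal values are placeholders):

    from typing import TypedDict

    class SystemConfiguration(TypedDict):
        disable_acc: bool
        CUDA_VISIBLE_DEVICES: str
        save_dir: str

    # mypy/pyright would flag missing or misspelled keys in this literal
    sys_conf: SystemConfiguration = {
        'disable_acc': False,
        'CUDA_VISIBLE_DEVICES': '0',
        'save_dir': 'runs',
    }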
diff --git a/utils/misc.py b/utils/misc.py
index b850830..c047618 100644
--- a/utils/misc.py
+++ b/utils/misc.py
@@ -1,9 +1,8 @@
import os
+from typing import Dict
-from utils.configuration import SystemConfiguration
-
-def set_visible_cuda(config: SystemConfiguration):
+def set_visible_cuda(config: Dict):
"""Set environment variable CUDA device(s)"""
CUDA_VISIBLE_DEVICES = config.get('CUDA_VISIBLE_DEVICES', None)
if CUDA_VISIBLE_DEVICES:
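
Note: set_visible_cuda now accepts any dict; a minimal usage sketch (the CUDA_VISIBLE_DEVICES value is a placeholder):

    from utils.misc import set_visible_cuda

    set_visible_cuda({'CUDA_VISIBLE_DEVICES': '0'})  # sets the environment variable only if the key is present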