Diffstat (limited to 'utils')
-rw-r--r--  utils/configuration.py  26
-rw-r--r--  utils/dataset.py        28
-rw-r--r--  utils/sampler.py         5
3 files changed, 29 insertions, 30 deletions
diff --git a/utils/configuration.py b/utils/configuration.py
index 8b265e8..ef6b757 100644
--- a/utils/configuration.py
+++ b/utils/configuration.py
@@ -1,4 +1,4 @@
-from typing import TypedDict, Optional, Union
+from typing import TypedDict, Optional, Union, Tuple, Dict
 
 from utils.dataset import ClipClasses, ClipConditions, ClipViews
 
@@ -15,28 +15,28 @@ class DatasetConfiguration(TypedDict):
     train_size: int
     num_sampled_frames: int
     discard_threshold: int
-    selector: Optional[dict[str, Union[ClipClasses, ClipConditions, ClipViews]]]
+    selector: Optional[Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]]
     num_input_channels: int
-    frame_size: tuple[int, int]
+    frame_size: Tuple[int, int]
     cache_on: bool
 
 
 class DataloaderConfiguration(TypedDict):
-    batch_size: tuple[int, int]
+    batch_size: Tuple[int, int]
     num_workers: int
     pin_memory: bool
 
 
 class ModelHPConfiguration(TypedDict):
     ae_feature_channels: int
-    f_a_c_p_dims: tuple[int, int, int]
-    hpm_scales: tuple[int, ...]
+    f_a_c_p_dims: Tuple[int, int, int]
+    hpm_scales: Tuple[int, ...]
     hpm_use_avg_pool: bool
     hpm_use_max_pool: bool
     fpfe_feature_channels: int
-    fpfe_kernel_sizes: tuple[tuple, ...]
-    fpfe_paddings: tuple[tuple, ...]
-    fpfe_halving: tuple[int, ...]
+    fpfe_kernel_sizes: Tuple[Tuple, ...]
+    fpfe_paddings: Tuple[Tuple, ...]
+    fpfe_halving: Tuple[int, ...]
     tfa_squeeze_ratio: int
     tfa_num_parts: int
     embedding_dims: int
@@ -45,7 +45,7 @@ class ModelHPConfiguration(TypedDict):
 
 class SubOptimizerHPConfiguration(TypedDict):
     lr: int
-    betas: tuple[float, float]
+    betas: Tuple[float, float]
     eps: float
     weight_decay: float
     amsgrad: bool
@@ -53,7 +53,7 @@ class SubOptimizerHPConfiguration(TypedDict):
 
 class OptimizerHPConfiguration(TypedDict):
     lr: int
-    betas: tuple[float, float]
+    betas: Tuple[float, float]
     eps: float
     weight_decay: float
     amsgrad: bool
@@ -78,8 +78,8 @@ class ModelConfiguration(TypedDict):
     name: str
     restore_iter: int
     total_iter: int
-    restore_iters: tuple[int, ...]
-    total_iters: tuple[int, ...]
+    restore_iters: Tuple[int, ...]
+    total_iters: Tuple[int, ...]
 
 
 class Configuration(TypedDict):
diff --git a/utils/dataset.py b/utils/dataset.py
index cd8b0f1..ea678fb 100644
--- a/utils/dataset.py
+++ b/utils/dataset.py
@@ -1,7 +1,7 @@
 import os
 import random
 import re
-from typing import Optional, NewType, Union
+from typing import Optional, NewType, Union, List, Tuple, Set, Dict
 
 import numpy as np
 import torch
@@ -11,9 +11,9 @@ from sklearn.preprocessing import LabelEncoder
 from torch.utils import data
 from tqdm import tqdm
 
-ClipClasses = NewType('ClipClasses', set[str])
-ClipConditions = NewType('ClipConditions', set[str])
-ClipViews = NewType('ClipViews', set[str])
+ClipClasses = NewType('ClipClasses', Set[str])
+ClipConditions = NewType('ClipConditions', Set[str])
+ClipViews = NewType('ClipViews', Set[str])
 
 
 class CASIAB(data.Dataset):
@@ -26,11 +26,11 @@ class CASIAB(data.Dataset):
             train_size: int = 74,
             num_sampled_frames: int = 30,
             discard_threshold: int = 15,
-            selector: Optional[dict[
+            selector: Optional[Dict[
                 str, Union[ClipClasses, ClipConditions, ClipViews]
             ]] = None,
             num_input_channels: int = 3,
-            frame_size: tuple[int, int] = (64, 32),
+            frame_size: Tuple[int, int] = (64, 32),
             cache_on: bool = False
     ):
         """
@@ -75,15 +75,15 @@ class CASIAB(data.Dataset):
         self.views: np.ndarray[np.str_]
         # Labels, classes, conditions and views in dataset,
         #   set of three attributes above
-        self.metadata: dict[str, list[np.int64, str]]
+        self.metadata: Dict[str, List[np.int64, str]]
 
         # Dictionaries for indexing frames and frame names by clip name
         # and chip path when cache is on
-        self._cached_clips_frame_names: Optional[dict[str, list[str]]] = None
-        self._cached_clips: Optional[dict[str, torch.Tensor]] = None
+        self._cached_clips_frame_names: Optional[Dict[str, List[str]]] = None
+        self._cached_clips: Optional[Dict[str, torch.Tensor]] = None
 
         # Video clip directory names
-        self._clip_names: list[str] = []
+        self._clip_names: List[str] = []
 
         clip_names = sorted(os.listdir(self._root_dir))
 
         if self._is_train:
@@ -170,7 +170,7 @@ class CASIAB(data.Dataset):
     def __getitem__(
             self,
             index: int
-    ) -> dict[str, Union[np.int64, str, torch.Tensor]]:
+    ) -> Dict[str, Union[np.int64, str, torch.Tensor]]:
         label = self.labels[index]
         condition = self.conditions[index]
         view = self.views[index]
@@ -215,8 +215,8 @@ class CASIAB(data.Dataset):
     def _load_cached_video(
             self,
             clip: torch.Tensor,
-            frame_names: list[str],
-            sampled_frame_names: list[str]
+            frame_names: List[str],
+            sampled_frame_names: List[str]
     ) -> torch.Tensor:
         # Mask the original clip when it is long enough
         if len(frame_names) >= self._num_sampled_frames:
@@ -246,7 +246,7 @@ class CASIAB(data.Dataset):
         return clip
 
     def _sample_frames(self, clip_path: str,
-                       is_caching: bool = False) -> list[str]:
+                       is_caching: bool = False) -> List[str]:
         if self._cache_on:
             if is_caching:
                 # Sort frame in advance for loading convenience
diff --git a/utils/sampler.py b/utils/sampler.py
index cdf1984..0977f94 100644
--- a/utils/sampler.py
+++ b/utils/sampler.py
@@ -1,6 +1,5 @@
 import random
-from collections.abc import Iterator
-from typing import Union
+from typing import Union, Tuple, Iterator
 
 import numpy as np
 from torch.utils import data
@@ -12,7 +11,7 @@ class TripletSampler(data.Sampler):
     def __init__(
             self,
             data_source: Union[CASIAB],
-            batch_size: tuple[int, int]
+            batch_size: Tuple[int, int]
     ):
         super().__init__(data_source)
         self.metadata_labels = data_source.metadata['labels']
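The substance of the change is swapping PEP 585 built-in generics (dict[...], tuple[...], set[...], list[...]) for their typing-module equivalents, and importing Iterator from typing instead of collections.abc in sampler.py. This reads as a compatibility fix: the built-in types only support subscription from Python 3.9 onward, so the old annotations fail at import time on earlier interpreters, while the typing aliases have been available since the typing module appeared in 3.5. A minimal sketch of the difference, assuming Python 3.8 is the target interpreter (the variable names and selector values below are illustrative, not taken from the repository):

    from typing import Dict, NewType, Set, Tuple

    # On Python 3.8, subscripting a built-in type raises as soon as the
    # annotation is evaluated (TypedDict fields and module-level variable
    # annotations are evaluated at import time):
    #     frame_size: tuple[int, int] = (64, 32)
    #     TypeError: 'type' object is not subscriptable

    # The typing-module generics accept the same parameters on older versions:
    frame_size: Tuple[int, int] = (64, 32)
    selector: Dict[str, Set[str]] = {'conditions': {'nm', 'bg'}}  # illustrative values

    # NewType over a typing generic, mirroring the aliases in utils/dataset.py:
    ClipViews = NewType('ClipViews', Set[str])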
