author | Jordan Gong <jordan.gong@protonmail.com> | 2021-01-07 19:55:00 +0800
committer | Jordan Gong <jordan.gong@protonmail.com> | 2021-01-07 20:16:09 +0800
commit | e5a73abd80578aa5e46d8d444466d1e6346ec6ec (patch)
tree | 9af6211c406e995a4c2129b931ad2a83bc05c0b5 /utils
parent | 98b6e6dc3be6f88abb72e351c8f2da2b23b8ab85 (diff)
Type hints for Python versions lower than 3.9
Diffstat (limited to 'utils')
-rw-r--r-- | utils/configuration.py | 4
-rw-r--r-- | utils/dataset.py | 18
-rw-r--r-- | utils/sampler.py | 2
3 files changed, 12 insertions, 12 deletions
```diff
diff --git a/utils/configuration.py b/utils/configuration.py
index aa04b32..455abe8 100644
--- a/utils/configuration.py
+++ b/utils/configuration.py
@@ -1,4 +1,4 @@
-from typing import TypedDict, Optional, Union, Tuple
+from typing import TypedDict, Optional, Union, Tuple, Dict
 
 from utils.dataset import ClipClasses, ClipConditions, ClipViews
 
@@ -15,7 +15,7 @@ class DatasetConfiguration(TypedDict):
     train_size: int
     num_sampled_frames: int
     discard_threshold: int
-    selector: Optional[dict[str, Union[ClipClasses, ClipConditions, ClipViews]]]
+    selector: Optional[Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]]
     num_input_channels: int
     frame_size: Tuple[int, int]
     cache_on: bool
diff --git a/utils/dataset.py b/utils/dataset.py
index 0a33693..e691157 100644
--- a/utils/dataset.py
+++ b/utils/dataset.py
@@ -1,7 +1,7 @@
 import os
 import random
 import re
-from typing import Optional, NewType, Union, List, Tuple
+from typing import Optional, NewType, Union, List, Tuple, Set, Dict
 
 import numpy as np
 import torch
@@ -11,9 +11,9 @@ from sklearn.preprocessing import LabelEncoder
 from torch.utils import data
 from tqdm import tqdm
 
-ClipClasses = NewType('ClipClasses', set[str])
-ClipConditions = NewType('ClipConditions', set[str])
-ClipViews = NewType('ClipViews', set[str])
+ClipClasses = NewType('ClipClasses', Set[str])
+ClipConditions = NewType('ClipConditions', Set[str])
+ClipViews = NewType('ClipViews', Set[str])
 
 
 class CASIAB(data.Dataset):
@@ -26,7 +26,7 @@ class CASIAB(data.Dataset):
             train_size: int = 74,
             num_sampled_frames: int = 30,
             discard_threshold: int = 15,
-            selector: Optional[dict[
+            selector: Optional[Dict[
                 str, Union[ClipClasses, ClipConditions, ClipViews]
             ]] = None,
             num_input_channels: int = 3,
@@ -75,12 +75,12 @@ class CASIAB(data.Dataset):
         self.views: np.ndarray[np.str_]
         # Labels, classes, conditions and views in dataset,
         # set of three attributes above
-        self.metadata = dict[str, List[np.int64, str]]
+        self.metadata = Dict[str, List[np.int64, str]]
 
         # Dictionaries for indexing frames and frame names by clip name
         # and chip path when cache is on
-        self._cached_clips_frame_names: Optional[dict[str, List[str]]] = None
-        self._cached_clips: Optional[dict[str, torch.Tensor]] = None
+        self._cached_clips_frame_names: Optional[Dict[str, List[str]]] = None
+        self._cached_clips: Optional[Dict[str, torch.Tensor]] = None
 
         # Video clip directory names
         self._clip_names: List[str] = []
@@ -170,7 +170,7 @@ class CASIAB(data.Dataset):
     def __getitem__(
             self,
             index: int
-    ) -> dict[str, Union[np.int64, str, torch.Tensor]]:
+    ) -> Dict[str, Union[np.int64, str, torch.Tensor]]:
         label = self.labels[index]
         condition = self.conditions[index]
         view = self.views[index]
diff --git a/utils/sampler.py b/utils/sampler.py
index 734acf9..c1bf3dd 100644
--- a/utils/sampler.py
+++ b/utils/sampler.py
@@ -1,5 +1,5 @@
 import random
-from collections.abc import Iterator
+from collections import Iterator
 from typing import Union, Tuple
 
 import numpy as np
```
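For context: subscripting the built-in collection types in annotations (`dict[str, ...]`, `set[str]`, PEP 585) only works at runtime on Python 3.9 and later; on earlier interpreters those expressions raise `TypeError`, while the `typing` aliases (`Dict`, `Set`, ...) remain subscriptable. A minimal sketch of the distinction is below; the `Selector` alias and `filter_clips` helper are illustrative only and are not part of this repository.

```python
from typing import Dict, Set

# On Python < 3.9, subscripting a built-in collection type fails at runtime:
#   >>> dict[str, int]
#   TypeError: 'type' object is not subscriptable
# The typing aliases are subscriptable on older interpreters as well:
Selector = Dict[str, Set[str]]  # hypothetical alias mirroring the selector hint above


def filter_clips(clip_names: Set[str], selector: Selector) -> Set[str]:
    """Keep only the clip names that contain a tag allowed by the selector."""
    if not selector:
        return clip_names
    allowed = set().union(*selector.values())
    return {name for name in clip_names if any(tag in name for tag in allowed)}


if __name__ == '__main__':
    names = {'001-nm-01-090', '001-bg-01-090', '002-cl-01-054'}
    print(filter_clips(names, {'conditions': {'nm-01', 'bg-01'}}))
    # -> {'001-nm-01-090', '001-bg-01-090'} (set order may vary)
```

Note that the `sampler.py` hunk switches to `from collections import Iterator`, an alias that has been deprecated since Python 3.3 and was removed in Python 3.10; `typing.Iterator` would be the variant consistent with the other hints in this commit.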