author     Jordan Gong <jordan.gong@protonmail.com>    2021-01-07 19:55:00 +0800
committer  Jordan Gong <jordan.gong@protonmail.com>    2021-01-07 19:55:00 +0800
commit     98b6e6dc3be6f88abb72e351c8f2da2b23b8ab85 (patch)
tree       05f690b2411acae88ae81bb716703dcab4557842
parent     4a284084c253b9114fc02e1782962556ff113761 (diff)
Type hints for Python versions lower than 3.9
-rw-r--r--    models/auto_encoder.py      8
-rw-r--r--    models/hpm.py               4
-rw-r--r--    models/layers.py           20
-rw-r--r--    models/model.py            14
-rw-r--r--    models/part_net.py         13
-rw-r--r--    models/rgb_part_net.py     15
-rw-r--r--    utils/configuration.py     18
-rw-r--r--    utils/dataset.py           16
-rw-r--r--    utils/sampler.py            4
9 files changed, 59 insertions, 53 deletions
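
The pattern applied throughout this commit, shown as a minimal standalone sketch (the default_frame_size function below is illustrative, not taken from the repository): built-in generics such as tuple[int, int] only became subscriptable in Python 3.9 (PEP 585), so on older interpreters they raise a TypeError as soon as the annotated signature is evaluated at import time, whereas the typing.Tuple and typing.List aliases have been available since Python 3.5.

from typing import Tuple


# Illustrative function, not from the repository. On Python 3.8 and below,
# writing this annotation as tuple[int, int] would raise
# "TypeError: 'type' object is not subscriptable" when the module is imported;
# typing.Tuple keeps the same meaning for type checkers and works at runtime.
def default_frame_size(frame_size: Tuple[int, int] = (64, 32)) -> Tuple[int, int]:
    return frame_size


print(default_frame_size())  # (64, 32)
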
diff --git a/models/auto_encoder.py b/models/auto_encoder.py
index 7c1f7ef..1e7c323 100644
--- a/models/auto_encoder.py
+++ b/models/auto_encoder.py
@@ -1,3 +1,5 @@
+from typing import Tuple
+
import torch
import torch.nn as nn
import torch.nn.functional as F
@@ -12,7 +14,7 @@ class Encoder(nn.Module):
self,
in_channels: int = 3,
feature_channels: int = 64,
- output_dims: tuple[int, int, int] = (128, 128, 64)
+ output_dims: Tuple[int, int, int] = (128, 128, 64)
):
super().__init__()
self.feature_channels = feature_channels
@@ -67,7 +69,7 @@ class Decoder(nn.Module):
def __init__(
self,
- input_dims: tuple[int, int, int] = (128, 128, 64),
+ input_dims: Tuple[int, int, int] = (128, 128, 64),
feature_channels: int = 64,
out_channels: int = 3,
):
@@ -116,7 +118,7 @@ class AutoEncoder(nn.Module):
num_class: int = 74,
channels: int = 3,
feature_channels: int = 64,
- embedding_dims: tuple[int, int, int] = (128, 128, 64)
+ embedding_dims: Tuple[int, int, int] = (128, 128, 64)
):
super().__init__()
self.encoder = Encoder(channels, feature_channels, embedding_dims)
diff --git a/models/hpm.py b/models/hpm.py
index 66503e3..7505ed7 100644
--- a/models/hpm.py
+++ b/models/hpm.py
@@ -1,3 +1,5 @@
+from typing import Tuple
+
import torch
import torch.nn as nn
@@ -9,7 +11,7 @@ class HorizontalPyramidMatching(nn.Module):
self,
in_channels: int,
out_channels: int = 128,
- scales: tuple[int, ...] = (1, 2, 4),
+ scales: Tuple[int, ...] = (1, 2, 4),
use_avg_pool: bool = True,
use_max_pool: bool = True,
**kwargs
diff --git a/models/layers.py b/models/layers.py
index a9f04b3..7f2ccec 100644
--- a/models/layers.py
+++ b/models/layers.py
@@ -1,4 +1,4 @@
-from typing import Union
+from typing import Union, Tuple
import torch
import torch.nn as nn
@@ -10,7 +10,7 @@ class BasicConv2d(nn.Module):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, tuple[int, int]],
+ kernel_size: Union[int, Tuple[int, int]],
**kwargs
):
super().__init__()
@@ -29,7 +29,7 @@ class VGGConv2d(BasicConv2d):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, tuple[int, int]] = 3,
+ kernel_size: Union[int, Tuple[int, int]] = 3,
padding: int = 1,
**kwargs
):
@@ -47,7 +47,7 @@ class BasicConvTranspose2d(nn.Module):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, tuple[int, int]],
+ kernel_size: Union[int, Tuple[int, int]],
**kwargs
):
super().__init__()
@@ -66,7 +66,7 @@ class DCGANConvTranspose2d(BasicConvTranspose2d):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, tuple[int, int]] = 4,
+ kernel_size: Union[int, Tuple[int, int]] = 4,
stride: int = 2,
padding: int = 1,
is_last_layer: bool = False,
@@ -104,7 +104,7 @@ class FocalConv2d(BasicConv2d):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, tuple[int, int]],
+ kernel_size: Union[int, Tuple[int, int]],
halving: int,
**kwargs
):
@@ -124,8 +124,8 @@ class FocalConv2dBlock(nn.Module):
self,
in_channels: int,
out_channels: int,
- kernel_sizes: tuple[int, int],
- paddings: tuple[int, int],
+ kernel_sizes: Tuple[int, int],
+ paddings: Tuple[int, int],
halving: int,
use_pool: bool = True,
**kwargs
@@ -151,7 +151,7 @@ class BasicConv1d(nn.Module):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, tuple[int]],
+ kernel_size: Union[int, Tuple[int]],
**kwargs
):
super().__init__()
@@ -167,7 +167,7 @@ class HorizontalPyramidPooling(BasicConv2d):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, tuple[int, int]] = 1,
+ kernel_size: Union[int, Tuple[int, int]] = 1,
use_avg_pool: bool = True,
use_max_pool: bool = True,
**kwargs
diff --git a/models/model.py b/models/model.py
index 1dc0f23..4deced0 100644
--- a/models/model.py
+++ b/models/model.py
@@ -1,5 +1,5 @@
import os
-from typing import Union, Optional
+from typing import Union, Optional, Tuple, List
import numpy as np
import torch
@@ -195,9 +195,9 @@ class Model:
def _batch_splitter(
self,
- batch: list[dict[str, Union[np.int64, str, torch.Tensor]]]
- ) -> tuple[dict[str, Union[list[str], torch.Tensor]],
- dict[str, Union[list[str], torch.Tensor]]]:
+ batch: List[dict[str, Union[np.int64, str, torch.Tensor]]]
+ ) -> Tuple[dict[str, Union[List[str], torch.Tensor]],
+ dict[str, Union[List[str], torch.Tensor]]]:
"""
Disentanglement need two random conditions, this function will
split pr * k * 2 samples to 2 dicts each containing pr * k
@@ -212,7 +212,7 @@ class Model:
def _make_signature(self,
config: dict,
- popped_keys: Optional[list] = None) -> str:
+ popped_keys: Optional[List] = None) -> str:
_config = config.copy()
if popped_keys:
for key in popped_keys:
@@ -220,12 +220,12 @@ class Model:
return self._gen_sig(list(_config.values()))
- def _gen_sig(self, values: Union[tuple, list, str, int, float]) -> str:
+ def _gen_sig(self, values: Union[Tuple, List, str, int, float]) -> str:
strings = []
for v in values:
if isinstance(v, str):
strings.append(v)
- elif isinstance(v, (tuple, list)):
+ elif isinstance(v, (Tuple, List)):
strings.append(self._gen_sig(v))
else:
strings.append(str(v))
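
A note on the last hunk above: besides annotations, it also switches the runtime check in _gen_sig from isinstance(v, (tuple, list)) to the typing aliases. On the Python 3.7/3.8 interpreters this commit targets, bare (unsubscripted) aliases forward isinstance checks to the underlying built-ins, so behaviour should be unchanged; keeping the built-ins for the runtime check would work equally well on every version. A small sketch, independent of the repository:

from typing import List, Tuple

# Bare typing aliases delegate isinstance to tuple/list on Python 3.7+.
assert isinstance((1, 2), (Tuple, List))
assert isinstance(["a", "b"], (Tuple, List))
assert not isinstance("abc", (Tuple, List))

# Parameterized forms cannot be used this way: isinstance(x, Tuple[int, int])
# raises TypeError, so only the unsubscripted aliases are usable here.
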
diff --git a/models/part_net.py b/models/part_net.py
index ac7c434..6d8d4e1 100644
--- a/models/part_net.py
+++ b/models/part_net.py
@@ -1,4 +1,5 @@
import copy
+from typing import Tuple
import torch
import torch.nn as nn
@@ -12,9 +13,9 @@ class FrameLevelPartFeatureExtractor(nn.Module):
self,
in_channels: int = 3,
feature_channels: int = 32,
- kernel_sizes: tuple[tuple, ...] = ((5, 3), (3, 3), (3, 3)),
- paddings: tuple[tuple, ...] = ((2, 1), (1, 1), (1, 1)),
- halving: tuple[int, ...] = (0, 2, 3)
+ kernel_sizes: Tuple[Tuple, ...] = ((5, 3), (3, 3), (3, 3)),
+ paddings: Tuple[Tuple, ...] = ((2, 1), (1, 1), (1, 1)),
+ halving: Tuple[int, ...] = (0, 2, 3)
):
super().__init__()
num_blocks = len(kernel_sizes)
@@ -112,9 +113,9 @@ class PartNet(nn.Module):
self,
in_channels: int = 3,
feature_channels: int = 32,
- kernel_sizes: tuple[tuple, ...] = ((5, 3), (3, 3), (3, 3)),
- paddings: tuple[tuple, ...] = ((2, 1), (1, 1), (1, 1)),
- halving: tuple[int, ...] = (0, 2, 3),
+ kernel_sizes: Tuple[Tuple, ...] = ((5, 3), (3, 3), (3, 3)),
+ paddings: Tuple[Tuple, ...] = ((2, 1), (1, 1), (1, 1)),
+ halving: Tuple[int, ...] = (0, 2, 3),
squeeze_ratio: int = 4,
num_part: int = 16
):
diff --git a/models/rgb_part_net.py b/models/rgb_part_net.py
index 3037da0..39cbed6 100644
--- a/models/rgb_part_net.py
+++ b/models/rgb_part_net.py
@@ -1,4 +1,5 @@
import random
+from typing import Tuple, List
import torch
import torch.nn as nn
@@ -16,14 +17,14 @@ class RGBPartNet(nn.Module):
num_class: int = 74,
ae_in_channels: int = 3,
ae_feature_channels: int = 64,
- f_a_c_p_dims: tuple[int, int, int] = (128, 128, 64),
- hpm_scales: tuple[int, ...] = (1, 2, 4),
+ f_a_c_p_dims: Tuple[int, int, int] = (128, 128, 64),
+ hpm_scales: Tuple[int, ...] = (1, 2, 4),
hpm_use_avg_pool: bool = True,
hpm_use_max_pool: bool = True,
fpfe_feature_channels: int = 32,
- fpfe_kernel_sizes: tuple[tuple, ...] = ((5, 3), (3, 3), (3, 3)),
- fpfe_paddings: tuple[tuple, ...] = ((2, 1), (1, 1), (1, 1)),
- fpfe_halving: tuple[int, ...] = (0, 2, 3),
+ fpfe_kernel_sizes: Tuple[Tuple, ...] = ((5, 3), (3, 3), (3, 3)),
+ fpfe_paddings: Tuple[Tuple, ...] = ((2, 1), (1, 1), (1, 1)),
+ fpfe_halving: Tuple[int, ...] = (0, 2, 3),
tfa_squeeze_ratio: int = 4,
tfa_num_parts: int = 16,
embedding_dims: int = 256,
@@ -142,8 +143,8 @@ class RGBPartNet(nn.Module):
return (x_c_c1, x_p_c1), None
@staticmethod
- def _pose_sim_loss(f_p_c1: list[torch.Tensor],
- f_p_c2: list[torch.Tensor]) -> torch.Tensor:
+ def _pose_sim_loss(f_p_c1: List[torch.Tensor],
+ f_p_c2: List[torch.Tensor]) -> torch.Tensor:
f_p_c1_mean = torch.stack(f_p_c1).mean(dim=0)
f_p_c2_mean = torch.stack(f_p_c2).mean(dim=0)
return F.mse_loss(f_p_c1_mean, f_p_c2_mean)
diff --git a/utils/configuration.py b/utils/configuration.py
index f3ae0b3..aa04b32 100644
--- a/utils/configuration.py
+++ b/utils/configuration.py
@@ -1,4 +1,4 @@
-from typing import TypedDict, Optional, Union
+from typing import TypedDict, Optional, Union, Tuple
from utils.dataset import ClipClasses, ClipConditions, ClipViews
@@ -17,32 +17,32 @@ class DatasetConfiguration(TypedDict):
discard_threshold: int
selector: Optional[dict[str, Union[ClipClasses, ClipConditions, ClipViews]]]
num_input_channels: int
- frame_size: tuple[int, int]
+ frame_size: Tuple[int, int]
cache_on: bool
class DataloaderConfiguration(TypedDict):
- batch_size: tuple[int, int]
+ batch_size: Tuple[int, int]
num_workers: int
pin_memory: bool
class HyperparameterConfiguration(TypedDict):
ae_feature_channels: int
- f_a_c_p_dims: tuple[int, int, int]
- hpm_scales: tuple[int, ...]
+ f_a_c_p_dims: Tuple[int, int, int]
+ hpm_scales: Tuple[int, ...]
hpm_use_avg_pool: bool
hpm_use_max_pool: bool
fpfe_feature_channels: int
- fpfe_kernel_sizes: tuple[tuple, ...]
- fpfe_paddings: tuple[tuple, ...]
- fpfe_halving: tuple[int, ...]
+ fpfe_kernel_sizes: Tuple[Tuple, ...]
+ fpfe_paddings: Tuple[Tuple, ...]
+ fpfe_halving: Tuple[int, ...]
tfa_squeeze_ratio: int
tfa_num_parts: int
embedding_dims: int
triplet_margin: float
lr: int
- betas: tuple[float, float]
+ betas: Tuple[float, float]
class ModelConfiguration(TypedDict):
diff --git a/utils/dataset.py b/utils/dataset.py
index ded9fd5..0a33693 100644
--- a/utils/dataset.py
+++ b/utils/dataset.py
@@ -1,7 +1,7 @@
import os
import random
import re
-from typing import Optional, NewType, Union
+from typing import Optional, NewType, Union, List, Tuple
import numpy as np
import torch
@@ -30,7 +30,7 @@ class CASIAB(data.Dataset):
str, Union[ClipClasses, ClipConditions, ClipViews]
]] = None,
num_input_channels: int = 3,
- frame_size: tuple[int, int] = (64, 32),
+ frame_size: Tuple[int, int] = (64, 32),
cache_on: bool = False
):
"""
@@ -75,15 +75,15 @@ class CASIAB(data.Dataset):
self.views: np.ndarray[np.str_]
# Labels, classes, conditions and views in dataset,
# set of three attributes above
- self.metadata = dict[str, list[np.int64, str]]
+ self.metadata = dict[str, List[np.int64, str]]
# Dictionaries for indexing frames and frame names by clip name
# and chip path when cache is on
- self._cached_clips_frame_names: Optional[dict[str, list[str]]] = None
+ self._cached_clips_frame_names: Optional[dict[str, List[str]]] = None
self._cached_clips: Optional[dict[str, torch.Tensor]] = None
# Video clip directory names
- self._clip_names: list[str] = []
+ self._clip_names: List[str] = []
clip_names = sorted(os.listdir(self._root_dir))
if self._is_train:
@@ -215,8 +215,8 @@ class CASIAB(data.Dataset):
def _load_cached_video(
self,
clip: torch.Tensor,
- frame_names: list[str],
- sampled_frame_names: list[str]
+ frame_names: List[str],
+ sampled_frame_names: List[str]
) -> torch.Tensor:
# Mask the original clip when it is long enough
if len(frame_names) >= self._num_sampled_frames:
@@ -246,7 +246,7 @@ class CASIAB(data.Dataset):
return clip
def _sample_frames(self, clip_path: str,
- is_caching: bool = False) -> list[str]:
+ is_caching: bool = False) -> List[str]:
if self._cache_on:
if is_caching:
# Sort frame in advance for loading convenience
diff --git a/utils/sampler.py b/utils/sampler.py
index cdf1984..734acf9 100644
--- a/utils/sampler.py
+++ b/utils/sampler.py
@@ -1,6 +1,6 @@
import random
from collections.abc import Iterator
-from typing import Union
+from typing import Union, Tuple
import numpy as np
from torch.utils import data
@@ -12,7 +12,7 @@ class TripletSampler(data.Sampler):
def __init__(
self,
data_source: Union[CASIAB],
- batch_size: tuple[int, int]
+ batch_size: Tuple[int, int]
):
super().__init__(data_source)
self.metadata_labels = data_source.metadata['labels']
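
A closing observation under the same goal (assuming the target really is Python 3.8 or older and no module adds from __future__ import annotations): a few expressions in the hunks above still subscript the built-in dict, for example the inner dict[str, ...] in the _batch_splitter signature, the selector field of DatasetConfiguration, and the self.metadata assignment in utils/dataset.py. Those are evaluated when the module or class is loaded, so they would still raise a TypeError before 3.9; typing.Dict would be the matching replacement, as in this illustrative sketch (names are not from the repository):

from typing import Dict, List, Optional, Union

# Illustrative only: typing.Dict in place of a dict[...] annotation, so the
# signature can be evaluated on Python 3.5-3.8 as well as 3.9+.
def filter_clips(
        selector: Optional[Dict[str, Union[List[str], int]]] = None
) -> List[str]:
    if selector is None:
        return []
    return sorted(selector)


print(filter_clips({"conditions": ["nm-01", "bg-01"], "discard_threshold": 15}))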