author     Jordan Gong <jordan.gong@protonmail.com>    2021-02-19 22:43:17 +0800
committer  Jordan Gong <jordan.gong@protonmail.com>    2021-02-19 22:43:17 +0800
commit     4049566103a00aa6d5a0b1f73569bdc5435714ca (patch)
tree       d84604773f05eab030ff2106c43cb2c091b6e8fc /models
parent     d12dd6b04a4e7c2b1ee43ab6f36f25d0c35ca364 (diff)
parent     969030864495e7c2b419400fd81ee0fad83de41e (diff)
Merge branch 'python3.8' into disentangling_only_py3.8
# Conflicts:
#	models/hpm.py
#	models/layers.py
#	models/model.py
#	models/part_net.py
#	models/rgb_part_net.py
#	utils/configuration.py
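The merge replaces the PEP 585 built-in generics (tuple[...], dict[...], etc.) with their typing aliases so the models run on Python 3.8, where the built-in types are not yet subscriptable. A minimal sketch of the failure mode and the fix (illustrative names only, not code from this repository):

    from typing import Tuple


    # Without `from __future__ import annotations`, Python evaluates annotations
    # when the function is defined, so on 3.8 the PEP 585 spelling fails at
    # import time:
    #
    #     def crop(frame_size: tuple[int, int] = (64, 48)) -> None: ...
    #     TypeError: 'type' object is not subscriptable
    #
    # The typing alias is subscriptable on 3.8 and means the same thing to
    # type checkers:
    def crop(frame_size: Tuple[int, int] = (64, 48)) -> None:
        print(frame_size)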
Diffstat (limited to 'models')
-rw-r--r--   models/auto_encoder.py   14
-rw-r--r--   models/layers.py         10
-rw-r--r--   models/model.py          54
-rw-r--r--   models/rgb_part_net.py    6
4 files changed, 44 insertions, 40 deletions
diff --git a/models/auto_encoder.py b/models/auto_encoder.py
index 2d715db..e17caed 100644
--- a/models/auto_encoder.py
+++ b/models/auto_encoder.py
@@ -1,3 +1,5 @@
+from typing import Tuple
+
import torch
import torch.nn as nn
import torch.nn.functional as F
@@ -11,9 +13,9 @@ class Encoder(nn.Module):
def __init__(
self,
in_channels: int = 3,
- frame_size: tuple[int, int] = (64, 48),
+ frame_size: Tuple[int, int] = (64, 48),
feature_channels: int = 64,
- output_dims: tuple[int, int, int] = (128, 128, 64)
+ output_dims: Tuple[int, int, int] = (128, 128, 64)
):
super().__init__()
self.feature_channels = feature_channels
@@ -74,9 +76,9 @@ class Decoder(nn.Module):
def __init__(
self,
- input_dims: tuple[int, int, int] = (128, 128, 64),
+ input_dims: Tuple[int, int, int] = (128, 128, 64),
feature_channels: int = 64,
- feature_size: tuple[int, int] = (4, 3),
+ feature_size: Tuple[int, int] = (4, 3),
out_channels: int = 3,
):
super().__init__()
@@ -125,9 +127,9 @@ class AutoEncoder(nn.Module):
def __init__(
self,
channels: int = 3,
- frame_size: tuple[int, int] = (64, 48),
+ frame_size: Tuple[int, int] = (64, 48),
feature_channels: int = 64,
- embedding_dims: tuple[int, int, int] = (128, 128, 64)
+ embedding_dims: Tuple[int, int, int] = (128, 128, 64)
):
super().__init__()
self.encoder = Encoder(channels, frame_size,
diff --git a/models/layers.py b/models/layers.py
index 1b4640f..8228f49 100644
--- a/models/layers.py
+++ b/models/layers.py
@@ -1,4 +1,4 @@
-from typing import Union
+from typing import Union, Tuple
import torch.nn as nn
import torch.nn.functional as F
@@ -9,7 +9,7 @@ class BasicConv2d(nn.Module):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, tuple[int, int]],
+ kernel_size: Union[int, Tuple[int, int]],
**kwargs
):
super().__init__()
@@ -28,7 +28,7 @@ class VGGConv2d(BasicConv2d):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, tuple[int, int]] = 3,
+ kernel_size: Union[int, Tuple[int, int]] = 3,
padding: int = 1,
**kwargs
):
@@ -46,7 +46,7 @@ class BasicConvTranspose2d(nn.Module):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, tuple[int, int]],
+ kernel_size: Union[int, Tuple[int, int]],
**kwargs
):
super().__init__()
@@ -65,7 +65,7 @@ class DCGANConvTranspose2d(BasicConvTranspose2d):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, tuple[int, int]] = 4,
+ kernel_size: Union[int, Tuple[int, int]] = 4,
stride: int = 2,
padding: int = 1,
is_last_layer: bool = False,
diff --git a/models/model.py b/models/model.py
index 3f24936..c8f0450 100644
--- a/models/model.py
+++ b/models/model.py
@@ -1,6 +1,6 @@
import os
from datetime import datetime
-from typing import Union, Optional
+from typing import Union, Optional, Tuple, List, Dict, Set
import numpy as np
import torch
@@ -54,12 +54,12 @@ class Model:
self.is_train: bool = True
self.in_channels: int = 3
- self.in_size: tuple[int, int] = (64, 48)
+ self.in_size: Tuple[int, int] = (64, 48)
self.pr: Optional[int] = None
self.k: Optional[int] = None
- self._gallery_dataset_meta: Optional[dict[str, list]] = None
- self._probe_datasets_meta: Optional[dict[str, dict[str, list]]] = None
+ self._gallery_dataset_meta: Optional[Dict[str, List]] = None
+ self._probe_datasets_meta: Optional[Dict[str, Dict[str, List]]] = None
self._model_name: str = self.meta.get('name', 'RGB-GaitPart')
self._hp_sig: str = self._make_signature(self.hp)
@@ -107,8 +107,8 @@ class Model:
def fit_all(
self,
dataset_config: DatasetConfiguration,
- dataset_selectors: dict[
- str, dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
+ dataset_selectors: Dict[
+ str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
],
dataloader_config: DataloaderConfiguration,
):
@@ -140,7 +140,7 @@ class Model:
dataloader = self._parse_dataloader_config(dataset, dataloader_config)
# Prepare for model, optimizer and scheduler
model_hp = self.hp.get('model', {})
- optim_hp: dict = self.hp.get('optimizer', {}).copy()
+ optim_hp: Dict = self.hp.get('optimizer', {}).copy()
sched_hp = self.hp.get('scheduler', {})
self.rgb_pn = RGBPartNet(self.in_channels, self.in_size, **model_hp,
image_log_on=self.image_log_on)
@@ -243,10 +243,10 @@ class Model:
def transform(
self,
- iters: tuple[int],
+ iters: Tuple[int],
dataset_config: DatasetConfiguration,
- dataset_selectors: dict[
- str, dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
+ dataset_selectors: Dict[
+ str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
],
dataloader_config: DataloaderConfiguration
):
@@ -288,7 +288,7 @@ class Model:
return gallery_samples, probe_samples
- def _get_eval_sample(self, sample: dict[str, Union[list, torch.Tensor]]):
+ def _get_eval_sample(self, sample: Dict[str, Union[List, torch.Tensor]]):
label = sample.pop('label').item()
clip = sample.pop('clip').to(self.device)
x_c, x_p = self.rgb_pn(clip).detach()
@@ -300,12 +300,12 @@ class Model:
def _load_pretrained(
self,
- iters: tuple[int],
+ iters: Tuple[int],
dataset_config: DatasetConfiguration,
- dataset_selectors: dict[
- str, dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
+ dataset_selectors: Dict[
+ str, Dict[str, Union[ClipClasses, ClipConditions, ClipViews]]
]
- ) -> dict[str, str]:
+ ) -> Dict[str, str]:
checkpoints = {}
for (iter_, (condition, selector)) in zip(
iters, dataset_selectors.items()
@@ -322,7 +322,7 @@ class Model:
self,
dataset_config: DatasetConfiguration,
dataloader_config: DataloaderConfiguration,
- ) -> tuple[DataLoader, dict[str, DataLoader]]:
+ ) -> Tuple[DataLoader, Dict[str, DataLoader]]:
dataset_name = dataset_config.get('name', 'CASIA-B')
if dataset_name == 'CASIA-B':
gallery_dataset = self._parse_dataset_config(
@@ -377,7 +377,7 @@ class Model:
dataset_config,
popped_keys=['root_dir', 'cache_on']
)
- config: dict = dataset_config.copy()
+ config: Dict = dataset_config.copy()
name = config.pop('name', 'CASIA-B')
if name == 'CASIA-B':
return CASIAB(**config, is_train=self.is_train)
@@ -391,7 +391,7 @@ class Model:
dataset: Union[CASIAB],
dataloader_config: DataloaderConfiguration
) -> DataLoader:
- config: dict = dataloader_config.copy()
+ config: Dict = dataloader_config.copy()
(self.pr, self.k) = config.pop('batch_size', (8, 16))
if self.is_train:
triplet_sampler = TripletSampler(dataset, (self.pr, self.k))
@@ -404,9 +404,9 @@ class Model:
def _batch_splitter(
self,
- batch: list[dict[str, Union[np.int64, str, torch.Tensor]]]
- ) -> tuple[dict[str, Union[list[str], torch.Tensor]],
- dict[str, Union[list[str], torch.Tensor]]]:
+ batch: List[Dict[str, Union[np.int64, str, torch.Tensor]]]
+ ) -> Tuple[Dict[str, Union[List[str], torch.Tensor]],
+ Dict[str, Union[List[str], torch.Tensor]]]:
"""
Disentanglement need two random conditions, this function will
split pr * k * 2 samples to 2 dicts each containing pr * k
@@ -420,8 +420,8 @@ class Model:
return default_collate(_batch[0]), default_collate(_batch[1])
def _make_signature(self,
- config: dict,
- popped_keys: Optional[list] = None) -> str:
+ config: Dict,
+ popped_keys: Optional[List] = None) -> str:
_config = config.copy()
if popped_keys:
for key in popped_keys:
@@ -429,16 +429,16 @@ class Model:
return self._gen_sig(list(_config.values()))
- def _gen_sig(self, values: Union[tuple, list, set, str, int, float]) -> str:
+ def _gen_sig(self, values: Union[Tuple, List, Set, str, int, float]) -> str:
strings = []
for v in values:
if isinstance(v, str):
strings.append(v)
- elif isinstance(v, (tuple, list)):
+ elif isinstance(v, (Tuple, List)):
strings.append(self._gen_sig(v))
- elif isinstance(v, set):
+ elif isinstance(v, Set):
strings.append(self._gen_sig(sorted(list(v))))
- elif isinstance(v, dict):
+ elif isinstance(v, Dict):
strings.append(self._gen_sig(list(v.values())))
else:
strings.append(str(v))
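Besides the annotations, the _gen_sig hunk above also moves its runtime isinstance checks onto the bare typing aliases. A small sketch (assuming Python 3.8) of why that behaves the same as checking the built-in classes: non-subscripted Tuple/List/Set/Dict delegate instance checks to tuple, list, set and dict, whereas subscripted forms such as Tuple[int, int] cannot be used with isinstance at all.

    from typing import Dict, List, Set, Tuple

    # Bare typing aliases forward isinstance() to their built-in origins,
    # so these checks behave exactly like isinstance(v, (tuple, list)), etc.
    assert isinstance((1, 2), (Tuple, List))
    assert isinstance({3}, Set)
    assert isinstance({'a': 1}, Dict)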
diff --git a/models/rgb_part_net.py b/models/rgb_part_net.py
index f18d675..797e02b 100644
--- a/models/rgb_part_net.py
+++ b/models/rgb_part_net.py
@@ -1,3 +1,5 @@
+from typing import Tuple
+
import torch
import torch.nn as nn
@@ -8,9 +10,9 @@ class RGBPartNet(nn.Module):
def __init__(
self,
ae_in_channels: int = 3,
- ae_in_size: tuple[int, int] = (64, 48),
+ ae_in_size: Tuple[int, int] = (64, 48),
ae_feature_channels: int = 64,
- f_a_c_p_dims: tuple[int, int, int] = (128, 128, 64),
+ f_a_c_p_dims: Tuple[int, int, int] = (128, 128, 64),
image_log_on: bool = False
):
super().__init__()
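For completeness, a different route to Python 3.8 compatibility would have been PEP 563's postponed evaluation, which keeps annotations as strings at runtime and therefore tolerates the PEP 585 spelling; it would not, however, cover runtime uses of the aliases such as the isinstance checks in models/model.py. A hypothetical sketch, not part of this commit:

    from __future__ import annotations  # PEP 563: annotations stay unevaluated


    # Runs on Python 3.8 because the annotation is never evaluated at runtime;
    # type checkers that understand PEP 585 still read it as Tuple[int, int].
    def resize(frame_size: tuple[int, int] = (64, 48)) -> tuple[int, int]:
        return frame_size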