-rw-r--r--  models/hpm.py          |  4
-rw-r--r--  models/layers.py       | 18
-rw-r--r--  models/model.py        | 10
-rw-r--r--  utils/configuration.py |  8
-rw-r--r--  utils/dataset.py       | 30
-rw-r--r--  utils/sampler.py       |  4
6 files changed, 36 insertions(+), 38 deletions(-)
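This commit drops the capitalized generic aliases from the typing module (Tuple, List, Dict, Set) in favor of the builtin generics standardized by PEP 585, and imports Iterator from collections.abc instead of typing. Subscripting the builtins this way requires Python 3.9 or newer (or `from __future__ import annotations` for annotation-only use on 3.7+). A minimal before/after sketch of the pattern, not taken from this repository:

# Pre-3.9 style, as removed by this commit
from typing import Dict, List, Tuple

def old_split(batch: List[Dict[str, int]], size: Tuple[int, int]) -> List[List[Dict[str, int]]]:
    ...

# PEP 585 style, as introduced by this commit (Python 3.9+)
def new_split(batch: list[dict[str, int]], size: tuple[int, int]) -> list[list[dict[str, int]]]:
    ...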
diff --git a/models/hpm.py b/models/hpm.py
index f387154..1773f56 100644
--- a/models/hpm.py
+++ b/models/hpm.py
@@ -1,5 +1,3 @@
-from typing import Tuple
-
import torch
import torch.nn as nn
from torchvision.models import resnet50
@@ -10,7 +8,7 @@ from models.layers import HorizontalPyramidPooling
class HorizontalPyramidMatching(nn.Module):
def __init__(
self,
- scales: Tuple[int] = (1, 2, 4, 8),
+ scales: tuple[int, ...] = (1, 2, 4, 8),
out_channels: int = 256,
use_avg_pool: bool = False,
**kwargs
diff --git a/models/layers.py b/models/layers.py
index 9b17205..cba6e47 100644
--- a/models/layers.py
+++ b/models/layers.py
@@ -1,8 +1,8 @@
-from typing import Union, Tuple
+from typing import Union
import torch
-import torch.nn.functional as F
import torch.nn as nn
+import torch.nn.functional as F
class BasicConv2d(nn.Module):
@@ -10,7 +10,7 @@ class BasicConv2d(nn.Module):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, Tuple[int, int]],
+ kernel_size: Union[int, tuple[int, int]],
**kwargs
):
super().__init__()
@@ -29,7 +29,7 @@ class VGGConv2d(BasicConv2d):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, Tuple[int, int]] = 3,
+ kernel_size: Union[int, tuple[int, int]] = 3,
padding: int = 1,
**kwargs
):
@@ -47,7 +47,7 @@ class BasicConvTranspose2d(nn.Module):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, Tuple[int, int]],
+ kernel_size: Union[int, tuple[int, int]],
**kwargs
):
super().__init__()
@@ -66,7 +66,7 @@ class DCGANConvTranspose2d(BasicConvTranspose2d):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, Tuple[int, int]] = 4,
+ kernel_size: Union[int, tuple[int, int]] = 4,
stride: int = 2,
padding: int = 1,
is_last_layer: bool = False,
@@ -88,7 +88,7 @@ class FocalConv2d(BasicConv2d):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, Tuple[int, int]],
+ kernel_size: Union[int, tuple[int, int]],
halving: int,
**kwargs
):
@@ -108,7 +108,7 @@ class BasicConv1d(nn.Module):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, Tuple[int]],
+ kernel_size: Union[int, tuple[int]],
**kwargs
):
super(BasicConv1d, self).__init__()
@@ -124,7 +124,7 @@ class HorizontalPyramidPooling(BasicConv2d):
self,
in_channels: int,
out_channels: int,
- kernel_size: Union[int, Tuple[int, int]] = 1,
+ kernel_size: Union[int, tuple[int, int]] = 1,
use_avg_pool: bool = False,
**kwargs
):
diff --git a/models/model.py b/models/model.py
index 369d6c2..cb0e756 100644
--- a/models/model.py
+++ b/models/model.py
@@ -1,4 +1,4 @@
-from typing import List, Dict, Union, Tuple
+from typing import Union
import torch
from torch.utils.data.dataloader import default_collate
@@ -7,15 +7,15 @@ from torch.utils.data.dataloader import default_collate
class Model:
def __init__(
self,
- batch_size: Tuple[int, int]
+ batch_size: tuple[int, int]
):
(self.pr, self.k) = batch_size
def _batch_splitter(
self,
- batch: List[Dict[str, Union[str, torch.Tensor]]]
- ) -> List[Tuple[Dict[str, List[Union[str, torch.Tensor]]],
- Dict[str, List[Union[str, torch.Tensor]]]]]:
+ batch: list[dict[str, Union[str, torch.Tensor]]]
+ ) -> list[tuple[dict[str, list[Union[str, torch.Tensor]]],
+ dict[str, list[Union[str, torch.Tensor]]]]]:
"""
Disentanglement cannot be processed on different subjects at the
same time, we need to load `pr` subjects one by one. The batch
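The docstring above is cut off by the hunk boundary, but the annotated signature suggests how such a splitter could regroup a flat batch of pr * k samples into per-subject pairs. The sketch below is purely illustrative: the real `_batch_splitter` body is not part of this diff, and the names and the half/half grouping of each subject's k samples are assumptions.

from typing import Union
import torch

def batch_splitter(
        batch: list[dict[str, Union[str, torch.Tensor]]],
        pr: int,
        k: int
) -> list[tuple[dict[str, list[Union[str, torch.Tensor]]],
                dict[str, list[Union[str, torch.Tensor]]]]]:
    def regroup(samples):
        # Turn a list of per-sample dicts into a dict of per-field lists
        return {key: [s[key] for s in samples] for key in samples[0]}

    out = []
    for i in range(pr):
        # Assumes the sampler yields samples ordered subject by subject
        subject = batch[i * k:(i + 1) * k]
        out.append((regroup(subject[:k // 2]), regroup(subject[k // 2:])))
    return out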
diff --git a/utils/configuration.py b/utils/configuration.py
index 32b9bec..84bd064 100644
--- a/utils/configuration.py
+++ b/utils/configuration.py
@@ -1,4 +1,4 @@
-from typing import TypedDict, Tuple
+from typing import TypedDict
import torch
@@ -16,12 +16,12 @@ class DatasetConfiguration(TypedDict):
num_sampled_frames: int
discard_threshold: int
num_input_channels: int
- frame_size: Tuple[int, int]
+ frame_size: tuple[int, int]
cache_on: bool
class DataloaderConfiguration(TypedDict):
- batch_size: Tuple[int, int]
+ batch_size: tuple[int, int]
num_workers: int
pin_memory: bool
@@ -29,7 +29,7 @@ class DataloaderConfiguration(TypedDict):
class HyperparameterConfiguration(TypedDict):
hidden_dim: int
lr: int
- betas: Tuple[float, float]
+ betas: tuple[float, float]
hard_or_all: str
margin: float
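The TypedDict classes above only declare the expected shape of each configuration block; a hypothetical instance (all values invented for illustration) might look like:

dataloader_config: DataloaderConfiguration = {
    'batch_size': (4, 8),   # (pr subjects, k clips per subject)
    'num_workers': 4,
    'pin_memory': True,
}

hyperparameter_config: HyperparameterConfiguration = {
    'hidden_dim': 256,
    'lr': 1e-4,             # declared as int above, though a float value seems intended
    'betas': (0.9, 0.999),
    'hard_or_all': 'hard',
    'margin': 0.2,
}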
diff --git a/utils/dataset.py b/utils/dataset.py
index ecdd2d9..9f9229a 100644
--- a/utils/dataset.py
+++ b/utils/dataset.py
@@ -1,18 +1,18 @@
import os
import random
import re
-from typing import Optional, Dict, NewType, Union, List, Set, Tuple
+from typing import Optional, NewType, Union
import numpy as np
import torch
+import torchvision.transforms as transforms
from PIL import Image
from torch.utils import data
-import torchvision.transforms as transforms
from tqdm import tqdm
-ClipLabels = NewType('ClipLabels', Set[str])
-ClipConditions = NewType('ClipConditions', Set[str])
-ClipViews = NewType('ClipViews', Set[str])
+ClipLabels = NewType('ClipLabels', set[str])
+ClipConditions = NewType('ClipConditions', set[str])
+ClipViews = NewType('ClipViews', set[str])
class CASIAB(data.Dataset):
@@ -25,11 +25,11 @@ class CASIAB(data.Dataset):
train_size: int = 74,
num_sampled_frames: int = 30,
discard_threshold: int = 15,
- selector: Optional[Dict[
+ selector: Optional[dict[
str, Union[ClipLabels, ClipConditions, ClipLabels]
]] = None,
num_input_channels: int = 3,
- frame_size: Tuple[int, int] = (64, 32),
+ frame_size: tuple[int, int] = (64, 32),
cache_on: bool = False
):
"""
@@ -77,15 +77,15 @@ class CASIAB(data.Dataset):
self.conditions: np.ndarray[np.str_]
self.views: np.ndarray[np.str_]
# Video clip directory names
- self._clip_names: List[str] = []
+ self._clip_names: list[str] = []
# Labels, conditions and views in dataset,
# set of three attributes above
- self.metadata = Dict[str, Set[str]]
+ self.metadata = dict[str, set[str]]
# Dictionaries for indexing frames and frame names by clip name
# and chip path when cache is on
- self._cached_clips_frame_names: Optional[Dict[str, List[str]]] = None
- self._cached_clips: Optional[Dict[str, torch.Tensor]] = None
+ self._cached_clips_frame_names: Optional[dict[str, list[str]]] = None
+ self._cached_clips: Optional[dict[str, torch.Tensor]] = None
clip_names = sorted(os.listdir(self.root_dir))
@@ -172,7 +172,7 @@ class CASIAB(data.Dataset):
def __len__(self) -> int:
return len(self.labels)
- def __getitem__(self, index: int) -> Dict[str, Union[str, torch.Tensor]]:
+ def __getitem__(self, index: int) -> dict[str, Union[str, torch.Tensor]]:
label = self.labels[index]
condition = self.conditions[index]
view = self.views[index]
@@ -217,8 +217,8 @@ class CASIAB(data.Dataset):
def _load_cached_video(
self,
clip: torch.Tensor,
- frame_names: List[str],
- sampled_frame_names: List[str]
+ frame_names: list[str],
+ sampled_frame_names: list[str]
) -> torch.Tensor:
# Mask the original clip when it is long enough
if len(frame_names) >= self.num_sampled_frames:
@@ -248,7 +248,7 @@ class CASIAB(data.Dataset):
return clip
def _sample_frames(self, clip_path: str,
- is_caching: bool = False) -> List[str]:
+ is_caching: bool = False) -> list[str]:
if self.cache_on:
if is_caching:
# Sort frame in advance for loading convenience
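The constructor shown earlier in this file accepts an optional selector mapping that narrows the dataset by label, condition, or view sets. A hypothetical call is sketched below; the root_dir parameter, the key names, and the CASIA-B-style condition/view strings are assumptions, not confirmed by this diff:

from utils.dataset import CASIAB, ClipConditions, ClipViews

dataset = CASIAB(
    root_dir='data/CASIA-B',    # placeholder path
    num_sampled_frames=30,
    selector={
        'conditions': ClipConditions({'nm-01', 'nm-02'}),   # assumed key names
        'views': ClipViews({'000', '090', '180'}),
    },
    frame_size=(64, 32),
    cache_on=True,
)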
diff --git a/utils/sampler.py b/utils/sampler.py
index 8dec846..0a177d1 100644
--- a/utils/sampler.py
+++ b/utils/sampler.py
@@ -1,5 +1,5 @@
import random
-from typing import Iterator, Tuple
+from collections.abc import Iterator
import numpy as np
from torch.utils import data
@@ -11,7 +11,7 @@ class TripletSampler(data.Sampler):
def __init__(
self,
data_source: CASIAB,
- batch_size: Tuple[int, int]
+ batch_size: tuple[int, int]
):
super().__init__(data_source)
self.metadata_labels = data_source.metadata['labels']
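Only the sampler's constructor appears in this diff. For context, a (pr, k) batch size usually drives PK-style sampling, where each batch draws pr labels and k clips per label; the function below is a standalone, hypothetical sketch of that idea, not the repository's TripletSampler.__iter__:

import random
from collections.abc import Iterator

import numpy as np

def sample_pk_batch(labels: np.ndarray, pr: int, k: int) -> Iterator[int]:
    # Pick pr distinct labels, then k clip indices per label (with replacement)
    chosen_labels = random.sample(sorted(set(labels)), pr)
    for label in chosen_labels:
        (candidates,) = np.where(labels == label)
        for index in random.choices(candidates.tolist(), k=k):
            yield index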