Diffstat (limited to 'models/rgb_part_net.py')
-rw-r--r-- | models/rgb_part_net.py | 16 |
1 file changed, 9 insertions, 7 deletions
diff --git a/models/rgb_part_net.py b/models/rgb_part_net.py
index 67acac3..80b3e17 100644
--- a/models/rgb_part_net.py
+++ b/models/rgb_part_net.py
@@ -1,3 +1,5 @@
+from typing import Tuple
+
 import torch
 import torch.nn as nn
 
@@ -13,19 +15,19 @@ class RGBPartNet(nn.Module):
             ae_in_channels: int = 3,
             ae_in_size: tuple[int, int] = (64, 48),
             ae_feature_channels: int = 64,
-            f_a_c_p_dims: tuple[int, int, int] = (128, 128, 64),
+            f_a_c_p_dims: Tuple[int, int, int] = (128, 128, 64),
             hpm_use_1x1conv: bool = False,
-            hpm_scales: tuple[int, ...] = (1, 2, 4),
+            hpm_scales: Tuple[int, ...] = (1, 2, 4),
             hpm_use_avg_pool: bool = True,
             hpm_use_max_pool: bool = True,
             fpfe_feature_channels: int = 32,
-            fpfe_kernel_sizes: tuple[tuple, ...] = ((5, 3), (3, 3), (3, 3)),
-            fpfe_paddings: tuple[tuple, ...] = ((2, 1), (1, 1), (1, 1)),
-            fpfe_halving: tuple[int, ...] = (0, 2, 3),
+            fpfe_kernel_sizes: Tuple[Tuple, ...] = ((5, 3), (3, 3), (3, 3)),
+            fpfe_paddings: Tuple[Tuple, ...] = ((2, 1), (1, 1), (1, 1)),
+            fpfe_halving: Tuple[int, ...] = (0, 2, 3),
             tfa_squeeze_ratio: int = 4,
             tfa_num_parts: int = 16,
             embedding_dims: int = 256,
-            triplet_margins: tuple[float, float] = (0.2, 0.2),
+            triplet_margins: Tuple[float, float] = (0.2, 0.2),
             image_log_on: bool = False
     ):
         super().__init__()
@@ -84,7 +86,7 @@ class RGBPartNet(nn.Module):
             pn_ba_trip = self.pn_ba_trip(
                 x[self.hpm_num_parts:], y[self.hpm_num_parts:]
             )
-            losses = torch.stack((*losses, hpm_ba_trip, pn_ba_trip))
+            losses = (*losses, hpm_ba_trip, pn_ba_trip)
             return losses, images
         else:
             return x.unsqueeze(1).view(-1)
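Note on the annotation change: subscripting the built-in tuple type in annotations (tuple[int, int], PEP 585) is only accepted at runtime on Python 3.9+, whereas typing.Tuple also works on older interpreters, so the switch presumably keeps the module importable on pre-3.9 Python. A minimal sketch of the distinction; the function below is illustrative only and not part of the module:

    from typing import Tuple

    # Works on Python 3.5+ because the generic alias comes from typing.
    def default_margins() -> Tuple[float, float]:
        return (0.2, 0.2)

    # The equivalent built-in form is valid as an annotation only on
    # Python 3.9 and later:
    # def default_margins() -> tuple[float, float]: ...

The last hunk returns the loss terms as a plain Python tuple instead of stacking them into a single tensor with torch.stack, so each loss remains a separate scalar tensor on the caller's side.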