From c74df416b00f837ba051f3947be92f76e7afbd88 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Fri, 12 Mar 2021 13:56:17 +0800
Subject: Code refactoring

1. Separate FCs and triplet losses for HPM and PartNet
2. Remove FC-equivalent 1x1 conv layers in HPM
3. Support adjustable learning rate schedulers
---
 models/part_net.py | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

(limited to 'models/part_net.py')

diff --git a/models/part_net.py b/models/part_net.py
index 29cf9cd..f2236bf 100644
--- a/models/part_net.py
+++ b/models/part_net.py
@@ -111,17 +111,21 @@ class PartNet(nn.Module):
     def __init__(
             self,
             in_channels: int = 128,
+            embedding_dims: int = 256,
+            num_parts: int = 16,
             squeeze_ratio: int = 4,
-            num_part: int = 16
     ):
         super().__init__()
 
-        self.num_part = num_part
-        self.tfa = TemporalFeatureAggregator(
-            in_channels, squeeze_ratio, self.num_part
-        )
+        self.num_part = num_parts
         self.avg_pool = nn.AdaptiveAvgPool2d(1)
         self.max_pool = nn.AdaptiveMaxPool2d(1)
+        self.tfa = TemporalFeatureAggregator(
+            in_channels, squeeze_ratio, self.num_part
+        )
+        self.fc_mat = nn.Parameter(
+            torch.empty(num_parts, in_channels, embedding_dims)
+        )
 
     def forward(self, x):
         n, t, c, h, w = x.size()
@@ -138,4 +142,8 @@ class PartNet(nn.Module):
 
         # p, n, t, c
         x = self.tfa(x)
+
+        # p, n, c
+        x = x @ self.fc_mat
+        # p, n, d
         return x
-- 
cgit v1.2.3
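
Note on the new projection: below is a minimal standalone sketch of what the
added fc_mat step computes, assuming the dimensions from the diff
(num_parts=16, in_channels=128, embedding_dims=256); the batch size and the
Xavier initialization are illustrative assumptions, not part of this commit
(the diff only allocates the parameter with torch.empty here, so any
initialization presumably happens elsewhere).

    import torch

    num_parts, in_channels, embedding_dims = 16, 128, 256
    n = 4  # arbitrary batch size for the demo

    # One (in_channels x embedding_dims) FC weight per part, stacked into a
    # single 3-D parameter, matching fc_mat in the diff above.
    fc_mat = torch.empty(num_parts, in_channels, embedding_dims)
    torch.nn.init.xavier_uniform_(fc_mat)  # assumed init; not shown in the diff

    x = torch.randn(num_parts, n, in_channels)  # p, n, c (the TFA output shape)
    y = x @ fc_mat  # batched matmul: (p, n, c) @ (p, c, d) -> (p, n, d)
    print(y.shape)  # torch.Size([16, 4, 256])

Stacking the per-part FC weights into one (p, c, d) parameter lets a single
batched matrix multiply apply all 16 part-specific FCs at once, rather than
looping over 16 separate nn.Linear modules.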