From 6ffc1c06f66277d37877fc13fb1ffa585598d6d7 Mon Sep 17 00:00:00 2001
From: Jordan Gong <jordan.gong@protonmail.com>
Date: Thu, 14 Jan 2021 23:43:29 +0800
Subject: Enable optimizer fine-tuning

---
 models/model.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

(limited to 'models')

diff --git a/models/model.py b/models/model.py
index 740cdf3..8797636 100644
--- a/models/model.py
+++ b/models/model.py
@@ -133,12 +133,21 @@ class Model:
         dataloader = self._parse_dataloader_config(dataset, dataloader_config)
         # Prepare for model, optimizer and scheduler
         model_hp = self.hp.get('model', {})
-        optim_hp = self.hp.get('optimizer', {})
+        optim_hp: dict = self.hp.get('optimizer', {}).copy()
+        ae_optim_hp = optim_hp.pop('auto_encoder', {})
+        pn_optim_hp = optim_hp.pop('part_net', {})
+        hpm_optim_hp = optim_hp.pop('hpm', {})
+        fc_optim_hp = optim_hp.pop('fc', {})
         sched_hp = self.hp.get('scheduler', {})
         self.rgb_pn = RGBPartNet(self.train_size, self.in_channels, **model_hp)
         # Try to accelerate computation using CUDA or others
         self.rgb_pn = self.rgb_pn.to(self.device)
-        self.optimizer = optim.Adam(self.rgb_pn.parameters(), **optim_hp)
+        self.optimizer = optim.Adam([
+            {'params': self.rgb_pn.ae.parameters(), **ae_optim_hp},
+            {'params': self.rgb_pn.pn.parameters(), **pn_optim_hp},
+            {'params': self.rgb_pn.hpm.parameters(), **hpm_optim_hp},
+            {'params': self.rgb_pn.fc_mat, **fc_optim_hp},
+        ], **optim_hp)
         self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, **sched_hp)
         self.writer = SummaryWriter(self._log_name)
 
-- 
cgit v1.2.3
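
For reference, a minimal standalone sketch of the per-parameter-group pattern this patch introduces: nested dicts in the optimizer hyperparameter config are popped out and spread into torch.optim.Adam parameter groups, where they override the top-level defaults. TinyNet and every hyperparameter value below are made up for illustration; only the pop-then-spread structure mirrors the diff.

    import torch.nn as nn
    import torch.optim as optim

    # Two small sub-modules stand in for the auto-encoder and PartNet.
    class TinyNet(nn.Module):
        def __init__(self):
            super().__init__()
            self.ae = nn.Linear(8, 8)
            self.pn = nn.Linear(8, 8)

    net = TinyNet()
    # Top-level keys are optimizer-wide defaults; nested dicts carry
    # per-module overrides, popped out before the remainder is spread.
    optim_hp = {'lr': 1e-4, 'auto_encoder': {'lr': 1e-3},
                'part_net': {'weight_decay': 1e-5}}
    ae_optim_hp = optim_hp.pop('auto_encoder', {})
    pn_optim_hp = optim_hp.pop('part_net', {})
    optimizer = optim.Adam([
        {'params': net.ae.parameters(), **ae_optim_hp},
        {'params': net.pn.parameters(), **pn_optim_hp},
    ], **optim_hp)
    for group in optimizer.param_groups:
        print(group['lr'], group['weight_decay'])
    # -> 0.001 0.0     (auto_encoder lr override, default weight decay)
    # -> 0.0001 1e-05  (default lr, part_net weight decay override)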


From 59ccfd7718babe94fac549fcfbfa22bb311f0bd8 Mon Sep 17 00:00:00 2001
From: Jordan Gong <jordan.gong@protonmail.com>
Date: Thu, 21 Jan 2021 21:50:28 +0800
Subject: Bug fixes

1. Turn off autograd while decoding canonical and pose features
2. Change default batch size to (4, 8) (this change lands outside
   models/, so it is filtered out of the diff below)
---
 models/auto_encoder.py | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

(limited to 'models')

diff --git a/models/auto_encoder.py b/models/auto_encoder.py
index 5e7558b..36be868 100644
--- a/models/auto_encoder.py
+++ b/models/auto_encoder.py
@@ -132,15 +132,16 @@ class AutoEncoder(nn.Module):
         # x_c1_t2 is the frame for later module
         (f_a_c1_t2, f_c_c1_t2, f_p_c1_t2) = self.encoder(x_c1_t2)
 
-        # Decode canonical features for HPM
-        x_c_c1_t2 = self.decoder(
-            torch.zeros_like(f_a_c1_t2), f_c_c1_t2, torch.zeros_like(f_p_c1_t2),
-            no_trans_conv=True
-        )
-        # Decode pose features for Part Net
-        x_p_c1_t2 = self.decoder(
-            torch.zeros_like(f_a_c1_t2), torch.zeros_like(f_c_c1_t2), f_p_c1_t2
-        )
+        with torch.no_grad():
+            # Decode canonical features for HPM
+            x_c_c1_t2 = self.decoder(
+                torch.zeros_like(f_a_c1_t2), f_c_c1_t2, torch.zeros_like(f_p_c1_t2),
+                no_trans_conv=True
+            )
+            # Decode pose features for Part Net
+            x_p_c1_t2 = self.decoder(
+                torch.zeros_like(f_a_c1_t2), torch.zeros_like(f_c_c1_t2), f_p_c1_t2
+            )
 
         if self.training:
             # t1 is random time step, c2 is another condition
-- 
cgit v1.2.3
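
The effect of the new torch.no_grad() block, in isolation: the decoder still runs its forward pass, but nothing inside the block is recorded by autograd, so losses later computed on these decodings cannot push gradients back into the decoder or the encoder features. A minimal demonstration, with a linear layer standing in for the decoder; all names here are illustrative.

    import torch
    import torch.nn as nn

    decoder = nn.Linear(4, 4)                    # stands in for the decoder
    f_c = torch.randn(2, 4, requires_grad=True)  # stands in for a feature

    with torch.no_grad():
        x_c = decoder(f_c)  # forward runs normally, but no graph is built

    print(x_c.requires_grad)  # False
    print(x_c.grad_fn)        # None: the op was not tracked, so a loss on
                              # x_c cannot backpropagate into the decoder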


From 04c9d3210ff659bbe00dedb2d193a748e7a97b54 Mon Sep 17 00:00:00 2001
From: Jordan Gong <jordan.gong@protonmail.com>
Date: Thu, 21 Jan 2021 23:32:53 +0800
Subject: Print average losses every 100 iters

---
 models/model.py        | 20 +++++++++++++-------
 models/rgb_part_net.py |  5 ++---
 2 files changed, 15 insertions(+), 10 deletions(-)

(limited to 'models')

diff --git a/models/model.py b/models/model.py
index 8797636..3b54363 100644
--- a/models/model.py
+++ b/models/model.py
@@ -164,6 +164,9 @@ class Model:
 
         # Training start
         start_time = datetime.now()
+        running_loss = torch.zeros(4).to(self.device)
+        print(f"{'Iter':^5} {'Loss':^6} {'Xrecon':^8} {'PoseSim':^8}",
+              f"{'CanoCons':^8} {'BATrip':^8} {'LR':^9}")
         for (batch_c1, batch_c2) in dataloader:
             self.curr_iter += 1
             # Zero the parameter gradients
@@ -172,24 +175,27 @@ class Model:
             x_c1 = batch_c1['clip'].to(self.device)
             x_c2 = batch_c2['clip'].to(self.device)
             y = batch_c1['label'].to(self.device)
-            loss, metrics = self.rgb_pn(x_c1, x_c2, y)
+            losses = self.rgb_pn(x_c1, x_c2, y)
+            loss = losses.sum()
             loss.backward()
             self.optimizer.step()
             # Step scheduler
             self.scheduler.step()
 
+            # Statistics and checkpoint
+            running_loss += losses.detach()
             # Write losses to TensorBoard
-            self.writer.add_scalar('Loss/all', loss.item(), self.curr_iter)
+            self.writer.add_scalar('Loss/all', loss, self.curr_iter)
             self.writer.add_scalars('Loss/details', dict(zip([
                 'Cross reconstruction loss', 'Pose similarity loss',
                 'Canonical consistency loss', 'Batch All triplet loss'
-            ], metrics)), self.curr_iter)
+            ], losses)), self.curr_iter)
 
             if self.curr_iter % 100 == 0:
-                print('{0:5d} loss: {1:6.3f}'.format(self.curr_iter, loss),
-                      '(xrecon = {:f}, pose_sim = {:f},'
-                      ' cano_cons = {:f}, ba_trip = {:f})'.format(*metrics),
-                      'lr:', self.scheduler.get_last_lr()[0])
+                print(f'{self.curr_iter:5d} {running_loss.sum() / 100:6.3f}',
+                      '{:f} {:f} {:f} {:f}'.format(*running_loss / 100),
+                      f'{self.scheduler.get_last_lr()[0]:.3e}')
+                running_loss.zero_()
 
             if self.curr_iter % 1000 == 0:
                 torch.save({
diff --git a/models/rgb_part_net.py b/models/rgb_part_net.py
index f39b40b..e707c26 100644
--- a/models/rgb_part_net.py
+++ b/models/rgb_part_net.py
@@ -80,9 +80,8 @@ class RGBPartNet(nn.Module):
 
         if self.training:
             batch_all_triplet_loss = self.ba_triplet_loss(x, y)
-            losses = (*losses, batch_all_triplet_loss)
-            loss = torch.sum(torch.stack(losses))
-            return loss, [loss.item() for loss in losses]
+            losses = torch.stack((*losses, batch_all_triplet_loss))
+            return losses
         else:
             return x.unsqueeze(1).view(-1)
 
-- 
cgit v1.2.3
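
A standalone sketch of the bookkeeping these two diffs add up to: RGBPartNet now returns the four loss terms as one stacked tensor, the training loop backpropagates their sum, and a detached running total is averaged and printed every 100 iterations. The stand-in loss function below is made up; only the four components and the 100-iteration window mirror the patch.

    import torch

    # Stand-in for RGBPartNet in training mode, which now returns the
    # four loss terms stacked into a single 1-D tensor.
    def fake_losses():
        return torch.stack([torch.rand(()) for _ in range(4)])

    log_interval = 100
    running_loss = torch.zeros(4)
    for it in range(1, 301):
        losses = fake_losses()
        loss = losses.sum()              # the scalar actually backpropagated
        running_loss += losses.detach()  # accumulate without keeping a graph
        if it % log_interval == 0:
            avg = running_loss / log_interval
            print(f'{it:5d} {avg.sum():6.3f}',
                  '{:f} {:f} {:f} {:f}'.format(*avg))
            running_loss.zero_()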


From d750dd9dafe3cda3b1331ad2bfecb53c8c2b1267 Mon Sep 17 00:00:00 2001
From: Jordan Gong <jordan.gong@protonmail.com>
Date: Thu, 21 Jan 2021 23:47:11 +0800
Subject: A type hint fix

---
 models/model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'models')

diff --git a/models/model.py b/models/model.py
index bed28a5..f4604c8 100644
--- a/models/model.py
+++ b/models/model.py
@@ -133,7 +133,7 @@ class Model:
         dataloader = self._parse_dataloader_config(dataset, dataloader_config)
         # Prepare for model, optimizer and scheduler
         model_hp = self.hp.get('model', {})
-        optim_hp: dict = self.hp.get('optimizer', {}).copy()
+        optim_hp: Dict = self.hp.get('optimizer', {}).copy()
         ae_optim_hp = optim_hp.pop('auto_encoder', {})
         pn_optim_hp = optim_hp.pop('part_net', {})
         hpm_optim_hp = optim_hp.pop('hpm', {})
-- 
cgit v1.2.3
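
Worth noting about this last patch: typing.Dict must be imported at the top of models/model.py (the import presumably lands outside the hunk shown), whereas the bare builtin dict it replaces needs none, so the change is a consistency fix rather than a behavioral one. A two-line illustration with made-up values:

    from typing import Dict

    # Equivalent annotations; `Dict` just requires the import above.
    optim_hp: Dict = {'lr': 1e-4, 'auto_encoder': {'lr': 1e-3}}
    ae_optim_hp: Dict = optim_hp.pop('auto_encoder', {})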