author     Jordan Gong <jordan.gong@protonmail.com>  2021-01-23 16:04:35 +0800
committer  Jordan Gong <jordan.gong@protonmail.com>  2021-01-23 16:04:35 +0800
commit     59ccf61fed4d95b7fe91bb9552f0deb2f2c75b76 (patch)
tree       e02025da7eff50e2f4d3212f98f41842c8b2b115
parent     8f5bef7f3d10ba0994ce51d9f84100c26218d6ee (diff)
Add late start support for non-disentangling parts
-rw-r--r--  config.py                 2
-rw-r--r--  models/model.py          24
-rw-r--r--  utils/configuration.py    1
3 files changed, 20 insertions, 7 deletions
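
The change lets the parts of the network outside the auto-encoder (rgb_pn.pn, rgb_pn.hpm, and rgb_pn.fc_mat) join optimization only after a configurable number of iterations. A minimal sketch of the pattern with hypothetical stand-in modules (not the repository's classes):

    import torch
    from torch import nn, optim

    # Hypothetical stand-ins for the auto-encoder and a non-disentangling part
    ae = nn.Linear(8, 8)
    pn = nn.Linear(8, 8)

    # Begin with only the auto-encoder parameters in the optimizer
    optimizer = optim.Adam([{'params': ae.parameters()}], lr=1e-4)

    start_iter = 10
    for curr_iter in range(20):
        if curr_iter == start_iter:
            # Late start: the second module joins the optimizer mid-training
            optimizer.add_param_group({'params': pn.parameters(), 'lr': 1e-3})
        x = torch.randn(4, 8)
        loss = (pn(ae(x)) ** 2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
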
diff --git a/config.py b/config.py
index 991a3a6..641e8fb 100644
--- a/config.py
+++ b/config.py
@@ -70,6 +70,8 @@ config: Configuration = {
},
'optimizer': {
# Global parameters
+ # Iteration start to optimize non-disentangling parts
+ # 'start_iter': 10,
# Initial learning rate of Adam Optimizer
'lr': 1e-4,
# Coefficients used for computing running averages of
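
For reference, enabling the commented-out key would look like the fragment below; 'start_iter': 10 is only the illustrative value taken from the comment in the hunk:

    # Hypothetical excerpt of the 'optimizer' section with late start enabled
    config = {
        'optimizer': {
            # Iteration at which the non-disentangling parts start to be optimized
            'start_iter': 10,
            # Initial learning rate of Adam Optimizer
            'lr': 1e-4,
        },
    }
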
diff --git a/models/model.py b/models/model.py
index 6b799ad..cccb6c4 100644
--- a/models/model.py
+++ b/models/model.py
@@ -141,6 +141,7 @@ class Model:
# Prepare for model, optimizer and scheduler
model_hp = self.hp.get('model', {})
optim_hp: dict = self.hp.get('optimizer', {}).copy()
+ start_iter = optim_hp.pop('start_iter', 0)
ae_optim_hp = optim_hp.pop('auto_encoder', {})
pn_optim_hp = optim_hp.pop('part_net', {})
hpm_optim_hp = optim_hp.pop('hpm', {})
@@ -151,9 +152,6 @@ class Model:
self.rgb_pn = self.rgb_pn.to(self.device)
self.optimizer = optim.Adam([
{'params': self.rgb_pn.ae.parameters(), **ae_optim_hp},
- {'params': self.rgb_pn.pn.parameters(), **pn_optim_hp},
- {'params': self.rgb_pn.hpm.parameters(), **hpm_optim_hp},
- {'params': self.rgb_pn.fc_mat, **fc_optim_hp},
], **optim_hp)
self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, **sched_hp)
self.writer = SummaryWriter(self._log_name)
@@ -173,8 +171,18 @@ class Model:
start_time = datetime.now()
running_loss = torch.zeros(4).to(self.device)
print(f"{'Iter':^5} {'Loss':^6} {'Xrecon':^8} {'PoseSim':^8}",
- f"{'CanoCons':^8} {'BATrip':^8} {'LR':^9}")
+ f"{'CanoCons':^8} {'BATrip':^8} LR(s)")
for (batch_c1, batch_c2) in dataloader:
+ if self.curr_iter == start_iter:
+ self.optimizer.add_param_group(
+ {'params': self.rgb_pn.pn.parameters(), **pn_optim_hp}
+ )
+ self.optimizer.add_param_group(
+ {'params': self.rgb_pn.hpm.parameters(), **hpm_optim_hp}
+ )
+ self.optimizer.add_param_group(
+ {'params': self.rgb_pn.fc_mat, **fc_optim_hp}
+ )
self.curr_iter += 1
# Zero the parameter gradients
self.optimizer.zero_grad()
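
add_param_group appends a new group to optimizer.param_groups and fills any hyper-parameter missing from the passed dict with the optimizer's defaults, so the groups added at start_iter inherit the global Adam settings unless the pn/hpm/fc sub-dicts override them. A small sketch of that behavior:

    import torch
    from torch import nn, optim

    w = nn.Parameter(torch.zeros(3))
    v = nn.Parameter(torch.zeros(3))

    opt = optim.Adam([{'params': [w]}], lr=1e-4, betas=(0.9, 0.999))
    opt.add_param_group({'params': [v], 'lr': 1e-3})  # overrides lr, inherits betas

    for group in opt.param_groups:
        print(group['lr'], group['betas'])
    # 0.0001 (0.9, 0.999)
    # 0.001 (0.9, 0.999)
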
@@ -186,8 +194,6 @@ class Model:
loss = losses.sum()
loss.backward()
self.optimizer.step()
- # Step scheduler
- self.scheduler.step()
# Statistics and checkpoint
running_loss += losses.detach()
@@ -199,11 +205,15 @@ class Model:
], losses)), self.curr_iter)
if self.curr_iter % 100 == 0:
+ lrs = self.scheduler.get_last_lr()
print(f'{self.curr_iter:5d} {running_loss.sum() / 100:6.3f}',
'{:f} {:f} {:f} {:f}'.format(*running_loss / 100),
- f'{self.scheduler.get_last_lr()[0]:.3e}')
+ ' '.join(('{:.3e}'.format(lr) for lr in lrs)))
running_loss.zero_()
+ # Step scheduler
+ self.scheduler.step()
+
if self.curr_iter % 1000 == 0:
torch.save({
'iter': self.curr_iter,
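
Because the number of parameter groups now grows at start_iter, the log line prints every entry of scheduler.get_last_lr() (one learning rate per group) instead of only the first, which is why the header column becomes LR(s). A sketch of the formatting, assuming illustrative step_size and gamma values:

    import torch
    from torch import nn, optim

    opt = optim.Adam([{'params': [nn.Parameter(torch.zeros(1))]}], lr=1e-4)
    sched = optim.lr_scheduler.StepLR(opt, step_size=500, gamma=0.9)

    # A group joining later with its own learning rate, as at start_iter
    opt.add_param_group({'params': [nn.Parameter(torch.zeros(1))], 'lr': 1e-3})
    opt.step()
    sched.step()

    lrs = sched.get_last_lr()  # one entry per parameter group
    print(' '.join('{:.3e}'.format(lr) for lr in lrs))
    # 1.000e-04 1.000e-03
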
diff --git a/utils/configuration.py b/utils/configuration.py
index 8b265e8..c4c4b4d 100644
--- a/utils/configuration.py
+++ b/utils/configuration.py
@@ -52,6 +52,7 @@ class SubOptimizerHPConfiguration(TypedDict):
class OptimizerHPConfiguration(TypedDict):
+ start_iter: int
lr: int
betas: tuple[float, float]
eps: float
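
With the new field, values of this shape can be checked statically and then consumed the way model.py does. A minimal sketch with a hypothetical class name and only a few of the fields (types here are illustrative):

    from typing import TypedDict


    class OptimizerHPSketch(TypedDict, total=False):
        start_iter: int
        lr: float
        betas: tuple[float, float]
        eps: float


    hp: OptimizerHPSketch = {'start_iter': 10, 'lr': 1e-4}
    optim_hp = dict(hp)                          # work on a copy, as model.py does
    start_iter = optim_hp.pop('start_iter', 0)   # defaults to 0 when the key is absent
    print(start_iter, optim_hp)                  # 10 {'lr': 0.0001}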