-rw-r--r--  config.py               | 14
-rw-r--r--  models/auto_encoder.py  |  2
-rw-r--r--  models/model.py         | 61
3 files changed, 46 insertions(+), 31 deletions(-)
diff --git a/config.py b/config.py
index b643c75..9893b97 100644
--- a/config.py
+++ b/config.py
@@ -8,6 +8,8 @@ config = {
'save_dir': 'runs',
# Record disentangled images or not
'image_log_on': False,
+ # Log images every n steps
+ 'image_log_steps': 100,
# The number of subjects for validating (Part of testing set)
'val_size': 20,
},
@@ -37,7 +39,7 @@ config = {
# Batch size (pr, k)
# `pr` denotes number of persons
# `k` denotes number of sequences per person
- 'batch_size': (4, 6),
+ 'batch_size': (4, 5),
# Number of workers of Dataloader
'num_workers': 4,
# Faster data transfer from RAM to GPU if enabled
@@ -84,18 +86,22 @@ config = {
# Local parameters (override global ones)
# 'auto_encoder': {
- # 'weight_decay': 0.001
+ # 'lr': 1e-3
# },
},
'scheduler': {
# Step to start decaying
'start_step': 500,
+ # Step to stop decaying
+ # 'stop_step': 30_000,
# Multiplicative decay factor at the end
'final_gamma': 0.01,
# Local parameters (override global ones)
- # 'hpm': {
- # 'final_gamma': 0.001
+ # 'auto_encoder': {
+ # 'start_step': 0,
+ # 'stop_step': 500,
+ # 'final_gamma': 0.5
# }
}
},
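The scheduler block above configures a piecewise decay: the learning-rate multiplier stays at 1 until 'start_step', decays exponentially to 'final_gamma' by 'stop_step' (commented out here, defaulting to the total iteration count), and holds at 'final_gamma' afterwards; per-module sections such as 'auto_encoder' override the global values. A minimal sketch of that rule, mirroring the LambdaLR lambdas added to models/model.py further down:

def lr_multiplier(t, start_step, stop_step, final_gamma):
    if t <= start_step:
        return 1.0  # keep the base learning rate until decay starts
    if t <= stop_step:
        # exponential decay from 1 down to final_gamma
        return final_gamma ** ((t - start_step) / (stop_step - start_step))
    return final_gamma  # hold the final multiplier after stop_step

# Using the commented-out auto_encoder override (start 0, stop 500, gamma 0.5):
assert lr_multiplier(0, 0, 500, 0.5) == 1.0
assert abs(lr_multiplier(250, 0, 500, 0.5) - 0.5 ** 0.5) < 1e-9
assert lr_multiplier(1000, 0, 500, 0.5) == 0.5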
diff --git a/models/auto_encoder.py b/models/auto_encoder.py
index 1028767..d11ec99 100644
--- a/models/auto_encoder.py
+++ b/models/auto_encoder.py
@@ -154,7 +154,7 @@ class AutoEncoder(nn.Module):
return (
(f_a_c1_t2, f_c_c1_t2, f_p_c1_t2),
- (xrecon_loss / 10, cano_cons_loss, pose_sim_loss * 10)
+ (xrecon_loss, cano_cons_loss, pose_sim_loss * 10)
)
else: # evaluating
return f_a_c1_t2, f_c_c1_t2, f_p_c1_t2
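Removing the "/ 10" raises the reconstruction term's weight tenfold, so the tuple returned during training is now weighted (1, 1, 10) rather than (0.1, 1, 10). A hedged illustration with hypothetical loss values (how the caller reduces this tuple lives in models/model.py and is not part of this diff):

xrecon, cano_cons, pose_sim = 0.5, 0.2, 0.03  # hypothetical raw loss values
old_weighted = xrecon / 10 + cano_cons + pose_sim * 10  # 0.55
new_weighted = xrecon + cano_cons + pose_sim * 10       # 1.00
# the reconstruction loss now dominates instead of being a minor term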
diff --git a/models/model.py b/models/model.py
index 78a9c0f..896d8d3 100644
--- a/models/model.py
+++ b/models/model.py
@@ -76,6 +76,7 @@ class Model:
self.scheduler: Optional[optim.lr_scheduler.StepLR] = None
self.writer: Optional[SummaryWriter] = None
self.image_log_on = system_config.get('image_log_on', False)
+ self.image_log_steps = system_config.get('image_log_steps', 100)
self.val_size = system_config.get('val_size', 10)
self.CASIAB_GALLERY_SELECTOR = {
@@ -193,8 +194,8 @@ class Model:
triplet_is_hard, triplet_is_mean, None
)
- self.num_pairs = (self.pr*self.k-1) * (self.pr*self.k) // 2
- self.num_pos_pairs = (self.k*(self.k-1)//2) * self.pr
+ self.num_pairs = (self.pr * self.k - 1) * (self.pr * self.k) // 2
+ self.num_pos_pairs = (self.k * (self.k - 1) // 2) * self.pr
# Try to accelerate computation using CUDA or others
self.rgb_pn = self.rgb_pn.to(self.device)
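With the new batch_size (pr, k) = (4, 5) from config.py, the reformatted pair-count formulas above work out as follows (a quick numeric check):

pr, k = 4, 5
n = pr * k                               # 20 samples per batch
num_pairs = (n - 1) * n // 2             # all unordered pairs: 190
num_pos_pairs = (k * (k - 1) // 2) * pr  # same-person pairs: 40
assert (num_pairs, num_pos_pairs) == (190, 40)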
@@ -208,24 +209,31 @@ class Model:
], **optim_hp)
# Scheduler
- start_step = sched_hp.get('start_step', 15_000)
+ start_step = sched_hp.get('start_step', 0)
+ stop_step = sched_hp.get('stop_step', self.total_iter)
final_gamma = sched_hp.get('final_gamma', 0.001)
ae_start_step = ae_sched_hp.get('start_step', start_step)
+ ae_stop_step = ae_sched_hp.get('stop_step', stop_step)
ae_final_gamma = ae_sched_hp.get('final_gamma', final_gamma)
- ae_all_step = self.total_iter - ae_start_step
+ ae_all_step = ae_stop_step - ae_start_step
hpm_start_step = hpm_sched_hp.get('start_step', start_step)
+ hpm_stop_step = hpm_sched_hp.get('stop_step', stop_step)
hpm_final_gamma = hpm_sched_hp.get('final_gamma', final_gamma)
- hpm_all_step = self.total_iter - hpm_start_step
+ hpm_all_step = hpm_stop_step - hpm_start_step
pn_start_step = pn_sched_hp.get('start_step', start_step)
+ pn_stop_step = pn_sched_hp.get('stop_step', stop_step)
pn_final_gamma = pn_sched_hp.get('final_gamma', final_gamma)
- pn_all_step = self.total_iter - pn_start_step
+ pn_all_step = pn_stop_step - pn_start_step
self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=[
- lambda t: ae_final_gamma ** ((t - ae_start_step) / ae_all_step)
- if t > ae_start_step else 1,
- lambda t: hpm_final_gamma ** ((t - hpm_start_step) / hpm_all_step)
- if t > hpm_start_step else 1,
- lambda t: pn_final_gamma ** ((t - pn_start_step) / pn_all_step)
- if t > pn_start_step else 1,
+ lambda t: 1 if t <= ae_start_step
+ else ae_final_gamma ** ((t - ae_start_step) / ae_all_step)
+ if ae_start_step < t <= ae_stop_step else ae_final_gamma,
+ lambda t: 1 if t <= hpm_start_step
+ else hpm_final_gamma ** ((t - hpm_start_step) / hpm_all_step)
+ if hpm_start_step < t <= hpm_stop_step else hpm_final_gamma,
+ lambda t: 1 if t <= pn_start_step
+ else pn_final_gamma ** ((t - pn_start_step) / pn_all_step)
+ if pn_start_step < t <= pn_stop_step else pn_final_gamma,
])
self.writer = SummaryWriter(self._log_name)
@@ -275,24 +283,25 @@ class Model:
'Train', embed_c, embed_p, hpm_result, pn_result, loss, losses
)
- if self.curr_iter % 100 == 99:
- # Write disentangled images
- if self.image_log_on:
- i_a, i_c, i_p = images
+ # Write disentangled images
+ if self.image_log_on and self.curr_iter % self.image_log_steps \
+ == self.image_log_steps - 1:
+ i_a, i_c, i_p = images
+ self.writer.add_images(
+ 'Appearance image', i_a, self.curr_iter
+ )
+ self.writer.add_images(
+ 'Canonical image', i_c, self.curr_iter
+ )
+ for i, (o, p) in enumerate(zip(x_c1, i_p)):
self.writer.add_images(
- 'Appearance image', i_a, self.curr_iter
+ f'Original image/batch {i}', o, self.curr_iter
)
self.writer.add_images(
- 'Canonical image', i_c, self.curr_iter
+ f'Pose image/batch {i}', p, self.curr_iter
)
- for i, (o, p) in enumerate(zip(x_c1, i_p)):
- self.writer.add_images(
- f'Original image/batch {i}', o, self.curr_iter
- )
- self.writer.add_images(
- f'Pose image/batch {i}', p, self.curr_iter
- )
+ if self.curr_iter % 100 == 99:
# Validation
embed_c = self._flatten_embedding(embed_c)
embed_p = self._flatten_embedding(embed_p)
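With the default image_log_steps = 100 from config.py, the refactored condition above fires on steps 99, 199, 299, ..., the last step of each logging window, which matches the old hard-coded "curr_iter % 100 == 99" cadence:

image_log_steps = 100
log_steps = [t for t in range(300)
             if t % image_log_steps == image_log_steps - 1]
assert log_steps == [99, 199, 299]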
@@ -349,7 +358,7 @@ class Model:
def _write_embedding(self, tag, embed, x, y):
frame = x[:, 0, :, :, :].cpu()
n, c, h, w = frame.size()
- padding = torch.zeros(n, c, h, (h-w) // 2)
+ padding = torch.zeros(n, c, h, (h - w) // 2)
padded_frame = torch.cat((padding, frame, padding), dim=-1)
self.writer.add_embedding(
embed,