-rw-r--r--  config.py        |  6 +++---
-rw-r--r--  models/model.py  | 13 ++++++++-----
2 files changed, 11 insertions, 8 deletions
diff --git a/config.py b/config.py
--- a/config.py
+++ b/config.py
@@ -5,7 +5,7 @@ config: Configuration = {
     # Disable accelerator
     'disable_acc': False,
     # GPU(s) used in training or testing if available
-    'CUDA_VISIBLE_DEVICES': '0',
+    'CUDA_VISIBLE_DEVICES': '0,1',
     # Directory used in training or testing for temporary storage
     'save_dir': 'runs',
     # Recorde disentangled image or not
@@ -30,14 +30,14 @@ config: Configuration = {
         # Resolution after resize, can be divided 16
         'frame_size': (64, 48),
         # Cache dataset or not
-        'cache_on': False,
+        'cache_on': True,
     },
     # Dataloader settings
     'dataloader': {
         # Batch size (pr, k)
         # `pr` denotes number of persons
         # `k` denotes number of sequences per person
-        'batch_size': (4, 6),
+        'batch_size': (6, 8),
         # Number of workers of Dataloader
         'num_workers': 4,
         # Faster data transfer from RAM to GPU if enabled
diff --git a/models/model.py b/models/model.py
index 497a0ea..2eeaf5e 100644
--- a/models/model.py
+++ b/models/model.py
@@ -175,13 +175,15 @@ class Model:
         num_pos_pairs = (self.k*(self.k-1)//2) * self.pr
         # Try to accelerate computation using CUDA or others
+        self.rgb_pn = nn.DataParallel(self.rgb_pn)
         self.rgb_pn = self.rgb_pn.to(self.device)
+        self.triplet_loss = nn.DataParallel(self.triplet_loss)
         self.triplet_loss = self.triplet_loss.to(self.device)
         self.optimizer = optim.Adam([
-            {'params': self.rgb_pn.ae.parameters(), **ae_optim_hp},
-            {'params': self.rgb_pn.pn.parameters(), **pn_optim_hp},
-            {'params': self.rgb_pn.hpm.parameters(), **hpm_optim_hp},
-            {'params': self.rgb_pn.fc_mat, **fc_optim_hp}
+            {'params': self.rgb_pn.module.ae.parameters(), **ae_optim_hp},
+            {'params': self.rgb_pn.module.pn.parameters(), **pn_optim_hp},
+            {'params': self.rgb_pn.module.hpm.parameters(), **hpm_optim_hp},
+            {'params': self.rgb_pn.module.fc_mat, **fc_optim_hp}
         ], **optim_hp)
         sched_final_gamma = sched_hp.get('final_gamma', 0.001)
         sched_start_step = sched_hp.get('start_step', 15_000)
@@ -227,7 +229,7 @@ class Model:
         y = y.repeat(self.rgb_pn.num_total_parts, 1)
         trip_loss, dist, num_non_zero = self.triplet_loss(embedding, y)
         losses = torch.cat((
-            ae_losses,
+            ae_losses.mean(0),
             torch.stack((
                 trip_loss[:self.rgb_pn.hpm_num_parts].mean(),
                 trip_loss[self.rgb_pn.hpm_num_parts:].mean()
@@ -389,6 +391,7 @@ class Model:
         model_hp.pop('triplet_margins', None)
         self.rgb_pn = RGBPartNet(self.in_channels, self.in_size, **model_hp)
         # Try to accelerate computation using CUDA or others
+        self.rgb_pn = nn.DataParallel(self.rgb_pn)
         self.rgb_pn = self.rgb_pn.to(self.device)
         self.rgb_pn.eval()
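Note on the nn.DataParallel changes above (not part of the commit, just a hedged illustration): torch.nn.DataParallel replicates a module on all visible GPUs (here '0,1') and splits each input batch along dim 0, gathering the per-replica outputs back on the first device. Two consequences show up in this diff: attributes of the wrapped network are only reachable through .module (hence the rewritten optimizer parameter groups), and per-part losses returned from forward come back stacked with a leading replica dimension, which is presumably why ae_losses gains a .mean(0). The sketch below assumes a toy Net class with ae/hpm branches and made-up learning rates; it only mirrors the pattern, not the repository's RGBPartNet.

    import torch
    from torch import nn, optim


    class Net(nn.Module):
        """Toy stand-in for a multi-branch network (hypothetical)."""

        def __init__(self):
            super().__init__()
            self.ae = nn.Linear(8, 8)   # stand-in for the autoencoder branch
            self.hpm = nn.Linear(8, 8)  # stand-in for the HPM branch

        def forward(self, x):
            return self.hpm(self.ae(x))


    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = Net()
    if torch.cuda.device_count() > 1:
        # Replicates the module on every visible GPU and splits each
        # batch along dim 0; outputs are gathered back on device 0.
        net = nn.DataParallel(net)
    net = net.to(device)

    # After wrapping, the original network lives at net.module, so direct
    # attribute access (net.ae) no longer works; parameter groups must be
    # built through .module, mirroring the optimizer change in the diff.
    inner = net.module if isinstance(net, nn.DataParallel) else net
    optimizer = optim.Adam([
        {'params': inner.ae.parameters(), 'lr': 1e-4},   # illustrative lr
        {'params': inner.hpm.parameters(), 'lr': 1e-3},  # illustrative lr
    ])

The isinstance guard keeps the same code path working on a single-GPU or CPU machine, where wrapping in DataParallel is skipped and .module does not exist.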