author | Jordan Gong <jordan.gong@protonmail.com> | 2021-02-15 12:03:07 +0800
committer | Jordan Gong <jordan.gong@protonmail.com> | 2021-02-15 14:06:59 +0800
commit | 6fb1c7cb34a65769c018a08324387af419355b32 (patch)
tree | c940384179b7f492592fb11f066f0c816ee57ce6 /models/model.py
parent | d51312415a32686793d3f0d14eda7fa7cc3990ea (diff)
Add DataParallel support on new codebase
Diffstat (limited to 'models/model.py')
-rw-r--r-- | models/model.py | 17
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/models/model.py b/models/model.py
index f79b832..eb12285 100644
--- a/models/model.py
+++ b/models/model.py
@@ -150,12 +150,13 @@ class Model:
         self.rgb_pn = RGBPartNet(self.in_channels, **model_hp,
                                  image_log_on=self.image_log_on)
         # Try to accelerate computation using CUDA or others
+        self.rgb_pn = nn.DataParallel(self.rgb_pn)
         self.rgb_pn = self.rgb_pn.to(self.device)
         self.optimizer = optim.Adam([
-            {'params': self.rgb_pn.ae.parameters(), **ae_optim_hp},
-            {'params': self.rgb_pn.pn.parameters(), **pn_optim_hp},
-            {'params': self.rgb_pn.hpm.parameters(), **hpm_optim_hp},
-            {'params': self.rgb_pn.fc_mat, **fc_optim_hp}
+            {'params': self.rgb_pn.module.ae.parameters(), **ae_optim_hp},
+            {'params': self.rgb_pn.module.pn.parameters(), **pn_optim_hp},
+            {'params': self.rgb_pn.module.hpm.parameters(), **hpm_optim_hp},
+            {'params': self.rgb_pn.module.fc_mat, **fc_optim_hp}
         ], **optim_hp)
         sched_gamma = sched_hp.get('gamma', 0.9)
         sched_step_size = sched_hp.get('step_size', 500)
@@ -194,8 +195,14 @@ class Model:
             x_c2 = batch_c2['clip'].to(self.device)
             y = batch_c1['label'].to(self.device)
             # Duplicate labels for each part
-            y = y.unsqueeze(1).repeat(1, self.rgb_pn.num_total_parts)
+            y = y.unsqueeze(1).repeat(1, self.rgb_pn.module.num_total_parts)
             losses, images = self.rgb_pn(x_c1, x_c2, y)
+            losses = torch.stack((
+                # xrecon          cano_cons         pose_sim
+                losses[0].sum(), losses[1].mean(), losses[2].mean(),
+                # hpm_ba_trip     pn_ba_trip
+                losses[3].mean(), losses[4].mean()
+            ))
             loss = losses.sum()
             loss.backward()
             self.optimizer.step()
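For context, a minimal sketch of the pattern this commit introduces, using a toy stand-in module rather than the real RGBPartNet (the class `ToyPartNet`, its submodule names, and the hyperparameters below are illustrative assumptions, not the project's actual code): after wrapping a model in `torch.nn.DataParallel`, its original attributes and submodules must be reached through `.module`, and the per-term losses returned by the wrapped forward pass are reduced to scalars and stacked before calling `backward()`.

```python
# Sketch only: ToyPartNet stands in for RGBPartNet; only the
# torch / torch.nn / torch.optim calls are real APIs.
import torch
import torch.nn as nn
import torch.optim as optim


class ToyPartNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.ae = nn.Linear(8, 8)   # stand-ins for the real submodules
        self.pn = nn.Linear(8, 8)
        self.num_total_parts = 4    # attribute accessed via .module after wrapping

    def forward(self, x):
        # Return separate loss terms, mirroring a model that returns a tuple of losses
        h = self.pn(self.ae(x))
        return h.pow(2).mean(dim=-1), h.abs().mean(dim=-1)


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = nn.DataParallel(ToyPartNet()).to(device)   # wrap, then move to device

# After wrapping, submodules live under `.module`, as in the diff above
optimizer = optim.Adam([
    {'params': net.module.ae.parameters(), 'lr': 1e-4},
    {'params': net.module.pn.parameters(), 'lr': 1e-3},
])

x = torch.randn(16, 8, device=device)
loss_a, loss_b = net(x)                 # each term is gathered across replicas
losses = torch.stack((loss_a.mean(), loss_b.mean()))  # reduce terms to scalars
loss = losses.sum()
loss.backward()
optimizer.step()
```

Reducing each gathered loss term with `sum()` or `mean()` before stacking is what keeps `loss = losses.sum()` a scalar when `DataParallel` concatenates per-replica outputs along the batch dimension.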