author     Jordan Gong <jordan.gong@protonmail.com>  2021-01-14 17:14:51 +0800
committer  Jordan Gong <jordan.gong@protonmail.com>  2021-01-14 17:14:51 +0800
commit     89472cfae2d7c9af923b6029e4c811c76e1e6482 (patch)
tree       f8533903a6bff81637cf35c8d37b2c52d165e65d /models
parent     07f22376fae0bb38ddc95413366d57a97a01f17d (diff)
parent     8572f5c8292e5798912ad54764c9d3a99afb49ec (diff)
Merge branch 'python3.8' into python3.7
Diffstat (limited to 'models')
-rw-r--r--  models/model.py  11
1 file changed, 2 insertions, 9 deletions
diff --git a/models/model.py b/models/model.py
index 258fd71..aa45d66 100644
--- a/models/model.py
+++ b/models/model.py
@@ -134,7 +134,7 @@ class Model:
         sched_hp = self.hp.get('scheduler', {})
         self.rgb_pn = RGBPartNet(self.train_size, self.in_channels, **model_hp)
         # Try to accelerate computation using CUDA or others
-        self.rgb_pn = self._accelerate(self.rgb_pn)
+        self.rgb_pn = self.rgb_pn.to(self.device)
         self.optimizer = optim.Adam(self.rgb_pn.parameters(), **optim_hp)
         self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, **sched_hp)
         self.writer = SummaryWriter(self._log_name)
@@ -193,13 +193,6 @@ class Model:
                 self.writer.close()
                 break
 
-    def _accelerate(self, model: nn.Module) -> nn.Module:
-        if not self.disable_acc:
-            if torch.cuda.device_count() > 1:
-                model = nn.DataParallel(model)
-            model = model.to(self.device)
-        return model
-
     def predict_all(
             self,
             iter_: int,
@@ -222,7 +215,7 @@ class Model:
         model_hp = self.hp.get('model', {})
         self.rgb_pn = RGBPartNet(ae_in_channels=self.in_channels, **model_hp)
         # Try to accelerate computation using CUDA or others
-        self.rgb_pn = self._accelerate(self.rgb_pn)
+        self.rgb_pn = self.rgb_pn.to(self.device)
         self.rgb_pn.eval()
         gallery_samples, probe_samples = [], {}
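
Note: both hunks make the same change. The removed _accelerate helper wrapped the network in nn.DataParallel whenever more than one GPU was visible (and honoured a disable_acc flag) before moving it to the target device; after this commit the model is simply moved to a single device with .to(self.device). Below is a minimal, hypothetical sketch contrasting the two placement strategies; ToyNet and the device selection are illustrative stand-ins, not code from this repository.

import torch
from torch import nn


class ToyNet(nn.Module):
    """Hypothetical stand-in for RGBPartNet."""

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(8, 2)

    def forward(self, x):
        return self.fc(x)


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Old strategy (the removed _accelerate helper): wrap the model in
# DataParallel when more than one GPU is available, then move it to the device.
model = ToyNet()
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)
model = model.to(device)

# New strategy (this commit): a plain single-device move.
model = ToyNet().to(device)

For multi-GPU use, PyTorch's documentation generally recommends DistributedDataParallel over DataParallel, so a plain .to(device) keeps single-device behaviour unchanged while leaving multi-GPU support to be reintroduced by other means if needed.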