author | Jordan Gong <jordan.gong@protonmail.com> | 2021-01-12 14:23:44 +0800
committer | Jordan Gong <jordan.gong@protonmail.com> | 2021-01-12 14:23:44 +0800
commit | 2b73aafe971f6014634eafbf130cc666531026bc (patch)
tree | 42f109a019316b074603107cac2d1011bc43e705 /models/model.py
parent | d7471a613fe1c8c4d3826648de705a65403ed6c2 (diff)
parent | 36c65d17e29bcdbc27089d3b3eba177c8da549e8 (diff)
Merge branch 'python3.8' into python3.7
# Conflicts:
# utils/configuration.py
Diffstat (limited to 'models/model.py')
-rw-r--r-- | models/model.py | 16
1 file changed, 8 insertions, 8 deletions
diff --git a/models/model.py b/models/model.py
index 7e4edaf..617966f 100644
--- a/models/model.py
+++ b/models/model.py
@@ -108,11 +108,12 @@ class Model:
         dataset = self._parse_dataset_config(dataset_config)
         dataloader = self._parse_dataloader_config(dataset, dataloader_config)
         # Prepare for model, optimizer and scheduler
-        hp = self.hp.copy()
-        lr, betas = hp.pop('lr', 1e-4), hp.pop('betas', (0.9, 0.999))
-        self.rgb_pn = RGBPartNet(self.train_size, self.in_channels, **hp)
-        self.optimizer = optim.Adam(self.rgb_pn.parameters(), lr, betas)
-        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, 500, 0.9)
+        model_hp = self.hp.get('model', {})
+        optim_hp = self.hp.get('optimizer', {})
+        sched_hp = self.hp.get('scheduler', {})
+        self.rgb_pn = RGBPartNet(self.train_size, self.in_channels, **model_hp)
+        self.optimizer = optim.Adam(self.rgb_pn.parameters(), **optim_hp)
+        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, **sched_hp)
         self.writer = SummaryWriter(self._log_name)
         # Try to accelerate computation using CUDA or others
         self._accelerate()
@@ -197,9 +198,8 @@ class Model:
             iter_, dataset_config, dataset_selectors
         )
         # Init models
-        hp = self.hp.copy()
-        hp.pop('lr'), hp.pop('betas')
-        self.rgb_pn = RGBPartNet(ae_in_channels=self.in_channels, **hp)
+        model_hp = self.hp.get('model', {})
+        self.rgb_pn = RGBPartNet(ae_in_channels=self.in_channels, **model_hp)
        # Try to accelerate computation using CUDA or others
         self._accelerate()
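For context, the change replaces pop-based extraction of `lr` and `betas` from a single flat hyperparameter dict with three grouped sub-dicts (`model`, `optimizer`, `scheduler`) that are unpacked directly into the respective constructors. The sketch below is a minimal illustration of that pattern, assuming a plain nested dict whose defaults mirror the values removed in this diff; `nn.Linear` is a hypothetical stand-in for `RGBPartNet`, which is not part of this page.

```python
import torch.nn as nn
import torch.optim as optim

# Assumed nested hyperparameter layout; defaults taken from the removed lines.
hp = {
    'model': {},  # keyword arguments forwarded to the network constructor
    'optimizer': {'lr': 1e-4, 'betas': (0.9, 0.999)},
    'scheduler': {'step_size': 500, 'gamma': 0.9},
}

model_hp = hp.get('model', {})
optim_hp = hp.get('optimizer', {})
sched_hp = hp.get('scheduler', {})

# Stand-in for RGBPartNet(train_size, in_channels, **model_hp)
rgb_pn = nn.Linear(8, 8, **model_hp)
optimizer = optim.Adam(rgb_pn.parameters(), **optim_hp)
scheduler = optim.lr_scheduler.StepLR(optimizer, **sched_hp)
```

With the grouped layout, optimizer and scheduler options never reach the model constructor, so the second call site in the diff no longer needs to pop `lr` and `betas` out of a copied dict before unpacking.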