author    Jordan Gong <jordan.gong@protonmail.com>  2021-03-04 13:29:24 +0800
committer Jordan Gong <jordan.gong@protonmail.com>  2021-03-04 13:29:24 +0800
commit    ecb8d8d750cd4a81494feb5dcb582641f73d67ff (patch)
tree      77fe52feb9a823a142bb3877dfd28b70257b2d36
parent    9b3f5e6cf5c3868b068e69f3a30e1d0afef3973d (diff)
parent    8578a141969720ec93b9bc172c8f20d0ef66ed16 (diff)
Merge branch 'master' into data_parallel
-rw-r--r--  models/model.py  14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/models/model.py b/models/model.py
index 1f8ae23..e8b16a9 100644
--- a/models/model.py
+++ b/models/model.py
@@ -1,4 +1,5 @@
import os
+import random
from datetime import datetime
from typing import Union, Optional
@@ -201,14 +202,17 @@ class Model:
self.writer = SummaryWriter(self._log_name)
+ # Set seeds for reproducibility
+ random.seed(0)
+ torch.manual_seed(0)
self.rgb_pn.train()
# Init weights at first iter
if self.curr_iter == 0:
self.rgb_pn.apply(self.init_weights)
else: # Load saved state dicts
checkpoint = torch.load(self._checkpoint_name)
- iter_, loss = checkpoint['iter'], checkpoint['loss']
- print('{0:5d} loss: {1:.3f}'.format(iter_, loss))
+ random.setstate(checkpoint['rand_states'][0])
+ torch.set_rng_state(checkpoint['rand_states'][1])
self.rgb_pn.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optim_state_dict'])
self.scheduler.load_state_dict(checkpoint['sched_state_dict'])
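Note: the hunk above seeds Python's and PyTorch's RNGs at the start of training and, when resuming, restores the saved RNG states instead of reprinting the last loss. A minimal, self-contained sketch of the seeding behaviour (none of the names below come from this repository): the same seed reproduces the same draw sequence.

    import random
    import torch

    random.seed(0)
    torch.manual_seed(0)
    first_run = (random.random(), torch.rand(3))

    random.seed(0)
    torch.manual_seed(0)
    second_run = (random.random(), torch.rand(3))

    # Both the Python and the PyTorch draws repeat exactly across the two "runs".
    assert first_run[0] == second_run[0]
    assert torch.equal(first_run[1], second_run[1])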
@@ -327,11 +331,10 @@ class Model:
if self.curr_iter % 1000 == 0:
torch.save({
- 'iter': self.curr_iter,
+ 'rand_states': (random.getstate(), torch.get_rng_state()),
'model_state_dict': self.rgb_pn.state_dict(),
'optim_state_dict': self.optimizer.state_dict(),
'sched_state_dict': self.scheduler.state_dict(),
- 'loss': loss,
}, self._checkpoint_name)
if self.curr_iter == self.total_iter:
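Note: this hunk swaps the iteration/loss bookkeeping in the checkpoint for the current RNG states, so a resumed run continues the same random sequence rather than restarting it. A minimal sketch of that save/restore round-trip, using a placeholder module net and path 'checkpoint.pt' rather than anything from this repository:

    import random
    import torch

    net = torch.nn.Linear(4, 2)
    random.seed(0)
    torch.manual_seed(0)

    # ... training iterations consume random numbers (shuffling, dropout, ...) ...
    torch.save({
        'rand_states': (random.getstate(), torch.get_rng_state()),
        'model_state_dict': net.state_dict(),
    }, 'checkpoint.pt')

    # Resuming: put both RNGs back exactly where they were at save time,
    # so later draws match those of an uninterrupted run.
    checkpoint = torch.load('checkpoint.pt')
    random.setstate(checkpoint['rand_states'][0])
    torch.set_rng_state(checkpoint['rand_states'][1])
    net.load_state_dict(checkpoint['model_state_dict'])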
@@ -428,7 +431,8 @@ class Model:
def _get_eval_sample(self, sample: dict[str, Union[list, torch.Tensor]]):
label = sample.pop('label').item()
clip = sample.pop('clip').to(self.device)
- feature = self.rgb_pn(clip).detach()
+ with torch.no_grad():
+ feature = self.rgb_pn(clip)
return {
**{'label': label},
**sample,
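Note: the last hunk computes evaluation features under torch.no_grad() instead of detaching the output afterwards: no autograd graph is built during the forward pass, so intermediate activations are not kept in memory at inference time. A minimal sketch of the difference, with a placeholder module net:

    import torch

    net = torch.nn.Linear(8, 4)
    clip = torch.randn(2, 8)

    with torch.no_grad():
        feature = net(clip)  # no graph is recorded for this forward pass

    assert not feature.requires_grad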