path: root/models/model.py
author    Jordan Gong <jordan.gong@protonmail.com>    2021-02-14 20:36:17 +0800
committer Jordan Gong <jordan.gong@protonmail.com>    2021-02-14 20:36:17 +0800
commit    be508061aeb3049a547c4e0c92d21c254689c1d5 (patch)
tree      3aea2a7c8e9d8090ea4ca8045b780ceb3647d2d7 /models/model.py
parent    929c48093c9f49a515420eb28d2678e48756b300 (diff)
Memory usage improvement
This update separates the input data into two batches, which reduces memory usage by ~30%.
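
Below is a minimal, self-contained sketch of the two-pass feeding pattern this commit introduces. The module, loss terms, and tensor shapes are hypothetical stand-ins rather than the repository's actual RGBPartNet interface (only the is_c1 flag mirrors the commit's call): the point is that the two condition clips are moved to the GPU and fed through the network one at a time, with a single backward() over the combined loss, so both input batches are never staged on the device at once.

import torch
import torch.nn as nn

class TwoPassNet(nn.Module):
    """Hypothetical two-branch module illustrating the pattern."""
    def __init__(self, dim=64):
        super().__init__()
        self.encoder = nn.Linear(dim, dim)

    def forward(self, x, is_c1=True):
        h = self.encoder(x)
        if is_c1:
            # First pass: losses computed from condition-1 clips only
            return h.pow(2).mean()
        # Second pass: losses computed from condition-2 clips only
        return h.abs().mean()

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = TwoPassNet().to(device)
batch_c1, batch_c2 = torch.randn(8, 64), torch.randn(8, 64)

x_c1 = batch_c1.to(device)          # feed condition-1 clips first
loss_c1 = model(x_c1)
x_c2 = batch_c2.to(device)          # only now move condition-2 clips over
loss_c2 = model(x_c2, is_c1=False)  # then feed them
loss = torch.stack((loss_c1, loss_c2)).sum()
loss.backward()                     # one backward over the combined graph

Note that both forward graphs are still retained until backward(), so the saving comes from staging the inputs and each pass's working buffers separately rather than from freeing the graph early; the commit stacks five loss terms the same way before the single backward().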
Diffstat (limited to 'models/model.py')
-rw-r--r--  models/model.py | 21
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/models/model.py b/models/model.py
index f79b832..bd05115 100644
--- a/models/model.py
+++ b/models/model.py
@@ -182,7 +182,7 @@ class Model:
         # Training start
         start_time = datetime.now()
         running_loss = torch.zeros(5, device=self.device)
-        print(f"{'Time':^8} {'Iter':^5} {'Loss':^6}",
+        print(f"{'Time':^8} {'Iter':^5} {'Loss':^5}",
               f"{'Xrecon':^8} {'CanoCons':^8} {'PoseSim':^8}",
               f"{'BATripH':^8} {'BATripP':^8} {'LRs':^19}")
         for (batch_c1, batch_c2) in dataloader:
@@ -190,12 +190,21 @@ class Model:
             # Zero the parameter gradients
             self.optimizer.zero_grad()
             # forward + backward + optimize
+            # Feed data twice in order to reduce memory usage
             x_c1 = batch_c1['clip'].to(self.device)
-            x_c2 = batch_c2['clip'].to(self.device)
             y = batch_c1['label'].to(self.device)
             # Duplicate labels for each part
             y = y.unsqueeze(1).repeat(1, self.rgb_pn.num_total_parts)
-            losses, images = self.rgb_pn(x_c1, x_c2, y)
+            # Feed condition 1 clips first
+            losses, images = self.rgb_pn(x_c1, y)
+            (xrecon_loss, hpm_ba_trip, pn_ba_trip) = losses
+            x_c2 = batch_c2['clip'].to(self.device)
+            # Then feed condition 2 clips
+            cano_cons_loss, pose_sim_loss = self.rgb_pn(x_c2, is_c1=False)
+            losses = torch.stack((
+                xrecon_loss, cano_cons_loss, pose_sim_loss,
+                hpm_ba_trip, pn_ba_trip
+            ))
             loss = losses.sum()
             loss.backward()
             self.optimizer.step()
@@ -225,7 +234,9 @@ class Model:
                 self.writer.add_images(
                     'Canonical image', i_c, self.curr_iter
                 )
-                for (i, (o, a, p)) in enumerate(zip(x_c1, i_a, i_p)):
+                for (i, (o, a, p)) in enumerate(zip(
+                        batch_c1['clip'], i_a, i_p
+                )):
                     self.writer.add_images(
                         f'Original image/batch {i}', o, self.curr_iter
                     )
@@ -239,7 +250,7 @@ class Model:
                 remaining_minute, second = divmod(time_used.seconds, 60)
                 hour, minute = divmod(remaining_minute, 60)
                 print(f'{hour:02}:{minute:02}:{second:02}',
-                      f'{self.curr_iter:5d} {running_loss.sum() / 100:6.3f}',
+                      f'{self.curr_iter:5d} {running_loss.sum() / 100:5.3f}',
                       '{:f} {:f} {:f} {:f} {:f}'.format(*running_loss / 100),
                       '{:.3e} {:.3e}'.format(lrs[0], lrs[1]))
                 running_loss.zero_()
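
A side note on the logging hunk above: it switches the image source from the GPU tensor x_c1 to the CPU-side batch_c1['clip'], presumably so the visualization reads clips from host memory instead of keeping a device copy alive just for TensorBoard. A small sketch of that logging pattern with the standard torch.utils.tensorboard API (the log directory, tag names, and shapes are illustrative):

import torch
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('runs/sketch')  # hypothetical log directory
# CPU-side clips, shaped (batch, frames, channels, height, width)
clips = torch.rand(2, 4, 3, 64, 32)
for i, clip in enumerate(clips):
    # add_images expects (N, C, H, W); a clip's frames play that role
    writer.add_images(f'Original image/batch {i}', clip, 0)
writer.close()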