From ebb2f93ac01f40d00968daaf9a2ad96c24ce7ab3 Mon Sep 17 00:00:00 2001
From: Jordan Gong
Date: Mon, 8 Aug 2022 19:32:51 +0800
Subject: Optimize batching

---
 libs/criteria.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'libs/criteria.py')

diff --git a/libs/criteria.py b/libs/criteria.py
index 6954cf3..baa36ce 100644
--- a/libs/criteria.py
+++ b/libs/criteria.py
@@ -8,17 +8,17 @@ class InfoNCELoss(nn.Module):
         super().__init__()
         self.temp = temp

-    def forward(self, feat1: Tensor, feat2: Tensor) -> tuple[Tensor, Tensor]:
-        bz = feat1.size(0)
-        feat1_norm = F.normalize(feat1)
-        feat2_norm = F.normalize(feat2)
+    def forward(self, feature: Tensor) -> tuple[Tensor, Tensor]:
+        bz = feature.size(0) // 2
+        feat_norm = F.normalize(feature)
+        feat1_norm, feat2_norm = feat_norm.split(bz)
         logits = feat1_norm @ feat2_norm.T
         pos_logits_mask = torch.eye(bz, dtype=torch.bool)
         pos_logits = logits[pos_logits_mask].unsqueeze(-1)
         neg_logits = logits[~pos_logits_mask].view(bz, -1)
         # Put the positive at first (0-th) and maximize its likelihood
         logits = torch.cat([pos_logits, neg_logits], dim=1)
-        labels = torch.zeros(bz, dtype=torch.long, device=feat1.device)
+        labels = torch.zeros(bz, dtype=torch.long, device=feature.device)
         loss_contra = F.cross_entropy(logits / self.temp, labels)
         acc_contra = (logits.argmax(dim=1) == labels).float().mean()
--
cgit v1.2.3
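
The patch changes the forward() contract: instead of taking the two augmented
views as separate tensors, it now expects a single tensor with both views
stacked along the batch dimension, which it splits back in half internally.
Below is a minimal caller-side sketch of the new convention. The names
`criterion`, `feat1`, and `feat2` are hypothetical; only `InfoNCELoss`, its
`temp` constructor argument, and the single-tensor forward() come from the
patch itself.

    import torch

    # `temp` is taken by __init__ per the diff; the value 0.1 is an
    # arbitrary choice for illustration.
    criterion = InfoNCELoss(temp=0.1)

    # Old call (pre-patch): loss_contra, acc_contra = criterion(feat1, feat2)
    # New call: stack both views along dim 0 so forward() can recover them
    # via feat_norm.split(bz), where bz = feature.size(0) // 2.
    feature = torch.cat([feat1, feat2], dim=0)  # shape: (2 * bz, dim)
    loss_contra, acc_contra = criterion(feature)

Passing one stacked tensor means the two views can also flow through the
encoder as a single batch and be normalized with one F.normalize call,
which is presumably the batching optimization the subject line refers to.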