5 changes: 5 additions & 0 deletions src/pytorch_metric_learning/losses/generic_pair_loss.py
@@ -28,6 +28,7 @@ def mat_based_loss(self, mat, indices_tuple):
         pos_mask, neg_mask = torch.zeros_like(mat), torch.zeros_like(mat)
         pos_mask[a1, p] = 1
         neg_mask[a2, n] = 1
+        self._assert_either_pos_or_neg(pos_mask, neg_mask)
         return self._compute_loss(mat, pos_mask, neg_mask)

     def pair_based_loss(self, mat, indices_tuple):
@@ -38,3 +39,7 @@ def pair_based_loss(self, mat, indices_tuple):
         if len(a2) > 0:
             neg_pair = mat[a2, n]
         return self._compute_loss(pos_pair, neg_pair, indices_tuple)
+
+    @staticmethod
+    def _assert_either_pos_or_neg(pos_mask, neg_mask):
+        assert not torch.any((pos_mask != 0) & (neg_mask != 0)), "Each pair should be either positive or negative"
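For readers skimming the diff, here is a minimal standalone sketch (not part of the change) of the situation the new check rejects: an index pair that lands in both the positive and the negative mask. The matrix and index tuples below are hypothetical; only the mask construction and the torch.any condition mirror the added code.

import torch

# Hypothetical 3x3 similarity matrix and mined index tuples (a1, p, a2, n).
mat = torch.rand(3, 3)
a1, p = torch.tensor([0]), torch.tensor([1])  # pair (0, 1) mined as positive
a2, n = torch.tensor([0]), torch.tensor([1])  # the same pair mined as negative

pos_mask, neg_mask = torch.zeros_like(mat), torch.zeros_like(mat)
pos_mask[a1, p] = 1
neg_mask[a2, n] = 1

# Same condition as _assert_either_pos_or_neg: True here, so the assert would fire.
print(torch.any((pos_mask != 0) & (neg_mask != 0)))  # tensor(True)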
32 changes: 0 additions & 32 deletions tests/losses/test_cross_batch_memory.py
@@ -238,7 +238,6 @@ def test_loss(self):
         batch_size = 32
         for inner_loss in [ContrastiveLoss(), MultiSimilarityLoss()]:
             inner_miner = MultiSimilarityMiner(0.3)
-            outer_miner = MultiSimilarityMiner(0.2)
             self.loss = CrossBatchMemory(
                 loss=inner_loss,
                 embedding_size=self.embedding_size,
@@ -267,10 +266,6 @@ def test_loss(self):
                 labels = torch.randint(0, num_labels, (batch_size,)).to(TEST_DEVICE)
                 loss = self.loss(embeddings, labels)
                 loss_with_miner = self.loss_with_miner(embeddings, labels)
-                oa1, op, oa2, on = outer_miner(embeddings, labels)
-                loss_with_miner_and_input_indices = self.loss_with_miner2(
-                    embeddings, labels, (oa1, op, oa2, on)
-                )
                 all_embeddings = torch.cat([all_embeddings, embeddings])
                 all_labels = torch.cat([all_labels, labels])

@@ -308,33 +303,6 @@ def test_loss(self):
                     torch.isclose(loss_with_miner, correct_loss_with_miner)
                 )
-
-                # loss with inner and outer miner
-                indices_tuple = inner_miner(
-                    embeddings, labels, all_embeddings, all_labels
-                )
-                a1, p, a2, n = lmu.remove_self_comparisons(
-                    indices_tuple,
-                    self.loss_with_miner2.curr_batch_idx,
-                    self.loss_with_miner2.memory_size,
-                )
-                a1 = torch.cat([oa1, a1])
-                p = torch.cat([op, p])
-                a2 = torch.cat([oa2, a2])
-                n = torch.cat([on, n])
-                correct_loss_with_miner_and_input_indice = inner_loss(
-                    embeddings,
-                    labels,
-                    (a1, p, a2, n),
-                    all_embeddings,
-                    all_labels,
-                )
-                self.assertTrue(
-                    torch.isclose(
-                        loss_with_miner_and_input_indices,
-                        correct_loss_with_miner_and_input_indice,
-                    )
-                )

     def test_queue(self):
         for test_enqueue_mask in [False, True]:
             for dtype in TEST_DTYPES:
Expand Down