-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathloss.py
More file actions
33 lines (27 loc) · 1.13 KB
/
loss.py
File metadata and controls
33 lines (27 loc) · 1.13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
import torch
from torch import nn
import torch.nn.functional as F
def l2_loss(emb1, emb2):
    """Mean over the batch of per-sample squared L2 distances.

    Args:
        emb1, emb2: embedding tensors of matching shape; assumed
            (batch, dim) since the sum runs over dim=1 — TODO confirm.

    Returns:
        Scalar tensor: mean_b ||emb1_b - emb2_b||_2^2.
    """
    # Elementwise squared error, summed per sample, averaged over the batch.
    squared_diff = (emb1 - emb2) ** 2
    return squared_diff.sum(dim=1).mean()
def ce_loss(logits, labels):
    """Standard cross-entropy between raw logits and integer class labels.

    Returns the batch-mean loss as a scalar tensor (mean is the default
    reduction of ``F.cross_entropy``).
    """
    return F.cross_entropy(logits, labels)
def ce_loss_targeted(logits, targets):
    """Negated cross-entropy w.r.t. attack target classes.

    Minimizing this pushes the logits TOWARD ``targets`` (targeted attack
    objective); it is exactly ``-ce_loss(logits, targets)``.
    """
    loss = nn.functional.cross_entropy(logits, targets, reduction='mean')
    return -loss
def dlr_loss(logits, labels):
    """Untargeted Difference-of-Logits-Ratio (DLR) loss.

    Per sample: -(z_y - max_{j != y} z_j) / (z_pi1 - z_pi3 + 1e-12),
    where z_pi1 >= z_pi2 >= ... are the sorted logits. Averaged over the
    batch. Requires at least 3 classes (uses the third-largest logit).

    Args:
        logits: (batch, num_classes) raw scores.
        labels: (batch,) integer ground-truth classes.

    Returns:
        Scalar tensor.
    """
    batch = logits.shape[0]
    rows = torch.arange(batch)
    desc, order = torch.sort(logits, dim=1, descending=True)
    true_logit = logits[rows, labels]
    # Competitor logit: if the true class is the argmax, use the runner-up;
    # otherwise use the overall maximum.
    label_is_top = order[:, 0] == labels
    competitor = torch.where(label_is_top, desc[:, 1], desc[:, 0])
    # Scale-invariance denominator; tiny epsilon guards against ties.
    scale = desc[:, 0] - desc[:, 2] + 1e-12
    per_sample = (true_logit - competitor) / scale
    return -per_sample.mean()
def dlr_loss_targeted(logits, labels, targets):
    """Targeted DLR loss.

    Per sample: -(z_y - z_t) / (z_pi1 - (z_pi3 + z_pi4)/2 + 1e-12),
    averaged over the batch. Minimizing drives the target-class logit above
    the true-class logit. Requires at least 4 classes (uses the fourth-
    largest logit in the denominator).

    Args:
        logits: (batch, num_classes) raw scores.
        labels: (batch,) integer ground-truth classes.
        targets: (batch,) integer attack target classes.

    Returns:
        Scalar tensor.
    """
    batch = logits.shape[0]
    rows = torch.arange(batch)
    asc, _ = torch.sort(logits, dim=1)  # ascending: asc[:, -1] is the max
    z_true = logits[rows, labels]
    z_target = logits[rows, targets]
    # Denominator mixes the 3rd- and 4th-largest logits for scale invariance.
    scale = asc[:, -1] - (asc[:, -3] + asc[:, -4]) * 0.5 + 1e-12
    return -((z_true - z_target) / scale).mean()