# loss.py
import torch
import torch.nn as nn


class SegmentationLosses(object):
    def __init__(self, weight=None, batch_average=True, ignore_index=255, cuda=False):
        self.ignore_index = ignore_index
        self.weight = weight
        self.batch_average = batch_average
        self.cuda = cuda

    def build_loss(self, mode='ce'):
        """Choices: ['ce' or 'focal']"""
        if mode == 'ce':
            return self.CrossEntropyLoss
        elif mode == 'focal':
            return self.FocalLoss
        else:
            raise NotImplementedError
    def CrossEntropyLoss(self, logit, target):
        # logit: (N, C, H, W) raw scores; target: (N, H, W) integer class map.
        criterion = nn.CrossEntropyLoss(weight=self.weight,
                                        ignore_index=self.ignore_index)
        if self.cuda:
            criterion = criterion.cuda()

        # nn.CrossEntropyLoss already averages over all non-ignored pixels.
        loss = criterion(logit, target.long())
        return loss
    def FocalLoss(self, logit, target, gamma=2, alpha=0.5):
        n, c, h, w = logit.size()
        criterion = nn.CrossEntropyLoss(weight=self.weight,
                                        ignore_index=self.ignore_index)
        if self.cuda:
            criterion = criterion.cuda()

        # Focal loss, FL(pt) = -alpha * (1 - pt)^gamma * log(pt), computed
        # from the mean cross entropy over all pixels, so the modulating
        # factor (1 - pt)^gamma down-weights the batch globally rather
        # than per pixel.
        logpt = -criterion(logit, target.long())
        pt = torch.exp(logpt)
        if alpha is not None:
            logpt *= alpha
        loss = -((1 - pt) ** gamma) * logpt

        # Optionally scale the (already pixel-averaged) loss by batch size.
        if self.batch_average:
            loss /= n
        return loss
if __name__ == "__main__":
    # Quick smoke test; runs on GPU when available, otherwise on CPU.
    use_cuda = torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'
    loss = SegmentationLosses(cuda=use_cuda)
    a = torch.rand(1, 3, 7, 7).to(device)           # logits for 3 classes
    b = torch.randint(0, 3, (1, 7, 7)).to(device)   # random class labels
    print(loss.CrossEntropyLoss(a, b).item())
    print(loss.FocalLoss(a, b, gamma=0, alpha=None).item())
    print(loss.FocalLoss(a, b, gamma=2, alpha=0.5).item())
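# --- Usage sketch (illustrative; not part of the original file) ---
# build_loss returns the bound loss callable for the requested mode. The
# shapes below are assumptions: logits (N, C, H, W) and targets (N, H, W)
# holding integer class ids, e.g. 21 classes for Pascal VOC:
#
#   criterion = SegmentationLosses(weight=None, cuda=False).build_loss(mode='focal')
#   logits = torch.rand(2, 21, 32, 32)
#   targets = torch.randint(0, 21, (2, 32, 32))
#   print(criterion(logits, targets).item())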