Commit 99b8218

Add files via upload

latencytime9527 authored Sep 11, 2022
1 parent 2aa6738 commit 99b8218
Showing 5 changed files with 172 additions and 0 deletions.
35 changes: 35 additions & 0 deletions losses/L1_mask_Loss.py
@@ -0,0 +1,35 @@
import torch
import torch.nn as nn


class L1_Mask_Loss(nn.Module):
    """Masked L1 loss: for weight > 1, pixels where the mask is 1 are weighted by
    `weight`, while pixels where it is 0 keep weight 1."""

    def __init__(self, weight=1):
        super(L1_Mask_Loss, self).__init__()
        assert weight >= 1, "weight must be at least 1"
        if weight == 1:
            self.add_ele = 1
        else:
            self.add_ele = weight / (weight - 1)
        self.factor = 1.0 / self.add_ele
        self.weight = weight
        self.criterion = nn.L1Loss(reduction="mean")

    def forward(self, gt, pred, mask, loss_weight=1):
        # Remap the binary mask (for weight > 1: mask==1 -> weight, mask==0 -> 1),
        # then apply it to both the target and the prediction before the L1 loss.
        mask = (mask * self.weight + self.add_ele) * self.factor
        gt = gt * mask
        pred = pred * mask
        loss = self.criterion(gt, pred) * loss_weight

        return loss


if __name__ == '__main__':
    criterion = L1_Mask_Loss(weight=10)
    a = torch.abs(torch.randn(2, 1, 16, 16))
    b = torch.abs(torch.randn(2, 1, 16, 16)).requires_grad_()
    c = torch.abs(torch.randn(2, 1, 16, 16))
    loss = criterion(a, b, c)
    loss.backward()
    print(loss)
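A quick way to see what the mask remapping in L1_Mask_Loss does (a minimal sketch, not part of the commit): for weight > 1 the affine transform sends mask values of 1 to `weight` and values of 0 to 1, so masked pixels contribute `weight` times more to the L1 term.

# Sketch only: checks the mask remapping used by L1_Mask_Loss for weight=10.
weight = 10
add_ele = weight / (weight - 1)          # 10/9
factor = 1.0 / add_ele                   # 9/10
print((0 * weight + add_ele) * factor)   # 1.0  -> unmasked pixels keep weight 1
print((1 * weight + add_ele) * factor)   # 10.0 -> masked pixels are weighted by `weight`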
6 changes: 6 additions & 0 deletions losses/__init__.py
@@ -1 +1,7 @@
from losses.L1_mask_Loss import L1_Mask_Loss
from losses.landmark_loss import LMK_Loss
from losses.sobel import Sobel_Loss
from losses.sobel_mask_loss import Sobel_Mask_Loss

__all__ = ["Sobel_Loss", "LMK_Loss", "L1_Mask_Loss", "Sobel_Mask_Loss"]

33 changes: 33 additions & 0 deletions losses/landmark_loss.py
@@ -0,0 +1,33 @@
import torch
import torch.nn as nn


class LMK_Loss(nn.Module):
    """Landmark loss: cosine distance between prediction and ground truth along the
    channel dimension, summed over spatial positions and averaged over the batch."""

    def __init__(self):
        super(LMK_Loss, self).__init__()

    def forward(self, gt, pred, weight=None):
        # Cosine similarity along the channel dimension (assumes non-zero norms).
        dot_sum = (pred * gt).sum(dim=1)
        predm = torch.sqrt((pred * pred).sum(dim=1))
        gtm = torch.sqrt((gt * gt).sum(dim=1))
        if weight is None:
            loss = (1 - dot_sum / (predm * gtm)).sum() / pred.shape[0]
        else:
            # Optional per-position weight map of shape (N, H, W).
            loss = ((1 - dot_sum / (predm * gtm)) * weight).sum() / pred.shape[0]

        return loss


if __name__ == '__main__':
    criterion = LMK_Loss()
    a = torch.abs(torch.randn(2, 2, 16, 16))
    b = torch.abs(torch.randn(2, 2, 16, 16)).requires_grad_()
    c = torch.abs(torch.randn(2, 16, 16))
    loss = criterion(a, b, c)
    loss.backward()
    print(loss)
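LMK_Loss is, in effect, a cosine-distance loss over the channel dimension. A minimal sketch of the equivalence against torch's built-in cosine similarity (results can differ only where a norm is near zero, because of the eps clamp inside F.cosine_similarity):

# Sketch only: LMK_Loss compared against F.cosine_similarity.
import torch
import torch.nn.functional as F
from losses import LMK_Loss

gt = torch.rand(2, 2, 16, 16)
pred = torch.rand(2, 2, 16, 16)
a = LMK_Loss()(gt, pred)
b = (1 - F.cosine_similarity(pred, gt, dim=1)).sum() / pred.shape[0]
print(torch.allclose(a, b))  # expected True for well-conditioned inputs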
43 changes: 43 additions & 0 deletions losses/sobel.py
@@ -0,0 +1,43 @@
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


class Sobel_Loss(nn.Module):
    """L1 loss between the Sobel gradients of the target and the prediction.
    The 3x3 kernels have a single input channel, so inputs are expected to be single-channel."""

    def __init__(self):
        super(Sobel_Loss, self).__init__()
        x = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=np.float32)
        y = x.T.copy()  # the y-direction kernel is the transpose of the x-direction kernel
        # Registered as buffers so the kernels follow the module across devices (CPU/GPU).
        self.register_buffer("kernelx", torch.from_numpy(x).reshape(1, 1, 3, 3))
        self.register_buffer("kernely", torch.from_numpy(y).reshape(1, 1, 3, 3))
        self.criterion = nn.L1Loss(reduction="mean")

    def forward(self, target, prediction, direction="x"):
        if direction == "x":
            sobel_t = F.conv2d(target, self.kernelx, padding=1)
            sobel_p = F.conv2d(prediction, self.kernelx, padding=1)
        else:
            sobel_t = F.conv2d(target, self.kernely, padding=1)
            sobel_p = F.conv2d(prediction, self.kernely, padding=1)
        loss = self.criterion(sobel_t, sobel_p)

        return loss


if __name__ == '__main__':
    criterion = Sobel_Loss()
    a = torch.abs(torch.randn(2, 1, 16, 16))
    b = torch.abs(torch.randn(2, 1, 16, 16)).requires_grad_()
    loss = criterion(a, b)
    loss.backward()
    print(loss)
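Since Sobel_Loss only compares gradients along one direction per call, a caller will typically evaluate it twice. A minimal usage sketch (the tensors and the equal weighting of the two directions are assumptions, not shown by the commit):

# Sketch only: penalising both gradient directions with Sobel_Loss.
import torch
from losses import Sobel_Loss

criterion = Sobel_Loss()
pred = torch.rand(2, 1, 16, 16, requires_grad=True)   # stand-in for a network output
target = torch.rand(2, 1, 16, 16)
loss = criterion(target, pred, direction="x") + criterion(target, pred, direction="y")
loss.backward()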
55 changes: 55 additions & 0 deletions losses/sobel_mask_loss.py
@@ -0,0 +1,55 @@
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


class Sobel_Mask_Loss(nn.Module):
    """Masked Sobel-gradient L1 loss: the mask re-weighting of L1_Mask_Loss applied
    to both tensors before comparing their Sobel gradients."""

    def __init__(self, weight=1):
        super(Sobel_Mask_Loss, self).__init__()
        assert weight >= 1, "weight must be at least 1"
        if weight == 1:
            self.add_ele = 1
        else:
            self.add_ele = weight / (weight - 1)
        self.factor = 1.0 / self.add_ele
        self.weight = weight
        x = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=np.float32)
        y = x.T.copy()  # the y-direction kernel is the transpose of the x-direction kernel
        # Registered as buffers so the kernels follow the module across devices (CPU/GPU).
        self.register_buffer("kernelx", torch.from_numpy(x).reshape(1, 1, 3, 3))
        self.register_buffer("kernely", torch.from_numpy(y).reshape(1, 1, 3, 3))
        self.criterion = nn.L1Loss(reduction="mean")

    def forward(self, gt, pred, mask, direction="x", loss_weight=1):
        # Remap the binary mask (for weight > 1: mask==1 -> weight, mask==0 -> 1).
        mask = (mask * self.weight + self.add_ele) * self.factor
        gt = gt * mask
        pred = pred * mask
        if direction == "x":
            sobel_t = F.conv2d(gt, self.kernelx, padding=1)
            sobel_p = F.conv2d(pred, self.kernelx, padding=1)
        else:
            sobel_t = F.conv2d(gt, self.kernely, padding=1)
            sobel_p = F.conv2d(pred, self.kernely, padding=1)
        # Apply loss_weight for both directions, not only the y branch.
        loss = self.criterion(sobel_t, sobel_p) * loss_weight

        return loss


if __name__ == '__main__':
    criterion = Sobel_Mask_Loss()
    a = torch.abs(torch.randn(2, 1, 16, 16))
    b = torch.abs(torch.randn(2, 1, 16, 16)).requires_grad_()
    c = torch.abs(torch.randn(2, 1, 16, 16))
    loss = criterion(a, b, c)
    loss.backward()
    print(loss)
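The commit does not show how these losses are combined during training; a minimal sketch of one plausible wiring, with the tensors, mask, and the unit weighting coefficients all assumed for illustration:

# Sketch only: combining the masked losses in a single objective.
import torch
from losses import L1_Mask_Loss, Sobel_Mask_Loss

l1_mask = L1_Mask_Loss(weight=10)
sobel_mask = Sobel_Mask_Loss(weight=10)

pred = torch.rand(2, 1, 16, 16, requires_grad=True)      # stand-in for a network output
gt = torch.rand(2, 1, 16, 16)
mask = (torch.rand(2, 1, 16, 16) > 0.5).float()          # hypothetical region-of-interest mask

loss = (l1_mask(gt, pred, mask)
        + sobel_mask(gt, pred, mask, direction="x")
        + sobel_mask(gt, pred, mask, direction="y"))
loss.backward()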
