Loss functions

I’m lost too.

source

huber_loss


def huber_loss(
    preds, target, mask, use_mask:bool=False, padding_mask:NoneType=None, delta:int=1
):

preds: [bs x num_patch x n_vars x patch_len]; target: [bs x num_patch x n_vars x patch_len]; mask: [bs x num_patch x n_vars]; padding_mask: [bs x num_patch]


source

cosine_similarity_loss


def cosine_similarity_loss(
    preds, target, mask, use_mask:bool=False, padding_mask:NoneType=None
):

preds: [bs x num_patch x n_vars x patch_len]; target: [bs x num_patch x n_vars x patch_len]; mask: [bs x num_patch x n_vars]; padding_mask: presumably [bs x num_patch], as in the other losses


source

cosine_similarity


def cosine_similarity(
    preds, target, mask, use_mask:bool=False, padding_mask:NoneType=None
):

source

mape


def mape(
    preds, target, mask, use_mask:bool=False
):

source

mae


def mae(
    preds, target, mask, use_mask:bool=False, padding_mask:NoneType=None
):

source

rmse


def rmse(
    preds, target, mask, use_mask:bool=False, padding_mask:NoneType=None
):

source

mse


def mse(
    preds, target, mask, use_mask:bool=False, padding_mask:NoneType=None
):

source

r2_score


def r2_score(
    preds, target, mask, use_mask:bool=False
):

source

masked_mae_loss


def masked_mae_loss(
    preds, target, mask, use_mask:bool=False, padding_mask:NoneType=None
):

preds: [bs x num_patch x n_vars x patch_len]; target: [bs x num_patch x n_vars x patch_len]; mask: [bs x num_patch x n_vars]; padding_mask: [bs x num_patch]


source

masked_mse_loss


def masked_mse_loss(
    preds, target, mask, use_mask:bool=False, padding_mask:NoneType=None
):

preds: [bs x num_patch x n_vars x patch_len]; target: [bs x num_patch x n_vars x patch_len]; mask: [bs x num_patch x n_vars]; padding_mask: [bs x num_patch]


source

patch_continuity_loss


def patch_continuity_loss(
    preds
):

preds: [bs x num_patch x n_vars x patch_len]

x = torch.randn(2,2, 2, 10)

patch_continuity_loss(x)
tensor(0.2936)

source

FocalLoss


def FocalLoss(
    weight:NoneType=None, gamma:float=2.0, reduction:str='mean', ignore_index:int=-100
):

Adapted from tsai's weighted multiclass focal loss: https://github.com/timeseriesAI/tsai/blob/bdff96cc8c4c8ea55bc20d7cffd6a72e402f4cb2/tsai/losses.py#L116C1-L140C20

criterion = FocalLoss(gamma=0.7, weight=None, ignore_index=0)
batch_size = 10

n_patch = 721
n_class = 5
#m = torch.nn.Softmax(dim=-1)
logits = torch.randn(batch_size, n_class, n_patch)
target = torch.randint(0, n_class, size=(batch_size, n_patch))
criterion(logits, target)
tensor(8.4217)

source

KLDivLoss


def KLDivLoss(
    weight:NoneType=None, ignore_index:int=-100
):

Kullback-Leibler Divergence Loss with masking for ignore_index. Handles soft labels with ignore_index marked as -100.

Args: logits: [bs x n_classes x pred_labels] - model predictions. targets: either [bs x n_classes x soft_labels] - soft labels, with ignore_index positions marked as -100 - or [bs x n_labels] - hard labels.

x = torch.randn(4,5,10)
y = torch.randint(0,5, size=(4,10))
y_og = y.clone()
y[0,0] = -100

KLDivLoss(ignore_index=-100)(x,y)
tensor(0.4065)