import torch
x = torch.randn(2, 2, 2, 10)
patch_continuity_loss(x)
tensor(0.2936)
huber_loss (preds, target, mask, use_mask=False, padding_mask=None, delta=1)
preds: [bs x num_patch x n_vars x patch_len]
targets: [bs x num_patch x n_vars x patch_len]
mask: [bs x num_patch x n_vars]
padding_mask: [bs x num_patch]
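For illustration, a hypothetical call that follows the documented shapes (tensor sizes are arbitrary, and huber_loss is assumed to be importable from this module):

import torch
bs, num_patch, n_vars, patch_len = 4, 8, 3, 16
preds = torch.randn(bs, num_patch, n_vars, patch_len)
target = torch.randn(bs, num_patch, n_vars, patch_len)
mask = torch.ones(bs, num_patch, n_vars)  # 1 = patch counts toward the loss
loss = huber_loss(preds, target, mask, use_mask=True, delta=1)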
cosine_similarity_loss (preds, target, mask, use_mask=False, padding_mask=None)
preds: [bs x num_patch x n_vars x patch_len]
targets: [bs x num_patch x n_vars x patch_len]
mask: [bs x num_patch x n_vars]
cosine_similarity (preds, target, mask, use_mask=False, padding_mask=None)
mape (preds, target, mask, use_mask=False)
mae (preds, target, mask, use_mask=False, padding_mask=None)
rmse (preds, target, mask, use_mask=False, padding_mask=None)
mse (preds, target, mask, use_mask=False, padding_mask=None)
r2_score (preds, target, mask, use_mask=False)
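The point metrics above (mape, mae, rmse, mse, r2_score) share the same calling convention. A hypothetical example, with arbitrary sizes and the functions assumed importable from this module:

import torch
preds = torch.randn(4, 8, 3, 16)
target = torch.randn(4, 8, 3, 16)
mask = torch.ones(4, 8, 3)
print(mae(preds, target, mask, use_mask=False))
print(rmse(preds, target, mask, use_mask=False))
print(r2_score(preds, target, mask, use_mask=False))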
masked_mae_loss (preds, target, mask, use_mask=False, padding_mask=None)
preds: [bs x num_patch x n_vars x patch_len]
targets: [bs x num_patch x n_vars x patch_len]
mask: [bs x num_patch x n_vars]
padding_mask: [bs x num_patch]
masked_mse_loss (preds, target, mask, use_mask=False, padding_mask=None)
preds: [bs x num_patch x n_vars x patch_len]
targets: [bs x num_patch x n_vars x patch_len]
mask: [bs x num_patch x n_vars]
padding_mask: [bs x num_patch]
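The shapes suggest the usual masked-reconstruction recipe: average the error over patch_len, keep only masked patches, and normalise by the number of masked entries. A minimal sketch of that idea for the MSE case (an illustration of the masking semantics, not the exact implementation here):

import torch

def masked_mse_sketch(preds, target, mask):
    # squared error per patch, averaged over patch_len -> [bs x num_patch x n_vars]
    err = ((preds - target) ** 2).mean(dim=-1)
    # keep masked patches (mask == 1) and normalise by their count
    return (err * mask).sum() / (mask.sum() + 1e-10)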
patch_continuity_loss (preds)
preds: [bs x num_patch x n_vars x patch_len]
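The name, the single-argument signature, and the example at the top of this page suggest a penalty on discontinuities at patch boundaries. One plausible sketch, assuming patches are contiguous in time (the actual definition may differ):

import torch

def patch_continuity_sketch(preds):
    # jump between the last sample of patch i and the first sample of
    # patch i+1; both slices are [bs x (num_patch-1) x n_vars]
    last_vals = preds[:, :-1, :, -1]
    first_vals = preds[:, 1:, :, 0]
    return ((first_vals - last_vals) ** 2).mean()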
FocalLoss (weight=None, gamma=2.0, reduction='mean', ignore_index=-100)
Adapted from tsai's weighted multiclass focal loss: https://github.com/timeseriesAI/tsai/blob/bdff96cc8c4c8ea55bc20d7cffd6a72e402f4cb2/tsai/losses.py#L116C1-L140C20
criterion = FocalLoss(gamma=0.7, weight=None, ignore_index=0)
batch_size = 10
n_patch = 721
n_class = 5
logits = torch.randn(batch_size, n_class, n_patch)
target = torch.randint(0, n_class, size=(batch_size, n_patch))
criterion(logits, target)
tensor(8.4217)
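For reference, a compact sketch of the standard weighted multiclass focal-loss recipe with ignore_index masking; the tsai code linked above is the authoritative version of what this class does:

import torch
import torch.nn.functional as F

def focal_loss_sketch(logits, target, gamma=2.0, weight=None, ignore_index=-100):
    # logits: [bs x n_class x n_patch], target: [bs x n_patch]
    log_p = F.log_softmax(logits, dim=1)
    ce = F.nll_loss(log_p, target, weight=weight,
                    ignore_index=ignore_index, reduction='none')
    # probability of the true class; clamp keeps a negative ignore_index
    # a valid gather index (those positions are masked out below anyway)
    p_t = log_p.gather(1, target.clamp(min=0).unsqueeze(1)).squeeze(1).exp()
    focal = (1 - p_t) ** gamma * ce
    return focal[target != ignore_index].mean()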
KLDivLoss (weight=None, ignore_index=-100)
Kullback-Leibler divergence loss with masking for ignore_index. Handles soft labels, with ignore_index positions marked as -100.
Args:
logits: [bs x n_classes x pred_labels] - model predictions
targets: [bs x n_classes x soft_labels] - soft labels, with ignore_index positions marked as -100, or [bs x n_labels] - hard labels
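A hypothetical soft-label call following the documented shapes (KLDivLoss assumed importable from this module; the -100 marking convention is taken from the docstring above):

import torch
criterion = KLDivLoss(ignore_index=-100)
bs, n_class, n_patch = 4, 5, 12
logits = torch.randn(bs, n_class, n_patch)
targets = torch.softmax(torch.randn(bs, n_class, n_patch), dim=1)
targets[:, :, 0] = -100  # mark the first position of every sample as ignored
loss = criterion(logits, targets)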