JEPA For Time Series

Joint-Embedding Predictive Architecture (JEPA) building blocks for time series, implemented in PyTorch: masking utilities, encoder/predictor modules, JEPA losses, and Lightning wrappers for pre-training.

Torch


source

create_masks


def create_masks(
    x, patch_size, patch_stride, context_mask_range, target_mask_range, melt_channels_to_batch:bool=False
):
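
A minimal usage sketch, assuming `create_masks` patchifies the series and returns index masks for the context and target patches (the exact return structure is an assumption; check the source link above):

    import torch

    # hypothetical input: batch of 8 series, 3 channels, 500 time steps
    x = torch.randn(8, 3, 500)

    # assumption: returns index tensors selecting context and target patches
    context_masks, target_masks = create_masks(
        x, patch_size=50, patch_stride=50,
        context_mask_range=(0.5, 1.0),  # fraction of non-target patches kept as context
        target_mask_range=(0.05, 0.3),  # fraction of patches hidden as targets
    )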

source

apply_masks


def apply_masks(
    x, masks
):
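
In the reference I-JEPA code, `apply_masks` gathers the token rows selected by each index mask; a sketch of that pattern, assuming this version follows it:

    import torch

    def apply_masks_sketch(x, masks):
        # x: [B, N, D] patch embeddings; masks: list of [B, K] index tensors.
        # Gather the selected tokens for each mask and stack the results
        # along the batch dimension (the I-JEPA convention).
        out = []
        for m in masks:
            idx = m.unsqueeze(-1).expand(-1, -1, x.size(-1))  # [B, K, D]
            out.append(torch.gather(x, dim=1, index=idx))
        return torch.cat(out, dim=0)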

source

JEPABlock


def JEPABlock(
    dim, num_heads, mlp_ratio:float=4.0, qkv_bias:bool=False, qk_scale:NoneType=None, drop:float=0.0,
    attn_drop:float=0.0, act_layer:type=GELU, norm_layer:type=LayerNorm, rotary_pes:bool=False
):

Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing them to be nested in a tree structure. You can assign the submodules as regular attributes:

    import torch.nn as nn
    import torch.nn.functional as F

    class Model(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(1, 20, 5)
            self.conv2 = nn.Conv2d(20, 20, 5)

        def forward(self, x):
            x = F.relu(self.conv1(x))
            return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and their parameters will also be converted when you call `to()`, etc.

Note: as per the example above, an `__init__()` call to the parent class must be made before assignment on the child.

training (bool): whether this module is in training or evaluation mode.
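
The signature suggests a standard transformer block (multi-head attention plus MLP) with optional rotary position embeddings. A construction sketch, with shapes assumed:

    import torch

    block = JEPABlock(dim=128, num_heads=4, mlp_ratio=4.0, rotary_pes=False)

    tokens = torch.randn(8, 32, 128)  # assumed [batch, num_patches, dim]
    out = block(tokens)               # assumed to preserve the input shape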


source

Encoder


def Encoder(
    c_in, num_patches, patch_size, patch_stride, d_model, nhead, num_layers, use_tst_block:bool=False,
    shared_embedding:bool=True, pe_type:str='tAPE', mlp_ratio:float=4.0, qkv_bias:bool=True, qk_scale:NoneType=None,
    drop_rate:float=0.0, attn_drop_rate:float=0.0, norm_layer:type=LayerNorm, jepa:bool=True,
    embed_activation:GELU=GELU(approximate='none'), init_std:float=0.02, tokenizer_type:str='simple',
    tokenizer_kwargs:dict={}
):

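A construction sketch for the encoder, assuming it consumes raw series of shape [batch, channels, length] and returns patch-level representations:

    import torch

    encoder = Encoder(
        c_in=3,          # input channels
        num_patches=10,  # length 500 with patch_size=50, patch_stride=50
        patch_size=50,
        patch_stride=50,
        d_model=128,
        nhead=4,
        num_layers=4,
    )

    x = torch.randn(8, 3, 500)
    z = encoder(x)  # assumed [batch, num_patches, d_model] (per channel if channels are split)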


source

mse_variance_loss


def mse_variance_loss(
    pred, target_ema, representations, alpha:float=0.2
):

source

loss_pred


def loss_pred(
    pred, target_ema, representations:NoneType=None, alpha:float=0.2
):

source

variance_loss


def variance_loss(
    x
):
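
The signatures suggest `mse_variance_loss` combines a prediction MSE against the EMA targets with a variance regularizer on the representations, weighted by `alpha`. A hedged sketch of a VICReg-style variance term and that combination (the actual definitions may differ; see the source links):

    import torch
    import torch.nn.functional as F

    def variance_loss_sketch(x, gamma=1.0, eps=1e-4):
        # VICReg-style anti-collapse term (an assumption about variance_loss):
        # hinge-penalize feature dims whose std across samples falls below gamma.
        std = torch.sqrt(x.var(dim=0) + eps)
        return F.relu(gamma - std).mean()

    def mse_variance_loss_sketch(pred, target_ema, representations, alpha=0.2):
        # hypothetical combination matching the signature above
        reps = representations.flatten(0, -2)  # fold tokens into the sample dim
        return F.mse_loss(pred, target_ema) + alpha * variance_loss_sketch(reps)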

source

Predictor


def Predictor(
    num_patches, encoder_embed_dim:int=128, predictor_embed_dim:int=128, nhead:int=2, num_layers:int=1,
    use_tst_block:bool=False, pe_type:str='tAPE', mlp_ratio:float=4.0, qkv_bias:bool=True, qk_scale:NoneType=None,
    drop_rate:float=0.0, attn_drop_rate:float=0.0, norm_layer:type=LayerNorm,
    embed_activation:GELU=GELU(approximate='none'), init_std:float=0.02,
    c_in_mask_tokens:int=1, # number of channels in the encoder (if treating channels separately)
):

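The signature suggests the predictor follows the I-JEPA pattern: map context representations, plus learned mask tokens at the target positions, to predictions of the target-encoder outputs. A construction sketch with assumed arguments:

    predictor = Predictor(
        num_patches=10,
        encoder_embed_dim=128,   # should match the encoder's d_model
        predictor_embed_dim=128,
        nhead=2,
        num_layers=1,
    )
    # assumed forward, following the I-JEPA convention:
    # pred = predictor(z_context, context_masks, target_masks)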


source

JEPASimpleLightning


def JEPASimpleLightning(
    learning_rate, train_size, batch_size, n_gpus, patchtsjepa_encoder_kwargs, patchtsjepa_predictor_kwargs,
    weight_decay:float=0.04, use_weight_decay_scheduler:bool=False, final_weight_decay:float=0.4, epochs:int=100,
    optimizer_type:str='adamw', scheduler_type:str='OneCycle',
    target_mask_range:tuple=(0.05, 0.3), # the target can cover up to 30% of the original x
    context_mask_range:tuple=(0.5, 1.0), # the context can cover up to 100% of the non-target region (1 - target_mask_ratio)
    mask_block_range:tuple=(1, 30), ema_decay:float=0.996, scheduler_kwargs:dict={}, transforms:NoneType=None,
    loss_fn:function=loss_pred
):

Hooks to be used in LightningModule.
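
A hedged end-to-end sketch: the two kwargs dicts presumably mirror the `Encoder` and `Predictor` signatures above, and the module trains like any LightningModule (the import name and exact kwargs below are assumptions):

    import pytorch_lightning as pl

    model = JEPASimpleLightning(
        learning_rate=1e-3,
        train_size=10_000,  # number of training samples (assumed use: sizing the LR schedule)
        batch_size=64,
        n_gpus=1,
        patchtsjepa_encoder_kwargs=dict(c_in=3, num_patches=10, patch_size=50,
                                        patch_stride=50, d_model=128, nhead=4,
                                        num_layers=4),
        patchtsjepa_predictor_kwargs=dict(num_patches=10, encoder_embed_dim=128),
    )

    trainer = pl.Trainer(max_epochs=100, accelerator="gpu", devices=1)
    # trainer.fit(model, train_dataloaders=train_dl)  # train_dl: your DataLoader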

ECG-JEPA

Adapted from https://github.com/sehunfromdaegu/ECG_JEPA/tree/master


source

ECGJEPALightning


def ECGJEPALightning(
    encoder_kwargs, predictor_kwargs, learning_rate, train_size, batch_size, n_gpus, weight_decay:float=0.04,
    use_weight_decay_scheduler:bool=False, final_weight_decay:float=0.4, epochs:int=100, optimizer_type:str='adamw',
    scheduler_type:str='OneCycle', ema_decay:float=0.996, scheduler_kwargs:dict={}, transforms:NoneType=None
):

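Both Lightning modules take `ema_decay=0.996`, which in JEPA-style training controls the exponential-moving-average update of the target encoder. A sketch of that update:

    import torch

    @torch.no_grad()
    def ema_update(target_encoder, online_encoder, decay=0.996):
        # JEPA-style target update: the target encoder trails the online encoder
        # as an exponential moving average and receives no gradients.
        for p_t, p_o in zip(target_encoder.parameters(), online_encoder.parameters()):
            p_t.mul_(decay).add_(p_o, alpha=1.0 - decay)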


source

MaskTransformerPredictor


def MaskTransformerPredictor(
    d_model:int=384, predictor_embed_dim:int=192, num_layers:int=4, nhead:int=6, mlp_ratio:float=4.0,
    qkv_bias:bool=False, qk_scale:NoneType=None, drop_rate:float=0.0, attn_drop_rate:float=0.0,
    drop_path_rate:float=0.0, norm_layer:type=LayerNorm, init_std:float=0.02, pe_type:str='sincos', c_in:int=9,
    num_patches:int=50, patch_size:int=50
):



source

MaskTransformer


def MaskTransformer(
    d_model:int=384, num_layers:int=12, nhead:int=6, mlp_ratio:float=4.0, qkv_bias:bool=False,
    qk_scale:NoneType=None, drop_rate:float=0.0, attn_drop_rate:float=0.0, drop_path_rate:float=0.0,
    norm_layer:type=LayerNorm, init_std:float=0.02, mask_scale:tuple=(0.3, 0.5), mask_type:str='block',
    pe_type:str='sincos', c_in:int=3, num_patches:int=50, patch_size:int=50
):

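`mask_type='block'` with `mask_scale=(0.3, 0.5)` suggests contiguous block masking over the patch sequence. A sketch of that sampling (the library's exact scheme may differ):

    import torch

    def block_mask_sketch(num_patches=50, mask_scale=(0.3, 0.5)):
        # mask one contiguous run of patches whose length is a uniform draw
        # from mask_scale, expressed as a fraction of the sequence
        frac = torch.empty(1).uniform_(*mask_scale).item()
        span = max(1, int(round(frac * num_patches)))
        start = torch.randint(0, num_patches - span + 1, (1,)).item()
        mask = torch.zeros(num_patches, dtype=torch.bool)
        mask[start:start + span] = True  # True = masked (hidden from the encoder)
        return mask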


source

Predictor_Block


def Predictor_Block(
    predictor_embed_dim:int=192, depth:int=4, num_heads:int=6, mlp_ratio:float=4.0, qkv_bias:bool=False,
    qk_scale:NoneType=None, drop_rate:float=0.0, attn_drop_rate:float=0.0, drop_path_rate:float=0.0
):



source

Encoder_Block


def Encoder_Block(
    embed_dim:int=384, depth:int=12, num_heads:int=6, mlp_ratio:float=4.0, qkv_bias:bool=False,
    qk_scale:NoneType=None, drop_rate:float=0.0, attn_drop_rate:float=0.0, drop_path_rate:float=0.0,
    norm_layer:type=LayerNorm
):

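`Encoder_Block` and `Predictor_Block` look like plain stacks of transformer layers. `drop_path_rate` is conventionally spread linearly across the stack (an assumption about this implementation):

    import torch

    # stochastic-depth schedule: layer i drops residual branches with prob dpr[i]
    dpr = torch.linspace(0, 0.1, steps=12).tolist()

    blocks = Encoder_Block(embed_dim=384, depth=12, num_heads=6, drop_path_rate=0.1)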


source

get_2d_sincos_pos_embed


def get_2d_sincos_pos_embed(
    embed_dim, grid_size_h, grid_size_w, cls_token:bool=False
):

grid_size_h: int of the grid height
grid_size_w: int of the grid width
return: pos_embed: [grid_size_h*grid_size_w, embed_dim] or [1+grid_size_h*grid_size_w, embed_dim] (w/ or w/o cls_token)


source

get_2d_sincos_pos_embed_from_grid


def get_2d_sincos_pos_embed_from_grid(
    embed_dim, grid
):

source

get_1d_sincos_pos_embed_from_grid


def get_1d_sincos_pos_embed_from_grid(
    embed_dim, pos
):

embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
out: (M, D)
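
These follow the standard MAE-style sine-cosine construction (an assumption based on the docstrings): half the dimensions carry sines and half cosines at geometrically spaced frequencies, and the 2-D variant concatenates independent 1-D embeddings of the grid's h and w coordinates. A self-contained sketch of the 1-D case:

    import numpy as np

    def sincos_1d(embed_dim, pos):
        # pos: positions of shape (M,); returns (M, embed_dim)
        assert embed_dim % 2 == 0
        omega = np.arange(embed_dim // 2, dtype=np.float64) / (embed_dim / 2.0)
        omega = 1.0 / 10000**omega  # (D/2,) geometrically spaced frequencies
        angles = np.outer(np.asarray(pos, dtype=np.float64).reshape(-1), omega)
        return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)

    pe = sincos_1d(128, np.arange(50))  # (50, 128) positional table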