def InceptionTokenizer(
    c_in,                        # the number of input channels
    patch_size,                  # the length of the patches (either stft or interval length)
    d_model,                     # the dimension of the initial linear layers for inputting patches into the transformer
    patch_stride:NoneType=None,  # the stride of the patches
    shared_embedding:bool=True,  # indicator of whether to project each channel individually or together
    **tokenizer_kwargs,
)
Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing them to be nested in a tree structure. You can assign the submodules as regular attributes::

    import torch.nn as nn
    import torch.nn.functional as F

    class Model(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(1, 20, 5)
            self.conv2 = nn.Conv2d(20, 20, 5)

        def forward(self, x):
            x = F.relu(self.conv1(x))
            return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will also have their parameters converted when you call :meth:`to`, etc.

.. note:: As per the example above, an ``__init__()`` call to the parent class must be made before assignment on the child.

:ivar training: Boolean represents whether this module is in training or evaluation mode.
:vartype training: bool
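A minimal instantiation sketch. The keyword values mirror the commented-out call further down this page; the input layout is assumed to match TS_Tokenizer's [batch_size, c_in, seq_len] and is not confirmed by the notebook:

import torch

# Sketch only: kwargs taken from the commented-out call below;
# bottleneck_channels is assumed to be a valid tokenizer kwarg.
tok = InceptionTokenizer(c_in=7, patch_size=10, patch_stride=10,
                         d_model=128*7, shared_embedding=True,
                         bottleneck_channels=16*7)
x = torch.randn(2, 7, 100)  # assumed [batch_size, c_in, seq_len]
out = tok(x)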
def LinearTokenizer(
    c_in,                         # the number of input channels
    patch_size,                   # the length of the patches (either stft or interval length)
    d_model,                      # the dimension of the initial linear layers for inputting patches into the transformer
    shared_embedding:bool=False,  # indicator of whether to project each channel individually or together
)
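A construction sketch, assuming only what the signature above states; the expected input layout for LinearTokenizer is not documented on this page, so a forward call is not shown:

# Sketch only: construction per the signature above.
tok = LinearTokenizer(c_in=7, patch_size=10, d_model=128*7, shared_embedding=False)
n_params = sum(p.numel() for p in tok.parameters())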
TS_Tokenizer

Tokenizer class based on a Conv1D that handles both nested and regular tensors.

c_in (int): Number of input channels
patch_size (int): Size of each patch/kernel
d_model (int): Output embedding dimension
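Before the nested-tensor demo below, a dense-tensor sketch may help. The padding behaviour is inferred from the TS_Tokenizer.forward frames visible in the traceback further down, so treat the comments as assumptions rather than documented guarantees:

import torch

# Sketch only: TS_Tokenizer pads the sequence so it divides evenly into
# patches, then applies a Conv1d with kernel=patch_size (see the forward
# frames in the traceback below); the exact output layout is not asserted.
tokenizer = TS_Tokenizer(c_in=7, patch_size=10, d_model=128*7, shared_embedding=False)
x = torch.randn(2, 7, 100)  # [batch_size, c_in, seq_len]
print(tokenizer(x).shape)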
max_len = 100
batch_size = 2
n_vars = 7
patch_size = 10
patch_stride = 10
d_model = 128
seq_lens = torch.randint(50, max_len, (batch_size,))
# Create input tensors with different sequence lengths
x_list = [torch.randn(6, length) for length in seq_lens]
x_nested = torch.nested.as_nested_tensor(x_list, layout=torch.jagged)
tokenizer = TS_Tokenizer(c_in=7, patch_size=10, d_model=128*7, shared_embedding=False)
# tokenizer = InceptionTokenizer(c_in=7, patch_size=10, patch_stride=10, d_model=128*7, shared_embedding=True, bottleneck_channels=16*7)
x = torch.randn(2, 6, 100)
print([i.shape for i in x_nested])
r = tokenizer(x_nested)
print(r.shape)
print([r.shape for r in r.unbind()])
[torch.Size([6, 99]), torch.Size([6, 83])]
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
Cell In[6], line 18
     16 x = torch.randn(2, 6, 100)
     17 print([i.shape for i in x_nested])
---> 18 r = tokenizer(x_nested)
     19 print(r.shape)
     20 print([r.shape for r in r.unbind()])

File /opt/miniconda3/envs/timeflies/lib/python3.12/site-packages/torch/nn/modules/module.py:1739, in Module._wrapped_call_impl(self, *args, **kwargs)
   1737     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1738 else:
-> 1739     return self._call_impl(*args, **kwargs)

File /opt/miniconda3/envs/timeflies/lib/python3.12/site-packages/torch/nn/modules/module.py:1750, in Module._call_impl(self, *args, **kwargs)
   1745 # If we don't have any hooks, we want to skip the rest of the logic in
   1746 # this function, and just call forward.
   1747 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1748         or _global_backward_pre_hooks or _global_backward_hooks
   1749         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1750     return forward_call(*args, **kwargs)
   1752 result = None
   1753 called_always_called_hooks = set()

Cell In[4], line 44, in TS_Tokenizer.forward(self, x)
     41     pad_amount = self.patch_stride - ((seq_len - self.patch_size) % self.patch_stride)
     42     x = F.pad(x, (0, pad_amount), 'constant', value=0.)
---> 44 x = self.proj(x)  # [batch_size, d_model, num_patches]
     46 if not self.shared_embedding:
     47     x = x.reshape(bs, self.c_in, self.d_model // self.c_in, -1)

File /opt/miniconda3/envs/timeflies/lib/python3.12/site-packages/torch/nn/modules/module.py:1739, in Module._wrapped_call_impl(self, *args, **kwargs)
   1737     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1738 else:
-> 1739     return self._call_impl(*args, **kwargs)

File /opt/miniconda3/envs/timeflies/lib/python3.12/site-packages/torch/nn/modules/module.py:1750, in Module._call_impl(self, *args, **kwargs)
   1745 # If we don't have any hooks, we want to skip the rest of the logic in
   1746 # this function, and just call forward.
   1747 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1748         or _global_backward_pre_hooks or _global_backward_hooks
   1749         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1750     return forward_call(*args, **kwargs)
   1752 result = None
   1753 called_always_called_hooks = set()

File /opt/miniconda3/envs/timeflies/lib/python3.12/site-packages/torch/nn/modules/conv.py:375, in Conv1d.forward(self, input)
    374 def forward(self, input: Tensor) -> Tensor:
--> 375     return self._conv_forward(input, self.weight, self.bias)

File /opt/miniconda3/envs/timeflies/lib/python3.12/site-packages/torch/nn/modules/conv.py:370, in Conv1d._conv_forward(self, input, weight, bias)
    358 if self.padding_mode != "zeros":
    359     return F.conv1d(
    360         F.pad(
    361             input, self._reversed_padding_repeated_twice, mode=self.padding_mode
   (...)
    368         self.groups,
    369     )
--> 370 return F.conv1d(
    371     input, weight, bias, self.stride, self.padding, self.dilation, self.groups
    372 )

RuntimeError: Given groups=7, weight of size [896, 1, 10], expected input[2, 6, 100] to have 7 channels, but got 6 channels instead
The failure above is a channel mismatch: the nested sequences were built with 6 channels while the tokenizer was constructed with c_in=7. Rebuilding the inputs with 7 channels resolves it:

seq_lens = torch.randint(50, max_len, (batch_size * n_vars,))
# Create input tensors with different sequence lengths (7 channels each, matching c_in)
x_list = [torch.randn(7, length) for length in seq_lens]
x_nested = torch.nested.as_nested_tensor(x_list, layout=torch.jagged)
tokenizer = TS_Tokenizer(c_in=7, patch_size=10, d_model=128)
x = torch.randn(1, 7, 100)
tokenizer(x_nested).shape
def PatchEncoder(
    c_in,              # the number of input channels
    patch_len,         # the length of the patches (either stft or interval length)
    d_model,           # the dimension of the initial linear layers for inputting patches into the transformer
    shared_embedding,  # indicator of whether to project each channel individually or together
)
patch_len = 750
d_model = 12
n_vars = 7
max_len = 100
bs = 3
# Create sequences of different lengths
seq_lens = torch.randint(50, max_len, (bs,))
# Create input tensors with different sequence lengths
x = torch.randn(bs, 100, n_vars, patch_len)
p = PatchEncoder(c_in=n_vars, patch_len=patch_len, d_model=d_model, shared_embedding=False)
t = p(x)
t.shape
torch.Size([3, 100, 7, 12])
patch_len = 750
d_model = 12
n_vars = 7
max_len = 100
bs = 3
# Create sequences of different lengths
seq_lens = torch.randint(50, max_len, (bs,))
# Create input tensors with different sequence lengths
x_list = [torch.randn(length, n_vars, patch_len) for length in seq_lens]
x_nested = torch.nested.as_nested_tensor(x_list, layout=torch.jagged)
p = PatchEncoder(c_in=n_vars, patch_len=patch_len, d_model=d_model, shared_embedding=False)
t = p(x_nested)
t[0].shape, t[1].shape, t[2].shape
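Extrapolating from the dense output above ([bs, num_patches, n_vars, d_model]), each unbound nested result should come out as [seq_lens[i], n_vars, d_model]. A quick check under that assumption (not part of the original notebook):

# Assumed per-sequence shapes, extrapolated from the dense case above.
for length, ti in zip(seq_lens, t.unbind()):
    assert ti.shape == (length.item(), n_vars, d_model)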