Dual RNN Models

PyTorch Models for Sequential Data

```python
from nbdev.config import get_config
from tsfast.basics import *  # assumed import: get_hdf_files, SequenceBlock, ARProg, ... are library exports

project_root = get_config().config_file.parent
f_path = project_root / 'test_data/WienerHammerstein'
hdf_files = get_hdf_files(f_path)
init_sz = 300  # warm-up length used for state estimation
u = ['u']      # input signal column in the HDF files
y = ['y']      # output signal column in the HDF files
```
```python
seq = DataBlock(blocks=(SequenceBlock.from_hdf(u+y, TensorSequencesInput, clm_shift=[0, -1]),  # model input: u and shifted y
                        SequenceBlock.from_hdf(y, TensorSequencesOutput, clm_shift=[-1])),     # prediction target
                get_items=CreateDict([DfHDFCreateWindows(win_sz=500+1, stp_sz=100, clm='u')]), # sliding windows over each file
                splitter=ApplyToDict(FuncSplitter(lambda o: 'valid' in str(o))))               # files with 'valid' in the path form the validation set
db = seq.dataloaders(hdf_files, bs=32, dl_type=TfmdDL)
db.one_batch()[0][0].shape, db.one_batch()[0][1].shape
```
(torch.Size([500, 2]), torch.Size([500, 2]))
```python
db.show_batch(max_n=1)
```
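`DfHDFCreateWindows` slices each recording into overlapping windows of `win_sz` samples with stride `stp_sz`. Conceptually (not the library's implementation, and using a hypothetical random signal), the windowing is equivalent to:

```python
import torch

sig = torch.randn(2000, 2)               # hypothetical recording, columns [u, y]
win_sz, stp_sz = 500 + 1, 100            # same window length and stride as above
windows = sig.unfold(0, win_sz, stp_sz)  # (n_windows, 2, win_sz)
windows = windows.transpose(1, 2)        # (n_windows, win_sz, 2)
windows.shape                            # torch.Size([15, 501, 2])
```

The extra sample (`win_sz=500+1`) accommodates the one-step column shift, which is consistent with the 500-step sequences in the batch above.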
Autoregressive Prognosis
Same RNN used for State Estimation and Prediction
ARProg
ARProg (n_u, n_x, n_y, init_sz, hidden_p=0.0, input_p=0.0, weight_p=0.0, rnn_type='gru', ret_full_hidden=False, stateful=False, normalization='', **kwargs)
Base class for all neural network modules. Your models should also subclass this class.

Modules can also contain other Modules, allowing them to be nested in a tree structure. You can assign the submodules as regular attributes:
```python
import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))
```
Submodules assigned in this way will be registered, and will also have their parameters converted when you call `.to()`, etc.
Note: as per the example above, an `__init__()` call to the parent class must be made before assignment on the child.

`training` (bool): whether this module is in training or evaluation mode.
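The inherited `nn.Module` docstring says nothing about the model itself, so here is the idea in miniature: `ARProg` runs a single RNN over the whole window. For the first `init_sz` steps it is fed the measured output (state estimation); afterwards it feeds its own predictions back as input (prognosis). A minimal, illustrative sketch of that loop, not the library's implementation (`TinyARProg` and all its internals are hypothetical):

```python
import torch
import torch.nn as nn

class TinyARProg(nn.Module):
    "Illustrative single-RNN autoregressive predictor (hypothetical, not the library code)."
    def __init__(self, n_u, n_y, init_sz, hidden_size=50):
        super().__init__()
        self.n_y, self.init_sz = n_y, init_sz
        self.rnn = nn.GRU(n_u + n_y, hidden_size, batch_first=True)
        self.head = nn.Linear(hidden_size, n_y)

    def forward(self, x):
        # x: (bs, seq_len, n_u + n_y), measured y in the last n_y columns
        out, h = self.rnn(x[:, :self.init_sz])    # state estimation on measured data
        preds = [self.head(out)]                  # predictions for the warm-up steps
        y_prev = preds[-1][:, -1:]                # last warm-up prediction, (bs, 1, n_y)
        for t in range(self.init_sz, x.size(1)):  # prognosis: feed predictions back in
            step_in = torch.cat([x[:, t:t+1, :-self.n_y], y_prev], dim=-1)
            out, h = self.rnn(step_in, h)
            y_prev = self.head(out)
            preds.append(y_prev)
        return torch.cat(preds, dim=1)            # (bs, seq_len, n_y)

TinyARProg(n_u=1, n_y=1, init_sz=300)(torch.randn(32, 500, 2)).shape  # torch.Size([32, 500, 1])
```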
```python
xb, yb = db.one_batch()
model = ARProg(len(u), 0, len(y), init_sz=init_sz, num_layers=1, hidden_size=100).to(xb.device)
model(xb).shape
```
torch.Size([32, 500, 1])
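Only the tail of this prediction is a genuine prognosis; during the first `init_sz` steps the model still sees measured outputs. For evaluation one would therefore typically look at the autoregressive part only (a sketch reusing the names above):

```python
pred = model(xb)               # (32, 500, 1)
prognosis = pred[:, init_sz:]  # (32, 200, 1): steps predicted without measured feedback
```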
```python
from fastai.callback.training import ShortEpochCallback

model = ARProg(len(u), 0, len(y), init_sz=init_sz, num_layers=1, hidden_size=50)
lrn = Learner(db, model, loss_func=SkipNLoss(mse, init_sz))
lrn.add_cb(TbpttResetCB())        # reset hidden state for truncated backpropagation through time
lrn.add_cb(ShortEpochCallback())  # cut the epoch short for a quick smoke test
lrn.fit_flat_cos(1, 3e-3, pct_start=0.2)
# alternatively: lrn.fit(1, lr=3e-3)
```
epoch | train_loss | valid_loss | time
---|---|---|---
0 | | | 00:01
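`SkipNLoss` masks the warm-up from the objective: since the model has measured outputs available for the first `init_sz` steps, the loss is computed only on the prognosis part. A rough sketch of that idea, inferred from the name and the usage above rather than taken from the library's code:

```python
import torch.nn.functional as F

def skip_n_loss(loss_func, n):
    "Apply loss_func only after the first n time steps (hypothetical re-implementation)."
    def _inner(pred, targ):
        return loss_func(pred[:, n:], targ[:, n:])
    return _inner

loss = skip_n_loss(F.mse_loss, init_sz)  # comparable in spirit to SkipNLoss(mse, init_sz)
```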