Skip to content

LSTMRegressor

Bases: nn.Module

Standard LSTM model with PyTorch Lightning.

Source code in make_us_rich/pipelines/training/model.py
(source lines 12–36)
def __init__(self,
    batch_size: int,
    dropout_rate: float,
    hidden_size: int,
    number_of_features: int,
    number_of_layers: int,
    run_on_gpu: bool,
):
    """Build the stacked-LSTM regressor.

    Args:
        batch_size: number of sequences per training batch.
        dropout_rate: dropout applied between stacked LSTM layers.
        hidden_size: width of each LSTM hidden state.
        number_of_features: per-timestep input feature count.
        number_of_layers: how many LSTM layers to stack.
        run_on_gpu: whether the model is trained on a CUDA device.
    """
    super().__init__()

    # Keep the configuration on the instance for later reference.
    self.batch_size = batch_size
    self.dropout_rate = dropout_rate
    self.hidden_size = hidden_size
    self.n_features = number_of_features
    self.number_of_layers = number_of_layers
    self.run_on_gpu = run_on_gpu

    # Recurrent encoder; batch_first means inputs are (batch, seq, features).
    self.lstm = nn.LSTM(
        input_size=self.n_features,
        hidden_size=self.hidden_size,
        num_layers=self.number_of_layers,
        dropout=self.dropout_rate,
        batch_first=True,
    )

    # Linear head mapping the last hidden state to a single scalar output.
    self.regressor = nn.Linear(self.hidden_size, 1)

forward(x)

Forward pass through the model.

lstm_out = (batch_size, sequence_length, hidden_size)

Source code in make_us_rich/pipelines/training/model.py
(source lines 39–49)
def forward(self, x):
    """
    Forward pass through the model.

    lstm_out = (batch_size, sequence_length, hidden_size)
    """
    if self.run_on_gpu:
        # Compact LSTM weights into contiguous memory for CUDA kernels.
        self.lstm.flatten_parameters()
    # The LSTM returns (output, (h_n, c_n)); we only need the hidden states.
    _, (hidden_states, _) = self.lstm(x)
    # Final layer's last hidden state feeds the regression head.
    last_hidden = hidden_states[-1]
    return self.regressor(last_hidden)

Last update: 2022-05-04