
PricePredictor

Bases: pl.LightningModule

Training model with PyTorch Lightning.

Initialize the model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| batch_size | int | Batch size for training. | required |
| dropout_rate | float | Dropout rate for the LSTM. | required |
| hidden_size | int | Hidden size for the LSTM. | required |
| learning_rate | float | Learning rate for the optimizer. | required |
| number_of_features | int | Number of features in the input. | required |
| number_of_layers | int | Number of layers in the LSTM. | required |
| run_on_gpu | bool | Whether to run the model on the GPU. | required |
| criterion | nn.Module | Loss function to use. | nn.MSELoss() |

Returns:

| Type | Description |
| --- | --- |
| None | |
Source code in make_us_rich/pipelines/training/model.py
def __init__(self,
    batch_size: int,
    dropout_rate: float,
    hidden_size: int,
    learning_rate: float,
    number_of_features: int,
    number_of_layers: int,
    run_on_gpu: bool,
    criterion: nn.Module = nn.MSELoss(),
) -> None:
    """
    Initialize the model.

    Parameters
    ----------
    batch_size: int
        Batch size for training.
    dropout_rate: float
        Dropout rate for the LSTM.
    hidden_size: int
        Hidden size for the LSTM.
    learning_rate: float
        Learning rate for the optimizer.
    number_of_features: int
        Number of features in the input.
    number_of_layers: int
        Number of layers in the LSTM.
    run_on_gpu: bool
        Whether to run the model on the GPU.
    criterion: nn.Module
        Loss function to use.

    Returns
    -------
    None
    """
    super().__init__()
    self.model = LSTMRegressor(
        batch_size, dropout_rate, hidden_size, number_of_features, number_of_layers, run_on_gpu,
    )
    self.learning_rate = learning_rate
    self.criterion = criterion
    self.save_hyperparameters()
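
A minimal instantiation sketch may help when reading the parameter table above. The import path is inferred from the "Source code in" note and the hyperparameter values are illustrative assumptions, not tuned recommendations:

import torch.nn as nn
from make_us_rich.pipelines.training.model import PricePredictor  # path and class name inferred from this page

model = PricePredictor(
    batch_size=32,            # illustrative values only
    dropout_rate=0.2,
    hidden_size=128,
    learning_rate=1e-3,
    number_of_features=7,
    number_of_layers=2,
    run_on_gpu=False,
    criterion=nn.MSELoss(),   # default loss, shown explicitly
)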

configure_optimizers()

Configure the optimizer.

Returns:

| Type | Description |
| --- | --- |
| torch.optim.adamw.AdamW | Optimizer. |

Source code in make_us_rich/pipelines/training/model.py
def configure_optimizers(self) -> torch.optim.AdamW:
    """
    Configure the optimizer.

    Returns
    -------
    torch.optim.adamw.AdamW
        Optimizer.
    """
    return torch.optim.AdamW(self.parameters(), lr=self.learning_rate)
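
Lightning also accepts a dictionary from configure_optimizers, which is the usual way to pair the optimizer with a learning-rate scheduler. The override below is not part of this class; the scheduler choice is an assumption for illustration:

def configure_optimizers(self):
    # same AdamW as above, plus an illustrative step-wise learning-rate decay
    optimizer = torch.optim.AdamW(self.parameters(), lr=self.learning_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
    return {"optimizer": optimizer, "lr_scheduler": scheduler}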

forward(x, labels=None)

Forward pass through the model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | torch.Tensor | Input data. | required |
| labels | torch.Tensor | Labels for the data. | None |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| loss | float | Loss for the model. |
| output | torch.Tensor | Output of the model. |

Source code in make_us_rich/pipelines/training/model.py
def forward(self, x, labels=None) -> Tuple[float, torch.Tensor]:
    """
    Forward pass through the model.

    Parameters
    ----------
    x: torch.Tensor
        Input data.
    labels: torch.Tensor
        Labels for the data.

    Returns
    -------
    loss: float
        Loss for the model.
    output: torch.Tensor
        Output of the model.
    """
    output = self.model(x)
    if labels is not None:
        loss = self.criterion(output, labels.unsqueeze(dim=1))
        return loss, output
    return output
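
Calling the module directly is a quick way to check shapes. A minimal sketch with random tensors, reusing the model instance from the earlier sketch; the sequence length and (batch, sequence, features) layout are assumptions for illustration:

import torch

x = torch.randn(32, 30, 7)            # (batch, sequence length, number_of_features), assumed layout
labels = torch.rand(32)                # one target value per sequence

predictions = model(x)                 # without labels, only the predictions are returned
loss, predictions = model(x, labels)   # with labels, the loss is computed and returned as well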

test_step(batch, batch_idx)

Test step.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| batch | Tuple[torch.Tensor, torch.Tensor] | Tuple of input data and labels. | required |
| batch_idx | int | Batch index. | required |

Returns:

| Type | Description |
| --- | --- |
| Dict | Dictionary with the test loss. |

Source code in make_us_rich/pipelines/training/model.py
def test_step(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int) -> Dict:
    """
    Test step.

    Parameters
    ----------
    batch: Tuple[torch.Tensor, torch.Tensor]
        Tuple of input data and labels.
    batch_idx: int
        Batch index.

    Returns
    -------
    Dict
        Dictionary with the test loss.
    """
    sequences, labels = batch
    loss, _ = self(sequences, labels)
    self.log("test/loss", loss, on_step=True, on_epoch=True)
    return {"loss": loss}
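
test_step is normally driven by the Trainer rather than called directly. A minimal sketch; test_loader stands in for whichever DataLoader the pipeline provides and is an assumption here:

import pytorch_lightning as pl

trainer = pl.Trainer()                        # accelerator and device flags omitted for brevity
trainer.test(model, dataloaders=test_loader)  # reports the logged "test/loss" metric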

training_step(batch, batch_idx)

Training step.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| batch | Tuple[torch.Tensor, torch.Tensor] | Tuple of input data and labels. | required |
| batch_idx | int | Batch index. | required |

Returns:

| Type | Description |
| --- | --- |
| Dict | Dictionary with the train loss. |

Source code in make_us_rich/pipelines/training/model.py
def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int) -> Dict:
    """
    Training step.

    Parameters
    ----------
    batch: Tuple[torch.Tensor, torch.Tensor]
        Tuple of input data and labels.
    batch_idx: int
        Batch index.

    Returns
    -------
    Dict
        Dictionary with the train loss.
    """
    sequences, labels = batch
    loss, _ = self(sequences, labels)
    self.log("train/loss", loss, on_step=True, on_epoch=True)
    return {"loss": loss}
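
As with the other steps, training_step is invoked by the Trainer during fitting. A minimal sketch; train_loader and val_loader are placeholder DataLoaders, and max_epochs is an illustrative setting:

import pytorch_lightning as pl

trainer = pl.Trainer(max_epochs=20)
trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)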

validation_step(batch, batch_idx)

Validation step.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| batch | Tuple[torch.Tensor, torch.Tensor] | Tuple of input data and labels. | required |
| batch_idx | int | Batch index. | required |

Returns:

| Type | Description |
| --- | --- |
| Dict | Dictionary with the valid loss. |

Source code in make_us_rich/pipelines/training/model.py
def validation_step(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int) -> Dict:
    """
    Validation step.

    Parameters
    ----------
    batch: Tuple[torch.Tensor, torch.Tensor]
        Tuple of input data and labels.
    batch_idx: int
        Batch index.

    Returns
    -------
    Dict
        Dictionary with the valid loss.
    """
    sequences, labels = batch
    loss, _ = self(sequences, labels)
    self.log("valid/loss", loss, on_step=True, on_epoch=True)
    return {"loss": loss}
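
The logged "valid/loss" metric is a natural target for early stopping and checkpointing. A minimal sketch of wiring that up through Trainer callbacks; the patience value is an assumption, and depending on your Lightning version the suffixed key "valid/loss_epoch" may be the one to monitor:

import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    EarlyStopping(monitor="valid/loss", mode="min", patience=3),
    ModelCheckpoint(monitor="valid/loss", mode="min", save_top_k=1),
]
trainer = pl.Trainer(max_epochs=20, callbacks=callbacks)

Passing this trainer to fit, as in the training_step sketch above, stops training when the validation loss plateaus and keeps only the best checkpoint.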

Last update: 2022-05-04