🐛 Bug

This training runs for all 1000 epochs (the default max_epochs) instead of stopping after max_steps when accumulate_grad_batches is set.

To Reproduce

```python
import os

import torch
from torch.utils.data import Dataset
from pytorch_lightning import Trainer, LightningModule


class RandomDataset(Dataset):
    def __init__(self, size, length):
        self.len = length
        self.data = torch.randn(length, size)

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return self.len


class BoringModel(LightningModule):
    def __init__(self):
        """
        Testing PL Module

        Use as follows:
        - subclass
        - modify the behavior for what you want

        class TestModel(BaseTestModel):
            def training_step(...):
                # do your own thing

        or:

        model = BaseTestModel()
        model.training_epoch_end = None
        """
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    def forward(self, x):
        return self.layer(x)

    def loss(self, batch, prediction):
        # An arbitrary loss so that the model weights are updated during `Trainer.fit` calls
        return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))

    def step(self, x):
        x = self.layer(x)
        out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
        return out

    def training_step(self, batch, batch_idx):
        output = self.layer(batch)
        loss = self.loss(batch, output)
        return {"loss": loss}

    def training_step_end(self, training_step_outputs):
        return training_step_outputs

    def training_epoch_end(self, outputs) -> None:
        torch.stack([x["loss"] for x in outputs]).mean()

    def validation_step(self, batch, batch_idx):
        output = self.layer(batch)
        loss = self.loss(batch, output)
        return {"x": loss}

    def validation_epoch_end(self, outputs) -> None:
        torch.stack([x["x"] for x in outputs]).mean()

    def test_step(self, batch, batch_idx):
        output = self.layer(batch)
        loss = self.loss(batch, output)
        return {"y": loss}

    def test_epoch_end(self, outputs) -> None:
        torch.stack([x["y"] for x in outputs]).mean()

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
        return [optimizer], [lr_scheduler]


def run_test():
    class TestModel(BoringModel):
        def on_train_epoch_start(self) -> None:
            print("override any method to prove your bug")

    # fake data
    train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
    val_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
    test_data = torch.utils.data.DataLoader(RandomDataset(32, 64))

    # model
    model = TestModel()
    trainer = Trainer(
        default_root_dir=os.getcwd(),
        # -----------------------------------------------------------------------------
        max_steps=10,  # HERE HAS NO EFFECT IN COMBINATION WITH accumulate_grad_batches
        # -----------------------------------------------------------------------------
        accumulate_grad_batches=2,
        weights_summary=None,
    )
    trainer.fit(model, train_data, val_data)
    trainer.test(test_dataloaders=test_data)


if __name__ == "__main__":
    run_test()
```
Expected behavior
Training stops after max_steps optimizer steps, even when accumulate_grad_batches > 1.
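To make the expectation concrete (this is my reading of max_steps as counting optimizer steps, an assumption rather than a quote from the docs), the repro above should process max_steps * accumulate_grad_batches batches and then stop:

```python
# Assumed accounting for the repro above (illustrative only):
max_steps = 10                 # optimizer steps requested via Trainer(max_steps=10)
accumulate_grad_batches = 2    # batches accumulated per optimizer step
expected_batches = max_steps * accumulate_grad_batches
print(expected_batches)        # 20 batches -> 10 optimizer steps, then training should stop
```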
Additional context
The tests with the TODO marker can be found here: #4190
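As a stop-gap until the step counting is fixed, a callback along the following lines could end the run once the desired number of optimizer steps is reached. This is only a sketch of a possible workaround, not part of the original report, and it has not been tested against the Lightning version used above; StopAtMaxSteps is a hypothetical helper name.

```python
from pytorch_lightning import Callback


class StopAtMaxSteps(Callback):
    """Hypothetical workaround: request a stop once `trainer.global_step`
    reaches the given number of optimizer steps, independent of how
    `accumulate_grad_batches` interacts with the built-in max_steps check."""

    def __init__(self, max_steps: int):
        self.max_steps = max_steps

    def on_train_batch_end(self, trainer, pl_module, *args, **kwargs):
        # `global_step` counts optimizer steps, so this respects gradient accumulation.
        if trainer.global_step >= self.max_steps:
            trainer.should_stop = True
```

It would be attached via Trainer(..., callbacks=[StopAtMaxSteps(10)]), alongside or instead of max_steps.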