fixed megatron lm conversion bug (PTL related) (NVIDIA#5038)
Signed-off-by: David Mosallanezhad <dmosallanezh@nvidia.com>
Co-authored-by: David Mosallanezhad <dmosallanezh@nvidia.com>
2 people authored and jubick1337 committed Oct 4, 2022
1 parent 4dd1113 commit 821237d
Showing 1 changed file with 4 additions and 3 deletions.
examples/nlp/language_modeling/megatron_lm_ckpt_to_nemo.py: 4 additions & 3 deletions
@@ -39,6 +39,7 @@
 
 import torch
 from apex.transformer import parallel_state
+from pytorch_lightning.core.saving import _load_state as ptl_load_state
 from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml
 from pytorch_lightning.trainer.trainer import Trainer
 from pytorch_lightning.utilities.cloud_io import load as pl_load
@@ -220,10 +221,10 @@ def add_optimizer_state(lm_checkpoint, new_checkpoint, megatron_amp_o2=True):
 def load_model(cls, checkpoint, strict, **kwargs):
     try:
         if 'cfg' in kwargs:
-            model = cls._load_model_state(checkpoint, strict=strict, **kwargs)
+            model = ptl_load_state(cls, checkpoint, strict=strict, **kwargs)
         else:
-            model = cls._load_model_state(
-                checkpoint, strict=strict, cfg=checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].cfg, **kwargs
+            model = ptl_load_state(
+                cls, checkpoint, strict=strict, cfg=checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].cfg, **kwargs
             )
         # register the artifacts
         cfg = checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].cfg
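The change replaces calls to the PyTorch Lightning classmethod `_load_model_state`, which is no longer available in newer PTL releases, with the module-level helper `_load_state` from `pytorch_lightning.core.saving` (imported as `ptl_load_state`), passing the model class explicitly as the first argument. Below is a minimal sketch, not part of the commit, of how the patched `load_model` helper might be invoked during conversion; the model class, checkpoint path, and keyword arguments are illustrative assumptions rather than code from the script.

    import torch

    # Assumed NeMo import path for a Megatron model class; the conversion
    # script may target GPT or BERT variants depending on its arguments.
    from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel

    # Load the converted Megatron-LM checkpoint into a plain dict (hypothetical path).
    checkpoint = torch.load('/path/to/megatron_converted.ckpt', map_location='cpu')

    # load_model (shown in the diff above) now delegates to ptl_load_state, which
    # instantiates the class from the checkpoint's hyperparameters and loads its
    # state dict. In practice the script also sets up a Trainer and Apex
    # parallel_state before reaching this point.
    model = load_model(MegatronGPTModel, checkpoint, strict=False)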
