Apply isort and black reformatting
Signed-off-by: ashors1 <ashors1@users.noreply.github.com>
ashors1 committed Jul 9, 2024
1 parent 7ef94a1 commit 2cf377c
Showing 1 changed file with 4 additions and 2 deletions.
nemo/collections/llm/gpt/model/base.py — 6 changes: 4 additions & 2 deletions
@@ -15,11 +15,11 @@
 from nemo.lightning.megatron_parallel import MaskedTokenLossReduction
 from nemo.lightning.pytorch.optim import MegatronOptimizerModule, OptimizerModule
 
-HAVE_TE=True
+HAVE_TE = True
 try:
     import transformer_engine
 except (ImportError, ModuleNotFoundError):
-    HAVE_TE=False
+    HAVE_TE = False
 
 if TYPE_CHECKING:
     from megatron.core.models.gpt.gpt_model import GPTModel as MCoreGPTModel
@@ -82,12 +82,14 @@ def local_layer_spec(config: "GPTConfig") -> ModuleSpec:
         num_experts=config.num_moe_experts, moe_grouped_gemm=config.moe_grouped_gemm, qk_layernorm=config.qk_layernorm
     )
 
+
 def default_layer_spec(config: "GPTConfig") -> ModuleSpec:
     if HAVE_TE:
         return transformer_engine_layer_spec(config)
     else:
         return local_layer_spec(config)
 
+
 @dataclass
 class GPTConfig(TransformerConfig, io.IOMixin):
     # From megatron.core.models.gpt.gpt_model.GPTModel
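Beyond the whitespace fixes (black normalizes `HAVE_TE = True` spacing and inserts the two blank lines PEP 8 requires between top-level definitions), the hunks above show the import-time feature probe that gates the Transformer Engine layer spec. Below is a minimal, runnable sketch of that same pattern; the `_te_spec` and `_local_spec` builders are hypothetical stand-ins for NeMo's transformer_engine_layer_spec and local_layer_spec, which the sketch does not reimplement.

# Sketch of the optional-dependency guard used in base.py: probe for the
# package once at import time, record the result in a module-level flag,
# and dispatch on that flag everywhere else.
HAVE_TE = True
try:
    import transformer_engine  # noqa: F401 -- presence check only
except (ImportError, ModuleNotFoundError):
    HAVE_TE = False


def _te_spec(config):
    # Hypothetical stand-in for transformer_engine_layer_spec(config).
    return ("te", config)


def _local_spec(config):
    # Hypothetical stand-in for local_layer_spec(config).
    return ("local", config)


def default_layer_spec(config):
    # Same dispatch as the diff: prefer the Transformer Engine path when
    # the package imports cleanly, otherwise fall back to the local spec.
    return _te_spec(config) if HAVE_TE else _local_spec(config)

Probing once at import time keeps the availability check out of hot paths and gives callers a single flag (HAVE_TE) to branch on, which is why the guard sits at module scope rather than inside default_layer_spec.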