From 51c9661922b78cfdb863a8d4360031cd39fb50e0 Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Fri, 12 Jul 2024 01:54:25 -0400 Subject: [PATCH 1/3] Gemma 2 (#9672) * gemma2 initial commit Signed-off-by: Chen Cui * enable conversion on cpu Signed-off-by: Chen Cui * fix code scanning Signed-off-by: Chen Cui * typo in config Signed-off-by: Chen Cui * fix output layer and add comments Signed-off-by: Chen Cui * refactor model customize to one function Signed-off-by: Chen Cui * unpin transformers version Signed-off-by: Chen Cui * Apply isort and black reformatting Signed-off-by: cuichenx --------- Signed-off-by: Chen Cui Signed-off-by: cuichenx Co-authored-by: cuichenx --- .../conf/megatron_gemma2_config.yaml | 225 +++++++++++++ .../megatron/gemma2/__init__.py | 13 + .../megatron/gemma2/gemma2_modules.py | 280 ++++++++++++++++ .../megatron/gemma2/gemma2_spec.py | 51 +++ .../language_modeling/megatron_bert_model.py | 83 ++--- .../language_modeling/megatron_gpt_model.py | 18 +- .../convert_gemma2_hf_to_nemo.py | 311 ++++++++++++++++++ 7 files changed, 941 insertions(+), 40 deletions(-) create mode 100644 examples/nlp/language_modeling/conf/megatron_gemma2_config.yaml create mode 100644 nemo/collections/nlp/models/language_modeling/megatron/gemma2/__init__.py create mode 100644 nemo/collections/nlp/models/language_modeling/megatron/gemma2/gemma2_modules.py create mode 100644 nemo/collections/nlp/models/language_modeling/megatron/gemma2/gemma2_spec.py create mode 100644 scripts/checkpoint_converters/convert_gemma2_hf_to_nemo.py diff --git a/examples/nlp/language_modeling/conf/megatron_gemma2_config.yaml b/examples/nlp/language_modeling/conf/megatron_gemma2_config.yaml new file mode 100644 index 000000000000..374223c20daf --- /dev/null +++ b/examples/nlp/language_modeling/conf/megatron_gemma2_config.yaml @@ -0,0 +1,225 @@ +name: megatron_gemma2 +restore_from_path: null # used when starting from a .nemo file + +trainer: + devices: 1 + num_nodes: 1 + accelerator: gpu + precision: 32 + logger: False # logger provided by exp_manager + enable_checkpointing: False + use_distributed_sampler: False + max_epochs: -1 # PTL default. In practice, max_steps will be reached first. 
+ max_steps: 100000 # consumed_samples = global_step * micro_batch_size * data_parallel_size * accumulate_grad_batches + log_every_n_steps: 10 + val_check_interval: 100 + limit_val_batches: 50 + limit_test_batches: 500 + accumulate_grad_batches: 1 # do not modify, grad acc is automatic for training megatron models + gradient_clip_val: 1.0 + benchmark: False + enable_model_summary: False # default PTL callback for this does not support model parallelism, instead we log manually + +exp_manager: + explicit_log_dir: null + exp_dir: null + name: megatron_llama + create_wandb_logger: False + wandb_logger_kwargs: + project: null + name: null + resume_if_exists: True + resume_ignore_no_checkpoint: True + create_checkpoint_callback: True + checkpoint_callback_params: + monitor: val_loss + save_top_k: 10 + mode: min + always_save_nemo: False # saves nemo file during validation, not implemented for model parallel + save_nemo_on_train_end: False # not recommended when training large models on clusters with short time limits + filename: 'megatron_gpt--{val_loss:.2f}-{step}-{consumed_samples}' + model_parallel_size: ${multiply:${model.tensor_model_parallel_size}, ${model.pipeline_model_parallel_size}} + +model: + mcore_gpt: True + # specify micro_batch_size, global_batch_size, and model parallelism + # gradient accumulation will be done automatically based on data_parallel_size + micro_batch_size: 4 # limited by GPU memory + global_batch_size: 8 # will use more micro batches to reach global batch size + tensor_model_parallel_size: 1 # intra-layer model parallelism + pipeline_model_parallel_size: 1 # inter-layer model parallelism + virtual_pipeline_model_parallel_size: null # interleaved pipeline + + # model architecture + encoder_seq_length: 8192 + max_position_embeddings: ${.encoder_seq_length} + num_layers: 42 # 9b: 18 | 27b: 46 + hidden_size: 3584 # 9b: 3584 | 27b: 4608 + ffn_hidden_size: 28672 # Transformer FFN hidden size. Usually 4 * hidden_size. | 9b: 28672 | 27b: 72728 + num_attention_heads: 16 # 9b: 16 | 27b: 32 + init_method_std: 0.02 # Standard deviation of the zero mean normal distribution used for weight initialization.') + use_scaled_init_method: True # use scaled residuals initialization + hidden_dropout: 0.0 # Dropout probability for hidden state transformer. + attention_dropout: 0.0 # Dropout probability for attention + ffn_dropout: 0.0 # Dropout probability in the feed-forward layer. + kv_channels: 256 # Projection weights dimension in multi-head attention. Set to hidden_size // num_attention_heads if null | 9b: 256 | 27b: 128 + apply_embedding_scaling: True # scale sqrt(hidden_size) + apply_query_key_layer_scaling: False # scale Q * K^T by 1 / layer-number. + normalization: 'rmsnorm' # Normalization layer to use. Options are 'layernorm', 'rmsnorm' + layernorm_zero_centered_gamma: True + layernorm_epsilon: 1e-6 + do_layer_norm_weight_decay: False # True means weight decay on all params + make_vocab_size_divisible_by: 128 # Pad the vocab size to be divisible by this value for computation efficiency. + pre_process: True # add embedding + post_process: True # add pooler + persist_layer_norm: True # Use of persistent fused layer norm kernel. + bias: False # Whether to use bias terms in all weight matrices. + activation: 'fast-geglu' # Options ['gelu', 'geglu', 'swiglu', 'reglu', 'squared-relu', 'fast-geglu', 'fast-swiglu', 'fast-reglu'] + headscale: False # Whether to learn extra parameters that scale the output of the each self-attention head. 
+  transformer_block_type: 'pre_ln' # Options ['pre_ln', 'post_ln', 'normformer']
+  openai_gelu: True # Use OpenAI's GELU instead of the default GeLU
+  normalize_attention_scores: True # Whether to scale the output Q * K^T by 1 / sqrt(hidden_size_per_head). This arg is provided as a configuration option mostly for compatibility with models that have been weight-converted from HF. You almost always want to set this to True.
+  position_embedding_type: 'rope' # Position embedding type. Options ['learned_absolute', 'rope']
+  rotary_percentage: 1.0 # If using position_embedding_type=rope, then the per head dim is multiplied by this.
+  attention_type: 'multihead' # Attention type. Options ['multihead']
+  share_embeddings_and_output_weights: True # Share embedding and output layer weights.
+  overlap_p2p_comm: False # Overlap p2p communication with compute. This argument is valid only when `virtual_pipeline_model_parallel_size` is larger than 1
+  batch_p2p_comm: True # Batch consecutive inter-peer send/recv operations. This argument is valid only when `virtual_pipeline_model_parallel_size` is larger than 1
+  num_query_groups: 8 # Number of query groups for group query attention. If None, normal attention is used. | 9b: 8 | 27b: 16
+  mcore_customization_config:
+    query_pre_attn_scalar: 224 # Custom scale factor (normally sqrt of head dim) in SDPA | 9b: 224 | 27b: 144
+    attn_logit_softcapping: 50.0 # Prevents attention logits from growing excessively by scaling them to a fixed range
+    final_logit_softcapping: 30.0 # Prevents final logits from growing excessively by scaling them to a fixed range
+
+  tokenizer:
+    library: 'sentencepiece'
+    type: null
+    model: ??? # /path/to/tokenizer.model
+    vocab_file: null
+    merge_file: null
+    delimiter: null # only used for tabular tokenizer
+    sentencepiece_legacy: False # Legacy=True allows you to add special tokens to sentencepiece tokenizers.
+
+  # Mixed precision
+  native_amp_init_scale: 4294967296 # 2 ** 32
+  native_amp_growth_interval: 1000
+  hysteresis: 2 # Gradient scale hysteresis
+  fp32_residual_connection: False # Move residual connections to fp32
+  fp16_lm_cross_entropy: False # Move the cross entropy unreduced loss calculation for lm head to fp16
+
+  # Megatron O2-style half-precision
+  megatron_amp_O2: False # Enable O2-level automatic mixed precision using main parameters
+  grad_allreduce_chunk_size_mb: 125
+
+  # Fusion
+  grad_div_ar_fusion: True # Fuse grad division into torch.distributed.all_reduce. Only used with O2 and no pipeline parallelism.
+  gradient_accumulation_fusion: False # Fuse weight gradient accumulation to GEMMs. Only used with pipeline parallelism and O2.
+  bias_activation_fusion: False # Use a kernel that fuses the bias addition from weight matrices with the subsequent activation function.
+  bias_dropout_add_fusion: False # Use a kernel that fuses the bias addition, dropout and residual connection addition.
+  masked_softmax_fusion: True # Use a kernel that fuses the attention softmax with its mask.
+  get_attention_mask_from_fusion: True # When using fused softmax it will create the attention mask so we won't copy it to the pipeline stages.
+
+
+  # Miscellaneous
+  seed: 1234
+  resume_from_checkpoint: null # manually set the checkpoint file to load from
+  use_cpu_initialization: False # Init weights on the CPU (slow for large models)
+  onnx_safe: False # Use work-arounds for known problems with Torch ONNX exporter.
+  apex_transformer_log_level: 30 # Python logging level displays logs with severity greater than or equal to this
+  gradient_as_bucket_view: True # PyTorch DDP argument. Allocate gradients in a contiguous bucket to save memory (less fragmentation and buffer memory)
+  sync_batch_comm: False # Enable stream synchronization after each p2p communication between pipeline stages
+
+  ## Activation Checkpointing
+  # NeMo Megatron supports 'selective' activation checkpointing where only the memory intensive part of attention is checkpointed.
+  # These memory intensive activations are also less compute intensive which makes activation checkpointing more efficient for LLMs (20B+).
+  # See Reducing Activation Recomputation in Large Transformer Models: https://arxiv.org/abs/2205.05198 for more details.
+  # 'full' will checkpoint the entire transformer layer.
+  activations_checkpoint_granularity: null # 'selective' or 'full'
+  activations_checkpoint_method: null # 'uniform', 'block'
+  # 'uniform' divides the total number of transformer layers and checkpoints the input activation
+  # of each chunk at the specified granularity. When used with 'selective', 'uniform' checkpoints all attention blocks in the model.
+  # 'block' checkpoints the specified number of layers per pipeline stage at the specified granularity
+  activations_checkpoint_num_layers: null
+  # when using 'uniform' this creates groups of transformer layers to checkpoint. Usually set to 1. Increase to save more memory.
+  # when using 'block' this will checkpoint the first activations_checkpoint_num_layers per pipeline stage.
+  num_micro_batches_with_partial_activation_checkpoints: null
+  # This feature is valid only when used with pipeline-model-parallelism.
+  # When an integer value is provided, it sets the number of micro-batches where only a partial number of Transformer layers get checkpointed
+  # and recomputed within a window of micro-batches. The rest of the micro-batches in the window checkpoint all Transformer layers. The size of the window is
+  # set by the maximum outstanding micro-batch backpropagations, which varies at different pipeline stages. The number of partial layers to checkpoint
+  # per micro-batch is set by 'activations_checkpoint_num_layers' with 'activations_checkpoint_method' of 'block'.
+  # This feature enables using activation checkpointing for a fraction of micro-batches up to the point of full GPU memory usage.
+  activations_checkpoint_layers_per_pipeline: null
+  # This feature is valid only when used with pipeline-model-parallelism.
+  # When an integer value (rounded down when a float is given) is provided, it sets the number of Transformer layers to skip checkpointing at later
+  # pipeline stages. For example, 'activations_checkpoint_layers_per_pipeline' of 3 makes pipeline stage 1 checkpoint 3 fewer layers than
+  # stage 0 and stage 2 checkpoint 6 fewer layers than stage 0, and so on. This is possible because later pipeline stages
+  # use less GPU memory with fewer outstanding micro-batch backpropagations. Used with 'num_micro_batches_with_partial_activation_checkpoints',
+  # this feature removes most of the activation checkpoints at the last pipeline stage, which is the critical execution path.
+
+  ## Sequence Parallelism
+  # Makes tensor parallelism more memory efficient for LLMs (20B+) by parallelizing layer norms and dropout sequentially
+  # See Reducing Activation Recomputation in Large Transformer Models: https://arxiv.org/abs/2205.05198 for more details.
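+  # Note: sequence parallelism only takes effect together with tensor parallelism
+  # (tensor_model_parallel_size > 1), since the layernorm and dropout activations are
+  # sharded along the sequence dimension across the tensor-parallel ranks.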
+ sequence_parallel: False + + ## Transformer Engine + transformer_engine: True + fp8: False # enables fp8 in TransformerLayer forward + fp8_e4m3: False # sets fp8_format = recipe.Format.E4M3 + fp8_hybrid: True # sets fp8_format = recipe.Format.HYBRID + fp8_margin: 0 # scaling margin + fp8_interval: 1 # scaling update interval + fp8_amax_history_len: 1024 # Number of steps for which amax history is recorded per tensor + fp8_amax_compute_algo: max # 'most_recent' or 'max'. Algorithm for computing amax from history + reduce_amax: True # Perform reduction to sync amax tensors across GPUs after every iteration + use_emha: False # Use fused multi-head attention for large sequence-length. Note this is not yet supported. Please set to False. + + data: + # Path to data must be specified by the user. + # Supports List, String and Dictionary + # List : can override from the CLI: "model.data.data_prefix=[.5,/raid/data/pile/my-gpt3_00_text_document,.5,/raid/data/pile/my-gpt3_01_text_document]", + # Or see example below: + # data_prefix: + # - .5 + # - /raid/data/pile/my-gpt3_00_text_document + # - .5 + # - /raid/data/pile/my-gpt3_01_text_document + # Dictionary: can override from CLI "model.data.data_prefix"={"train":[1.0, /path/to/data], "validation":/path/to/data, "test":/path/to/test} + # Or see example below: + # "model.data.data_prefix: {train:[1.0,/path/to/data], validation:[/path/to/data], test:[/path/to/test]}" + # data_prefix: ??? + index_mapping_dir: null # path to save index mapping .npy files, by default will save in the same location as data_prefix + data_impl: mmap + splits_string: 900,50,50 + seq_length: ${model.encoder_seq_length} + skip_warmup: True + num_workers: 2 + dataloader_type: single # cyclic + reset_position_ids: False # Reset position ids after end-of-document token + reset_attention_mask: False # Reset attention mask after end-of-document token + eod_mask_loss: False # Mask loss for the end of document tokens + validation_drop_last: True # Set to false if the last partial validation samples is to be consumed + no_seqlen_plus_one_input_tokens: False # Set to True to disable fetching (sequence length + 1) input tokens, instead get (sequence length) input tokens and mask the last token + pad_samples_to_global_batch_size: False # Set to True if you want to pad the last partial batch with -1's to equal global batch size + shuffle_documents: True # Set to False to disable documents shuffling. Sample index will still be shuffled + + # Nsys profiling options + nsys_profile: + enabled: False + start_step: 10 # Global batch to start profiling + end_step: 10 # Global batch to end profiling + ranks: [0] # Global rank IDs to profile + gen_shape: False # Generate model and kernel details including input shapes + + optim: + name: fused_adam + lr: 2e-4 + weight_decay: 0.01 + betas: + - 0.9 + - 0.98 + sched: + name: CosineAnnealing + warmup_steps: 500 + constant_steps: 50000 + min_lr: 2e-5 diff --git a/nemo/collections/nlp/models/language_modeling/megatron/gemma2/__init__.py b/nemo/collections/nlp/models/language_modeling/megatron/gemma2/__init__.py new file mode 100644 index 000000000000..d9155f923f18 --- /dev/null +++ b/nemo/collections/nlp/models/language_modeling/megatron/gemma2/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nemo/collections/nlp/models/language_modeling/megatron/gemma2/gemma2_modules.py b/nemo/collections/nlp/models/language_modeling/megatron/gemma2/gemma2_modules.py new file mode 100644 index 000000000000..fd2472c5fe49 --- /dev/null +++ b/nemo/collections/nlp/models/language_modeling/megatron/gemma2/gemma2_modules.py @@ -0,0 +1,280 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import Callable, Optional + +import torch +from megatron.core import parallel_state, tensor_parallel +from megatron.core.fusions.fused_softmax import FusedScaleMaskSoftmax +from megatron.core.packed_seq_params import PackedSeqParams +from megatron.core.tensor_parallel import ColumnParallelLinear +from megatron.core.transformer import MegatronModule, TransformerConfig +from megatron.core.transformer.custom_layers.transformer_engine import TENorm, TERowParallelLinear +from megatron.core.transformer.enums import AttnMaskType +from megatron.core.transformer.utils import attention_mask_func +from megatron.core.utils import divide +from torch import Tensor + + +def get_swa(seq_q, seq_kv, w): + """Create the equivalent attention mask fro SWA in [seq_q, seq_kv] shape""" + m = torch.ones(seq_q, seq_kv, dtype=torch.bool, device="cuda") + mu = torch.triu(m, diagonal=seq_kv - seq_q - w[0]) + ml = torch.tril(mu, diagonal=seq_kv - seq_q + w[1]) + ml = ~ml + return ml + + +def logit_softcapping(logits: torch.Tensor, scale: Optional[float]): + """Prevents logits from growing excessively by scaling them to a fixed range""" + if not scale: + return logits + return scale * torch.tanh(logits / scale) + + +class Gemma2DotProductAttention(MegatronModule): + """ + Region where selective activation recomputation is applied. + This region is memory intensive but less compute intensive which + makes activation checkpointing more efficient for LLMs (20B+). + See Reducing Activation Recomputation in Large Transformer Models: https://arxiv.org/abs/2205.05198 for more details. + + We use the following notation: + h: hidden size + n: number of attention heads + p: number of tensor model parallel partitions + b: batch size + s: sequence length + """ + + def __init__( + self, + config: TransformerConfig, + layer_number: int, + attn_mask_type: AttnMaskType, + attention_type: str, + attention_dropout: float = None, + ): + super().__init__(config=config) + + self.config: TransformerConfig = config + + assert ( + self.config.context_parallel_size == 1 + ), "Context parallelism is only supported by TEDotProductAttention!" 
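+
+        # Gemma 2 interleaves local (sliding-window) and global attention across layers:
+        # window_size is set below for every other layer (even 1-based layer_number) and is
+        # left as None for the layers that attend over the full sequence.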
+ + self.layer_number = max(1, layer_number) + + self.window_size = None + if self.layer_number % 2 == 0: + self.window_size = config.window_size + + self.attn_mask_type = attn_mask_type + self.attention_type = attention_type # unused for now + + projection_size = self.config.kv_channels * self.config.num_attention_heads + + # Per attention head and per partition values. + world_size = parallel_state.get_tensor_model_parallel_world_size() + self.hidden_size_per_partition = divide(projection_size, world_size) + self.hidden_size_per_attention_head = divide(projection_size, config.num_attention_heads) + self.num_attention_heads_per_partition = divide(self.config.num_attention_heads, world_size) + self.num_query_groups_per_partition = divide(self.config.num_query_groups, world_size) + + coeff = None + self.norm_factor = math.sqrt(config.query_pre_attn_scalar) + + if self.config.apply_query_key_layer_scaling: + coeff = self.layer_number + self.norm_factor *= coeff + + self.scale_mask_softmax = FusedScaleMaskSoftmax( + input_in_fp16=self.config.fp16, + input_in_bf16=self.config.bf16, + attn_mask_type=self.attn_mask_type, + scaled_masked_softmax_fusion=self.config.masked_softmax_fusion, + mask_func=attention_mask_func, + softmax_in_fp32=self.config.attention_softmax_in_fp32, + scale=coeff, + ) + + # Dropout. Note that for a single iteration, this layer will generate + # different outputs on different number of parallel partitions but + # on average it should not be partition dependent. + self.attention_dropout = torch.nn.Dropout( + self.config.attention_dropout if attention_dropout is None else attention_dropout + ) + + def forward( + self, + query: Tensor, + key: Tensor, + value: Tensor, + attention_mask: Tensor, + attn_mask_type: AttnMaskType = None, + packed_seq_params: PackedSeqParams = None, + ): + assert packed_seq_params is None, ( + "Packed sequence is not supported by DotProductAttention." "Please use TEDotProductAttention instead." + ) + + # =================================== + # Raw attention scores. [b, n/p, s, s] + # =================================== + + # expand the key and value [sk, b, ng, hn] -> [sk, b, np, hn] + # This is a noop for normal attention where ng == np. When using group query attention this + # creates a view that has the keys and values virtually repeated along their dimension to + # match the number of queries. + + # attn_mask_type is not used. + if self.num_attention_heads_per_partition // self.num_query_groups_per_partition > 1: + key = key.repeat_interleave( + self.num_attention_heads_per_partition // self.num_query_groups_per_partition, dim=2 + ) + value = value.repeat_interleave( + self.num_attention_heads_per_partition // self.num_query_groups_per_partition, dim=2 + ) + + # [b, np, sq, sk] + output_size = ( + query.size(1), + query.size(2), + query.size(0), + key.size(0), + ) + + # [sq, b, np, hn] -> [sq, b * np, hn] + # This will be a simple view when doing normal attention, but in group query attention + # the key and value tensors are repeated to match the queries so you can't use simple strides + # to extract the queries. 
+ query = query.reshape(output_size[2], output_size[0] * output_size[1], -1) + # [sk, b, np, hn] -> [sk, b * np, hn] + key = key.view(output_size[3], output_size[0] * output_size[1], -1) + + # preallocting input tensor: [b * np, sq, sk] + matmul_input_buffer = parallel_state.get_global_memory_buffer().get_tensor( + (output_size[0] * output_size[1], output_size[2], output_size[3]), + query.dtype, + "mpu", + ) + + # Raw attention scores. [b * np, sq, sk] + matmul_result = torch.baddbmm( + matmul_input_buffer, + query.transpose(0, 1), # [b * np, sq, hn] + key.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk] + beta=0.0, + alpha=(1.0 / self.norm_factor), + ) + # Gemma 2 specific: + matmul_result = logit_softcapping(matmul_result, self.config.attn_logit_softcapping) + + # change view to [b, np, sq, sk] + attention_scores = matmul_result.view(*output_size) + + # =========================== + # Attention probs and dropout + # =========================== + + # sliding window attention + if attention_mask is not None and self.window_size is not None: + attention_mask = get_swa(query.size(0), key.size(0), self.window_size) + + # attention scores and attention mask [b, np, sq, sk] + attention_probs: Tensor = self.scale_mask_softmax(attention_scores, attention_mask) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + + if not self.config.sequence_parallel: + with tensor_parallel.get_cuda_rng_tracker().fork(): + attention_probs = self.attention_dropout(attention_probs) + else: + attention_probs = self.attention_dropout(attention_probs) + + # ========================= + # Context layer. [sq, b, hp] + # ========================= + + # value -> context layer. + # [sk, b, np, hn] --> [b, np, sq, hn] + + # context layer shape: [b, np, sq, hn] + output_size = ( + value.size(1), + value.size(2), + query.size(0), + value.size(3), + ) + + # change view [sk, b * np, hn] + value = value.view(value.size(0), output_size[0] * output_size[1], -1) + + # change view [b * np, sq, sk] + attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) + + # matmul: [b * np, sq, hn] + context = torch.bmm(attention_probs, value.transpose(0, 1)) + + # change view [b, np, sq, hn] + context = context.view(*output_size) + + # [b, np, sq, hn] --> [sq, b, np, hn] + context = context.permute(2, 0, 1, 3).contiguous() + + # [sq, b, np, hn] --> [sq, b, hp] + new_context_shape = context.size()[:-2] + (self.hidden_size_per_partition,) + context = context.view(*new_context_shape) + return context + + +class TERowParallelLinearLayerNorm(TERowParallelLinear): + def __init__( + self, + input_size: int, + output_size: int, + *, + config: TransformerConfig, + init_method: Callable, + bias: bool, + input_is_parallel: bool, + skip_bias_add: bool, + is_expert: bool, + tp_comm_buffer_name: str = None, + ): + super().__init__( + input_size, + output_size, + config=config, + init_method=init_method, + bias=bias, + input_is_parallel=input_is_parallel, + skip_bias_add=skip_bias_add, + is_expert=is_expert, + tp_comm_buffer_name=tp_comm_buffer_name, + ) + self.post_layernorm = TENorm(config, output_size) + + def forward(self, x): + output, bias = super().forward(x) + return self.post_layernorm(output), bias + + +class Gemma2OutputLayer(ColumnParallelLinear): + def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None): + output, bias = super().forward(input_, weight) + output = logit_softcapping(output, 
self.config.final_logit_softcapping) + return output, bias diff --git a/nemo/collections/nlp/models/language_modeling/megatron/gemma2/gemma2_spec.py b/nemo/collections/nlp/models/language_modeling/megatron/gemma2/gemma2_spec.py new file mode 100644 index 000000000000..32b2535c1010 --- /dev/null +++ b/nemo/collections/nlp/models/language_modeling/megatron/gemma2/gemma2_spec.py @@ -0,0 +1,51 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add +from megatron.core.transformer import ModuleSpec, TransformerLayer, TransformerLayerSubmodules +from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules +from megatron.core.transformer.custom_layers.transformer_engine import TELayerNormColumnParallelLinear +from megatron.core.transformer.enums import AttnMaskType +from megatron.core.transformer.mlp import MLP, MLPSubmodules + +from nemo.collections.nlp.models.language_modeling.megatron.gemma2.gemma2_modules import ( + Gemma2DotProductAttention, + TERowParallelLinearLayerNorm, +) + + +def get_gemma2_layer_spec(): + return ModuleSpec( + module=TransformerLayer, + submodules=TransformerLayerSubmodules( + self_attention=ModuleSpec( + module=SelfAttention, + params={"attn_mask_type": AttnMaskType.causal}, + submodules=SelfAttentionSubmodules( + linear_qkv=TELayerNormColumnParallelLinear, + core_attention=Gemma2DotProductAttention, # use unfused SDPA for attn logit softcapping + linear_proj=TERowParallelLinearLayerNorm, # post attn RMSNorm + ), + ), + self_attn_bda=get_bias_dropout_add, + mlp=ModuleSpec( + module=MLP, + submodules=MLPSubmodules( + linear_fc1=TELayerNormColumnParallelLinear, + linear_fc2=TERowParallelLinearLayerNorm, # post mlp RMSNorm + ), + ), + mlp_bda=get_bias_dropout_add, + ), + ) diff --git a/nemo/collections/nlp/models/language_modeling/megatron_bert_model.py b/nemo/collections/nlp/models/language_modeling/megatron_bert_model.py index 984fca5f1259..f35cb77a76e9 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_bert_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_bert_model.py @@ -214,8 +214,8 @@ def model_provider_func(self, pre_process, post_process): return model def _validate_trainer(self): - """ Certain trainer configurations can break training. - Here we try to catch them and raise an error. + """Certain trainer configurations can break training. + Here we try to catch them and raise an error. 
""" if self.trainer.accumulate_grad_batches > 1: raise ValueError( @@ -308,7 +308,11 @@ def forward( model = self.model if self.mcore_bert: - output_tensor = model(input_ids, attention_mask, tokentype_ids=token_type_ids,) + output_tensor = model( + input_ids, + attention_mask, + tokentype_ids=token_type_ids, + ) else: output_tensor = model( input_ids, @@ -423,21 +427,24 @@ def training_step(self, dataloader_iter): self.log('lr', lr, batch_size=1) self.log('global_step', self.trainer.global_step, prog_bar=True, batch_size=1) self.log( - 'consumed_samples', self._compute_consumed_samples_after_training_step(), prog_bar=True, batch_size=1, + 'consumed_samples', + self._compute_consumed_samples_after_training_step(), + prog_bar=True, + batch_size=1, ) return loss_mean[0] def _make_data_iterator_list(self, data_iterator: Iterator) -> List[Iterator]: - """ Convert data iterator into form expected by Megatron - With interleaved pipeline parallelism, Megatron expects a - list of one data iterator per model chunk. Each model - chunk independently gets data from its data iterator, so - we need to interact with the data iterator multiple times - for each microbatch step. Instead of incorporating this - logic into the data loader, we cache the iterator's output - to the first model chunk and reuse it in the other model - chunks. + """Convert data iterator into form expected by Megatron + With interleaved pipeline parallelism, Megatron expects a + list of one data iterator per model chunk. Each model + chunk independently gets data from its data iterator, so + we need to interact with the data iterator multiple times + for each microbatch step. Instead of incorporating this + logic into the data loader, we cache the iterator's output + to the first model chunk and reuse it in the other model + chunks. """ if not isinstance(self.model, list) or len(self.model) == 1: @@ -703,9 +710,9 @@ def build_train_valid_test_datasets(self): ] if self.trainer.limit_val_batches <= 1.0 and isinstance(self.trainer.limit_val_batches, float): - train_valid_test_num_samples[ - 1 - ] = 1 # This is to make sure we only have one epoch on every validation iteration + train_valid_test_num_samples[1] = ( + 1 # This is to make sure we only have one epoch on every validation iteration + ) self._train_ds, self._validation_ds, self._test_ds = dataset_utils.build_train_valid_test_datasets( cfg=self.cfg, @@ -739,20 +746,20 @@ def build_train_valid_test_datasets(self): return self._train_ds, self._validation_ds, self._test_ds def backward(self, *args, **kwargs): - """ LightningModule hook to do backward. - We want this to do nothing since we run backward in the fwd/bwd functions from megatron-core. - No need to call it here. + """LightningModule hook to do backward. + We want this to do nothing since we run backward in the fwd/bwd functions from megatron-core. + No need to call it here. """ return def optimizer_zero_grad(self, *args, **kwargs): - """ LightningModule hook to zero grad. - We want this to do nothing as we are zeroing grads during the training_step. + """LightningModule hook to zero grad. + We want this to do nothing as we are zeroing grads during the training_step. 
""" return def _append_sequence_parallel_module_grads(self, module, grads): - """ Helper method for allreduce_sequence_parallel_gradients""" + """Helper method for allreduce_sequence_parallel_gradients""" for param in module.parameters(): sequence_parallel_param = getattr(param, 'sequence_parallel', False) @@ -822,12 +829,12 @@ def setup(self, stage=None): self.setup_transformer_engine_tp_groups() def setup_transformer_engine_tp_groups(self): - """ This should be called after model parallel groups have been initialized - and only needs to be called when using Transformer Engine. + """This should be called after model parallel groups have been initialized + and only needs to be called when using Transformer Engine. """ for module in self.get_bert_module_list(): """Set TP group - Copied from: https://github.com/NVIDIA/TransformerEngine/blob/main/transformer_engine/pytorch/transformer.py#L398 + Copied from: https://github.com/NVIDIA/TransformerEngine/blob/main/transformer_engine/pytorch/transformer.py#L398 """ # Deep iterate but skip self to avoid infinite recursion. for index, child in enumerate(module.modules()): @@ -849,9 +856,9 @@ def get_bert_module_list(self): return [self.model] def allreduce_sequence_parallel_gradients(self): - """ All-reduce layernorm parameters across model parallel nodes when sequence parallelism is used. - Modified from megatron-lm: - https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/blob/3f91f09bb2ab32f9904b47f46f19d2fc3f518ed8/megatron/training.py#L425 + """All-reduce layernorm parameters across model parallel nodes when sequence parallelism is used. + Modified from megatron-lm: + https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/blob/3f91f09bb2ab32f9904b47f46f19d2fc3f518ed8/megatron/training.py#L425 """ grads = [] @@ -931,10 +938,10 @@ def setup_test_data(self, cfg): self._test_dl = self.build_pretraining_data_loader(self._test_ds, consumed_samples) def transfer_batch_to_device(self, batch: Any, device: torch.device, dataloader_idx: int) -> Any: - """ PTL hook: https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#transfer-batch-to-device - When using pipeline parallelism, we need the global batch to remain on the CPU, - since the memory overhead will be too high when using a large number of microbatches. - Microbatches are transferred from CPU to GPU inside the pipeline. + """PTL hook: https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#transfer-batch-to-device + When using pipeline parallelism, we need the global batch to remain on the CPU, + since the memory overhead will be too high when using a large number of microbatches. + Microbatches are transferred from CPU to GPU inside the pipeline. """ return batch @@ -1154,17 +1161,19 @@ def on_load_checkpoint(self, checkpoint) -> None: parallel_state.set_virtual_pipeline_model_parallel_rank(0) def build_transformer_config(self) -> TransformerConfig: - """ Builds the megatron core gpt transformer config for the model. - For attributes in the nemo model config that are the same - as the megatron core TransformerConfig, we will use the value from the nemo model config. - For attributes in TransformerConfig that are not in the nemo model config, we add custom logic. + """Builds the megatron core gpt transformer config for the model. + For attributes in the nemo model config that are the same + as the megatron core TransformerConfig, we will use the value from the nemo model config. 
+ For attributes in TransformerConfig that are not in the nemo model config, we add custom logic. """ activation = self.cfg.get('activation', 'gelu') assert activation == 'gelu', "Only gelu activation is support for BERT at the moment." normalization = self.cfg.get('normalization', 'layernorm') - layernorm_zero_centered_gamma = self.cfg.get('normalization', 'layernorm') == 'layernorm1p' + layernorm_zero_centered_gamma = self.cfg.get('normalization', 'layernorm') == 'layernorm1p' or self.cfg.get( + "layernorm_zero_centered_gamma", False + ) if normalization == 'layernorm': normalization = 'LayerNorm' elif normalization == 'rmsnorm': diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py index 69cd06021f50..f09bbe279cd3 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py @@ -38,6 +38,7 @@ from nemo.collections.nlp.data.language_modeling.megatron.gpt_dataset import build_train_valid_test_datasets from nemo.collections.nlp.data.language_modeling.megatron.gpt_fim_dataset import GPTFIMDataset, GPTFIMDatasetConfig from nemo.collections.nlp.models.language_modeling.megatron.falcon.falcon_spec import get_falcon_layer_spec +from nemo.collections.nlp.models.language_modeling.megatron.gemma2.gemma2_spec import get_gemma2_layer_spec from nemo.collections.nlp.models.language_modeling.megatron.gpt_full_te_layer_autocast_spec import ( get_gpt_full_te_layer_autocast_spec, ) @@ -154,6 +155,7 @@ def get_specs(spec_name, num_experts=None, moe_grouped_gemm=False, use_te=True, "": get_gpt_layer_local_spec(num_experts, moe_grouped_gemm), "te_gpt": get_gpt_layer_with_transformer_engine_spec(num_experts, moe_grouped_gemm), "megatron_falcon_gpt": get_falcon_layer_spec(), + "megatron_gemma2": get_gemma2_layer_spec(), "megatron_gpt_full_te_layer_autocast": get_gpt_full_te_layer_autocast_spec(), "modelopt": get_gpt_layer_modelopt_spec(num_experts), "te_gpt_hyena": get_gpt_layer_with_te_and_hyena_spec(hyena_cfg), @@ -163,6 +165,15 @@ def get_specs(spec_name, num_experts=None, moe_grouped_gemm=False, use_te=True, return name_spec_dict[spec_name] +def mcore_model_customize(cfg, model): + if cfg.get("apply_embedding_scaling", False) and parallel_state.is_pipeline_first_stage(): + extend_instance(model.embedding, EmbeddingScalingMixin) + if cfg.get("mcore_customization_config", {}).get("final_logit_softcapping", 0): + from nemo.collections.nlp.models.language_modeling.megatron.gemma2.gemma2_modules import Gemma2OutputLayer + + extend_instance(model.output_layer, Gemma2OutputLayer) + + class EmbeddingScalingMixin(torch.nn.Module): """ A mixin class for scaling embeddings in Megatron GPT. 
@@ -441,8 +452,7 @@ def model_provider_func(self, pre_process, post_process): seq_len_interpolation_factor=self.cfg.get('seq_len_interpolation_factor', None), rotary_base=self.cfg.get('rotary_base', 10000), ) - if self.cfg.get("apply_embedding_scaling", False) and parallel_state.is_pipeline_first_stage(): - extend_instance(model.embedding, EmbeddingScalingMixin) + mcore_model_customize(self.cfg, model) else: assert self.cfg.get('num_query_groups', None) is None or self.cfg.get( 'num_query_groups', None @@ -2057,7 +2067,9 @@ def build_transformer_config(self) -> TransformerConfig: ) normalization = self.cfg.get('normalization', 'layernorm').lower() - layernorm_zero_centered_gamma = self.cfg.get('normalization', 'layernorm') == 'layernorm1p' + layernorm_zero_centered_gamma = self.cfg.get('normalization', 'layernorm') == 'layernorm1p' or self.cfg.get( + "layernorm_zero_centered_gamma", False + ) if normalization == 'layernorm': normalization = 'LayerNorm' elif normalization == 'rmsnorm': diff --git a/scripts/checkpoint_converters/convert_gemma2_hf_to_nemo.py b/scripts/checkpoint_converters/convert_gemma2_hf_to_nemo.py new file mode 100644 index 000000000000..fb296cf25c68 --- /dev/null +++ b/scripts/checkpoint_converters/convert_gemma2_hf_to_nemo.py @@ -0,0 +1,311 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Requires HF transformers updated to v4.42 to support Gemma 2 Models + + huggingface-cli login + >>> from huggingface_hub import snapshot_download + >>> snapshot_download(repo_id="google/gemma-2-9b", local_dir="/path/to/gemma2/checkpoints/hf/9b") + + python3 /opt/NeMo/scripts/checkpoint_converters/convert_gemma2_hf_to_nemo.py \ + --input_name_or_path /path/to/gemma2/checkpoints/hf/9b \ + --output_path /path/to/gemma2-9b.nemo \ + --tokenizer_path /path/to/gemma2/checkpoints/hf/9b/tokenizer.model + [--cpu] + +If you encounter a torch.cuda.OutOfMemoryError, try converting on CPU with --cpu. 
+""" + +import os +from argparse import ArgumentParser + +import torch + +from megatron.core import parallel_state +from omegaconf import OmegaConf +from transformers import AutoModelForCausalLM, AutoTokenizer + +from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel +from nemo.collections.nlp.modules.common.megatron.utils import get_ltor_masks_and_position_ids +from nemo.collections.nlp.parts.megatron_trainer_builder import MegatronTrainerBuilder +from nemo.collections.nlp.parts.utils_funcs import torch_dtype_from_precision +from nemo.utils import logging + + +def create_rename_keys(num_hidden_layers): + rename_keys = [] + for i in range(num_hidden_layers): + # Attention layers + rename_keys.extend( + [ + ( + f"model.layers.{i}.self_attn.o_proj.weight", + f"model.decoder.layers.{i}.self_attention.linear_proj.weight", + ), + ( + f"model.layers.{i}.self_attn.q_proj.weight", + f"model.decoder.layers.{i}.self_attention.linear_q.weight", + ), + ( + f"model.layers.{i}.self_attn.k_proj.weight", + f"model.decoder.layers.{i}.self_attention.linear_k.weight", + ), + ( + f"model.layers.{i}.self_attn.v_proj.weight", + f"model.decoder.layers.{i}.self_attention.linear_v.weight", + ), + # MLP and LayerNorm + (f"model.layers.{i}.mlp.gate_proj.weight", f"model.decoder.layers.{i}.mlp.linear_fc1_gate.weight"), + (f"model.layers.{i}.mlp.up_proj.weight", f"model.decoder.layers.{i}.mlp.linear_fc1_proj.weight"), + (f"model.layers.{i}.mlp.down_proj.weight", f"model.decoder.layers.{i}.mlp.linear_fc2.weight"), + ( + f"model.layers.{i}.input_layernorm.weight", + f"model.decoder.layers.{i}.self_attention.linear_qkv.layer_norm_weight", + ), + ( + f"model.layers.{i}.pre_feedforward_layernorm.weight", + f"model.decoder.layers.{i}.mlp.linear_fc1.layer_norm_weight", + ), + ( + f"model.layers.{i}.post_attention_layernorm.weight", + f"model.decoder.layers.{i}.self_attention.linear_proj.post_layernorm.weight", + ), + ( + f"model.layers.{i}.post_feedforward_layernorm.weight", + f"model.decoder.layers.{i}.mlp.linear_fc2.post_layernorm.weight", + ), + ] + ) + + # Non layer dependent keys + rename_keys.extend( + [ + ("model.embed_tokens.weight", "model.embedding.word_embeddings.weight"), + ("model.norm.weight", "model.decoder.final_layernorm.weight"), + ] + ) + + return rename_keys + + +def rename_model_keys(model_state_dict, rename_keys): + """ + Rename keys in the model's state dictionary based on the provided mappings. + + Parameters: + model_state_dict (dict): The state dictionary of the model. + rename_keys (list): A list of tuples with the mapping (old_key, new_key). + + Returns: + dict: A new state dictionary with updated key names. + """ + + # Create a new state dictionary with updated key names + new_state_dict = {} + + # Track keys from the original state dict to ensure all are processed + remaining_keys = set(model_state_dict.keys()) + + # Iterate over the rename mappings + for old_key, new_key in rename_keys: + if old_key in model_state_dict: + # Rename the key and remove it from the tracking set + new_state_dict[new_key] = model_state_dict[old_key] + remaining_keys.remove(old_key) + + # Check if any keys were not converted from old to new + for old_key in remaining_keys: + print(f"Warning: Key '{old_key}' was not converted.") + + return new_state_dict + + +def adjust_tensor_shapes(model, nemo_state_dict): + """ + Adapt tensor shapes in the state dictionary to ensure compatibility with a different model structure. 
+ + Parameters: + nemo_state_dict (dict): The state dictionary of the model. + + Returns: + dict: The updated state dictionary with modified tensor shapes for compatibility. + """ + model_config = model.cfg + num_query_groups = model_config["num_query_groups"] + head_num = model_config["num_attention_heads"] + hidden_size = model_config["hidden_size"] + head_size = model_config["kv_channels"] + heads_per_group = head_num // num_query_groups + + # Note: For 'key' and 'value' weight and biases, NeMo uses a consolidated tensor 'query_key_value'. + for key_ in list(nemo_state_dict.keys()): + if 'mlp.linear_fc1_gate.weight' in key_: + key_gate = key_ + key_proj = key_.replace('mlp.linear_fc1_gate.weight', 'mlp.linear_fc1_proj.weight') + new_key = key_.replace('mlp.linear_fc1_gate.weight', 'mlp.linear_fc1.weight') + gate_weight = nemo_state_dict[key_gate] + proj_weight = nemo_state_dict[key_proj] + nemo_state_dict[new_key] = torch.cat((gate_weight, proj_weight)) + if 'layernorm.weight' in key_ or 'layer_norm_weight' in key_: + nemo_state_dict[key_] = nemo_state_dict[key_] + if 'self_attention.linear_q.weight' in key_: + key_q = key_ + key_k = key_.replace('linear_q', 'linear_k') + key_v = key_.replace('linear_q', 'linear_v') + key_qkv = key_.replace('linear_q', 'linear_qkv') + + # [(head_num + 2 * num_query_groups) * head_size, hidden_size] + # -> [head_num, head_size, hidden_size], 2 * [num_query_groups, head_size, hidden_size] + q_weight, k_weight, v_weight = nemo_state_dict[key_q], nemo_state_dict[key_k], nemo_state_dict[key_v] + q_weight = q_weight.reshape(head_num, head_size, hidden_size) + k_weight = k_weight.reshape(num_query_groups, head_size, hidden_size) + v_weight = v_weight.reshape(num_query_groups, head_size, hidden_size) + + qkv_weight = torch.empty((0, head_size, hidden_size), device=q_weight.device) + for i in range(num_query_groups): + qkv_weight = torch.cat((qkv_weight, q_weight[i * heads_per_group : (i + 1) * heads_per_group, :, :])) + qkv_weight = torch.cat((qkv_weight, k_weight[i : i + 1, :, :])) + qkv_weight = torch.cat((qkv_weight, v_weight[i : i + 1, :, :])) + qkv_weight = qkv_weight.reshape([head_size * (head_num + 2 * num_query_groups), hidden_size]) + nemo_state_dict[key_qkv] = qkv_weight + del nemo_state_dict[key_q], nemo_state_dict[key_k], nemo_state_dict[key_v] + + return nemo_state_dict + + +def adjust_nemo_config(model_config, ref_config): + model_config["encoder_seq_length"] = ref_config["max_position_embeddings"] + model_config["num_layers"] = ref_config["num_hidden_layers"] + model_config["ffn_hidden_size"] = ref_config["intermediate_size"] + model_config["hidden_size"] = ref_config["hidden_size"] + model_config["num_attention_heads"] = ref_config["num_attention_heads"] + model_config["num_query_groups"] = ref_config["num_key_value_heads"] + model_config["kv_channels"] = ref_config["head_dim"] + model_config["layernorm_epsilon"] = ref_config["rms_norm_eps"] + model_config["window_size"] = (ref_config["sliding_window_size"], 0) + model_config["layernorm_zero_centered_gamma"] = True + model_config["name"] = 'megatron_gemma2' + model_config['mcore_customization_config'] = { + "attn_logit_softcapping": ref_config["attn_logit_softcapping"], + "final_logit_softcapping": ref_config["final_logit_softcapping"], + "query_pre_attn_scalar": ref_config["query_pre_attn_scalar"], + } + return model_config + + +def get_args(): + parser = ArgumentParser() + parser.add_argument("--input_name_or_path", type=str) + parser.add_argument("--tokenizer_path", type=str) + 
parser.add_argument( + "--hparams_file", + type=str, + default=os.path.join( + os.path.dirname(__file__), '../../examples/nlp/language_modeling/conf/megatron_gemma2_config.yaml' + ), + required=False, + help="Path config for restoring. It's created during training and may need to be modified during restore if restore environment is different than training. Ex: /raid/nemo_experiments/megatron_gpt/hparams.yaml", + ) + parser.add_argument("--output_path", type=str, default=None, help="Path to output .nemo file.") + parser.add_argument( + "--precision", type=str, default="bf16", choices=["bf16", "32"], help="Precision for checkpoint weight saved" + ) + parser.add_argument("--run_verification", action="store_true") + parser.add_argument("--cpu", action="store_true") + + args = parser.parse_args() + return args + + +def verify(nemo_model, hf_tokenizer, hf_model): + # Verifications + input_texts = [ + 'query: how much protein should a female eat', + ] + logging.info(f"Running verifications {input_texts} ...") + + # Tokenize the input texts + hf_tokenizer.pad_token = hf_tokenizer.eos_token + batch_dict = hf_tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt') + batch_dict_cuda = {k: v.cuda() for k, v in batch_dict.items()} + hf_model = hf_model.cuda().eval() + nemo_model = nemo_model.eval() + + hf_outputs = hf_model(**batch_dict_cuda, output_hidden_states=True) + + parallel_state._set_global_memory_buffer() + ids = batch_dict_cuda['input_ids'] + + id_tensors = [torch.unsqueeze(torch.LongTensor(id_list), dim=0) for id_list in ids.cpu()] + + masks_and_position_ids = [ + get_ltor_masks_and_position_ids(id_tensor, hf_tokenizer.eos_token, False, False, False) + for id_tensor in id_tensors + ] + for tokens, attn_mask_and_pos_ids in zip(id_tensors, masks_and_position_ids): + attn_mask, _, pos_ids = attn_mask_and_pos_ids + outputs = nemo_model( + tokens=tokens.cuda(), text_position_ids=pos_ids.cuda(), attention_mask=attn_mask.cuda(), labels=None + ) + + hf_next_token = hf_outputs.logits[0, -1].argmax() + next_token = outputs.squeeze()[-1].argmax() + + logging.info(f"HF predicted next token is: '{hf_tokenizer._convert_id_to_token(hf_next_token)}'.") + logging.info(f"NeMo predicted next token is: '{hf_tokenizer._convert_id_to_token(next_token)}'.") + assert ( + hf_next_token == next_token + ), f'prediction mismatch: {hf_tokenizer.decode(hf_next_token)} != {hf_tokenizer.decode(next_token)}' + + +def convert(args): + logging.info(f"Loading checkpoint from HF Gemma 2: `{args.input_name_or_path}`") + hf_tokenizer = AutoTokenizer.from_pretrained(args.input_name_or_path) + hf_model = AutoModelForCausalLM.from_pretrained(args.input_name_or_path) + logging.info("HF Model loading done.") + + nemo_config = OmegaConf.load(args.hparams_file) + nemo_config.model = adjust_nemo_config(nemo_config.model, hf_model.config.__dict__) + nemo_config.model.tokenizer["model"] = args.tokenizer_path + + nemo_config.trainer["precision"] = args.precision + if args.cpu: + nemo_config.model['use_cpu_initialization'] = True + nemo_config.trainer['accelerator'] = 'cpu' + trainer = MegatronTrainerBuilder(nemo_config).create_trainer() + model = MegatronGPTModel(nemo_config.model, trainer) + + rename_keys = create_rename_keys(nemo_config.model.num_layers) + old_state_dict = hf_model.state_dict() + new_state_dict = rename_model_keys(model_state_dict=old_state_dict, rename_keys=rename_keys) + + nemo_state_dict = adjust_tensor_shapes(model, new_state_dict) + model.load_state_dict(nemo_state_dict, 
strict=False) + + if args.run_verification and not args.cpu: + logging.info(f'=' * 100) + verify(model, hf_tokenizer, hf_model) + logging.info(f'=' * 100) + + dtype = torch_dtype_from_precision(args.precision) + model = model.to(dtype=dtype) + model.cfg.use_cpu_initialization = False + model.save_to(args.output_path) + logging.info(f'NeMo model saved to: {args.output_path}') + + +if __name__ == '__main__': + args = get_args() + convert(args) From ff29c454f3f29bb9fc2514d8859d1f53446061b9 Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Thu, 15 Aug 2024 15:23:28 -0400 Subject: [PATCH 2/3] typo Signed-off-by: Chen Cui --- .../nlp/models/language_modeling/megatron_gpt_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py index f68e5282fb83..f0e763f91d6a 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py @@ -181,7 +181,7 @@ def get_specs(spec_name, transformer_config=None, use_te=True, hyena_cfg: Dict = def mcore_model_customize(cfg, model): if cfg.get("apply_embedding_scaling", False) and parallel_state.is_pipeline_first_stage(): extend_instance(model.embedding, EmbeddingScalingMixin) - if self.cfg.get('scale_positional_embedding', False): + if cfg.get('scale_positional_embedding', False): model.rotary_pos_emb.inv_freq = apply_rope_scaling(model.rotary_pos_emb.inv_freq) if cfg.get("mcore_customization_config", {}).get("final_logit_softcapping", 0): from nemo.collections.nlp.models.language_modeling.megatron.gemma2.gemma2_modules import Gemma2OutputLayer From 391ef23df90c545c60d206bdeef1decc7b0067ee Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Fri, 16 Aug 2024 01:11:09 -0400 Subject: [PATCH 3/3] import in function to fix test Signed-off-by: Chen Cui --- .../nlp/models/language_modeling/megatron_gpt_model.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py index f0e763f91d6a..49f749e4a40e 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py @@ -39,7 +39,6 @@ from nemo.collections.nlp.data.language_modeling.megatron.gpt_dataset import build_train_valid_test_datasets from nemo.collections.nlp.data.language_modeling.megatron.gpt_fim_dataset import GPTFIMDataset, GPTFIMDatasetConfig from nemo.collections.nlp.models.language_modeling.megatron.falcon.falcon_spec import get_falcon_layer_spec -from nemo.collections.nlp.models.language_modeling.megatron.gemma2.gemma2_spec import get_gemma2_layer_spec from nemo.collections.nlp.models.language_modeling.megatron.gpt_full_te_layer_autocast_spec import ( get_gpt_full_te_layer_autocast_spec, ) @@ -155,6 +154,8 @@ def mcore_supports_moe() -> bool: ## TODO: This function will not work if TE is not installed def get_specs(spec_name, transformer_config=None, use_te=True, hyena_cfg: Dict = None): + from nemo.collections.nlp.models.language_modeling.megatron.gemma2.gemma2_spec import get_gemma2_layer_spec + # else cases for backwards compatibility with neva num_experts = transformer_config.num_moe_experts if transformer_config else None moe_grouped_gemm = transformer_config.moe_grouped_gemm if transformer_config else False