diff --git a/nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py b/nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py
index 941048304f6a..46d6455327af 100644
--- a/nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py
+++ b/nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py
@@ -427,7 +427,7 @@ def inference_epoch_end(self, outputs, mode, data_cfg):
             )
 
             # Gather the outputs object from all data parallel ranks since we are using the DistributedSampler which splits data across DDP ranks.
-            gathered_outputs = [None for _ in range(self.world_size)]
+            gathered_outputs = [None for _ in range(parallel_state.get_data_parallel_world_size())]
             torch.distributed.all_gather_object(
                 gathered_outputs,
                 [
@@ -439,6 +439,7 @@ def inference_epoch_end(self, outputs, mode, data_cfg):
                     }
                     for x in output
                 ],
+                group=parallel_state.get_data_parallel_group(),
             )
 
             # Figure out what the suffix of the file should be.
@@ -455,7 +456,7 @@ def inference_epoch_end(self, outputs, mode, data_cfg):
 
             # PTL models have a self.global_rank attribute and we want to write to disk only on global rank 0.
             if self.global_rank == 0:
-                for rank in range(0, self.world_size):
+                for rank in range(0, parallel_state.get_data_parallel_world_size()):
                     for batch in gathered_outputs[rank]:
                         for pred, label, input, category in zip(
                             batch['preds'], batch['labels'], batch['inputs'], batch['categories']
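
Below is a minimal sketch of the gather pattern this diff switches to, for illustration only. It assumes parallel_state refers to Megatron's parallel-state module (this NeMo file imports it from apex.transformer; megatron.core.parallel_state exposes the same calls), that torch.distributed has already been initialized by the trainer, and the helper name gather_predictions_across_dp_ranks is hypothetical. The point of the change: with tensor or pipeline model parallelism enabled, self.world_size counts every rank (data x tensor x pipeline), so sizing the destination list by it and gathering over the default WORLD group would collect redundant copies from model-parallel replicas of the same data shard; restricting both the list size and the collective to the data-parallel group yields exactly one entry per data shard.

import torch
from apex.transformer import parallel_state  # assumption: same module this file imports


def gather_predictions_across_dp_ranks(local_batches):
    # Hypothetical helper illustrating the corrected pattern.
    # One destination slot per data-parallel rank, not per global rank.
    dp_world_size = parallel_state.get_data_parallel_world_size()
    gathered = [None for _ in range(dp_world_size)]

    # Scope the collective to the data-parallel group so model-parallel
    # replicas holding the same data shard do not each contribute a copy.
    torch.distributed.all_gather_object(
        gathered,
        local_batches,
        group=parallel_state.get_data_parallel_group(),
    )
    return gathered

Writing the results to disk on global rank 0 then only needs to iterate over range(parallel_state.get_data_parallel_world_size()), mirroring the third hunk of the diff.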