From 153c30780a2bc72f115b4c697cc303ddc1d451ea Mon Sep 17 00:00:00 2001
From: Sergii Dymchenko
Date: Thu, 6 Jul 2023 19:40:29 -0700
Subject: [PATCH] Fix require_grad typos (#6930)

Signed-off-by: Sergii Dymchenko
---
 .../modules/transformer/transformer_generators.py | 12 ++++++------
 .../common/transformer/transformer_generators.py  | 12 ++++++------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/nemo/collections/asr/modules/transformer/transformer_generators.py b/nemo/collections/asr/modules/transformer/transformer_generators.py
index 504fdf076d3d..6e17151dcd1b 100644
--- a/nemo/collections/asr/modules/transformer/transformer_generators.py
+++ b/nemo/collections/asr/modules/transformer/transformer_generators.py
@@ -188,7 +188,7 @@ def freeze(self) -> None:
             param.requires_grad = False
         self.decoder.eval()
         for param in self.log_softmax.parameters():
-            param.require_grad = False
+            param.requires_grad = False
         self.log_softmax.eval()

     def unfreeze(self) -> None:
@@ -201,7 +201,7 @@ def unfreeze(self) -> None:
             param.requires_grad = True
         self.decoder.train()
         for param in self.log_softmax.parameters():
-            param.require_grad = True
+            param.requires_grad = True
         self.log_softmax.train()

     @contextmanager
@@ -701,10 +701,10 @@ def freeze(self) -> None:
                 param.requires_grad = False
             self.decoders[model_num].eval()
             for param in self.log_softmaxes[model_num].parameters():
-                param.require_grad = False
+                param.requires_grad = False
             self.log_softmaxes[model_num].eval()
             for param in self.encoders[model_num].parameters():
-                param.require_grad = False
+                param.requires_grad = False
             self.encoders[model_num].eval()

     def unfreeze(self) -> None:
@@ -718,10 +718,10 @@ def unfreeze(self) -> None:
                 param.requires_grad = True
             self.decoders[model_num].train()
             for param in self.log_softmaxes[model_num].parameters():
-                param.require_grad = True
+                param.requires_grad = True
             self.log_softmaxes[model_num].train()
             for param in self.encoders[model_num].parameters():
-                param.require_grad = True
+                param.requires_grad = True
             self.encoders[model_num].train()

     @contextmanager
diff --git a/nemo/collections/nlp/modules/common/transformer/transformer_generators.py b/nemo/collections/nlp/modules/common/transformer/transformer_generators.py
index 504fdf076d3d..6e17151dcd1b 100644
--- a/nemo/collections/nlp/modules/common/transformer/transformer_generators.py
+++ b/nemo/collections/nlp/modules/common/transformer/transformer_generators.py
@@ -188,7 +188,7 @@ def freeze(self) -> None:
             param.requires_grad = False
         self.decoder.eval()
         for param in self.log_softmax.parameters():
-            param.require_grad = False
+            param.requires_grad = False
         self.log_softmax.eval()

     def unfreeze(self) -> None:
@@ -201,7 +201,7 @@ def unfreeze(self) -> None:
            param.requires_grad = True
         self.decoder.train()
         for param in self.log_softmax.parameters():
-            param.require_grad = True
+            param.requires_grad = True
         self.log_softmax.train()

     @contextmanager
@@ -701,10 +701,10 @@ def freeze(self) -> None:
                 param.requires_grad = False
             self.decoders[model_num].eval()
             for param in self.log_softmaxes[model_num].parameters():
-                param.require_grad = False
+                param.requires_grad = False
             self.log_softmaxes[model_num].eval()
             for param in self.encoders[model_num].parameters():
-                param.require_grad = False
+                param.requires_grad = False
             self.encoders[model_num].eval()

     def unfreeze(self) -> None:
@@ -718,10 +718,10 @@ def unfreeze(self) -> None:
                 param.requires_grad = True
             self.decoders[model_num].train()
             for param in self.log_softmaxes[model_num].parameters():
-                param.require_grad = True
+                param.requires_grad = True
             self.log_softmaxes[model_num].train()
             for param in self.encoders[model_num].parameters():
-                param.require_grad = True
+                param.requires_grad = True
             self.encoders[model_num].train()

     @contextmanager
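Note (not part of the patch): a minimal sketch, assuming only that PyTorch is installed, of why the misspelled attribute goes unnoticed at runtime. Tensors accept arbitrary attribute assignment, so `param.require_grad = False` silently creates a new, unused attribute instead of disabling gradient tracking, and the parameters are never actually frozen. The `nn.Linear` module below is a hypothetical stand-in for the decoder/encoder/log_softmax modules touched by the patch.

    from torch import nn

    # Hypothetical example module; any nn.Module with parameters would do.
    layer = nn.Linear(4, 4)

    # Misspelled attribute: silently attaches a new 'require_grad' attribute
    # to each Parameter and leaves gradient tracking enabled.
    for param in layer.parameters():
        param.require_grad = False

    print(all(p.requires_grad for p in layer.parameters()))  # True -> NOT frozen

    # Correct attribute: actually disables gradient tracking.
    for param in layer.parameters():
        param.requires_grad = False

    print(any(p.requires_grad for p in layer.parameters()))  # False -> frozen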