Fix require_grad typos (#6930)
Signed-off-by: Sergii Dymchenko <sdym@fb.com>
Signed-off-by: Gerald Shen <geshen@nvidia.com>
kit1980 authored and gshennvm committed Jul 12, 2023
1 parent 9d3e731 commit b06588e
Showing 2 changed files with 12 additions and 12 deletions.
First changed file:

@@ -188,7 +188,7 @@ def freeze(self) -> None:
             param.requires_grad = False
         self.decoder.eval()
         for param in self.log_softmax.parameters():
-            param.require_grad = False
+            param.requires_grad = False
         self.log_softmax.eval()

     def unfreeze(self) -> None:
@@ -201,7 +201,7 @@ def unfreeze(self) -> None:
             param.requires_grad = True
         self.decoder.train()
         for param in self.log_softmax.parameters():
-            param.require_grad = True
+            param.requires_grad = True
         self.log_softmax.train()

     @contextmanager
@@ -701,10 +701,10 @@ def freeze(self) -> None:
                 param.requires_grad = False
             self.decoders[model_num].eval()
             for param in self.log_softmaxes[model_num].parameters():
-                param.require_grad = False
+                param.requires_grad = False
             self.log_softmaxes[model_num].eval()
             for param in self.encoders[model_num].parameters():
-                param.require_grad = False
+                param.requires_grad = False
             self.encoders[model_num].eval()

     def unfreeze(self) -> None:
@@ -718,10 +718,10 @@ def unfreeze(self) -> None:
                 param.requires_grad = True
             self.decoders[model_num].train()
             for param in self.log_softmaxes[model_num].parameters():
-                param.require_grad = True
+                param.requires_grad = True
             self.log_softmaxes[model_num].train()
             for param in self.encoders[model_num].parameters():
-                param.require_grad = True
+                param.requires_grad = True
             self.encoders[model_num].train()

     @contextmanager
Second changed file:

@@ -188,7 +188,7 @@ def freeze(self) -> None:
             param.requires_grad = False
         self.decoder.eval()
         for param in self.log_softmax.parameters():
-            param.require_grad = False
+            param.requires_grad = False
         self.log_softmax.eval()

     def unfreeze(self) -> None:
@@ -201,7 +201,7 @@ def unfreeze(self) -> None:
             param.requires_grad = True
         self.decoder.train()
         for param in self.log_softmax.parameters():
-            param.require_grad = True
+            param.requires_grad = True
         self.log_softmax.train()

     @contextmanager
@@ -701,10 +701,10 @@ def freeze(self) -> None:
                 param.requires_grad = False
             self.decoders[model_num].eval()
             for param in self.log_softmaxes[model_num].parameters():
-                param.require_grad = False
+                param.requires_grad = False
             self.log_softmaxes[model_num].eval()
             for param in self.encoders[model_num].parameters():
-                param.require_grad = False
+                param.requires_grad = False
             self.encoders[model_num].eval()

     def unfreeze(self) -> None:
@@ -718,10 +718,10 @@ def unfreeze(self) -> None:
                 param.requires_grad = True
             self.decoders[model_num].train()
             for param in self.log_softmaxes[model_num].parameters():
-                param.require_grad = True
+                param.requires_grad = True
             self.log_softmaxes[model_num].train()
             for param in self.encoders[model_num].parameters():
-                param.require_grad = True
+                param.requires_grad = True
             self.encoders[model_num].train()

     @contextmanager
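
For context, the fix is not purely cosmetic: PyTorch tensors (and therefore nn.Parameter objects) accept arbitrary attribute assignment, so the misspelled "param.require_grad = False" silently creates an unused attribute while requires_grad stays True, and the supposedly frozen decoder, log_softmax, and encoder modules keep tracking gradients. A minimal sketch illustrating the difference (not part of the commit; nn.Linear stands in for the real submodules):

import torch.nn as nn

layer = nn.Linear(4, 2)  # stand-in for any submodule being frozen

# Misspelled attribute: the assignment succeeds but has no effect on autograd.
for param in layer.parameters():
    param.require_grad = False
print(all(p.requires_grad for p in layer.parameters()))  # True -- still trainable

# Correct attribute: gradient tracking is actually switched off.
for param in layer.parameters():
    param.requires_grad = False
print(any(p.requires_grad for p in layer.parameters()))  # False -- frozen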
