Commit
Revert "Use quantized 8-bit Adam optimizer"
This reverts commit a8df9db.
tleyden committed Oct 17, 2023
1 parent a8df9db commit ba6383c
Showing 1 changed file with 1 addition and 7 deletions.
8 changes: 1 addition & 7 deletions dalm/training/rag_e2e/train_rage2e.py
@@ -48,8 +48,6 @@
     save_model_hook,
 )
 from dalm.utils import load_dataset
-import bitsandbytes as bnb
-
 
 logger = get_logger(__name__)
 
@@ -348,11 +346,7 @@ def train_e2e(
 
 
     logger.info("create optimizer")
-
-    # Avoid OOM errors by using the 8-bit quantized Adam optimizer
-    # optimizer = torch.optim.Adam(rag_model.parameters(), lr=learning_rate)
-    optimizer = bnb.optim.Adam8bit(rag_model.parameters(), lr=learning_rate)
-
+    optimizer = torch.optim.Adam(rag_model.parameters(), lr=learning_rate)
     logger.info("/create optimizer")
 
     # Scheduler and math around the number of training steps.
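For context on what this revert undoes: the original commit swapped the full-precision Adam optimizer for the 8-bit quantized variant from bitsandbytes, which stores the optimizer's moment estimates in quantized form to reduce GPU memory and avoid OOM errors. Below is a minimal sketch of the two alternatives side by side, assuming bitsandbytes is installed; the build_optimizer helper and use_8bit flag are hypothetical and not part of this repository:

    import torch
    import bitsandbytes as bnb

    def build_optimizer(
        model: torch.nn.Module, learning_rate: float, use_8bit: bool
    ) -> torch.optim.Optimizer:
        if use_8bit:
            # Reverted approach: Adam with 8-bit quantized optimizer state
            # (smaller moment buffers, lower GPU memory footprint).
            return bnb.optim.Adam8bit(model.parameters(), lr=learning_rate)
        # Approach restored by this revert: standard full-precision Adam.
        return torch.optim.Adam(model.parameters(), lr=learning_rate)

Both branches return a torch.optim.Optimizer, so the rest of the training loop is unchanged whichever optimizer is selected.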
