diff --git a/src/llama_recipes/finetuning.py b/src/llama_recipes/finetuning.py
index 5f7f8b24abcf45e1984279f4f6e6ae07345084d8..79f971a0a9fb35a4152c3c96d5eb941bc0a13df6 100644
--- a/src/llama_recipes/finetuning.py
+++ b/src/llama_recipes/finetuning.py
@@ -226,11 +226,12 @@ def main(**kwargs):
             momentum_dtype=torch.bfloat16,
             variance_dtype=torch.bfloat16,
             use_kahan_summation=False,
+            weight_decay=train_config.weight_decay,
         )
     else:
         optimizer = optim.AdamW(
             model.parameters(),
             lr=train_config.lr,
-            weight_decay=0.0,
+            weight_decay=train_config.weight_decay,
         )
     scheduler = StepLR(optimizer, step_size=1, gamma=train_config.gamma)
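
For reference, a minimal runnable sketch of the optimizer setup this patch produces, assuming a train_config exposing lr, weight_decay, and gamma (the TrainConfig stand-in and toy model below are illustrative, not the project's actual classes):

    import torch
    from torch import optim
    from torch.optim.lr_scheduler import StepLR

    class TrainConfig:  # stand-in for llama_recipes' training config
        lr = 1e-4
        weight_decay = 0.1  # previously hard-coded to 0.0 in the AdamW branch
        gamma = 0.85

    train_config = TrainConfig()
    model = torch.nn.Linear(8, 8)  # toy stand-in for the fine-tuned model

    # After the patch, weight decay comes from the config instead of being
    # fixed at 0.0, so it can be tuned per run alongside lr and gamma.
    optimizer = optim.AdamW(
        model.parameters(),
        lr=train_config.lr,
        weight_decay=train_config.weight_decay,
    )
    scheduler = StepLR(optimizer, step_size=1, gamma=train_config.gamma)

The same config-driven weight_decay is also passed to the AnyPrecisionAdamW branch in the patch, so both optimizer paths honor the setting.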