diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py
index 2055cc22f951c2..097c54a9862773 100755
--- a/src/transformers/trainer.py
+++ b/src/transformers/trainer.py
@@ -3691,7 +3691,6 @@ def training_step(
             with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                 scaled_loss.backward()
         else:
-            loss *= self.args.gradient_accumulation_steps
             self.accelerator.backward(loss, **kwargs)
             # Finally we need to normalize the loss for reporting
             if num_items_in_batch is None:
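
For context, a minimal plain-PyTorch sketch of the loss handling this hunk touches (this is illustrative only, not the Trainer/Accelerate code path, and every name in it is hypothetical): with gradient accumulation, the loss passed to `backward()` is divided by the number of accumulation steps so the accumulated gradients average over the micro-batches, while the value kept for reporting is normalized separately.

```python
# Illustrative sketch of gradient accumulation loss scaling (hypothetical names,
# not transformers.Trainer code).
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
gradient_accumulation_steps = 4

running_loss = 0.0
for _ in range(gradient_accumulation_steps):
    x = torch.randn(8, 4)
    y = torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y)
    # Scale the loss used for backward so the accumulated gradient is an
    # average over the micro-batches, not a sum.
    (loss / gradient_accumulation_steps).backward()
    # Keep the un-scaled per-micro-batch loss for reporting.
    running_loss += loss.detach().item()

optimizer.step()
optimizer.zero_grad()
# Normalize the reported loss over the accumulation window.
print(f"mean reported loss: {running_loss / gradient_accumulation_steps:.4f}")
```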