Commit e9028e0

fix warm-up error

layumi committed May 20, 2019
1 parent 389180f · commit e9028e0
Showing 4 changed files with 8 additions and 10 deletions.
README.md (2 changes: 1 addition & 1 deletion)

@@ -50,7 +50,7 @@ You may refer to [Here](https://github.com/layumi/Person_reID_baseline_matconvne
Different frameworks need to be tuned in different ways.

## Some News
-**20 May 2019:** Linear Warm Up is added. It can be used by simply added `--warm_up`. You also can set warm-up the first 5 epoch by `--warm_epoch 5`.
+**20 May 2019:** Linear Warm Up is added. You can warm up the first K epochs with `--warm_epoch K`. If K <= 0, no warm-up is applied.

**What's new:** FP16 has been added. It can be used by simply adding `--fp16`. You need to install [apex](https://github.com/NVIDIA/apex) and update your pytorch to 1.0.

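As a rough illustration (not code from this repository), the warm-up described above scales the loss linearly from 0.1 to 1.0 of its value over the first K epochs; `iters_per_epoch` below is a hypothetical stand-in for the number of batches per epoch:

```python
# Sketch of the linear warm-up schedule described above; assumes a
# hypothetical iters_per_epoch rather than the repo's dataloader.
def warmup_factor(step, warm_epoch, iters_per_epoch, start=0.1):
    """Loss multiplier at a given global training step."""
    if warm_epoch <= 0:
        return 1.0  # K <= 0 disables warm-up
    warm_iteration = warm_epoch * iters_per_epoch
    # Grow linearly from `start` to 1.0 over the warm-up iterations.
    return min(1.0, start + (1.0 - start) * step / warm_iteration)
```

For example, with `--warm_epoch 5` the multiplier reaches 1.0 at the end of epoch 5 and stays there for the rest of training.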
model/ft_ResNet50/opts.yaml (2 changes: 1 addition & 1 deletion)

@@ -13,4 +13,4 @@ stride: 2
train_all: false
use_NAS: false
use_dense: false
-warm_up: true
+warm_epoch: 0
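The saved `opts.yaml` records the options used at training time so they can be restored later. Configs written before this commit contain the boolean `warm_up` but no `warm_epoch`, so a loader can fall back to 0 (no warm-up) for older files. A minimal sketch, assuming PyYAML and this file path:

```python
# Hedged sketch: reload a saved training config, tolerating older files
# that predate the warm_epoch key.
import yaml

with open('model/ft_ResNet50/opts.yaml', 'r') as f:
    config = yaml.safe_load(f)

# Older configs only have the boolean `warm_up`; default to 0 epochs.
warm_epoch = config.get('warm_epoch', 0)
```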
model/ft_ResNet50/train.py (9 changes: 4 additions & 5 deletions)

@@ -43,7 +43,7 @@
parser.add_argument('--erasing_p', default=0, type=float, help='Random Erasing probability, in [0,1]')
parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
parser.add_argument('--use_NAS', action='store_true', help='use NAS' )
-parser.add_argument('--warm_up', action='store_true', help='use NAS' )
+parser.add_argument('--warm_epoch', default=0, type=int, help='the first K epochs that need warm-up')
parser.add_argument('--lr', default=0.05, type=float, help='learning rate')
parser.add_argument('--droprate', default=0.5, type=float, help='drop rate')
parser.add_argument('--PCB', action='store_true', help='use PCB+ResNet50' )
@@ -157,8 +157,8 @@ def train_model(model, criterion, optimizer, scheduler, num_epochs=25):

#best_model_wts = model.state_dict()
#best_acc = 0.0
-warm_up = 0.1
-warm_iteration = round(dataset_sizes['train']/opt.batchsize)*5
+warm_up = 0.1  # we start from 0.1 * lrRate
+warm_iteration = round(dataset_sizes['train']/opt.batchsize)*opt.warm_epoch  # first opt.warm_epoch epochs

for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
@@ -220,9 +220,8 @@ def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
loss += criterion(part[i+1], labels)

# backward + optimize only if in training phase
-if epoch<5 and phase == 'train':
+if epoch<opt.warm_epoch and phase == 'train':
warm_up = min(1.0, warm_up + 0.9 / warm_iteration)
-print(warm_up)
loss *= warm_up

if phase == 'train':
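To put numbers on the fixed schedule: with a hypothetical 12,800 training images, `--batchsize 32`, and `--warm_epoch 5`, `warm_iteration = round(12800/32)*5 = 2000`, so `warm_up` grows by `0.9/2000 = 0.00045` per batch and reaches 1.0 exactly when epoch 5 ends. A self-contained sketch of the corrected loop (illustrative values, not the repository's training code):

```python
# Illustrative reconstruction of the fixed warm-up logic with made-up values.
dataset_size = 12800   # stand-in for dataset_sizes['train']
batchsize = 32         # stand-in for opt.batchsize
warm_epoch = 5         # stand-in for opt.warm_epoch
num_epochs = 60

warm_up = 0.1  # start the loss multiplier at 0.1
warm_iteration = round(dataset_size / batchsize) * warm_epoch

for epoch in range(num_epochs):
    for batch in range(round(dataset_size / batchsize)):
        loss = 1.0  # placeholder for the computed batch loss
        if epoch < warm_epoch:  # previously hard-coded as `epoch < 5`
            warm_up = min(1.0, warm_up + 0.9 / warm_iteration)
            loss *= warm_up
        # backward pass and optimizer step would follow here
```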
train.py (5 changes: 2 additions & 3 deletions)

@@ -43,8 +43,7 @@
parser.add_argument('--erasing_p', default=0, type=float, help='Random Erasing probability, in [0,1]')
parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
parser.add_argument('--use_NAS', action='store_true', help='use NAS' )
-parser.add_argument('--warm_up', action='store_true', help='warm up learning rate/ In the code, I warm up the loss' )
-parser.add_argument('--warm_epoch', default=5, type=int, help='the first K epoch that needs warm up')
+parser.add_argument('--warm_epoch', default=0, type=int, help='the first K epochs that need warm-up')
parser.add_argument('--lr', default=0.05, type=float, help='learning rate')
parser.add_argument('--droprate', default=0.5, type=float, help='drop rate')
parser.add_argument('--PCB', action='store_true', help='use PCB+ResNet50' )
@@ -221,7 +220,7 @@ def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
loss += criterion(part[i+1], labels)

# backward + optimize only if in training phase
-if epoch<5 and phase == 'train':
+if epoch<opt.warm_epoch and phase == 'train':
warm_up = min(1.0, warm_up + 0.9 / warm_iteration)
loss *= warm_up

