Commit 998940bc authored by zhiyang.zhou

minor change

parent f45e81d0
Pipeline #196 canceled
@@ -20,7 +20,7 @@ parser.add_argument('--no_cuda', action='store_true', default=False, help='disab
 parser.add_argument('-b', '--batch_size', default=32, type=int, help='mini-batch size (default: 32)')
 parser.add_argument('--lr', type=float, default=0.05, metavar='LR', help='learning rate')
 parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum')
-parser.add_argument('--weight_decay', '--wd', default=2e-4, type=float, metavar='W')
+parser.add_argument('--weight_decay', '--wd', default=0, type=float, metavar='W')
 parser.add_argument('--epochs', type=int, default=20, metavar='N', help='number of epochs to train')
 parser.add_argument('--schedule', type=int, nargs='+', default=[10, 15],
                     help='Decrease learning rate at these epochs.')
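Note on the changed flag: '--wd' is an alias of '--weight_decay', so both spellings store into args.weight_decay, and the new default of 0 disables L2 regularization unless the flag is passed explicitly. A minimal standalone sketch of that behavior (assuming nothing beyond argparse itself, not the repository's actual script):

```python
import argparse

# Sketch of the flag touched by this commit; '--wd' aliases '--weight_decay'.
parser = argparse.ArgumentParser()
parser.add_argument('--weight_decay', '--wd', default=0, type=float, metavar='W')

args = parser.parse_args([])
print(args.weight_decay)   # 0 -- argparse leaves a non-string default uncoerced

args = parser.parse_args(['--wd', '2e-4'])
print(args.weight_decay)   # 0.0002 -- the old default can still be requested explicitly
```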
@@ -154,7 +154,8 @@ def main():
     print(args)
     # model = get_model(args.model_name, num_classes=NUM_CLASSES).to(device)
     model = models.resnet50(pretrained=False, num_classes=NUM_CLASSES).to(device)
-    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
+    # optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
+    optimizer = optim.Adam(model.parameters(), args.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
     for epoch in range(1, args.epochs + 1):
         # adjust learning rate for SGD
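On the optimizer swap: torch.optim.Adam applies weight_decay as plain L2 regularization coupled into the adaptive update, so together with the new default of 0 the decay term vanishes entirely. A minimal sketch of the new setup, where NUM_CLASSES=10 is an assumption standing in for the dataset's class count in the real script:

```python
import torch
import torch.optim as optim
from torchvision import models

NUM_CLASSES = 10  # assumption for this sketch; the script derives it from its dataset
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = models.resnet50(pretrained=False, num_classes=NUM_CLASSES).to(device)

# Adam as constructed in this commit; with weight_decay=0 no L2 term
# is added to the gradients.
optimizer = optim.Adam(model.parameters(), lr=0.05, betas=(0.9, 0.999),
                       eps=1e-08, weight_decay=0)

# If weight decay is reintroduced later, AdamW decouples it from the
# adaptive moment estimates, which usually regularizes more predictably:
# optimizer = optim.AdamW(model.parameters(), lr=0.05, weight_decay=2e-4)
```

Also worth noting: the step decay driven by '--schedule' still runs each epoch (the loop's "adjust learning rate for SGD" comment), even though it was written with SGD in mind.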