From 8fe594e5d2365a953ccc16cfb57170c497d19a7b Mon Sep 17 00:00:00 2001
From: Claudio Michaelis <claudio.michaelis@uni-tuebingen.de>
Date: Thu, 25 Jul 2019 05:05:42 +0200
Subject: [PATCH] Add option to use linear scaling rule (#1038)

* Add option to use linear scaling rule

* Ensure PEP 8 compliance
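
The scaling follows the linear scaling rule from Goyal et al.
(https://arxiv.org/abs/1706.02677): when the effective batch size grows
by a factor of k, multiply the learning rate by the same factor. The
base configs are assumed to be tuned for 8 GPUs, hence the divisor of 8
in the patch. A minimal sketch of the intended behavior (the function
name and its defaults are illustrative, not part of this patch):

    # Sketch of the linear scaling rule applied in this patch.
    # Assumption: base_lr in the config was tuned for 8 GPUs.
    def autoscale_lr(base_lr, num_gpus, base_num_gpus=8):
        # The effective batch size grows linearly with the GPU count,
        # so the learning rate is scaled by the same factor.
        return base_lr * num_gpus / base_num_gpus

    # e.g. lr=0.02 tuned for 8 GPUs, training on 4 GPUs:
    # autoscale_lr(0.02, 4) -> 0.01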
---
 tools/train.py | 8 ++++++++
 1 file changed, 8 insertions(+)
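
Usage sketch (the config path is illustrative; the --gpus flag already
exists in train.py): with a base lr of 0.02 tuned for 8 GPUs, running

    python tools/train.py configs/faster_rcnn_r50_fpn_1x.py \
        --gpus 4 --autoscale-lr

trains with lr = 0.02 * 4 / 8 = 0.01. Without --autoscale-lr the
behavior is unchanged.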

diff --git a/tools/train.py b/tools/train.py
index ee2012f..8c3290a 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -35,6 +35,10 @@ def parse_args():
         default='none',
         help='job launcher')
     parser.add_argument('--local_rank', type=int, default=0)
+    parser.add_argument(
+        '--autoscale-lr',
+        action='store_true',
+        help='automatically scale lr with the number of gpus')
     args = parser.parse_args()
     if 'LOCAL_RANK' not in os.environ:
         os.environ['LOCAL_RANK'] = str(args.local_rank)
@@ -56,6 +60,10 @@ def main():
         cfg.resume_from = args.resume_from
     cfg.gpus = args.gpus
 
+    if args.autoscale_lr:
+        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
+        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
+
     # init distributed env first, since logger depends on the dist info.
     if args.launcher == 'none':
         distributed = False
-- 
GitLab