From 4b984a7c543e7dfbeafe613ff0db18a7fdb76f43 Mon Sep 17 00:00:00 2001
From: CapMocha <57527488+CapMocha@users.noreply.github.com>
Date: Sat, 18 Jan 2020 23:06:58 +0800
Subject: [PATCH] Fix a bug for distributed training on Windows (#1985)

Fix a bug for distributed training on the Windows platform: guard the call to
dist.is_initialized() with dist.is_available(), since PyTorch builds without
distributed support cannot call dist.is_initialized() directly.
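
For reference, a minimal standalone sketch of the guarded reduction pattern
used here (the helper name reduce_mean is illustrative only, not part of
mmdet):

    import torch
    import torch.distributed as dist

    def reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
        # Average a tensor across ranks, but only when distributed training
        # is actually running. dist.is_available() must be checked first,
        # because some PyTorch builds (e.g. on Windows) ship without
        # distributed support and dist.is_initialized() is then unsafe to call.
        if dist.is_available() and dist.is_initialized():
            tensor = tensor.data.clone()
            dist.all_reduce(tensor.div_(dist.get_world_size()))
        return tensor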
---
 mmdet/apis/train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mmdet/apis/train.py b/mmdet/apis/train.py
index a79537f..f68de4f 100644
--- a/mmdet/apis/train.py
+++ b/mmdet/apis/train.py
@@ -73,7 +73,7 @@ def parse_losses(losses):
     log_vars['loss'] = loss
     for loss_name, loss_value in log_vars.items():
         # reduce loss when distributed training
-        if dist.is_initialized():
+        if dist.is_available() and dist.is_initialized():
             loss_value = loss_value.data.clone()
             dist.all_reduce(loss_value.div_(dist.get_world_size()))
         log_vars[loss_name] = loss_value.item()
-- 
GitLab