diff --git a/.travis.yml b/.travis.yml
index bba12f1c1540d5bafe3039f2f66fa20d78ece973..127b9030a6fcc860a7c72af2d635908dc23eb190 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,7 +2,7 @@ dist: xenial
 language: python
 
 install:
-  - pip install flake8
+  - pip install flake8 yapf
 
 python:
   - "3.5"
@@ -11,3 +11,4 @@ python:
 
 script:
   - flake8
+  - yapf -r -d --style .style.yapf mmdet/ tools/
\ No newline at end of file
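(Note: the CI step above references a `.style.yapf` that is not included in this diff. A minimal configuration consistent with the reformatting in the hunks below would be something like the sketch that follows; this is an assumption for illustration, not the repository's actual file.

    [style]
    based_on_style = pep8

Running `yapf -r -d` recursively prints a diff instead of rewriting files in place and exits non-zero when any file would be changed, which is what lets it act as a formatting gate in the Travis job alongside `flake8`.)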
diff --git a/mmdet/core/anchor/anchor_generator.py b/mmdet/core/anchor/anchor_generator.py
index 8995ea61d75ad38fa747e63deafd4b048a410d76..61304307415392b17f78dd82f53c731dcca1a144 100644
--- a/mmdet/core/anchor/anchor_generator.py
+++ b/mmdet/core/anchor/anchor_generator.py
@@ -33,12 +33,14 @@ class AnchorGenerator(object):
             ws = (w * self.scales[:, None] * w_ratios[None, :]).view(-1)
             hs = (h * self.scales[:, None] * h_ratios[None, :]).view(-1)
 
+        # yapf: disable
         base_anchors = torch.stack(
             [
                 x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),
                 x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)
             ],
             dim=-1).round()
+        # yapf: enable
 
         return base_anchors
 
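(Note: the `# yapf: disable` / `# yapf: enable` pair added above is yapf's standard comment pragma: lines between the two markers are left exactly as written, which preserves the hand-aligned grouping of the four box coordinates in the `torch.stack` call. A minimal illustration of the pattern, not taken from this patch:

    # yapf: disable
    identity = [[1, 0],
                [0, 1]]
    # yapf: enable

The disabled region still has to pass `flake8`, since the lint step in `.travis.yml` runs independently of yapf.)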
diff --git a/mmdet/core/anchor/guided_anchor_target.py b/mmdet/core/anchor/guided_anchor_target.py
index 2e9540644b995ba74f5e8e261cf880f9f687c596..7284a73c3ade0c62862e8cf009609274d4dcc0fe 100644
--- a/mmdet/core/anchor/guided_anchor_target.py
+++ b/mmdet/core/anchor/guided_anchor_target.py
@@ -62,12 +62,13 @@ def ga_loc_target(gt_bboxes_list,
     all_ignore_map = []
     for lvl_id in range(num_lvls):
         h, w = featmap_sizes[lvl_id]
-        loc_targets = torch.zeros(img_per_gpu,
-                                  1,
-                                  h,
-                                  w,
-                                  device=gt_bboxes_list[0].device,
-                                  dtype=torch.float32)
+        loc_targets = torch.zeros(
+            img_per_gpu,
+            1,
+            h,
+            w,
+            device=gt_bboxes_list[0].device,
+            dtype=torch.float32)
         loc_weights = torch.full_like(loc_targets, -1)
         ignore_map = torch.zeros_like(loc_targets)
         all_loc_targets.append(loc_targets)
@@ -175,17 +176,18 @@ def ga_shape_target(approx_list,
     if gt_bboxes_ignore_list is None:
         gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
     (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list,
-     neg_inds_list) = multi_apply(ga_shape_target_single,
-                                  approx_flat_list,
-                                  inside_flag_flat_list,
-                                  square_flat_list,
-                                  gt_bboxes_list,
-                                  gt_bboxes_ignore_list,
-                                  img_metas,
-                                  approxs_per_octave=approxs_per_octave,
-                                  cfg=cfg,
-                                  sampling=sampling,
-                                  unmap_outputs=unmap_outputs)
+     neg_inds_list) = multi_apply(
+         ga_shape_target_single,
+         approx_flat_list,
+         inside_flag_flat_list,
+         square_flat_list,
+         gt_bboxes_list,
+         gt_bboxes_ignore_list,
+         img_metas,
+         approxs_per_octave=approxs_per_octave,
+         cfg=cfg,
+         sampling=sampling,
+         unmap_outputs=unmap_outputs)
     # no valid anchors
     if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]):
         return None
diff --git a/mmdet/core/bbox/assigners/approx_max_iou_assigner.py b/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
index 1283f7f525e5678eb8793ca112d851273056f346..867a56b87e869d6b38ca379a4ff1231ffd9f7e20 100644
--- a/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
+++ b/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
@@ -101,14 +101,12 @@ class ApproxMaxIoUAssigner(MaxIoUAssigner):
         if (self.ignore_iof_thr > 0) and (gt_bboxes_ignore is not None) and (
                 gt_bboxes_ignore.numel() > 0):
             if self.ignore_wrt_candidates:
-                ignore_overlaps = bbox_overlaps(bboxes,
-                                                gt_bboxes_ignore,
-                                                mode='iof')
+                ignore_overlaps = bbox_overlaps(
+                    bboxes, gt_bboxes_ignore, mode='iof')
                 ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
             else:
-                ignore_overlaps = bbox_overlaps(gt_bboxes_ignore,
-                                                bboxes,
-                                                mode='iof')
+                ignore_overlaps = bbox_overlaps(
+                    gt_bboxes_ignore, bboxes, mode='iof')
                 ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
             overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
 
diff --git a/mmdet/core/bbox/assigners/max_iou_assigner.py b/mmdet/core/bbox/assigners/max_iou_assigner.py
index 57a1e750456da7cd5fa251aed3f36416563ce7d0..99dcbe8e0f640fb84f0d9e995212405b0fe628ef 100644
--- a/mmdet/core/bbox/assigners/max_iou_assigner.py
+++ b/mmdet/core/bbox/assigners/max_iou_assigner.py
@@ -107,8 +107,9 @@ class MaxIoUAssigner(BaseAssigner):
         num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
 
         # 1. assign -1 by default
-        assigned_gt_inds = overlaps.new_full(
-            (num_bboxes, ), -1, dtype=torch.long)
+        assigned_gt_inds = overlaps.new_full((num_bboxes, ),
+                                             -1,
+                                             dtype=torch.long)
 
         # for each anchor, which gt best overlaps with it
         # for each anchor, the max iou of all gts
diff --git a/mmdet/core/bbox/bbox_target.py b/mmdet/core/bbox/bbox_target.py
index aa1fbc67430672185a1a01cbc5338a1912928b84..20b3957bcfe303225a3f5704e46c1a2832d85935 100644
--- a/mmdet/core/bbox/bbox_target.py
+++ b/mmdet/core/bbox/bbox_target.py
@@ -62,10 +62,10 @@ def bbox_target_single(pos_bboxes,
 
 
 def expand_target(bbox_targets, bbox_weights, labels, num_classes):
-    bbox_targets_expand = bbox_targets.new_zeros((bbox_targets.size(0),
-                                                  4 * num_classes))
-    bbox_weights_expand = bbox_weights.new_zeros((bbox_weights.size(0),
-                                                  4 * num_classes))
+    bbox_targets_expand = bbox_targets.new_zeros(
+        (bbox_targets.size(0), 4 * num_classes))
+    bbox_weights_expand = bbox_weights.new_zeros(
+        (bbox_weights.size(0), 4 * num_classes))
     for i in torch.nonzero(labels > 0).squeeze(-1):
         start, end = labels[i] * 4, (labels[i] + 1) * 4
         bbox_targets_expand[i, start:end] = bbox_targets[i, :]
diff --git a/mmdet/core/evaluation/mean_ap.py b/mmdet/core/evaluation/mean_ap.py
index 8f0474aae21a530efc173ea426a08d1691daab72..74a972b1f6c901c246eb68419528f9683bd061b2 100644
--- a/mmdet/core/evaluation/mean_ap.py
+++ b/mmdet/core/evaluation/mean_ap.py
@@ -279,8 +279,8 @@ def eval_map(det_results,
                     bbox[:, 3] - bbox[:, 1] + 1)
                 for k, (min_area, max_area) in enumerate(area_ranges):
                     num_gts[k] += np.sum(
-                        np.logical_not(cls_gt_ignore[j]) &
-                        (gt_areas >= min_area) & (gt_areas < max_area))
+                        np.logical_not(cls_gt_ignore[j])
+                        & (gt_areas >= min_area) & (gt_areas < max_area))
         # sort all det bboxes by score, also sort tp and fp
         cls_dets = np.vstack(cls_dets)
         num_dets = cls_dets.shape[0]
@@ -312,11 +312,12 @@ def eval_map(det_results,
         all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])
         all_num_gts = np.vstack(
             [cls_result['num_gts'] for cls_result in eval_results])
-        mean_ap = [
-            all_ap[all_num_gts[:, i] > 0, i].mean()
-            if np.any(all_num_gts[:, i] > 0) else 0.0
-            for i in range(num_scales)
-        ]
+        mean_ap = []
+        for i in range(num_scales):
+            if np.any(all_num_gts[:, i] > 0):
+                mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())
+            else:
+                mean_ap.append(0.0)
     else:
         aps = []
         for cls_result in eval_results:
@@ -368,8 +369,8 @@ def print_map_summary(mean_ap, results, dataset=None):
         for j in range(num_classes):
             row_data = [
                 label_names[j], num_gts[i, j], results[j]['num_dets'],
-                '{:.3f}'.format(recalls[i, j]), '{:.3f}'.format(
-                    precisions[i, j]), '{:.3f}'.format(aps[i, j])
+                '{:.3f}'.format(recalls[i, j]),
+                '{:.3f}'.format(precisions[i, j]), '{:.3f}'.format(aps[i, j])
             ]
             table_data.append(row_data)
         table_data.append(['mAP', '', '', '', '', '{:.3f}'.format(mean_ap[i])])
diff --git a/mmdet/core/post_processing/bbox_nms.py b/mmdet/core/post_processing/bbox_nms.py
index cb3fe21e036ee33cabc027567c26f651ac7424ef..01beecd43abc7641d43773b192f988a55f4295d9 100644
--- a/mmdet/core/post_processing/bbox_nms.py
+++ b/mmdet/core/post_processing/bbox_nms.py
@@ -45,8 +45,9 @@ def multiclass_nms(multi_bboxes,
             _scores *= score_factors[cls_inds]
         cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)
         cls_dets, _ = nms_op(cls_dets, **nms_cfg_)
-        cls_labels = multi_bboxes.new_full(
-            (cls_dets.shape[0], ), i - 1, dtype=torch.long)
+        cls_labels = multi_bboxes.new_full((cls_dets.shape[0], ),
+                                           i - 1,
+                                           dtype=torch.long)
         bboxes.append(cls_dets)
         labels.append(cls_labels)
     if bboxes:
diff --git a/mmdet/datasets/__init__.py b/mmdet/datasets/__init__.py
index 786b0593f00ce85ee695f66d6c46fa0ecf5afdf1..6d0f12f60b2f64bb9cee039a8e3e593ad5b131cd 100644
--- a/mmdet/datasets/__init__.py
+++ b/mmdet/datasets/__init__.py
@@ -15,6 +15,6 @@ __all__ = [
     'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset',
     'CityscapesDataset', 'GroupSampler', 'DistributedGroupSampler',
     'build_dataloader', 'to_tensor', 'random_scale', 'show_ann',
-    'ConcatDataset', 'RepeatDataset', 'ExtraAugmentation',
-    'WIDERFaceDataset', 'DATASETS', 'build_dataset'
+    'ConcatDataset', 'RepeatDataset', 'ExtraAugmentation', 'WIDERFaceDataset',
+    'DATASETS', 'build_dataset'
 ]
diff --git a/mmdet/datasets/builder.py b/mmdet/datasets/builder.py
index 3d4e2caafe38a5831620f93ac60399869cafa1d0..a8b0e29c5dac8cccf7a8ea125dad20354ddd141e 100644
--- a/mmdet/datasets/builder.py
+++ b/mmdet/datasets/builder.py
@@ -29,8 +29,8 @@ def _concat_dataset(cfg, default_args=None):
 
 def build_dataset(cfg, default_args=None):
     if cfg['type'] == 'RepeatDataset':
-        dataset = RepeatDataset(build_dataset(cfg['dataset'], default_args),
-                                cfg['times'])
+        dataset = RepeatDataset(
+            build_dataset(cfg['dataset'], default_args), cfg['times'])
     elif isinstance(cfg['ann_file'], (list, tuple)):
         dataset = _concat_dataset(cfg, default_args)
     else:
diff --git a/mmdet/datasets/cityscapes.py b/mmdet/datasets/cityscapes.py
index fcbd43c12412644a1e81087891372f78659041b8..51ca049873fe8af2a62085cdbfd779564552793d 100644
--- a/mmdet/datasets/cityscapes.py
+++ b/mmdet/datasets/cityscapes.py
@@ -5,5 +5,5 @@ from .registry import DATASETS
 @DATASETS.register_module
 class CityscapesDataset(CocoDataset):
 
-    CLASSES = ('person', 'rider', 'car', 'truck', 'bus',
-               'train', 'motorcycle', 'bicycle')
+    CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
+               'bicycle')
diff --git a/mmdet/datasets/extra_aug.py b/mmdet/datasets/extra_aug.py
index a9f4f44c5cce27a71bc7af989436e7a0842d1937..c1bda3eec3ad1e55f674c8a64e2a66169d8b6cc5 100644
--- a/mmdet/datasets/extra_aug.py
+++ b/mmdet/datasets/extra_aug.py
@@ -115,8 +115,8 @@ class RandomCrop(object):
                 left = random.uniform(w - new_w)
                 top = random.uniform(h - new_h)
 
-                patch = np.array((int(left), int(top), int(left + new_w),
-                                  int(top + new_h)))
+                patch = np.array(
+                    (int(left), int(top), int(left + new_w), int(top + new_h)))
                 overlaps = bbox_overlaps(
                     patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
                 if overlaps.min() < min_iou:
diff --git a/mmdet/models/anchor_heads/ga_retina_head.py b/mmdet/models/anchor_heads/ga_retina_head.py
index c39ab8d684f1aa3383b798095bd9ebc433fc45ba..40ad0499f0b3ef99859ddf796c1197f1d0dd0f76 100644
--- a/mmdet/models/anchor_heads/ga_retina_head.py
+++ b/mmdet/models/anchor_heads/ga_retina_head.py
@@ -30,21 +30,23 @@ class GARetinaHead(GuidedAnchorHead):
         for i in range(self.stacked_convs):
             chn = self.in_channels if i == 0 else self.feat_channels
             self.cls_convs.append(
-                ConvModule(chn,
-                           self.feat_channels,
-                           3,
-                           stride=1,
-                           padding=1,
-                           conv_cfg=self.conv_cfg,
-                           norm_cfg=self.norm_cfg))
+                ConvModule(
+                    chn,
+                    self.feat_channels,
+                    3,
+                    stride=1,
+                    padding=1,
+                    conv_cfg=self.conv_cfg,
+                    norm_cfg=self.norm_cfg))
             self.reg_convs.append(
-                ConvModule(chn,
-                           self.feat_channels,
-                           3,
-                           stride=1,
-                           padding=1,
-                           conv_cfg=self.conv_cfg,
-                           norm_cfg=self.norm_cfg))
+                ConvModule(
+                    chn,
+                    self.feat_channels,
+                    3,
+                    stride=1,
+                    padding=1,
+                    conv_cfg=self.conv_cfg,
+                    norm_cfg=self.norm_cfg))
 
         self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
         self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2,
@@ -59,15 +61,13 @@ class GARetinaHead(GuidedAnchorHead):
             self.feat_channels,
             kernel_size=3,
             deformable_groups=self.deformable_groups)
-        self.retina_cls = MaskedConv2d(self.feat_channels,
-                                       self.num_anchors *
-                                       self.cls_out_channels,
-                                       3,
-                                       padding=1)
-        self.retina_reg = MaskedConv2d(self.feat_channels,
-                                       self.num_anchors * 4,
-                                       3,
-                                       padding=1)
+        self.retina_cls = MaskedConv2d(
+            self.feat_channels,
+            self.num_anchors * self.cls_out_channels,
+            3,
+            padding=1)
+        self.retina_reg = MaskedConv2d(
+            self.feat_channels, self.num_anchors * 4, 3, padding=1)
 
     def init_weights(self):
         for m in self.cls_convs:
diff --git a/mmdet/models/anchor_heads/ga_rpn_head.py b/mmdet/models/anchor_heads/ga_rpn_head.py
index b7788b6a9c2b3ebb4febc43a5024ad92a174b494..aecef2b2169b9acd5fd767c2a4696b68f6ac9511 100644
--- a/mmdet/models/anchor_heads/ga_rpn_head.py
+++ b/mmdet/models/anchor_heads/ga_rpn_head.py
@@ -17,10 +17,8 @@ class GARPNHead(GuidedAnchorHead):
         super(GARPNHead, self).__init__(2, in_channels, **kwargs)
 
     def _init_layers(self):
-        self.rpn_conv = nn.Conv2d(self.in_channels,
-                                  self.feat_channels,
-                                  3,
-                                  padding=1)
+        self.rpn_conv = nn.Conv2d(
+            self.in_channels, self.feat_channels, 3, padding=1)
         super(GARPNHead, self)._init_layers()
 
     def init_weights(self):
@@ -43,19 +41,21 @@ class GARPNHead(GuidedAnchorHead):
              img_metas,
              cfg,
              gt_bboxes_ignore=None):
-        losses = super(GARPNHead, self).loss(cls_scores,
-                                             bbox_preds,
-                                             shape_preds,
-                                             loc_preds,
-                                             gt_bboxes,
-                                             None,
-                                             img_metas,
-                                             cfg,
-                                             gt_bboxes_ignore=gt_bboxes_ignore)
-        return dict(loss_rpn_cls=losses['loss_cls'],
-                    loss_rpn_bbox=losses['loss_bbox'],
-                    loss_anchor_shape=losses['loss_shape'],
-                    loss_anchor_loc=losses['loss_loc'])
+        losses = super(GARPNHead, self).loss(
+            cls_scores,
+            bbox_preds,
+            shape_preds,
+            loc_preds,
+            gt_bboxes,
+            None,
+            img_metas,
+            cfg,
+            gt_bboxes_ignore=gt_bboxes_ignore)
+        return dict(
+            loss_rpn_cls=losses['loss_cls'],
+            loss_rpn_bbox=losses['loss_bbox'],
+            loss_anchor_shape=losses['loss_shape'],
+            loss_anchor_loc=losses['loss_loc'])
 
     def get_bboxes_single(self,
                           cls_scores,
diff --git a/mmdet/models/detectors/cascade_rcnn.py b/mmdet/models/detectors/cascade_rcnn.py
index f0564c9eb16ccf5184f94d29e33c889fbe80c143..f76bdc53293370348449f0c204d8a0b535a23791 100644
--- a/mmdet/models/detectors/cascade_rcnn.py
+++ b/mmdet/models/detectors/cascade_rcnn.py
@@ -282,13 +282,12 @@ class CascadeRCNN(BaseDetector, RPNTestMixin):
                     mask_roi_extractor = self.mask_roi_extractor[i]
                     mask_head = self.mask_head[i]
                     if det_bboxes.shape[0] == 0:
-                        segm_result = [
-                            [] for _ in range(mask_head.num_classes - 1)
-                        ]
+                        mask_classes = mask_head.num_classes - 1
+                        segm_result = [[] for _ in range(mask_classes)]
                     else:
                         _bboxes = (
-                            det_bboxes[:, :4] * scale_factor
-                            if rescale else det_bboxes)
+                            det_bboxes[:, :4] *
+                            scale_factor if rescale else det_bboxes)
                         mask_rois = bbox2roi([_bboxes])
                         mask_feats = mask_roi_extractor(
                             x[:len(mask_roi_extractor.featmap_strides)],
@@ -321,13 +320,12 @@ class CascadeRCNN(BaseDetector, RPNTestMixin):
 
         if self.with_mask:
             if det_bboxes.shape[0] == 0:
-                segm_result = [
-                    [] for _ in range(self.mask_head[-1].num_classes - 1)
-                ]
+                mask_classes = self.mask_head[-1].num_classes - 1
+                segm_result = [[] for _ in range(mask_classes)]
             else:
                 _bboxes = (
-                    det_bboxes[:, :4] * scale_factor
-                    if rescale else det_bboxes)
+                    det_bboxes[:, :4] *
+                    scale_factor if rescale else det_bboxes)
                 mask_rois = bbox2roi([_bboxes])
                 aug_masks = []
                 for i in range(self.num_stages):
diff --git a/mmdet/models/detectors/htc.py b/mmdet/models/detectors/htc.py
index 0384aa99b110a6ecbf3ad5038ec9b07343fef28c..bcf410f35cb35e628ca8f9fc4bb47242ae8c0c93 100644
--- a/mmdet/models/detectors/htc.py
+++ b/mmdet/models/detectors/htc.py
@@ -205,9 +205,10 @@ class HybridTaskCascade(CascadeRCNN):
                 gt_bboxes_ignore = [None for _ in range(num_imgs)]
 
             for j in range(num_imgs):
-                assign_result = bbox_assigner.assign(
-                    proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],
-                    gt_labels[j])
+                assign_result = bbox_assigner.assign(proposal_list[j],
+                                                     gt_bboxes[j],
+                                                     gt_bboxes_ignore[j],
+                                                     gt_labels[j])
                 sampling_result = bbox_sampler.sample(
                     assign_result,
                     proposal_list[j],
@@ -308,13 +309,12 @@ class HybridTaskCascade(CascadeRCNN):
                 if self.with_mask:
                     mask_head = self.mask_head[i]
                     if det_bboxes.shape[0] == 0:
-                        segm_result = [
-                            [] for _ in range(mask_head.num_classes - 1)
-                        ]
+                        mask_classes = mask_head.num_classes - 1
+                        segm_result = [[] for _ in range(mask_classes)]
                     else:
                         _bboxes = (
-                            det_bboxes[:, :4] * scale_factor
-                            if rescale else det_bboxes)
+                            det_bboxes[:, :4] *
+                            scale_factor if rescale else det_bboxes)
                         mask_pred = self._mask_forward_test(
                             i, x, _bboxes, semantic_feat=semantic_feat)
                         segm_result = mask_head.get_seg_masks(
@@ -342,13 +342,12 @@ class HybridTaskCascade(CascadeRCNN):
 
         if self.with_mask:
             if det_bboxes.shape[0] == 0:
-                segm_result = [
-                    [] for _ in range(self.mask_head[-1].num_classes - 1)
-                ]
+                mask_classes = self.mask_head[-1].num_classes - 1
+                segm_result = [[] for _ in range(mask_classes)]
             else:
                 _bboxes = (
-                    det_bboxes[:, :4] * scale_factor
-                    if rescale else det_bboxes)
+                    det_bboxes[:, :4] *
+                    scale_factor if rescale else det_bboxes)
 
                 mask_rois = bbox2roi([_bboxes])
                 aug_masks = []
diff --git a/mmdet/models/detectors/test_mixins.py b/mmdet/models/detectors/test_mixins.py
index 9a179e9e1b44cbb5284c3b1c1c1ebfea09177c68..6924818d0b6bf0b8340c3ef509b60ab1b0e9ba85 100644
--- a/mmdet/models/detectors/test_mixins.py
+++ b/mmdet/models/detectors/test_mixins.py
@@ -91,9 +91,10 @@ class BBoxTestMixin(object):
         # after merging, bboxes will be rescaled to the original image size
         merged_bboxes, merged_scores = merge_aug_bboxes(
             aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
-        det_bboxes, det_labels = multiclass_nms(
-            merged_bboxes, merged_scores, rcnn_test_cfg.score_thr,
-            rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img)
+        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
+                                                rcnn_test_cfg.score_thr,
+                                                rcnn_test_cfg.nms,
+                                                rcnn_test_cfg.max_per_img)
         return det_bboxes, det_labels
 
 
@@ -121,9 +122,11 @@ class MaskTestMixin(object):
             if self.with_shared_head:
                 mask_feats = self.shared_head(mask_feats)
             mask_pred = self.mask_head(mask_feats)
-            segm_result = self.mask_head.get_seg_masks(
-                mask_pred, _bboxes, det_labels, self.test_cfg.rcnn, ori_shape,
-                scale_factor, rescale)
+            segm_result = self.mask_head.get_seg_masks(mask_pred, _bboxes,
+                                                       det_labels,
+                                                       self.test_cfg.rcnn,
+                                                       ori_shape, scale_factor,
+                                                       rescale)
         return segm_result
 
     def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
diff --git a/mmdet/models/detectors/two_stage.py b/mmdet/models/detectors/two_stage.py
index c30047648c11b1bb903e5496ddc7db72e1417891..f70510cecaf633bc140f5d0280b6040db7788e73 100644
--- a/mmdet/models/detectors/two_stage.py
+++ b/mmdet/models/detectors/two_stage.py
@@ -125,9 +125,10 @@ class TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin,
                 gt_bboxes_ignore = [None for _ in range(num_imgs)]
             sampling_results = []
             for i in range(num_imgs):
-                assign_result = bbox_assigner.assign(
-                    proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
-                    gt_labels[i])
+                assign_result = bbox_assigner.assign(proposal_list[i],
+                                                     gt_bboxes[i],
+                                                     gt_bboxes_ignore[i],
+                                                     gt_labels[i])
                 sampling_result = bbox_sampler.sample(
                     assign_result,
                     proposal_list[i],
@@ -146,8 +147,9 @@ class TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin,
                 bbox_feats = self.shared_head(bbox_feats)
             cls_score, bbox_pred = self.bbox_head(bbox_feats)
 
-            bbox_targets = self.bbox_head.get_target(
-                sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn)
+            bbox_targets = self.bbox_head.get_target(sampling_results,
+                                                     gt_bboxes, gt_labels,
+                                                     self.train_cfg.rcnn)
             loss_bbox = self.bbox_head.loss(cls_score, bbox_pred,
                                             *bbox_targets)
             losses.update(loss_bbox)
@@ -179,8 +181,9 @@ class TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin,
                 mask_feats = bbox_feats[pos_inds]
             mask_pred = self.mask_head(mask_feats)
 
-            mask_targets = self.mask_head.get_target(
-                sampling_results, gt_masks, self.train_cfg.rcnn)
+            mask_targets = self.mask_head.get_target(sampling_results,
+                                                     gt_masks,
+                                                     self.train_cfg.rcnn)
             pos_labels = torch.cat(
                 [res.pos_gt_labels for res in sampling_results])
             loss_mask = self.mask_head.loss(mask_pred, mask_targets,
diff --git a/mmdet/models/mask_heads/fused_semantic_head.py b/mmdet/models/mask_heads/fused_semantic_head.py
index 550e08e61f7ab336bb6d377dcaf0bb2644656b6d..ee6910f48d7512f2149e97809390a19b654d3c79 100644
--- a/mmdet/models/mask_heads/fused_semantic_head.py
+++ b/mmdet/models/mask_heads/fused_semantic_head.py
@@ -98,7 +98,7 @@ class FusedSemanticHead(nn.Module):
         x = self.conv_embedding(x)
         return mask_pred, x
 
-    @force_fp32(apply_to=('mask_pred',))
+    @force_fp32(apply_to=('mask_pred', ))
     def loss(self, mask_pred, labels):
         labels = labels.squeeze(1).long()
         loss_semantic_seg = self.criterion(mask_pred, labels)
diff --git a/mmdet/models/roi_extractors/single_level.py b/mmdet/models/roi_extractors/single_level.py
index 8731126440367178265209c3e1d3fca473556c02..7eaab120e6e1a6a2b1243752b23348f3b0cca713 100644
--- a/mmdet/models/roi_extractors/single_level.py
+++ b/mmdet/models/roi_extractors/single_level.py
@@ -72,7 +72,7 @@ class SingleRoIExtractor(nn.Module):
         target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
         return target_lvls
 
-    @force_fp32(apply_to=('feats',), out_fp16=True)
+    @force_fp32(apply_to=('feats', ), out_fp16=True)
     def forward(self, feats, rois):
         if len(feats) == 1:
             return self.roi_layers[0](feats[0], rois)
diff --git a/tools/analyze_logs.py b/tools/analyze_logs.py
index c9f603f46fd3bba4226733c953b3d8bd2ef598d8..2810c98f104e954a764b8add4aa38a577a92a27b 100644
--- a/tools/analyze_logs.py
+++ b/tools/analyze_logs.py
@@ -51,9 +51,9 @@ def plot_curve(log_dicts, args):
         for j, metric in enumerate(metrics):
             print('plot curve of {}, metric is {}'.format(
                 args.json_logs[i], metric))
-            assert metric in log_dict[epochs[
-                0]], '{} does not contain metric {}'.format(
-                    args.json_logs[i], metric)
+            if metric not in log_dict[epochs[0]]:
+                raise KeyError('{} does not contain metric {}'.format(
+                    args.json_logs[i], metric))
 
             if 'mAP' in metric:
                 xs = np.arange(1, max(epochs) + 1)
diff --git a/tools/convert_datasets/pascal_voc.py b/tools/convert_datasets/pascal_voc.py
index 5fb5cb4b7080f134287494f7f0283bed42b351cb..029eeb0a974bdba573727d5f321b5bcf5b52425e 100644
--- a/tools/convert_datasets/pascal_voc.py
+++ b/tools/convert_datasets/pascal_voc.py
@@ -69,16 +69,17 @@ def cvt_annotations(devkit_path, years, split, out_file):
         years = [years]
     annotations = []
     for year in years:
-        filelist = osp.join(devkit_path, 'VOC{}/ImageSets/Main/{}.txt'.format(
-            year, split))
+        filelist = osp.join(devkit_path,
+                            'VOC{}/ImageSets/Main/{}.txt'.format(year, split))
         if not osp.isfile(filelist):
             print('filelist does not exist: {}, skip voc{} {}'.format(
                 filelist, year, split))
             return
         img_names = mmcv.list_from_file(filelist)
         xml_paths = [
-            osp.join(devkit_path, 'VOC{}/Annotations/{}.xml'.format(
-                year, img_name)) for img_name in img_names
+            osp.join(devkit_path,
+                     'VOC{}/Annotations/{}.xml'.format(year, img_name))
+            for img_name in img_names
         ]
         img_paths = [
             'VOC{}/JPEGImages/{}.jpg'.format(year, img_name)