diff --git a/mmdet/__init__.py b/mmdet/__init__.py
index 8b5e1ac77ad2a9596409bba4464f746d56ef3176..1c4f7e8fcc54041e383b72d48860ccbdc3afc41c 100644
--- a/mmdet/__init__.py
+++ b/mmdet/__init__.py
@@ -1 +1,3 @@
 from .version import __version__, short_version
+
+__all__ = ['__version__', 'short_version']
diff --git a/mmdet/core/__init__.py b/mmdet/core/__init__.py
index 1eb03f76acdfaff65c96e5316e3b4898b7a7af6a..81ee7311bcd5e7c534e55902ddc799a4e83225b3 100644
--- a/mmdet/core/__init__.py
+++ b/mmdet/core/__init__.py
@@ -1,8 +1,8 @@
-from .rpn_ops import *
-from .bbox_ops import *
-from .mask_ops import *
-from .losses import *
-from .eval import *
-from .parallel import *
-from .post_processing import *
-from .utils import *
+from .rpn_ops import *  # noqa: F401, F403
+from .bbox_ops import *  # noqa: F401, F403
+from .mask_ops import *  # noqa: F401, F403
+from .losses import *  # noqa: F401, F403
+from .eval import *  # noqa: F401, F403
+from .parallel import *  # noqa: F401, F403
+from .post_processing import *  # noqa: F401, F403
+from .utils import *  # noqa: F401, F403
diff --git a/mmdet/core/mask_ops/segms.py b/mmdet/core/mask_ops/segms.py
index b2ae6b69a1ff206b085799fa82527e1d17be0a4f..9809aae3a277b2bf49d6bba945f779a4f76a0461 100644
--- a/mmdet/core/mask_ops/segms.py
+++ b/mmdet/core/mask_ops/segms.py
@@ -1,3 +1,4 @@
+# flake8: noqa
 # This file is copied from Detectron.
 
 # Copyright (c) 2017-present, Facebook, Inc.
diff --git a/mmdet/core/rpn_ops/__init__.py b/mmdet/core/rpn_ops/__init__.py
index 4d5f9244dde2b244bbe42d54640e8a648277c506..0ff430a4be1825fbbaa3cb31d54de8790aa2fb90 100644
--- a/mmdet/core/rpn_ops/__init__.py
+++ b/mmdet/core/rpn_ops/__init__.py
@@ -1,2 +1,4 @@
-from .anchor_generator import *
-from .anchor_target import *
+from .anchor_generator import AnchorGenerator
+from .anchor_target import anchor_target
+
+__all__ = ['AnchorGenerator', 'anchor_target']
diff --git a/mmdet/core/utils/dist_utils.py b/mmdet/core/utils/dist_utils.py
index fc102c60d4ef9bbd2f513e0796d3e017eaf09559..e0361f9e41ab4242d4fbfc0118d12d9d74f336fb 100644
--- a/mmdet/core/utils/dist_utils.py
+++ b/mmdet/core/utils/dist_utils.py
@@ -38,7 +38,8 @@ def _init_dist_slurm(backend, **kwargs):
     raise NotImplementedError
 
 
-# modified from https://github.com/NVIDIA/apex/blob/master/apex/parallel/distributed.py#L9
+# modified from
+# https://github.com/NVIDIA/apex/blob/master/apex/parallel/distributed.py#L9
 def all_reduce_coalesced(tensors):
     buckets = OrderedDict()
     for tensor in tensors:
diff --git a/mmdet/datasets/__init__.py b/mmdet/datasets/__init__.py
index c5ec4e8f9f9438032ab6b5c30ab44fcd45df546a..6c3c8e4d77cdddf20fc6318a602229e6bc8e1cef 100644
--- a/mmdet/datasets/__init__.py
+++ b/mmdet/datasets/__init__.py
@@ -1,3 +1,10 @@
 from .coco import CocoDataset
+from .loader import (collate, GroupSampler, DistributedGroupSampler,
+                     build_dataloader)
+from .utils import DataContainer, to_tensor, random_scale, show_ann
 
-__all__ = ['CocoDataset']
+__all__ = [
+    'CocoDataset', 'collate', 'GroupSampler', 'DistributedGroupSampler',
+    'build_dataloader', 'DataContainer', 'to_tensor', 'random_scale',
+    'show_ann'
+]
diff --git a/mmdet/datasets/coco.py b/mmdet/datasets/coco.py
index 63b42b383dc48f92ee4f3fe6ca857dd3bdc5ceaf..f5463873e1ee1dfbf3528931acb3feee9f47b136 100644
--- a/mmdet/datasets/coco.py
+++ b/mmdet/datasets/coco.py
@@ -117,7 +117,10 @@ class CocoDataset(Dataset):
         gt_bboxes = []
         gt_labels = []
         gt_bboxes_ignore = []
-        # each mask consists of one or several polys, each poly is a list of float.
+        # Two formats are provided.
+        # 1. mask: a binary map of the same size as the image.
+        # 2. polys: each mask consists of one or several polys, each poly is a
+        # list of float.
         if with_mask:
             gt_masks = []
             gt_mask_polys = []
diff --git a/mmdet/datasets/utils/__init__.py b/mmdet/datasets/utils/__init__.py
index de3ea43bdf4e4cc526119054954fdd1acf811c38..4a46a9f97d26ef1007464fba45f3beb72eac717a 100644
--- a/mmdet/datasets/utils/__init__.py
+++ b/mmdet/datasets/utils/__init__.py
@@ -1,2 +1,4 @@
 from .data_container import DataContainer
-from .misc import *
+from .misc import to_tensor, random_scale, show_ann
+
+__all__ = ['DataContainer', 'to_tensor', 'random_scale', 'show_ann']
diff --git a/mmdet/models/__init__.py b/mmdet/models/__init__.py
index 07930688e533e6c65a4fce93209c495eeb17e756..aca6399e45e3e21c40d8e2470b233ac0d992888e 100644
--- a/mmdet/models/__init__.py
+++ b/mmdet/models/__init__.py
@@ -1,2 +1,10 @@
-from .detectors import *
-from .builder import *
+from .detectors import BaseDetector, RPN, FasterRCNN, MaskRCNN
+from .builder import (build_backbone, build_neck, build_rpn_head,
+                      build_roi_extractor, build_bbox_head, build_mask_head,
+                      build_detector)
+
+__all__ = [
+    'BaseDetector', 'RPN', 'FasterRCNN', 'MaskRCNN', 'build_backbone',
+    'build_neck', 'build_rpn_head', 'build_roi_extractor', 'build_bbox_head',
+    'build_mask_head', 'build_detector'
+]
diff --git a/mmdet/models/backbones/__init__.py b/mmdet/models/backbones/__init__.py
index f9e21e83d1469167d35de22c6511f6c09c260727..107507ceaf6d1a36cafe07197cefd9693a13a49b 100644
--- a/mmdet/models/backbones/__init__.py
+++ b/mmdet/models/backbones/__init__.py
@@ -1 +1,3 @@
 from .resnet import resnet
+
+__all__ = ['resnet']
diff --git a/mmdet/models/bbox_heads/convfc_bbox_head.py b/mmdet/models/bbox_heads/convfc_bbox_head.py
index 02e2a6b6d859e728a47f98fe857f1e71c2a6754a..f7bd7f80a9fc00bd3fc020ccd7d834eb45905067 100644
--- a/mmdet/models/bbox_heads/convfc_bbox_head.py
+++ b/mmdet/models/bbox_heads/convfc_bbox_head.py
@@ -43,17 +43,21 @@ class ConvFCRoIHead(BBoxHead):
         self.fc_out_channels = fc_out_channels
 
         # add shared convs and fcs
-        self.shared_convs, self.shared_fcs, last_layer_dim = self._add_conv_fc_branch(
-            self.num_shared_convs, self.num_shared_fcs, self.in_channels, True)
+        self.shared_convs, self.shared_fcs, last_layer_dim = \
+            self._add_conv_fc_branch(
+                self.num_shared_convs, self.num_shared_fcs, self.in_channels,
+                True)
         self.shared_out_channels = last_layer_dim
 
         # add cls specific branch
-        self.cls_convs, self.cls_fcs, self.cls_last_dim = self._add_conv_fc_branch(
-            self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
+        self.cls_convs, self.cls_fcs, self.cls_last_dim = \
+            self._add_conv_fc_branch(
+                self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
 
         # add reg specific branch
-        self.reg_convs, self.reg_fcs, self.reg_last_dim = self._add_conv_fc_branch(
-            self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
+        self.reg_convs, self.reg_fcs, self.reg_last_dim = \
+            self._add_conv_fc_branch(
+                self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
 
         if self.num_shared_fcs == 0 and not self.with_avg_pool:
             if self.num_cls_fcs == 0:
diff --git a/mmdet/models/necks/fpn.py b/mmdet/models/necks/fpn.py
index b4e21864bff4d6a8f6bd25d46c5ff81aa3068965..6a256cae3647bcafa54ee2671cb7167f75fc9f95 100644
--- a/mmdet/models/necks/fpn.py
+++ b/mmdet/models/necks/fpn.py
@@ -111,7 +111,8 @@ class FPN(nn.Module):
         ]
         # part 2: add extra levels
         if self.num_outs > len(outs):
-            # use max pool to get more levels on top of outputs (Faster R-CNN, Mask R-CNN)
+            # use max pool to get more levels on top of outputs
+            # (e.g., Faster R-CNN, Mask R-CNN)
             if not self.add_extra_convs:
                 for i in range(self.num_outs - used_backbone_levels):
                     outs.append(F.max_pool2d(outs[-1], 1, stride=2))
diff --git a/mmdet/models/utils/__init__.py b/mmdet/models/utils/__init__.py
index f9c0dac6f537f339d2ffc7b1e425f756f754efe6..c759ca9aba1a07d983ae3a0d0305faab910b17a5 100644
--- a/mmdet/models/utils/__init__.py
+++ b/mmdet/models/utils/__init__.py
@@ -1,5 +1,8 @@
 from .conv_module import ConvModule
 from .norm import build_norm_layer
-from .weight_init import *
+from .weight_init import xavier_init, normal_init, uniform_init, kaiming_init
 
-__all__ = ['ConvModule', 'build_norm_layer']
+__all__ = [
+    'ConvModule', 'build_norm_layer', 'xavier_init', 'normal_init',
+    'uniform_init', 'kaiming_init'
+]
diff --git a/mmdet/ops/__init__.py b/mmdet/ops/__init__.py
index 52e5808016cb94e63a7501cef7b1292805eb3491..5b63224c3476ad189445fe2f6ee2b7182aee661a 100644
--- a/mmdet/ops/__init__.py
+++ b/mmdet/ops/__init__.py
@@ -1,3 +1,5 @@
 from .nms import nms, soft_nms
 from .roi_align import RoIAlign, roi_align
 from .roi_pool import RoIPool, roi_pool
+
+__all__ = ['nms', 'soft_nms', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool']
diff --git a/mmdet/ops/nms/__init__.py b/mmdet/ops/nms/__init__.py
index 1cf8569b97b3a568458428776b1dbd6737882389..c4407041ad733d51eca3006b8aefa82e02bbfcde 100644
--- a/mmdet/ops/nms/__init__.py
+++ b/mmdet/ops/nms/__init__.py
@@ -1 +1,3 @@
 from .nms_wrapper import nms, soft_nms
+
+__all__ = ['nms', 'soft_nms']
diff --git a/mmdet/ops/roi_align/__init__.py b/mmdet/ops/roi_align/__init__.py
index ae27e21d6c78e9ffd8d13e8c71017ef6f365fb5e..4cb037904a24e613c4b15305cdf8ded6c0072a1b 100644
--- a/mmdet/ops/roi_align/__init__.py
+++ b/mmdet/ops/roi_align/__init__.py
@@ -1,2 +1,4 @@
 from .functions.roi_align import roi_align
 from .modules.roi_align import RoIAlign
+
+__all__ = ['roi_align', 'RoIAlign']
diff --git a/mmdet/ops/roi_align/gradcheck.py b/mmdet/ops/roi_align/gradcheck.py
index e2c51e64bb7b5eba9da3087d83cfa1083f965bbc..394cd69c5064e097becf12752755ee510045193b 100644
--- a/mmdet/ops/roi_align/gradcheck.py
+++ b/mmdet/ops/roi_align/gradcheck.py
@@ -5,7 +5,7 @@ from torch.autograd import gradcheck
 import os.path as osp
 import sys
 sys.path.append(osp.abspath(osp.join(__file__, '../../')))
-from roi_align import RoIAlign
+from roi_align import RoIAlign  # noqa: E402
 
 feat_size = 15
 spatial_scale = 1.0 / 8
diff --git a/mmdet/ops/roi_pool/__init__.py b/mmdet/ops/roi_pool/__init__.py
index 9c8506d319d3c9c2300860a6c0d64259e43e7916..eb2c57eabd6fa002c970c1f8d199d80d0a9b689c 100644
--- a/mmdet/ops/roi_pool/__init__.py
+++ b/mmdet/ops/roi_pool/__init__.py
@@ -1,2 +1,4 @@
 from .functions.roi_pool import roi_pool
 from .modules.roi_pool import RoIPool
+
+__all__ = ['roi_pool', 'RoIPool']
diff --git a/mmdet/ops/roi_pool/gradcheck.py b/mmdet/ops/roi_pool/gradcheck.py
index c27d317a03b10927c40f9ef74e68f3333d63d007..c39616086a240cf57cf115d4264eb32b9cc9f7c7 100644
--- a/mmdet/ops/roi_pool/gradcheck.py
+++ b/mmdet/ops/roi_pool/gradcheck.py
@@ -4,7 +4,7 @@ from torch.autograd import gradcheck
 import os.path as osp
 import sys
 sys.path.append(osp.abspath(osp.join(__file__, '../../')))
-from roi_pool import RoIPool
+from roi_pool import RoIPool  # noqa: E402
 
 feat = torch.randn(4, 16, 15, 15, requires_grad=True).cuda()
 rois = torch.Tensor([[0, 0, 0, 50, 50], [0, 10, 30, 43, 55],
diff --git a/setup.py b/setup.py
index 81dd749f14b44ccbf98a85b91686d5173797bfce..7cb44e538e3ce611a00135a588ebe37a486e3388 100644
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@ def get_hash():
 
 
 def write_version_py():
-    content = """# GENERATED VERSION FILE 
+    content = """# GENERATED VERSION FILE
 # TIME: {}
 __version__ = '{}'
diff --git a/tools/test.py b/tools/test.py
index f1fb9cda91e5ff2f9466de3880aef6f886a4e355..4e2ecd2fd09a7b7fefad43c5e08bade89c85bf9d 100644
--- a/tools/test.py
+++ b/tools/test.py
@@ -6,7 +6,7 @@ from mmcv.runner import load_checkpoint, parallel_test, obj_from_dict
 
 from mmdet import datasets
 from mmdet.core import scatter, MMDataParallel, results2json, coco_eval
-from mmdet.datasets.loader import collate, build_dataloader
+from mmdet.datasets import collate, build_dataloader
 from mmdet.models import build_detector, detectors
diff --git a/tools/train.py b/tools/train.py
index f60b5c0bffd3c602eca75cc0d03eb9eb383dd951..b72adebbc190071ef8e9093c24791b3c653becb7 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -13,7 +13,7 @@ from mmdet import datasets, __version__
 from mmdet.core import (init_dist, DistOptimizerHook, DistSamplerSeedHook,
                         MMDataParallel, MMDistributedDataParallel,
                         CocoDistEvalRecallHook, CocoDistEvalmAPHook)
-from mmdet.datasets.loader import build_dataloader
+from mmdet.datasets import build_dataloader
 from mmdet.models import build_detector, RPN
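Nearly every hunk above applies the same pattern: replace wildcard re-exports with explicit imports plus an `__all__` list (or mark intentional wildcard imports with `# noqa: F401, F403`), which is what allows tools/test.py and tools/train.py to import directly from `mmdet.datasets`. The sketch below is only an illustration of why the `__all__` list must stay in sync with the imports; the `pkg`/`ops` names are invented for the example and are not part of the patch.

# pkg/ops.py -- hypothetical module, standing in for something like mmdet/ops/nms
def nms(dets, iou_thr):
    # placeholder body; the real op filters overlapping detections
    return dets


def soft_nms(dets, iou_thr):
    # placeholder body; the real op decays scores instead of discarding boxes
    return dets


# pkg/__init__.py -- package init written in the style introduced by this patch
from .ops import nms, soft_nms

# `from pkg import *` now exports exactly these names. Listing a name here
# that is never imported or defined (what flake8 reports as F822) makes the
# wildcard import fail at run time with an AttributeError.
__all__ = ['nms', 'soft_nms']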