diff --git a/mmdet/core/bbox_ops/__init__.py b/mmdet/core/bbox_ops/__init__.py
index 885dab67c5ab3e15fba026d188fa870d6f64c9c8..22163f75ef5484a48ed223e417f83537522532a7 100644
--- a/mmdet/core/bbox_ops/__init__.py
+++ b/mmdet/core/bbox_ops/__init__.py
@@ -1,15 +1,14 @@
 from .geometry import bbox_overlaps
-from .sampling import (random_choice, bbox_assign, bbox_assign_via_overlaps,
+from .sampling import (random_choice, bbox_assign, bbox_assign_wrt_overlaps,
                        bbox_sampling, sample_positives, sample_negatives)
-from .transforms import (bbox_transform, bbox_transform_inv, bbox_flip,
-                         bbox_mapping, bbox_mapping_back, bbox2roi, roi2bbox,
-                         bbox2result)
+from .transforms import (bbox2delta, delta2bbox, bbox_flip, bbox_mapping,
+                         bbox_mapping_back, bbox2roi, roi2bbox, bbox2result)
 from .bbox_target import bbox_target
 
 __all__ = [
     'bbox_overlaps', 'random_choice', 'bbox_assign',
-    'bbox_assign_via_overlaps', 'bbox_sampling', 'sample_positives',
-    'sample_negatives', 'bbox_transform', 'bbox_transform_inv', 'bbox_flip',
+    'bbox_assign_wrt_overlaps', 'bbox_sampling', 'sample_positives',
+    'sample_negatives', 'bbox2delta', 'delta2bbox', 'bbox_flip',
     'bbox_mapping', 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',
     'bbox_target'
 ]
diff --git a/mmdet/core/bbox_ops/bbox_target.py b/mmdet/core/bbox_ops/bbox_target.py
index ce1f885e184a37779c7636f8c6053248e8cd3330..2e205c3850c9bc232b99826a23e79f416a3dbcfb 100644
--- a/mmdet/core/bbox_ops/bbox_target.py
+++ b/mmdet/core/bbox_ops/bbox_target.py
@@ -1,8 +1,7 @@
-import mmcv
 import torch
 
-from .geometry import bbox_overlaps
-from .transforms import bbox_transform, bbox_transform_inv
+from .transforms import bbox2delta
+from ..utils import multi_apply
 
 
 def bbox_target(pos_proposals_list,
@@ -13,33 +12,23 @@ def bbox_target(pos_proposals_list,
                 reg_num_classes=1,
                 target_means=[.0, .0, .0, .0],
                 target_stds=[1.0, 1.0, 1.0, 1.0],
-                return_list=False):
-    img_per_gpu = len(pos_proposals_list)
-    all_labels = []
-    all_label_weights = []
-    all_bbox_targets = []
-    all_bbox_weights = []
-    for img_id in range(img_per_gpu):
-        pos_proposals = pos_proposals_list[img_id]
-        neg_proposals = neg_proposals_list[img_id]
-        pos_gt_bboxes = pos_gt_bboxes_list[img_id]
-        pos_gt_labels = pos_gt_labels_list[img_id]
-        debug_img = debug_imgs[img_id] if cfg.debug else None
-        labels, label_weights, bbox_targets, bbox_weights = proposal_target_single(
-            pos_proposals, neg_proposals, pos_gt_bboxes, pos_gt_labels,
-            reg_num_classes, cfg, target_means, target_stds)
-        all_labels.append(labels)
-        all_label_weights.append(label_weights)
-        all_bbox_targets.append(bbox_targets)
-        all_bbox_weights.append(bbox_weights)
+                concat=True):
+    labels, label_weights, bbox_targets, bbox_weights = multi_apply(
+        proposal_target_single,
+        pos_proposals_list,
+        neg_proposals_list,
+        pos_gt_bboxes_list,
+        pos_gt_labels_list,
+        cfg=cfg,
+        reg_num_classes=reg_num_classes,
+        target_means=target_means,
+        target_stds=target_stds)
 
-    if return_list:
-        return all_labels, all_label_weights, all_bbox_targets, all_bbox_weights
-
-    labels = torch.cat(all_labels, 0)
-    label_weights = torch.cat(all_label_weights, 0)
-    bbox_targets = torch.cat(all_bbox_targets, 0)
-    bbox_weights = torch.cat(all_bbox_weights, 0)
+    if concat:
+        labels = torch.cat(labels, 0)
+        label_weights = torch.cat(label_weights, 0)
+        bbox_targets = torch.cat(bbox_targets, 0)
+        bbox_weights = torch.cat(bbox_weights, 0)
     return labels, label_weights, bbox_targets, bbox_weights
 
 
@@ -47,8 +36,8 @@ def proposal_target_single(pos_proposals,
                            neg_proposals,
                            pos_gt_bboxes,
                            pos_gt_labels,
-                           reg_num_classes,
                            cfg,
+                           reg_num_classes=1,
                            target_means=[.0, .0, .0, .0],
                            target_stds=[1.0, 1.0, 1.0, 1.0]):
     num_pos = pos_proposals.size(0)
@@ -62,8 +51,8 @@ def proposal_target_single(pos_proposals,
         labels[:num_pos] = pos_gt_labels
         pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
         label_weights[:num_pos] = pos_weight
-        pos_bbox_targets = bbox_transform(pos_proposals, pos_gt_bboxes,
-                                          target_means, target_stds)
+        pos_bbox_targets = bbox2delta(pos_proposals, pos_gt_bboxes,
+                                      target_means, target_stds)
         bbox_targets[:num_pos, :] = pos_bbox_targets
         bbox_weights[:num_pos, :] = 1
     if num_neg > 0:
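Note: `bbox_target` above now delegates its per-image loop to `multi_apply` from `mmdet.core.utils`. Roughly, that helper maps a function over several lists in parallel and transposes the per-image result tuples into per-field lists; a minimal sketch of the assumed behavior (not the helper's exact source):

```python
from functools import partial

def multi_apply(func, *args, **kwargs):
    # Bind the shared keyword options, then call `func` once per image,
    # i.e. on the i-th element of every positional list.
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    # Transpose [(labels_0, weights_0, ...), (labels_1, weights_1, ...), ...]
    # into ([labels_0, labels_1, ...], [weights_0, weights_1, ...], ...).
    return tuple(map(list, zip(*map_results)))
```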
diff --git a/mmdet/core/bbox_ops/sampling.py b/mmdet/core/bbox_ops/sampling.py
index bcee761e10e5ef8537235cd629d187f874bb08fd..28043182acf41583373a86729bf10f309f384e8a 100644
--- a/mmdet/core/bbox_ops/sampling.py
+++ b/mmdet/core/bbox_ops/sampling.py
@@ -20,30 +20,36 @@ def random_choice(gallery, num):
 
 def bbox_assign(proposals,
                 gt_bboxes,
-                gt_crowd_bboxes=None,
+                gt_bboxes_ignore=None,
                 gt_labels=None,
                 pos_iou_thr=0.5,
                 neg_iou_thr=0.5,
                 min_pos_iou=.0,
                 crowd_thr=-1):
-    """Assign a corresponding gt bbox or background to each proposal/anchor
-    This function assign a gt bbox to every proposal, each proposals will be
-    assigned with -1, 0, or a positive number. -1 means don't care, 0 means
-    negative sample, positive number is the index (1-based) of assigned gt.
-    If gt_crowd_bboxes is not None, proposals which have iof(intersection over foreground)
-    with crowd bboxes over crowd_thr will be ignored
+    """Assign a corresponding gt bbox or background to each proposal/anchor.
+
+    Each proposal will be assigned `-1`, `0`, or a positive integer.
+
+    - -1: don't care
+    - 0: negative sample, no assigned gt
+    - positive integer: positive sample, index (1-based) of assigned gt
+
+    If `gt_bboxes_ignore` is specified, bboxes whose IoF (intersection over
+    foreground) with `gt_bboxes_ignore` exceeds `crowd_thr` will be ignored.
+
     Args:
-        proposals(Tensor): proposals or RPN anchors, shape (n, 4)
-        gt_bboxes(Tensor): shape (k, 4)
-        gt_crowd_bboxes(Tensor): shape(m, 4)
-        gt_labels(Tensor, optional): shape (k, )
-        pos_iou_thr(float): iou threshold for positive bboxes
-        neg_iou_thr(float or tuple): iou threshold for negative bboxes
-        min_pos_iou(float): minimum iou for a bbox to be considered as a positive bbox,
-                            for RPN, it is usually set as 0, for Fast R-CNN,
-                            it is usually set as pos_iou_thr
-        crowd_thr: ignore proposals which have iof(intersection over foreground) with
-        crowd bboxes over crowd_thr
+        proposals (Tensor): Proposals or RPN anchors, shape (n, 4).
+        gt_bboxes (Tensor): Ground truth bboxes, shape (k, 4).
+        gt_bboxes_ignore (Tensor, optional): Ignored gt bboxes, shape (m, 4).
+        gt_labels (Tensor, optional): Labels of gt_bboxes, shape (k, ).
+        pos_iou_thr (float): IoU threshold for positive bboxes.
+        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
+        min_pos_iou (float): Minimum IoU for a bbox to be considered as a
+            positive bbox. For RPN, it is usually set as 0.3; for Fast R-CNN,
+            it is usually set as `pos_iou_thr`.
+        crowd_thr (float): IoF threshold for ignoring bboxes. A negative
+            value means not ignoring any bboxes.
+
     Returns:
-        tuple: (assigned_gt_inds, argmax_overlaps, max_overlaps), shape (n, )
+        tuple: (assigned_gt_inds, [assigned_labels, ]argmax_overlaps, max_overlaps), shape (n, )
     """
@@ -54,20 +60,20 @@ def bbox_assign(proposals,
         raise ValueError('No gt bbox or proposals')
 
     # ignore proposals according to crowd bboxes
-    if (crowd_thr > 0) and (gt_crowd_bboxes is
-                            not None) and (gt_crowd_bboxes.numel() > 0):
-        crowd_overlaps = bbox_overlaps(proposals, gt_crowd_bboxes, mode='iof')
+    if (crowd_thr > 0) and (gt_bboxes_ignore is
+                            not None) and (gt_bboxes_ignore.numel() > 0):
+        crowd_overlaps = bbox_overlaps(proposals, gt_bboxes_ignore, mode='iof')
         crowd_max_overlaps, _ = crowd_overlaps.max(dim=1)
         crowd_bboxes_inds = torch.nonzero(
             crowd_max_overlaps > crowd_thr).long()
         if crowd_bboxes_inds.numel() > 0:
             overlaps[crowd_bboxes_inds, :] = -1
 
-    return bbox_assign_via_overlaps(overlaps, gt_labels, pos_iou_thr,
+    return bbox_assign_wrt_overlaps(overlaps, gt_labels, pos_iou_thr,
                                     neg_iou_thr, min_pos_iou)
 
 
-def bbox_assign_via_overlaps(overlaps,
+def bbox_assign_wrt_overlaps(overlaps,
                              gt_labels=None,
                              pos_iou_thr=0.5,
                              neg_iou_thr=0.5,
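The -1 / 0 / positive-index convention documented above can be illustrated with a toy overlaps matrix. This is a simplified sketch of the assignment rule only (hand-picked thresholds; it omits the low-quality matching via `min_pos_iou` that `bbox_assign_wrt_overlaps` also performs):

```python
import torch

# overlaps[i, j] = IoU between proposal i and gt j
overlaps = torch.tensor([[0.9, 0.0],   # clear match with gt 0
                         [0.1, 0.2],   # low IoU everywhere -> negative
                         [0.4, 0.6]])  # match with gt 1
pos_iou_thr, neg_iou_thr = 0.5, 0.3

max_overlaps, argmax_overlaps = overlaps.max(dim=1)
assigned_gt_inds = overlaps.new_full((overlaps.size(0), ), -1).long()
assigned_gt_inds[max_overlaps < neg_iou_thr] = 0            # negatives
pos_inds = max_overlaps >= pos_iou_thr
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1  # 1-based gt index
print(assigned_gt_inds)  # tensor([1, 0, 2]); -1 would mean "don't care"
```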
diff --git a/mmdet/core/bbox_ops/transforms.py b/mmdet/core/bbox_ops/transforms.py
index ca45d157dce0e511085101cf29ec53e9ba505cc3..0d8f6f44f20df5c019dc8ed9ea46c2eb6c411c66 100644
--- a/mmdet/core/bbox_ops/transforms.py
+++ b/mmdet/core/bbox_ops/transforms.py
@@ -3,7 +3,7 @@ import numpy as np
 import torch
 
 
-def bbox_transform(proposals, gt, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]):
+def bbox2delta(proposals, gt, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]):
     assert proposals.size() == gt.size()
 
     proposals = proposals.float()
@@ -31,12 +31,12 @@ def bbox_transform(proposals, gt, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]):
     return deltas
 
 
-def bbox_transform_inv(rois,
-                       deltas,
-                       means=[0, 0, 0, 0],
-                       stds=[1, 1, 1, 1],
-                       max_shape=None,
-                       wh_ratio_clip=16 / 1000):
+def delta2bbox(rois,
+               deltas,
+               means=[0, 0, 0, 0],
+               stds=[1, 1, 1, 1],
+               max_shape=None,
+               wh_ratio_clip=16 / 1000):
     means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)
     stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)
     denorm_deltas = deltas * stds + means
@@ -69,10 +69,14 @@ def bbox_transform_inv(rois,
 
 
 def bbox_flip(bboxes, img_shape):
-    """Flip bboxes horizontally
+    """Flip bboxes horizontally.
+
     Args:
-        bboxes(Tensor): shape (..., 4*k)
-        img_shape(Tensor): image shape
+        bboxes (Tensor or ndarray): Shape (..., 4*k)
+        img_shape (tuple): Image shape.
+
+    Returns:
+        Same type as `bboxes`: Flipped bboxes.
     """
     if isinstance(bboxes, torch.Tensor):
         assert bboxes.shape[-1] % 4 == 0
@@ -101,8 +105,11 @@ def bbox_mapping_back(bboxes, img_shape, scale_factor, flip):
 
 def bbox2roi(bbox_list):
     """Convert a list of bboxes to roi format.
+
     Args:
-        bbox_list (Tensor): a list of bboxes corresponding to a list of images
+        bbox_list (list[Tensor]): a list of bboxes corresponding to a batch
+            of images.
+
     Returns:
         Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]
     """
@@ -129,11 +136,13 @@ def roi2bbox(rois):
 
 
 def bbox2result(bboxes, labels, num_classes):
-    """Convert detection results to a list of numpy arrays
+    """Convert detection results to a list of numpy arrays.
+
     Args:
         bboxes (Tensor): shape (n, 5)
         labels (Tensor): shape (n, )
         num_classes (int): class number, including background class
+
     Returns:
         list(ndarray): bbox results of each class
     """
diff --git a/mmdet/core/eval/eval_hooks.py b/mmdet/core/eval/eval_hooks.py
index c02aec975c7f4cfa9555172b0dcad3235ec09434..870830ef3960cad77786546708d1836d7b5ae663 100644
--- a/mmdet/core/eval/eval_hooks.py
+++ b/mmdet/core/eval/eval_hooks.py
@@ -11,7 +11,6 @@ from pycocotools.cocoeval import COCOeval
 from torch.utils.data import Dataset
 
 from .coco_utils import results2json, fast_eval_recall
-from .recall import eval_recalls
 from ..parallel import scatter
 from mmdet import datasets
 from mmdet.datasets.loader import collate
diff --git a/mmdet/core/eval/mean_ap.py b/mmdet/core/eval/mean_ap.py
index 9a33f7640409993db3e11cedd587f1cd14c38aa5..5f47c1368af0e3385bc8e49cc5d35b99726ce722 100644
--- a/mmdet/core/eval/mean_ap.py
+++ b/mmdet/core/eval/mean_ap.py
@@ -9,9 +9,9 @@ def average_precision(recalls, precisions, mode='area'):
     """Calculate average precision (for single or multiple scales).
 
     Args:
-        recalls(ndarray): shape (num_scales, num_dets) or (num_dets, )
-        precisions(ndarray): shape (num_scales, num_dets) or (num_dets, )
-        mode(str): 'area' or '11points', 'area' means calculating the area
+        recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
+        precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
+        mode (str): 'area' or '11points', 'area' means calculating the area
             under precision-recall curve, '11points' means calculating
             the average precision of recalls at [0, 0.1, ..., 1]
 
@@ -60,11 +60,11 @@ def tpfp_imagenet(det_bboxes,
     """Check if detected bboxes are true positive or false positive.
 
     Args:
-        det_bbox(ndarray): the detected bbox
-        gt_bboxes(ndarray): ground truth bboxes of this image
-        gt_ignore(ndarray): indicate if gts are ignored for evaluation or not
-        default_iou_thr(float): the iou thresholds for medium and large bboxes
-        area_ranges(list or None): gt bbox area ranges
+        det_bboxes (ndarray): the detected bboxes
+        gt_bboxes (ndarray): ground truth bboxes of this image
+        gt_ignore (ndarray): indicate if gts are ignored for evaluation or not
+        default_iou_thr (float): the IoU threshold for medium and large bboxes
+        area_ranges (list or None): gt bbox area ranges
 
     Returns:
         tuple: two arrays (tp, fp) whose elements are 0 and 1
@@ -115,10 +115,10 @@ def tpfp_imagenet(det_bboxes,
                     max_iou = ious[i, j]
                     matched_gt = j
             # there are 4 cases for a det bbox:
-            # 1. this det bbox matches a gt, tp = 1, fp = 0
-            # 2. this det bbox matches an ignored gt, tp = 0, fp = 0
-            # 3. this det bbox matches no gt and within area range, tp = 0, fp = 1
-            # 4. this det bbox matches no gt but is beyond area range, tp = 0, fp = 0
+            # 1. it matches a gt, tp = 1, fp = 0
+            # 2. it matches an ignored gt, tp = 0, fp = 0
+            # 3. it matches no gt and is within area range, tp = 0, fp = 1
+            # 4. it matches no gt but is beyond area range, tp = 0, fp = 0
             if matched_gt >= 0:
                 gt_covered[matched_gt] = 1
                 if not (gt_ignore[matched_gt] or gt_area_ignore[matched_gt]):
@@ -137,10 +137,10 @@ def tpfp_default(det_bboxes, gt_bboxes, gt_ignore, iou_thr, area_ranges=None):
     """Check if detected bboxes are true positive or false positive.
 
     Args:
-        det_bbox(ndarray): the detected bbox
-        gt_bboxes(ndarray): ground truth bboxes of this image
-        gt_ignore(ndarray): indicate if gts are ignored for evaluation or not
-        iou_thr(float): the iou thresholds
+        det_bboxes (ndarray): the detected bboxes
+        gt_bboxes (ndarray): ground truth bboxes of this image
+        gt_ignore (ndarray): indicate if gts are ignored for evaluation or not
+        iou_thr (float): the IoU threshold
 
     Returns:
         tuple: (tp, fp), two arrays whose elements are 0 and 1
@@ -227,15 +227,16 @@ def eval_map(det_results,
     """Evaluate mAP of a dataset.
 
     Args:
-        det_results(list): a list of list, [[cls1_det, cls2_det, ...], ...]
-        gt_bboxes(list): ground truth bboxes of each image, a list of K*4 array
-        gt_labels(list): ground truth labels of each image, a list of K array
-        gt_ignore(list): gt ignore indicators of each image, a list of K array
-        scale_ranges(list, optional): [(min1, max1), (min2, max2), ...]
-        iou_thr(float): IoU threshold
-        dataset(None or str): dataset name, there are minor differences in
+        det_results (list): a list of lists, [[cls1_det, cls2_det, ...], ...]
+        gt_bboxes (list): ground truth bboxes of each image, a list of K*4
+            arrays.
+        gt_labels (list): ground truth labels of each image, a list of K arrays
+        gt_ignore (list): gt ignore indicators of each image, a list of K arrays
+        scale_ranges (list, optional): [(min1, max1), (min2, max2), ...]
+        iou_thr (float): IoU threshold
+        dataset (None or str): dataset name, there are minor differences in
-            metrics for different datsets, e.g. "voc07", "imagenet_det", etc.
+            metrics for different datasets, e.g. "voc07", "imagenet_det", etc.
-        print_summary(bool): whether to print the mAP summary
+        print_summary (bool): whether to print the mAP summary
 
     Returns:
         tuple: (mAP, [dict, dict, ...])
@@ -265,7 +266,8 @@ def eval_map(det_results,
                       area_ranges) for j in range(len(cls_dets))
         ]
         tp, fp = tuple(zip(*tpfp))
-        # calculate gt number of each scale, gts ignored or beyond scale are not counted
+        # calculate gt number of each scale, gts ignored or beyond scale
+        # are not counted
         num_gts = np.zeros(num_scales, dtype=int)
         for j, bbox in enumerate(cls_gts):
             if area_ranges is None:
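For reference, the 'area' mode that `average_precision` documents is the usual area-under-the-PR-curve computation. A single-scale sketch of that standard calculation (not a copy of the function above):

```python
import numpy as np

def ap_area_single(recalls, precisions):
    # Pad so the curve starts at recall 0 and ends at recall 1.
    mrec = np.concatenate(([0.0], recalls, [1.0]))
    mpre = np.concatenate(([0.0], precisions, [0.0]))
    # Make precision monotonically non-increasing (right-to-left max).
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = max(mpre[i - 1], mpre[i])
    # Sum rectangle areas where recall actually increases.
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])

# e.g. ap_area_single(np.array([0.5, 1.0]), np.array([1.0, 0.5])) == 0.75
```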
diff --git a/mmdet/core/losses/losses.py b/mmdet/core/losses/losses.py
index 4f183e13d8a8aa88b87ce42e443faf10b65d323f..d0e642f807c94844d4442c8ef119e0a11ec2820f 100644
--- a/mmdet/core/losses/losses.py
+++ b/mmdet/core/losses/losses.py
@@ -30,13 +30,13 @@ def sigmoid_focal_loss(pred,
                        weight,
                        gamma=2.0,
                        alpha=0.25,
-                       size_average=True):
+                       reduction='elementwise_mean'):
     pred_sigmoid = pred.sigmoid()
     pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
     weight = (alpha * target + (1 - alpha) * (1 - target)) * weight
     weight = weight * pt.pow(gamma)
     return F.binary_cross_entropy_with_logits(
-        pred, target, weight, size_average=size_average)
+        pred, target, weight, reduction=reduction)
 
 
 def weighted_sigmoid_focal_loss(pred,
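The `reduction` keyword here follows PyTorch 0.4.1's string-based API for loss reduction ('none' | 'elementwise_mean' | 'sum'; newer PyTorch spells the middle one 'mean'). A toy usage sketch:

```python
import torch
from mmdet.core.losses.losses import sigmoid_focal_loss

pred = torch.randn(4, 3)                      # raw logits, 4 samples x 3 classes
target = torch.randint(0, 2, (4, 3)).float()  # binary labels
weight = torch.ones_like(pred)                # per-element weights

loss = sigmoid_focal_loss(pred, target, weight, gamma=2.0, alpha=0.25,
                          reduction='elementwise_mean')
```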
diff --git a/mmdet/core/rpn_ops/anchor_target.py b/mmdet/core/rpn_ops/anchor_target.py
index 3cf651b5c46e98da6673e58c0299c6f7a90e632f..f449507499e9f35092a2f7430c70f5c2fe0ff24c 100644
--- a/mmdet/core/rpn_ops/anchor_target.py
+++ b/mmdet/core/rpn_ops/anchor_target.py
@@ -1,6 +1,6 @@
 import torch
 
-from ..bbox_ops import bbox_assign, bbox_transform, bbox_sampling
+from ..bbox_ops import bbox_assign, bbox2delta, bbox_sampling
 from ..utils import multi_apply
 
 
@@ -99,8 +99,8 @@ def anchor_target_single(flat_anchors, valid_flags, gt_bboxes, img_meta,
     if len(pos_inds) > 0:
         pos_anchors = anchors[pos_inds, :]
         pos_gt_bbox = gt_bboxes[assigned_gt_inds[pos_inds] - 1, :]
-        pos_bbox_targets = bbox_transform(pos_anchors, pos_gt_bbox,
-                                          target_means, target_stds)
+        pos_bbox_targets = bbox2delta(pos_anchors, pos_gt_bbox, target_means,
+                                      target_stds)
         bbox_targets[pos_inds, :] = pos_bbox_targets
         bbox_weights[pos_inds, :] = 1.0
         labels[pos_inds] = 1
diff --git a/mmdet/models/bbox_heads/bbox_head.py b/mmdet/models/bbox_heads/bbox_head.py
index 941903aba544b2d5ee7f7f6685664f4ab6f27df4..67dba03959231b5ed0f784ac97542911b56cc785 100644
--- a/mmdet/models/bbox_heads/bbox_head.py
+++ b/mmdet/models/bbox_heads/bbox_head.py
@@ -1,7 +1,7 @@
 import torch.nn as nn
 import torch.nn.functional as F
 
-from mmdet.core import (bbox_transform_inv, multiclass_nms, bbox_target,
+from mmdet.core import (delta2bbox, multiclass_nms, bbox_target,
                         weighted_cross_entropy, weighted_smoothl1, accuracy)
 
 
@@ -101,9 +101,8 @@ class BBoxHead(nn.Module):
         scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
 
         if bbox_pred is not None:
-            bboxes = bbox_transform_inv(rois[:, 1:], bbox_pred,
-                                        self.target_means, self.target_stds,
-                                        img_shape)
+            bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means,
+                                self.target_stds, img_shape)
         else:
             bboxes = rois[:, 1:]
             # TODO: add clip here
diff --git a/mmdet/models/roi_extractors/single_level.py b/mmdet/models/roi_extractors/single_level.py
index b850d6ab680eaf0ef3a03890d75181d570d3b9f4..6aa29e598e58696634d7934ecc00bb7105084d62 100644
--- a/mmdet/models/roi_extractors/single_level.py
+++ b/mmdet/models/roi_extractors/single_level.py
@@ -41,10 +41,10 @@ class SingleLevelRoI(nn.Module):
     def map_roi_levels(self, rois, num_levels):
         """Map rois to corresponding feature levels (0-based) by scales.
 
-        scale < finest_scale: level 0
-        finest_scale <= scale < finest_scale * 2: level 1
-        finest_scale * 2 <= scale < finest_scale * 4: level 2
-        scale >= finest_scale * 4: level 3
+        - scale < finest_scale: level 0
+        - finest_scale <= scale < finest_scale * 2: level 1
+        - finest_scale * 2 <= scale < finest_scale * 4: level 2
+        - scale >= finest_scale * 4: level 3
         """
         scale = torch.sqrt(
             (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
@@ -52,12 +52,13 @@ class SingleLevelRoI(nn.Module):
         target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
         return target_lvls
 
-    def sample_proposals(self, proposals, gt_bboxes, gt_crowds, gt_labels,
-                         cfg):
+    def sample_proposals(self, proposals, gt_bboxes, gt_bboxes_ignore,
+                         gt_labels, cfg):
         proposals = proposals[:, :4]
         assigned_gt_inds, assigned_labels, argmax_overlaps, max_overlaps = \
-            bbox_assign(proposals, gt_bboxes, gt_crowds, gt_labels,
-            cfg.pos_iou_thr, cfg.neg_iou_thr, cfg.min_pos_iou, cfg.crowd_thr)
+            bbox_assign(proposals, gt_bboxes, gt_bboxes_ignore, gt_labels,
+                        cfg.pos_iou_thr, cfg.neg_iou_thr, cfg.min_pos_iou,
+                        cfg.crowd_thr)
 
         if cfg.add_gt_as_proposals:
             proposals = torch.cat([gt_bboxes, proposals], dim=0)
@@ -80,7 +81,8 @@ class SingleLevelRoI(nn.Module):
         pos_gt_bboxes = gt_bboxes[pos_assigned_gt_inds, :]
         pos_gt_labels = assigned_labels[pos_inds]
 
-        return (pos_proposals, neg_proposals, pos_assigned_gt_inds, pos_gt_bboxes, pos_gt_labels)
+        return (pos_proposals, neg_proposals, pos_assigned_gt_inds,
+                pos_gt_bboxes, pos_gt_labels)
 
     def forward(self, feats, rois):
         """Extract roi features with the roi layer. If multiple feature levels
diff --git a/mmdet/models/rpn_heads/rpn_head.py b/mmdet/models/rpn_heads/rpn_head.py
index 68a81833e099f508ae4f776e62558fd3afb3e1d9..e67d7ae973f05c60c8e226009cfb4234c0894f69 100644
--- a/mmdet/models/rpn_heads/rpn_head.py
+++ b/mmdet/models/rpn_heads/rpn_head.py
@@ -5,7 +5,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 
-from mmdet.core import (AnchorGenerator, anchor_target, bbox_transform_inv,
+from mmdet.core import (AnchorGenerator, anchor_target, delta2bbox,
                         multi_apply, weighted_cross_entropy, weighted_smoothl1,
                         weighted_binary_cross_entropy)
 from mmdet.ops import nms
@@ -225,9 +225,8 @@ class RPNHead(nn.Module):
                 rpn_bbox_pred = rpn_bbox_pred[order, :]
                 anchors = anchors[order, :]
                 scores = scores[order]
-            proposals = bbox_transform_inv(anchors, rpn_bbox_pred,
-                                           self.target_means, self.target_stds,
-                                           img_shape)
+            proposals = delta2bbox(anchors, rpn_bbox_pred, self.target_means,
+                                   self.target_stds, img_shape)
             w = proposals[:, 2] - proposals[:, 0] + 1
             h = proposals[:, 3] - proposals[:, 1] + 1
             valid_inds = torch.nonzero((w >= cfg.min_bbox_size) &