From 0d5233a32dac4288760f6d819ddfef9c1e020cf7 Mon Sep 17 00:00:00 2001
From: Kai Chen <chenkaidev@gmail.com>
Date: Fri, 23 Aug 2019 20:25:17 +0800
Subject: [PATCH] Make data pre-processing pipeline customizable (#935)

* define data pipelines

* update two config files

* minor fix for config files

* allow img_scale to be optional and update config

* add some docstrings

* add extra aug to transform

* bug fix for mask resizing

* fix cropping

* add faster rcnn example

* fix imports

* fix robustness testing

* add img_norm_cfg to img_meta

* fix the inference api with the new data pipeline

* fix proposal loading

* delete args of DefaultFormatBundle

* add more configs

* update configs

* bug fix

* add a brief doc

* update gt_labels in RandomCrop

* fix key error for new apis

* bug fix for masks of crowd bboxes

* add argument data_root

* minor fix

* update new hrnet configs

* update docs

* rename MultiscaleFlipAug to MultiScaleFlipAug

* add __repr__ for all transforms

* move DATA_PIPELINE.md to docs/

* fix image url
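
The bullets above summarize the new scheme: each dataset entry in a config now
takes a single `pipeline` argument, an ordered list of transform dicts, which
replaces the old per-dataset keyword arguments (`img_scale`, `img_norm_cfg`,
`size_divisor`, `flip_ratio`, `with_mask`, `with_crowd`, `with_label`,
`test_mode`). A minimal sketch of the new config style, condensed from the
configs updated below (test-time augmentation is wrapped in
`MultiScaleFlipAug`; see docs/DATA_PIPELINE.md, added by this patch, for the
full set of transforms):

    img_norm_cfg = dict(
        mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375],
        to_rgb=True)
    # Each dict names a registered transform; they are applied in order.
    train_pipeline = [
        dict(type='LoadImageFromFile'),                # read the image
        dict(type='LoadAnnotations', with_bbox=True),  # load gt boxes/labels
        dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        dict(type='RandomFlip', flip_ratio=0.5),
        dict(type='Normalize', **img_norm_cfg),
        dict(type='Pad', size_divisor=32),             # pad to a multiple of 32
        dict(type='DefaultFormatBundle'),              # to tensor + DataContainer
        dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
    ]
    test_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(
            type='MultiScaleFlipAug',  # wraps per-image transforms for TTA
            img_scale=(1333, 800),
            flip=False,
            transforms=[
                dict(type='Resize', keep_ratio=True),
                dict(type='RandomFlip'),
                dict(type='Normalize', **img_norm_cfg),
                dict(type='Pad', size_divisor=32),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img']),
            ])
    ]
    data = dict(
        train=dict(
            type='CocoDataset',
            ann_file='data/coco/annotations/instances_train2017.json',
            img_prefix='data/coco/train2017/',
            pipeline=train_pipeline),
        test=dict(
            type='CocoDataset',
            ann_file='data/coco/annotations/instances_val2017.json',
            img_prefix='data/coco/val2017/',
            pipeline=test_pipeline))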
---
 configs/cascade_mask_rcnn_r101_fpn_1x.py      |  76 +--
 configs/cascade_mask_rcnn_r50_caffe_c4_1x.py  |  78 +--
 configs/cascade_mask_rcnn_r50_fpn_1x.py       |  76 +--
 .../cascade_mask_rcnn_x101_32x4d_fpn_1x.py    |  76 +--
 .../cascade_mask_rcnn_x101_64x4d_fpn_1x.py    |  76 +--
 configs/cascade_rcnn_r101_fpn_1x.py           |  76 +--
 configs/cascade_rcnn_r50_caffe_c4_1x.py       |  78 +--
 configs/cascade_rcnn_r50_fpn_1x.py            |  76 +--
 configs/cascade_rcnn_x101_32x4d_fpn_1x.py     |  76 +--
 configs/cascade_rcnn_x101_64x4d_fpn_1x.py     |  76 +--
 .../faster_rcnn_r50_fpn_1x_cityscapes.py      |  53 +-
 .../mask_rcnn_r50_fpn_1x_cityscapes.py        |  53 +-
 ...ascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py |  49 +-
 .../cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py    |  80 +--
 .../dcn/faster_rcnn_dconv_c3-c5_r50_fpn_1x.py |  49 +-
 ...ster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py |  49 +-
 configs/dcn/faster_rcnn_dpool_r50_fpn_1x.py   |  49 +-
 .../faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py    |  49 +-
 configs/dcn/faster_rcnn_mdpool_r50_fpn_1x.py  |  49 +-
 .../dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py   |  53 +-
 .../double_heads/dh_faster_rcnn_r50_fpn_1x.py |  49 +-
 .../faster_rcnn_r50_fpn_attention_0010_1x.py  |  49 +-
 ...ster_rcnn_r50_fpn_attention_0010_dcn_1x.py |  49 +-
 .../faster_rcnn_r50_fpn_attention_1111_1x.py  |  49 +-
 ...ster_rcnn_r50_fpn_attention_1111_dcn_1x.py |  49 +-
 configs/fast_mask_rcnn_r101_fpn_1x.py         |  59 +-
 configs/fast_mask_rcnn_r50_caffe_c4_1x.py     |  61 +-
 configs/fast_mask_rcnn_r50_fpn_1x.py          |  59 +-
 configs/fast_rcnn_r101_fpn_1x.py              |  64 +-
 configs/fast_rcnn_r50_caffe_c4_1x.py          |  66 +-
 configs/fast_rcnn_r50_fpn_1x.py               |  64 +-
 configs/faster_rcnn_ohem_r50_fpn_1x.py        |  49 +-
 configs/faster_rcnn_r101_fpn_1x.py            |  49 +-
 configs/faster_rcnn_r50_caffe_c4_1x.py        |  51 +-
 configs/faster_rcnn_r50_fpn_1x.py             |  49 +-
 configs/faster_rcnn_x101_32x4d_fpn_1x.py      |  49 +-
 configs/faster_rcnn_x101_64x4d_fpn_1x.py      |  49 +-
 ...train_640_800_r101_caffe_fpn_gn_2x_4gpu.py |  55 +-
 ...os_mstrain_640_800_x101_64x4d_fpn_gn_2x.py |  55 +-
 configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py |  50 +-
 configs/fp16/faster_rcnn_r50_fpn_fp16_1x.py   |  49 +-
 configs/fp16/mask_rcnn_r50_fpn_fp16_1x.py     |  49 +-
 configs/fp16/retinanet_r50_fpn_fp16_1x.py     |  50 +-
 .../mask_rcnn_r16_gcb_c3-c5_r50_fpn_1x.py     |  53 +-
 ...sk_rcnn_r16_gcb_c3-c5_r50_fpn_syncbn_1x.py |  53 +-
 .../mask_rcnn_r4_gcb_c3-c5_r50_fpn_1x.py      |  53 +-
 ...ask_rcnn_r4_gcb_c3-c5_r50_fpn_syncbn_1x.py |  53 +-
 configs/gcnet/mask_rcnn_r50_fpn_sbn_1x.py     |  49 +-
 configs/ghm/retinanet_ghm_r50_fpn_1x.py       |  56 +-
 configs/gn+ws/faster_rcnn_r50_fpn_gn_ws_1x.py |  49 +-
 .../mask_rcnn_r50_fpn_gn_ws_20_23_24e.py      |  49 +-
 configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py   |  49 +-
 .../mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py      |  49 +-
 configs/gn/mask_rcnn_r101_fpn_gn_2x.py        |  49 +-
 configs/gn/mask_rcnn_r50_fpn_gn_2x.py         |  49 +-
 configs/gn/mask_rcnn_r50_fpn_gn_contrib_2x.py |  49 +-
 .../grid_rcnn/grid_rcnn_gn_head_r50_fpn_2x.py |  49 +-
 .../grid_rcnn_gn_head_x101_32x4d_fpn_2x.py    |  49 +-
 .../ga_fast_r50_caffe_fpn_1x.py               |  60 +-
 .../ga_faster_r50_caffe_fpn_1x.py             |  49 +-
 .../ga_faster_x101_32x4d_fpn_1x.py            |  49 +-
 .../ga_retinanet_r50_caffe_fpn_1x.py          |  50 +-
 .../ga_retinanet_x101_32x4d_fpn_1x.py         |  50 +-
 .../ga_rpn_r101_caffe_rpn_1x.py               |  49 +-
 .../ga_rpn_r50_caffe_fpn_1x.py                |  49 +-
 .../ga_rpn_x101_32x4d_fpn_1x.py               |  49 +-
 .../cascade_mask_rcnn_hrnetv2p_w32_20e.py     |  81 +--
 .../hrnet/cascade_rcnn_hrnetv2p_w32_20e.py    |  94 ++-
 configs/hrnet/faster_rcnn_hrnetv2p_w18_1x.py  |  55 +-
 configs/hrnet/faster_rcnn_hrnetv2p_w32_1x.py  |  53 +-
 configs/hrnet/faster_rcnn_hrnetv2p_w40_1x.py  |  55 +-
 configs/hrnet/fcos_hrnetv2p_w32_gn_1x_4gpu.py |  54 +-
 configs/hrnet/htc_hrnetv2p_w32_20e.py         |  87 ++-
 configs/hrnet/mask_rcnn_hrnetv2p_w18_1x.py    |  62 +-
 configs/hrnet/mask_rcnn_hrnetv2p_w32_1x.py    |  62 +-
 ...-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py |  87 ++-
 configs/htc/htc_r101_fpn_20e.py               |  82 ++-
 configs/htc/htc_r50_fpn_1x.py                 |  82 ++-
 configs/htc/htc_r50_fpn_20e.py                |  82 ++-
 .../htc/htc_without_semantic_r50_fpn_1x.py    |  76 +--
 configs/htc/htc_x101_32x4d_fpn_20e_16gpu.py   |  55 +-
 configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py   |  82 ++-
 .../libra_rcnn/libra_fast_rcnn_r50_fpn_1x.py  |  59 +-
 .../libra_faster_rcnn_r101_fpn_1x.py          |  49 +-
 .../libra_faster_rcnn_r50_fpn_1x.py           |  49 +-
 .../libra_faster_rcnn_x101_64x4d_fpn_1x.py    |  49 +-
 .../libra_rcnn/libra_retinanet_r50_fpn_1x.py  |  50 +-
 configs/mask_rcnn_r101_fpn_1x.py              |  49 +-
 configs/mask_rcnn_r50_caffe_c4_1x.py          |  51 +-
 configs/mask_rcnn_r50_fpn_1x.py               |  49 +-
 configs/mask_rcnn_x101_32x4d_fpn_1x.py        |  49 +-
 configs/mask_rcnn_x101_64x4d_fpn_1x.py        |  49 +-
 configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x.py  |  49 +-
 configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x.py   |  49 +-
 configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x.py  |  49 +-
 .../faster_rcnn_r50_fpn_1x_voc0712.py         |  51 +-
 configs/pascal_voc/ssd300_voc.py              |  83 +--
 configs/pascal_voc/ssd512_voc.py              |  83 +--
 configs/retinanet_r101_fpn_1x.py              |  50 +-
 configs/retinanet_r50_fpn_1x.py               |  50 +-
 configs/retinanet_x101_32x4d_fpn_1x.py        |  50 +-
 configs/retinanet_x101_64x4d_fpn_1x.py        |  50 +-
 configs/rpn_r101_fpn_1x.py                    |  49 +-
 configs/rpn_r50_caffe_c4_1x.py                |  49 +-
 configs/rpn_r50_fpn_1x.py                     |  49 +-
 configs/rpn_x101_32x4d_fpn_1x.py              |  49 +-
 configs/rpn_x101_64x4d_fpn_1x.py              |  49 +-
 .../scratch_faster_rcnn_r50_fpn_gn_6x.py      |  49 +-
 .../scratch_mask_rcnn_r50_fpn_gn_6x.py        |  49 +-
 configs/ssd300_coco.py                        |  77 ++-
 configs/ssd512_coco.py                        |  77 ++-
 configs/wider_face/ssd300_wider_face.py       |  94 +--
 demo/data_pipeline.png                        | Bin 0 -> 84111 bytes
 docs/DATA_PIPELINE.md                         | 115 ++++
 docs/GETTING_STARTED.md                       |   8 +-
 mmdet/apis/inference.py                       |  59 +-
 mmdet/core/evaluation/eval_hooks.py           |   6 +-
 mmdet/datasets/__init__.py                    |   6 +-
 mmdet/datasets/coco.py                        |  42 +-
 mmdet/datasets/custom.py                      | 288 ++------
 mmdet/datasets/pipelines/__init__.py          |  16 +
 mmdet/datasets/pipelines/compose.py           |  35 +
 mmdet/datasets/pipelines/formating.py         | 157 +++++
 mmdet/datasets/pipelines/loading.py           | 145 ++++
 mmdet/datasets/pipelines/test_aug.py          |  38 ++
 mmdet/datasets/pipelines/transforms.py        | 634 ++++++++++++++++++
 mmdet/datasets/registry.py                    |   1 +
 mmdet/datasets/utils.py                       |  68 --
 mmdet/models/detectors/base.py                |   9 +-
 mmdet/models/detectors/cascade_rcnn.py        |   5 +-
 mmdet/models/detectors/rpn.py                 |   4 +-
 mmdet/utils/registry.py                       |  10 +-
 tools/test.py                                 |   2 +-
 tools/test_robustness.py                      |  17 +-
 134 files changed, 4720 insertions(+), 3434 deletions(-)
 create mode 100644 demo/data_pipeline.png
 create mode 100644 docs/DATA_PIPELINE.md
 create mode 100644 mmdet/datasets/pipelines/__init__.py
 create mode 100644 mmdet/datasets/pipelines/compose.py
 create mode 100644 mmdet/datasets/pipelines/formating.py
 create mode 100644 mmdet/datasets/pipelines/loading.py
 create mode 100644 mmdet/datasets/pipelines/test_aug.py
 create mode 100644 mmdet/datasets/pipelines/transforms.py
 delete mode 100644 mmdet/datasets/utils.py

diff --git a/configs/cascade_mask_rcnn_r101_fpn_1x.py b/configs/cascade_mask_rcnn_r101_fpn_1x.py
index 1b63d2b..0ad9c88 100644
--- a/configs/cascade_mask_rcnn_r101_fpn_1x.py
+++ b/configs/cascade_mask_rcnn_r101_fpn_1x.py
@@ -44,13 +44,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -62,13 +57,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -80,13 +70,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=dict(
         type='SingleRoIExtractor',
@@ -196,6 +181,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -203,35 +213,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/cascade_mask_rcnn_r50_caffe_c4_1x.py b/configs/cascade_mask_rcnn_r50_caffe_c4_1x.py
index cdf5ff0..dd5f356 100644
--- a/configs/cascade_mask_rcnn_r50_caffe_c4_1x.py
+++ b/configs/cascade_mask_rcnn_r50_caffe_c4_1x.py
@@ -52,13 +52,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='BBoxHead',
             with_avg_pool=True,
@@ -69,13 +64,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='BBoxHead',
             with_avg_pool=True,
@@ -86,13 +76,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=None,
     mask_head=dict(
@@ -198,42 +183,49 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
-    imgs_per_gpu=1,
+    imgs_per_gpu=2,
     workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/cascade_mask_rcnn_r50_fpn_1x.py b/configs/cascade_mask_rcnn_r50_fpn_1x.py
index 39450b8..c9f007e 100644
--- a/configs/cascade_mask_rcnn_r50_fpn_1x.py
+++ b/configs/cascade_mask_rcnn_r50_fpn_1x.py
@@ -44,13 +44,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -62,13 +57,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -80,13 +70,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=dict(
         type='SingleRoIExtractor',
@@ -196,6 +181,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -203,35 +213,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/cascade_mask_rcnn_x101_32x4d_fpn_1x.py b/configs/cascade_mask_rcnn_x101_32x4d_fpn_1x.py
index e8a31d0..3167be4 100644
--- a/configs/cascade_mask_rcnn_x101_32x4d_fpn_1x.py
+++ b/configs/cascade_mask_rcnn_x101_32x4d_fpn_1x.py
@@ -46,13 +46,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -64,13 +59,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -82,13 +72,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=dict(
         type='SingleRoIExtractor',
@@ -198,6 +183,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -205,35 +215,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/cascade_mask_rcnn_x101_64x4d_fpn_1x.py b/configs/cascade_mask_rcnn_x101_64x4d_fpn_1x.py
index 1661a10..0c5434e 100644
--- a/configs/cascade_mask_rcnn_x101_64x4d_fpn_1x.py
+++ b/configs/cascade_mask_rcnn_x101_64x4d_fpn_1x.py
@@ -46,13 +46,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -64,13 +59,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -82,13 +72,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=dict(
         type='SingleRoIExtractor',
@@ -198,6 +183,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -205,35 +215,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/cascade_rcnn_r101_fpn_1x.py b/configs/cascade_rcnn_r101_fpn_1x.py
index a091314..a790c2b 100644
--- a/configs/cascade_rcnn_r101_fpn_1x.py
+++ b/configs/cascade_rcnn_r101_fpn_1x.py
@@ -44,13 +44,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -62,13 +57,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -80,13 +70,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ])
 # model training and testing settings
 train_cfg = dict(
@@ -177,6 +162,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -184,35 +194,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/cascade_rcnn_r50_caffe_c4_1x.py b/configs/cascade_rcnn_r50_caffe_c4_1x.py
index 978724b..0dd10ab 100644
--- a/configs/cascade_rcnn_r50_caffe_c4_1x.py
+++ b/configs/cascade_rcnn_r50_caffe_c4_1x.py
@@ -52,13 +52,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='BBoxHead',
             with_avg_pool=True,
@@ -69,13 +64,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='BBoxHead',
             with_avg_pool=True,
@@ -86,13 +76,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
     ])
 # model training and testing settings
 train_cfg = dict(
@@ -186,42 +171,49 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
-    imgs_per_gpu=1,
+    imgs_per_gpu=2,
     workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/cascade_rcnn_r50_fpn_1x.py b/configs/cascade_rcnn_r50_fpn_1x.py
index 045f0eb..96269fa 100644
--- a/configs/cascade_rcnn_r50_fpn_1x.py
+++ b/configs/cascade_rcnn_r50_fpn_1x.py
@@ -44,13 +44,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -62,13 +57,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -80,13 +70,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ])
 # model training and testing settings
 train_cfg = dict(
@@ -177,6 +162,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -184,35 +194,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/cascade_rcnn_x101_32x4d_fpn_1x.py b/configs/cascade_rcnn_x101_32x4d_fpn_1x.py
index 7ce7aab..6de3d37 100644
--- a/configs/cascade_rcnn_x101_32x4d_fpn_1x.py
+++ b/configs/cascade_rcnn_x101_32x4d_fpn_1x.py
@@ -46,13 +46,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -64,13 +59,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -82,13 +72,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ])
 # model training and testing settings
 train_cfg = dict(
@@ -179,6 +164,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -186,35 +196,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/cascade_rcnn_x101_64x4d_fpn_1x.py b/configs/cascade_rcnn_x101_64x4d_fpn_1x.py
index 401dfac..d6e9d1f 100644
--- a/configs/cascade_rcnn_x101_64x4d_fpn_1x.py
+++ b/configs/cascade_rcnn_x101_64x4d_fpn_1x.py
@@ -46,13 +46,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -64,13 +59,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -82,13 +72,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ])
 # model training and testing settings
 train_cfg = dict(
@@ -179,6 +164,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -186,35 +196,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py b/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py
index 0ccacd2..2f2c035 100644
--- a/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py
+++ b/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py
@@ -102,49 +102,56 @@ dataset_type = 'CityscapesDataset'
 data_root = 'data/cityscapes/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(
+        type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(2048, 1024),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=1,
     workers_per_gpu=2,
     train=dict(
-        type='RepeatDataset',  # to avoid reloading datasets frequently
+        type='RepeatDataset',
         times=8,
         dataset=dict(
             type=dataset_type,
             ann_file=data_root +
             'annotations/instancesonly_filtered_gtFine_train.json',
             img_prefix=data_root + 'train/',
-            img_scale=[(2048, 800), (2048, 1024)],
-            img_norm_cfg=img_norm_cfg,
-            multiscale_mode='range',
-            size_divisor=32,
-            flip_ratio=0.5,
-            with_mask=False,
-            with_crowd=True,
-            with_label=True)),
+            pipeline=train_pipeline)),
     val=dict(
         type=dataset_type,
         ann_file=data_root +
         'annotations/instancesonly_filtered_gtFine_val.json',
         img_prefix=data_root + 'val/',
-        img_scale=(2048, 1024),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root +
         'annotations/instancesonly_filtered_gtFine_val.json',
         img_prefix=data_root + 'val/',
-        img_scale=(2048, 1024),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 # lr is set for a batch size of 8
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
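[editor's note] The old Cityscapes config spelled multi-scale training as img_scale=[...] plus multiscale_mode='range'; in the new pipeline, the same two-endpoint list passed to Resize plays that role. A hedged sketch of what range-style sampling amounts to for this particular config (sample_scale_range is a hypothetical helper; the real implementation draws the long and short edges independently):

import random

def sample_scale_range(scales):
    # For img_scale=[(2048, 800), (2048, 1024)]: keep the shared long-edge cap
    # and draw the short edge uniformly between the two endpoints.
    (long0, short0), (long1, short1) = scales
    assert long0 == long1, 'sketch assumes a shared long-edge cap'
    return long0, random.randint(min(short0, short1), max(short0, short1))

print(sample_scale_range([(2048, 800), (2048, 1024)]))  # e.g. (2048, 947)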
diff --git a/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py b/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py
index 85f32f7..8408250 100644
--- a/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py
+++ b/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py
@@ -116,49 +116,56 @@ dataset_type = 'CityscapesDataset'
 data_root = 'data/cityscapes/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(
+        type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(2048, 1024),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=1,
     workers_per_gpu=2,
     train=dict(
-        type='RepeatDataset',  # to avoid reloading datasets frequently
+        type='RepeatDataset',  # repeats the dataset to avoid frequent reloading
         times=8,
         dataset=dict(
             type=dataset_type,
             ann_file=data_root +
             'annotations/instancesonly_filtered_gtFine_train.json',
             img_prefix=data_root + 'train/',
-            img_scale=[(2048, 800), (2048, 1024)],
-            img_norm_cfg=img_norm_cfg,
-            multiscale_mode='range',
-            size_divisor=32,
-            flip_ratio=0.5,
-            with_mask=True,
-            with_crowd=True,
-            with_label=True)),
+            pipeline=train_pipeline)),
     val=dict(
         type=dataset_type,
         ann_file=data_root +
         'annotations/instancesonly_filtered_gtFine_val.json',
         img_prefix=data_root + 'val/',
-        img_scale=(2048, 1024),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root +
         'annotations/instancesonly_filtered_gtFine_val.json',
         img_prefix=data_root + 'val/',
-        img_scale=(2048, 1024),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 # lr is set for a batch size of 8
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
diff --git a/configs/dcn/cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py b/configs/dcn/cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py
index 2865922..27476d3 100644
--- a/configs/dcn/cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py
+++ b/configs/dcn/cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py
@@ -184,6 +184,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -191,35 +216,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
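[editor's note] Every test_pipeline in this patch wraps its transforms in MultiScaleFlipAug with a single img_scale and flip=False, so exactly one augmented copy is produced per image. A plausible sketch of the wrapper's contract (illustrative only, not the class's real code):

from itertools import product

def multi_scale_flip_aug(results, img_scale, flip, transforms):
    # Run the wrapped transforms once per (scale, flip) combination and
    # collect one output per combination.
    scales = img_scale if isinstance(img_scale, list) else [img_scale]
    flips = [False, True] if flip else [False]
    outputs = []
    for scale, do_flip in product(scales, flips):
        r = dict(results, scale=scale, flip=do_flip)
        for t in transforms:  # Resize, RandomFlip, Normalize, Pad, ...
            r = t(r)
        outputs.append(r)
    return outputs  # len == 1 for the configs in this patch

Passing flip=True or a list of scales multiplies the entries, which is how multi-scale/flip testing falls out of the same config shape without new dataset arguments.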
diff --git a/configs/dcn/cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py b/configs/dcn/cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py
index d3ea852..9f9f10c 100644
--- a/configs/dcn/cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py
+++ b/configs/dcn/cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py
@@ -11,9 +11,7 @@ model = dict(
         frozen_stages=1,
         style='pytorch',
         dcn=dict(
-            modulated=False,
-            deformable_groups=1,
-            fallback_on_stride=False),
+            modulated=False, deformable_groups=1, fallback_on_stride=False),
         stage_with_dcn=(False, True, True, True)),
     neck=dict(
         type='FPN',
@@ -49,13 +47,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -67,13 +60,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -85,13 +73,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ])
 # model training and testing settings
 train_cfg = dict(
@@ -182,6 +165,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -189,35 +197,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
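[editor's note] Note the asymmetry that recurs in these pairs: training pipelines end with DefaultFormatBundle, while test pipelines spell out ImageToTensor + Collect, since there are no annotations to bundle at test time. The image conversion both perform could be sketched roughly as follows (hedged, torch-based stand-in, not the library's exact code):

import numpy as np
import torch

def image_to_tensor(results, keys=('img',)):
    # HWC (or HW) numpy image -> contiguous CHW torch tensor, per key.
    for key in keys:
        img = results[key]
        if img.ndim == 2:
            img = img[..., None]  # promote grayscale to HWC
        results[key] = torch.from_numpy(
            np.ascontiguousarray(img.transpose(2, 0, 1)))
    return results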
diff --git a/configs/dcn/faster_rcnn_dconv_c3-c5_r50_fpn_1x.py b/configs/dcn/faster_rcnn_dconv_c3-c5_r50_fpn_1x.py
index 0f02899..11c7dd3 100644
--- a/configs/dcn/faster_rcnn_dconv_c3-c5_r50_fpn_1x.py
+++ b/configs/dcn/faster_rcnn_dconv_c3-c5_r50_fpn_1x.py
@@ -105,6 +105,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -112,35 +137,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/dcn/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py b/configs/dcn/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py
index 31aa198..9156b0d 100644
--- a/configs/dcn/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py
+++ b/configs/dcn/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py
@@ -110,6 +110,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -117,35 +142,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/dcn/faster_rcnn_dpool_r50_fpn_1x.py b/configs/dcn/faster_rcnn_dpool_r50_fpn_1x.py
index ec38722..bddcce4 100644
--- a/configs/dcn/faster_rcnn_dpool_r50_fpn_1x.py
+++ b/configs/dcn/faster_rcnn_dpool_r50_fpn_1x.py
@@ -108,6 +108,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -115,35 +140,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py b/configs/dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py
index 1aca07e..484b4af 100644
--- a/configs/dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py
+++ b/configs/dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py
@@ -105,6 +105,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -112,35 +137,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/dcn/faster_rcnn_mdpool_r50_fpn_1x.py b/configs/dcn/faster_rcnn_mdpool_r50_fpn_1x.py
index bb8d3c4..fba0b33 100644
--- a/configs/dcn/faster_rcnn_mdpool_r50_fpn_1x.py
+++ b/configs/dcn/faster_rcnn_mdpool_r50_fpn_1x.py
@@ -108,6 +108,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -115,35 +140,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py b/configs/dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py
index 6f910aa..c3de699 100644
--- a/configs/dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py
+++ b/configs/dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py
@@ -10,9 +10,7 @@ model = dict(
         frozen_stages=1,
         style='pytorch',
         dcn=dict(
-            modulated=False,
-            deformable_groups=1,
-            fallback_on_stride=False),
+            modulated=False, deformable_groups=1, fallback_on_stride=False),
         stage_with_dcn=(False, True, True, True)),
     neck=dict(
         type='FPN',
@@ -121,6 +119,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -128,35 +151,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/double_heads/dh_faster_rcnn_r50_fpn_1x.py b/configs/double_heads/dh_faster_rcnn_r50_fpn_1x.py
index cef4657..708f672 100644
--- a/configs/double_heads/dh_faster_rcnn_r50_fpn_1x.py
+++ b/configs/double_heads/dh_faster_rcnn_r50_fpn_1x.py
@@ -105,6 +105,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -112,35 +137,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x.py b/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x.py
index 4adba5d..db20d8d 100644
--- a/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x.py
+++ b/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x.py
@@ -106,6 +106,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -113,35 +138,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x.py b/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x.py
index f8e1f83..d264b9b 100644
--- a/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x.py
+++ b/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x.py
@@ -109,6 +109,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -116,35 +141,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x.py b/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x.py
index 4a43cce..f3eace2 100644
--- a/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x.py
+++ b/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x.py
@@ -106,6 +106,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -113,35 +138,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x.py b/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x.py
index 0a3cf0e..f39edee 100644
--- a/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x.py
+++ b/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x.py
@@ -109,6 +109,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -116,35 +141,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/fast_mask_rcnn_r101_fpn_1x.py b/configs/fast_mask_rcnn_r101_fpn_1x.py
index 0a9fc45..576d488 100644
--- a/configs/fast_mask_rcnn_r101_fpn_1x.py
+++ b/configs/fast_mask_rcnn_r101_fpn_1x.py
@@ -74,45 +74,56 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=2000),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(
+        type='Collect',
+        keys=['img', 'proposals', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=None),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img', 'proposals']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
         proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl',
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'train2017/',
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
         proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
         proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
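[editor's note] The fast R-CNN variants also move proposal handling into the pipeline: LoadProposals appears with num_max_proposals=2000 for training and None at test time, while proposal_file stays a dataset-level key. A hedged sketch of what such a step does (hypothetical function, assuming the proposals were already read from the .pkl as an N x 4 or N x 5 array):

import numpy as np

def load_proposals(results, proposals, num_max_proposals=None):
    proposals = np.asarray(proposals, dtype=np.float32)
    if num_max_proposals is not None:
        proposals = proposals[:num_max_proposals]  # cap, keeping the head
    results['proposals'] = proposals
    return results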
diff --git a/configs/fast_mask_rcnn_r50_caffe_c4_1x.py b/configs/fast_mask_rcnn_r50_caffe_c4_1x.py
index 1aa97e0..cca8710 100644
--- a/configs/fast_mask_rcnn_r50_caffe_c4_1x.py
+++ b/configs/fast_mask_rcnn_r50_caffe_c4_1x.py
@@ -73,45 +73,56 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=2000),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(
+        type='Collect',
+        keys=['img', 'proposals', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=None),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img', 'proposals']),
+        ])
+]
 data = dict(
-    imgs_per_gpu=1,
+    imgs_per_gpu=2,
     workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
         proposal_file=data_root + 'proposals/rpn_r50_c4_1x_train2017.pkl',
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'train2017/',
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
         proposal_file=data_root + 'proposals/rpn_r50_c4_1x_val2017.pkl',
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
         proposal_file=data_root + 'proposals/rpn_r50_c4_1x_val2017.pkl',
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
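[editor's note] The caffe-style configs keep their distinct img_norm_cfg: BGR pixel means, unit std, and to_rgb=False, versus the torchvision-style mean/std with to_rgb=True used elsewhere. Since Normalize now receives these via **img_norm_cfg, its arithmetic is the only thing that differs between the two styles; sketched (illustrative only):

import numpy as np

def normalize(img, mean, std, to_rgb):
    img = img.astype(np.float32)
    if to_rgb:
        img = img[..., ::-1]  # incoming frames are assumed BGR
    return (img - np.asarray(mean, np.float32)) / np.asarray(std, np.float32)

With std=[1.0, 1.0, 1.0] and to_rgb=False this collapses to plain mean subtraction on BGR input, matching the original caffe preprocessing.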
diff --git a/configs/fast_mask_rcnn_r50_fpn_1x.py b/configs/fast_mask_rcnn_r50_fpn_1x.py
index 81a46e6..c324356 100644
--- a/configs/fast_mask_rcnn_r50_fpn_1x.py
+++ b/configs/fast_mask_rcnn_r50_fpn_1x.py
@@ -74,45 +74,56 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=2000),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(
+        type='Collect',
+        keys=['img', 'proposals', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=None),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img', 'proposals']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
         proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl',
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'train2017/',
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
         proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
         proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/fast_rcnn_r101_fpn_1x.py b/configs/fast_rcnn_r101_fpn_1x.py
index 51cca7a..06d8812 100644
--- a/configs/fast_rcnn_r101_fpn_1x.py
+++ b/configs/fast_rcnn_r101_fpn_1x.py
@@ -30,11 +30,8 @@ model = dict(
         target_stds=[0.1, 0.1, 0.2, 0.2],
         reg_class_agnostic=False,
         loss_cls=dict(
-            type='CrossEntropyLoss',
-            use_sigmoid=False,
-            loss_weight=1.0),
-        loss_bbox=dict(
-            type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
 # model training and testing settings
 train_cfg = dict(
     rcnn=dict(
@@ -60,45 +57,54 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=2000),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=None),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img', 'proposals']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
         proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl',
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'train2017/',
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
         proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
         proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/fast_rcnn_r50_caffe_c4_1x.py b/configs/fast_rcnn_r50_caffe_c4_1x.py
index 5d78baf..d1c6f3a 100644
--- a/configs/fast_rcnn_r50_caffe_c4_1x.py
+++ b/configs/fast_rcnn_r50_caffe_c4_1x.py
@@ -38,11 +38,8 @@ model = dict(
         target_stds=[0.1, 0.1, 0.2, 0.2],
         reg_class_agnostic=False,
         loss_cls=dict(
-            type='CrossEntropyLoss',
-            use_sigmoid=False,
-            loss_weight=1.0),
-        loss_bbox=dict(
-            type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
 # model training and testing settings
 train_cfg = dict(
     rcnn=dict(
@@ -68,45 +65,54 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=2000),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=None),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img', 'proposals']),
+        ])
+]
 data = dict(
-    imgs_per_gpu=1,
+    imgs_per_gpu=2,
     workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
         proposal_file=data_root + 'proposals/rpn_r50_c4_1x_train2017.pkl',
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'train2017/',
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
         proposal_file=data_root + 'proposals/rpn_r50_c4_1x_val2017.pkl',
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
         proposal_file=data_root + 'proposals/rpn_r50_c4_1x_val2017.pkl',
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/fast_rcnn_r50_fpn_1x.py b/configs/fast_rcnn_r50_fpn_1x.py
index 8d58453..b22dede 100644
--- a/configs/fast_rcnn_r50_fpn_1x.py
+++ b/configs/fast_rcnn_r50_fpn_1x.py
@@ -30,11 +30,8 @@ model = dict(
         target_stds=[0.1, 0.1, 0.2, 0.2],
         reg_class_agnostic=False,
         loss_cls=dict(
-            type='CrossEntropyLoss',
-            use_sigmoid=False,
-            loss_weight=1.0),
-        loss_bbox=dict(
-            type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
 # model training and testing settings
 train_cfg = dict(
     rcnn=dict(
@@ -60,45 +57,54 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=2000),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=None),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img', 'proposals']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
         proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl',
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'train2017/',
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
         proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
         proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
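The Fast R-CNN family keeps precomputed proposals alongside the images, so its pipelines add a LoadProposals step, capped at 2000 proposals per image for training and uncapped (None) at test time. A hedged sketch of what such a cap amounts to (field names are illustrative, not the exact mmdet internals):

    import numpy as np

    class LoadProposals(object):
        """Illustrative sketch: trim precomputed proposals in the results dict."""

        def __init__(self, num_max_proposals=None):
            self.num_max_proposals = num_max_proposals

        def __call__(self, results):
            proposals = results['proposals']  # (N, 4), or (N, 5) with scores
            if self.num_max_proposals is not None:
                proposals = proposals[:self.num_max_proposals]
            if len(proposals) == 0:
                # keep downstream ops shape-safe when an image has no proposals
                proposals = np.zeros((0, 4), dtype=np.float32)
            results['proposals'] = proposals
            return results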
diff --git a/configs/faster_rcnn_ohem_r50_fpn_1x.py b/configs/faster_rcnn_ohem_r50_fpn_1x.py
index 9e65c10..e2dbc8d 100644
--- a/configs/faster_rcnn_ohem_r50_fpn_1x.py
+++ b/configs/faster_rcnn_ohem_r50_fpn_1x.py
@@ -102,6 +102,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -109,35 +134,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/faster_rcnn_r101_fpn_1x.py b/configs/faster_rcnn_r101_fpn_1x.py
index f77028e..e0ad706 100644
--- a/configs/faster_rcnn_r101_fpn_1x.py
+++ b/configs/faster_rcnn_r101_fpn_1x.py
@@ -102,6 +102,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -109,35 +134,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/faster_rcnn_r50_caffe_c4_1x.py b/configs/faster_rcnn_r50_caffe_c4_1x.py
index ebbcc4e..ddd8113 100644
--- a/configs/faster_rcnn_r50_caffe_c4_1x.py
+++ b/configs/faster_rcnn_r50_caffe_c4_1x.py
@@ -107,42 +107,49 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
-    imgs_per_gpu=1,
+    imgs_per_gpu=2,
     workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/faster_rcnn_r50_fpn_1x.py b/configs/faster_rcnn_r50_fpn_1x.py
index a6049cd..0c5e243 100644
--- a/configs/faster_rcnn_r50_fpn_1x.py
+++ b/configs/faster_rcnn_r50_fpn_1x.py
@@ -102,6 +102,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -109,35 +134,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
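Every test_pipeline here wraps its deterministic transforms in MultiScaleFlipAug, which fans a single image out into one view per (scale, flip) combination; with one scale and flip=False, as in these configs, that is exactly one pass. A rough sketch of the fan-out, again assuming instantiated transforms rather than the exact mmdet internals:

    class MultiScaleFlipAug(object):
        def __init__(self, transforms, img_scale, flip=False):
            self.transforms = transforms  # instantiated, callable transforms
            self.img_scales = img_scale if isinstance(img_scale, list) else [img_scale]
            self.flip = flip

        def __call__(self, results):
            aug_results = []
            flip_args = [False, True] if self.flip else [False]
            for scale in self.img_scales:
                for flip in flip_args:
                    _results = dict(results)   # shallow copy per view
                    _results['scale'] = scale  # consumed by Resize
                    _results['flip'] = flip    # consumed by RandomFlip
                    for t in self.transforms:
                        _results = t(_results)
                    aug_results.append(_results)
            # regroup the list of dicts into a dict of lists, one entry per key
            return {k: [r[k] for r in aug_results] for k in aug_results[0]}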
diff --git a/configs/faster_rcnn_x101_32x4d_fpn_1x.py b/configs/faster_rcnn_x101_32x4d_fpn_1x.py
index 50b479d..8f3c6f7 100644
--- a/configs/faster_rcnn_x101_32x4d_fpn_1x.py
+++ b/configs/faster_rcnn_x101_32x4d_fpn_1x.py
@@ -104,6 +104,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -111,35 +136,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/faster_rcnn_x101_64x4d_fpn_1x.py b/configs/faster_rcnn_x101_64x4d_fpn_1x.py
index 4397d2d..90ad0ac 100644
--- a/configs/faster_rcnn_x101_64x4d_fpn_1x.py
+++ b/configs/faster_rcnn_x101_64x4d_fpn_1x.py
@@ -104,6 +104,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -111,35 +136,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/fcos/fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py b/configs/fcos/fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py
index ac21fad..0356a57 100644
--- a/configs/fcos/fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py
+++ b/configs/fcos/fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py
@@ -57,6 +57,35 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(
+        type='Resize',
+        img_scale=[(1333, 640), (1333, 800)],
+        multiscale_mode='value',
+        keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=4,
     workers_per_gpu=4,
@@ -64,37 +93,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=[(1333, 640), (1333, 800)],
-        multiscale_mode='value',
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(
     type='SGD',
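The multi-scale FCOS configs pass a list of two scales with multiscale_mode='value', meaning each training image picks one of the listed scales at random, rather than interpolating between endpoints as a 'range' mode would. A small illustrative sketch of the sampling (the helper name is chosen for illustration):

    import random

    def sample_scale(img_scales, multiscale_mode='value'):
        """Pick a (w, h) training scale from a list, as in the configs above."""
        if len(img_scales) == 1:
            return img_scales[0]
        if multiscale_mode == 'value':
            return random.choice(img_scales)  # e.g. (1333, 640) or (1333, 800)
        # 'range' mode: sample each edge between the two listed endpoints
        (w1, h1), (w2, h2) = img_scales
        w = random.randint(min(w1, w2), max(w1, w2))
        h = random.randint(min(h1, h2), max(h1, h2))
        return (w, h)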
diff --git a/configs/fcos/fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py b/configs/fcos/fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py
index d932bcf..c6f725a 100644
--- a/configs/fcos/fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py
+++ b/configs/fcos/fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py
@@ -58,6 +58,35 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(
+        type='Resize',
+        img_scale=[(1333, 640), (1333, 800)],
+        multiscale_mode='value',
+        keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -65,37 +94,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=[(1333, 640), (1333, 800)],
-        multiscale_mode='value',
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(
     type='SGD',
diff --git a/configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py b/configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py
index 6243c36..2c3248a 100644
--- a/configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py
+++ b/configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py
@@ -57,6 +57,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=4,
     workers_per_gpu=4,
@@ -64,36 +89,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(
     type='SGD',
diff --git a/configs/fp16/faster_rcnn_r50_fpn_fp16_1x.py b/configs/fp16/faster_rcnn_r50_fpn_fp16_1x.py
index 161a975..20ff20b 100644
--- a/configs/fp16/faster_rcnn_r50_fpn_fp16_1x.py
+++ b/configs/fp16/faster_rcnn_r50_fpn_fp16_1x.py
@@ -105,6 +105,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -112,35 +137,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/fp16/mask_rcnn_r50_fpn_fp16_1x.py b/configs/fp16/mask_rcnn_r50_fpn_fp16_1x.py
index 31b32f0..23cbcf5 100644
--- a/configs/fp16/mask_rcnn_r50_fpn_fp16_1x.py
+++ b/configs/fp16/mask_rcnn_r50_fpn_fp16_1x.py
@@ -119,6 +119,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -126,35 +151,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
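DefaultFormatBundle, which closes every train_pipeline, applies fixed formatting conventions (image to a CHW tensor, annotations to tensors ready for collation) so that the final Collect step only selects keys. An approximation in plain PyTorch; the real implementation additionally wraps values in DataContainer objects for its collate function:

    import numpy as np
    import torch

    def default_format_bundle(results):
        """Sketch of the fixed formatting conventions applied before Collect."""
        img = np.ascontiguousarray(results['img'].transpose(2, 0, 1))  # HWC -> CHW
        results['img'] = torch.from_numpy(img)
        for key in ('proposals', 'gt_bboxes', 'gt_labels'):
            if key in results:
                results[key] = torch.from_numpy(np.asarray(results[key]))
        return results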
diff --git a/configs/fp16/retinanet_r50_fpn_fp16_1x.py b/configs/fp16/retinanet_r50_fpn_fp16_1x.py
index 87432f7..a0577e7 100644
--- a/configs/fp16/retinanet_r50_fpn_fp16_1x.py
+++ b/configs/fp16/retinanet_r50_fpn_fp16_1x.py
@@ -60,6 +60,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -67,36 +92,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/gcnet/mask_rcnn_r16_gcb_c3-c5_r50_fpn_1x.py b/configs/gcnet/mask_rcnn_r16_gcb_c3-c5_r50_fpn_1x.py
index 480e403..bc94907 100644
--- a/configs/gcnet/mask_rcnn_r16_gcb_c3-c5_r50_fpn_1x.py
+++ b/configs/gcnet/mask_rcnn_r16_gcb_c3-c5_r50_fpn_1x.py
@@ -9,9 +9,7 @@ model = dict(
         out_indices=(0, 1, 2, 3),
         frozen_stages=1,
         style='pytorch',
-        gcb=dict(
-            ratio=1./16.,
-        ),
+        gcb=dict(ratio=1. / 16.),
         stage_with_gcb=(False, True, True, True)),
     neck=dict(
         type='FPN',
@@ -120,6 +118,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -127,35 +150,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/gcnet/mask_rcnn_r16_gcb_c3-c5_r50_fpn_syncbn_1x.py b/configs/gcnet/mask_rcnn_r16_gcb_c3-c5_r50_fpn_syncbn_1x.py
index 349a52a..ad06c9b 100644
--- a/configs/gcnet/mask_rcnn_r16_gcb_c3-c5_r50_fpn_syncbn_1x.py
+++ b/configs/gcnet/mask_rcnn_r16_gcb_c3-c5_r50_fpn_syncbn_1x.py
@@ -11,9 +11,7 @@ model = dict(
         out_indices=(0, 1, 2, 3),
         frozen_stages=1,
         style='pytorch',
-        gcb=dict(
-            ratio=1./16.,
-        ),
+        gcb=dict(ratio=1. / 16.),
         stage_with_gcb=(False, True, True, True),
         norm_eval=False,
         norm_cfg=norm_cfg),
@@ -124,6 +122,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -131,35 +154,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/gcnet/mask_rcnn_r4_gcb_c3-c5_r50_fpn_1x.py b/configs/gcnet/mask_rcnn_r4_gcb_c3-c5_r50_fpn_1x.py
index 08b2605..5568b2f 100644
--- a/configs/gcnet/mask_rcnn_r4_gcb_c3-c5_r50_fpn_1x.py
+++ b/configs/gcnet/mask_rcnn_r4_gcb_c3-c5_r50_fpn_1x.py
@@ -9,9 +9,7 @@ model = dict(
         out_indices=(0, 1, 2, 3),
         frozen_stages=1,
         style='pytorch',
-        gcb=dict(
-            ratio=1./4.,
-        ),
+        gcb=dict(ratio=1. / 4.),
         stage_with_gcb=(False, True, True, True)),
     neck=dict(
         type='FPN',
@@ -120,6 +118,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -127,35 +150,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/gcnet/mask_rcnn_r4_gcb_c3-c5_r50_fpn_syncbn_1x.py b/configs/gcnet/mask_rcnn_r4_gcb_c3-c5_r50_fpn_syncbn_1x.py
index b8b5564..6a32126 100644
--- a/configs/gcnet/mask_rcnn_r4_gcb_c3-c5_r50_fpn_syncbn_1x.py
+++ b/configs/gcnet/mask_rcnn_r4_gcb_c3-c5_r50_fpn_syncbn_1x.py
@@ -11,9 +11,7 @@ model = dict(
         out_indices=(0, 1, 2, 3),
         frozen_stages=1,
         style='pytorch',
-        gcb=dict(
-            ratio=1./4.,
-        ),
+        gcb=dict(ratio=1. / 4.),
         stage_with_gcb=(False, True, True, True),
         norm_eval=False,
         norm_cfg=norm_cfg),
@@ -124,6 +122,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -131,35 +154,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/gcnet/mask_rcnn_r50_fpn_sbn_1x.py b/configs/gcnet/mask_rcnn_r50_fpn_sbn_1x.py
index b261934..819c78f 100644
--- a/configs/gcnet/mask_rcnn_r50_fpn_sbn_1x.py
+++ b/configs/gcnet/mask_rcnn_r50_fpn_sbn_1x.py
@@ -120,6 +120,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -127,35 +152,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/ghm/retinanet_ghm_r50_fpn_1x.py b/configs/ghm/retinanet_ghm_r50_fpn_1x.py
index eddf368..ea90fb7 100644
--- a/configs/ghm/retinanet_ghm_r50_fpn_1x.py
+++ b/configs/ghm/retinanet_ghm_r50_fpn_1x.py
@@ -35,11 +35,7 @@ model = dict(
             use_sigmoid=True,
             loss_weight=1.0),
         loss_bbox=dict(
-            type='GHMR',
-            mu=0.02,
-            bins=10,
-            momentum=0.7,
-            loss_weight=10.0)))
+            type='GHMR', mu=0.02, bins=10, momentum=0.7, loss_weight=10.0)))
 # training and testing settings
 train_cfg = dict(
     assigner=dict(
@@ -62,6 +58,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -69,36 +90,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
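The new-style configs above express all data handling as a `pipeline`: an ordered list of dict(type=..., **kwargs) entries, each naming a transform class, replacing the old flat dataset arguments (img_scale, img_norm_cfg, size_divisor, flip_ratio, with_*, test_mode). A minimal sketch of how such a list can be materialized and applied to a per-sample results dict follows; `build_pipeline` and `Compose` here are illustrative stand-ins, not mmdetection's actual implementation.

class Compose:
    """Chain transform callables; each takes and returns a results dict."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, results):
        for t in self.transforms:
            results = t(results)
            if results is None:  # a transform may drop an invalid sample
                return None
        return results


def build_pipeline(cfg_list, registry):
    """Instantiate each dict(type=..., **kwargs) via a name -> class registry."""
    transforms = []
    for cfg in cfg_list:
        cfg = dict(cfg)  # copy so the shared config list is not mutated
        transforms.append(registry[cfg.pop('type')](**cfg))
    return Compose(transforms)

With this shape, adding or reordering augmentations is a config edit rather than a dataset-class change, which is the point of the migration.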
diff --git a/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws_1x.py b/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws_1x.py
index 0eef1e9..eef21db 100644
--- a/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws_1x.py
+++ b/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws_1x.py
@@ -109,6 +109,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -116,35 +141,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
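Every train_pipeline ends with DefaultFormatBundle followed by Collect: the bundle converts arrays into model-ready tensors so Collect only has to pick keys. A rough sketch of what the bundling step amounts to, simplified to plain torch tensors (assumption: the real transform additionally wraps results in mmcv DataContainers and handles masks and other fields):

import numpy as np
import torch

class DefaultFormatBundle:
    """Sketch: image -> CHW tensor, ground-truth arrays -> tensors."""

    def __call__(self, results):
        img = np.ascontiguousarray(results['img'].transpose(2, 0, 1))
        results['img'] = torch.from_numpy(img)
        for key in ('gt_bboxes', 'gt_labels'):
            if key in results:
                results[key] = torch.from_numpy(np.asarray(results[key]))
        return results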
diff --git a/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_20_23_24e.py b/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_20_23_24e.py
index da39db1..8856c84 100644
--- a/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_20_23_24e.py
+++ b/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_20_23_24e.py
@@ -128,6 +128,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -135,35 +160,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py b/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py
index c023ec3..ce0348f 100644
--- a/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py
+++ b/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py
@@ -128,6 +128,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -135,35 +160,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py b/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py
index 271ed6e..b250590 100644
--- a/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py
+++ b/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py
@@ -130,6 +130,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -137,35 +162,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/gn/mask_rcnn_r101_fpn_gn_2x.py b/configs/gn/mask_rcnn_r101_fpn_gn_2x.py
index ae09042..ae1aeb8 100644
--- a/configs/gn/mask_rcnn_r101_fpn_gn_2x.py
+++ b/configs/gn/mask_rcnn_r101_fpn_gn_2x.py
@@ -124,6 +124,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -131,35 +156,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
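The gn configs above use caffe-style normalization: BGR channel means, std of 1.0, and to_rgb=False, so images stay in OpenCV's BGR order and are only mean-shifted, while the pytorch-style configs elsewhere use RGB means and stds with to_rgb=True. A sketch of the Normalize semantics, consistent with mmcv's imnormalize behavior though simplified:

import numpy as np

def normalize(img, mean, std, to_rgb):
    """img: HxWx3 array in BGR channel order, as loaded by OpenCV."""
    img = img.astype(np.float32)
    if to_rgb:
        img = img[..., ::-1]  # convert BGR -> RGB before normalizing
    return (img - np.asarray(mean, np.float32)) / np.asarray(std, np.float32)

# caffe-style (this file): mean subtraction only, image stays BGR
out = normalize(np.zeros((4, 4, 3)), [102.9801, 115.9465, 122.7717],
                [1.0, 1.0, 1.0], to_rgb=False)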
diff --git a/configs/gn/mask_rcnn_r50_fpn_gn_2x.py b/configs/gn/mask_rcnn_r50_fpn_gn_2x.py
index bbeaa78..43ecf9e 100644
--- a/configs/gn/mask_rcnn_r50_fpn_gn_2x.py
+++ b/configs/gn/mask_rcnn_r50_fpn_gn_2x.py
@@ -124,6 +124,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -131,35 +156,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/gn/mask_rcnn_r50_fpn_gn_contrib_2x.py b/configs/gn/mask_rcnn_r50_fpn_gn_contrib_2x.py
index 87db0f5..ee4ffaf 100644
--- a/configs/gn/mask_rcnn_r50_fpn_gn_contrib_2x.py
+++ b/configs/gn/mask_rcnn_r50_fpn_gn_contrib_2x.py
@@ -124,6 +124,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -131,35 +156,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/grid_rcnn/grid_rcnn_gn_head_r50_fpn_2x.py b/configs/grid_rcnn/grid_rcnn_gn_head_r50_fpn_2x.py
index cbe3451..5a2c071 100644
--- a/configs/grid_rcnn/grid_rcnn_gn_head_r50_fpn_2x.py
+++ b/configs/grid_rcnn/grid_rcnn_gn_head_r50_fpn_2x.py
@@ -113,6 +113,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -120,35 +145,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=None)
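Two things worth noting in the grid_rcnn hunks: the new train_pipeline loads boxes only, dropping the with_mask=True of the old config, since Grid R-CNN's heads do not consume instance masks; and, as in every config here, test_pipeline wraps the per-image transforms in MultiScaleFlipAug, which fans one sample out into one copy per (scale, flip) combination. With a single img_scale and flip=False that is exactly one copy, but the same wrapper supports multi-scale flipped testing. A simplified sketch of the control flow (illustrative, not the library's code):

def multi_scale_flip_aug(results, img_scales, flip, transforms):
    if not isinstance(img_scales, list):
        img_scales = [img_scales]
    flip_flags = [False, True] if flip else [False]
    outputs = []
    for scale in img_scales:
        for f in flip_flags:
            aug = dict(results, scale=scale, flip=f)
            for t in transforms:  # Resize/RandomFlip read scale/flip from aug
                aug = t(aug)
            outputs.append(aug)
    return outputs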
diff --git a/configs/grid_rcnn/grid_rcnn_gn_head_x101_32x4d_fpn_2x.py b/configs/grid_rcnn/grid_rcnn_gn_head_x101_32x4d_fpn_2x.py
index b08f809..989065e 100644
--- a/configs/grid_rcnn/grid_rcnn_gn_head_x101_32x4d_fpn_2x.py
+++ b/configs/grid_rcnn/grid_rcnn_gn_head_x101_32x4d_fpn_2x.py
@@ -115,6 +115,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -122,35 +147,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=None)
diff --git a/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x.py b/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x.py
index 269967d..98fabd2 100644
--- a/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x.py
+++ b/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x.py
@@ -59,48 +59,54 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=300),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=None),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img', 'proposals']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        num_max_proposals=300,
         proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl',
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'train2017/',
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        num_max_proposals=300,
         proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        num_max_proposals=300,
         proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
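ga_fast is a proposal-based (fast R-CNN style) config, so both pipelines start with LoadProposals (capped at 300 for training, uncapped for testing) and the Collect steps carry 'proposals' alongside the image, while the proposal_file stays a dataset-level argument next to ann_file. A sketch of what Collect does with the chosen keys; the meta_keys defaults shown are an assumption matching the fields this patch routes into img_meta:

class Collect:
    """Sketch: keep only `keys`, gather bookkeeping fields into img_meta."""

    def __init__(self, keys,
                 meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
                            'scale_factor', 'flip', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        data = {k: results[k] for k in self.keys}
        data['img_meta'] = {k: results[k] for k in self.meta_keys
                            if k in results}
        return data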
diff --git a/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x.py b/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x.py
index f78e1c1..791368c 100644
--- a/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x.py
+++ b/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x.py
@@ -127,6 +127,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -134,35 +159,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x.py b/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x.py
index 61e7b99..b0f2cb3 100644
--- a/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x.py
+++ b/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x.py
@@ -127,6 +127,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -134,35 +159,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x.py b/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x.py
index ae6a18a..175b964 100644
--- a/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x.py
+++ b/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x.py
@@ -84,6 +84,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -91,36 +116,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x.py b/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x.py
index 32f2bd6..7bc3484 100644
--- a/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x.py
+++ b/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x.py
@@ -84,6 +84,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -91,36 +116,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/guided_anchoring/ga_rpn_r101_caffe_rpn_1x.py b/configs/guided_anchoring/ga_rpn_r101_caffe_rpn_1x.py
index c3d3b65..8d81f32 100644
--- a/configs/guided_anchoring/ga_rpn_r101_caffe_rpn_1x.py
+++ b/configs/guided_anchoring/ga_rpn_r101_caffe_rpn_1x.py
@@ -85,6 +85,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_label=False),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -92,35 +117,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 # runner configs
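The RPN-only ga_rpn configs train class-agnostically: LoadAnnotations is called with with_label=False and Collect keeps only ['img', 'gt_bboxes'], so no gt_labels ever enter the batch. A sketch of how the with_* flags gate what lands in the results dict (the field names follow the configs; the internal ann_info layout is an assumption):

class LoadAnnotations:
    def __init__(self, with_bbox=True, with_label=True, with_mask=False):
        self.with_bbox = with_bbox
        self.with_label = with_label
        self.with_mask = with_mask

    def __call__(self, results):
        ann = results['ann_info']  # assumed dict prepared by the dataset
        if self.with_bbox:
            results['gt_bboxes'] = ann['bboxes']
        if self.with_label:
            results['gt_labels'] = ann['labels']
        if self.with_mask:
            results['gt_masks'] = ann['masks']
        return results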
diff --git a/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x.py b/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x.py
index a4b6b6d..9d6b7ce 100644
--- a/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x.py
+++ b/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x.py
@@ -85,6 +85,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_label=False),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -92,35 +117,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 # runner configs
diff --git a/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x.py b/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x.py
index 9eb1a69..1c5b7a0 100644
--- a/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x.py
+++ b/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x.py
@@ -85,6 +85,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_label=False),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -92,35 +117,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 # runner configs
diff --git a/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e.py b/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e.py
index 06e0c16..17fe945 100644
--- a/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e.py
+++ b/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e.py
@@ -30,10 +30,7 @@ model = dict(
                 block='BASIC',
                 num_blocks=(4, 4, 4, 4),
                 num_channels=(32, 64, 128, 256)))),
-    neck=dict(
-        type='HRFPN',
-        in_channels=[32, 64, 128, 256],
-        out_channels=256),
+    neck=dict(type='HRFPN', in_channels=[32, 64, 128, 256], out_channels=256),
     rpn_head=dict(
         type='RPNHead',
         in_channels=256,
@@ -63,13 +60,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -81,13 +73,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -99,13 +86,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=dict(
         type='SingleRoIExtractor',
@@ -215,6 +197,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -222,35 +229,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e.py b/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e.py
index 512c652..65eedd1 100644
--- a/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e.py
+++ b/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e.py
@@ -30,10 +30,7 @@ model = dict(
                 block='BASIC',
                 num_blocks=(4, 4, 4, 4),
                 num_channels=(32, 64, 128, 256)))),
-    neck=dict(
-        type='HRFPN',
-        in_channels=[32, 64, 128, 256],
-        out_channels=256),
+    neck=dict(type='HRFPN', in_channels=[32, 64, 128, 256], out_channels=256),
     rpn_head=dict(
         type='RPNHead',
         in_channels=256,
@@ -48,10 +45,7 @@ model = dict(
         loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
     bbox_roi_extractor=dict(
         type='SingleRoIExtractor',
-        roi_layer=dict(
-            type='RoIAlign',
-            out_size=7,
-            sample_num=2),
+        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
         out_channels=256,
         featmap_strides=[4, 8, 16, 32]),
     bbox_head=[
@@ -66,13 +60,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -84,13 +73,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -102,13 +86,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
     ])
 # model training and testing settings
 train_cfg = dict(
@@ -192,17 +171,38 @@ test_cfg = dict(
         nms_thr=0.7,
         min_bbox_size=0),
     rcnn=dict(
-        score_thr=0.05,
-        nms=dict(type='nms', iou_thr=0.5),
-        max_per_img=100),
+        score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100),
     keep_all_stages=False)
 # dataset settings
 dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53],
-    std=[58.395, 57.12, 57.375],
-    to_rgb=True)
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -210,35 +210,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
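Across all of the configs touched here, the flat per-dataset arguments (img_scale, img_norm_cfg, size_divisor, flip_ratio, with_*) collapse into a single pipeline list, where each dict names a transform type and passes the remaining keys as constructor kwargs. Below is a minimal, self-contained sketch of that dispatch pattern; it is plain Python written for illustration, with a toy registry and a stand-in transform, not mmdet's actual builder or transform classes.

    TRANSFORMS = {}  # toy registry: transform name -> class

    def register(cls):
        TRANSFORMS[cls.__name__] = cls
        return cls

    @register
    class RandomFlip:
        # illustrative stand-in; the real transform flips the image and boxes
        def __init__(self, flip_ratio=0.0):
            self.flip_ratio = flip_ratio

        def __call__(self, results):
            results.setdefault('applied', []).append('RandomFlip')
            return results

    def build_pipeline(cfgs):
        steps = []
        for cfg in cfgs:
            cfg = dict(cfg)                    # leave the config untouched
            cls = TRANSFORMS[cfg.pop('type')]  # dispatch on the 'type' key
            steps.append(cls(**cfg))           # remaining keys are kwargs
        return steps

    pipeline = build_pipeline([dict(type='RandomFlip', flip_ratio=0.5)])
    results = {}
    for step in pipeline:
        results = step(results)
    print(results['applied'])  # ['RandomFlip']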
diff --git a/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x.py b/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x.py
index ceada23..f3a298f 100644
--- a/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x.py
+++ b/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x.py
@@ -119,44 +119,51 @@ test_cfg = dict(
 # dataset settings
 dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
-img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
-                    std=[58.395, 57.12, 57.375], to_rgb=True)
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
-    workers_per_gpu=4,
+    workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
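The Normalize step consumes the img_norm_cfg dict unpacked into it above: an optional BGR-to-RGB conversion, then per-channel mean subtraction and std division. A back-of-the-envelope numpy sketch of that arithmetic (assuming an HxWx3 uint8 image in BGR order, as cv2 loads it; this is not mmdet's implementation):

    import numpy as np

    def normalize(img, mean, std, to_rgb=True):
        # img: HxWx3 array in BGR channel order
        img = img.astype(np.float32)
        if to_rgb:
            img = img[..., ::-1]  # BGR -> RGB
        return (img - np.array(mean)) / np.array(std)

    img = np.full((2, 2, 3), 128, dtype=np.uint8)
    out = normalize(img, mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375])
    print(out[0, 0])  # roughly [0.07, 0.21, 0.43]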
diff --git a/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x.py b/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x.py
index 41dfade..72ad914 100644
--- a/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x.py
+++ b/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x.py
@@ -119,8 +119,33 @@ test_cfg = dict(
 # dataset settings
 dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
-img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
-                    std=[58.395, 57.12, 57.375], to_rgb=True)
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -128,35 +153,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x.py b/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x.py
index 72d6e57..6214790 100644
--- a/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x.py
+++ b/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x.py
@@ -119,44 +119,51 @@ test_cfg = dict(
 # dataset settings
 dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
-img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
-                    std=[58.395, 57.12, 57.375], to_rgb=True)
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
-    workers_per_gpu=4,
+    workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn_1x_4gpu.py b/configs/hrnet/fcos_hrnetv2p_w32_gn_1x_4gpu.py
index d7a6817..f91df1f 100644
--- a/configs/hrnet/fcos_hrnetv2p_w32_gn_1x_4gpu.py
+++ b/configs/hrnet/fcos_hrnetv2p_w32_gn_1x_4gpu.py
@@ -9,8 +9,8 @@ model = dict(
                 num_modules=1,
                 num_branches=1,
                 block='BOTTLENECK',
-                num_blocks=(4,),
-                num_channels=(64,)),
+                num_blocks=(4, ),
+                num_channels=(64, )),
             stage2=dict(
                 num_modules=1,
                 num_branches=2,
@@ -73,6 +73,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=4,
     workers_per_gpu=4,
@@ -80,36 +105,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(
     type='SGD',
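Every test_pipeline above wraps its transforms in MultiScaleFlipAug, which is a test-time fan-out: each configured scale (and each flip state, when flip=True) yields one pass through the inner transforms, so a single image comes out as a list of augmented views. A rough sketch of that control flow; the 'scale'/'flip' result keys are chosen for illustration, not quoted from mmdet:

    def multi_scale_flip_aug(results, img_scales, flip, transforms):
        # one inner pass per (scale, flip) combination
        flip_states = [False, True] if flip else [False]
        views = []
        for scale in img_scales:
            for do_flip in flip_states:
                aug = dict(results)    # fresh shallow copy per view
                aug['scale'] = scale   # would be consumed by Resize
                aug['flip'] = do_flip  # would be consumed by RandomFlip
                for t in transforms:
                    aug = t(aug)
                views.append(aug)
        return views

    # With img_scale=(1333, 800) and flip=False, as configured above,
    # the fan-out degenerates to a single view per image.
    identity = lambda r: r
    print(len(multi_scale_flip_aug({}, [(1333, 800)], False, [identity])))  # 1
    print(len(multi_scale_flip_aug({}, [(1333, 800)], True, [identity])))   # 2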
diff --git a/configs/hrnet/htc_hrnetv2p_w32_20e.py b/configs/hrnet/htc_hrnetv2p_w32_20e.py
index 3de5b4f..b1f9ff5 100644
--- a/configs/hrnet/htc_hrnetv2p_w32_20e.py
+++ b/configs/hrnet/htc_hrnetv2p_w32_20e.py
@@ -32,10 +32,7 @@ model = dict(
                 block='BASIC',
                 num_blocks=(4, 4, 4, 4),
                 num_channels=(32, 64, 128, 256)))),
-    neck=dict(
-        type='HRFPN',
-        in_channels=[32, 64, 128, 256],
-        out_channels=256),
+    neck=dict(type='HRFPN', in_channels=[32, 64, 128, 256], out_channels=256),
     rpn_head=dict(
         type='RPNHead',
         in_channels=256,
@@ -65,13 +62,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -83,13 +75,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -101,13 +88,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=dict(
         type='SingleRoIExtractor',
@@ -232,6 +214,35 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='SegResizeFlipPadRescale', scale_factor=1 / 8),
+    dict(type='DefaultFormatBundle'),
+    dict(
+        type='Collect',
+        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -239,38 +250,18 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
         seg_prefix=data_root + 'stuffthingmaps/train2017/',
-        seg_scale_factor=1 / 8,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True,
-        with_semantic_seg=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
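The HTC train_pipeline keeps the semantic branch at an eighth of the detection resolution: the former seg_scale_factor=1 / 8 dataset argument becomes the SegResizeFlipPadRescale step with the same factor, applied after Resize and Pad so the stuffthingmaps stay aligned with the padded image. A quick shape check for the common 1333x800 setting (plain arithmetic, no mmdet code):

    import math

    def padded_shape(h, w, size_divisor=32):
        # shape after Pad(size_divisor=32): round each side up
        return (math.ceil(h / size_divisor) * size_divisor,
                math.ceil(w / size_divisor) * size_divisor)

    h, w = padded_shape(800, 1333)  # -> (800, 1344)
    seg_h, seg_w = h // 8, w // 8   # scale_factor=1/8 on the seg map
    print((h, w), (seg_h, seg_w))   # (800, 1344) (100, 168)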
diff --git a/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x.py b/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x.py
index e8dcfe4..153ce55 100644
--- a/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x.py
+++ b/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x.py
@@ -9,8 +9,8 @@ model = dict(
                 num_modules=1,
                 num_branches=1,
                 block='BOTTLENECK',
-                num_blocks=(4,),
-                num_channels=(64,)),
+                num_blocks=(4, ),
+                num_channels=(64, )),
             stage2=dict(
                 num_modules=1,
                 num_branches=2,
@@ -29,10 +29,7 @@ model = dict(
                 block='BASIC',
                 num_blocks=(4, 4, 4, 4),
                 num_channels=(18, 36, 72, 144)))),
-    neck=dict(
-        type='HRFPN',
-        in_channels=[18, 36, 72, 144],
-        out_channels=256),
+    neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256),
     rpn_head=dict(
         type='RPNHead',
         in_channels=256,
@@ -133,8 +130,33 @@ test_cfg = dict(
 # dataset settings
 dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
-img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
-                    std=[58.395, 57.12, 57.375], to_rgb=True)
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -142,35 +164,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 # if you use 8 GPUs for training, please change lr to 0.02
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
diff --git a/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x.py b/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x.py
index 3abf2b2..a334ca3 100644
--- a/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x.py
+++ b/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x.py
@@ -9,8 +9,8 @@ model = dict(
                 num_modules=1,
                 num_branches=1,
                 block='BOTTLENECK',
-                num_blocks=(4,),
-                num_channels=(64,)),
+                num_blocks=(4, ),
+                num_channels=(64, )),
             stage2=dict(
                 num_modules=1,
                 num_branches=2,
@@ -29,10 +29,7 @@ model = dict(
                 block='BASIC',
                 num_blocks=(4, 4, 4, 4),
                 num_channels=(32, 64, 128, 256)))),
-    neck=dict(
-        type='HRFPN',
-        in_channels=[32, 64, 128, 256],
-        out_channels=256),
+    neck=dict(type='HRFPN', in_channels=[32, 64, 128, 256], out_channels=256),
     rpn_head=dict(
         type='RPNHead',
         in_channels=256,
@@ -132,8 +129,33 @@ test_cfg = dict(
 # dataset settings
 dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
-img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
-                    std=[58.395, 57.12, 57.375], to_rgb=True)
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -141,35 +163,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
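The tail of every train_pipeline is the same two steps: DefaultFormatBundle converts images and targets to tensors, and Collect prunes the sample down to the listed keys while bundling bookkeeping (shapes, scale factor, flip state, img_norm_cfg) into a meta entry. A stripped-down sketch of the Collect behavior only; the meta key names here are illustrative, not an exact list:

    def collect(results, keys, meta_keys=('ori_shape', 'img_shape',
                                          'scale_factor', 'flip')):
        # keep `keys`, bundle the bookkeeping into 'img_meta'
        data = {'img_meta': {k: results[k] for k in meta_keys
                             if k in results}}
        for key in keys:
            data[key] = results[key]
        return data

    sample = {'img': 'tensor', 'gt_bboxes': [], 'gt_labels': [],
              'img_shape': (800, 1344, 3), 'flip': False}
    batch = collect(sample, keys=['img', 'gt_bboxes', 'gt_labels'])
    print(sorted(batch))  # ['gt_bboxes', 'gt_labels', 'img', 'img_meta']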
diff --git a/configs/htc/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py b/configs/htc/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py
index 4c95819..f06904c 100644
--- a/configs/htc/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py
+++ b/configs/htc/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py
@@ -54,13 +54,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -72,13 +67,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -90,13 +80,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=dict(
         type='SingleRoIExtractor',
@@ -221,6 +206,39 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
+    dict(
+        type='Resize',
+        img_scale=[(1600, 400), (1600, 1400)],
+        multiscale_mode='range',
+        keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='SegResizeFlipPadRescale', scale_factor=1 / 8),
+    dict(type='DefaultFormatBundle'),
+    dict(
+        type='Collect',
+        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=1,
     workers_per_gpu=1,
@@ -228,39 +246,18 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=[(1600, 400), (1600, 1400)],
-        multiscale_mode='range',
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
         seg_prefix=data_root + 'stuffthingmaps/train2017/',
-        seg_scale_factor=1 / 8,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True,
-        with_semantic_seg=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
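This mstrain config is the one place in this part of the patch where Resize does not use a fixed scale: img_scale=[(1600, 400), (1600, 1400)] with multiscale_mode='range' samples the short edge per image between 400 and 1400 while the long edge stays capped at 1600. A sketch of that sampling; the exact endpoint handling is an assumption:

    import random

    def sample_scale(scale_range):
        # scale_range: [(long_cap, short_lo), (long_cap, short_hi)]
        (long_a, short_a), (long_b, short_b) = scale_range
        assert long_a == long_b  # both endpoints cap the long edge at 1600
        short = random.randint(min(short_a, short_b), max(short_a, short_b))
        return (long_a, short)

    random.seed(0)
    print(sample_scale([(1600, 400), (1600, 1400)]))  # (1600, <400..1400>)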
diff --git a/configs/htc/htc_r101_fpn_20e.py b/configs/htc/htc_r101_fpn_20e.py
index de6711d..36584a3 100644
--- a/configs/htc/htc_r101_fpn_20e.py
+++ b/configs/htc/htc_r101_fpn_20e.py
@@ -46,13 +46,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -64,13 +59,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -82,13 +72,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=dict(
         type='SingleRoIExtractor',
@@ -213,6 +198,35 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='SegResizeFlipPadRescale', scale_factor=1 / 8),
+    dict(type='DefaultFormatBundle'),
+    dict(
+        type='Collect',
+        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -220,38 +234,18 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
         seg_prefix=data_root + 'stuffthingmaps/train2017/',
-        seg_scale_factor=1 / 8,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True,
-        with_semantic_seg=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/htc/htc_r50_fpn_1x.py b/configs/htc/htc_r50_fpn_1x.py
index 02da445..d77d60c 100644
--- a/configs/htc/htc_r50_fpn_1x.py
+++ b/configs/htc/htc_r50_fpn_1x.py
@@ -46,13 +46,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -64,13 +59,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -82,13 +72,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=dict(
         type='SingleRoIExtractor',
@@ -213,6 +198,35 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='SegResizeFlipPadRescale', scale_factor=1 / 8),
+    dict(type='DefaultFormatBundle'),
+    dict(
+        type='Collect',
+        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -220,38 +234,18 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
         seg_prefix=data_root + 'stuffthingmaps/train2017/',
-        seg_scale_factor=1 / 8,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True,
-        with_semantic_seg=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/htc/htc_r50_fpn_20e.py b/configs/htc/htc_r50_fpn_20e.py
index 4410a55..9bc49af 100644
--- a/configs/htc/htc_r50_fpn_20e.py
+++ b/configs/htc/htc_r50_fpn_20e.py
@@ -46,13 +46,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -64,13 +59,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -82,13 +72,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=dict(
         type='SingleRoIExtractor',
@@ -213,6 +198,35 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='SegResizeFlipPadRescale', scale_factor=1 / 8),
+    dict(type='DefaultFormatBundle'),
+    dict(
+        type='Collect',
+        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -220,38 +234,18 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
         seg_prefix=data_root + 'stuffthingmaps/train2017/',
-        seg_scale_factor=1 / 8,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True,
-        with_semantic_seg=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/htc/htc_without_semantic_r50_fpn_1x.py b/configs/htc/htc_without_semantic_r50_fpn_1x.py
index 8adbe36..2a4b777 100644
--- a/configs/htc/htc_without_semantic_r50_fpn_1x.py
+++ b/configs/htc/htc_without_semantic_r50_fpn_1x.py
@@ -46,13 +46,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -64,13 +59,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -82,13 +72,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=dict(
         type='SingleRoIExtractor',
@@ -198,6 +183,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -205,35 +215,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/htc/htc_x101_32x4d_fpn_20e_16gpu.py b/configs/htc/htc_x101_32x4d_fpn_20e_16gpu.py
index 2846c57..830a655 100644
--- a/configs/htc/htc_x101_32x4d_fpn_20e_16gpu.py
+++ b/configs/htc/htc_x101_32x4d_fpn_20e_16gpu.py
@@ -215,6 +215,35 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='SegResizeFlipPadRescale', scale_factor=1 / 8),
+    dict(type='DefaultFormatBundle'),
+    dict(
+        type='Collect',
+        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=1,
     workers_per_gpu=1,
@@ -222,38 +251,18 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
         seg_prefix=data_root + 'stuffthingmaps/train2017/',
-        seg_scale_factor=1 / 8,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True,
-        with_semantic_seg=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py b/configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py
index 095a81b..6c5dada 100644
--- a/configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py
+++ b/configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py
@@ -48,13 +48,8 @@ model = dict(
             target_stds=[0.1, 0.1, 0.2, 0.2],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -66,13 +61,8 @@ model = dict(
             target_stds=[0.05, 0.05, 0.1, 0.1],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0)),
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
         dict(
             type='SharedFCBBoxHead',
             num_fcs=2,
@@ -84,13 +74,8 @@ model = dict(
             target_stds=[0.033, 0.033, 0.067, 0.067],
             reg_class_agnostic=True,
             loss_cls=dict(
-                type='CrossEntropyLoss',
-                use_sigmoid=False,
-                loss_weight=1.0),
-            loss_bbox=dict(
-                type='SmoothL1Loss',
-                beta=1.0,
-                loss_weight=1.0))
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
     ],
     mask_roi_extractor=dict(
         type='SingleRoIExtractor',
@@ -215,6 +200,35 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='SegResizeFlipPadRescale', scale_factor=1 / 8),
+    dict(type='DefaultFormatBundle'),
+    dict(
+        type='Collect',
+        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=1,
     workers_per_gpu=1,
@@ -222,38 +236,18 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
         seg_prefix=data_root + 'stuffthingmaps/train2017/',
-        seg_scale_factor=1 / 8,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True,
-        with_semantic_seg=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
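The hunks above replace per-dataset flags (img_scale, flip_ratio, with_mask, ...) with a declarative `pipeline` list: each dict names a transform by `type`, and the transforms are built and applied in order to a shared results dict. A minimal sketch of that composition pattern, assuming a registry keyed on class name; the names `TRANSFORMS`, `register`, and `Compose` are illustrative stand-ins, not the library's exact API:

import random

TRANSFORMS = {}

def register(cls):
    TRANSFORMS[cls.__name__] = cls
    return cls

class Compose:
    def __init__(self, cfgs):
        self.transforms = []
        for cfg in cfgs:
            cfg = dict(cfg)                    # copy so the config stays reusable
            cls = TRANSFORMS[cfg.pop('type')]  # 'type' selects the transform class
            self.transforms.append(cls(**cfg))

    def __call__(self, results):
        for t in self.transforms:
            results = t(results)
            if results is None:                # a transform may drop the sample
                return None
        return results

@register
class RandomFlip:
    def __init__(self, flip_ratio=0.0):
        self.flip_ratio = flip_ratio

    def __call__(self, results):
        # Respect a pre-seeded 'flip' flag (test-time wrappers set one).
        results.setdefault('flip', random.random() < self.flip_ratio)
        return results

pipeline = Compose([dict(type='RandomFlip', flip_ratio=0.5)])
print(pipeline({'filename': 'demo.jpg'}))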
diff --git a/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x.py b/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x.py
index 365c014..5855f23 100644
--- a/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x.py
+++ b/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x.py
@@ -75,46 +75,55 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=2000),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadProposals', num_max_proposals=None),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img', 'proposals']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
-    workers_per_gpu=0,
+    workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
         proposal_file=data_root +
         'libra_proposals/rpn_r50_fpn_1x_train2017.pkl',
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'train2017/',
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
         proposal_file=data_root + 'libra_proposals/rpn_r50_fpn_1x_val2017.pkl',
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
         proposal_file=data_root + 'libra_proposals/rpn_r50_fpn_1x_val2017.pkl',
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
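libra_fast_rcnn keeps `proposal_file` on the dataset but moves proposal handling into the pipeline via LoadProposals, with 'proposals' then carried through Collect and updated by the geometric transforms. A sketch of what such a loading step amounts to (illustrative, not the exact library class):

import numpy as np

class LoadProposals:
    def __init__(self, num_max_proposals=None):
        self.num_max_proposals = num_max_proposals  # None keeps all proposals

    def __call__(self, results):
        # The dataset attaches the pre-computed boxes read from proposal_file.
        proposals = np.asarray(results['proposals'], dtype=np.float32)
        if self.num_max_proposals is not None:
            proposals = proposals[:self.num_max_proposals]
        results['proposals'] = proposals
        # Registering the field lets Resize/RandomFlip transform these boxes
        # alongside gt_bboxes.
        results.setdefault('bbox_fields', []).append('proposals')
        return results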
diff --git a/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x.py b/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x.py
index 38af55d..fec7052 100644
--- a/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x.py
+++ b/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x.py
@@ -120,6 +120,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -127,35 +152,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
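At test time every config wraps its inner transforms in MultiScaleFlipAug. Sketched under assumed semantics (an illustrative stand-in, not the exact library class), it seeds one results dict per (scale, flip) combination before running the inner transforms, so img_scale=(1333, 800) with flip=False yields exactly one pass:

class MultiScaleFlipAug:
    def __init__(self, transforms, img_scale, flip=False):
        self.transforms = Compose(transforms)  # Compose as sketched earlier
        self.img_scales = img_scale if isinstance(img_scale, list) else [img_scale]
        self.flip = flip

    def __call__(self, results):
        flip_args = [False, True] if self.flip else [False]
        aug_data = []
        for scale in self.img_scales:
            for flip in flip_args:
                # Seeding scale/flip up front makes Resize and RandomFlip
                # deterministic at test time.
                aug_data.append(
                    self.transforms(dict(results, scale=scale, flip=flip)))
        return aug_data  # one augmented copy per scale/flip combination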
diff --git a/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x.py b/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x.py
index 2ab33dc..8e1284a 100644
--- a/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x.py
+++ b/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x.py
@@ -120,6 +120,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -127,35 +152,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x.py b/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x.py
index 26172a4..fbd50c0 100644
--- a/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x.py
+++ b/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x.py
@@ -122,6 +122,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -129,35 +154,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/libra_rcnn/libra_retinanet_r50_fpn_1x.py b/configs/libra_rcnn/libra_retinanet_r50_fpn_1x.py
index 1e0cab9..ab36054 100644
--- a/configs/libra_rcnn/libra_retinanet_r50_fpn_1x.py
+++ b/configs/libra_rcnn/libra_retinanet_r50_fpn_1x.py
@@ -74,6 +74,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -81,36 +106,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/mask_rcnn_r101_fpn_1x.py b/configs/mask_rcnn_r101_fpn_1x.py
index 2840be7..280808c 100644
--- a/configs/mask_rcnn_r101_fpn_1x.py
+++ b/configs/mask_rcnn_r101_fpn_1x.py
@@ -116,6 +116,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -123,35 +148,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/mask_rcnn_r50_caffe_c4_1x.py b/configs/mask_rcnn_r50_caffe_c4_1x.py
index f901c51..15fdafb 100644
--- a/configs/mask_rcnn_r50_caffe_c4_1x.py
+++ b/configs/mask_rcnn_r50_caffe_c4_1x.py
@@ -120,42 +120,49 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
-    imgs_per_gpu=1,
+    imgs_per_gpu=2,
     workers_per_gpu=2,
     train=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/mask_rcnn_r50_fpn_1x.py b/configs/mask_rcnn_r50_fpn_1x.py
index 59a84c8..04f6d22 100644
--- a/configs/mask_rcnn_r50_fpn_1x.py
+++ b/configs/mask_rcnn_r50_fpn_1x.py
@@ -116,6 +116,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -123,35 +148,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/mask_rcnn_x101_32x4d_fpn_1x.py b/configs/mask_rcnn_x101_32x4d_fpn_1x.py
index 051801b..74d6823 100644
--- a/configs/mask_rcnn_x101_32x4d_fpn_1x.py
+++ b/configs/mask_rcnn_x101_32x4d_fpn_1x.py
@@ -118,6 +118,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -125,35 +150,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/mask_rcnn_x101_64x4d_fpn_1x.py b/configs/mask_rcnn_x101_64x4d_fpn_1x.py
index 434bf69..18e4244 100644
--- a/configs/mask_rcnn_x101_64x4d_fpn_1x.py
+++ b/configs/mask_rcnn_x101_64x4d_fpn_1x.py
@@ -118,6 +118,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -125,35 +150,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x.py b/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x.py
index 682d308..0a523f6 100644
--- a/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x.py
+++ b/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x.py
@@ -127,6 +127,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -134,35 +159,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x.py b/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x.py
index 16ebde5..6f2a791 100644
--- a/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x.py
+++ b/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x.py
@@ -127,6 +127,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -134,35 +159,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x.py b/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x.py
index a6b7203..009b0ad 100644
--- a/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x.py
+++ b/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x.py
@@ -128,6 +128,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -135,35 +160,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py b/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py
index 07e72ff..b4b533a 100644
--- a/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py
+++ b/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py
@@ -102,11 +102,36 @@ dataset_type = 'VOCDataset'
 data_root = 'data/VOCdevkit/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1000, 600),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
     train=dict(
-        type='RepeatDataset',  # to avoid reloading datasets frequently
+        type='RepeatDataset',
         times=3,
         dataset=dict(
             type=dataset_type,
@@ -115,35 +140,17 @@ data = dict(
                 data_root + 'VOC2012/ImageSets/Main/trainval.txt'
             ],
             img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
-            img_scale=(1000, 600),
-            img_norm_cfg=img_norm_cfg,
-            size_divisor=32,
-            flip_ratio=0.5,
-            with_mask=False,
-            with_crowd=True,
-            with_label=True)),
+            pipeline=train_pipeline)),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
         img_prefix=data_root + 'VOC2007/',
-        img_scale=(1000, 600),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
         img_prefix=data_root + 'VOC2007/',
-        img_scale=(1000, 600),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
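The VOC configs keep the RepeatDataset wrapper (times=3) so the short VOC trainval set does not force an epoch boundary, with its dataset reloading and checkpointing overhead, every few thousand iterations. The wrapper reduces to index arithmetic; a sketch (illustrative, not the exact library class):

class RepeatDataset:
    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times

    def __len__(self):
        # One "epoch" now spans `times` passes over the underlying dataset.
        return self.times * len(self.dataset)

    def __getitem__(self, idx):
        return self.dataset[idx % len(self.dataset)]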
diff --git a/configs/pascal_voc/ssd300_voc.py b/configs/pascal_voc/ssd300_voc.py
index 551ecda..2a5756d 100644
--- a/configs/pascal_voc/ssd300_voc.py
+++ b/configs/pascal_voc/ssd300_voc.py
@@ -23,6 +23,7 @@ model = dict(
         anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
         target_means=(.0, .0, .0, .0),
         target_stds=(0.1, 0.1, 0.2, 0.2)))
+# model training and testing settings
 cudnn_benchmark = True
 train_cfg = dict(
     assigner=dict(
@@ -42,14 +43,50 @@ test_cfg = dict(
     min_bbox_size=0,
     score_thr=0.02,
     max_per_img=200)
-# model training and testing settings
 # dataset settings
 dataset_type = 'VOCDataset'
 data_root = 'data/VOCdevkit/'
 img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile', to_float32=True),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(
+        type='PhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(
+        type='Expand',
+        mean=img_norm_cfg['mean'],
+        to_rgb=img_norm_cfg['to_rgb'],
+        ratio_range=(1, 4)),
+    dict(
+        type='MinIoURandomCrop',
+        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
+        min_crop_size=0.3),
+    dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(300, 300),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=False),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
-    imgs_per_gpu=4,
-    workers_per_gpu=2,
+    imgs_per_gpu=8,
+    workers_per_gpu=3,
     train=dict(
         type='RepeatDataset',
         times=10,
@@ -60,51 +97,17 @@ data = dict(
                 data_root + 'VOC2012/ImageSets/Main/trainval.txt'
             ],
             img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
-            img_scale=(300, 300),
-            img_norm_cfg=img_norm_cfg,
-            size_divisor=None,
-            flip_ratio=0.5,
-            with_mask=False,
-            with_crowd=False,
-            with_label=True,
-            test_mode=False,
-            extra_aug=dict(
-                photo_metric_distortion=dict(
-                    brightness_delta=32,
-                    contrast_range=(0.5, 1.5),
-                    saturation_range=(0.5, 1.5),
-                    hue_delta=18),
-                expand=dict(
-                    mean=img_norm_cfg['mean'],
-                    to_rgb=img_norm_cfg['to_rgb'],
-                    ratio_range=(1, 4)),
-                random_crop=dict(
-                    min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)),
-            resize_keep_ratio=False)),
+            pipeline=train_pipeline)),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
         img_prefix=data_root + 'VOC2007/',
-        img_scale=(300, 300),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=None,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True,
-        resize_keep_ratio=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
         img_prefix=data_root + 'VOC2007/',
-        img_scale=(300, 300),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=None,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True,
-        resize_keep_ratio=False))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
 optimizer_config = dict()
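For SSD, the nested extra_aug block becomes three ordinary pipeline steps (PhotoMetricDistortion, Expand, MinIoURandomCrop), so the augmentations are ordered and configured like any other transform. A sketch of the Expand step's geometry, assuming HWC images and (N, 4) gt_bboxes in x1, y1, x2, y2 order; channel-order handling of `mean` is elided and `to_rgb` is kept only for signature parity with img_norm_cfg:

import random
import numpy as np

class Expand:
    def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4)):
        self.mean = mean                       # fill color for the new canvas
        self.min_ratio, self.max_ratio = ratio_range

    def __call__(self, results):
        img = results['img']
        h, w, c = img.shape
        ratio = random.uniform(self.min_ratio, self.max_ratio)
        canvas = np.full((int(h * ratio), int(w * ratio), c),
                         self.mean, dtype=img.dtype)
        top = random.randint(0, canvas.shape[0] - h)
        left = random.randint(0, canvas.shape[1] - w)
        canvas[top:top + h, left:left + w] = img   # paste the original image
        results['img'] = canvas
        # Shift boxes by the paste offset; expansion does not rescale them.
        results['gt_bboxes'] = results['gt_bboxes'] + np.array(
            [left, top, left, top], dtype=np.float32)
        return results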
diff --git a/configs/pascal_voc/ssd512_voc.py b/configs/pascal_voc/ssd512_voc.py
index f01404a..4fa7df6 100644
--- a/configs/pascal_voc/ssd512_voc.py
+++ b/configs/pascal_voc/ssd512_voc.py
@@ -23,6 +23,7 @@ model = dict(
         anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]),
         target_means=(.0, .0, .0, .0),
         target_stds=(0.1, 0.1, 0.2, 0.2)))
+# model training and testing settings
 cudnn_benchmark = True
 train_cfg = dict(
     assigner=dict(
@@ -42,14 +43,50 @@ test_cfg = dict(
     min_bbox_size=0,
     score_thr=0.02,
     max_per_img=200)
-# model training and testing settings
 # dataset settings
 dataset_type = 'VOCDataset'
 data_root = 'data/VOCdevkit/'
 img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile', to_float32=True),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(
+        type='PhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(
+        type='Expand',
+        mean=img_norm_cfg['mean'],
+        to_rgb=img_norm_cfg['to_rgb'],
+        ratio_range=(1, 4)),
+    dict(
+        type='MinIoURandomCrop',
+        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
+        min_crop_size=0.3),
+    dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(512, 512),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=False),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
-    imgs_per_gpu=4,
-    workers_per_gpu=2,
+    imgs_per_gpu=8,
+    workers_per_gpu=3,
     train=dict(
         type='RepeatDataset',
         times=10,
@@ -60,51 +97,17 @@ data = dict(
                 data_root + 'VOC2012/ImageSets/Main/trainval.txt'
             ],
             img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
-            img_scale=(512, 512),
-            img_norm_cfg=img_norm_cfg,
-            size_divisor=None,
-            flip_ratio=0.5,
-            with_mask=False,
-            with_crowd=False,
-            with_label=True,
-            test_mode=False,
-            extra_aug=dict(
-                photo_metric_distortion=dict(
-                    brightness_delta=32,
-                    contrast_range=(0.5, 1.5),
-                    saturation_range=(0.5, 1.5),
-                    hue_delta=18),
-                expand=dict(
-                    mean=img_norm_cfg['mean'],
-                    to_rgb=img_norm_cfg['to_rgb'],
-                    ratio_range=(1, 4)),
-                random_crop=dict(
-                    min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)),
-            resize_keep_ratio=False)),
+            pipeline=train_pipeline)),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
         img_prefix=data_root + 'VOC2007/',
-        img_scale=(512, 512),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=None,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True,
-        resize_keep_ratio=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
         img_prefix=data_root + 'VOC2007/',
-        img_scale=(512, 512),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=None,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True,
-        resize_keep_ratio=False))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
 optimizer_config = dict()
diff --git a/configs/retinanet_r101_fpn_1x.py b/configs/retinanet_r101_fpn_1x.py
index fb68f93..837207c 100644
--- a/configs/retinanet_r101_fpn_1x.py
+++ b/configs/retinanet_r101_fpn_1x.py
@@ -57,6 +57,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -64,36 +89,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/retinanet_r50_fpn_1x.py b/configs/retinanet_r50_fpn_1x.py
index 2e82468..8255d5a 100644
--- a/configs/retinanet_r50_fpn_1x.py
+++ b/configs/retinanet_r50_fpn_1x.py
@@ -57,6 +57,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -64,36 +89,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/retinanet_x101_32x4d_fpn_1x.py b/configs/retinanet_x101_32x4d_fpn_1x.py
index 1b0aaaa..f31555d 100644
--- a/configs/retinanet_x101_32x4d_fpn_1x.py
+++ b/configs/retinanet_x101_32x4d_fpn_1x.py
@@ -59,6 +59,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -66,36 +91,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/retinanet_x101_64x4d_fpn_1x.py b/configs/retinanet_x101_64x4d_fpn_1x.py
index f5631f7..47c87d6 100644
--- a/configs/retinanet_x101_64x4d_fpn_1x.py
+++ b/configs/retinanet_x101_64x4d_fpn_1x.py
@@ -59,6 +59,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -66,36 +91,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/configs/rpn_r101_fpn_1x.py b/configs/rpn_r101_fpn_1x.py
index f1eecd2..bcda8c1 100644
--- a/configs/rpn_r101_fpn_1x.py
+++ b/configs/rpn_r101_fpn_1x.py
@@ -57,6 +57,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_label=False),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -64,35 +89,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 # runner configs
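The RPN configs above (and the variants that follow) differ from the detector configs only in annotation handling: `LoadAnnotations` runs with `with_label=False` and `Collect` keeps just `img` and `gt_bboxes`, since region proposals are class-agnostic. A rough sketch of the `Collect` step under that assumption (the meta-key names below are illustrative, not the exact defaults):

# Illustrative Collect: keep only the requested keys and fold the
# bookkeeping fields into a single 'img_meta' entry.
def collect(results, keys, meta_keys=('ori_shape', 'img_shape', 'pad_shape',
                                      'scale_factor', 'flip', 'img_norm_cfg')):
    data = {'img_meta': {k: results[k] for k in meta_keys if k in results}}
    for key in keys:
        data[key] = results[key]
    return data

# For the RPN configs above: collect(results, keys=['img', 'gt_bboxes'])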
diff --git a/configs/rpn_r50_caffe_c4_1x.py b/configs/rpn_r50_caffe_c4_1x.py
index caf0108..3d7d15b 100644
--- a/configs/rpn_r50_caffe_c4_1x.py
+++ b/configs/rpn_r50_caffe_c4_1x.py
@@ -57,6 +57,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_label=False),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -64,35 +89,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 # runner configs
diff --git a/configs/rpn_r50_fpn_1x.py b/configs/rpn_r50_fpn_1x.py
index 96e71e0..b892825 100644
--- a/configs/rpn_r50_fpn_1x.py
+++ b/configs/rpn_r50_fpn_1x.py
@@ -57,6 +57,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_label=False),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -64,35 +89,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 # runner configs
diff --git a/configs/rpn_x101_32x4d_fpn_1x.py b/configs/rpn_x101_32x4d_fpn_1x.py
index 2a3faef..709b5de 100644
--- a/configs/rpn_x101_32x4d_fpn_1x.py
+++ b/configs/rpn_x101_32x4d_fpn_1x.py
@@ -59,6 +59,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_label=False),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -66,35 +91,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 # runner configs
diff --git a/configs/rpn_x101_64x4d_fpn_1x.py b/configs/rpn_x101_64x4d_fpn_1x.py
index 182a0b3..b2946d1 100644
--- a/configs/rpn_x101_64x4d_fpn_1x.py
+++ b/configs/rpn_x101_64x4d_fpn_1x.py
@@ -59,6 +59,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_label=False),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -66,35 +91,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=False,
-        with_label=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
 # runner configs
diff --git a/configs/scratch/scratch_faster_rcnn_r50_fpn_gn_6x.py b/configs/scratch/scratch_faster_rcnn_r50_fpn_gn_6x.py
index cbb0e23..5621d07 100644
--- a/configs/scratch/scratch_faster_rcnn_r50_fpn_gn_6x.py
+++ b/configs/scratch/scratch_faster_rcnn_r50_fpn_gn_6x.py
@@ -107,6 +107,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -114,35 +139,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(
     type='SGD',
diff --git a/configs/scratch/scratch_mask_rcnn_r50_fpn_gn_6x.py b/configs/scratch/scratch_mask_rcnn_r50_fpn_gn_6x.py
index 97a7ef2..321619d 100644
--- a/configs/scratch/scratch_mask_rcnn_r50_fpn_gn_6x.py
+++ b/configs/scratch/scratch_mask_rcnn_r50_fpn_gn_6x.py
@@ -124,6 +124,31 @@ dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=2,
     workers_per_gpu=2,
@@ -131,35 +156,17 @@ data = dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_train2017.json',
         img_prefix=data_root + 'train2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0.5,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=train_pipeline),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=True,
-        with_crowd=True,
-        with_label=True),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(1333, 800),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=32,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(
     type='SGD',
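The mask variant above extends the shared pipeline by exactly one flag and one key: `LoadAnnotations(..., with_mask=True)` and `gt_masks` in `Collect`. Any geometric transform then has to keep the masks in step with the image; a hedged sketch of the mask half of a resize (nearest-neighbour interpolation is the usual choice for label maps, but the exact call here is an assumption):

# Sketch: geometric transforms must update masks alongside the image.
# The real Resize transform also rescales gt_bboxes and records
# 'scale_factor' in the results dict.
import cv2

def resize_masks(results, new_w, new_h):
    results['gt_masks'] = [
        cv2.resize(mask, (new_w, new_h), interpolation=cv2.INTER_NEAREST)
        for mask in results['gt_masks']
    ]
    return results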
diff --git a/configs/ssd300_coco.py b/configs/ssd300_coco.py
index e48a6e6..f34d52e 100644
--- a/configs/ssd300_coco.py
+++ b/configs/ssd300_coco.py
@@ -47,6 +47,43 @@ test_cfg = dict(
 dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile', to_float32=True),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(
+        type='PhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(
+        type='Expand',
+        mean=img_norm_cfg['mean'],
+        to_rgb=img_norm_cfg['to_rgb'],
+        ratio_range=(1, 4)),
+    dict(
+        type='MinIoURandomCrop',
+        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
+        min_crop_size=0.3),
+    dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(300, 300),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=False),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=8,
     workers_per_gpu=3,
@@ -57,51 +94,17 @@ data = dict(
             type=dataset_type,
             ann_file=data_root + 'annotations/instances_train2017.json',
             img_prefix=data_root + 'train2017/',
-            img_scale=(300, 300),
-            img_norm_cfg=img_norm_cfg,
-            size_divisor=None,
-            flip_ratio=0.5,
-            with_mask=False,
-            with_crowd=False,
-            with_label=True,
-            test_mode=False,
-            extra_aug=dict(
-                photo_metric_distortion=dict(
-                    brightness_delta=32,
-                    contrast_range=(0.5, 1.5),
-                    saturation_range=(0.5, 1.5),
-                    hue_delta=18),
-                expand=dict(
-                    mean=img_norm_cfg['mean'],
-                    to_rgb=img_norm_cfg['to_rgb'],
-                    ratio_range=(1, 4)),
-                random_crop=dict(
-                    min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)),
-            resize_keep_ratio=False)),
+            pipeline=train_pipeline)),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(300, 300),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=None,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True,
-        resize_keep_ratio=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(300, 300),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=None,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True,
-        resize_keep_ratio=False))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
 optimizer_config = dict()
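In the SSD configs the former nested `extra_aug` dict becomes ordinary pipeline steps, so `PhotoMetricDistortion`, `Expand`, and `MinIoURandomCrop` are configured exactly like `Resize` or `RandomFlip`. As a concrete example of one of them, `Expand` pastes the image at a random offset onto a larger canvas filled with the dataset mean and shifts the boxes by the same offset; a minimal sketch under those assumptions:

# Illustrative Expand: canvas up to `ratio` times the image size,
# filled with the per-channel mean; gt boxes are translated to match.
import numpy as np

def expand(img, bboxes, mean, ratio):
    h, w, c = img.shape
    canvas = np.empty((int(h * ratio), int(w * ratio), c), dtype=img.dtype)
    canvas[...] = mean
    top = np.random.randint(0, int(h * ratio) - h + 1)
    left = np.random.randint(0, int(w * ratio) - w + 1)
    canvas[top:top + h, left:left + w] = img
    return canvas, bboxes + np.array([left, top, left, top], dtype=bboxes.dtype)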
diff --git a/configs/ssd512_coco.py b/configs/ssd512_coco.py
index 5824263..67fe030 100644
--- a/configs/ssd512_coco.py
+++ b/configs/ssd512_coco.py
@@ -47,6 +47,43 @@ test_cfg = dict(
 dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
 img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile', to_float32=True),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(
+        type='PhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(
+        type='Expand',
+        mean=img_norm_cfg['mean'],
+        to_rgb=img_norm_cfg['to_rgb'],
+        ratio_range=(1, 4)),
+    dict(
+        type='MinIoURandomCrop',
+        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
+        min_crop_size=0.3),
+    dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(512, 512),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=False),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=8,
     workers_per_gpu=3,
@@ -57,51 +94,17 @@ data = dict(
             type=dataset_type,
             ann_file=data_root + 'annotations/instances_train2017.json',
             img_prefix=data_root + 'train2017/',
-            img_scale=(512, 512),
-            img_norm_cfg=img_norm_cfg,
-            size_divisor=None,
-            flip_ratio=0.5,
-            with_mask=False,
-            with_crowd=False,
-            with_label=True,
-            test_mode=False,
-            extra_aug=dict(
-                photo_metric_distortion=dict(
-                    brightness_delta=32,
-                    contrast_range=(0.5, 1.5),
-                    saturation_range=(0.5, 1.5),
-                    hue_delta=18),
-                expand=dict(
-                    mean=img_norm_cfg['mean'],
-                    to_rgb=img_norm_cfg['to_rgb'],
-                    ratio_range=(1, 4)),
-                random_crop=dict(
-                    min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)),
-            resize_keep_ratio=False)),
+            pipeline=train_pipeline)),
     val=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(512, 512),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=None,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True,
-        resize_keep_ratio=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
-        img_scale=(512, 512),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=None,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True,
-        resize_keep_ratio=False))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
 optimizer_config = dict()
diff --git a/configs/wider_face/ssd300_wider_face.py b/configs/wider_face/ssd300_wider_face.py
index 53cafc1..6a4184e 100644
--- a/configs/wider_face/ssd300_wider_face.py
+++ b/configs/wider_face/ssd300_wider_face.py
@@ -23,6 +23,7 @@ model = dict(
         anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
         target_means=(.0, .0, .0, .0),
         target_stds=(0.1, 0.1, 0.2, 0.2)))
+# model training and testing settings
 cudnn_benchmark = True
 train_cfg = dict(
     assigner=dict(
@@ -42,11 +43,47 @@ test_cfg = dict(
     min_bbox_size=0,
     score_thr=0.02,
     max_per_img=200)
-# model training and testing settings
 # dataset settings
 dataset_type = 'WIDERFaceDataset'
 data_root = 'data/WIDERFace/'
 img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile', to_float32=True),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(
+        type='PhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(
+        type='Expand',
+        mean=img_norm_cfg['mean'],
+        to_rgb=img_norm_cfg['to_rgb'],
+        ratio_range=(1, 4)),
+    dict(
+        type='MinIoURandomCrop',
+        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
+        min_crop_size=0.3),
+    dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(300, 300),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=False),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
 data = dict(
     imgs_per_gpu=60,
     workers_per_gpu=2,
@@ -55,57 +92,20 @@ data = dict(
         times=2,
         dataset=dict(
             type=dataset_type,
-            ann_file=[
-                data_root + 'train.txt',
-            ],
-            img_prefix=[data_root + 'WIDER_train/'],
-            img_scale=(300, 300),
-            min_size=17,  # throw away very small faces to improve training,
-            # because 300x300 is too low resolution to detect them
-            img_norm_cfg=img_norm_cfg,
-            size_divisor=None,
-            flip_ratio=0.5,
-            with_mask=False,
-            with_crowd=False,
-            with_label=True,
-            test_mode=False,
-            extra_aug=dict(
-                photo_metric_distortion=dict(
-                    brightness_delta=32,
-                    contrast_range=(0.5, 1.5),
-                    saturation_range=(0.5, 1.5),
-                    hue_delta=18),
-                expand=dict(
-                    mean=img_norm_cfg['mean'],
-                    to_rgb=img_norm_cfg['to_rgb'],
-                    ratio_range=(1, 4)),
-                random_crop=dict(
-                    min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)),
-            resize_keep_ratio=False)),
+            ann_file=data_root + 'train.txt',
+            img_prefix=data_root + 'WIDER_train/',
+            min_size=17,  # throw away very small faces; 300x300 is too low resolution to detect them
+            pipeline=train_pipeline)),
     val=dict(
         type=dataset_type,
-        ann_file=data_root + '/val.txt',
+        ann_file=data_root + 'val.txt',
         img_prefix=data_root + 'WIDER_val/',
-        img_scale=(300, 300),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=None,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True,
-        resize_keep_ratio=False),
+        pipeline=test_pipeline),
     test=dict(
         type=dataset_type,
-        ann_file=data_root + '/val.txt',
+        ann_file=data_root + 'val.txt',
         img_prefix=data_root + 'WIDER_val/',
-        img_scale=(300, 300),
-        img_norm_cfg=img_norm_cfg,
-        size_divisor=None,
-        flip_ratio=0,
-        with_mask=False,
-        with_label=False,
-        test_mode=True,
-        resize_keep_ratio=False))
+        pipeline=test_pipeline))
 # optimizer
 optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
 optimizer_config = dict()
@@ -122,7 +122,7 @@ log_config = dict(
     interval=1,
     hooks=[
         dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHook')
+        # dict(type='TensorboardLoggerHook')
     ])
 # yapf:enable
 # runtime settings
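On the test side every config wraps its per-image transforms in `MultiScaleFlipAug`, which replays the inner transforms once per (scale, flip) combination and collects the augmented variants. A rough sketch of that loop (`apply` stands in for the built inner pipeline and is an assumption):

# Sketch of the MultiScaleFlipAug contract. With flip=False and a
# single scale, as in the configs above, this is one plain pass.
def multi_scale_flip_aug(results, img_scales, flip, apply):
    flip_args = [False, True] if flip else [False]
    aug_results = []
    for scale in img_scales:
        for f in flip_args:
            r = dict(results)   # shallow copy per augmented variant
            r['scale'] = scale  # consumed by the inner Resize
            r['flip'] = f       # consumed by the inner RandomFlip
            aug_results.append(apply(r))
    return aug_results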
diff --git a/demo/data_pipeline.png b/demo/data_pipeline.png
new file mode 100644
index 0000000000000000000000000000000000000000..6ac3fee2bc62753681b2e42a9705dd7eefbee443
GIT binary patch
literal 84111
[base85-encoded PNG data for demo/data_pipeline.png omitted]
zWon;Lp##=4>LYo3HFL<VA$IWev!acJ04e00zdN`{PzrO-g*9i8B3fxc{F|ab(j&d;
zosYBDYmcKbj&C?AhiECptLBYmeSst{InS4Lr@|CUo%vfhepXmp%Fi%cB!sFs&|>KL
zq%Or+rQC;l<|HD0EN3Y|o!wK)KOj3Iu3j{fcEE0-;+ln&l$Zv)9*x~rtl3*;aLW)_
z=rc(2(Ullcn;ZKnEY__}baAqO``+fpm{BDWd&v%)(<H54<MNbMpXEa!S7V1hy2EX3
z%;pP;q1${3Z0Ye|k668&N)h(hRWMyg?2YA)_qdmI1KA~Oa50mRrDOXb4igK4i-any
z52*|U&}QQc-wpBrJm)52ND}%~bXNX=eiAxgTv5F3lT%RJ7!q-5==bpG5w;^9fhAOd
zB`@c-Rh1@qRKK=^mo9iE+G&t2^o$;)2IL+P1b&K0x~XM&841|`Cd*?Za<m0`uOh4=
zNe`wjd^rnnw|sYkfDF;Fl!BX|8Wo!9SG|rank=ayS5DX}gN5Tk5L}7~BDA6$-Vd$~
zxl5?rtF78D^Y^YrTX~50Qfs{KQOJv^ly-Cdr;$J%!(K~J`{<d58eGNI^{rrfcFbFJ
z+;tGksZWbmF-`@d0PBbB-MW}9k9!C8vkC`tET#ssR4|}~GLIYe#os0Q)nre<`cKEZ
z;6$;4SAwu2EA@eM?bq`BJfHjcU8i{n#`kxF(Da^9$(jCCFt$fBCVWE;eaDe4SZ6Pa
zN&}NUHXz!n@i&N=Dv~DZ$d0ko-<M9-QyQ&t1@}ULeU07Lw-@qLnDz+YbO%9#7C^D+
ztP4PEq{HFP<*O=P!SSWb)v=j~xAp`{4aNe#8>dnW_a~yXMAJ*%s>c?RVT~*g3+Mfl
zL=T6Tgy&Iv?se4;C>sr(5l6syN`At%$X=m$`=;O~+RUF;uP`fGOBpbV69W;GH0tyU
zV<GO_Ey>YlngtRmsV~9O_91pb6RMHk@m5IWc;yF#4p(v^HKCNuJrCEoZ2Vy|xt6IJ
z8(qpugw9YRvZ?M#QBR7zyWa@4Kux$btca>LpnnAc(|B0NiTx(#F(ZbkABkRi@(_$K
zj?m6)kH11#_0}rdT+86TypworsMzT2y-cN?Tpd91*5DpAhY4e?PTEEZBKHP(vAJ(H
zWyxQ@?MX$t<qO!NRA0+=_O&iYr3%~i0aqUN$zv15N4xu|7Oe@49u2og$~fNx{8-y@
z71hNjqeQ7aNGK{JKs;(!N$=M-l{e8k+(VP2_3ljq?#5}yX-SXhp<Tq6(&0Uls8&N{
zXw$Y<(Tg(G_y2Il#-x{!SP!BUmY3c6wqqoM0LTZ<5lI;&Gn?<uCOSrSiWbH8d;wk&
zyzZL#cn*2*`=dKy)g~BjtoR-y82WHP3K8vtk&1tV9C&2<s){D>1DVy*R37C6R&c?*
z<%a~9p^u#Cpzk{JCcPXlus2T$E$I}pRVw=P-`xVh+-XT|;ykCw5}T0OaX7(`R-wcK
zcs|~RS~aR+zt2qK*P!p4^E*TFHyQf^scE#&FH2g&317!es#-AcoJfI{+ltrU3H}6j
zf7cDSo+sG!mR7X9&I<IX@~wYC`sE=R676Pp8C_Hme=ioi0YnRNA3p-l3ac^-mB>Y!
zo&+j$?L4%YcDZiSEn=x`px9WTa^Q;MhTAWhfLV&F@CFT>n+gRqTicYlF6#}e{K%*>
z1j?&N5g%fn<Cv|tNC-p^9(W^d7Vi)ql~vPcW2un<85IFbQI<6R4~u8n8|~1`LI)M%
z$5@U!?QnjaX{K>jN$fJw5e&SQQWFm;{ab*K$cNS297N@GQUFPZa<*2~_@;u}244ZH
z&j3J5826kOzK|V<ZnLs-oRtD`<@bKEtzsR!+1(Su;~>IS+0=(-YE@oA%uf+X*kp+8
zz96it1BBN1-o%+euZuo&i!q6uJiy6tU>1kc{uq$CcS<zRB?4d0dyW8RGyUni(<xFR
zI_mX%t;7T3M|^UknM8_qL7!g9z=|mJJ~KuZJohCKVm+q5e@mEEk(MDr;U>2F%yvEd
zv9f2l`mn}}?(k(5wjTIiQX-X4fbutM1f1|4mfdatP>nK-HbkO|C5@JE`cfZ+5>QGp
zz}Uw#3m*xW+**+^h1VsTAFQ!!b~kgZXZoDdcIt}ME$M*fkLna9K%JF6USVV5r3_7!
zmE-^}G4MBCEoZVwKF7+m)~<>g981Sj>9*Cp0HDG1_-I$bwgu-wNu^O@Fwgk`O<auL
zV!~o)=^!wNOVz~k0gz7~Re$Y9!m0}6uD}ZSL6j8=ckRXT@n+M64VF=pK50DrzewUQ
z@38ef`Xf?9!>AOe+rLI<XplP=ZLqvXj=nr+WN^nA#(ik@i^M-#DVCKA9kcQ)R(R;^
zXTq#d9qraZ+;Us@HbjBSuOy@s)|T`?YSb@EhcPro36gS-)rSA7UYl5)bu}-su}C%^
z@s$8ZDMF?J7{EkrF9TGeK#Cygk1}~tf$8EVLSK6Q;?(UX)tK8M)lIT!5E$_WYLZ_&
z#$FMQgGVt(?Ff;+F%}=%m-g?d!fdnDwhT9F(&pd8VPVR?TQP$@D<ab}DIxSpiJE_G
z06-1ht?ZE$U(?3xTwC)pi%i-D#MoH5A!R$v<KD(4ML_1TcB+XKTqUQsayDT@97fCQ
zDwG_*#AoRCTBz5rnW`9Pw8yfwmNT=aQhJUtU8`ldC$McJ>l@<;Pip#4<>@NyjL2$u
zq0=i<i--Y7W}qC<V}mmS)IJ|iA^a^eZNy-0XEaLfCfVHHj+ZjSir9!j%;JH8P6^A#
zo3V))ZuIS=@WNJAkr<}g+-Q#WT8~KWeFwypoz^hS0N2A67}=^CXA^o}$0f~`)8MPb
zy*9*uZiRWX=OgrVoEJA0H(^$a`>Uijb7|g#pUH#t(YC(eVEc!@E(4axwKqhWLdPyM
zlrC5HtJ3a#!x+aA&h*zG5TJ3*!o$qdO_L*3WrN6X>I&Wu5>LwUH%|*X?|cEOWG?Em
z-9f?CrDyh!dAz|vx|uYfqowtKZhJ5o(t2{NbxHgbg#FRvcvx8$nE!CwhHFA0H)%2y
zWHe>WWlW}FI*B_HVbi^tC>{~Yq%M7?^B|K30EPbym&@{Nm|?1&1$pSdfpWa^f;4fr
zoz<d)CgQfUP?R0gn4oci6)JhhP0IqPFCOY(q;IHEY5FKW+#X^XD^7p-FDtI|YHt6V
z^D*0&4@>n8Y(xpp%va~Of9=E;WL0OrQ_pzs*L*WAzc%59pkY&jbTX!6&LBVv?&h;9
zdA28sBWa6~xc`mV9<h)_;5kLyE6xeks92<?${lu@a?{qv+t4(0EZ4;VDwMBfZva0{
z3a1dA+qcK3P9V>MJ*d(&lX+aUx~n2i!As+1n=;{48BFk%i)`CrCdv0yuiNEM%!Jzk
zJF{p*Ifa<t=NZ%2lNcNxCn&D&ji<i>cZ->-W)d;lm;B;Z5~dr2h|{df4dPxWEtRi)
z_mlPf78D;=LOyJLZ@mln&-hht08r<jxP4Pz>ta!eu^PGvRkr=9?AGkNg*s8%voU8a
zc&Wck5CA7C_2hrbtZfAUjKEW~0AO&Ah6)>eueuquEe?Cj+w>h5*LvxpgP?dyUOX*W
zLSY_N|I&?$iE5PXet{!rQcha5W*?b;EEVkePJYHs1FSGx#(8Q8X(0vURSeFJB2^nD
zwNzy_`0Ikv@J6Xf(+M*^MNO8xj|+xwZ%dW(&3$0e>M%P;|B(RQ7aHRC*Yxu1YAiN<
zX=-RQjDp7Exr>;8!^+>y15u0Q{VFA<u^4rnpOVV^llnmKAU+>U_fYpfrYxy76T+~+
zT#~q*@lKwQ4bl<Atte#mBb7B%dtPvUbZBKuqa)2b#f4#gZR|60aafU`uEC!@BfB^5
zD^{-w*!9@$DSZL23|(`{<c<5nVj<KC;+MVLd}dNMxtcmB=uenky((bE&4S3hm!j>9
zfRt|L*K{gm3OK8D^L>yxe~PD#NbocDo9CNM2DdWO_Qwvg*0hSuf14h+dm=i~vNvRH
zjkB(j1&0n82gUR-e9+<K@kTr=-R_Cw+}R#c=usWx_g5Nd3jHHsyn@%ggltw<pUmIk
zE*!-$%As>>zm61kR(eb0Nr%-RqpQDs_mw;gJem=|At;z7Wl?!v!*yNY7KdHquAt#f
z=*p?aZ-}mid=V(!@z<&=gugIH*M4xBk*zXM14eUU$ldh80TQC{`VESV^4dfjqr3Xh
zg5il|mp~Q}i^9BEe7|;I2~hr=<GZgi`hCM!W~vk!v9XORtHd0(>x?enuoslKg)qVQ
zmhZfomlM{`BiU|2yG$H7OTORsM8`XIN#5y6tder=e($W%vO?wNyh}i$iZF{0_*@*i
z0~xMJnU9C7yY7iM%huqG@6*)QXADEX&euGSAJKp7>d1`I)F#zzPJW7D=gP4fPqmWk
zW~s`Bx(ju&b!?vpz9dQA+JjwHNx4Wsn+<l1Hl%Mmc9ih03Xe)x<(o~ruu3%4V+3fg
z_m7A!=>N3?xs{zLDYt%Kw=0hv!?6D<n;*IDM^>}Z>OI}+_*MO~5L%G~D_2szCu*qO
zgB~C97QNr348`nECt#!>NGm#|oVr)7sp`_C00VluzCkv_d?lMjRwNlqL0qO}i6Xsb
z5(}uO0e~~5nwCwrQ6PFwd_N{oJi?UC_TQ#ejL{v6;_`7!qrSuK4{~jQkNgSeayX(t
zv}a)!1gDaXHOb#}eqV)6BgpFI(OODjq8x>J)He;Vm-wKG+x*oOad#LZ)$c_sj-xw)
ze1`8I9}_rR@MRbUI=)XG`Q3Hyis}r^?|(QT>2&<b=9!cQHUL?D*)c@#Qa|lL*1rW?
z&Q>v1Gok|9Ey7}C`&bHPfNWoZrKkw~A-xzf`Ai&0_NgNVVpMp!&c9J4n7DEH6_FFl
zYL_@Q^l9gn0_N}}Jt{R{h{5x98#fcv6!Wfq`a{{KtJ^pmv4&HT56=(j)jrxvdrTy<
zb_1QP36eh|QT>a6fX!K;E}je%X^kTfRR&j%6e$6R>fRa<Aqv|IQOs|@=P{+FW?p_%
z*_y3RQIbi2a{lAXBZ&jz1}1L~6U37;c|3Xmoak0&1}k2|d*J>KntfHB1jWy}_Lab4
z>4b?vw;Xi}<EG>Lm{dQ6IrEKHeelBix*T^;|4kn_=)tGgk4p#^RKj)BqMd62b`d=u
z6*^GaMy~j9SN$Y)+gW^P`{6{TwT_`EPw07QYGirwy+8I#rz3*hMEV2V<oCt=8P<Ew
zb+A;*eCTtUyw@~3U|@1H*xQxMKi5AjH&-vslGwKZ`N2|}zMwDx0LS#wik8Hq1~Wxz
zwA;Br`sssgr#f{)8Ngt%z0K7OHkPF&7^n1iQMl^!Ezx>nodkpR->^?xCz=n>@qlG%
zKikUvMs-SS)gA}pZqJ68kj0f00C@PHLPXo`N>teB!p7`ol@^q4GbW;~3RHqsl~OhC
zvx<}RXa%S|6Oj5NZ#B+(({kV8X4xR`D6uBcURN5J=E|X~i=RH!A~)Dj4H`bhgOOK?
z+4235ACZWmu^`j2_o<FLyD$GktM){PAW8m0>+TKD0kdcGYKOW6@DYrTHrnf=TYx<S
z428`g36{pHpMHOLaQ`To;nzD4J`~8`h3&-ny@{;!YTu1sBDAP_kHXiH2oGf%XYf}d
zieUPqNXFOjrj2KhGXAT&wKz~sR?=yVyA<1KeUVlO^tFosu<8=(-;Vc2mj-B7tn(+j
zr6GZXHL+fYTV#jagK89T`~@Am?&|&A0&;ko9<V%}vy$<5*Kj30tLS+?&zx2aHwN=0
z^S8a?Pz?oPR*2Pj+?+rFP(&myU;wS86iUGEW__I$Zokj|*Iw>WS{WJ(0&ZpwTQ4op
zCc0Tvmuqs1`-Eh+_&_o`Oo3qjs)SAGkl!`jb0-Ii5Yjtnt%BZrD%&wFGVQ|qq4zHr
z(A+M>8Z-X&&5;2b7VD(q%3!XIQu=6M&2RiZ^Ccr5$X}fSN-|^fI75J-J?HMlH1o}L
zQw1mecsCwsVO*+N&qwvgT(cVr=zc4*L;OJo0S>?K80-H^F}x<#kYxBrL7&g281%r3
zAt8Ol7r|Ky=0&)MbEe*G(UoIbH9FdVR3GDW8JfD^@wcOu3Z8qv8k8nE9JhBPxzp5<
zm~6*fwcoia=qXLju*(IM!&)#|xMTA>U&t)(PY>w_YDApvoCFJTa*sj+7)Un?IH(K?
zx?Iiy*dSCIdjFpuc5qb8CD+YcxQeo-ad+LW$MuDUQovY89S|~QFSPOI+!9RvmnQcq
zuUXa;20oTMa^G|XN?W86CDJmBB&UVtXelM{-${}W14lvOaR1Q@xJg)l$4G514}5u|
z6gp8A;jcC+td5*Sal1n^6hoj5hh6Q;NX317WfNTfdy}Zp{5OI*Q$B41=h&5~hR@fy
z780$_;}#N~5^TCN5&~n~Ld`oU@^G=?|M&>SiI2y?a+z<Vge}%S+pC-cWz%YHuq!(L
z5oA>Vad&DnA8WAw#;5n=bb%VA-iY3g>Az_=fCpqw4nfaIg2d1_rTLzbsa1gR-~G7~
z09%M&=B3@cgeP7Co4K(;5>N#WF}uo&F8QS7EHS=uJj+&xajOkXs!6;IGPMtXQF(rQ
zmB!Zk>}H&g4X;wev3@3<^W3XKClsZ-?2hQmFm*w*&8f}g;~r5RG;MFu+0D13Qvf64
z^g+0v-oi#*G=Ww8axyIobfT^3@0AM4iw<;j=EIW!V!vAiRK#{oztX751<#}XtL?%r
z!q!$f<>UcJVza9Ho;ne*MnTCnq6_&i_Z~tyvF2RVaGKzXKJ`Z2V0Ipj_SbIr2koB8
zv9OGj{O#M%#~zgGD3_`MIB9!b<-2gLGx}_m!<y>%@c|hy9*6oS311A0FuS$xM%^eL
z3xP)27!s%}r#V}Py>+rUH5XoAMK@*i&aB-)Ik+axJnP@L!ME*~`d4$i8H6=Ls5cg&
z(_VtLgRanhn~7-a7LcDg`u@2!{w8ES<MhCvz8$TU$*Nqs*!-q_#yAx+yzzggXT|2O
z+_C#1nu1^jKkOMG-~x*O5&1Pt%b?jT<8rOYe+_0gy|!8{!3=NiVcMNNaY_FsyLnPQ
zewf;3D_UXJ5!K~T^(LmWK3k%mxgn6iJmVj8`)q5bZN>h}!hb=N(8ZU>&FaoQ|M`Th
zhkmvYM#1T}{lJp@bfNI81@iweqFBSC@gZwCH-yQ>N{IE8v0(B!2juD&e%Oa8{~k(Z
zTDM?uue0=#JjP9zN^qi!^PLpU6-m1gaN4WIh~uqG<}k>YYz&z4oEE2hF+0;gC(D1t
zm{p4-An%H>lE9F&HA@)29PMEBegl6lZ9PK6AKPgk9RFoMmODcceyrm$RV~Px{cnNb
zh8b5Nzr`O$^N9(Wc7%0Pz9@ey{h3MhYt9EV_q|H%;oPzkuD{~<f6kDLHFFfci(X<C
zf#rH!ukZ(b?XP(7n=MzNKH*^TivP*&%75m+{dStd+nKy08Tj8rKN&(5YX4DZ_~T(C
zS^`|_b-zo*|ID%r;ry0}EP8r_VgHq)w<l)E;JP9=`GZ7MLfuK(jqhwqw3Kk<bKZ|Y
zZYSVaB^_IjldzbNr$4VxX@&~YwnU!vJgvbQ4(d5#w5_?iT>Sc?qV%*75I3*^d`41w
zp9*t8++fSJt>ugJ3pkC>#ofp04WMl}UE*OndqAC_WikoK5lGI;<tRzHfN#8D^Qo}3
z&s=`}w59UQ@@Yx<H5=MY>Wd=2cKgdnR`T-DTt9zDQx#lKl7Cf0Snb32oFk0UuGw3s
z(<F=fq%N9+Wk|N-w2Wh|LZ7{T{RI69gS=8+y$NZFs63M=gSS>{#qNa7Mc}6!dS$3s
zP*94a;t|cbVn8GH#}iTk<-41im5Fk$ehB@tjP1;^hnJpF((adlghDP(rykH|M14Nh
zoKbByDC32_5xtE|8ptY?yB_OkVBa9h8F+5Cd#>WoxbwsZ(U~IM)qS2NFA4BMVV=Z*
zvVf5k7$&oiTnRth$IB#4^T2E79@lW<!WVZZ$_(C0cL(@tViX$ySHFq5!z9O;ww$q<
zmxpN?N!k;=d;gl}={VBYEQ+*tz;-P-lnA#JTd&<%`+qCOphwe^iuhBEL5~KAG2G<d
z{}*D6|9>ubb9*+hLK*qlS@id8f}ZwB@^{J}0%8Wu_jv%v89hRugfvf$(VU1B#mD#x
z_|#>zCL;BEI`B2};TfSq%5uOhaL{8sZ>w&{(*;S7`)Ny3lK%y58X!zlM-==)n+D;w
zpssZ@Xg8~3rzPIzh6mGt`tSWa#~Bb*jE5WlA*&FTyXEgj^lR^;+B9aZ=Pb;2VBlf(
zGx*p$@FAO!7>2UazyQzJxg;j4A0sO#xNaV9zosqppWIl<e|uaEa|+y^rwa!MN6HO|
zw^#Rm(SufAPj-r4zym#dl!@oL3N8?D_eK5vza~(OBL9P^=U)wj%KoVpt9Mw%;lkpj
z&-WMC*<$CnlskBEyyoao-!EPEe_VD+{2ynUWDe<3z{#}RLnyY7{tLK;%LRt}8?dXB
zcgjSMgs0cIDA98C2UhGbm4lH5*~x=CdaN6&IF;?Ni&wqx!(d`~Ow3(?Htvg|d8Fj=
zfR6o`__qcLXVS7*tk)LlupUr5Sk&zGkR>G~qT=7yM?BRHJ>?R&sa1A?><T#{QO=s3
zwwH3nJ=f$Nektsra_Z*bakjvREk^>SBQ^Z3sqqz;co+U5Mx{YWafnR%(E5tQT^^?^
z0VZTNWY8+OdB*Mt9!BghGh!j;rdQMnnbwvB4%t$Hx6#zqX#fY&UD<QwKO+^F3P?yx
z>S-AVPDTXGMWyXkgeIZX+s8OhXONis^eYYx)dvfl-Xkh<CsGHi7?-Y*ZpIF?EN(Kc
zZ_nHh{Ab4coJkLNR+LSP#Uuu1HT%8RPs53^cgX}#ZflmJ-&i?=&LG{_`5cTfhE}aB
z{?;zEYfrBiZo#HQV#!GhERZ@Ldd@56g_POkl&9aE>GlsOe>f?9e0h2ZE47GSpV84v
zGm^vce-R`4c{*i4FCBSxxcV+fQ>?MEC6PHrFTGC%`>}qxB(~0cv2!>Rn-`HPrCy}>
zpE4U0+$-(~fxRWJjI$BT0>YBb!jTFScpfb`&(ISla_@J9$79(7vC6#L-OEtex^Uf$
z$1$zdI|pVpymn-WsmlKFra1f@?AaQ~$LQrBiuk0;u%azwp74j7hLnEAIg98M2g#=<
zshn@|;n95)hinWFX2p;0?;cD>3%Q&64BLe(;?$Bk;S7}2@GNwk^TC){uE0V2WVDk!
z83KD71lQSZbo4bHOeby*XVk8j{>Vjskz{k}Ic$MTjTu$>U;V=JIu&BNhN3GR4s<B?
zrJ#p`dK(0&9jjaF7V8yqi2IBECyA&)vzd8p78bS~^x13C)zzJ_5}&#nv8Gzz_U?BN
zgu)mTKmu1?q&|(<RXURUhRKx%bghHsbaHsqn0svI*sLOxIF^Y{8kknxJl9j~2*$Tx
zv2eM;-7zO(Eex%q^pWG*Fr3D6Z4v3=s?(#DlonG)aoikyPZOkyAFHNsZ((50l#ZT<
zVhY%>hiEjk4gE4keN)lCweUR#i|M@tVtW>m%&xS6Cu=^G>hGp+rA+#?Zk*Hy2T#n?
z<@gD!FUkwh=CN1lf5J;<)tzUmI1gv^n6HFi`k7)Cge5<%3kOXz-U&yNxdkHS5vM-y
zO%Yh`&<KZu^q$U6EHi@(nM%AXm$5pXL2JQ|2b{Vs*Je~#ZMr<*Aj<;g!`D|UM4qSZ
zbJxGFmjHdy`PTJMTaTYxudlUwFNi9H?zO&;omP(7)ssp!7)cbSU-Dc(UI+;aGq0zI
ztCgJr!l9Qf5WR3t98E&W+iChF?A6R^CP<p50U_3ErI@+wwdzYcEw&Q!&|O;7@Iy{x
zGD)_G02cTt#YFE53Xpq<<Kly}tD_TD1wqV-=Ybqxyr!179b9LZ@0$p1P7AHsfyMwz
zW_mqw@5IR7!{d0|Nu}qOMJZaWl${FL_ajeRT}^FbhB!<ookXxS^;v^=ma6($xbe?<
znR<jlvBA4*QO$4|>-2(y3T`o!d`FoC4S)BG`l#g3k3>y1)EO)N_-THx3n9J_;NrBp
zq!^+fPan+pDjzp6jld^fa6DmNg`kH^0^&lP%u_H-h|-pqaMmF1#KGHu5X6+XIA{6_
z1!K2s1fLsyeokqREPnzB-d=PQ6^018f(RL+*$a@F6`S(d-1GSA;{LfSPw{E5j7SgL
zy)vO9LQlQd^_4aWKj9+SSak7~#-y4&ecutj+wnJWsp{9gb-BcK5Zsm?;^x@yHB#;E
zzj}RAUG$20hu|4%xK}6#625VLGI-`y42h`0;y1sMIO$<$oATkPz4v(n_}cq;VE){T
zissGnyi&I1ODucXN_!7}IDWiY19V*u&h?36-csk?Sskygvpbm|u4`K!Gf`Fq{dmN<
zG0CRBQlkfI6hVeEa09O9&*Bh8Oh9;adh6qy`A&KHL%CQ|1&=a_>VrL8rRP((tY+zj
zAy5qQtkY^V^Ewy%h&IEN2myMh<MX3C)XG!7zoNt>j97)}UXNt^-h~(IHh3%bo+W%K
z5$KsA8az%J@w-=yl(gAPQ^RK3-rMqQzw?F33;V?xrr!H*D#7z9a4)EHTZ|TF51;05
z#`a{J_GXS{cDLv1YPbE{;(lxl$tcVV;%7q#V_PJr<v#{a2Gzjr`O9*d*Wfi>&3rc)
z?w{46S{U{3g5`#38NIMM2xuqwsLp%rRNTe8u8ZedB-ZbRX;+3Db6v%+hDY6ELJ7LX
z1Ll1b#S&xaJX#b&8N#FS>#}7pWCcx<TfrjT7%aO|Hkq^YELC^8KZPQ#@_H`HgLg5G
zjsJtb%(BEESro#!C4pWMoUtQ;PVU3faEyh6PP6;%$IrX4$BZ5Sxxn;Y_^m<)0CnC7
zOo6}s#M2kIq~-*3K>r&-*1v)kFG)ayCU(9o46{_8kA#rd`MHVKVh-&yF#H$dSwJ>R
z2;kP=;#njnD`tI-$Sr*6r=oYBUSt*xUD0}B98dm7=oAm_y1kxm5rtraczl-hk!!j8
zKSFva;zKbTj)x%k7#M2++yC^@bQunV$|-P7Od(dzK6IzI`TpKPH%J^D5#eVQ6t)g1
zc1wu?tydZrW9wZ;)%+XV5Vw>o13v5Jrh!ieQ(V#ULx#5(3jqpl>l6(~-OD@O_|&n-
z(R9ij|DV8g#MNCuVE8+Id4~B>-}=Q}HirK!9YI{t|J${1f8t$cz5kuwOl;mhy?z3K
z_i#Dso~Lm*cUz}3qz{})GnlUG0ppjfkPp=9^I!Rm)d(B>f6cuyvXY)v_$CsV6Ekjq
zcjNZ3cTYqaCGIe3L{6BPd^rEdj7GWeqSJC9ftyo(`dNZs#y`c5OY(Pu*!zSScak<U
z!t<ke;ht?o4$&?|ZtMAtR{rgJ?jHI$e`QNab9Ddn+!_<o--12IKpeA~W&0lWU#ntI
z)AX*MqF2i1>VQC)NKcB}Vq5WA#*=eB$M03uC(YglMMU1ZwwhUH?`e*LbE(+5{@Nui
z;9_6adc@iNXSQlJ$#bpd(~(x!PRa3){JGNDlIwU*&d0Cf9kmwQgzj#B!!PM^-pX$!
z*T>C_nroLUL~4|e_;l9*L15dHaP?>4D65qq)6cDI8lDj?TV#lWt<@JlQu+qO_(Bv3
zMc&W-^YO@kAC&yx{7VXQb&nzq_+_&l+95LIHz(2>k*~j#&6JS88WSDrr0Sal<KB=0
z?A{pVJ*ni(i(OTy{)g-@*5Wx=FniY7FxNi>epy<oeQ81FO^Cnihq)AEWjTiNJm3GJ
zEBt@zR%E5V&2{Y$<arqHoehy>`6U_sL%?U5@2HXVoF(tg-SIHw0H9NRhA-Uxx0vxo
z!e?g?N=da(db>+vqHC~}QI_T)-M#Huxg=rritL5pjUKU3x4<Z{+hNep%^7{;y=J!L
z`$~)>$pGPq`&`>~!q$L<amLO%w+pWlx&t@h>~b0e6J6#QnK^19fx^A3xx~`@AkB%w
zO7qk=+abuCe_wEB7@4_GfvGF8cKRiRzg0#nF#sKQ&Fv=YnZ+TkL2e?fw0nlX1kJuU
z3>Fb#XZb~!PV`9#i^SO38*w^m4FIp>{LQ?lqAK7ZolH))qsaxd^G$q2vTrifQh`rB
zC#6%!>=7trP@sfhi&#OHr=x*rS{vVW2{i<*kdheXt!RRScBn_%W(R<q;`r^za*USL
zE5$|981dF2oj{vZTHl>#ji`asFI78S9R~l+yt0V!-_;XMGyEaCv3gdS6Me^UX~)}@
zyP2=NKe~=3`uS>`nJ^39z-XvIErnCi-vv`8YwiBAsLSyWGL{o20RJqGVbvjcr)q$X
zGyMbB?&E|?#Nw*fB-`J$UZKhk4Tk{B367hK?;Y#tHcoe6SC5WGQWl$iF9_Mfi&j!n
z-D$3=J6hgMeW6^`RR3aTW=BVJ@73fTe<daR+^SJ|4@;T)LdZaVk6(PQoy`e2+j_0*
z)wlSZi;Dy8NCNGe2Q9E!UAxTWxklGiJxS4~qs^78U%XUJCTj$aR)q`tQ_tzb5s>y4
ziPj=*i|hIT<-wNm5`=wPM$mPNTXvJV@>nHt-N?NYvk`UY=~27wpL7EP0^_ug!bkQ=
zKP9vtt9tNt4cSqUeYNudgUk5~mcHMfM=6g*YHV_AY+aPOLRGrQ5024dkX&b#*cnSy
zUD4vfRn0`J%V2_jfGU=j8<ys<_*z(y!f85*Kg539NPTl1Ve9AuugeZq0xythmSgR2
zF7NrvkD5K!3T44B@f6ate>`p}4NYp`tUzh-(7o2(F?zn+ylAhdOr{Sl{y2})j9Tb+
zH?t{`DJI-pt`uMGj9mNr`f=@x>Xrt6E{Bc>ppY3K89NnBt-&FYJI_c3+Z0LH-&Bny
z@jF5;sNjR|EOy0nUvBTvh9ipte``zy*NdnJ_CcD>V_k}@zz<6VgXKFfE`J0UYjK#4
z?^Bx#u8$N!$?JxUAzEWuZ$dR!QE>42B~xLr9XJnKF~iM$3ygO=;!4?R%!#9@hYA+z
zP9T``;@y1Ap;5Lc2>JqJ;Me!hU{|`nnoN5WU<h%@2zWL)&k{Zp{g!Z~g3(tK7Mf#_
z>}r1$t~LXQhWV**<*LhfZg)!*%i>!Inr%}YdTdjux&w=AWIs60L(0%7xIT9EvMFc=
zu%XiReC<u{>KW_#(mgG~^`V|tTp_dKHPp0(Y#&Kt;6bk=9C1+QkL(fx@wp5|%%GI?
z0GVRwR|(<vVKx$X53IE&>xI%m_~2(<orFf&10yIp!plpgogdp>7#3AlDEMJK_r=BP
z=9unTEkzF5SebljbO|CYXxh*pnfJ7;6FeQ1Kn+tnE(8;Ot4|BTnF&)%jUG7yP11)d
z<!-kFN4S5lPa`zA5g9zrd6{=~x^k7zujGUVa8@+sRfi-?SH!KWPA|*;(e$6XiTe*b
zyDVOe$C7`DT#T$`=S+7kA2f-sI1^0y_siI2^ll5Xey`5yc+jggsj1&w9`4iOj`7ld
znRA=;H>>lvguH<oT+EptcU7l%guvO9uKaQvc?|#bL4fS@mC1t%83KwPZo&1tQz{lg
zGZ9bA2%F+Ku`P=DF?2?Uc`5zN4?bgQQzl@XsrmeF7ac?Zr;tsVuYGizQ5AJ3G0}QF
zx~i|PWqf6=#<T(m4Su$+{)!kGSUcEJ5haauJdja8+P(y9-8{KQtBE878w*!mfEQp(
zc)yyry~j5uaQKD`67FAd_fC%WI<9Tr8Qdt&FQ;v|=aryp@RV^agvH`kcYvznqlA1+
zb%HT_ycz%U9tarJV^-TUvr&#&d_&<`X3Dn+o|DK=Zo+w3=wX9b6+I@|GGzCLgWcF0
zsUFs04DdnaMy-V8nU_}N`YFgx8M_=r_5i*{o(;aBrUZ0G<)_dnZ^m}47c5kL%%DP_
zW7PCa4RfQ$|M^tw^$<=d{>F2Y&@f>mYj3W>@Zo*u`w2JHgs32WfN7|5P|(W<x+dQ9
z)S9GkoFcZxo@8ME{0is$MkDl0R|Ym<8poa)5OS9QI`P;fGxSGc=vWWi<zpdt6jO2`
zxK$!dJ-iq>)?;1i>cwG`Qb3!Nm(6nt1H?0d)P&LatgqGszmQFKeqoGjtHMRZj@qDu
zB-alXSX~QlX{$hcm3$!3FwR*Vcl)D$sfMWIz0KSv8ap~$Z;|UwJX^2k8dto5b8^{V
zZVpH2S{1z?V{Zs(Rxd-kG^6N0mTt>$gN85tar@aa^2t@J3wv8Ha+ZdCsI9{2-&B<b
z$5}0Lf75B>hJYzce4G!M&9F3AgYJOw-h8^<%{pZaQgi~T$P*hHshbj@fL<MiAZtbD
zrOxQH{s?2<jx)p_90d1b*TK(+nX~00mGoKeNAVH@vw#gbjDv}&orIQD_oc72^4aD$
zf@%yi!A+gY@UODui>KfK)~@2EMItP4UCTua#^f42`7V>_blcMX$@m<uLscyISjC+0
z7<_2TY~4fqpVN@DZ4AylV;VeTTsx?}UwA3PwHN4~kI%3%8tabk!>!P^$nW4fEii<7
zWc>R01bqO9WAGev6EoWSL5FLU+9|q9clw)A{h!C@kT8tFnZ)6Z`xSAALUZ7HcYq@X
z#JmHkaUXv{*3UjM1fz!O{oLHaufV+lR64QNKAArk?(34cICq#bRG?U71G2M09FFfQ
ztA|<q^COYBIorb8O}jVu%uLr1%5Lw=@5Qlq4chG(eMpsNnc}@Q-8VDZiM1m<M>`}a
zURmc{R_&}gxZD3*Dmi8JtEBEOO%HiIr6yV*(mJQUhE}p=zit35E>hN|lD5{R*_<CU
z2dAV||2F4YNpzJ@+jjE!@$n1Xo6(+`g8a6(D9G7IC?G#8H_pi{epWdxNCG3px`-H7
zATx>(s>mtvDthQk%hz-VG%ziB%nS^c3b)0x#3Ht3;bjXqSa@9>9d8mBeZ!z^`4uF8
z1>ii6!d12)2$9qx+{xq*tMjP79z)4Bl-r|X)uUH9;2@j#*FCpg3JZkmPPk6{_pSGm
zjr4Md{Iov+e8g@lFka&~tBT9?5z}o{_tzh-EGrh-!$Ks@p}}IG<|I-#xy@3(&Oakm
zzyf61@_<<vY@nPUg4AA%ramt(S=^+72;+m#6~i6w0yfOJ{90?U(}m$p4gOjtO$`=x
zIhMMB2*^*bBW+5E^~F5O!a<(HKRpEVgc-*AD7UN>Z+M@^-)i{IYqV5qHel;ueg4U1
z`U!5QsX;CB2c*yox|jSef9zo+Ne=(y;QoxMAS+{{lb!Quqmh$}<^|4Ux?EeL{gbCF
zQ}&%6<d><>KzwyI_cni~A=^rw$u{{QB&qU*qZI=rc>ABJ!J7!Xmmt5{BFytFKkgzz
zJ+=K8>HO)TvVCxs^k88WH!~-JZ2hvLlvb%1H}_?ZFG8MxX6Oz#gXIH@*bYq?QyM&0
zn<8nt3$B7dMFIkl5`?y`bOtu1(faAmL&#y3<Hz+lgq>|l@+?wkBUoBQt9lcz7b5m)
z0d@T3t{zqjJ_OfXhf(srBnfbE#5i2#sQ>n(gfHJUihl$ne*nK*f@PZawHW37Yjtyl
zYg$ilN;BJDgOSXfPINS<9|!;sAdhGy@V<XEF7?hwttv(CQw@;DxH!pB28P=k4Cg*7
zVB$<-AL?f)yMnAg#wP?-4oGZ>Ju-!ZUGsh<{Ya#RCLYMl2CrO@-bdtZw>u0*rYVTS
zWYJVNiK9~I%^q`E=*UBcJBQd}e*LJAIOfe*cy_Z#N*v{NW%i5Yyev}V_o>h$lY$xe
z7lvg5lnEWqNvM|NVA9AKhF{Xg;jND(a}ok@DtQTwTn><$a*<xng@$iI&-xdM36LXu
z1srqZ!?DQ#)D|+D)z*fLY#a%%Pm^9Lg#N84;vGxVG70ME|DcztMu#7o&S%n5{-KN_
zON8@%sqZB>iQB^wqtt+R!g4EI-4+B%cs=94`IvTZep(3DJ?PAP0B>7I^0cUX!EW~J
zAW$D=K{0PF>dmKGl|@@D8ocA;fJ8ste>#vvgX6f`2T%IsJP%-0N7owmZ7{Z?eK0rK
zFkvicS@_`4q|0bTg<&fL2!IFLNOXlE$nb)B64P|!4;pO|T##u0?shiBZoW+79hW~&
zvj_}R&(JiDoBE{^Jm3D7?@UF|!7-?j<Cif1Iw3tEg0Fw`c*c}TBiv~aLqQ8r8err9
zFVfyJEXuC!`v!@jLAsGfX$fhhK}1lbOG3ICkdzp@OAsUmk*=Y;OL}MlX{5VF!1owD
z&+ELd>weyCyWi*i;s-Y8m^JHIYhQc(_utAF?R+VC9-VPTZpWDn30C!iMrQ}{Y2|&h
zNV#2;X2oO$Iyf#?+~1*lOj*G%B7>8kN<dYy*;(c+-+Tm4I-$?jk8>Lx)M#Hxnsv=Q
z<wQrJ<P+943H`x>H-X=4K~GE=tvL2gVLV_vjIrtwFW5Q$jnqp*G&~VrQA@csSMP~F
zm3CSTPn6K?)qc`ndQ!7e_#9Ti!R(i~Awz4FT&lf7u0Xr=ZK(i%!l~+6Fxt;2fiDPO
z048a-iypUP(n<zJFvmoBLP%*H9=&25!1c<>j|YKNsj3@Oyirt0Gk#yQ<j4298_xTR
zGBYUWcD*d4dq)K)J&no9+Z*;uG!GO5#`e6n#n+;yO)qRtXr7GLT}G0nwGDlpx;^c5
z6q3pT6Rk>%thVdnqm|j_S-zy)e&vMnawbC<Xvi^ZYTHPtc%u3Rp92@lDf6QNG5E4L
z8OOkOefx7lup}8fNwMYW1Zdm63aU_#>R>bo#%@4vbW~<m{vOmvZq4H+U29o3ejY3F
z0FaoE1yJH)ft(d--p7KX>e1!{Xu@%=g_5Fe=&g0|uqc*+H@`eZID*?H_12V40vm0Y
zqZ2PZk8_i{alDd3aMhGDt2~HaFzHejRP|tTo>Qim-Q$;R0cgZ+NnLbvThM!#dIgI*
zO__0xj^_A{4BkusMScJl@+}!I08wooh_Voa9pyGRmwyW!T7|LW0!G4ZIOl7xVZb&4
zhiwe{15~{59BDn*P@n<d#%w_<OP=}DE_4V0Yth7zZjeh<Lsl61t2`ffX)H-MpyS_b
zkjDX3l-_vHNO}10p<v&CMW>pDrn2s+*zKaWZ@cYCf>7(3N&DbkGFLOx03gs8wMQG4
zFW3bqO9L@FZ0M0cn0<PtAj)<fXn>IkfBB8sl$DU-RLE@8ow<Np8Dd)vS&%SDhi`xI
ztkR{4gRDR-o_?FBD|><G%l2?=Pzv}tf?3mDKl~Z9#V<~pfJ<z3p2z*-z6{v}B|!-x
zVnS@hpxrZ(8$Lk7hZ)8ZoD1o=04q}y<B>nmI02U1Ls(?|1RfSAl7zMjiERqUE3!hn
zlttL%fbm*Hz{uFZf+`8N8)Y1BYx1ZNrHgcoY^m~kst7`6t0Abi46z2$n4<Jgs@v^u
zE5y24WRG$bd!~vQDp^V4f`UjuUb|4(HgYW#B=$`$AaQlFZy6u$e1?;4*JzajUqZrO
z?S#*&9xce|5`<V=3XP|g=ZGEaz+}X<^6D)tW&m$?7Z~J`rLh=qpjPA-18%V&3p#3>
z)LD2Gj7ZH73{VeVT0zK^^IG!HvptFRKJLRgmgmiqi#EK&LnN^7zt5Z=JDTg=fCfV(
zAVjKGQd%J)y-QG5XT0zcFlcuRcC(4!y#alYMKj^vV=7D~W7ET`27VP@^$ssw(w#!{
z5Jma2uGg~MuW~UJSoMIVj$?h=cF$Ez*BLKWn9Ys1zyXQX3cw{0pRWT17*>NTEGrYm
zyl*gE%38~425pdXo_69dBzFeO@dn5%i<EP^GHpC!>dpesj#<0Q@0yKO10Erorkjc1
zw8GE7t95`=*U@dug0jaI1^4KKJ4HTN>zA9+XNSc_abP<tl8sq0O(FehNDVj8WlNtp
zaYAJl-F#F39ToHe=yrr@|7LiJ^aO7ffEb}}^N%$+sp0QvYu5tr<U>4v6OTK&BfW91
zM)2p-SJ_<QLD}kDb5p>Cv)XDE<8LS0#2N1#sQ7`O^QP6!uD1wGr~amgz{<;$Uj87h
zQ5cq{<2>_u{;yu9p4So9DG!tABB)lN#v36?FQ=q$RSuIJh{2whk6xe&V>kYr6tK)Z
zs?@Cbk-D1iBXsi5K=MkTH{<A#SA6hxWjo6a&MHK;GI={`n$wc4M0l`*g;7o?Bsc}c
zU}CtAp^#wwAQyz;M@XHHvm95f(GP|g6ebqAgh6<ltik}9!F$Qjv5V>%g~G9&x;Quz
z2)#V<mZ!o4!0f=cMHhX@eAfM}(Fr(JOUZ}aFF(@03(4f}lDeQ91C!KF4CvvHs!yJf
zZSS+|AFWa0y$s4psB{!GjGVOeSAWf#P(OT0ZPX~B|H52Z{1PXxCYV6oZTJz&4XImD
zmZ?%h9xf*#5DLE5d{U*XDPs9lvJZ&9PE^!GA(}>lCV-KT#e&G}2|5l28P0qA(Qgas
zW|)Rne{Kud4zW-K`X<c9UK{ybq_H?QFymcKPO4|HIQ?#EIK#E=JR>lXv^AG)8*`F7
zB1l{+*;<b5_2dOZAX-p`SJkr_y8+E7Fg-d$Y!W<HuDK_-b{}FMkc%WSMZek&O|vq@
zl3uP-+=|3gz<&|bE>$U9nMt#h`25`q)q|snS>zigg<i(SN*8Bmy@F1XXaNaf6&hkR
z%HjDEpabMpkQ8WHCZ4NZ05N|G${97OtA3wU(yWtp4n6=#o$%4f5blfxxeG}5DfQ{H
zvL(-`6kTf_zGRn_haLt(A&C~7kHC{0J}ud4f0M$od1;C#@oChSkUp;&jkNzwrOCA(
zJDn5{n4`~T8@iN|$1gzX1P-{ZyVCUeg)>cakWca=CD`B0de0hghIjFpJ-k)th41^A
zq}MlAM=vTC@`z2~`EJJ>m;y8EUUg~39x9#%@&!LWZ0=!K2fB?>G8ub+p-V(@?Mu9f
zSH!#8W_Kl{KE-S)q?H!K^~Nm%Lbhm29^y5R0iat!i6&-jmF~cAkC#@J%=p8C4ZVN1
zm%_l2885^~)Ucnv-4oTAe`Di~%%2hCid};t(^b^BYC>X*j%tUfl#cnDq9f2F5p-W4
zXp8vloapRoL3NQ6Sgb-crfnm!q4wP%gsNL=I;Bw$A(+SNl-Yx!))P-tz6lkf<$V7U
zg-|~l_I`VZ@$?X9`psqGuS3M4!oRv)qE*(0IT=yZ*nsBajMn;9YI2|`WC=PW$6yu}
zA&YT^LG8hkAWXe~n3$0A*%apS_%bq=G}uKA&)=(xxGV@&i0xGTjpVC(Tcz_T1p+r?
z5KA>JFzdE``!A%MbAKF$tkhRV(117A*u<5({u=Toly&=b3D0JkO(FN^_j!al)*z>~
ze!?>%^$DLxmL?`i%&r=D-U$pJq?z(~iv^5yDUkk)!J--vB=K*`<EmAxHo0>$wMqG$
z6jH@JVCrjHU<Aml%s)NAPUU@7>w|^`>ZgpG+s6~LlfZ#BLnHJk_7}~V1(nM?8=yVS
zp9I~2jQ}wEedC(&?3@kWYA)}xqkdwb=SCmI`H}o+z*%iRfHLO6Vd8yP#ILzqLt^i0
zGQ@sw3L4h&at$wmL)_yqxWB_EW}nId8}jJu{%*ST$koC&>WIVKpSOTh%)=?fCT=J2
zEEhgG`*op6*+<_Ej*dSP8@&iSF9$!8)1&0lu+3VAI?f`of&z~)I>=uDIFx`lo7(a7
zXO2Y|jKFD@L3pXA#|AhNf;;0$j0#aa+6ZGR*+u{y5$XKAUx>`VU1^KFltbw{>`zQS
zX6fvKhglmj->UJy6ZDG-@yZ>s_sd?58xpqkcelW6kN_f(eqpwB(jW6jJyM(j9QrGc
zq4!$efb8fLSWE8`*C{4jU6WRsX*Tv@bK{u07zo2F;v~@ePyrw5{rgPPCe#;SKi0ws
zwy%n(^8~>GOnMLg02+si-V$l8-k=9^=OpT*<9=+p?}`Wnav(oCY4|M_IZP<ta+yKI
z&VurgKqcPIk_nYC<$pA(C)PT1Sq2qjQYa12fpp+wEsm7IWb4m*Y#AQB?FEG%4dE1q
zay>Q==Y&id50IPbo&kAaQi=Mj{q2?(ZSUVdBeaYPHt>y_{etqeR2?U{_cgBz&dc7|
zo$cIE+$FLQ{6dM>f=<;EbF1Zb=ZD%er2JL_D?nZi_`Scb;L1PjBzfKv@&CZtxqR1M
zqQ!l2gsaj7mV0tgc7C*e<Ks=ewhh4E5`7EFv|@}~m!h8I=_3KwcBX66Q^83_YibC4
z8znxPG1w~(1R{XeNhN%JUgO@u`e86cNF3uDheNP^%t>}K;lOe2)oeh=Z6LE}C;M*4
zfAB%-e+$vjQTvTZzg5*A(yk$<mIIP+SqTZdp*-o{`NfC#sQE3&th;~%U|fV^A1YNw
zY786zZK;8b>TkCmiZ459iQnwAORO77k*sFI{2YFip}JC+*ut7_OWoj5rFta*lOSv1
zMwV<pjsKA7LD&Jy5A4^UFjcC^LUUF_$7uPfi7ES8FPz?uzVDQ@74$H>C20{AC(aDA
z3|feLI2C>mls+1}Yyvp`W<V?Er3_6vb;kZ)Nf&)P!tr6Re&^}#T}+$4Ut$k|j1A5z
zemz>4JnA#tYYlLI&yeAGwtWP^BDa5z19|=`n?=7?zfRz`Hc}(;_xGYv*82N{T$|)g
zR02AYH5mfgiF`~rJ+IY#K-j98y9cdJzsWPd;2;6Zp8tha{st=zmxVetYxszz-FUi)
zBiZK1vDlXlA3_3k)<c1&|Llk2Z!5YK?m-1`yT_b3*Tl8fp_i<BMqgu)yh$I6t-{~<
zXEJX05_O_(6f7t@Dzb>D@T9JCzV&n+6cl$>2XZPOcKraz2-{&3iYe3RupK2E>|JPP
z9jT<1`Q97Ek%9e4y%Oza{pcq7S@W0LWmAe(x|^;M9>A$z^Vu9t64`XwOxcQ{Sv2tJ
zm3$@z`1YES#YT?<uPVt8v0u9n^boczOR8qmU;x&w&#VT?fox>eX*Ggk_*QiTs%DUY
zD+Wq$(=C|3*6|0963@mu?)(TZEuRbOOf{cD$#R+3mAm@9_ox%{atv*4RkQe@TB~su
z=<JvIOsXuAR)%|zaA?ZRK!|tbFPUlz<XZZ+i2<ENgMzmgVa@2^Y&*h}KPM=P_;oFj
zM$qBGz`fE*AqL)CCc!<&^8=1(>3DOp<jg}quKg$9%YszryM0`heaOawf#VvxC}bH5
zZ;Sd$R1J~tME9LINn^U~(!uzm{$NT3s6L%y;<dGUbwAPFKWGh{Kf5M9x0zJm^xQQq
zpMS<p=#(F;Ppb2^eAz)L_<a}$17=G_)H{ydA3rcckfX9T2#=qU10mN8va!gmnmzW_
z9&$DGG|VPuIl+8t9SIpRYzJ(toMJtABX_wAwxrP`vAnFOcfh7H!)9-9qb&)oTZa4I
ztpS;34~$02-H^|K%7%Uzn7KWV2Z|<;@p&d)x|Zm@{qtHYI|9JfkRsDGO0XsY+8NM%
z17-An0Vj)Dh<zWXRYR}fVK><!?%ZYot<dI|t95c?AZQYO@Vu99dvk3Gm6~51)Lc0$
zD*V!`7sMK}J!<U`r1}94Ec>Dj=wepp*qE{y$ma+(Z)3?t?erOtK+9BpL-nfu1b{)B
z)qM&HKpf4Q<`1>#LQeW@qaAki?ejLEaL_7!Hh($_p*avHC0Uf9bEK!`Q@7;5gSaqM
z%a#Z6bDpuJJz|pC+)g$4J|R`fN_08Nt3fVe*oTKP{t&0S*K~BQWAtIH-HdTnw9y__
znk4p8xLnZ`sw);pQ7MA}efqufsdAvu;@N%4Fv^r-8#QUq!2Lwc@fg=)aTj`R)?w>^
zktl!V>8e9<*IL{~XYCqVmSRZ+_;bGwCvOMZK-Zmr)m>IcDY@mZ8dSUh_-*Ga#51L1
z+%}8-|MVn_y;gRyM2UV8wmbhT6bzmYa_w;IzJk68BXt#<JAOUJ+YR)uzx$y#ZJI5y
zld!4FP3j!^s2rx@OqyGVhh;(Vbb+ST8)S$7&&ecb3aHa`T^ASHfvSumYv~Jz%1yc+
zCCD|7Ph)HBf1M|QVYcTK+=;y&9+@#jx+~7ib@jH7Yt=}fM@_4Y&yluE)jDZR`gnnq
zd-d5UIq=tL_)qt4{}f|@mM(faEY{L>B6z?!d|bV`9V=;G@el!g0v_CeolnF^IGG$F
z9`lN{UBrX2a*))A@z^UP`UTLkr(XhI*`9d1w-+3Ljq?#n48g4owU1(opH?|2oPTKl
z$-xh@VnB;Y5DDz<g@)PIhO1{rl1Lz*TNA_`oSr{XsZX|#B3PH8_(uCiqdEf(GQ16U
z2&zDDw&F$r{i?8jPZM@rpF~5WB}fzTN#c7kVmQXunYx$n(D3T1q9fOX*I_Rr>nRh^
z@qL7Jfr&$ARJ!2<@1ywvgK2o_fP1EF^rX3-Jc@<S5pG7y6Fh@Bo(g>U<q5^L%7}+g
zF^e+Ayglh^+owV*>d_^z+vQb{sJsY2z8Gq@v}gCgYKQkJxyd4^G~qqVr5Vd#WMV9(
z85{>4tj33}gZRGfmsp<Cc^{NG20$H_zv>H4S7fgy&@eCIY!&y!PN0^>P*n<d?sqG%
zqhzRw0R737!hUXM88jNF#BoAV{F7VADnv|rolj{pr<i7ja&curIdPx+i0$TC7@eLq
zC@n?2A|G?=l83+1#j|d*PaW@efe(4+1pw!m?nf7Rua*|~xl^}~rS1_PX`s6kGh<Z8
zkxr<>miPXiK({ipD!};@?gJXZk3w|k)kj+7e(NnWjUIYfxW*Xms&H;TqoE#?_1zxj
z3p#Nhl}*n<m>pUTt19S;N)xKT`n=uo>vxsQj>Th7dQsZ}UT`@rB$eywrQkX&cOc-<
z;CB{&%#>NO;T4DS(!)db7~*+`3)DGueq^u^U~yi?2SYk4el~1%JMwq*E-qP!BOKce
zC~T#?3M!P8qN}!xsT``-L`pbOHWO${8@>B+oT*@o-DpW$;?=-IH}Cj9?+O)Vp$^Nw
z#+){{e_6{Vr^9#mhFGIWMOxB_4_WaVX6GQBV*3s`(PtmCfs1ZqIQE4Hq$bxbU%1-J
zn7ag%@~2Ev(RBjDcj3$*G%le_{@ylTbA|6iteYDIdzc;f8QQAiqO)34Vn%ovnSu-)
zRh_6HhAm#@u(O7(_JH12JLbogjl*hp#F-7~c0pvcJjmx7K%6Fz9K|sTfqanh!WrB0
zlc{E<kbs|)=yFLO=@Gmwn^NZOk9+7V*yYhX>TyJodBi1=t_HAF{v5jbO^}R??GUYR
zQd@a$YP(wt*#ZD+msaQdP7;Kxm+BC+=*)KbQuz5|1_>$<?&uG*U^}W=Ih;~YU1Ef0
z6!BUe!WFXKilEs_^{IcAs+_1zwlP}$9#}}8ING2sLW^BqZ%dOIgmY$NHnNN}370U5
zQ7FW-^^+6H7$?*$FrExt`inDjodt{dg&~eFQVvGH@>>X&j)l-}bM420oia6=pxuDm
zzur&>%g(Y1FwjpKqG+gdye`o|(aCevS{gvsMFo777Oqm-hYcl-bu!-13IQcUc+}x*
zn$}p|je9>3dArr*UuCmY{-2hsW1;VH_ebh8Xzz`boUt20FR;CwUvRS7Cy<^3;Yfob
z7#z6wxka%G#F~vE%*LP6bUrsp@4YQyUk>0vBy>Da-TYjhFuKQ=w_(my&ZHUB#Sm_N
zC1%$_fDFyjqE~D&CyuQAF)y#U4Us?};y&|P78(mI6m&(%e)kJLM^^fSy^;IOe{c8w
zys}g`v7X`$r5I>_iK5W>BB#tds}R{D*Zur)51G=o3xL0Vh(;!p_qC<wPG&_rv&FMk
z07-?tsE^x=YLfxH9qRR?(FiyCbmz|5P5}A$hp?{3`s6fUuqs&vLG>I9hHnK*@adCH
z6_~DxK%-S{g2}9wmQ>)W0L|=UaL#`)-4WWWLE_G^qP}0)!!<yUvyUUxVLllzMFtL>
z1Rw3g4cq-KQa6jRG-0v4>cZGX+8eyuGI`N!B7Y+S1%RAWVJ;quH#2|dV#R9-4zj!G
z#CVD=84WLQ4|y}MhRF<1YJklJnWh9dS0~+IrzUTB|0~TDOh#I4z5V9(liP>v-Y7$l
z)c_e)DY?q4^iMpoZKCH&B7yg%IodO2HXH!3!{`iY%W|eejXg*5C+l)KzHEB;ipS4;
z{q0!3IR;0k9P54~xs&y1XBGBa;ii4BOPNcBe>m>oc*;AV^N_5_p4m~N=`jLjWJ$2>
zn4~g<VI_4Yy*9QKD8FX>VHRMg9OtIm!kMRmBI{^JSkb_zk8!1Wny^v(avUk3qnp5@
z;sY=)f4VA7eB<w_%X{HKQeO*P(^llQ6?)CQPcP%j<ZuQ$VZ&}W&21t*?wVBQQ9clq
zvBmw%!Pm~x^-2|rNuYY-;>hrNA9}nevMDQ>rjG<fNNBhWU2|4&SY&y$9j9C6tB~I(
zc1>5QPuUvivCpJyNd19U<Y=Nt@6>*}QG@w>;1x(?08D5Xzi+vIs>8^w8kcO0KC+I)
z;ezS$X8t{8R@OoVZU{MX?-aRi|LN^xk$hTEFtZpw9p8^?g7f%u*Y@jqYJeL*1Mm(s
z^NoKx=DPy8rM~Z$!YQ5Y;yxcy@w;Y?<tGA$H%u+RF11Ofb1lOnk>e_k5do5)I~AXI
zJ=4fFx^D#k!%Jk>7s1O%nyC}z)Ib)%DKlX)QTk#nSh+fu5l0(ca7^)4v0H*LEvvkK
z`_EVnB*5%FK;mS>*ZCrs$`fmxV1~iy?Jy*56c5CXpFKb{oROU!iovHah>1Ts;;n|_
zI8i!24`a#Y@>OIcmg!}kK|_o=%8(|pr{FDq0!J2Bj%Y@(pA1Tk06HkB=B)%Z8qF^q
z;59#R>XoYWdhiE-weltU2Q*rNeb2>y(BpMPYRr$X5GwP>@nFHO!8Yd;ya#_)o&N!N
z|LOPe4XA0_cxvqj1fR)~Ho`z(oFQ{emFN`gB-8ucgOiEvp;+nYZ%rO>Q#=hpu4%~Z
zi$)f^)wP}k;il5wX?m-T@r7h_bh2PZAx9QCNiZWXS_X>{LEb$8)rYmaVy{S+oNG=U
z1ABBNc#W^J&JtY{JQdwo_p$ylQ1$MfdSlkG;g^2O!OWke5FcP%g*_pr^r_iI@sCw0
z-L!qGI{20>?7>Bku|F5#kq`_w?NDm*ASvO4NjT>OUbsSIRd_J~*15C)qOWx_t;N&6
zyFU(5VR&ttpZYo_3oYH8>={t>iE^n>#}>);(|`dmdH92lWyM_cv{yN)eYAd=hmwto
zRYA!zZ>?KQG=LYI<1<`^rZT1%52GALlX68asjq^%Co9yD%80!0ob5$W^hx-r!N--(
z@b@E98>?uiarqYVK=c)MCVr0%Ukqe$D7;B6;w<Ccjd9^T@QCk@--QU}E`)!oJOty3
zWaJm@XQ``X$9+-TOG=m_1f?9$xZb#^aeo5;eqYiOmqWxf+W{*u!S<~&_-;s3MWme<
zJgbWzfKXQga?`6?HBmOz=eWfOxpxqra_MSUq2r}Bmi-r@f5}gGOpQBN^YQ;KuD;RP
z&Jd6sd6WK8ULLTA11zP(aE0%#A)9*gXnJ)l!q_g~g)F@ge5gML^8}Y@>V3VvN)1qd
z1=x6}1uj3{<YPILx`q8CKs4n1R42k{Xa9_QIME*{xw>?3ic3Mz7JO6tm*koV4J>G7
zxs#&nm%JXy9kleftC8FlN&mD3N3nlW(NC{n<P(}m84?EVtVONO#S3~4ITP@|YC{23
zNkD3aq5=Jvc`Qyaz!l3lB_Pb1)0h*o_X}Y!jR0IVp^R~)YhJ^GjOT>>F+JY&Q8BK5
zBOSwkx6ap9WP}h_L&h+IbAf|Q(K87J^qsBgErf)sbtW^_0}n{QwA6}PRJ%o$vI2^t
z<u4&rX`8VsSYEAQpacB{@xjtkVkowKetQ#h)TJI#S<tE2OH+v8+x?+y8B#zDS@9Qw
z<a}mZtw5CDXX$90LYQsX^|giI1~q_ujNdPpma>ylhe#576J&cVqvWrSr>=ekr)GJ&
zq(s8d$3s7*%=U<)4*@)^Z;lx-2umXhaCud<G!YQBZ8BsPSp6guVT<|sS-@rqpqAMy
zcv=pMn*pLKCeb_Ehr3yfJs{8#!lxJ_bic%)2D?(VymjFN(A-So15!ESe%pbrDf|9o
zHRgaNz>ZI|vmJ7BB)<N_g5GjQ9vK`9WSe?<kCg~JF4+1UyIlc#4EAzer9dZu#yQz^
z!ft4m-$**r`Rdo>T}7=5jX^R}_F?<D`hZ{)jnk=VKz4|=l0t%R0G<G=oZ*Rp))M^X
z_W%?#h4c39!y3=7N)#Zoxt|3YJN>qUxESN74jx+$u80u>D2NoCJ{u`QTf!>{b!HIy
zl2A22WBEmaL@*EtFj7w)VT!@l5$4footAYBCX>CX2!DqwjoJ38ktgqhUksWEThveK
zF_t1^Lu|}%L5*;o*yFh|Jgml#nqOHHnY^7OHocnqLVEZE{QuYm;Hr|<4?f_lNE+~&
z;$9qNY}*w9wmjtkr9+07H^YMf8|cmVlFUY2t+%N9({4P(-fssADOGns1LP@F5=k#!
zzbqr)?J%A)P#jqsC)u*T9Aj1jLS8FWfGu_h@urH|9gS{y2WV63(V9y#G+JJNCY}nj
z`l5jS!tPz~w{8_TD}FJp1V2{I{s(_tl(X;|yi~zZ4ZMbCpYZ_F72qm_%#w^zxzD23
z4rjaP<t>VVVCT4eAGO{*xpDh3xzARQX&s~lX0-dW!JI}++Zc#qo7SKse3Y<Eldnp8
zSn7=7MtTz{_j!{_Ine*jRa)ssRKs*$^A5RI63~qm1#t)br(hA#v)5~qa^(p)oRG@$
zfIrV_Dc$wdt8*5<%>_^|dnQ_P+cQ$!G5M+Zqa6sBY2Bh4juknXUMyhAU|~$3GeQ%n
zVqv$5s#NpzPe1-AEx_F}*lp-kuHbTX=R(BK#OzcY4p7d5KhMx-ql&QkEVB$2$og+l
zq|rv@(S&)kJ*glC&RyW$`6*6_z%7huXOxpBEkml7mhT&X)&*_9ZJ0zvGr%jrK0(Pr
z$G(Xdv6ZTg$J{-@6-bSq{I=6-*o3Hp{R_NQ?@4|5$i4^9U|4)ME6DiDFc=^`Ixsa~
zs;W4xe=Bc7iCr%<e}lPE)MG3ZHk<45^N|#d0yb-@TX|9b5BAC-T|ga@Ncx4R_tQD-
z%Yg_Ff2gb2s~+1he?ThB#<ESpCXkGGfs?7&DV!AV7na1KkFBtSf5nZEmF<KhF3VZ%
zPr^-lLI9$%1*sHcA#9S+3b*5<pEHg)^Wwz=rm5d!Mo%<G-klNGOac-R4GC=t8ELy6
zx(~6@Hs6?(YG#yc6U^lCtRoH6mfMj;Q=B7Mse#PX5PssUexy8+@Pt2Gz7(moBu#3i
zqwbSA0UogauZF)&1zp;1$$tL0#Yptz=+VNqpwK3LaeN6ppd)uYJzd>Z+HhNV^|%e-
zEG7tpJL{%wcaOF8S4U&2jsTJN10e5zEEZFp)*o4=4KW_SHx(c``Yi~im;Gfm)Gk2^
zD$pM6u3QUk_%X_v&#PzhAnC<|^gC#xj$i>~xC0;je7M7v)~2un-ZwXmdq<pN^a|u2
zZB;nflhD{wYn_ALLk5Yz+;&%>p>x-rlGe^^`>L|}P+cQk)hXOUhrk{4)*2^MfhI=t
z)}vVEl6F9cGHuZr<SC3PE-C|*)3RAN*DHC=-apQ2KX%6}%v4M-=rd`eep|_mu4fLo
zG`Ie*W@)k_8oS<OdIujUGFU8Yrr)R&$@Es2vHzh+ox4{zG%Mt#f{G@AOL5LSmeR$=
zu;(E?>a_(@u;^(;G2!R12+?-*w^TIo8)fFF-jf6E<erYkK;XfCAx1hx!!{Wul2KQ`
zAWXUCj=KX?!C6!3*aWnhW?o9+9vMWx`5udh-GbMW+!QoJ#jN{p>@7xY+|@mj8Yt@~
z1T+?~;L)QuP6~F-iw>y(Vy_Z2WECnyCQ6QEIBVb|S@mEZSm24q_$T%J4T>d1H70;R
zeWM(PFXW^Lmu;=&#`u@5QXh0_E)n?K0*F;yXnN)0Pi!qO7wFQ6Ws{yB5)aFHunTP-
zjV^Buw`X~}`LRz)0(Bg~^YO^&)g6X9nIB^KZ9q@#yvtciExqw{QRfSdHc*GhFo_Mk
zwzt!Qj2$>_gR_XN7aRTxlBy>*<sLTt*1-o_3jkxI;y}8f&$LbOcm7R|Wc<|JSbO?2
z&`0p+!XW{(cNjYP*gIYP6~R-w;;KMOub-vV+$;&Dz3x;)Sa#}hVQ`SmvaUOyT9z_(
zdR@gIZJ0((cXje0Ttmi;GaI11gcYTL8N@q?&)$AxvM^5m2NSYBiFjI)>B%yIsa~Iu
z45H(KU6aR20HS5<Mns(=s>A~0UyY0EvwaDC7f{YG1P#n7=PC~m$$y9Z8-b^;f|*DQ
zd!)FLT|PNkfta`rNZ9$r?Fs}bGq2*j?45j+Qm6aXunGU;2{chcMn)uBySr;LZ1(d-
zdWVRjM=lFNnzSb1Sd%8H=;Lw;QysVYP<OVLlOGi^PZD+^kAulFgVO(j9TxSj=-!5*
zkn#&P6cYfl7#*PIH2Xqce?EUb%F(LnqfDdBSs~OV7|=tXY3qCSJ+C{Zpag;=_RKoU
z7SJ4j?tOaI9)haZ(ZF3BL%4|@m}jS5lQ2{e#y4XqpTY?bnN)T-kRqM8YEGyl?=PkC
z*#1EG?;9b-7f>*S5YrUDcr(YRU8}q2csSX2q`xfGd=FtP=g)&b-i8&m8x#n>9D+D_
zi=cFTyjh?IJex-!>r&hGL?-CEOScFbvQCSq920kw5ECL?J{DF)WVw6j*t4VS0zU>m
zu1#6TJ>0cG@?~51qiY`X781z77X1{T;2%t9j4d#WOPPx+8^HAuRVbXB+tHq~rcuuj
zh|nKITAfcco*1ql6zUAU1cbA3&j11{^HxlUZ@%-PIGyHj<_myPrm5Zk5EHn@CYTW(
zJo|-<3p{S|xT5?S2Ex$+P7Ju9xRU^tGRnmyYVU*2cb%lfen2(;jY0}Jm&K*|@elIh
z&*kia3f3@EUp&h<7(rOP@QHs}^0<<Zz<eKgd&v$f8|Ysr<q#qSS%??EvMn!w-;#p{
zTq6TSHyRR70uyhxW1I(`sLKecQV-TUvH64b%YocA?=Q!}na{3p3qQPH7`0(aAETs!
z0K3X|#oA4cvQE|XGEZF?^kGOO5kE7CJR{l!H99Kry?`sf`vCgznzSVQJ80BDFj00r
z5vQ|fG?I;Z1{Hv~DWQKQtf&B1&~OwdBo6u!g_aW|VpsxvX*s&7Zxt?R4-4TsFs=)+
z?aIHeQ4wV$O#}xX&S+zi1peh3L@CsmtXS**3T>zhSp;<4>Wih*-FNok15jO5o}dfL
zrL?B<<_<0qVVl%etl>?10o|Ov<d9{=0ra&3RZw{EC|-l;hbfQYksfr@zb<Q7P2fza
zqtq^#%geiHqd#I8-J3+YlxVA#HCBSXc+Tjf`*Q=aSo);uC7&6L51F-*g?%AR(jy&*
zC8~{9qd!BIz}})J7VYnHlt>wxw2h8w0`f^hMGL`C4~e+P7^Up3Hv?yDx&G>;;;dFF
z)XLGai9jej5%$N<ILeI|NCf=3bcM3wbiz$Ts|Ew`(#&v6sy#CRlp`!L|8w*=cV6An
zQ8qx;`}dV+f`}$Rg!D+{>)cQNWQ^t*2a30x&!3=V-lK}+xjBxUKa8t{`*{j?wt1xi
z>0z|^5~AF31XX!`(s5-QW*v|Fy0FDL=1!H$<CTS1h<W!2uB9b^9*{r<sgRfJa0k~v
zFu0+jWsO$otnGvZb&D?Eog0)boaFw%_HoMaq-F5M;oCV|c^S$hK}i)BmUr~hZU`n<
z2UkZHzB_K@L4)+zESiFz5EhxHiP8HW$QNmgvh%+)MgRm?%grVrAr%#gHT%k&e{GKe
z5v`P77CbaG&I!+!KgnqkOdKS=UQ>u)`bb;olMr=!>xrDdY6-yXvQ#4Nb>!*CP2!_%
z$i<5)y8K~{u~Xg;Drt@cVSX+75fL^huTcT7ga)@=vi1#VlssoB=-$XQ5pnbbJ~rTD
z>sQ#4_&cvrgt5mj+BVH4_r}PRzN#*o3ce)=uG%xWkC-%l9U%%VH#L>7b8-lqqvDDu
zkg6~<w1x8`+4^xTt7Ka}VYP~D10^6wJq#?er*j-XiG7O)7(r!o#%>g2s}<7Y%L%3?
zC4U)8y?1bC_vLu>A182cMmy_=A&j1jfvHF2I18_$Y>Y>>_1j>wzj5Za9tKC-7-`)B
zf@3&@*xVPlDm&F3J0YtwuHf*>(4(4hh=LcZS5dlx&?`f6k$JuOY+U(1cZ4;<jP}lZ
z#LSl^gTj|&(hc6~!*^EGPX(owWJN33@OQ~bJ<d>F(|CvpzIRsh!_ricju~Z>G(a+E
z#7YB^Iwi?29($@C+4`-`s-}w2+`mQl9ejSK^vegIXR{x1E!CE>H37MO7t>6TMAVmP
zyzm=~56_Qk9-q0qA#w|^X&dafK?nHJ>@$C$!i<fOPWuCzhpvZ96e_-6U{*VxcP|_T
zUwF5nX&xd;&b&fKNPKjk$#F7c;)(Q8CuUw-c1t;3v$xbf{e|YeE$@u_dFkbANJutv
zad(m^kap{K^!|(!804b1sn31%P^DQVnE%CcB<u(92eVj*L-@6sx{BNfYT`Mq>n~jP
z&16<}CUBcsL3zR>0`F}}*0F)$5(xUqZ;F4tT|jnJG-+2Iav`>bLQSj*5IGlPG>fv6
zXoweUU}0rfE8#NT#kGA2bn<&e=s*^Hlf-*m-M8bJ628qLpfTQsYGzhZ1CCD8%gRhG
zpkfmqfFU6*UG}-(fGL13+YD=Ho%kkv<m{~vi3GOC2nAmMjfGIDUFVaFvf6m1Z`D@+
z=JzQiHPmf}h2S63C@n#2Ie{80zOI{^caypiQ-I8^tv`O?w%zqwuOIF2CFR757IMHl
ziVCJu_Oy;OAj;&@po)j`^R5D{=VUg#)NF57wAD(2gc?1yZOH^k1;=}P?#}Zp|15PY
zBjvvr)@D1Ot0y>A!rjq3as%p|+lS}r?v8^uyC7_dXRsfINDtNZQ4f#xoCv2wq+@XC
zvf}JEp{oyTpqyzCPaV_U&pl%W#LU?FWojB?IhP!#u1@v8i+>5jNOZx%*FmP6uV<$T
z@2sCJX=B|^hlsztNxjTFV$03Ke4@|Qn$0ANtZ-O0qJrCCLbN+-+n?}s3HsePV`%Ah
zkk<^%A@AGB`cV7dROrC3^6%i)Qt1RGR3O}T3;Hz37Z+%C_}|eZY*%+&(*{T<q1(Ld
zt^6s>06=DioX*^ieX|Vd?u!{~3M9R3CoNSPmNpAc#%UA$EbxotzZ9B1rcP1zG#p>c
zs>^{a)1lk_g~<{DtVfamrO^k1Nmcy)Lp0+rV7ys1MBY4_T=3CmPNj+%_Nr2{Zad(<
zIfC((tOMkwf|e^`eE%*Da<7ir|7Mv4nZ2kv0eF)^oyHqsqg`Lf9H${bvf(IcM(k66
z83`h}UGa_OB>wFt$tKUgM~>RICCY0MKwWa2FV%GB={|TcV<gH7n8PedU6tqC1vb(^
zx_nk@%OV2EQm`p~hCHI4`c4}hWS3!g#T)7TSMX=basC7PJs<$Gay?yPtpfN4axxR;
z=&JF1OCcqNz%@L@4j_Zynic+5#;jWQDmdAH$8O{i&7zwxMBjmt5NSyP_sX75O^w)S
z_FNk9*zphsU}^>Q)NT%~s6LXq=_(O)vQ4*?-t|QEnB;*J02i`=hW`__qXF=ZKrfYd
zx~%BkhZBKRYPcq%&@dq;uICEcfC9=~f#~S+Z|hddcy}K=Jv~JAtZ$pGMhItuA^o4C
zbP=NF>e*2yj`Vw@Q|VtTV6F+&IN9O5HlGjyF3?Qv>1dYzim{WH7$2cSE`Z=on3T|$
zvwTs1^DTJ;nyyLM#Z&lq<)>$5<f{~`0BucS0QecM4FEa@u9lq_vYZwY=`mTvn`OvX
z|2$ck^Vg}&;I;}iSVchX3p78wu0pGMIGXD5XN99*=>OH78Ngb4UQT_|cVs*MDe!G#
zHG!B@bxc9%&t9GN|LD8?)62Qta(`U9ky+&+q4#mWe!gg9PE_=c@3ilK9K+QUB26Wb
zyTm48YBBOH{_DpnJ3y9<d3dgqd)VMb5A5Uf9$2uyUxJ<Y?}F$hk7q-GAQs3tt0btT
zhGMYJCrtK{YvCNxTU#cBgd(jYGD&0(X*!ew9F77pZr|N_{Vg57qoyj`P0Xf81KzZD
zFp<`Q!21l1R-e&v5EIb<K|?HoZe7)gRvIg)Ce+NvbyT!r0cWRKMG;4BQJ~iPbl(}V
z_=MPs0vbmnnQ-P`&mw)09fI2R?sbA+_v`1b@H5u^TkaLWSSxduLP{?V!)ce-Jf;u{
z*8HQ*NBQBb!3_fui=KX5wt;k;2l3qY+#YUrH1r{Wp$&&T+dd6xyRgInp$IIjV2&TQ
z+NTT0(zJKm7;F6#c<EYG)%WBJsP&psR6%<~6ckuP*j=eMh(Amg5v5_eySjr5HH%QR
z#o8zo-mZ#YdU!chC(Ai*^8%%k6o@Wx(cdVkl@uqao0RxT>3l%Ad@6?bqL3xUAE&Ok
zBq;+_jkoR2fdcBDIX;rSN<Ejld|%^os93P-#h#H(bxYSL7JPUjD8%`G+z9Xrx*vmr
z)caHRRVyIJ!fduxG{nOs7N@zj`~hwZD2UxdJ+ZyALb|oGP7RzufhQJ=V5g$HyP!F%
z>kRZ!sS`KOhJ-h1hgl2H^XI?pVdbI93#l1^svj2q3?h~XcF@aCDUUI6^cG`mBPKu}
zX~0y$aG0m$7qs_xSQo}_Q((Xi`aqgUw%ob}?LB;`cMPzfpC?4-UOloWe|w)Jop5wE
zVgG|%eg#b9AKS%Qz3*A~Wwm?|(cG6<b12Nl-Ql|jb;tAU4POHEo2&OL+0un!;OwXz
zb(Zji{n3cYv1G|Qc+VhdO3sKvY>?eQBcM_*(T%t%L0`c5Yd`(lMcB`)ol6kiSi0}z
zuo?eS<D9=S{u}<xJ9)2af7I;zJF~tZK}}KKJ2PL6Yv3=9r&({YU93srIZM$p>Xq~o
znY2dwKlV=)P>Oi0&LK(q#8pj|uKr7=cz<THuDlNRPb<f2F9&mn&}ZeELfG|f|5++U
zc-#8r*%s`UIQW{ZwI4(b#R)(LB~Y~H-dd9wr_-h8$v8Lo9BYw77Fv#0#j)mMV*w|P
zOZ)iuZ4&pgNoF#kx1XK4nNI$3d4Vey5$S{V@Dlr?@j1}qXZDVf)nc?DuKlB0R#8CF
zd0(W^P3=<R2iHDtab5{6;yB=^;_k7oB4?JNb}CDE6r^IYuKV(YQ8m}4Iu0HY8D=I6
zVU%a2?pD>a;`GV=kIe-bq+rdECtz6ssS9-fcx?~Y!k-3g%-2xAE{R@taB#rCJ)~uq
z;d~^rNsF3WQqkT4SPiHBYt)c-d<z*`pSB_=o~G0o3v063v7boOJ9gks60(|&3KA`$
z|M@*7dglPPdYZ=(uHXf9Z~7~L&KAbQD*Ac@mpWt7)}0473N4SrnK6(F`}RR)!YzK#
z-2JN1t9KWAsqjQTOIRT7iBeCUA9wx|Fp#eQ*vdVx`Kne#+3Fv7iyecVvJSlbY%-Ur
zxDQ)xUyac2?tRaFyRZ!?f8-SOHa(W?^)g8LXWBA2cCjykJ|dfRLb;Mh4nJ^74Y3>l
z=#TF13(rl>$|<sDMdUQMEnQXTCB(bSA-zRjxsX?6q-ms$Rqr$l4#vWp)3bLO?h|^)
zVNh`PUh4ktGm}ss=zAVR#ss2Kogkj=?*n5lSf)XuSufwH++)oG<MlhG-TbJA{R=gY
zsAEy+iVNj?*G2)>%hP_K>R;-tJK%Oyfw|?`Sa)@s8!yeT9krSvK<z2#Kwb5hsj+=7
zAj94QaSO^(f2jKy%QrzVNL+Oy`Cqra-%ZY<`4%FU{`cGB5oz<6-TX!k|Mou--6()f
zpZj9!g!y}G{l0in_*xBjWrRK;knRAhzyG40+!O4cz>lwL+|z0ssWkPUWi|v8B*lao
z@<SPpAL{jSw;#Wk%_Yl>!=8fK_;K>E&Nt?N5FdY*InBVJKdZv_(%RB%>eJz^g)J0I
zdZ!1AcHawo=Q1R`+^{$|X)r=osc6&S9C$+AoudyI2}o2c0Cxajh?`pFVNyR3JhJ;e
z_$F>Qpb^&z8vE+RHazI%y%MIDL)-+_i5|kmpMo9V`fPbMO(`3Y{(Ds-DuK{Xo|hPl
z^3PYpPRBXCK$l?#-&Pg2Pu-?IiTewD>kfLG=JsO~xp-A6K6;n;1nA2U>5#$@;ypV@
zZGIuj=FMF%p4N5+T#j~fwfC@&S7_5+&u-aWGW;=xG9~(dEh&V#;641B-aN<dor;sL
z_RcA@L{msiizeWJ@|~oU-#)F#=5o5s?E5C&QKVnZ;&~f3z-|!2rgEQgtomp?mq$mH
zEl5NIt_$4DK3yYh-K6Gk=RG+r4=m*`OC<R3RRfqbEu8d2<c7@209v)|`d2ZQPsKQv
zo_y9*bG?_9g{A=)Pa>_V0qS`WdSOd97O`i7?NTMZ+DM&J%dFRQkuw571FXWoN-U+{
zwl5EfU&uN!v?(~QJj4snOXtTFObgs!y5|-Ux0)>jh(Fy&;8F)AoBAJy0Wx~`+dJ!&
zxBsvz1#D8#jMI{*w*;-iNuPdr?&iNo@y=;->Um6s5e}4W(2D&3^lD&7y9fj~jU6+*
zq&r5|j%g|9-nml<ywa7MaF2z^S?#zNm7Aq~W{WN57;9;rAx`R_(P~TLdGe9g-d!C5
zFuM^;QzMLIh(KGS><umO&>>;Anv|aJ6A$HfurdCF9R2@?f%N!3wq%)2=-Winz0Fy*
za4X}De;kgxX0On*73jP~3?ASGNB>iwMvN{D81Za%;s4<yt|G1|CKNej_S9f;L?J-s
zk0(h~z6Vo!pK40+fy*r0UbMXm=JL;(jQA1HyX+h4Z2o~FQ6aWG!ax1r{0%S-6^<@+
zV3mLS#{Vxwso$YNn;#gB8x>Abb!NmzC)cztx&%4oG+kA}<y7`{%fWyBbII`Yhr@68
zpWVm3i?^=^^*?c&OZo`^`XZ;yKMhONt3C!W*!aIMx#JxruYc10?mf7bcOJ*O`D6xf
zaGf4#VvsPcoF2F`x=~eH$6n98)r~ZOaNFHI0T*~eZQ>m4n9S(6&Z|be>zpY2_Or)*
zP?DX}G7mZL9`h&tSH8DeVK+f12fnQ6Pu7&bs~QbU=u8t1opJrB9@xob&f4kB{H=LB
z6>DP|-`+4~;Bpdv897w?>CEUFXIMhOS3le0G-p4L+aeQyGp%1GAB$+-PM(R8|9%@q
zv}0o(-!3xcgT793MDfXheW&$$kPE-KxApefVChhDV`fX5&)9dup$)bEp^|IeS9R~N
zHJ!CT?byUineIs7|M{l4Gj8|I*ci#Mnd+Yb7vss5s+4-4e{wvlFZj^<;c_}|QHj>Z
zIbG$krRDsFiQ)as@|kv>-|4cKGlz>^hpFD9zqSTuSaCT=y;*X;5B9gleVa~@zi=z7
zRYD)y*Y_2?8yZdOnQ!8uB-L#)xnTmg=iTIGF{h4vN&KYq6QA&x=P{`7EoUk4nW*Al
z-h=Ebe1f9<JA~I3f#ilnVb?(n@>8-5M(j*BxN)+j;_>sI`UiO!zd7oQyQ_V9UX6ck
z80o!xPcoHU?Y{D%%K~v<jo3`gRG5ydr7NZgl-6b$XG%i4noAYAZ0KJ~qA+!y#nxr=
znkb4h*^eli7skRR8G#?8xW-M)Tfsv3Q<q-HCX~r}FK%~AX2np8DY-f}KNC5A*;DcR
z2s!T;TEbcjCO!?t&CL5I?!cFjg_G+Lg?X3677NG{)}Jf7?a(`wvBwYSDo%_b^*M?&
z?OQ~Wzrqij<BLyu#B@4>!P^9U3AS8hahPn2rs5POLzuo7QzI~@X%b)_n6@qa%<5<f
zFZkqf3PQT}DEgk05MKSi&V}Olo$$2^Qo=L7r;qjV!+rro{t20t^GLH?EX*4|X`He!
z*<0!Rj5Z8$WhC*{!HNviw0-u#m|W$Yb+E&(C>~IhhI9>`jOzrX`TkBQ=J7~xl92qx
zhV2o*2Dz4g|0OVft(v3u|K<lg83|!(FUwqD>GRSxKBN~;HhN!&ES_wB`k4XDOx724
zeN6v<GgL-gwnLGh$gL7B;>yDd?LXOUrQZC&-*7baIKr&WaN49};}C?BQDdb_6d~U%
z<I1+b=HgytmGll&pBWf<*Vl_5JJzTQjbn27nRua~Pzt+GDzi=R<R$-DJt**&f<Aa!
zmcchGG!9rq(^!4FGE8wcF=n;E>u6x8FAkEu!HLD0mRVWvqg1;(+uFsUs=1hm?W5SF
z$Vh$voUkd0J?p1g`615fuQl8+$6GW8!$B<PxB@L>?$Yd-1DS^76bHgJ1BX;|tDpGb
znN<I~^<z()hb*qTVDO-<T%(F>d0TK?7e=MFes87E2VP6gVW&M(@1N)vKW7;nWKa3O
ze`-cCa!&Az+TcKb=z&MteE|n43+NU%mL}ZKd4<l%W%lw{F0uhxOT_{o-625TTBG6q
zz<`*Yvfkdcl56YaObPg8()!&@l}H8EvD~e$o#P~~v`POWdq?4C|6?QAXH+5I&<jHz
zo%qaR-J1%d_ia{s--RYp{hUgT)+bQ_D#?sFzjO@_uC6Uc{=lM({ecGD9Sdx`jJUzq
z^gw6ZjV|Va?#u{oz2fk2Tr4_Icjzgva5f2j3TY>>z{Sm)+n;?|mtqdupS!r`OQaJM
zy(|GUE|$)P+-(5JQpdf@z7Nfg^rc~DCG_!%8GgG|+7_tYX{==h_hgoxCULnG5Szn3
zxhcEzxc9-1!QI|Z9~;IDKy1k;_|z`Yg(fA!hVif)?&3_$vyq<<PI#3T44RY|;4e`~
z4Kp*D4=nwu)D)M8sI>W`Q{c|hh0X_+#mG0OBEqegx4XD#{@Q90PM`4a?8yD^L_a+1
zXc2ayO8FF>Uz>}O>>&pUubAJW{_Rm~|46*F-I!mC(tA&X6An3RnB0<o110ZcKbTLv
zLO*&#J!hJE(gU5M_UUBb%cyIPnta1^2o=0a(MH72?1|s+Ip26XlJ203j#FVJ7gh6!
zP2ya^JG)f9)Pq486JWY8@{3XC-^@nR5vB;f!>|!+{f5Cz+%rAK#ez@}!v4&5J6dke
zH}wqTrPH)5XW-cdre08rawO6X9}Ay44?E8Ps^rU0C@bea5!ZzCHB2B`rTh3In`g?r
z%~^EtAR~9CgIXk}W%|HeeK7UnVE0!^=(37jkM@gU_zc0ILn^EhT=bh$6MpCemBx@O
z$J}|iN~f($CNAHVjAx77d55nEV>RiZ_~HFW*x=JQuA27_c)MM`e`q@4bX_opp>q#3
z1<|f{d-VM<ef%U8F*{g03Kf)vl3-&a_0f*^m$xhPHu-2xjEgJ66ld{6ScEulD@cvh
z#q4Fb^6=SzNgC%-^`pIOFfb8vyN?x$?(&U<yd{2`eAi36FAwXLEo$BfJ6k@Nh-Bi?
zgIi~hpWH^j1KvN=mxMq6N_2nc33Lbw<XUzHB*WyYt>4_A_3gf<j-yiRY|tFsA>;j$
z>S=D|h>fH`SZn)S;n4%wQ)b}wup|?HgCzO2%ocb3K%v<E`#neVGgcakg{=XknYYib
zZsNw>Lwd=4-mVE4dQ5AXX7T!6Jx}hlH8ARMW70*II`ir2sC4HQL$c?HmAuvZxlC{K
zG>a2zGBmYW+#eRP+ZoeGb$Oi;Q%vx&rVm5Jw0!}^ty~?mjg>b4?Z(`8MW^KiHTBxj
zJ4_QtB;#L9F_Gt@+0v4$sWQoW-w2uJ?Yzl)3Lms93fi>cvK@yE3X!q@l8$e?t$DT^
z@J?h+Qp@|)k`_H%db+ze*^wUEV4ysbneEBcd!`*^#Xw2v-%sCPwQqMH>yN@BRBt4!
z@OA^<vnRgL0O?Tq{kq2~+WDi9wgfnN)~AWnqH9lWRKV5QO=KAliX+yVDgR7c%|kz3
zbD2JPR<u*0^}3>Fw^xWibo^6#`0KWxg|gC(U$bbT5M+k4-A=T$!8f<3_tRU22d)dA
zY_TWN`PS9BE3}RrzQO1vyF@h&JiV0Kyt3W7A{>_cOya3uLmhk^x{PerJKlP&`Q9B)
z(L*HOPqUtMoH1MSF3>yS#+|1-esc6-boCpcOSX|dmJ7M0)2mlAnWiV4Zg{C!p74YL
z9Yj#0daO3{RHOGCrI;Aj?19}|KnJI*IvOK_JyPU}AEGqF-OQ}W<T5|^SCur!$0cV@
z`T+8t>+XG*0mc}&z$hJPk`BmSq)oh9yrH~Ye2;RS*@Avs)~ay3=!<{tdy8~E-iqYg
z(scbKObzn~#;ij6lRohaG-y$O`p0m*3R5JG0DR4JOSFg4)|c85*GetZ0m_HtdZvP+
zBbXfxiVH=&#q6cgnYbpe1*!)Hqp$tcHPjU<9xZ$-K7Xq)_KL|AScAzN5mz|=T*A^h
zp7^CQI`Gc2L5Tlr!9E6>=JrzTLy4;!^<#9rWQVWB;y95!fNLZB80>?&!8jUz{L<Ku
zoEGT`sqfD{?&T*pr=R&E$DE(G^RbQF--MEEP+dt=VnO$UR%%Ii#bZ-5J|Kaz9keSZ
zJU(d3zK@l1GyJwd&~dIr`>^<(w#XKdMQUdLTf)_|lusH`0n7SR{x{X14{XfoNEZ$<
z&04yI1~OKP7X98=G|UL5ogj^^j{UxDJYHF$HhvfLeoJ~OXJXvj9)mQO#%vC$CEj2r
zn}n%%(yVuOX<v#$Euf7mqY^}V!-}!P;8E5C+rKzo<&%#r%uSlH@{#q{(7;lIJQ+Nd
z5~+L2{MV#Bh$8u|aEv9P*jd19hOCRB-F9Lzh~F69+kSaPkD9&cFmBqRx}tW#D`y(B
z$rzurbM{@I$?%BMV%SgP`^{HIgh{hvBM94-vLF{?+=?mVP0pQ<y>a>Gr*;3ZBgbB1
z>1mqW2wn7^_=A#YoqMphbhHK0+=d0?syr^ovfoS_&4yiHkr4A^pbehIwcz=foVg&S
z>>E6UV=!#KI!<I&6oATzF#Kcg4DpJz?7zPAx8ylHMc~&wU0#cbkrXCpu}Hk%v2}uV
zd^wO=M}4E{R@4s6+Qsj)C#RpXNgrgO{e&8AY4}o-7ne*63(muL*>cgLanAI}_gt7u
z0*=Y=(b@doyJ#)cLWzJghZ1N0kG<KM<u95ST2}Z<yy{WLC>1V;+OQTg;jEnMSWF2#
z#&OF1Jc*hvm01jO-4;^w+t<tC_nc7K4-c$n8~&QKc2Ckn3FLdPrzF1+JkK}-TjM@o
zF2DaJlC#U8-IIMz-7tH#F~c2cYa!!(`j*$hW$U5~UJR2i3-hbbreR}bBkJX6=01wy
zAz}>pzp2Pad@sl8w%&JPT>BD>UY?PBQ@*@V{e9^b*%z9*T<Cjou^Gk^18==WiT=cL
zFyHFvKy{ke)$+BqsNrx_sc9Mbub@ksAJS-S=(WU|#RO%MPj4b6@7aWMzjgBJ#}(z7
zzddXHP3x>hNgeR>npNaJ5rHCp3w3Ur@9)*cpw^H?iLd^@uR{JG>fS1<uCCb@4OzGb
zhhV{3xCeJ9NO1SyF2RFCaCcj{I|-8D!2$#c5G=sLJ-Ex6E8q9;yYD$qci+}-Yxj)@
z+8Sfjs9DvkSFbsbu~bv$1nHgx=rg?b?~N&o9^*Rgz9posCCB1f5v2GJ9g9A3ElpAk
zRmBXS`!*L-;hKt~)Rbz7#q_smsre<ElP!ysZj0XUi<+y5CY$p~9OXaWvklG<hSYk8
zGsICv=MyKjb`Htbi>`4t%Yr+9n?2CZ$VvFs>bcZBAA9~G4_r&qd~Z$vh^9s~@kSEc
zGXYLX05Ob3k6`6p!y|pImAIcR?J%Ooig-DrOBZ6&PR?qsHo6+Kp#4xm7d`GNQaLA{
zv}THA+v<bcHImSd@hY64YhXqHYjqltTYo&?PitLP*dh8+Dwi7QHeiu9tu=inaOaSB
zf9{OX$>BoexF!?vB9;ek=7s1JF0W*mIaj2r+Jx8^9UR8pnhB4!A(~z@bEhKHNkuA+
z!2z=aQbFZ)_~EzhH2l|x>jYkjQcXjUbKQ^P2j8~0_Eerim7FJsQlcxZBVB)LuzCfA
z%Ygj`LRy*BUUg<tolO3r*;AjONjr<|Vd<y$Wejh|%Qs*;sY%5%-uTLyK{?uDM=#P+
zyY{6SCx7AD3C3dNI1AH}{!`JC#L(L!^qt+m$7+{0b5=YV&WoZ1e&m;epB-&KVMWLF
zdtg_yntth8Yw9GgI!y&n+TP&{mW3$^>HIvv>KeRy(M3;4?>-D!dANG=F%e6?n5>M1
zZ97lbk4t*3NS_AgM+X%|Q)+&$HQ)YT?n1QFFJ(VR!+uX2T9Hq1Yb2j>^Yyjlu~JBn
z?Uevay+4odD)Y4A=D!&?k-`P})Qjlhb9~IbUgC;&DwXqdkN&m_IFc~Fq+XOtZR+)`
zLKK^t?_aCeR9&;yd@JJ>^Xq%R<~D+gUjAate~3tR&uS{Y4?57i{bv){-(R!-Ki*#h
z8sVcv0Ow%fT3QTTjWwV7|7c5$ypVuW6Qd}evCy%5Co{dleCbh%1-JhfM`S>x!IVy!
z6E(A5S&2B!vRv9N*2L;vc}i|Z;(e>S8r=3J%<KhlZB_&;ZxXL}2>wqeDZX4=`~+Nq
z_KBfhq>4k}Ur*%KK{ulpxf)%|2IEltrs8NVxwcU0hdse|w4I-l)}p74{JsE2-e>&l
z#uzVF>VX?VjQ$_o{i4bL#FQk=hz+>;zLqCrc#-jri9}0kevM^ja)1ke!dcOiKasX&
z>of$;?Sa2il6pOj_|a(&aJrEGSHPynnStl#!Pw8s?)){nBe?G8a6UI!9;b@3%^>d7
z>E^&^&lB+Y%GXK7YVKn_KP!6XN?RWP++5G@h_bhqd{w3om@1}`uMXdE<^TSaJYHM&
znDu8$;X)=!BdV6)OMr)H@p|@&LZpE9R)64g+TiYnN>&P8<jh9{R{0yJ>vNyIfXy$>
zY8{C*sfBi!dCC|Ayqy<a*JSyVs1IfC{-%mI*~*W_^4>{0N<*7dDNlmNkhoi>PFckn
zVtF<B_R`heX%qWs{io=s>~Wi*$MvV~3EB-iftzGHO*yj}(R3!&N5M|12aV5_H5^IB
zQ|@Ekup_*0c@%xJ5*c%Q*mflklM*RS7PV1JrP1neYkRqAJnN?O{m&!DiZRLzp1c}<
zJL2C(?)0BV2)?2`#A|Y<Z0f^SU<FoqonEE>xOjV09-!T@sV+$)ueoa*SEe4)ptyYd
zbp7YaxHB-~cCoAS{ylTrYuMrWXd*TH4BkuM5I!9}tuxJvKke2TT>rKUqZvjq^gnU8
zW&fP5|7W@)dHlBJY0&s-%sV%v>n^+N5lhn-j3gkMM6>OF(F;4okyD3ynR2dx$KG_s
z|3`mh)jX}`5{(932a`wkq^wB_;e-Tvg~;x8XVD=F;{3UkL!XTknzpj%+Ds}lv{~n>
zX|X5Mku2UmNpHsYo<ki<vejO=ezVV_k2L#Y*ZZE<L7M#Y^Ty(ZR9=yvJ|nY1JW@X7
z0rL8Z5jBC#74J)8$D5ZMJxnkX^9GHlv^GqcUMW`o|MF)Y;7Gzu^F?{pZ(~k*(S+%e
zr~jKR6uWPsA4PdIZ)3h6Wrp7u6exI~kB+4zmEkAQ&eS%aWb#HhEPbn0>RopzB@6yE
z9LP%WfBh3ZmMfX^jk?<g=g-qx9*kf275v{-NXC}wf|D%rNKb8WPjJX-bwur}J0=4$
z`X~czE6-`!zm#!7JW`YBks9YUqd}VncGEi_qSf{~V&Q3rh4?qfUru<OEp#+Yn_$=v
zS(UH_WeOLSw&1JfNj8>#Ddq7lAD{aXxMfhq1p%byMf4QBjZJCC3_V}DAP##$yiH;X
z3f`E*S<6kc8wDcf+44Dk<~2EXvDtGuz|Jm~Wq_TL3ww)_o|Vyop>KnEaOCAcvmGFZ
zIM@Q8i6>+*Ylom<%EM1D7ZgTSNM@GlUPrvkBX#{_&>Btmyniat1F@MOSYqhqpRB)2
zd?B~~@u{8M`jGZ)Bf<9?bGnP10k3bMQ{lx_diOz|41t_HcpOf5-SB4?%sH<xxwrLx
z3r;L3ZVgj9y;qk**;)~t*isx{XWOOJT{LLTg&X^#WhG|EmO1#YqC5m<Kr69*dg1u4
z;s}dFE{OQt7-+_JGR+&J1Tz?`qBL0waQ^V+F_1&&xzJ-0c@ocWo|<8BKvP}&e|(#>
ze%XxZjbn((Q^@O`X5@%tihzocbK*@00-rZ@75};FS^dvxK|yaxvoR7;p{p_^6VSq6
zE0-awgL$uaxG|kDes1`uobF_1gK1U%>tTQ+(a_qjdgV12u!iw+rm$1N>+4vvANxVj
zhlKziuist%7$P55{sj1a8@t}!rsw957H1cK{obLyz=wQ{oWY`RV&fCkWbu#ba80yY
z-o220UWyQ6LXkJS4q^{;I~3!kc-pUWTbEZMb{J_L<U-YbRE}N*qvR3ljQR8dY#|>O
zM2`_;ZQ&G>yrbN3oCLUu6ZE5R8KJ`n*)buL)8$y|AxPRo=rI^O<keejzKn@04ZqK#
zNP?a2sFvO2#*<YTU7eoXo(`ddkAvgCkh($BSWb5oSSwdfYHoG!b#<g>`M};$l(AI=
z=b173mSU8qqc7VM*!$>6CS-yDPdZZ*7oMP~a}tG=`rKV!UhI=-O?Xt*;p7Riw@UVX
zenUoXKuKLc7pSJMoH2rQeOmt%1HAVTeLEqKK0gIxC;R%E>Hf~<N%*=xpdX`0r9y00
z8XTX*{9Y+O+BUhpt&~RyVV*wH2g1hoYy53<JEmo}^=sTk=+AXSZpr*%>*pjt7C;=^
z;Ix}h<wydUzCRsW$q}?i*mEwO1wUk6Q3}_ViU$N`k7iMoy%mYaW|`5$y2DyozO2g+
zF%CGoS`k@&?^^nRDri1&<|+A({456#F{Pao>#nX9fw693MjIs_C8Io;g1xCT0Fu+^
z!v|66qI9}norQFA!v7?iSUE0AM@&IfdVs85p>%$WgY*(&TB&|7qTzC!of+e)fr5ub
z<a+~U#*a_H3x@DL9_KjlSItl#B*bO{&cC~!3#{#Njghd|yO=d$xN9zG5&FVnx_L%4
z*$C&4P<5kwon!=sdqR(0dvz_B`)NULi(_1#7qVt<Ao;5naJ4txQuuFXH&bobR$gqA
zIK0Tzp&$+hW0a4xF2%q(IAd`WT!{6(<0E71?ROQ<i}G<JVI4SgX7W(k|B0b_if55e
z1N<0u<_L196v;i_k~^XXUS#Wj*y9fbB+BFQzyT~y<b2x?Ip%`uSi?eF{1w~~k#n*5
zTlN)aN&f|SiEtA+H+!)V74Dp!G7GV6!H*1SoLAoj!<_E4!B=}`)@peMoTg$e*Dly*
zvTj5_LdaDFjAv;VF^e}~hIt8U?^DTz(<GUI`(PggSE9}BlB$LK?>e6VCnCpXkA;&i
z6IaJ5SEG>5UVZKOa$3<R52rZt!$AO4<>Kx4WJNl3Yc=htC@%6CaOG6H{IGeRzb$B?
z&H1e@Xs-_KwYiwED|D-p_d>JR{CVk7ANN97)oesW{@ugAwh!3&^{)5#1VrPYGibI@
zc@3|Ex9e~UVsn`A#A<LjF7#EQj=jW>BDMav)Ms`T7}hUhocXXZsT4K_Ulqo{g;nl*
zJY5Ecd^F4b^di9`ud<}j@t<XQAw6t>Nvgh0=ZH&<CBxFE0cQyu{vWY2s2(T4Rf9D5
zy+M{dt#eZ8Yj<s%jeJ@ivxk=iPow)dncVqh&kwJ(tHVUU&VDbTIcsIc_fYLs2NW&z
zm(wDri@)24;B<sbU`bs_SIDQ>NbOYPM`!l7A)fHcNlVknJC|y|nn!oCaK*+#4^<Hr
z`~{VjmCL(7+=7L?N!|u&Q34Fmif5NMgCCwQW2vsEWm&sz!LZ;;J46Td;mrFU;%rAl
z8{ajrZFs0al5f-T&W=n+2rJ<2k*1uGLjN1+>^u{pzs&BHGjeMqwQaRgA#;xOii-ES
z#eYwlvma~30b>9;bGLh70GgW8$OZXn<BUez8+J}y@;f9)^Pt6$vj?j!vNQjGkEkg6
z$$M3ygNtvNp7)XOup2niuY2(QW*vwIs=~V7T`yusuK8B<ZF4@yU?h8+JrQyt8Bb`=
z)}Q#k{`Z^()WL6Cb9=XFNwG)7Uu%r3XM{}`>N6ucSB;22Bh|NsSDkBGEZQQE(>G8#
z-s(f(p6aDN`~zpuI&f^==Zu`L6OU4d^=s~jOJnYrojb4!rEXvG21oElDx)>2BZ}!a
zDsxTrpdHSiF}0ssQA^J)(!N+RX-~7~Xxn_)KEs>jw4NW%Gjl8{F!<J!;%%=GR@zyp
zl3aCQh(Dt(&es|I(qAEj%EvkO#T0h!_=TqY_fUF@QNE*cz4z~YurWzzJEi&Q(052h
z!zoL-@G@5!bJoI*23C8xB)5V^rm(4Bk<>=Uk(W0RZNCogXhH8Nt`bPjEBly&M$@1?
ze2R6PR@NQunInIi;4>P{v*9BryJS{nB0&-2F^6PimP@sjB!c-_IlW!k+ufqHDmi2S
z2ra-w9FEWS{Jd0iQCuOf_W-2nkgZQV5k>hz$?MHKcNVz}#*6S&xI-4%9#Z|`&6II<
zDShne?}6<fL%?TqDX1R)JcSq`4g;llv)c8<RR`Ec<`NXXBYTqQxOsl{o)?~Rvk7XN
zv2m0cUXIs~QZu1SiLC_*{^j*eh!`*H3H(qWh$jaS=q55$0>>wi%~IL+QFpzg<kp{q
zMFzet;m9SJD?u<`W2<0pe+B5IW#wlM$n<LZa53~LGyO!LUTAw1r^rfb&E$~oGsIxA
zS|vUpr2E^+ht1Q=kHY^xFtSvcJhHjEX6U8=DwRF|la)jP%DN!5=8{fS!_cdKd=PDT
zF66tEARlXFa>hmG(h#N9dSjFmUAe&P`6}CPlA0egegDLQL_!J0AA`BoeCN}fS$)=#
zhTFwMy!epdPUIGMRA8`WlZZ@J6^$jWo|xR%53h8`R__!f$*9Y;bw)IG`eq`(4$FAu
z()enHT*_}I;$iVuU7eFzyJ?}Z&Ev-7At5)!$sP65IN%^aMCg90%Rew))by<7wVu@)
z3HgIMCXIT@ZMwI;tD3Gcf>58JojU4%Rp6Ne4ki*bYp==%fkCU+98pG&&PDbuE_0YF
z9yafVXYV$cK(0ID4W%sjGZ{phBVtS++cjFFqL33&{iUiBmOIUY87`j$C&bOCb(M4|
zw&7YRcvi*WPjz}~Hh<%}P?&Bx2W0w=KoE<kHCNqSnK5C+`2&J(s4gzaghvLXUGuxT
z7~Sab<Q@&7$`|W1+C|}9>E;KKR(<Kop9qfaLAbB4PiUX_wEc31pTo8<x||qv(;;Z8
zVe|!iqc*7|y^5>dU8i>J5&OWzfpE@YE@oV4VH^E1-8WL}Eb=)=w{j~^#AH9XSP)$~
zUM9U(PnuUqy0MLbMD@TMy-gnnb2YPMN&%~)n4`m<@+5{nMmXS0$x^A=SazB7qXAxU
zUaKuWkVB^$;riKm^DjkR&}t1&HJNEI>;?j(5HjRQ`;pAYpDtyRufQ7QPLGJ&KeOV*
zkHrl>HCm8o=S&l+MlFCFZ&YtCH0^<iU9VvGswi|2{SA?7UuzMifWLfFS5u3HfE?=b
zQp8z>8)g<VNW*ZzL;-}VKeV>~lg2JiiO>;u0(`xV8lUS3Rwin8`-N*gu=Z1+_i&;%
zVF8{v#6ZR;ua{YaQ0^02OH2Q3+;K=T0#26WbtGnZ+KZ&gjdLD2tn#ppH((Vw;_T$n
zJsK74cJn_KUubOz99EGVZp=Vh8?G&qlI|43{lXLYx}}P|OtK4krH{;`BJyvTMUvWz
zty_D=eWzmwN+s}wLA(?}0j%YpjvFaKjLLyk%d0X;e2u~OUQrfqEW8k%thT~$dt8ne
z&brE>Y;!yK`hM^aK}YEJ25YGVYw%wg9q|g4@KeUk<*MSO@dT~85K{<*`ZSx585YC>
zI@bpBMTY~UaE~in-+tuPOdT~(PTsp!IP7Jqty{IIE_R&Uc{6-^zQN8!n|hoPgaXP+
zQFtpgpXOYU8p>wd$MQmcmX2sbhT-$IVa_uf?o!uC0lBo(MtI;A6o^{_3ZU(&GrXiq
zG#ZR&dd5nPo*zDSxBQtyR6wJ-sc-q&gP)>4G{%v`aEexk{bQ49$p5pef;Z|=%GL%V
z29*OK2)O@1jShd=l~+_ojH)DW?_29+sbb=><0dn~Sh=L2dWvV6PxA?G=0Z&S`cIgJ
z8Fbu3N*_wf4Ou&*YY)t0z7YH49XW~G2n8pN0eUU$@d67m9^r{d>r>N3V@`<v2_lNH
z(s8bcO2NBv>GpAvY}D`D9L1U>s)kr80?8B;Gzy644k}QEt|)Q_|3^eaO>8UH>#)8Y
z?Z0-)x<y5L<}fH5@-b?U>G}f#eN<UGA~s8fT>UknJ<M9zYI=;)*fZy1NQ$R%CW)!&
z3_RG+#E4XHmm<gwmn!nw^;#!!n5$^p^9!!5^|fag0xLh2nD#DdHNCLf+f6yeORzy_
zxRf&w$=n+XOu|i;Mm%>V6kc9MlPrNdoGh=fRXd%t$X^KTO&dFKX&EM(*hkJnju&*_
zC`H6$-nQh`m+lQ*yPHV2M)-X8yhYTp^+LQ-?-O6b*r#bkyw9s*z9%Bw`0f$}=H+h+
zsFKik;Nx>63Xxtqrmt5Vf9|I6Lt>1rir;5ZgguUx7!3B_celrk%Mqowtt$b2g!x_M
zL=$yh2Ui_CK}Rbe*7VgHlBn&U+RBLBBxuFuIaMyyIE>9_B?kH*mDDv~O)x<@KtTM`
zTzWgrLhzcwjn5>RT>Sp$pgH|Mi-2qdWoNIky64OG4QCAKorOzCT@G{4`AdaHp0cup
zyfWrp<SfStg+6!{O#`dhrNBMI)@wsfNIG_#bJr>XC&W1P!b=Sh3DBM3lm@2=y=(d-
z$K?&5kT_S{p2|g@&}7#yxi#AVex~Ns+hAg#>P=Sk;T{Uj8J`KskI6*@;MH0V#92gx
z0z?xAx`SlG@9+<Bs<o!ht`+sfN_XQ8*Gvk&3{rtmjq{4EuV|L?C+^Ulxu>>H);P%g
z+$J)rj7k{2E3H<@R89Vz)Prmx1VpOVt>Z2xH6FAJ+@RZ$#YaKRYT<*xm*pIY%~}dx
zm4T_Tf(yhGIDeOjvX3AiUe$YzSN`?eaT}xBY3pd`NVdjYhe(9MY3o<5_}}t0%a_aF
zw_T*}pvg4L?+Jjy*{u;%!D}fHqt&3$-f-Gc6P=eBiMm9Tey~=bM|Kd}y0Kp*w9EBf
ziC|V1tis=TM|hwU`~W#AW!_)Tl)!yCqAIRH{@-hC0rq-_OSXZ-d;Vxs_S!JG&1cT2
zyXxJxd>Z{jmbN8AK8=<N!$1E+z1V$we|r1<_Z~S)d`qSlGF_Mp2GYLsyx?9C=C;sx
z#JC-mK4&FpLGU8&kHLa5yt=oeYjQ=*$}&C35yW2(!}q+KeJ4Ka5)xKD*X3x;)B~%S
zlJmY%Ag7kYG%8Pc6yBQrI$<C#?qUEqD=?iqViaF!L<%nZZ8vkE->PTBkYD!kGkW>&
zeNHO~P39mCy^67MoBnTVDEJxE`k&^5KIWWD(>FcAJROrSSA`5v2RvhFEQ4cPoO6&z
z`MTv$was{mWEg_R_!@I-p{(Bvfp|`(_O*%@ktsy*8{9<lE`xr6T;kY(7d$uO@w0X7
zo3i0vv&L|sK6$QNY1_aBsay>!0a&A$S_Bkx<NZZ^9XTR;CX7u0LZ-oe;({EdX-EtU
z@s<>}cJ|6i3ZYeK_-l65WE^jVq@H>%d_oy8_jzwP`~uxk&7Iz*k|q7Sz3!J@ly+(b
z4@YREXb05#>)cuo8b`z_BdK6aV|ij;Ss-_2U36){P(D`+Sekde{#sow`6@OopJ5YB
z1&XEbVTe2jYFsnxRC(oDz87kzL}I1w84iUz2(l<4-hMIfjpKYE?%z<YPP?(Mj&Cx<
ziC2VW{R%(YwBo26C0U2dA4_-wY}ULX+OJI=wRv<x>gs@a0n5EqK|~^qP-USD1<4zs
zfo1}TYDMg<DPP(9hdUU55S_zaUI$T#%D>@)Roj$0dl1qH?PzCVA=~+n8Qgx~h+IRz
z+c+a&JU0@RIrS`+UbTIB&cShT*sND-f{2JWO#z30&PNLpKuM?O%eil|7n7ZlILekE
zS5FB;A)Jmq@waq(9cw@EKH~AKUGu#ZSOuFH`Si!Kremt^f6htgQ>glhkO%=9PTy{c
zz7xx5hNKTgCXKu!{f$6~H>euQk~4X)-5yRlHj()^W!5B?Yk-###;txV;A19c>$aH_
z7an$Z7an`5cK9}?hM#()+ti2!A@4FcUJ1hZHsF*MbyG9KKQxSKg)B|f_X@;*`5=4B
znEEu%%1Eh~)l4LYQ~TJJdq6mXkjw?2@!<q)r|u{xlT0_n^c1t#z63GyCa-7#>Be;`
zDj^1K1E<GtNm_SW4nX5RbA{>za^S{HjaMtL^*$7)lzn`I(`G|}$A|c67(>V*SLhxa
z@(K@I>^PA82W}2BmMU`mvX^o}j$xh4tKyA@z?dRtLn)?fW@f9;NbSLKTQQ~e^=Pjj
z-Cp~a^5|Gn9j_qxr4<niBqOL4*w^;=3>3V$s^61-cNpck8Q0@16h<Jd1y?8|Fax{b
z@MbNDLB&bFCkSBX2>5MOjpeI+b1DF)BBpySRnS@Zd1r3*r`;mL)wCm3iRTz~18$Z1
zY60{_l;0aUthy94LlW16BakBn$6krIpcgpjTZn<D{@h2US-$#XB=-Z$^DO}n3=WWg
zRagCc=oH_r$<y@_fw$k)<<dS%=1GLH2O1QNqbD&Kt_MnzU7eJcx%}wuxnHgA>pbJZ
z3{4T*N{~UsbLRpv`LK{oe6haT+>*%YL&QtBO#H7yzBADl8)Whn8{_F|^K3Na|F5GZ
z!TGDGCcyws|3zwZyKKcRqjJkB>~QGl4CS5(EZ##)pyI>Bdio^F3s0>s|Bi!hMuoSI
zDs;l*z6k@Fx8w@mqr+=C-BZJd1x11b1j--nNaa3RO;96^2MZw?6ho!9?MRWq-u8`u
zB6Dg8sUAC~0#7Z^7Xc8FS$J<)9ST=6*U}oE33EjQ0@-Z4*ie1DINj3EOu&O?or!5L
z-_uiO1Vp#NgjrM#PRJh=fEYALL&eQ)2kv0Bn+B-_fWTuLEt|fBqAB8Yg<>FuU#|kn
zfJw3u9A@efhUi>4;V?CFx5hqlYqdujdD!;o`FF&dUp)^9=OlgYms|7KYrc!Tq&jCt
z#0Fz95XtR)sI@eqrUfZ5W!H4VSM5AU-}#N}hYdw7u@0~P9;=_;KY66bT^}6ITYiYT
zA;O&=r*+hQ^|UXK)q$Z}mFr$o;74w*ho(9l*h$YIzvZIK_(p>Z#7LSVB*id>t=j>{
z<m$D3!wKn09kM;=9ePyeP<RsqhwHit-;7`vb1p<H&+m*k?6KICq(+X`7~%=Bxk*l`
zb$RtGPw^aqMbcF4q~Gekl6)npU4LCnZdjf3PC*eKK_u7U^sDCr>5Oe$>iAh)a~UGw
zvorIpl~lQ1fU%a@elC}Nd5>6s^|$}wN#`~l{!?*SolFg$36GIC?vN9*QDB9?7=uVM
z;R6e0=_Q3gK4lEV;Qu1`l|?oeu7(CitNeL0s{ztuYum8-xB0T)9Ug5YN)}&%&8AV6
zq%%CLifP`z$EDCEOYuxOsq@7A*PF2YjOL=K)YF~eQH}h+UaLvDPnRzAmyn5_i`ip(
zsKWbli(0D=t$Fh=$wK!Q%YaxlqQUIqFTc{yJC+J3EY8d)?8a5HVS`{H|B1=Luaw7>
ztE$d`c*52c4VT=Bs|NXCaJ=PLgp<>qamXsDR{HKDHD>GA!#=D;C5FL{#g>RV_v60D
z!^8a$zUx6iv0!?430-yVdEJU=pOzNxIe{^AhGz-o`_@e+B+G+=AzgGz5MrN*kE`br
zLxCr!NmL&)V%q(1(H!A`aMiGiI{cE3HlCo7t+1b4iIr<?M^81QEK_E0($~mlz)r|-
zUQIbi{zgc;8Fj+^Wa^YrHz>YOpPWrkk27AWVq!+S)x+PzLTnu8Gdpw`iR;u;V81UE
zb2f_uUUK~k?eXUG6Z+Fxnu-az==z{B7vj0;Q>tjctY1h-fN#-{oxVq4=PS|_7k!WP
zk1n*rD%fB=Ino>jm*R~HRlBGI>7Ttc7%piB6N?gt1t=go!&)}8i;2w~@FtihWHC@?
zgC>{AiK%MxNHs{SN-|Z5Kfy()QD*qn&lrFyVd$P$IEF`@j^qLZ;6e+|hgloZz5eiu
zQF$%tA0{YE?Y#djFI9eq-{mr3|2#i->VPsX(4JWhu_SxFSFhBE=lfUCLGtr;eDoD8
zQ^k;Kw5xSbp^fdqZ()i7ol^rqIQ%&Rm7!eNkb~ikA!%FCg_QC@LBZPPCTFtmM4o<3
z!+C$(U_cVP$<*c+G(|#zKh`AU6ff!RU4w$;ykoo0Oc+)Qu<x9yu_W<Q;*mW)wn@v&
z=EWem%nm6PE^k++gT+H~iq}L8*K`ZgY)#wNNJs|Ng);{(5l}Gedu#Lj$$Nu8g;rmu
zJYFnGXAjQfl8sbOhr()}FoI{RJ-I)(f8#DO5-V9?-#5}ce;!bnxFB{X6ml+>jG8X^
zCLSZ~w~3}#JFL#u_@(XAtHK$7;N43=2V}+5HFmTZaC>npFi3dgh+_E8MWfoiCeySv
zjPboVQrEa1*MN!WCB^Z0%R?PSTh%qLePJomEq+Jy33eo$KW_RpqYva{vz#97_T3ZN
[GIT binary patch data omitted: base85-encoded image payload, presumably the demo/data_pipeline.png figure referenced by the new docs]

diff --git a/docs/DATA_PIPELINE.md b/docs/DATA_PIPELINE.md
new file mode 100644
index 0000000..413463a
--- /dev/null
+++ b/docs/DATA_PIPELINE.md
@@ -0,0 +1,115 @@
+## Data preparation pipeline
+
+The data preparation pipeline and the dataset are decoupled. Usually a dataset
+defines how to process the annotations, while a data pipeline defines all the steps to prepare a data dict.
+A pipeline consists of a sequence of operations. Each operation takes a dict as input and outputs a dict for the next transform.
+
+We present a typical pipeline in the following figure. The blue blocks are pipeline operations. As the pipeline proceeds, each operator can add new keys (marked in green) to the result dict or update the existing keys (marked in orange).
+![pipeline figure](../demo/data_pipeline.png)
+
+The operations are categorized into data loading, pre-processing, formatting and test-time augmentation.
+
+Here is a pipeline example for Faster R-CNN.
+```python
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+```
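+
+The composed pipeline is then passed to a dataset through its `pipeline` argument (see the `CustomDataset` changes in this patch). A minimal sketch of the corresponding data config (the dataset paths are placeholders following the usual COCO layout):
+```python
+# illustrative snippet: paths and per-GPU values are placeholders
+data = dict(
+    imgs_per_gpu=2,
+    workers_per_gpu=2,
+    train=dict(
+        type='CocoDataset',
+        ann_file='data/coco/annotations/instances_train2017.json',
+        img_prefix='data/coco/train2017/',
+        pipeline=train_pipeline),
+    test=dict(
+        type='CocoDataset',
+        ann_file='data/coco/annotations/instances_val2017.json',
+        img_prefix='data/coco/val2017/',
+        pipeline=test_pipeline))
+```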
+
+For each operation, we list the related dict fields that are added/updated/removed.
+
+### Data loading
+
+`LoadImageFromFile`
+- add: img, img_shape, ori_shape
+
+`LoadAnnotations`
+- add: gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks, gt_semantic_seg, bbox_fields, mask_fields
+
+`LoadProposals`
+- add: proposals
+
+### Pre-processing
+
+`Resize`
+- add: scale, scale_idx, pad_shape, scale_factor, keep_ratio
+- update: img, img_shape, *bbox_fields, *mask_fields
+
+`RandomFlip`
+- add: flip
+- update: img, *bbox_fields, *mask_fields
+
+`Pad`
+- add: pad_fixed_size, pad_size_divisor
+- update: img, pad_shape, *mask_fields
+
+`RandomCrop`
+- update: img, pad_shape, gt_bboxes, gt_labels, gt_masks, *bbox_fields
+
+`Normalize`
+- add: img_norm_cfg
+- update: img
+
+`SegResizeFlipPadRescale`
+- update: gt_semantic_seg
+
+`PhotoMetricDistortion`
+- update: img
+
+`Expand`
+- update: img, gt_bboxes
+
+`MinIoURandomCrop`
+- update: img, gt_bboxes, gt_labels
+
+`Corrupt`
+- update: img
+
+### Formatting
+
+`ToTensor`
+- update: specified by `keys`.
+
+`ImageToTensor`
+- update: specified by `keys`.
+
+`Transpose`
+- update: specified by `keys`.
+
+`ToDataContainer`
+- update: specified by `fields`.
+
+`DefaultFormatBundle`
+- update: img, proposals, gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks, gt_semantic_seg
+
+`Collect`
+- add: img_meta (the keys of img_meta are specified by `meta_keys`)
+- remove: all other keys except for those specified by `keys`
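+
+For example, `meta_keys` can be customized when the model consumes different meta information. A hedged sketch (the tuple below simply spells out the defaults from `Collect`'s signature):
+```python
+dict(
+    type='Collect',
+    keys=['img', 'gt_bboxes', 'gt_labels'],
+    meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
+               'scale_factor', 'flip', 'img_norm_cfg'))
+```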
+
+### Test-time augmentation
+
+`MultiScaleFlipAug`
+- wraps a list of `transforms` and applies them once for each `img_scale` (and additionally with horizontal flipping when `flip=True`), collecting the augmented results
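+
+### Extending the pipeline
+
+Every operation is a callable registered in the `PIPELINES` registry, so a custom operation only needs to accept and return the results dict. A minimal sketch, assuming the `PIPELINES` registry added by this patch lives in `mmdet/datasets/registry` (`MyTransform` and the `dummy` key are illustrative, not part of this patch):
+```python
+from mmdet.datasets.registry import PIPELINES
+
+
+@PIPELINES.register_module
+class MyTransform(object):
+    """Illustrative operation: take the results dict, add or update keys, return it."""
+
+    def __call__(self, results):
+        results['dummy'] = True  # placeholder key for demonstration
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__
+```
+It can then be used in a config like any built-in operation, e.g. by inserting `dict(type='MyTransform')` into `train_pipeline`.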
diff --git a/docs/GETTING_STARTED.md b/docs/GETTING_STARTED.md
index b603e19..5977c71 100644
--- a/docs/GETTING_STARTED.md
+++ b/docs/GETTING_STARTED.md
@@ -89,12 +89,10 @@ model = init_detector(config_file, checkpoint_file, device='cuda:0')
 # test a single image and show the results
 img = 'test.jpg'  # or img = mmcv.imread(img), which will only load it once
 result = inference_detector(model, img)
+# visualize the results in a new window
 show_result(img, result, model.CLASSES)
-
-# test a list of images and write the results to image files
-imgs = ['test1.jpg', 'test2.jpg']
-for i, result in enumerate(inference_detector(model, imgs)):
-    show_result(imgs[i], result, model.CLASSES, out_file='result_{}.jpg'.format(i))
+# or save the visualization results to image files
+show_result(img, result, model.CLASSES, out_file='result.jpg')
 
 # test a video and show the results
 video = mmcv.VideoReader('video.mp4')
diff --git a/mmdet/apis/inference.py b/mmdet/apis/inference.py
index 85bfd2b..67b2b24 100644
--- a/mmdet/apis/inference.py
+++ b/mmdet/apis/inference.py
@@ -5,11 +5,11 @@ import mmcv
 import numpy as np
 import pycocotools.mask as maskUtils
 import torch
+from mmcv.parallel import collate, scatter
 from mmcv.runner import load_checkpoint
 
 from mmdet.core import get_classes
-from mmdet.datasets import to_tensor
-from mmdet.datasets.transforms import ImageTransform
+from mmdet.datasets.pipelines import Compose
 from mmdet.models import build_detector
 
 
@@ -46,7 +46,16 @@ def init_detector(config, checkpoint=None, device='cuda:0'):
     return model
 
 
-def inference_detector(model, imgs):
+class LoadImage(object):
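+    """A simple pipeline op that loads the query image; mmcv.imread accepts both a file path and an already-loaded array."""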
+
+    def __call__(self, results):
+        img = mmcv.imread(results['img'])
+        results['img'] = img
+        results['ori_shape'] = img.shape
+        return results
+
+
+def inference_detector(model, img):
     """Inference image(s) with the detector.
 
     Args:
@@ -59,45 +68,19 @@ def inference_detector(model, imgs):
         detection results directly.
     """
     cfg = model.cfg
-    img_transform = ImageTransform(
-        size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)
-
     device = next(model.parameters()).device  # model device
-    if not isinstance(imgs, list):
-        return _inference_single(model, imgs, img_transform, device)
-    else:
-        return _inference_generator(model, imgs, img_transform, device)
-
-
-def _prepare_data(img, img_transform, cfg, device):
-    ori_shape = img.shape
-    img, img_shape, pad_shape, scale_factor = img_transform(
-        img,
-        scale=cfg.data.test.img_scale,
-        keep_ratio=cfg.data.test.get('resize_keep_ratio', True))
-    img = to_tensor(img).to(device).unsqueeze(0)
-    img_meta = [
-        dict(
-            ori_shape=ori_shape,
-            img_shape=img_shape,
-            pad_shape=pad_shape,
-            scale_factor=scale_factor,
-            flip=False)
-    ]
-    return dict(img=[img], img_meta=[img_meta])
-
-
-def _inference_single(model, img, img_transform, device):
-    img = mmcv.imread(img)
-    data = _prepare_data(img, img_transform, model.cfg, device)
+    # build the data pipeline
+    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
+    test_pipeline = Compose(test_pipeline)
+    # prepare data
+    data = dict(img=img)
+    data = test_pipeline(data)
+    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
+    # forward the model
     with torch.no_grad():
         result = model(return_loss=False, rescale=True, **data)
-    return result
-
 
-def _inference_generator(model, imgs, img_transform, device):
-    for img in imgs:
-        yield _inference_single(model, img, img_transform, device)
+    return result
 
 
 # TODO: merge this method with the one in BaseDetector
diff --git a/mmdet/core/evaluation/eval_hooks.py b/mmdet/core/evaluation/eval_hooks.py
index c37f7bd..6cf87ff 100644
--- a/mmdet/core/evaluation/eval_hooks.py
+++ b/mmdet/core/evaluation/eval_hooks.py
@@ -78,12 +78,12 @@ class DistEvalmAPHook(DistEvalHook):
     def evaluate(self, runner, results):
         gt_bboxes = []
         gt_labels = []
-        gt_ignore = [] if self.dataset.with_crowd else None
+        gt_ignore = []
         for i in range(len(self.dataset)):
             ann = self.dataset.get_ann_info(i)
             bboxes = ann['bboxes']
             labels = ann['labels']
-            if gt_ignore is not None:
+            if 'bboxes_ignore' in ann:
                 ignore = np.concatenate([
                     np.zeros(bboxes.shape[0], dtype=np.bool),
                     np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
@@ -93,6 +93,8 @@ class DistEvalmAPHook(DistEvalHook):
                 labels = np.concatenate([labels, ann['labels_ignore']])
             gt_bboxes.append(bboxes)
             gt_labels.append(labels)
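+        # use None rather than an empty list when no ignore annotations were collected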
+        if not gt_ignore:
+            gt_ignore = None
         # If the dataset is VOC2007, then use 11 points mAP evaluation.
         if hasattr(self.dataset, 'year') and self.dataset.year == 2007:
             ds_name = 'voc07'
diff --git a/mmdet/datasets/__init__.py b/mmdet/datasets/__init__.py
index 11b7569..8de55d2 100644
--- a/mmdet/datasets/__init__.py
+++ b/mmdet/datasets/__init__.py
@@ -6,7 +6,6 @@ from .dataset_wrappers import ConcatDataset, RepeatDataset
 from .extra_aug import ExtraAugmentation
 from .loader import DistributedGroupSampler, GroupSampler, build_dataloader
 from .registry import DATASETS
-from .utils import random_scale, show_ann, to_tensor
 from .voc import VOCDataset
 from .wider_face import WIDERFaceDataset
 from .xml_style import XMLDataset
@@ -14,7 +13,6 @@ from .xml_style import XMLDataset
 __all__ = [
     'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset',
     'CityscapesDataset', 'GroupSampler', 'DistributedGroupSampler',
-    'build_dataloader', 'to_tensor', 'random_scale', 'show_ann',
-    'ConcatDataset', 'RepeatDataset', 'ExtraAugmentation', 'WIDERFaceDataset',
-    'DATASETS', 'build_dataset'
+    'build_dataloader', 'ConcatDataset', 'RepeatDataset', 'ExtraAugmentation',
+    'WIDERFaceDataset', 'DATASETS', 'build_dataset'
 ]
diff --git a/mmdet/datasets/coco.py b/mmdet/datasets/coco.py
index 46ef709..23c9120 100644
--- a/mmdet/datasets/coco.py
+++ b/mmdet/datasets/coco.py
@@ -42,7 +42,7 @@ class CocoDataset(CustomDataset):
         img_id = self.img_infos[idx]['id']
         ann_ids = self.coco.getAnnIds(imgIds=[img_id])
         ann_info = self.coco.loadAnns(ann_ids)
-        return self._parse_ann_info(ann_info, self.with_mask)
+        return self._parse_ann_info(self.img_infos[idx], ann_info)
 
     def _filter_imgs(self, min_size=32):
         """Filter images too small or without ground truths."""
@@ -55,7 +55,7 @@ class CocoDataset(CustomDataset):
                 valid_inds.append(i)
         return valid_inds
 
-    def _parse_ann_info(self, ann_info, with_mask=True):
+    def _parse_ann_info(self, img_info, ann_info):
         """Parse bbox and mask annotation.
 
         Args:
@@ -64,19 +64,14 @@ class CocoDataset(CustomDataset):
 
         Returns:
             dict: A dict containing the following keys: bboxes, bboxes_ignore,
-                labels, masks, mask_polys, poly_lens.
+                labels, masks, seg_map. "masks" are raw annotations and not
+                decoded into binary masks.
         """
         gt_bboxes = []
         gt_labels = []
         gt_bboxes_ignore = []
-        # Two formats are provided.
-        # 1. mask: a binary map of the same size of the image.
-        # 2. polys: each mask consists of one or several polys, each poly is a
-        # list of float.
-        if with_mask:
-            gt_masks = []
-            gt_mask_polys = []
-            gt_poly_lens = []
+        gt_masks_ann = []
+
         for i, ann in enumerate(ann_info):
             if ann.get('ignore', False):
                 continue
@@ -84,19 +79,13 @@ class CocoDataset(CustomDataset):
             if ann['area'] <= 0 or w < 1 or h < 1:
                 continue
             bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
-            if ann['iscrowd']:
+            if ann.get('iscrowd', False):
                 gt_bboxes_ignore.append(bbox)
             else:
                 gt_bboxes.append(bbox)
                 gt_labels.append(self.cat2label[ann['category_id']])
-            if with_mask:
-                gt_masks.append(self.coco.annToMask(ann))
-                mask_polys = [
-                    p for p in ann['segmentation'] if len(p) >= 6
-                ]  # valid polygons have >= 3 points (6 coordinates)
-                poly_lens = [len(p) for p in mask_polys]
-                gt_mask_polys.append(mask_polys)
-                gt_poly_lens.extend(poly_lens)
+                gt_masks_ann.append(ann['segmentation'])
+
         if gt_bboxes:
             gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
             gt_labels = np.array(gt_labels, dtype=np.int64)
@@ -109,12 +98,13 @@ class CocoDataset(CustomDataset):
         else:
             gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
 
+        seg_map = img_info['filename'].replace('jpg', 'png')
+
         ann = dict(
-            bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)
+            bboxes=gt_bboxes,
+            labels=gt_labels,
+            bboxes_ignore=gt_bboxes_ignore,
+            masks=gt_masks_ann,
+            seg_map=seg_map)
 
-        if with_mask:
-            ann['masks'] = gt_masks
-            # poly format is not used in the current implementation
-            ann['mask_polys'] = gt_mask_polys
-            ann['poly_lens'] = gt_poly_lens
         return ann
diff --git a/mmdet/datasets/custom.py b/mmdet/datasets/custom.py
index aed2bf9..d596943 100644
--- a/mmdet/datasets/custom.py
+++ b/mmdet/datasets/custom.py
@@ -1,17 +1,11 @@
 import os.path as osp
-import warnings
 
 import mmcv
 import numpy as np
-from imagecorruptions import corrupt
-from mmcv.parallel import DataContainer as DC
 from torch.utils.data import Dataset
 
-from .extra_aug import ExtraAugmentation
+from .pipelines import Compose
 from .registry import DATASETS
-from .transforms import (BboxTransform, ImageTransform, MaskTransform,
-                         Numpy2Tensor, SegMapTransform)
-from .utils import random_scale, to_tensor
 
 
 @DATASETS.register_module
@@ -27,7 +21,7 @@ class CustomDataset(Dataset):
             'ann': {
                 'bboxes': <np.ndarray> (n, 4),
                 'labels': <np.ndarray> (n, ),
-                'bboxes_ignore': <np.ndarray> (k, 4),
+                'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
                 'labels_ignore': <np.ndarray> (k, 4) (optional field)
             }
         },
@@ -41,33 +35,35 @@ class CustomDataset(Dataset):
 
     def __init__(self,
                  ann_file,
-                 img_prefix,
-                 img_scale,
-                 img_norm_cfg,
-                 multiscale_mode='value',
-                 size_divisor=None,
-                 proposal_file=None,
-                 num_max_proposals=1000,
-                 flip_ratio=0,
-                 with_mask=True,
-                 with_crowd=True,
-                 with_label=True,
-                 with_semantic_seg=False,
+                 pipeline,
+                 data_root=None,
+                 img_prefix=None,
                  seg_prefix=None,
-                 seg_scale_factor=1,
-                 extra_aug=None,
-                 resize_keep_ratio=True,
-                 corruption=None,
-                 corruption_severity=1,
-                 skip_img_without_anno=True,
+                 proposal_file=None,
                  test_mode=False):
-        # prefix of images path
+        self.ann_file = ann_file
+        self.data_root = data_root
         self.img_prefix = img_prefix
+        self.seg_prefix = seg_prefix
+        self.proposal_file = proposal_file
+        self.test_mode = test_mode
 
+        # join paths if data_root is specified
+        if self.data_root is not None:
+            if not osp.isabs(self.ann_file):
+                self.ann_file = osp.join(self.data_root, self.ann_file)
+            if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
+                self.img_prefix = osp.join(self.data_root, self.img_prefix)
+            if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
+                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
+            if not (self.proposal_file is None
+                    or osp.isabs(self.proposal_file)):
+                self.proposal_file = osp.join(self.data_root,
+                                              self.proposal_file)
         # load annotations (and proposals)
-        self.img_infos = self.load_annotations(ann_file)
-        if proposal_file is not None:
-            self.proposals = self.load_proposals(proposal_file)
+        self.img_infos = self.load_annotations(self.ann_file)
+        if self.proposal_file is not None:
+            self.proposals = self.load_proposals(self.proposal_file)
         else:
             self.proposals = None
         # filter images with no annotation during training
@@ -76,67 +72,11 @@ class CustomDataset(Dataset):
             self.img_infos = [self.img_infos[i] for i in valid_inds]
             if self.proposals is not None:
                 self.proposals = [self.proposals[i] for i in valid_inds]
-
-        # (long_edge, short_edge) or [(long1, short1), (long2, short2), ...]
-        self.img_scales = img_scale if isinstance(img_scale,
-                                                  list) else [img_scale]
-        assert mmcv.is_list_of(self.img_scales, tuple)
-        # normalization configs
-        self.img_norm_cfg = img_norm_cfg
-
-        # multi-scale mode (only applicable for multi-scale training)
-        self.multiscale_mode = multiscale_mode
-        assert multiscale_mode in ['value', 'range']
-
-        # max proposals per image
-        self.num_max_proposals = num_max_proposals
-        # flip ratio
-        self.flip_ratio = flip_ratio
-        assert flip_ratio >= 0 and flip_ratio <= 1
-        # padding border to ensure the image size can be divided by
-        # size_divisor (used for FPN)
-        self.size_divisor = size_divisor
-
-        # with mask or not (reserved field, takes no effect)
-        self.with_mask = with_mask
-        # some datasets provide bbox annotations as ignore/crowd/difficult,
-        # if `with_crowd` is True, then these info is returned.
-        self.with_crowd = with_crowd
-        # with label is False for RPN
-        self.with_label = with_label
-        # with semantic segmentation (stuff) annotation or not
-        self.with_seg = with_semantic_seg
-        # prefix of semantic segmentation map path
-        self.seg_prefix = seg_prefix
-        # rescale factor for segmentation maps
-        self.seg_scale_factor = seg_scale_factor
-        # in test mode or not
-        self.test_mode = test_mode
-
         # set group flag for the sampler
         if not self.test_mode:
             self._set_group_flag()
-        # transforms
-        self.img_transform = ImageTransform(
-            size_divisor=self.size_divisor, **self.img_norm_cfg)
-        self.bbox_transform = BboxTransform()
-        self.mask_transform = MaskTransform()
-        self.seg_transform = SegMapTransform(self.size_divisor)
-        self.numpy2tensor = Numpy2Tensor()
-
-        # if use extra augmentation
-        if extra_aug is not None:
-            self.extra_aug = ExtraAugmentation(**extra_aug)
-        else:
-            self.extra_aug = None
-
-        # image rescale if keep ratio
-        self.resize_keep_ratio = resize_keep_ratio
-        self.skip_img_without_anno = skip_img_without_anno
-
-        # corruptions
-        self.corruption = corruption
-        self.corruption_severity = corruption_severity
+        # processing pipeline
+        self.pipeline = Compose(pipeline)
 
     def __len__(self):
         return len(self.img_infos)
@@ -150,6 +90,13 @@ class CustomDataset(Dataset):
     def get_ann_info(self, idx):
         return self.img_infos[idx]['ann']
 
+    def pre_pipeline(self, results):
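+        """Record path prefixes and initialize the bbox/mask field lists consumed by pipeline transforms."""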
+        results['img_prefix'] = self.img_prefix
+        results['seg_prefix'] = self.seg_prefix
+        results['proposal_file'] = self.proposal_file
+        results['bbox_fields'] = []
+        results['mask_fields'] = []
+
     def _filter_imgs(self, min_size=32):
         """Filter images too small."""
         valid_inds = []
@@ -186,164 +133,17 @@ class CustomDataset(Dataset):
 
     def prepare_train_img(self, idx):
         img_info = self.img_infos[idx]
-        # load image
-        img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
-        # corruption
-        if self.corruption is not None:
-            img = corrupt(
-                img,
-                severity=self.corruption_severity,
-                corruption_name=self.corruption)
-        # load proposals if necessary
-        if self.proposals is not None:
-            proposals = self.proposals[idx][:self.num_max_proposals]
-            # TODO: Handle empty proposals properly. Currently images with
-            # no proposals are just ignored, but they can be used for
-            # training in concept.
-            if len(proposals) == 0:
-                return None
-            if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
-                raise AssertionError(
-                    'proposals should have shapes (n, 4) or (n, 5), '
-                    'but found {}'.format(proposals.shape))
-            if proposals.shape[1] == 5:
-                scores = proposals[:, 4, None]
-                proposals = proposals[:, :4]
-            else:
-                scores = None
-
-        ann = self.get_ann_info(idx)
-        gt_bboxes = ann['bboxes']
-        gt_labels = ann['labels']
-        if self.with_crowd:
-            gt_bboxes_ignore = ann['bboxes_ignore']
-
-        # skip the image if there is no valid gt bbox
-        if len(gt_bboxes) == 0 and self.skip_img_without_anno:
-            warnings.warn('Skip the image "%s" that has no valid gt bbox' %
-                          osp.join(self.img_prefix, img_info['filename']))
-            return None
-
-        # extra augmentation
-        if self.extra_aug is not None:
-            img, gt_bboxes, gt_labels = self.extra_aug(img, gt_bboxes,
-                                                       gt_labels)
-
-        # apply transforms
-        flip = True if np.random.rand() < self.flip_ratio else False
-        # randomly sample a scale
-        img_scale = random_scale(self.img_scales, self.multiscale_mode)
-        img, img_shape, pad_shape, scale_factor = self.img_transform(
-            img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
-        img = img.copy()
-        if self.with_seg:
-            gt_seg = mmcv.imread(
-                osp.join(self.seg_prefix,
-                         img_info['filename'].replace('jpg', 'png')),
-                flag='unchanged')
-            gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
-            gt_seg = mmcv.imrescale(
-                gt_seg, self.seg_scale_factor, interpolation='nearest')
-            gt_seg = gt_seg[None, ...]
+        ann_info = self.get_ann_info(idx)
+        results = dict(img_info=img_info, ann_info=ann_info)
         if self.proposals is not None:
-            proposals = self.bbox_transform(proposals, img_shape, scale_factor,
-                                            flip)
-            proposals = np.hstack([proposals, scores
-                                   ]) if scores is not None else proposals
-        gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
-                                        flip)
-        if self.with_crowd:
-            gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
-                                                   scale_factor, flip)
-        if self.with_mask:
-            gt_masks = self.mask_transform(ann['masks'], pad_shape,
-                                           scale_factor, flip)
-
-        ori_shape = (img_info['height'], img_info['width'], 3)
-        img_meta = dict(
-            ori_shape=ori_shape,
-            img_shape=img_shape,
-            pad_shape=pad_shape,
-            scale_factor=scale_factor,
-            flip=flip)
-
-        data = dict(
-            img=DC(to_tensor(img), stack=True),
-            img_meta=DC(img_meta, cpu_only=True),
-            gt_bboxes=DC(to_tensor(gt_bboxes)))
-        if self.proposals is not None:
-            data['proposals'] = DC(to_tensor(proposals))
-        if self.with_label:
-            data['gt_labels'] = DC(to_tensor(gt_labels))
-        if self.with_crowd:
-            data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore))
-        if self.with_mask:
-            data['gt_masks'] = DC(gt_masks, cpu_only=True)
-        if self.with_seg:
-            data['gt_semantic_seg'] = DC(to_tensor(gt_seg), stack=True)
-        return data
+            results['proposals'] = self.proposals[idx]
+        self.pre_pipeline(results)
+        return self.pipeline(results)
 
     def prepare_test_img(self, idx):
-        """Prepare an image for testing (multi-scale and flipping)"""
         img_info = self.img_infos[idx]
-        img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
-        # corruption
-        if self.corruption is not None:
-            img = corrupt(
-                img,
-                severity=self.corruption_severity,
-                corruption_name=self.corruption)
-        # load proposals if necessary
-        if self.proposals is not None:
-            proposal = self.proposals[idx][:self.num_max_proposals]
-            if not (proposal.shape[1] == 4 or proposal.shape[1] == 5):
-                raise AssertionError(
-                    'proposals should have shapes (n, 4) or (n, 5), '
-                    'but found {}'.format(proposal.shape))
-        else:
-            proposal = None
-
-        def prepare_single(img, scale, flip, proposal=None):
-            _img, img_shape, pad_shape, scale_factor = self.img_transform(
-                img, scale, flip, keep_ratio=self.resize_keep_ratio)
-            _img = to_tensor(_img)
-            _img_meta = dict(
-                ori_shape=(img_info['height'], img_info['width'], 3),
-                img_shape=img_shape,
-                pad_shape=pad_shape,
-                scale_factor=scale_factor,
-                flip=flip)
-            if proposal is not None:
-                if proposal.shape[1] == 5:
-                    score = proposal[:, 4, None]
-                    proposal = proposal[:, :4]
-                else:
-                    score = None
-                _proposal = self.bbox_transform(proposal, img_shape,
-                                                scale_factor, flip)
-                _proposal = np.hstack([_proposal, score
-                                       ]) if score is not None else _proposal
-                _proposal = to_tensor(_proposal)
-            else:
-                _proposal = None
-            return _img, _img_meta, _proposal
-
-        imgs = []
-        img_metas = []
-        proposals = []
-        for scale in self.img_scales:
-            _img, _img_meta, _proposal = prepare_single(
-                img, scale, False, proposal)
-            imgs.append(_img)
-            img_metas.append(DC(_img_meta, cpu_only=True))
-            proposals.append(_proposal)
-            if self.flip_ratio > 0:
-                _img, _img_meta, _proposal = prepare_single(
-                    img, scale, True, proposal)
-                imgs.append(_img)
-                img_metas.append(DC(_img_meta, cpu_only=True))
-                proposals.append(_proposal)
-        data = dict(img=imgs, img_meta=img_metas)
+        results = dict(img_info=img_info)
         if self.proposals is not None:
-            data['proposals'] = proposals
-        return data
+            results['proposals'] = self.proposals[idx]
+        self.pre_pipeline(results)
+        return self.pipeline(results)
diff --git a/mmdet/datasets/pipelines/__init__.py b/mmdet/datasets/pipelines/__init__.py
new file mode 100644
index 0000000..dd5919e
--- /dev/null
+++ b/mmdet/datasets/pipelines/__init__.py
@@ -0,0 +1,16 @@
+from .compose import Compose
+from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,
+                        Transpose, to_tensor)
+from .loading import LoadAnnotations, LoadImageFromFile, LoadProposals
+from .test_aug import MultiScaleFlipAug
+from .transforms import (Expand, MinIoURandomCrop, Normalize, Pad,
+                         PhotoMetricDistortion, RandomCrop, RandomFlip, Resize,
+                         SegResizeFlipPadRescale)
+
+__all__ = [
+    'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
+    'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
+    'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad',
+    'RandomCrop', 'Normalize', 'SegResizeFlipPadRescale', 'MinIoURandomCrop',
+    'Expand', 'PhotoMetricDistortion'
+]
diff --git a/mmdet/datasets/pipelines/compose.py b/mmdet/datasets/pipelines/compose.py
new file mode 100644
index 0000000..f160eed
--- /dev/null
+++ b/mmdet/datasets/pipelines/compose.py
@@ -0,0 +1,35 @@
+import collections
+
+from mmdet.utils import build_from_cfg
+from ..registry import PIPELINES
+
+
+@PIPELINES.register_module
+class Compose(object):
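+    """Compose a sequence of data transforms; each must be a config dict (built via the PIPELINES registry) or a callable."""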
+
+    def __init__(self, transforms):
+        assert isinstance(transforms, collections.abc.Sequence)
+        self.transforms = []
+        for transform in transforms:
+            if isinstance(transform, dict):
+                transform = build_from_cfg(transform, PIPELINES)
+                self.transforms.append(transform)
+            elif callable(transform):
+                self.transforms.append(transform)
+            else:
+                raise TypeError('transform must be callable or a dict')
+
+    def __call__(self, data):
+        for t in self.transforms:
+            data = t(data)
+            if data is None:
+                return None
+        return data
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + '('
+        for t in self.transforms:
+            format_string += '\n'
+            format_string += '    {0}'.format(t)
+        format_string += '\n)'
+        return format_string
diff --git a/mmdet/datasets/pipelines/formating.py b/mmdet/datasets/pipelines/formating.py
new file mode 100644
index 0000000..f5357f7
--- /dev/null
+++ b/mmdet/datasets/pipelines/formating.py
@@ -0,0 +1,157 @@
+from collections.abc import Sequence
+
+import mmcv
+import numpy as np
+import torch
+from mmcv.parallel import DataContainer as DC
+
+from ..registry import PIPELINES
+
+
+def to_tensor(data):
+    """Convert objects of various python types to :obj:`torch.Tensor`.
+
+    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
+    :class:`Sequence`, :class:`int` and :class:`float`.
+    """
+    if isinstance(data, torch.Tensor):
+        return data
+    elif isinstance(data, np.ndarray):
+        return torch.from_numpy(data)
+    elif isinstance(data, Sequence) and not mmcv.is_str(data):
+        return torch.tensor(data)
+    elif isinstance(data, int):
+        return torch.LongTensor([data])
+    elif isinstance(data, float):
+        return torch.FloatTensor([data])
+    else:
+        raise TypeError('type {} cannot be converted to tensor.'.format(
+            type(data)))
+
+
+@PIPELINES.register_module
+class ToTensor(object):
+
+    def __init__(self, keys):
+        self.keys = keys
+
+    def __call__(self, results):
+        for key in self.keys:
+            results[key] = to_tensor(results[key])
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(keys={})'.format(self.keys)
+
+
+@PIPELINES.register_module
+class ImageToTensor(object):
+
+    def __init__(self, keys):
+        self.keys = keys
+
+    def __call__(self, results):
+        for key in self.keys:
+            results[key] = to_tensor(results[key].transpose(2, 0, 1))
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(keys={})'.format(self.keys)
+
+
+@PIPELINES.register_module
+class Transpose(object):
+
+    def __init__(self, keys, order):
+        self.keys = keys
+        self.order = order
+
+    def __call__(self, results):
+        for key in self.keys:
+            results[key] = results[key].transpose(self.order)
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(keys={}, order={})'.format(
+            self.keys, self.order)
+
+
+@PIPELINES.register_module
+class ToDataContainer(object):
+
+    def __init__(self,
+                 fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
+                         dict(key='gt_labels'))):
+        self.fields = fields
+
+    def __call__(self, results):
+        for field in self.fields:
+            field = field.copy()
+            key = field.pop('key')
+            results[key] = DC(results[key], **field)
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(fields={})'.format(self.fields)
+
+
+@PIPELINES.register_module
+class DefaultFormatBundle(object):
+    """Default formatting bundle.
+
+    It simplifies the pipeline of formatting common fields, including "img",
+    "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
+    These fields are formatted as follows.
+
+    - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
+    - proposals: (1)to tensor, (2)to DataContainer
+    - gt_bboxes: (1)to tensor, (2)to DataContainer
+    - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
+    - gt_labels: (1)to tensor, (2)to DataContainer
+    - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
+    - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
+                       (3)to DataContainer (stack=True)
+    """
+
+    def __call__(self, results):
+        if 'img' in results:
+            img = np.ascontiguousarray(results['img'].transpose(2, 0, 1))
+            results['img'] = DC(to_tensor(img), stack=True)
+        for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
+            if key not in results:
+                continue
+            results[key] = DC(to_tensor(results[key]))
+        if 'gt_masks' in results:
+            results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
+        if 'gt_semantic_seg' in results:
+            results['gt_semantic_seg'] = DC(
+                to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__
+
+
+@PIPELINES.register_module
+class Collect(object):
+
+    def __init__(self,
+                 keys,
+                 meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
+                            'scale_factor', 'flip', 'img_norm_cfg')):
+        self.keys = keys
+        self.meta_keys = meta_keys
+
+    def __call__(self, results):
+        data = {}
+        img_meta = {}
+        for key in self.meta_keys:
+            img_meta[key] = results[key]
+        data['img_meta'] = DC(img_meta, cpu_only=True)
+        for key in self.keys:
+            data[key] = results[key]
+        return data
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(keys={}, meta_keys={})'.format(
+            self.keys, self.meta_keys)
diff --git a/mmdet/datasets/pipelines/loading.py b/mmdet/datasets/pipelines/loading.py
new file mode 100644
index 0000000..cb5ce38
--- /dev/null
+++ b/mmdet/datasets/pipelines/loading.py
@@ -0,0 +1,145 @@
+import os.path as osp
+import warnings
+
+import mmcv
+import numpy as np
+import pycocotools.mask as maskUtils
+
+from ..registry import PIPELINES
+
+
+@PIPELINES.register_module
+class LoadImageFromFile(object):
+
+    def __init__(self, to_float32=False):
+        self.to_float32 = to_float32
+
+    def __call__(self, results):
+        filename = osp.join(results['img_prefix'],
+                            results['img_info']['filename'])
+        img = mmcv.imread(filename)
+        if self.to_float32:
+            img = img.astype(np.float32)
+        results['filename'] = filename
+        results['img'] = img
+        results['img_shape'] = img.shape
+        results['ori_shape'] = img.shape
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(to_float32={})'.format(
+            self.to_float32)
+
+
+@PIPELINES.register_module
+class LoadAnnotations(object):
+
+    def __init__(self,
+                 with_bbox=True,
+                 with_label=True,
+                 with_mask=False,
+                 with_seg=False,
+                 poly2mask=True,
+                 skip_img_without_anno=True):
+        self.with_bbox = with_bbox
+        self.with_label = with_label
+        self.with_mask = with_mask
+        self.with_seg = with_seg
+        self.poly2mask = poly2mask
+        self.skip_img_without_anno = skip_img_without_anno
+
+    def _load_bboxes(self, results):
+        ann_info = results['ann_info']
+        results['gt_bboxes'] = ann_info['bboxes']
+        if len(results['gt_bboxes']) == 0 and self.skip_img_without_anno:
+            file_path = osp.join(results['img_prefix'],
+                                 results['img_info']['filename'])
+            warnings.warn(
+                'Skip the image "{}" that has no valid gt bbox'.format(
+                    file_path))
+            return None
+        results['gt_bboxes_ignore'] = ann_info.get('bboxes_ignore', None)
+        results['bbox_fields'].extend(['gt_bboxes', 'gt_bboxes_ignore'])
+        return results
+
+    def _load_labels(self, results):
+        results['gt_labels'] = results['ann_info']['labels']
+        return results
+
+    def _poly2mask(self, mask_ann, img_h, img_w):
+        if isinstance(mask_ann, list):
+            # polygon -- a single object might consist of multiple parts
+            # we merge all parts into one mask rle code
+            rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
+            rle = maskUtils.merge(rles)
+        elif isinstance(mask_ann['counts'], list):
+            # uncompressed RLE
+            rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
+        else:
+            # rle
+            rle = mask_ann
+        mask = maskUtils.decode(rle)
+        return mask
+
+    def _load_masks(self, results):
+        h, w = results['img_info']['height'], results['img_info']['width']
+        gt_masks = results['ann_info']['masks']
+        if self.poly2mask:
+            gt_masks = [self._poly2mask(mask, h, w) for mask in gt_masks]
+        results['gt_masks'] = gt_masks
+        results['mask_fields'].append('gt_masks')
+        return results
+
+    def _load_semantic_seg(self, results):
+        results['gt_semantic_seg'] = mmcv.imread(
+            osp.join(results['seg_prefix'], results['ann_info']['seg_map']),
+            flag='unchanged').squeeze()
+        return results
+
+    def __call__(self, results):
+        if self.with_bbox:
+            results = self._load_bboxes(results)
+            if results is None:
+                return None
+        if self.with_label:
+            results = self._load_labels(results)
+        if self.with_mask:
+            results = self._load_masks(results)
+        if self.with_seg:
+            results = self._load_semantic_seg(results)
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += ('(with_bbox={}, with_label={}, with_mask={},'
+                     ' with_seg={})').format(self.with_bbox, self.with_label,
+                                             self.with_mask, self.with_seg)
+        return repr_str
+
+
+@PIPELINES.register_module
+class LoadProposals(object):
+
+    def __init__(self, num_max_proposals=None):
+        self.num_max_proposals = num_max_proposals
+
+    def __call__(self, results):
+        proposals = results['proposals']
+        if proposals.shape[1] not in (4, 5):
+            raise AssertionError(
+                'proposals should have shape (n, 4) or (n, 5), '
+                'but found {}'.format(proposals.shape))
+        proposals = proposals[:, :4]
+
+        if self.num_max_proposals is not None:
+            proposals = proposals[:self.num_max_proposals]
+
+        if len(proposals) == 0:
+            # keep a dummy proposal so downstream transforms see a 2-D array
+            proposals = np.array([[0, 0, 0, 0]], dtype=np.float32)
+        results['proposals'] = proposals
+        results['bbox_fields'].append('proposals')
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(num_max_proposals={})'.format(
+            self.num_max_proposals)
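
The loading transforms above take over the work previously done inside the dataset classes: reading the image, attaching annotations, and optionally clipping precomputed proposals. A sketch of a loading head for a mask-level task (argument values are illustrative):

```python
# Hypothetical start of a pipeline; LoadProposals would only be added for
# detectors that consume precomputed proposals (e.g. Fast R-CNN).
loading_steps = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_label=True,
        with_mask=True),
]
```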
diff --git a/mmdet/datasets/pipelines/test_aug.py b/mmdet/datasets/pipelines/test_aug.py
new file mode 100644
index 0000000..b5d2180
--- /dev/null
+++ b/mmdet/datasets/pipelines/test_aug.py
@@ -0,0 +1,38 @@
+import mmcv
+
+from ..registry import PIPELINES
+from .compose import Compose
+
+
+@PIPELINES.register_module
+class MultiScaleFlipAug(object):
+
+    def __init__(self, transforms, img_scale, flip=False):
+        self.transforms = Compose(transforms)
+        self.img_scale = img_scale if isinstance(img_scale,
+                                                 list) else [img_scale]
+        assert mmcv.is_list_of(self.img_scale, tuple)
+        self.flip = flip
+
+    def __call__(self, results):
+        aug_data = []
+        flip_aug = [False, True] if self.flip else [False]
+        for scale in self.img_scale:
+            for flip in flip_aug:
+                _results = results.copy()
+                _results['scale'] = scale
+                _results['flip'] = flip
+                data = self.transforms(_results)
+                aug_data.append(data)
+        # list of dict to dict of list
+        aug_data_dict = {key: [] for key in aug_data[0]}
+        for data in aug_data:
+            for key, val in data.items():
+                aug_data_dict[key].append(val)
+        return aug_data_dict
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += '(transforms={}, img_scale={}, flip={})'.format(
+            self.transforms, self.img_scale, self.flip)
+        return repr_str
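
`MultiScaleFlipAug` runs the wrapped transforms once per (scale, flip) combination and regroups the resulting list of dicts into a dict of lists. A sketch of a test-time pipeline built around it; `ImageToTensor` and the normalization constants are assumptions, not defined in this hunk:

```python
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
```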
diff --git a/mmdet/datasets/pipelines/transforms.py b/mmdet/datasets/pipelines/transforms.py
new file mode 100644
index 0000000..60ee42d
--- /dev/null
+++ b/mmdet/datasets/pipelines/transforms.py
@@ -0,0 +1,634 @@
+import mmcv
+import numpy as np
+from imagecorruptions import corrupt
+from numpy import random
+
+from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
+from ..registry import PIPELINES
+
+
+@PIPELINES.register_module
+class Resize(object):
+    """Resize images & bbox & mask.
+
+    This transform resizes the input image to some scale. Bboxes and masks are
+    then resized with the same scale factor. If the input dict contains the key
+    "scale", then the scale in the input dict is used, otherwise the specified
+    scale in the init method is used.
+
+    `img_scale` can either be a tuple (single-scale) or a list of tuples
+    (multi-scale). There are 3 multiscale modes:
+    - `ratio_range` is not None: randomly sample a ratio from the ratio range
+        and multiply it with the image scale.
+    - `ratio_range` is None and `multiscale_mode` == "range": randomly sample a
+        scale from a range.
+    - `ratio_range` is None and `multiscale_mode` == "value": randomly sample a
+        scale from multiple scales.
+
+    Args:
+        img_scale (tuple or list[tuple]): Image scales for resizing.
+        multiscale_mode (str): Either "range" or "value".
+        ratio_range (tuple[float]): (min_ratio, max_ratio)
+        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
+            image.
+    """
+
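+    # Illustrative configs for the three modes (example values only, not
+    # taken from any shipped config):
+    #   dict(type='Resize', img_scale=(1333, 800), ratio_range=(0.8, 1.2))
+    #   dict(type='Resize', img_scale=[(1333, 640), (1333, 800)],
+    #        multiscale_mode='range')
+    #   dict(type='Resize', img_scale=[(1333, 640), (1333, 800)],
+    #        multiscale_mode='value')
+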
+    def __init__(self,
+                 img_scale=None,
+                 multiscale_mode='range',
+                 ratio_range=None,
+                 keep_ratio=True):
+        if img_scale is None:
+            self.img_scale = None
+        else:
+            if isinstance(img_scale, list):
+                self.img_scale = img_scale
+            else:
+                self.img_scale = [img_scale]
+            assert mmcv.is_list_of(self.img_scale, tuple)
+
+        if ratio_range is not None:
+            # mode 1: given a scale and a range of image ratio
+            assert len(self.img_scale) == 1
+        else:
+            # mode 2: given multiple scales or a range of scales
+            assert multiscale_mode in ['value', 'range']
+
+        self.multiscale_mode = multiscale_mode
+        self.ratio_range = ratio_range
+        self.keep_ratio = keep_ratio
+
+    @staticmethod
+    def random_select(img_scales):
+        assert mmcv.is_list_of(img_scales, tuple)
+        scale_idx = np.random.randint(len(img_scales))
+        img_scale = img_scales[scale_idx]
+        return img_scale, scale_idx
+
+    @staticmethod
+    def random_sample(img_scales):
+        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
+        img_scale_long = [max(s) for s in img_scales]
+        img_scale_short = [min(s) for s in img_scales]
+        long_edge = np.random.randint(
+            min(img_scale_long),
+            max(img_scale_long) + 1)
+        short_edge = np.random.randint(
+            min(img_scale_short),
+            max(img_scale_short) + 1)
+        img_scale = (long_edge, short_edge)
+        return img_scale, None
+
+    @staticmethod
+    def random_sample_ratio(img_scale, ratio_range):
+        assert isinstance(img_scale, tuple) and len(img_scale) == 2
+        min_ratio, max_ratio = ratio_range
+        assert min_ratio <= max_ratio
+        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
+        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
+        return scale, None
+
+    def _random_scale(self, results):
+        if self.ratio_range is not None:
+            scale, scale_idx = self.random_sample_ratio(
+                self.img_scale[0], self.ratio_range)
+        elif len(self.img_scale) == 1:
+            scale, scale_idx = self.img_scale[0], 0
+        elif self.multiscale_mode == 'range':
+            scale, scale_idx = self.random_sample(self.img_scale)
+        elif self.multiscale_mode == 'value':
+            scale, scale_idx = self.random_select(self.img_scale)
+        else:
+            raise NotImplementedError
+
+        results['scale'] = scale
+        results['scale_idx'] = scale_idx
+
+    def _resize_img(self, results):
+        if self.keep_ratio:
+            img, scale_factor = mmcv.imrescale(
+                results['img'], results['scale'], return_scale=True)
+        else:
+            img, w_scale, h_scale = mmcv.imresize(
+                results['img'], results['scale'], return_scale=True)
+            scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
+                                    dtype=np.float32)
+        results['img'] = img
+        results['img_shape'] = img.shape
+        results['pad_shape'] = img.shape  # in case that there is no padding
+        results['scale_factor'] = scale_factor
+        results['keep_ratio'] = self.keep_ratio
+
+    def _resize_bboxes(self, results):
+        img_shape = results['img_shape']
+        for key in results.get('bbox_fields', []):
+            bboxes = results[key] * results['scale_factor']
+            bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1] - 1)
+            bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0] - 1)
+            results[key] = bboxes
+
+    def _resize_masks(self, results):
+        for key in results.get('mask_fields', []):
+            if results[key] is None:
+                continue
+            if self.keep_ratio:
+                masks = [
+                    mmcv.imrescale(
+                        mask, results['scale_factor'], interpolation='nearest')
+                    for mask in results[key]
+                ]
+            else:
+                mask_size = (results['img_shape'][1], results['img_shape'][0])
+                masks = [
+                    mmcv.imresize(mask, mask_size, interpolation='nearest')
+                    for mask in results[key]
+                ]
+            results[key] = masks
+
+    def __call__(self, results):
+        if 'scale' not in results:
+            self._random_scale(results)
+        self._resize_img(results)
+        self._resize_bboxes(results)
+        self._resize_masks(results)
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += ('(img_scale={}, multiscale_mode={}, ratio_range={}, '
+                     'keep_ratio={})').format(self.img_scale,
+                                              self.multiscale_mode,
+                                              self.ratio_range,
+                                              self.keep_ratio)
+        return repr_str
+
+
+@PIPELINES.register_module
+class RandomFlip(object):
+    """Flip the image & bbox & mask.
+
+    If the input dict contains the key "flip", then the flag will be used,
+    otherwise it will be randomly decided by a ratio specified in the init
+    method.
+
+    Args:
+        flip_ratio (float, optional): The flipping probability.
+    """
+
+    def __init__(self, flip_ratio=None):
+        self.flip_ratio = flip_ratio
+        if flip_ratio is not None:
+            assert 0 <= flip_ratio <= 1
+
+    def bbox_flip(self, bboxes, img_shape):
+        """Flip bboxes horizontally.
+
+        Args:
+            bboxes(ndarray): shape (..., 4*k)
+            img_shape(tuple): (height, width)
+        """
+        assert bboxes.shape[-1] % 4 == 0
+        w = img_shape[1]
+        flipped = bboxes.copy()
+        flipped[..., 0::4] = w - bboxes[..., 2::4] - 1
+        flipped[..., 2::4] = w - bboxes[..., 0::4] - 1
+        return flipped
+
+    def __call__(self, results):
+        if 'flip' not in results:
+            flip_ratio = self.flip_ratio if self.flip_ratio is not None else 0
+            results['flip'] = bool(np.random.rand() < flip_ratio)
+        if results['flip']:
+            # flip image
+            results['img'] = mmcv.imflip(results['img'])
+            # flip bboxes
+            for key in results.get('bbox_fields', []):
+                results[key] = self.bbox_flip(results[key],
+                                              results['img_shape'])
+            # flip masks
+            for key in results.get('mask_fields', []):
+                results[key] = [mask[:, ::-1] for mask in results[key]]
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(flip_ratio={})'.format(
+            self.flip_ratio)
+
+
+@PIPELINES.register_module
+class Pad(object):
+    """Pad the image & mask.
+
+    There are two padding modes: (1) pad to a fixed size and (2) pad to the
+    minimum size that is divisible by some number.
+
+    Args:
+        size (tuple, optional): Fixed padding size.
+        size_divisor (int, optional): The divisor of padded size.
+        pad_val (float, optional): Padding value, 0 by default.
+    """
+
+    def __init__(self, size=None, size_divisor=None, pad_val=0):
+        self.size = size
+        self.size_divisor = size_divisor
+        self.pad_val = pad_val
+        # only one of size and size_divisor should be valid
+        assert size is not None or size_divisor is not None
+        assert size is None or size_divisor is None
+
+    def _pad_img(self, results):
+        if self.size is not None:
+            padded_img = mmcv.impad(
+                results['img'], self.size, pad_val=self.pad_val)
+        elif self.size_divisor is not None:
+            padded_img = mmcv.impad_to_multiple(
+                results['img'], self.size_divisor, pad_val=self.pad_val)
+        results['img'] = padded_img
+        results['pad_shape'] = padded_img.shape
+        results['pad_fixed_size'] = self.size
+        results['pad_size_divisor'] = self.size_divisor
+
+    def _pad_masks(self, results):
+        pad_shape = results['pad_shape'][:2]
+        for key in results.get('mask_fields', []):
+            padded_masks = [
+                mmcv.impad(mask, pad_shape, pad_val=self.pad_val)
+                for mask in results[key]
+            ]
+            results[key] = np.stack(padded_masks, axis=0)
+
+    def __call__(self, results):
+        self._pad_img(results)
+        self._pad_masks(results)
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += '(size={}, size_divisor={}, pad_val={})'.format(
+            self.size, self.size_divisor, self.pad_val)
+        return repr_str
+
+
+@PIPELINES.register_module
+class Normalize(object):
+    """Normalize the image.
+
+    Args:
+        mean (sequence): Mean values of 3 channels.
+        std (sequence): Std values of 3 channels.
+        to_rgb (bool): Whether to convert the image from BGR to RGB.
+            Default: True.
+    """
+
+    def __init__(self, mean, std, to_rgb=True):
+        self.mean = np.array(mean, dtype=np.float32)
+        self.std = np.array(std, dtype=np.float32)
+        self.to_rgb = to_rgb
+
+    def __call__(self, results):
+        results['img'] = mmcv.imnormalize(results['img'], self.mean, self.std,
+                                          self.to_rgb)
+        results['img_norm_cfg'] = dict(
+            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += '(mean={}, std={}, to_rgb={})'.format(
+            self.mean, self.std, self.to_rgb)
+        return repr_str
+
+
+@PIPELINES.register_module
+class RandomCrop(object):
+    """Random crop the image & bboxes.
+
+    Args:
+        crop_size (tuple): Expected size after cropping, (h, w).
+    """
+
+    def __init__(self, crop_size):
+        self.crop_size = crop_size
+
+    def __call__(self, results):
+        img = results['img']
+        margin_h = max(img.shape[0] - self.crop_size[0], 0)
+        margin_w = max(img.shape[1] - self.crop_size[1], 0)
+        offset_h = np.random.randint(0, margin_h + 1)
+        offset_w = np.random.randint(0, margin_w + 1)
+        crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
+        crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
+
+        # crop the image
+        img = img[crop_y1:crop_y2, crop_x1:crop_x2, :]
+        img_shape = img.shape
+        results['img'] = img
+        results['img_shape'] = img_shape
+
+        # crop bboxes accordingly and clip to the image boundary
+        for key in results.get('bbox_fields', []):
+            bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
+                                   dtype=np.float32)
+            bboxes = results[key] - bbox_offset
+            bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1] - 1)
+            bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0] - 1)
+            results[key] = bboxes
+
+        # filter out the gt bboxes that are completely cropped
+        if 'gt_bboxes' in results:
+            gt_bboxes = results['gt_bboxes']
+            valid_inds = (gt_bboxes[:, 2] > gt_bboxes[:, 0]) & (
+                gt_bboxes[:, 3] > gt_bboxes[:, 1])
+            # if no gt bbox remains after cropping, just skip this image
+            if not np.any(valid_inds):
+                return None
+            results['gt_bboxes'] = gt_bboxes[valid_inds, :]
+            if 'gt_labels' in results:
+                results['gt_labels'] = results['gt_labels'][valid_inds]
+
+            # filter and crop the masks
+            if 'gt_masks' in results:
+                valid_gt_masks = []
+                for i in np.where(valid_inds)[0]:
+                    gt_mask = results['gt_masks'][i][crop_y1:crop_y2,
+                                                     crop_x1:crop_x2]
+                    valid_gt_masks.append(gt_mask)
+                results['gt_masks'] = valid_gt_masks
+
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(crop_size={})'.format(
+            self.crop_size)
+
+
+@PIPELINES.register_module
+class SegResizeFlipPadRescale(object):
+    """A sequential transforms to semantic segmentation maps.
+
+    The same pipeline as input images is applied to the semantic segmentation
+    map, and finally rescale it by some scale factor. The transforms include:
+    1. resize
+    2. flip
+    3. pad
+    4. rescale (so that the final size can be different from the image size)
+
+    Args:
+        scale_factor (float): The scale factor of the final output.
+    """
+
+    def __init__(self, scale_factor=1):
+        self.scale_factor = scale_factor
+
+    def __call__(self, results):
+        if results['keep_ratio']:
+            gt_seg = mmcv.imrescale(
+                results['gt_semantic_seg'],
+                results['scale'],
+                interpolation='nearest')
+        else:
+            gt_seg = mmcv.imresize(
+                results['gt_semantic_seg'],
+                results['scale'],
+                interpolation='nearest')
+        if results['flip']:
+            gt_seg = mmcv.imflip(gt_seg)
+        if gt_seg.shape != results['pad_shape'][:2]:
+            gt_seg = mmcv.impad(gt_seg, results['pad_shape'][:2])
+        if self.scale_factor != 1:
+            gt_seg = mmcv.imrescale(
+                gt_seg, self.scale_factor, interpolation='nearest')
+        results['gt_semantic_seg'] = gt_seg
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(scale_factor={})'.format(
+            self.scale_factor)
+
+
+@PIPELINES.register_module
+class PhotoMetricDistortion(object):
+    """Apply photometric distortion to image sequentially, every transformation
+    is applied with a probability of 0.5. The position of random contrast is in
+    second or second to last.
+
+    1. random brightness
+    2. random contrast (mode 0)
+    3. convert color from BGR to HSV
+    4. random saturation
+    5. random hue
+    6. convert color from HSV to BGR
+    7. random contrast (mode 1)
+    8. randomly swap channels
+
+    Args:
+        brightness_delta (int): delta of brightness.
+        contrast_range (tuple): range of contrast.
+        saturation_range (tuple): range of saturation.
+        hue_delta (int): delta of hue.
+    """
+
+    def __init__(self,
+                 brightness_delta=32,
+                 contrast_range=(0.5, 1.5),
+                 saturation_range=(0.5, 1.5),
+                 hue_delta=18):
+        self.brightness_delta = brightness_delta
+        self.contrast_lower, self.contrast_upper = contrast_range
+        self.saturation_lower, self.saturation_upper = saturation_range
+        self.hue_delta = hue_delta
+
+    def __call__(self, results):
+        img = results['img']
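+        # assumes a float image (e.g. loaded with
+        # LoadImageFromFile(to_float32=True)); the in-place float ops
+        # below would fail on a uint8 array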
+        # random brightness
+        if random.randint(2):
+            delta = random.uniform(-self.brightness_delta,
+                                   self.brightness_delta)
+            img += delta
+
+        # mode == 0 --> do random contrast first
+        # mode == 1 --> do random contrast last
+        mode = random.randint(2)
+        if mode == 1:
+            if random.randint(2):
+                alpha = random.uniform(self.contrast_lower,
+                                       self.contrast_upper)
+                img *= alpha
+
+        # convert color from BGR to HSV
+        img = mmcv.bgr2hsv(img)
+
+        # random saturation
+        if random.randint(2):
+            img[..., 1] *= random.uniform(self.saturation_lower,
+                                          self.saturation_upper)
+
+        # random hue
+        if random.randint(2):
+            img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
+            img[..., 0][img[..., 0] > 360] -= 360
+            img[..., 0][img[..., 0] < 0] += 360
+
+        # convert color from HSV to BGR
+        img = mmcv.hsv2bgr(img)
+
+        # random contrast
+        if mode == 0:
+            if random.randint(2):
+                alpha = random.uniform(self.contrast_lower,
+                                       self.contrast_upper)
+                img *= alpha
+
+        # randomly swap channels
+        if random.randint(2):
+            img = img[..., random.permutation(3)]
+
+        results['img'] = img
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += ('(brightness_delta={}, contrast_range=({}, {}), '
+                     'saturation_range=({}, {}), hue_delta={})').format(
+                         self.brightness_delta, self.contrast_lower,
+                         self.contrast_upper, self.saturation_lower,
+                         self.saturation_upper, self.hue_delta)
+        return repr_str
+
+
+@PIPELINES.register_module
+class Expand(object):
+    """Random expand the image & bboxes.
+
+    Randomly place the original image on a canvas of 'ratio' x original image
+    size filled with mean values. The ratio is in the range of ratio_range.
+
+    Args:
+        mean (tuple): mean value of dataset.
+        to_rgb (bool): if need to convert the order of mean to align with RGB.
+        ratio_range (tuple): range of expand ratio.
+    """
+
+    def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4)):
+        self.to_rgb = to_rgb
+        self.ratio_range = ratio_range
+        if to_rgb:
+            self.mean = mean[::-1]
+        else:
+            self.mean = mean
+        self.min_ratio, self.max_ratio = ratio_range
+
+    def __call__(self, results):
+        if random.randint(2):
+            return results
+
+        img, boxes = [results[k] for k in ('img', 'gt_bboxes')]
+
+        h, w, c = img.shape
+        ratio = random.uniform(self.min_ratio, self.max_ratio)
+        expand_img = np.full((int(h * ratio), int(w * ratio), c),
+                             self.mean).astype(img.dtype)
+        left = int(random.uniform(0, w * ratio - w))
+        top = int(random.uniform(0, h * ratio - h))
+        expand_img[top:top + h, left:left + w] = img
+        boxes += np.tile((left, top), 2)
+
+        results['img'] = expand_img
+        results['gt_bboxes'] = boxes
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += '(mean={}, to_rgb={}, ratio_range={})'.format(
+            self.mean, self.to_rgb, self.ratio_range)
+        return repr_str
+
+
+@PIPELINES.register_module
+class MinIoURandomCrop(object):
+    """Random crop the image & bboxes, the cropped patches have minimum IoU
+    requirement with original image & bboxes, the IoU threshold is randomly
+    selected from min_ious.
+
+    Args:
+        min_ious (tuple): minimum IoU threshold
+        crop_size (tuple): Expected size after cropping, (h, w).
+    """
+
+    def __init__(self, min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3):
+        self.min_ious = min_ious
+        # mode 1: return the original image without cropping
+        self.sample_mode = (1, *min_ious, 0)
+        self.min_crop_size = min_crop_size
+
+    def __call__(self, results):
+        img, boxes, labels = [
+            results[k] for k in ('img', 'gt_bboxes', 'gt_labels')
+        ]
+        h, w, c = img.shape
+        while True:
+            mode = random.choice(self.sample_mode)
+            if mode == 1:
+                return results
+
+            min_iou = mode
+            for i in range(50):
+                new_w = random.uniform(self.min_crop_size * w, w)
+                new_h = random.uniform(self.min_crop_size * h, h)
+
+                # h / w in [0.5, 2]
+                if new_h / new_w < 0.5 or new_h / new_w > 2:
+                    continue
+
+                # sample the crop's top-left corner within the image
+                left = random.uniform(0, w - new_w)
+                top = random.uniform(0, h - new_h)
+
+                patch = np.array(
+                    (int(left), int(top), int(left + new_w), int(top + new_h)))
+                overlaps = bbox_overlaps(
+                    patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
+                if overlaps.min() < min_iou:
+                    continue
+
+                # centers of the boxes should be inside the cropped image
+                center = (boxes[:, :2] + boxes[:, 2:]) / 2
+                mask = (center[:, 0] > patch[0]) * (
+                    center[:, 1] > patch[1]) * (center[:, 0] < patch[2]) * (
+                        center[:, 1] < patch[3])
+                if not mask.any():
+                    continue
+                boxes = boxes[mask]
+                labels = labels[mask]
+
+                # adjust boxes
+                img = img[patch[1]:patch[3], patch[0]:patch[2]]
+                boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
+                boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
+                boxes -= np.tile(patch[:2], 2)
+
+                results['img'] = img
+                results['gt_bboxes'] = boxes
+                results['gt_labels'] = labels
+                return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += '(min_ious={}, min_crop_size={})'.format(
+            self.min_ious, self.min_crop_size)
+        return repr_str
+
+
+@PIPELINES.register_module
+class Corrupt(object):
+
+    def __init__(self, corruption, severity=1):
+        self.corruption = corruption
+        self.severity = severity
+
+    def __call__(self, results):
+        results['img'] = corrupt(
+            results['img'].astype(np.uint8),
+            corruption_name=self.corruption,
+            severity=self.severity)
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += '(corruption={}, severity={})'.format(
+            self.corruption, self.severity)
+        return repr_str
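
Taken together, the transforms above compose into full training pipelines declared in the config. A minimal sketch, assuming COCO-style annotations (values are illustrative, not tied to any specific config in this patch):

```python
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='Normalize',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        to_rgb=True),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
```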
diff --git a/mmdet/datasets/registry.py b/mmdet/datasets/registry.py
index e726624..974a4fb 100644
--- a/mmdet/datasets/registry.py
+++ b/mmdet/datasets/registry.py
@@ -1,3 +1,4 @@
 from mmdet.utils import Registry
 
 DATASETS = Registry('dataset')
+PIPELINES = Registry('pipeline')
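
The new PIPELINES registry lets downstream code plug in custom transforms without modifying mmdet itself. A minimal sketch (`AddDummyFlag` is a hypothetical name):

```python
from mmdet.datasets.registry import PIPELINES


@PIPELINES.register_module
class AddDummyFlag(object):
    """Toy transform: each pipeline step takes and returns the results dict."""

    def __call__(self, results):
        results['dummy_flag'] = True  # annotate the sample in place
        return results

    def __repr__(self):
        return self.__class__.__name__
```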
diff --git a/mmdet/datasets/utils.py b/mmdet/datasets/utils.py
deleted file mode 100644
index 9f4f46c..0000000
--- a/mmdet/datasets/utils.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from collections import Sequence
-
-import matplotlib.pyplot as plt
-import mmcv
-import numpy as np
-import torch
-
-
-def to_tensor(data):
-    """Convert objects of various python types to :obj:`torch.Tensor`.
-
-    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
-    :class:`Sequence`, :class:`int` and :class:`float`.
-    """
-    if isinstance(data, torch.Tensor):
-        return data
-    elif isinstance(data, np.ndarray):
-        return torch.from_numpy(data)
-    elif isinstance(data, Sequence) and not mmcv.is_str(data):
-        return torch.tensor(data)
-    elif isinstance(data, int):
-        return torch.LongTensor([data])
-    elif isinstance(data, float):
-        return torch.FloatTensor([data])
-    else:
-        raise TypeError('type {} cannot be converted to tensor.'.format(
-            type(data)))
-
-
-def random_scale(img_scales, mode='range'):
-    """Randomly select a scale from a list of scales or scale ranges.
-
-    Args:
-        img_scales (list[tuple]): Image scale or scale range.
-        mode (str): "range" or "value".
-
-    Returns:
-        tuple: Sampled image scale.
-    """
-    num_scales = len(img_scales)
-    if num_scales == 1:  # fixed scale is specified
-        img_scale = img_scales[0]
-    elif num_scales == 2:  # randomly sample a scale
-        if mode == 'range':
-            img_scale_long = [max(s) for s in img_scales]
-            img_scale_short = [min(s) for s in img_scales]
-            long_edge = np.random.randint(
-                min(img_scale_long),
-                max(img_scale_long) + 1)
-            short_edge = np.random.randint(
-                min(img_scale_short),
-                max(img_scale_short) + 1)
-            img_scale = (long_edge, short_edge)
-        elif mode == 'value':
-            img_scale = img_scales[np.random.randint(num_scales)]
-    else:
-        if mode != 'value':
-            raise ValueError(
-                'Only "value" mode supports more than 2 image scales')
-        img_scale = img_scales[np.random.randint(num_scales)]
-    return img_scale
-
-
-def show_ann(coco, img, ann_info):
-    plt.imshow(mmcv.bgr2rgb(img))
-    plt.axis('off')
-    coco.showAnns(ann_info)
-    plt.show()
diff --git a/mmdet/models/detectors/base.py b/mmdet/models/detectors/base.py
index 038dd10..7650878 100644
--- a/mmdet/models/detectors/base.py
+++ b/mmdet/models/detectors/base.py
@@ -87,12 +87,7 @@ class BaseDetector(nn.Module):
         else:
             return self.forward_test(img, img_meta, **kwargs)
 
-    def show_result(self,
-                    data,
-                    result,
-                    img_norm_cfg,
-                    dataset=None,
-                    score_thr=0.3):
+    def show_result(self, data, result, dataset=None, score_thr=0.3):
         if isinstance(result, tuple):
             bbox_result, segm_result = result
         else:
@@ -100,7 +95,7 @@ class BaseDetector(nn.Module):
 
         img_tensor = data['img'][0]
         img_metas = data['img_meta'][0].data[0]
-        imgs = tensor2imgs(img_tensor, **img_norm_cfg)
+        imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
         assert len(imgs) == len(img_metas)
 
         if dataset is None:
diff --git a/mmdet/models/detectors/cascade_rcnn.py b/mmdet/models/detectors/cascade_rcnn.py
index bd878eb..4333b81 100644
--- a/mmdet/models/detectors/cascade_rcnn.py
+++ b/mmdet/models/detectors/cascade_rcnn.py
@@ -402,7 +402,7 @@ class CascadeRCNN(BaseDetector, RPNTestMixin):
     def aug_test(self, img, img_meta, proposals=None, rescale=False):
         raise NotImplementedError
 
-    def show_result(self, data, result, img_norm_cfg, **kwargs):
+    def show_result(self, data, result, **kwargs):
         if self.with_mask:
             ms_bbox_result, ms_segm_result = result
             if isinstance(ms_bbox_result, dict):
@@ -411,5 +411,4 @@ class CascadeRCNN(BaseDetector, RPNTestMixin):
         else:
             if isinstance(result, dict):
                 result = result['ensemble']
-        super(CascadeRCNN, self).show_result(data, result, img_norm_cfg,
-                                             **kwargs)
+        super(CascadeRCNN, self).show_result(data, result, **kwargs)
diff --git a/mmdet/models/detectors/rpn.py b/mmdet/models/detectors/rpn.py
index c9de290..fafee4f 100644
--- a/mmdet/models/detectors/rpn.py
+++ b/mmdet/models/detectors/rpn.py
@@ -81,7 +81,7 @@ class RPN(BaseDetector, RPNTestMixin):
         # TODO: remove this restriction
         return proposal_list[0].cpu().numpy()
 
-    def show_result(self, data, result, img_norm_cfg, dataset=None, top_k=20):
+    def show_result(self, data, result, dataset=None, top_k=20):
         """Show RPN proposals on the image.
 
         Although we assume batch size is 1, this method supports arbitrary
@@ -89,7 +89,7 @@ class RPN(BaseDetector, RPNTestMixin):
         """
         img_tensor = data['img'][0]
         img_metas = data['img_meta'][0].data[0]
-        imgs = tensor2imgs(img_tensor, **img_norm_cfg)
+        imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
         assert len(imgs) == len(img_metas)
         for img, img_meta in zip(imgs, img_metas):
             h, w, _ = img_meta['img_shape']
diff --git a/mmdet/utils/registry.py b/mmdet/utils/registry.py
index e39552a..a1cc87d 100644
--- a/mmdet/utils/registry.py
+++ b/mmdet/utils/registry.py
@@ -61,14 +61,16 @@ def build_from_cfg(cfg, registry, default_args=None):
     args = cfg.copy()
     obj_type = args.pop('type')
     if mmcv.is_str(obj_type):
-        obj_type = registry.get(obj_type)
-        if obj_type is None:
+        obj_cls = registry.get(obj_type)
+        if obj_cls is None:
             raise KeyError('{} is not in the {} registry'.format(
                 obj_type, registry.name))
-    elif not inspect.isclass(obj_type):
+    elif inspect.isclass(obj_type):
+        obj_cls = obj_type
+    else:
         raise TypeError('type must be a str or valid type, but got {}'.format(
             type(obj_type)))
     if default_args is not None:
         for name, value in default_args.items():
             args.setdefault(name, value)
-    return obj_type(**args)
+    return obj_cls(**args)
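
With `obj_cls` split out from `obj_type`, `build_from_cfg` now accepts either a registered type name or the class object itself. A small sketch using the Resize transform added by this patch:

```python
from mmdet.datasets.pipelines.transforms import Resize
from mmdet.datasets.registry import PIPELINES
from mmdet.utils.registry import build_from_cfg

# both calls construct equivalent Resize objects
r1 = build_from_cfg(dict(type='Resize', img_scale=(1333, 800)), PIPELINES)
r2 = build_from_cfg(dict(type=Resize, img_scale=(1333, 800)), PIPELINES)
```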
diff --git a/tools/test.py b/tools/test.py
index c0fdec7..e3ff487 100644
--- a/tools/test.py
+++ b/tools/test.py
@@ -27,7 +27,7 @@ def single_gpu_test(model, data_loader, show=False):
         results.append(result)
 
         if show:
-            model.module.show_result(data, result, dataset.img_norm_cfg)
+            model.module.show_result(data, result)
 
         batch_size = data['img'][0].size(0)
         for _ in range(batch_size):
diff --git a/tools/test_robustness.py b/tools/test_robustness.py
index 584654b..e263215 100644
--- a/tools/test_robustness.py
+++ b/tools/test_robustness.py
@@ -1,4 +1,5 @@
 import argparse
+import copy
 import os
 import os.path as osp
 import shutil
@@ -350,13 +351,15 @@ def main():
                 continue
 
             # assign corruption and severity
-            if corruption_severity == 0:
-                # evaluate without corruptions for severity = 0
-                cfg.data.test['corruption'] = None
-                cfg.data.test['corruption_severity'] = 0
-            else:
-                cfg.data.test['corruption'] = corruption
-                cfg.data.test['corruption_severity'] = corruption_severity
+            if corruption_severity > 0:
+                test_data_cfg = copy.deepcopy(cfg.data.test)
+                corruption_trans = dict(
+                    type='Corrupt',
+                    corruption=corruption,
+                    severity=corruption_severity)
+                # TODO: hard coded "1", we assume that the first step is
+                # loading images, which needs to be fixed in the future
+                test_data_cfg['pipeline'].insert(1, corruption_trans)
 
             # print info
             print('\nTesting {} at severity {}'.format(corruption,
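
The insertion above splices a Corrupt step right after image loading. A standalone sketch of the same mutation on a hypothetical two-step test pipeline (`ImageToTensor` stands in for the usual tail):

```python
pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ImageToTensor', keys=['img']),  # stand-in for the usual tail
]
corruption_trans = dict(
    type='Corrupt', corruption='gaussian_noise', severity=3)
pipeline.insert(1, corruption_trans)  # index 1: right after image loading
```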
-- 
GitLab