diff --git a/GETTING_STARTED.md b/GETTING_STARTED.md
index 9e9c6f0c3b1a471a942595d98f5dd4987ff97f50..728cbbeeba746beb4bdd64898c6e3fba38ca0aab 100644
--- a/GETTING_STARTED.md
+++ b/GETTING_STARTED.md
@@ -57,7 +57,47 @@ python tools/test.py configs/mask_rcnn_r50_fpn_1x.py \
     8 --out results.pkl --eval bbox segm
 ```
 
-### High-level APIs for testing images.
+### Webcam demo
+
+We provide a webcam demo that runs a detector on live camera frames and visualizes the results.
+
+```shell
+python tools/webcam_demo.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--camera-id ${CAMERA_ID}] [--score-thr ${SCORE_THR}]
+```
+
+Examples:
+
+```shell
+python tools/webcam_demo.py configs/faster_rcnn_r50_fpn_1x.py \
+    checkpoints/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth
+```
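+
+The demo reads frames with `cv2.VideoCapture`, which also accepts a path to a
+video file, so the same detector can run on a saved video with a small variant
+of the loop (a sketch, not part of this patch; `demo.mp4` is a placeholder):
+
+```python
+import cv2
+import torch
+
+from mmdet.apis import inference_detector, init_detector, show_result
+
+model = init_detector(
+    'configs/faster_rcnn_r50_fpn_1x.py',
+    'checkpoints/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth',
+    device=torch.device('cuda', 0))
+
+# a video file path works in place of a camera id
+video = cv2.VideoCapture('demo.mp4')
+while True:
+    ret_val, frame = video.read()
+    if not ret_val:  # stop at the end of the video
+        break
+    result = inference_detector(model, frame)
+    show_result(frame, result, model.CLASSES, score_thr=0.5, wait_time=1)
+```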
+
+### High-level APIs for testing images
 
 Here is an example of building the model and testing given images.
 
diff --git a/tools/webcam_demo.py b/tools/webcam_demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1ef27add072993acc9a842e1568f217e52b1027
--- /dev/null
+++ b/tools/webcam_demo.py
@@ -0,0 +1,43 @@
+import argparse
+
+import cv2
+import torch
+
+from mmdet.apis import inference_detector, init_detector, show_result
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='MMDetection webcam demo')
+    parser.add_argument('config', help='test config file path')
+    parser.add_argument('checkpoint', help='checkpoint file')
+    parser.add_argument('--device', type=int, default=0, help='CUDA device id')
+    parser.add_argument(
+        '--camera-id', type=int, default=0, help='camera device id')
+    parser.add_argument(
+        '--score-thr', type=float, default=0.5, help='bbox score threshold')
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = parse_args()
+
+    # build the detector from the config file and checkpoint file
+    model = init_detector(
+        args.config, args.checkpoint, device=torch.device('cuda', args.device))
+
+    camera = cv2.VideoCapture(args.camera_id)
+
+    while True:
+        # grab a frame; stop when the camera does not return one
+        ret_val, img = camera.read()
+        if not ret_val:
+            break
+        # run inference on the frame and display the detections
+        result = inference_detector(model, img)
+        show_result(
+            img, result, model.CLASSES, score_thr=args.score_thr, wait_time=1)
+
+
+if __name__ == '__main__':
+    main()