qninhdt committed on
Commit
8bb3bd1
1 Parent(s): 3e042ea

Upload 83 files

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. README.md +497 -0
  2. __init__.py +1 -0
  3. configs/_base_/datasets/bdd100k.py +56 -0
  4. configs/_base_/datasets/bdd100k_mstrain.py +68 -0
  5. configs/_base_/datasets/bdd100k_strong.py +71 -0
  6. configs/_base_/default_runtime.py +18 -0
  7. configs/_base_/models/atss_r50_fpn.py +67 -0
  8. configs/_base_/models/atss_r50_fpn_dyhead.py +70 -0
  9. configs/_base_/models/cascade_rcnn_r50_fpn.py +206 -0
  10. configs/_base_/models/faster_rcnn_r50_fpn.py +130 -0
  11. configs/_base_/models/fcos_r50_fpn.py +66 -0
  12. configs/_base_/models/retinanet_r50_fpn.py +70 -0
  13. configs/_base_/schedules/schedule_1x.py +12 -0
  14. configs/_base_/schedules/schedule_3x.py +12 -0
  15. configs/_base_/schedules/schedule_5x.py +12 -0
  16. configs/det/atss_r101_fpn_3x_det_bdd100k.py +10 -0
  17. configs/det/atss_r101_fpn_dyhead_3x_det_bdd100k.py +10 -0
  18. configs/det/atss_r50_fpn_1x_det_bdd100k.py +11 -0
  19. configs/det/atss_r50_fpn_3x_det_bdd100k.py +11 -0
  20. configs/det/atss_r50_fpn_dyhead_1x_det_bdd100k.py +11 -0
  21. configs/det/atss_r50_fpn_dyhead_3x_det_bdd100k.py +11 -0
  22. configs/det/cascade_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.py +21 -0
  23. configs/det/cascade_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.py +45 -0
  24. configs/det/cascade_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.py +45 -0
  25. configs/det/cascade_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.py +45 -0
  26. configs/det/cascade_rcnn_r101_fpn_3x_det_bdd100k.py +10 -0
  27. configs/det/cascade_rcnn_r50_fpn_1x_det_bdd100k.py +9 -0
  28. configs/det/cascade_rcnn_r50_fpn_3x_det_bdd100k.py +9 -0
  29. configs/det/cascade_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.py +28 -0
  30. configs/det/cascade_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.py +51 -0
  31. configs/det/cascade_rcnn_swin-t_fpn_3x_det_bdd100k.py +51 -0
  32. configs/det/faster_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.py +21 -0
  33. configs/det/faster_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.py +45 -0
  34. configs/det/faster_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.py +45 -0
  35. configs/det/faster_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.py +45 -0
  36. configs/det/faster_rcnn_hrnetv2p_w18_1x_det_bdd100k.py +17 -0
  37. configs/det/faster_rcnn_hrnetv2p_w18_3x_det_bdd100k.py +17 -0
  38. configs/det/faster_rcnn_hrnetv2p_w32_1x_det_bdd100k.py +51 -0
  39. configs/det/faster_rcnn_hrnetv2p_w32_3x_det_bdd100k.py +51 -0
  40. configs/det/faster_rcnn_r101_fpn_3x_det_bdd100k.py +10 -0
  41. configs/det/faster_rcnn_r101_fpn_5x_det_bdd100k.py +10 -0
  42. configs/det/faster_rcnn_r101_fpn_dconv_3x_det_bdd100k.py +10 -0
  43. configs/det/faster_rcnn_r50_fpn_1x_det_bdd100k.py +9 -0
  44. configs/det/faster_rcnn_r50_fpn_3x_det_bdd100k.py +9 -0
  45. configs/det/faster_rcnn_r50_fpn_5x_det_bdd100k.py +9 -0
  46. configs/det/faster_rcnn_r50_fpn_dconv_1x_det_bdd100k.py +10 -0
  47. configs/det/faster_rcnn_r50_fpn_dconv_3x_det_bdd100k.py +10 -0
  48. configs/det/faster_rcnn_r50_fpn_rsb_1x_det_bdd100k.py +26 -0
  49. configs/det/faster_rcnn_r50_fpn_rsb_3x_det_bdd100k.py +26 -0
  50. configs/det/faster_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.py +28 -0
README.md ADDED
@@ -0,0 +1,497 @@
1
+ # Detection Models of BDD100K
2
+
3
+ The object detection task involves localization (predicting a bounding box for each object) and classification (predicting the object category).
4
+
5
+ ![det1](../doc/images/det1.png)
6
+
7
+ The BDD100K dataset contains bounding box annotations for 100K images (70K/10K/20K for train/val/test). Each annotation contains bounding box labels for 10 object classes. For details about downloading the data and the annotation format for this task, see the [official documentation](https://doc.bdd100k.com/download.html).
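For a quick sanity check after downloading, the snippet below iterates over the detection labels. It assumes the Scalabel-style JSON layout used by BDD100K (a list of per-image entries with `name`, `labels`, `category`, and `box2d` fields) and a hypothetical local path; adjust both to match your download.

```python
import json

# Hypothetical path to the downloaded detection labels (val split).
LABEL_FILE = "data/bdd100k/labels/det_20/det_val.json"

with open(LABEL_FILE, "r") as f:
    frames = json.load(f)  # one entry per image

for frame in frames[:3]:
    print(frame["name"])
    for label in frame.get("labels", []):
        box = label.get("box2d")
        if box is None:  # skip labels without a 2D box
            continue
        w, h = box["x2"] - box["x1"], box["y2"] - box["y1"]
        print(f'  {label["category"]}: {w:.0f}x{h:.0f} px')
```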
8
+
9
+ ## Model Zoo
10
+
11
+ For training the models listed below, we follow the common settings used by MMDetection (details [here](https://github.com/open-mmlab/mmdetection/blob/master/docs/model_zoo.md#common-settings)), unless otherwise stated.
12
+ All models are trained on either 8 GeForce RTX 2080 Ti GPUs or 8 TITAN RTX GPUs with a batch size of 4x8=32.
13
+ See the config files for the detailed settings of each model.
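As a rough illustration of how these configs compose (a sketch, not the verbatim contents of any file here), a top-level config such as `configs/det/faster_rcnn_r50_fpn_1x_det_bdd100k.py` typically just inherits the `_base_` model, dataset, schedule, and runtime files and overrides a few fields, e.g. the per-GPU batch size behind the 4x8=32 total:

```python
# MMDetection-style config sketch; the exact fields in this repository may differ.
_base_ = [
    "../_base_/models/faster_rcnn_r50_fpn.py",
    "../_base_/datasets/bdd100k.py",
    "../_base_/schedules/schedule_1x.py",
    "../_base_/default_runtime.py",
]

data = dict(samples_per_gpu=4, workers_per_gpu=2)  # 4 images/GPU x 8 GPUs = 32
```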
14
+
15
+ ## Table of Contents
16
+
17
+ * [Models](#model-zoo)
18
+ * [Faster R-CNN](#faster-r-cnn)
19
+ * [RetinaNet](#retinanet)
20
+ * [Cascade R-CNN](#cascade-r-cnn)
21
+ * [FCOS](#fcos)
22
+ * [Deformable ConvNets v2](#deformable-convnets-v2)
23
+ * [Libra R-CNN](#libra-r-cnn)
24
+ * [HRNet](#hrnet)
25
+ * [ATSS](#atss)
26
+ * [Sparse R-CNN](#sparse-r-cnn)
27
+ * [DyHead](#dyhead)
28
+ * [Swin Transformer](#swin-transformer)
29
+ * [Pyramid Vision Transformer](#pyramid-vision-transformer)
30
+ * [Pyramid Vision Transformer v2](#pyramid-vision-transformer-v2)
31
+ * [ResNet Strikes Back](#resnet-strikes-back)
32
+ * [ConvNeXt](#convnext)
33
+ * [Usage](#usage)
34
+ * [Contribution](#contribution)
35
+
36
+ ---
37
+
38
+ ### Faster R-CNN
39
+
40
+ [Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks](https://arxiv.org/abs/1506.01497) [NeurIPS 2015]
41
+
42
+ Authors: [Shaoqing Ren](https://www.shaoqingren.com/), [Kaiming He](http://kaiminghe.com/), [Ross Girshick](https://www.rossgirshick.info/), [Jian Sun](http://www.jiansun.org/)
43
+
44
+ <details>
45
+ <summary>Abstract</summary>
46
+ State-of-the-art object detection networks depend on region proposal algorithms to hypothesize object locations. Advances like SPPnet and Fast R-CNN have reduced the running time of these detection networks, exposing region proposal computation as a bottleneck. In this work, we introduce a Region Proposal Network (RPN) that shares full-image convolutional features with the detection network, thus enabling nearly cost-free region proposals. An RPN is a fully convolutional network that simultaneously predicts object bounds and objectness scores at each position. The RPN is trained end-to-end to generate high-quality region proposals, which are used by Fast R-CNN for detection. We further merge RPN and Fast R-CNN into a single network by sharing their convolutional features---using the recently popular terminology of neural networks with 'attention' mechanisms, the RPN component tells the unified network where to look. For the very deep VGG-16 model, our detection system has a frame rate of 5fps (including all steps) on a GPU, while achieving state-of-the-art object detection accuracy on PASCAL VOC 2007, 2012, and MS COCO datasets with only 300 proposals per image. In ILSVRC and COCO 2015 competitions, Faster R-CNN and RPN are the foundations of the 1st-place winning entries in several tracks. Code has been made publicly available.
47
+ </details>
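To make the RPN description above concrete, the sketch below generates the anchor boxes that the RPN classifies and regresses at a single feature-map location. It is a generic illustration, not the anchor generator used by the configs in this repository.

```python
import numpy as np

def anchors_at(cx, cy, scales=(32, 64, 128), ratios=(0.5, 1.0, 2.0)):
    """Anchors (x1, y1, x2, y2) of equal area per scale, centred at (cx, cy)."""
    boxes = []
    for s in scales:
        for r in ratios:              # r = height / width
            w, h = s / np.sqrt(r), s * np.sqrt(r)
            boxes.append([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2])
    return np.array(boxes)            # shape (len(scales) * len(ratios), 4)

print(anchors_at(320, 180).round(1))
```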
48
+
49
+ #### Results
50
+
51
+ | Backbone | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
52
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
53
+ | R-50-FPN | 1x | | 31.04 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_r50_fpn_1x_det_bdd100k.json) | 29.78 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_r50_fpn_1x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_r50_fpn_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_r50_fpn_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_r50_fpn_1x_det_bdd100k.zip) |
54
+ | R-50-FPN | 3x | ✓ | 32.30 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_r50_fpn_3x_det_bdd100k.json) | 31.45 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_r50_fpn_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_r50_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_r50_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_r50_fpn_3x_det_bdd100k.zip) |
55
+ | R-50-FPN | 5x | ✓ | 32.49 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_r50_fpn_5x_det_bdd100k.json) | 31.86 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_r50_fpn_5x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_r50_fpn_5x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_5x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_5x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_r50_fpn_5x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_r50_fpn_5x_det_bdd100k.zip) |
56
+ | R-101-FPN | 3x | ✓ | 32.71 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_r101_fpn_3x_det_bdd100k.json) | 31.96 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_r101_fpn_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_r101_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r101_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r101_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_r101_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_r101_fpn_3x_det_bdd100k.zip) |
57
+ | R-101-FPN | 5x | ✓ | 32.61 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_r101_fpn_5x_det_bdd100k.json) | 31.94 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_r101_fpn_5x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_r101_fpn_5x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r101_fpn_5x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r101_fpn_5x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_r101_fpn_5x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_r101_fpn_5x_det_bdd100k.zip) |
58
+
59
+ [[Code](https://github.com/facebookresearch/detectron2)] [[Usage Instructions](#usage)]
60
+
61
+ ---
62
+
63
+ ### RetinaNet
64
+
65
+ [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002) [ICCV 2017]
66
+
67
+ Authors: [Tsung-Yi Lin](https://scholar.google.com/citations?user=_BPdgV0AAAAJ), [Priya Goyal](https://research.fb.com/people/goyal-priya/), [Ross Girshick](https://www.rossgirshick.info/), [Kaiming He](http://kaiminghe.com/), [Piotr Dollár](https://pdollar.github.io/)
68
+
69
+ <details>
70
+ <summary>Abstract</summary>
71
+ The highest accuracy object detectors to date are based on a two-stage approach popularized by R-CNN, where a classifier is applied to a sparse set of candidate object locations. In contrast, one-stage detectors that are applied over a regular, dense sampling of possible object locations have the potential to be faster and simpler, but have trailed the accuracy of two-stage detectors thus far. In this paper, we investigate why this is the case. We discover that the extreme foreground-background class imbalance encountered during training of dense detectors is the central cause. We propose to address this class imbalance by reshaping the standard cross entropy loss such that it down-weights the loss assigned to well-classified examples. Our novel Focal Loss focuses training on a sparse set of hard examples and prevents the vast number of easy negatives from overwhelming the detector during training. To evaluate the effectiveness of our loss, we design and train a simple dense detector we call RetinaNet. Our results show that when trained with the focal loss, RetinaNet is able to match the speed of previous one-stage detectors while surpassing the accuracy of all existing state-of-the-art two-stage detectors. Code is at: [this https URL](https://github.com/facebookresearch/detectron2).
72
+ </details>
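The focal loss itself is compact enough to show directly. Below is a minimal PyTorch sketch of the sigmoid variant with the paper's defaults (alpha=0.25, gamma=2.0); it is illustrative rather than the exact loss module used by these configs.

```python
import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits, targets, alpha=0.25, gamma=2.0):
    """Focal loss for binary targets in {0, 1}, same shape as logits."""
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p_t = p * targets + (1 - p) * (1 - targets)            # prob of the true class
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1 - p_t) ** gamma * ce).mean()      # down-weights easy examples
```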
73
+
74
+ #### Results
75
+
76
+ | Backbone | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
77
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
78
+ | R-50-FPN | 1x | | 28.58 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/retinanet_r50_fpn_1x_det_bdd100k.json) | 27.14 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/retinanet_r50_fpn_1x_det_bdd100k.json) | [config](./configs/det/retinanet_r50_fpn_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_r50_fpn_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_r50_fpn_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/retinanet_r50_fpn_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/retinanet_r50_fpn_1x_det_bdd100k.zip) |
79
+ | R-50-FPN | 3x | ✓ | 30.91 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/retinanet_r50_fpn_3x_det_bdd100k.json) | 30.21 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/retinanet_r50_fpn_3x_det_bdd100k.json) | [config](./configs/det/retinanet_r50_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_r50_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_r50_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/retinanet_r50_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/retinanet_r50_fpn_3x_det_bdd100k.zip) |
80
+ | R-101-FPN | 3x | ✓ | 31.29 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/retinanet_r101_fpn_3x_det_bdd100k.json) | 30.62 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/retinanet_r101_fpn_3x_det_bdd100k.json) | [config](./configs/det/retinanet_r101_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_r101_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_r101_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/retinanet_r101_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/retinanet_r101_fpn_3x_det_bdd100k.zip) |
81
+
82
+ [[Code](https://github.com/facebookresearch/detectron2)] [[Usage Instructions](#usage)]
83
+
84
+ ---
85
+
86
+ ### Cascade R-CNN
87
+
88
+ [Cascade R-CNN: Delving into High Quality Object Detection](https://arxiv.org/abs/1712.00726) [CVPR 2018]
89
+
90
+ Authors: [Zhaowei Cai](https://zhaoweicai.github.io/), [Nuno Vasconcelos](http://www.svcl.ucsd.edu/~nuno/)
91
+
92
+ <details>
93
+ <summary>Abstract</summary>
94
+ In object detection, an intersection over union (IoU) threshold is required to define positives and negatives. An object detector, trained with low IoU threshold, e.g. 0.5, usually produces noisy detections. However, detection performance tends to degrade with increasing the IoU thresholds. Two main factors are responsible for this: 1) overfitting during training, due to exponentially vanishing positive samples, and 2) inference-time mismatch between the IoUs for which the detector is optimal and those of the input hypotheses. A multi-stage object detection architecture, the Cascade R-CNN, is proposed to address these problems. It consists of a sequence of detectors trained with increasing IoU thresholds, to be sequentially more selective against close false positives. The detectors are trained stage by stage, leveraging the observation that the output of a detector is a good distribution for training the next higher quality detector. The resampling of progressively improved hypotheses guarantees that all detectors have a positive set of examples of equivalent size, reducing the overfitting problem. The same cascade procedure is applied at inference, enabling a closer match between the hypotheses and the detector quality of each stage. A simple implementation of the Cascade R-CNN is shown to surpass all single-model object detectors on the challenging COCO dataset. Experiments also show that the Cascade R-CNN is widely applicable across detector architectures, achieving consistent gains independently of the baseline detector strength. The code will be made available at this [https URL](https://github.com/zhaoweicai/cascade-rcnn).
95
+ </details>
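Schematically, Cascade R-CNN is a loop over detection heads trained with increasing IoU thresholds, each refining the boxes it passes to the next stage. The sketch below uses the paper's 0.5/0.6/0.7 thresholds; `heads` and the per-stage head call are placeholders, not APIs from this repository.

```python
# One detection head per stage; each stage's training positives are defined
# against the ground truth at its own, stricter IoU threshold.
STAGE_IOU_THRESHOLDS = (0.5, 0.6, 0.7)

def cascade_forward(features, proposals, heads):
    """Each head re-scores and refines the boxes produced by the previous stage."""
    boxes, outputs = proposals, []
    for head, iou_thr in zip(heads, STAGE_IOU_THRESHOLDS):
        scores, boxes = head(features, boxes)   # placeholder per-stage head
        outputs.append({"iou_thr": iou_thr, "scores": scores, "boxes": boxes})
    return outputs                              # last stage gives the final detections
```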
96
+
97
+ #### Results
98
+
99
+ | Backbone | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
100
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
101
+ | R-50-FPN | 1x | | 32.40 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/cascade_rcnn_r50_fpn_1x_det_bdd100k.json) | 31.23 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/cascade_rcnn_r50_fpn_1x_det_bdd100k.json) | [config](./configs/det/cascade_rcnn_r50_fpn_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_r50_fpn_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_r50_fpn_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/cascade_rcnn_r50_fpn_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/cascade_rcnn_r50_fpn_1x_det_bdd100k.zip) |
102
+ | R-50-FPN | 3x | ✓ | 33.72 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/cascade_rcnn_r50_fpn_3x_det_bdd100k.json) | 33.07 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/cascade_rcnn_r50_fpn_3x_det_bdd100k.json) | [config](./configs/det/cascade_rcnn_r50_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_r50_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_r50_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/cascade_rcnn_r50_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/cascade_rcnn_r50_fpn_3x_det_bdd100k.zip) |
103
+ | R-101-FPN | 3x | ✓ | 33.57 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/cascade_rcnn_r101_fpn_3x_det_bdd100k.json) | 32.90 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/cascade_rcnn_r101_fpn_3x_det_bdd100k.json) | [config](./configs/det/cascade_rcnn_r101_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_r101_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_r101_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/cascade_rcnn_r101_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/cascade_rcnn_r101_fpn_3x_det_bdd100k.zip) |
104
+
105
+ [[Code](https://github.com/zhaoweicai/cascade-rcnn)] [[Usage Instructions](#usage)]
106
+
107
+ ---
108
+
109
+ ### FCOS
110
+
111
+ [FCOS: Fully Convolutional One-Stage Object Detection](https://arxiv.org/abs/1904.01355) [ICCV 2019]
112
+
113
+ Authors: [Zhi Tian](https://zhitian.xyz/), [Chunhua Shen](https://cshen.github.io/), [Hao Chen](https://stan-haochen.github.io/), [Tong He](https://tonghehehe.com/)
114
+
115
+ <details>
116
+ <summary>Abstract</summary>
117
+ We propose a fully convolutional one-stage object detector (FCOS) to solve object detection in a per-pixel prediction fashion, analogue to semantic segmentation. Almost all state-of-the-art object detectors such as RetinaNet, SSD, YOLOv3, and Faster R-CNN rely on pre-defined anchor boxes. In contrast, our proposed detector FCOS is anchor box free, as well as proposal free. By eliminating the predefined set of anchor boxes, FCOS completely avoids the complicated computation related to anchor boxes such as calculating overlapping during training. More importantly, we also avoid all hyper-parameters related to anchor boxes, which are often very sensitive to the final detection performance. With the only post-processing non-maximum suppression (NMS), FCOS with ResNeXt-64x4d-101 achieves 44.7% in AP with single-model and single-scale testing, surpassing previous one-stage detectors with the advantage of being much simpler. For the first time, we demonstrate a much simpler and flexible detection framework achieving improved detection accuracy. We hope that the proposed FCOS framework can serve as a simple and strong alternative for many other instance-level tasks. Code is available at: [this https URL](https://github.com/tianzhi0549/FCOS/).
118
+ </details>
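One concrete ingredient of FCOS worth showing is its centre-ness target: for a location with distances l, r, t, b to the four sides of its ground-truth box, the target is sqrt(min(l,r)/max(l,r) * min(t,b)/max(t,b)). A small sketch (not the repository's code):

```python
import math

def centerness(l, r, t, b):
    """FCOS centre-ness target; 1.0 at the box centre, approaches 0 near the border."""
    return math.sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b)))

print(centerness(50, 50, 20, 20))  # 1.0, exactly at the centre
print(centerness(90, 10, 35, 5))   # ~0.13, close to a corner
```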
119
+
120
+ #### Results
121
+
122
+ | Backbone | Tricks | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
123
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
124
+ | R-50-FPN | | 1x | | 27.69 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/fcos_r50_fpn_1x_det_bdd100k.json) | 26.16 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/fcos_r50_fpn_1x_det_bdd100k.json) | [config](./configs/det/fcos_r50_fpn_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/fcos_r50_fpn_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/fcos_r50_fpn_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/fcos_r50_fpn_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/fcos_r50_fpn_1x_det_bdd100k.zip) |
125
+ | R-50-FPN | | 3x | ✓ | 30.60 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/fcos_r50_fpn_3x_det_bdd100k.json) | 28.96 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/fcos_r50_fpn_3x_det_bdd100k.json) | [config](./configs/det/fcos_r50_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/fcos_r50_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/fcos_r50_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/fcos_r50_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/fcos_r50_fpn_3x_det_bdd100k.zip) |
126
+ | R-101-FPN | | 3x | ✓ | 31.13 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/fcos_r101_fpn_3x_det_bdd100k.json) | 29.62 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/fcos_r101_fpn_3x_det_bdd100k.json) | [config](./configs/det/fcos_r101_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/fcos_r101_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/fcos_r101_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/fcos_r101_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/fcos_r101_fpn_3x_det_bdd100k.zip) |
127
+ | R-50-FPN | ✓ | 1x | | 26.59 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/fcos_tricks_r50_fpn_1x_det_bdd100k.json) | 24.76 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/fcos_tricks_r50_fpn_1x_det_bdd100k.json) | [config](./configs/det/fcos_tricks_r50_fpn_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/fcos_tricks_r50_fpn_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/fcos_tricks_r50_fpn_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/fcos_tricks_r50_fpn_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/fcos_tricks_r50_fpn_1x_det_bdd100k.zip) |
128
+
129
+ [[Code](https://github.com/tianzhi0549/FCOS/)] [[Usage Instructions](#usage)]
130
+
131
+ ---
132
+
133
+ ### Deformable ConvNets v2
134
+
135
+ [Deformable ConvNets v2: More Deformable, Better Results](https://arxiv.org/abs/1811.11168) [CVPR 2019]
136
+
137
+ Authors: [Xizhou Zhu](https://scholar.google.com/citations?user=02RXI00AAAAJ), [Han Hu](https://sites.google.com/site/hanhushomepage/), [Stephen Lin](https://scholar.google.com/citations?user=c3PYmxUAAAAJ&hl=en), [Jifeng Dai](https://jifengdai.org/)
138
+
139
+ <details>
140
+ <summary>Abstract</summary>
141
+ The superior performance of Deformable Convolutional Networks arises from its ability to adapt to the geometric variations of objects. Through an examination of its adaptive behavior, we observe that while the spatial support for its neural features conforms more closely than regular ConvNets to object structure, this support may nevertheless extend well beyond the region of interest, causing features to be influenced by irrelevant image content. To address this problem, we present a reformulation of Deformable ConvNets that improves its ability to focus on pertinent image regions, through increased modeling power and stronger training. The modeling power is enhanced through a more comprehensive integration of deformable convolution within the network, and by introducing a modulation mechanism that expands the scope of deformation modeling. To effectively harness this enriched modeling capability, we guide network training via a proposed feature mimicking scheme that helps the network to learn features that reflect the object focus and classification power of R-CNN features. With the proposed contributions, this new version of Deformable ConvNets yields significant performance gains over the original model and produces leading results on the COCO benchmark for object detection and instance segmentation.
142
+ </details>
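As a rough sketch of the modulated (v2) deformable convolution described above, the block below predicts per-location sampling offsets and a sigmoid modulation mask from its input and feeds them to `torchvision.ops.deform_conv2d` (mask support needs a reasonably recent torchvision). This is an illustration only; the configs here rely on MMDetection/mmcv's DCN ops instead.

```python
import torch
import torch.nn as nn
from torchvision.ops import deform_conv2d

class ModulatedDeformConv(nn.Module):
    """3x3 DCNv2-style block: offsets and mask are predicted from the input."""
    def __init__(self, in_ch, out_ch, k=3, padding=1):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_ch, in_ch, k, k) * 0.01)
        # 2 offset values (x, y) + 1 mask value per kernel location
        self.conv_offset = nn.Conv2d(in_ch, 3 * k * k, k, padding=padding)
        self.padding = padding

    def forward(self, x):
        o1, o2, mask = torch.chunk(self.conv_offset(x), 3, dim=1)
        offset = torch.cat([o1, o2], dim=1)          # (N, 2*k*k, H, W)
        mask = torch.sigmoid(mask)                   # (N, k*k, H, W), in [0, 1]
        return deform_conv2d(x, offset, self.weight, padding=self.padding, mask=mask)

print(ModulatedDeformConv(64, 64)(torch.randn(1, 64, 32, 32)).shape)  # (1, 64, 32, 32)
```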
143
+
144
+ #### Results
145
+
146
+ | Backbone | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
147
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
148
+ | R-50-FPN | 1x | | 32.09 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_r50_fpn_dconv_1x_det_bdd100k.json) | 30.93 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_r50_fpn_dconv_1x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_r50_fpn_dconv_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_dconv_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_dconv_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_r50_fpn_dconv_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_r50_fpn_dconv_1x_det_bdd100k.zip) |
149
+ | R-50-FPN | 3x | ✓ | 33.21 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_r50_fpn_dconv_3x_det_bdd100k.json) | 32.41 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_r50_fpn_dconv_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_r50_fpn_dconv_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_dconv_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_dconv_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_r50_fpn_dconv_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_r50_fpn_dconv_3x_det_bdd100k.zip) |
150
+ | R-101-FPN | 3x | ✓ | 33.09 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_r101_fpn_dconv_3x_det_bdd100k.json) | 32.43 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_r101_fpn_dconv_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_r101_fpn_dconv_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r101_fpn_dconv_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r101_fpn_dconv_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_r101_fpn_dconv_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_r101_fpn_dconv_3x_det_bdd100k.zip) |
151
+
152
+ [[Code](https://github.com/msracver/Deformable-ConvNets)] [[Usage Instructions](#usage)]
153
+
154
+ ---
155
+
156
+ ### Libra R-CNN
157
+
158
+ [Libra R-CNN: Towards Balanced Learning for Object Detection](https://arxiv.org/abs/1904.02701) [CVPR 2019]
159
+
160
+ Authors: [Jiangmiao Pang](https://scholar.google.com/citations?user=ssSfKpAAAAAJ), [Kai Chen](https://chenkai.site/), [Jianping Shi](https://shijianping.me/), Huajun Feng, [Wanli Ouyang](https://wlouyang.github.io/), [Dahua Lin](http://dahua.site/)
161
+
162
+ <details>
163
+ <summary>Abstract</summary>
164
+ Compared with model architectures, the training process, which is also crucial to the success of detectors, has received relatively less attention in object detection. In this work, we carefully revisit the standard training practice of detectors, and find that the detection performance is often limited by the imbalance during the training process, which generally consists in three levels - sample level, feature level, and objective level. To mitigate the adverse effects caused thereby, we propose Libra R-CNN, a simple but effective framework towards balanced learning for object detection. It integrates three novel components: IoU-balanced sampling, balanced feature pyramid, and balanced L1 loss, respectively for reducing the imbalance at sample, feature, and objective level. Benefitted from the overall balanced design, Libra R-CNN significantly improves the detection performance. Without bells and whistles, it achieves 2.5 points and 2.0 points higher Average Precision (AP) than FPN Faster R-CNN and RetinaNet respectively on MSCOCO.
165
+ </details>
166
+
167
+ #### Results
168
+
169
+ | Backbone | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
170
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
171
+ | R-50-FPN | 1x | | 30.70 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/libra_faster_rcnn_r50_fpn_1x_det_bdd100k.json) | 29.54 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/libra_faster_rcnn_r50_fpn_1x_det_bdd100k.json) | [config](./configs/det/libra_faster_rcnn_r50_fpn_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/libra_faster_rcnn_r50_fpn_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/libra_faster_rcnn_r50_fpn_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/libra_faster_rcnn_r50_fpn_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/libra_faster_rcnn_r50_fpn_1x_det_bdd100k.zip) |
172
+ | R-50-FPN | 3x | ✓ | 32.00 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/libra_faster_rcnn_r50_fpn_3x_det_bdd100k.json) | 31.05 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/libra_faster_rcnn_r50_fpn_3x_det_bdd100k.json) | [config](./configs/det/libra_faster_rcnn_r50_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/libra_faster_rcnn_r50_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/libra_faster_rcnn_r50_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/libra_faster_rcnn_r50_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/libra_faster_rcnn_r50_fpn_3x_det_bdd100k.zip) |
173
+ | R-101-FPN | 3x | ✓ | 32.24 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/libra_faster_rcnn_r101_fpn_3x_det_bdd100k.json) | 31.49 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/libra_faster_rcnn_r101_fpn_3x_det_bdd100k.json) | [config](./configs/det/libra_faster_rcnn_r101_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/libra_faster_rcnn_r101_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/libra_faster_rcnn_r101_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/libra_faster_rcnn_r101_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/libra_faster_rcnn_r101_fpn_3x_det_bdd100k.zip) |
174
+
175
+ [[Code](https://github.com/OceanPang/Libra_R-CNN)] [[Usage Instructions](#usage)]
176
+
177
+ ---
178
+
179
+ ### HRNet
180
+
181
+ [Deep High-Resolution Representation Learning for Visual Recognition](https://arxiv.org/abs/1908.07919) [CVPR 2019 / TPAMI 2020]
182
+
183
+ Authors: [Jingdong Wang](https://jingdongwang2017.github.io/), [Ke Sun](https://github.com/sunke123), [Tianheng Cheng](https://scholar.google.com/citations?user=PH8rJHYAAAAJ), Borui Jiang, Chaorui Deng, [Yang Zhao](https://yangyangkiki.github.io/), Dong Liu, [Yadong Mu](http://www.muyadong.com/), Mingkui Tan, [Xinggang Wang](https://xinggangw.info/), [Wenyu Liu](http://eic.hust.edu.cn/professor/liuwenyu/), [Bin Xiao](https://www.microsoft.com/en-us/research/people/bixi/)
184
+
185
+ <details>
186
+ <summary>Abstract</summary>
187
+ High-resolution representations are essential for position-sensitive vision problems, such as human pose estimation, semantic segmentation, and object detection. Existing state-of-the-art frameworks first encode the input image as a low-resolution representation through a subnetwork that is formed by connecting high-to-low resolution convolutions in series (e.g., ResNet, VGGNet), and then recover the high-resolution representation from the encoded low-resolution representation. Instead, our proposed network, named as High-Resolution Network (HRNet), maintains high-resolution representations through the whole process. There are two key characteristics: (i) Connect the high-to-low resolution convolution streams in parallel; (ii) Repeatedly exchange the information across resolutions. The benefit is that the resulting representation is semantically richer and spatially more precise. We show the superiority of the proposed HRNet in a wide range of applications, including human pose estimation, semantic segmentation, and object detection, suggesting that the HRNet is a stronger backbone for computer vision problems. All the codes are available at [this https URL](https://github.com/HRNet).
188
+ </details>
189
+
190
+ #### Results
191
+
192
+ | Backbone | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
193
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
194
+ | HRNet-w18 | 1x | | 31.74 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_hrnetv2p_w18_1x_det_bdd100k.json) | 30.64 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_hrnetv2p_w18_1x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_hrnetv2p_w18_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_hrnetv2p_w18_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_hrnetv2p_w18_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_hrnetv2p_w18_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_hrnetv2p_w18_1x_det_bdd100k.zip) |
195
+ | HRNet-w18 | 3x | ✓ | 33.35 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_hrnetv2p_w18_3x_det_bdd100k.json) | 32.61 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_hrnetv2p_w18_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_hrnetv2p_w18_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_hrnetv2p_w18_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_hrnetv2p_w18_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_hrnetv2p_w18_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_hrnetv2p_w18_3x_det_bdd100k.zip) |
196
+ | HRNet-w32 | 1x | | 32.84 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_hrnetv2p_w32_1x_det_bdd100k.json) | 31.84 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_hrnetv2p_w32_1x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_hrnetv2p_w32_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_hrnetv2p_w32_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_hrnetv2p_w32_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_hrnetv2p_w32_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_hrnetv2p_w32_1x_det_bdd100k.zip) |
197
+ | HRNet-w32 | 3x | ✓ | 33.97 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_hrnetv2p_w32_3x_det_bdd100k.json) | 33.19 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_hrnetv2p_w32_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_hrnetv2p_w32_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_hrnetv2p_w32_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_hrnetv2p_w32_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_hrnetv2p_w32_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_hrnetv2p_w32_3x_det_bdd100k.zip) |
198
+
199
+ [[Code](https://github.com/HRNet)] [[Usage Instructions](#usage)]
200
+
201
+ ---
202
+
203
+ ### ATSS
204
+
205
+ [Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection](https://arxiv.org/abs/1912.02424) [CVPR 2020]
206
+
207
+ Authors: [Shifeng Zhang](http://www.cbsr.ia.ac.cn/users/sfzhang/), [Cheng Chi](https://chicheng123.github.io/), Yongqiang Yao, Zhen Lei, [Stan Z. Li](https://scholar.google.com/citations?hl=zh-CN&user=Y-nyLGIAAAAJ)
208
+
209
+ <details>
210
+ <summary>Abstract</summary>
211
+ Object detection has been dominated by anchor-based detectors for several years. Recently, anchor-free detectors have become popular due to the proposal of FPN and Focal Loss. In this paper, we first point out that the essential difference between anchor-based and anchor-free detection is actually how to define positive and negative training samples, which leads to the performance gap between them. If they adopt the same definition of positive and negative samples during training, there is no obvious difference in the final performance, no matter regressing from a box or a point. This shows that how to select positive and negative training samples is important for current object detectors. Then, we propose an Adaptive Training Sample Selection (ATSS) to automatically select positive and negative samples according to statistical characteristics of object. It significantly improves the performance of anchor-based and anchor-free detectors and bridges the gap between them. Finally, we discuss the necessity of tiling multiple anchors per location on the image to detect objects. Extensive experiments conducted on MS COCO support our aforementioned analysis and conclusions. With the newly introduced ATSS, we improve state-of-the-art detectors by a large margin to 50.7% AP without introducing any overhead. The code is available at [this https URL](https://github.com/sfzhang15/ATSS).
212
+ </details>
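The adaptive sample selection itself fits in a few lines: for each ground-truth box, take the k anchors whose centres are closest, and use the mean plus standard deviation of their IoUs as that box's positive threshold. A simplified single-level sketch (the full method works per pyramid level and also requires the anchor centre to lie inside the box):

```python
import numpy as np

def atss_positives(gt_center, anchor_centers, ious, k=9):
    """Boolean mask of positive anchors for one GT box (simplified, single level)."""
    dists = np.linalg.norm(anchor_centers - gt_center, axis=1)
    candidates = np.argsort(dists)[:k]           # k closest anchors by centre distance
    cand_ious = ious[candidates]
    thr = cand_ious.mean() + cand_ious.std()     # adaptive, per-GT IoU threshold
    mask = np.zeros(len(ious), dtype=bool)
    mask[candidates[cand_ious >= thr]] = True
    return mask
```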
213
+
214
+ #### Results
215
+
216
+ | Backbone | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
217
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
218
+ | R-50-FPN | 1x | | 31.45 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/atss_r50_fpn_1x_det_bdd100k.json) | 29.92 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/atss_r50_fpn_1x_det_bdd100k.json) | [config](./configs/det/atss_r50_fpn_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/atss_r50_fpn_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/atss_r50_fpn_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/atss_r50_fpn_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/atss_r50_fpn_1x_det_bdd100k.zip) |
219
+ | R-50-FPN | 3x | ✓ | 34.17 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/atss_r50_fpn_3x_det_bdd100k.json) | 32.38 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/atss_r50_fpn_3x_det_bdd100k.json) | [config](./configs/det/atss_r50_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/atss_r50_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/atss_r50_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/atss_r50_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/atss_r50_fpn_3x_det_bdd100k.zip) |
220
+ | R-101-FPN | 3x | ✓ | 33.80 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/atss_r101_fpn_3x_det_bdd100k.json) | 32.58 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/atss_r101_fpn_3x_det_bdd100k.json) | [config](./configs/det/atss_r101_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/atss_r101_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/atss_r101_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/atss_r101_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/atss_r101_fpn_3x_det_bdd100k.zip) |
221
+
222
+ [[Code](https://github.com/sfzhang15/ATSS)] [[Usage Instructions](#usage)]
223
+
224
+ ---
225
+
226
+ ### Sparse R-CNN
227
+
228
+ [Sparse R-CNN: End-to-End Object Detection with Learnable Proposals](https://arxiv.org/abs/2011.12450) [CVPR 2021]
229
+
230
+ Authors: [Peize Sun](https://peizesun.github.io/), Rufeng Zhang, Yi Jiang, [Tao Kong](http://www.taokong.org/), [Chenfeng Xu](https://scholar.google.com/citations?user=RpqvaTUAAAAJ), [Wei Zhan](https://zhanwei.site/), [Masayoshi Tomizuka](https://me.berkeley.edu/people/masayoshi-tomizuka/), [Lei Li](https://sites.cs.ucsb.edu/~lilei/), [Zehuan Yuan](https://shallowyuan.github.io/), [Changhu Wang](https://changhu.wang/), [Ping Luo](http://luoping.me/)
231
+
232
+ <details>
233
+ <summary>Abstract</summary>
234
+ We present Sparse R-CNN, a purely sparse method for object detection in images. Existing works on object detection heavily rely on dense object candidates, such as k anchor boxes pre-defined on all grids of image feature map of size H×W. In our method, however, a fixed sparse set of learned object proposals, total length of N, are provided to object recognition head to perform classification and location. By eliminating HWk (up to hundreds of thousands) hand-designed object candidates to N (e.g. 100) learnable proposals, Sparse R-CNN completely avoids all efforts related to object candidates design and many-to-one label assignment. More importantly, final predictions are directly output without non-maximum suppression post-procedure. Sparse R-CNN demonstrates accuracy, run-time and training convergence performance on par with the well-established detector baselines on the challenging COCO dataset, e.g., achieving 45.0 AP in standard 3× training schedule and running at 22 fps using ResNet-50 FPN model. We hope our work could inspire re-thinking the convention of dense prior in object detectors. The code is available at: [this https URL](https://github.com/PeizeSun/SparseR-CNN).
235
+ </details>
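The "learnable proposals" above are literally a small set of trainable parameters rather than the output of an RPN. A minimal sketch of how such proposals can be declared (names are illustrative, not taken from this repository):

```python
import torch
import torch.nn as nn

class LearnedProposals(nn.Module):
    """N learnable boxes (normalised cx, cy, w, h) plus one feature vector each."""
    def __init__(self, num_proposals=100, feat_dim=256):
        super().__init__()
        self.boxes = nn.Embedding(num_proposals, 4)
        self.features = nn.Embedding(num_proposals, feat_dim)
        # Initialise every box to cover the whole image, as in the paper.
        nn.init.constant_(self.boxes.weight[:, :2], 0.5)
        nn.init.constant_(self.boxes.weight[:, 2:], 1.0)

    def forward(self, batch_size):
        expand = lambda t: t.unsqueeze(0).expand(batch_size, -1, -1)
        return expand(self.boxes.weight), expand(self.features.weight)
```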
236
+
237
+ #### Results
238
+
239
+ | Backbone | Proposals | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
240
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
241
+ | R-50-FPN | 100 | 1x | | 26.71 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/sparse_rcnn_r50_fpn_1x_det_bdd100k.json) | 25.55 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/sparse_rcnn_r50_fpn_1x_det_bdd100k.json) | [config](./configs/det/sparse_rcnn_r50_fpn_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/sparse_rcnn_r50_fpn_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/sparse_rcnn_r50_fpn_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/sparse_rcnn_r50_fpn_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/sparse_rcnn_r50_fpn_1x_det_bdd100k.zip) |
242
+ | R-50-FPN | 100 | 3x | ✓ | 31.31 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/sparse_rcnn_r50_fpn_3x_det_bdd100k.json) | 31.19 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/sparse_rcnn_r50_fpn_3x_det_bdd100k.json) | [config](./configs/det/sparse_rcnn_r50_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/sparse_rcnn_r50_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/sparse_rcnn_r50_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/sparse_rcnn_r50_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/sparse_rcnn_r50_fpn_3x_det_bdd100k.zip) |
243
+ | R-101-FPN | 100 | 3x | ✓ | 32.18 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/sparse_rcnn_r101_fpn_3x_det_bdd100k.json) | 31.45 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/sparse_rcnn_r101_fpn_3x_det_bdd100k.json) | [config](./configs/det/sparse_rcnn_r101_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/sparse_rcnn_r101_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/sparse_rcnn_r101_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/sparse_rcnn_r101_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/sparse_rcnn_r101_fpn_3x_det_bdd100k.zip) |
244
+
245
+ [[Code](https://github.com/PeizeSun/SparseR-CNN)] [[Usage Instructions](#usage)]
246
+
247
+ ---
248
+
249
+ ### DyHead
250
+
251
+ [Dynamic Head: Unifying Object Detection Heads with Attentions](https://arxiv.org/abs/2106.08322) [CVPR 2021]
252
+
253
+ Authors: [Xiyang Dai](https://sites.google.com/site/xiyangdai/), [Yinpeng Chen](https://scholar.google.com/citations?hl=en&user=V_VpLksAAAAJ), [Bin Xiao](https://www.microsoft.com/en-us/research/people/bixi/), [Dongdong Chen](http://www.dongdongchen.bid/), [Mengchen Liu](https://scholar.google.com/citations?user=cOPQtYgAAAAJ), [Lu Yuan](https://scholar.google.com/citations?user=k9TsUVsAAAAJ), [Lei Zhang](https://www.leizhang.org/)
254
+
255
+ <details>
256
+ <summary>Abstract</summary>
257
+ The complex nature of combining localization and classification in object detection has resulted in the flourished development of methods. Previous works tried to improve the performance in various object detection heads but failed to present a unified view. In this paper, we present a novel dynamic head framework to unify object detection heads with attentions. By coherently combining multiple self-attention mechanisms between feature levels for scale-awareness, among spatial locations for spatial-awareness, and within output channels for task-awareness, the proposed approach significantly improves the representation ability of object detection heads without any computational overhead. Further experiments demonstrate that the effectiveness and efficiency of the proposed dynamic head on the COCO benchmark. With a standard ResNeXt-101-DCN backbone, we largely improve the performance over popular object detectors and achieve a new state-of-the-art at 54.0 AP. Furthermore, with latest transformer backbone and extra data, we can push current best COCO result to a new record at 60.6 AP. The code will be released at [this https URL](https://github.com/microsoft/DynamicHead).
258
+ </details>
259
+
260
+ #### Results
261
+
262
+ | Backbone | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
263
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
264
+ | R-50-FPN | 1x | | 32.84 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/atss_r50_fpn_dyhead_1x_det_bdd100k.json) | 31.61 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/atss_r50_fpn_dyhead_1x_det_bdd100k.json) | [config](./configs/det/atss_r50_fpn_dyhead_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/atss_r50_fpn_dyhead_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/atss_r50_fpn_dyhead_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/atss_r50_fpn_dyhead_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/atss_r50_fpn_dyhead_1x_det_bdd100k.zip) |
265
+ | R-50-FPN | 3x | ✓ | 34.56 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/atss_r50_fpn_dyhead_3x_det_bdd100k.json) | 33.77 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/atss_r50_fpn_dyhead_3x_det_bdd100k.json) | [config](./configs/det/atss_r50_fpn_dyhead_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/atss_r50_fpn_dyhead_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/atss_r50_fpn_dyhead_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/atss_r50_fpn_dyhead_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/atss_r50_fpn_dyhead_3x_det_bdd100k.zip) |
266
+ | R-101-FPN | 3x | ✓ | 34.61 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/atss_r101_fpn_dyhead_3x_det_bdd100k.json) | 33.70 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/atss_r101_fpn_dyhead_3x_det_bdd100k.json) | [config](./configs/det/atss_r101_fpn_dyhead_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/atss_r101_fpn_dyhead_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/atss_r101_fpn_dyhead_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/atss_r101_fpn_dyhead_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/atss_r101_fpn_dyhead_3x_det_bdd100k.zip) |
267
+
268
+ [[Code](https://github.com/microsoft/DynamicHead)] [[Usage Instructions](#usage)]
269
+
270
+ ---
271
+
272
+ ### Swin Transformer
273
+
274
+ [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) [ICCV 2021]
275
+
276
+ Authors: [Ze Liu](https://zeliu98.github.io/), [Yutong Lin](https://scholar.google.com/citations?user=mjUgH44AAAAJ), [Yue Cao](http://yue-cao.me), [Han Hu](https://sites.google.com/site/hanhushomepage/), [Yixuan Wei](https://weiyx16.github.io/), [Zheng Zhang](https://stupidzz.github.io/), [Stephen Lin](https://scholar.google.com/citations?user=c3PYmxUAAAAJ&hl=en), [Baining Guo](https://scholar.google.com/citations?user=h4kYmRYAAAAJ)
277
+
278
+ <details>
279
+ <summary>Abstract</summary>
280
+ This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with Shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. The code and models are publicly available at [this https URL](https://github.com/microsoft/Swin-Transformer).
281
+ </details>
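The shifted-window attention above operates on fixed-size local windows; the standard window-partition helper shows how a feature map is cut into those windows. This is the generic form of the operation, not code lifted from the backbone used here.

```python
import torch

def window_partition(x, window_size=7):
    """(B, H, W, C) -> (num_windows * B, ws, ws, C); H, W must divide by window_size."""
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)

x = torch.randn(2, 56, 56, 96)    # e.g. stage-1 features of Swin-T on a 224x224 crop
print(window_partition(x).shape)  # torch.Size([128, 7, 7, 96])
```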
282
+
283
+ #### Results
284
+
285
+ | Backbone | Network | FP16 | Lr schd | MS-train | Strong | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
286
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
287
+ | Swin-T | Faster R-CNN | | 1x | | | 32.07 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_swin-t_fpn_1x_det_bdd100k.json) | 30.96 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_swin-t_fpn_1x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_swin-t_fpn_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-t_fpn_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-t_fpn_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_swin-t_fpn_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_swin-t_fpn_1x_det_bdd100k.zip) |
288
+ | Swin-T | Faster R-CNN | | 3x | ✓ | | 34.12 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_swin-t_fpn_3x_det_bdd100k.json) | 33.47 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_swin-t_fpn_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_swin-t_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-t_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-t_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_swin-t_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_swin-t_fpn_3x_det_bdd100k.zip) |
289
+ | Swin-T | Faster R-CNN | | 3x | ✓ | ✓ | 34.43 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_swin-t_fpn_3x_strong_det_bdd100k.json) | 33.30 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_swin-t_fpn_3x_strong_det_bdd100k.json) | [config](./configs/det/faster_rcnn_swin-t_fpn_3x_strong_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-t_fpn_3x_strong_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-t_fpn_3x_strong_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_swin-t_fpn_3x_strong_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_swin-t_fpn_3x_strong_det_bdd100k.zip) |
290
+ | Swin-S | Faster R-CNN | | 3x | ✓ | | 34.21 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_swin-s_fpn_3x_det_bdd100k.json) | 33.15 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_swin-s_fpn_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_swin-s_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-s_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-s_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_swin-s_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_swin-s_fpn_3x_det_bdd100k.zip) |
291
+ | Swin-S | Faster R-CNN | ✓ | 3x | ✓ | | 34.21 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.json) | 33.26 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.zip) |
292
+ | Swin-S | Faster R-CNN | ✓ | 3x | ✓ | ✓ | 35.06 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_swin-s_fpn_fp16_3x_strong_det_bdd100k.json) | 33.69 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_swin-s_fpn_fp16_3x_strong_det_bdd100k.json) | [config](./configs/det/faster_rcnn_swin-s_fpn_fp16_3x_strong_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-s_fpn_fp16_3x_strong_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-s_fpn_fp16_3x_strong_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_swin-s_fpn_fp16_3x_strong_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_swin-s_fpn_fp16_3x_strong_det_bdd100k.zip) |
293
+ | Swin-B | Faster R-CNN | ✓ | 3x | ✓ | | 34.46 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.json) | 33.35 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.zip) |
294
+ | Swin-B | Faster R-CNN | ✓ | 3x | ✓ | ✓ | 35.36 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_swin-b_fpn_fp16_3x_strong_det_bdd100k.json) | 34.25 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_swin-b_fpn_fp16_3x_strong_det_bdd100k.json) | [config](./configs/det/faster_rcnn_swin-b_fpn_fp16_3x_strong_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-b_fpn_fp16_3x_strong_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-b_fpn_fp16_3x_strong_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_swin-b_fpn_fp16_3x_strong_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_swin-b_fpn_fp16_3x_strong_det_bdd100k.zip) |
295
+ | Swin-T | Cascade R-CNN | | 3x | ✓ | | 35.95 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/cascade_rcnn_swin-t_fpn_3x_det_bdd100k.json) | 35.26 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/cascade_rcnn_swin-t_fpn_3x_det_bdd100k.json) | [config](./configs/det/cascade_rcnn_swin-t_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_swin-t_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_swin-t_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/cascade_rcnn_swin-t_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/cascade_rcnn_swin-t_fpn_3x_det_bdd100k.zip) |
296
+ | Swin-S | Cascade R-CNN | ✓ | 3x | ✓ | | 35.49 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/cascade_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.json) | 34.86 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/cascade_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.json) | [config](./configs/det/cascade_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/cascade_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/cascade_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.zip) |
297
+ | Swin-B | Cascade R-CNN | ✓ | 3x | ✓ | | 35.03 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/cascade_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.json) | 34.13 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/cascade_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.json) | [config](./configs/det/cascade_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/cascade_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/cascade_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.zip) |
298
+
299
+ [[Code](https://github.com/microsoft/Swin-Transformer)] [[Usage Instructions](#usage)]
300
+
301
+ ---
302
+
303
+ ### Pyramid Vision Transformer
304
+
305
+ [Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions](https://arxiv.org/abs/2102.12122) [ICCV 2021]
306
+
307
+ Authors: [Wenhai Wang](https://whai362.github.io/), [Enze Xie](https://xieenze.github.io/), [Xiang Li](http://implus.github.io/), Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, [Ping Luo](http://luoping.me/), [Ling Shao](https://scholar.google.com/citations?user=z84rLjoAAAAJ)
308
+
309
+ <details>
310
+ <summary>Abstract</summary>
311
+ Although using convolutional neural networks (CNNs) as backbones achieves great successes in computer vision, this work investigates a simple backbone network useful for many dense prediction tasks without convolutions. Unlike the recently-proposed Transformer model (e.g., ViT) that is specially designed for image classification, we propose Pyramid Vision Transformer~(PVT), which overcomes the difficulties of porting Transformer to various dense prediction tasks. PVT has several merits compared to prior arts. (1) Different from ViT that typically has low-resolution outputs and high computational and memory cost, PVT can be not only trained on dense partitions of the image to achieve high output resolution, which is important for dense predictions but also using a progressive shrinking pyramid to reduce computations of large feature maps. (2) PVT inherits the advantages from both CNN and Transformer, making it a unified backbone in various vision tasks without convolutions by simply replacing CNN backbones. (3) We validate PVT by conducting extensive experiments, showing that it boosts the performance of many downstream tasks, e.g., object detection, semantic, and instance segmentation. For example, with a comparable number of parameters, RetinaNet+PVT achieves 40.4 AP on the COCO dataset, surpassing RetinNet+ResNet50 (36.3 AP) by 4.1 absolute AP. We hope PVT could serve as an alternative and useful backbone for pixel-level predictions and facilitate future researches. Code is available at [this https URL](https://github.com/whai362/PVT).
312
+ </details>
313
+
314
+ #### Results
315
+
316
+ | Backbone | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
317
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
318
+ | PVT-T | 1x | | 28.45 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/retinanet_pvt-t_fpn_1x_det_bdd100k.json) | 27.50 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/retinanet_pvt-t_fpn_1x_det_bdd100k.json) | [config](./configs/det/retinanet_pvt-t_fpn_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvt-t_fpn_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvt-t_fpn_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/retinanet_pvt-t_fpn_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/retinanet_pvt-t_fpn_1x_det_bdd100k.zip) |
319
+ | PVT-T | 3x | ✓ | 30.45 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/retinanet_pvt-t_fpn_3x_det_bdd100k.json) | 29.85 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/retinanet_pvt-t_fpn_3x_det_bdd100k.json) | [config](./configs/det/retinanet_pvt-t_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvt-t_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvt-t_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/retinanet_pvt-t_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/retinanet_pvt-t_fpn_3x_det_bdd100k.zip) |
320
+ | PVT-S | 3x | ✓ | 31.23 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/retinanet_pvt-s_fpn_3x_det_bdd100k.json) | 30.67 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/retinanet_pvt-s_fpn_3x_det_bdd100k.json) | [config](./configs/det/retinanet_pvt-s_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvt-s_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvt-s_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/retinanet_pvt-s_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/retinanet_pvt-s_fpn_3x_det_bdd100k.zip) |
321
+
322
+ [[Code](https://github.com/whai362/PVT)] [[Usage Instructions](#usage)]
323
+
324
+ ---
325
+
326
+ ### Pyramid Vision Transformer v2
327
+
328
+ [PVTv2: Improved Baselines with Pyramid Vision Transformer](https://arxiv.org/abs/2106.13797) [CVMJ 2022]
329
+
330
+ Authors: [Wenhai Wang](https://whai362.github.io/), [Enze Xie](https://xieenze.github.io/), [Xiang Li](http://implus.github.io/), Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, [Ping Luo](http://luoping.me/), [Ling Shao](https://scholar.google.com/citations?user=z84rLjoAAAAJ)
331
+
332
+ <details>
333
+ <summary>Abstract</summary>
334
+ Transformer recently has presented encouraging progress in computer vision. In this work, we present new baselines by improving the original Pyramid Vision Transformer (PVTv1) by adding three designs, including (1) linear complexity attention layer, (2) overlapping patch embedding, and (3) convolutional feed-forward network. With these modifications, PVTv2 reduces the computational complexity of PVTv1 to linear and achieves significant improvements on fundamental vision tasks such as classification, detection, and segmentation. Notably, the proposed PVTv2 achieves comparable or better performances than recent works such as Swin Transformer. We hope this work will facilitate state-of-the-art Transformer researches in computer vision. Code is available at [this https URL](https://github.com/whai362/PVT).
335
+ </details>
336
+
337
+ #### Results
338
+
339
+ | Backbone | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
340
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
341
+ | PVTv2-B0 | 1x | | 30.35 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/retinanet_pvtv2-b0_fpn_1x_det_bdd100k.json) | 29.50 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/retinanet_pvtv2-b0_fpn_1x_det_bdd100k.json) | [config](./configs/det/retinanet_pvtv2-b0_fpn_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvtv2-b0_fpn_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvtv2-b0_fpn_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/retinanet_pvtv2-b0_fpn_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/retinanet_pvtv2-b0_fpn_1x_det_bdd100k.zip) |
342
+ | PVTv2-B0 | 3x | ✓ | 31.89 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/retinanet_pvtv2-b0_fpn_3x_det_bdd100k.json) | 31.08 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/retinanet_pvtv2-b0_fpn_3x_det_bdd100k.json) | [config](./configs/det/retinanet_pvtv2-b0_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvtv2-b0_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvtv2-b0_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/retinanet_pvtv2-b0_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/retinanet_pvtv2-b0_fpn_3x_det_bdd100k.zip) |
343
+ | PVTv2-B1 | 3x | ✓ | 32.57 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/retinanet_pvtv2-b1_fpn_3x_det_bdd100k.json) | 31.78 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/retinanet_pvtv2-b1_fpn_3x_det_bdd100k.json) | [config](./configs/det/retinanet_pvtv2-b1_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvtv2-b1_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvtv2-b1_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/retinanet_pvtv2-b1_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/retinanet_pvtv2-b1_fpn_3x_det_bdd100k.zip) |
344
+ | PVTv2-B2 | 3x | ✓ | 32.98 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/retinanet_pvtv2-b2_fpn_3x_det_bdd100k.json) | 32.60 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/retinanet_pvtv2-b2_fpn_3x_det_bdd100k.json) | [config](./configs/det/retinanet_pvtv2-b2_fpn_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvtv2-b2_fpn_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_pvtv2-b2_fpn_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/retinanet_pvtv2-b2_fpn_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/retinanet_pvtv2-b2_fpn_3x_det_bdd100k.zip) |
345
+
346
+ [[Code](https://github.com/whai362/PVT)] [[Usage Instructions](#usage)]
347
+
348
+ ---
349
+
350
+ ### ResNet Strikes Back
351
+
352
+ [ResNet strikes back: An improved training procedure in timm](https://arxiv.org/abs/2110.00476) [NeurIPS 2021 Workshop]
353
+
354
+ Authors: [Ross Wightman](https://rwightman.com/), [Hugo Touvron](https://scholar.google.com/citations?user=xImarzoAAAAJ), [Hervé Jégou](https://scholar.google.com/citations?user=1lcY2z4AAAAJ)
355
+
356
+ <details>
357
+ <summary>Abstract</summary>
358
+ The influential Residual Networks designed by He et al. remain the gold-standard architecture in numerous scientific publications. They typically serve as the default architecture in studies, or as baselines when new architectures are proposed. Yet there has been significant progress on best practices for training neural networks since the inception of the ResNet architecture in 2015. Novel optimization & data-augmentation have increased the effectiveness of the training recipes. In this paper, we re-evaluate the performance of the vanilla ResNet-50 when trained with a procedure that integrates such advances. We share competitive training settings and pre-trained models in the timm open-source library, with the hope that they will serve as better baselines for future work. For instance, with our more demanding training setting, a vanilla ResNet-50 reaches 80.4% top-1 accuracy at resolution 224x224 on ImageNet-val without extra data or distillation. We also report the performance achieved with popular models with our training procedure.
359
+ </details>
360
+
361
+ #### Results
362
+
363
+ | Backbone | Network | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
364
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
365
+ | R-50-FPN | Faster R-CNN | 1x | | 31.40 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_r50_fpn_rsb_1x_det_bdd100k.json) | 30.26 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_r50_fpn_rsb_1x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_r50_fpn_rsb_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_rsb_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_rsb_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_r50_fpn_rsb_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_r50_fpn_rsb_1x_det_bdd100k.zip) |
366
+ | R-50-FPN | Faster R-CNN | 3x | ✓ | 32.48 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_r50_fpn_rsb_3x_det_bdd100k.json) | 31.43 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_r50_fpn_rsb_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_r50_fpn_rsb_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_rsb_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_rsb_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_r50_fpn_rsb_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_r50_fpn_rsb_3x_det_bdd100k.zip) |
367
+ | R-50-FPN | RetinaNet | 1x | | 29.52 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/retinanet_r50_fpn_rsb_1x_det_bdd100k.json) | 28.69 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/retinanet_r50_fpn_rsb_1x_det_bdd100k.json) | [config](./configs/det/retinanet_r50_fpn_rsb_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_r50_fpn_rsb_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_r50_fpn_rsb_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/retinanet_r50_fpn_rsb_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/retinanet_r50_fpn_rsb_1x_det_bdd100k.zip) |
368
+ | R-50-FPN | RetinaNet | 3x | ✓ | 31.13 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/retinanet_r50_fpn_rsb_3x_det_bdd100k.json) | 30.51 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/retinanet_r50_fpn_rsb_3x_det_bdd100k.json) | [config](./configs/det/retinanet_r50_fpn_rsb_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_r50_fpn_rsb_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/retinanet_r50_fpn_rsb_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/retinanet_r50_fpn_rsb_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/retinanet_r50_fpn_rsb_3x_det_bdd100k.zip) |
369
+
370
+ [[Code](https://github.com/rwightman/pytorch-image-models)] [[Usage Instructions](#usage)]
371
+
372
+ ---
373
+
374
+ ### ConvNeXt
375
+
376
+ [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) [CVPR 2022]
377
+
378
+ Authors: [Zhuang Liu](https://liuzhuang13.github.io/), [Hanzi Mao](https://hanzimao.me/), [Chao-Yuan Wu](https://chaoyuan.org/), [Christoph Feichtenhofer](https://feichtenhofer.github.io/), [Trevor Darrell](https://people.eecs.berkeley.edu/~trevor/), [Saining Xie](https://www.sainingxie.com/)
379
+
380
+ <details>
381
+ <summary>Abstract</summary>
382
+ The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets.
383
+ </details>
384
+
385
+ #### Results
386
+
387
+ | Backbone | Network | FP16 | Lr schd | MS-train | Box AP-val | Scores-val | Box AP-test | Scores-test | Config | Weights | Preds | Visuals |
388
+ | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
389
+ | ConvNeXt-T | Faster R-CNN | ✓ | 1x | | 33.32 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.json) | 31.96 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.zip) |
390
+ | ConvNeXt-T | Faster R-CNN | ✓ | 3x | ✓ | 34.08 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.json) | 33.59 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.zip) |
391
+ | ConvNeXt-S | Faster R-CNN | ✓ | 3x | ✓ | 34.79 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.json) | 34.49 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.zip) |
392
+ | ConvNeXt-B | Faster R-CNN | ✓ | 3x | ✓ | 33.92 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/faster_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.json) | 33.30 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/faster_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.json) | [config](./configs/det/faster_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/faster_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/faster_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.zip) |
393
+ | ConvNeXt-T | Cascade R-CNN | ✓ | 1x | | 35.51 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/cascade_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.json) | 33.99 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/cascade_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.json) | [config](./configs/det/cascade_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/cascade_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/cascade_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.zip) |
394
+ | ConvNeXt-T | Cascade R-CNN | ✓ | 3x | ✓ | 35.84 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/cascade_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.json) | 35.35 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/cascade_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.json) | [config](./configs/det/cascade_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/cascade_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/cascade_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.zip) |
395
+ | ConvNeXt-S | Cascade R-CNN | ✓ | 3x | ✓ | 36.11 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/cascade_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.json) | 35.43 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/cascade_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.json) | [config](./configs/det/cascade_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/cascade_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/cascade_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.zip) |
396
+ | ConvNeXt-B | Cascade R-CNN | ✓ | 3x | ✓ | 35.77 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-val/cascade_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.json) | 34.95 | [scores](https://dl.cv.ethz.ch/bdd100k/det/scores-test/cascade_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.json) | [config](./configs/det/cascade_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.py) | [model](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.pth) \| [MD5](https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.md5) | [preds](https://dl.cv.ethz.ch/bdd100k/det/preds/cascade_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.json) | [visuals](https://dl.cv.ethz.ch/bdd100k/det/visuals/cascade_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.zip) |
397
+
398
+ [[Code](https://github.com/facebookresearch/ConvNeXt)] [[Usage Instructions](#usage)]
399
+
400
+ ---
401
+
402
+ ## Install
403
+
404
+ a. Create a conda virtual environment and activate it.
405
+
406
+ ```shell
407
+ conda create -n bdd100k-mmdet python=3.8
408
+ conda activate bdd100k-mmdet
409
+ ```
410
+
411
+ b. Install PyTorch and torchvision following the [official instructions](https://pytorch.org/), e.g.,
412
+
413
+ ```shell
414
+ conda install pytorch torchvision -c pytorch
415
+ ```
416
+
417
+ Note: Make sure that your compilation CUDA version and runtime CUDA version match.
418
+ You can check the supported CUDA version for precompiled packages on the [PyTorch website](https://pytorch.org/).
419
+
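+ For example, a quick way to compare the two versions (assuming the CUDA toolkit and `nvcc` are on your `PATH`) is:
+
+ ```shell
+ # CUDA version that PyTorch was built against
+ python -c "import torch; print(torch.version.cuda)"
+ # CUDA toolkit version installed on the system
+ nvcc --version
+ ```
+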
420
+ c. Install mmcv and mmdetection.
421
+
422
+ ```shell
423
+ pip install mmcv-full
424
+ pip install mmdet
425
+ ```
426
+
427
+ You can also refer to the [official instructions](https://github.com/open-mmlab/mmdetection/blob/master/docs/en/get_started.md/#Installation).
428
+
429
+ Note that mmdetection uses its forked version of pycocotools from the GitHub repo instead of PyPI for better compatibility. If you encounter issues, you may need to reinstall the cocoapi via:
430
+
431
+ ```shell
432
+ pip uninstall pycocotools
433
+ pip install git+https://github.com/open-mmlab/cocoapi.git#subdirectory=pycocotools
434
+ ```
435
+
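+ As a quick sanity check that both packages were installed correctly, you can print their versions:
+
+ ```shell
+ python -c "import mmcv, mmdet; print(mmcv.__version__, mmdet.__version__)"
+ ```
+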
436
+ ## Usage
437
+
438
+ ### Model Inference
439
+
440
+ Single GPU inference:
441
+
442
+ ```shell
443
+ python ./test.py ${CONFIG_FILE} --format-only --format-dir ${OUTPUT_DIR} [--cfg-options]
444
+ ```
445
+
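+ As a concrete example (the output directory below is an arbitrary choice), running the ATSS R-50 1x config might look like:
+
+ ```shell
+ python ./test.py configs/det/atss_r50_fpn_1x_det_bdd100k.py \
+     --format-only --format-dir ./preds/atss_r50_fpn_1x
+ ```
+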
446
+ Multiple GPU inference:
447
+
448
+ ```shell
449
+ CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch \
450
+ --nproc_per_node=4 --master_port=12000 ./test.py ${CONFIG_FILE} \
451
+ --format-only --format-dir ${OUTPUT_DIR} [--cfg-options] \
452
+ --launcher pytorch
453
+ ```
454
+
455
+ ### Output Evaluation
456
+
457
+ #### Validation Set
458
+
459
+ To evaluate the detection performance on the BDD100K validation set, you can follow the official evaluation [scripts](https://doc.bdd100k.com/evaluate.html) provided by BDD100K:
460
+
461
+ ```bash
462
+ python -m bdd100k.eval.run -t det \
463
+ -g ../data/bdd100k/labels/det_20/det_${SET_NAME}.json \
464
+ -r ${OUTPUT_DIR}/det.json \
465
+ [--out-file ${RESULTS_FILE}] [--nproc ${NUM_PROCESS}]
466
+ ```
467
+
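+ For instance, scoring predictions on the validation split (with a hypothetical output directory) could look like:
+
+ ```bash
+ python -m bdd100k.eval.run -t det \
+     -g ../data/bdd100k/labels/det_20/det_val.json \
+     -r ./preds/atss_r50_fpn_1x/det.json \
+     --out-file ./preds/atss_r50_fpn_1x/scores.json --nproc 8
+ ```
+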
468
+ #### Test Set
469
+
470
+ You can obtain the performance on the BDD100K test set by submitting your model predictions to our [evaluation server](https://eval.ai/web/challenges/challenge-page/1260) hosted on EvalAI.
471
+
472
+ ### Output Visualization
473
+
474
+ For visualization, you can use the tool provided by [Scalabel](https://doc.scalabel.ai/visual.html).
475
+
476
+ Below is an example:
477
+
478
+ ```python
479
+ import os
480
+ import numpy as np
481
+ from PIL import Image
482
+ from scalabel.label.io import load
483
+ from scalabel.vis.label import LabelViewer
484
+
485
+ # load prediction frames
486
+ frames = load('$OUTPUT_DIR/det.json').frames
487
+
488
+ viewer = LabelViewer()
489
+ for frame in frames:
490
+ img = np.array(Image.open(os.path.join('$IMG_DIR', frame.name)))
491
+ viewer.draw(img, frame)
492
+ viewer.save(os.path.join('$VIS_DIR', frame.name))
493
+ ```
494
+
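+ The snippet above assumes the `scalabel` package is importable; if it is not, installing it from the Scalabel GitHub repository usually suffices:
+
+ ```shell
+ pip install git+https://github.com/scalabel/scalabel.git
+ ```
+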
495
+ ## Contribution
496
+
497
+ **You can include your models in this repo as well!** Please follow the [contribution](../doc/CONTRIBUTING.md) instructions.
__init__.py ADDED
@@ -0,0 +1 @@
1
+ """Object Detection."""
configs/_base_/datasets/bdd100k.py ADDED
@@ -0,0 +1,56 @@
1
+ """Dataset settings."""
2
+
3
+ dataset_type = "BDD100KDetDataset" # pylint: disable=invalid-name
4
+ data_root = "../data/bdd100k/" # pylint: disable=invalid-name
5
+ img_norm_cfg = dict(
6
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
7
+ )
8
+ train_pipeline = [
9
+ dict(type="LoadImageFromFile"),
10
+ dict(type="LoadAnnotations", with_bbox=True),
11
+ dict(type="Resize", img_scale=(1280, 720), keep_ratio=True),
12
+ dict(type="RandomFlip", flip_ratio=0.5),
13
+ dict(type="Normalize", **img_norm_cfg),
14
+ dict(type="Pad", size_divisor=32),
15
+ dict(type="DefaultFormatBundle"),
16
+ dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels"]),
17
+ ]
18
+ test_pipeline = [
19
+ dict(type="LoadImageFromFile"),
20
+ dict(
21
+ type="MultiScaleFlipAug",
22
+ img_scale=(1280, 720),
23
+ flip=False,
24
+ transforms=[
25
+ dict(type="Resize", keep_ratio=True),
26
+ dict(type="RandomFlip"),
27
+ dict(type="Normalize", **img_norm_cfg),
28
+ dict(type="Pad", size_divisor=32),
29
+ dict(type="ImageToTensor", keys=["img"]),
30
+ dict(type="Collect", keys=["img"]),
31
+ ],
32
+ ),
33
+ ]
34
+ data = dict(
35
+ samples_per_gpu=4,
36
+ workers_per_gpu=4,
37
+ train=dict(
38
+ type=dataset_type,
39
+ ann_file=data_root + "jsons/det_train_cocofmt.json",
40
+ img_prefix=data_root + "images/100k/train",
41
+ pipeline=train_pipeline,
42
+ ),
43
+ val=dict(
44
+ type=dataset_type,
45
+ ann_file=data_root + "jsons/det_val_cocofmt.json",
46
+ img_prefix=data_root + "images/100k/val",
47
+ pipeline=test_pipeline,
48
+ ),
49
+ test=dict(
50
+ type=dataset_type,
51
+ ann_file=data_root + "jsons/det_val_cocofmt.json",
52
+ img_prefix=data_root + "images/100k/val",
53
+ pipeline=test_pipeline,
54
+ ),
55
+ )
56
+ evaluation = dict(interval=1, metric="bbox")
configs/_base_/datasets/bdd100k_mstrain.py ADDED
@@ -0,0 +1,68 @@
1
+ """Dataset settings."""
2
+
3
+ dataset_type = "BDD100KDetDataset" # pylint: disable=invalid-name
4
+ data_root = "../data/bdd100k/" # pylint: disable=invalid-name
5
+ img_norm_cfg = dict(
6
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
7
+ )
8
+ train_pipeline = [
9
+ dict(type="LoadImageFromFile"),
10
+ dict(type="LoadAnnotations", with_bbox=True),
11
+ dict(
12
+ type="Resize",
13
+ img_scale=[
14
+ (1280, 600),
15
+ (1280, 624),
16
+ (1280, 648),
17
+ (1280, 672),
18
+ (1280, 696),
19
+ (1280, 720),
20
+ ],
21
+ multiscale_mode="value",
22
+ keep_ratio=True,
23
+ ),
24
+ dict(type="RandomFlip", flip_ratio=0.5),
25
+ dict(type="Normalize", **img_norm_cfg),
26
+ dict(type="Pad", size_divisor=32),
27
+ dict(type="DefaultFormatBundle"),
28
+ dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels"]),
29
+ ]
30
+ test_pipeline = [
31
+ dict(type="LoadImageFromFile"),
32
+ dict(
33
+ type="MultiScaleFlipAug",
34
+ img_scale=(1280, 720),
35
+ flip=False,
36
+ transforms=[
37
+ dict(type="Resize", keep_ratio=True),
38
+ dict(type="RandomFlip"),
39
+ dict(type="Normalize", **img_norm_cfg),
40
+ dict(type="Pad", size_divisor=32),
41
+ dict(type="ImageToTensor", keys=["img"]),
42
+ dict(type="Collect", keys=["img"]),
43
+ ],
44
+ ),
45
+ ]
46
+ data = dict(
47
+ samples_per_gpu=4,
48
+ workers_per_gpu=4,
49
+ train=dict(
50
+ type=dataset_type,
51
+ ann_file=data_root + "jsons/det_train_cocofmt.json",
52
+ img_prefix=data_root + "images/100k/train",
53
+ pipeline=train_pipeline,
54
+ ),
55
+ val=dict(
56
+ type=dataset_type,
57
+ ann_file=data_root + "jsons/det_val_cocofmt.json",
58
+ img_prefix=data_root + "images/100k/val",
59
+ pipeline=test_pipeline,
60
+ ),
61
+ test=dict(
62
+ type=dataset_type,
63
+ ann_file=data_root + "jsons/det_val_cocofmt.json",
64
+ img_prefix=data_root + "images/100k/val",
65
+ pipeline=test_pipeline,
66
+ ),
67
+ )
68
+ evaluation = dict(interval=1, metric="bbox")
configs/_base_/datasets/bdd100k_strong.py ADDED
@@ -0,0 +1,71 @@
1
+ """Dataset settings."""
2
+
3
+ dataset_type = "BDD100KDetDataset" # pylint: disable=invalid-name
4
+ data_root = "../data/bdd100k/" # pylint: disable=invalid-name
5
+ img_norm_cfg = dict(
6
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
7
+ )
8
+ crop_size = (720, 1280)
9
+ train_pipeline = [
10
+ dict(type="Mosaic", img_scale=crop_size),
11
+ dict(type="MixUp", img_scale=crop_size),
12
+ dict(type="RandomFlip", flip_ratio=0.5),
13
+ dict(type="Resize", img_scale=(1280, 720), ratio_range=(0.5, 1.5)),
14
+ dict(type="RandomCrop", crop_size=crop_size, allow_negative_crop=True),
15
+ dict(type="PhotoMetricDistortion"),
16
+ dict(type="Normalize", **img_norm_cfg),
17
+ dict(type="Pad", size_divisor=32),
18
+ dict(type="DefaultFormatBundle"),
19
+ dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels"]),
20
+ ]
21
+
22
+ train_dataset = dict(
23
+ type='MultiImageMixDataset',
24
+ dataset=dict(
25
+ type=dataset_type,
26
+ ann_file=data_root + "jsons/det_train_cocofmt.json",
27
+ img_prefix=data_root + "images/100k/train",
28
+ pipeline=[
29
+ dict(type="LoadImageFromFile"),
30
+ dict(type='LoadAnnotations', with_bbox=True)
31
+ ],
32
+ filter_empty_gt=True,
33
+ ),
34
+ pipeline=train_pipeline,
35
+ )
36
+
37
+ test_pipeline = [
38
+ dict(type="LoadImageFromFile"),
39
+ dict(
40
+ type="MultiScaleFlipAug",
41
+ img_scale=(1280, 720),
42
+ flip=False,
43
+ transforms=[
44
+ dict(type="Resize", keep_ratio=True),
45
+ dict(type="RandomFlip"),
46
+ dict(type="Normalize", **img_norm_cfg),
47
+ dict(type="Pad", size_divisor=32),
48
+ dict(type="ImageToTensor", keys=["img"]),
49
+ dict(type="Collect", keys=["img"]),
50
+ ],
51
+ ),
52
+ ]
53
+
54
+ data = dict(
55
+ samples_per_gpu=4,
56
+ workers_per_gpu=4,
57
+ train=train_dataset,
58
+ val=dict(
59
+ type=dataset_type,
60
+ ann_file=data_root + "jsons/det_val_cocofmt.json",
61
+ img_prefix=data_root + "images/100k/val",
62
+ pipeline=test_pipeline,
63
+ ),
64
+ test=dict(
65
+ type=dataset_type,
66
+ ann_file=data_root + "jsons/det_val_cocofmt.json",
67
+ img_prefix=data_root + "images/100k/val",
68
+ pipeline=test_pipeline,
69
+ ),
70
+ )
71
+ evaluation = dict(interval=1, metric="bbox")
configs/_base_/default_runtime.py ADDED
@@ -0,0 +1,18 @@
1
+ # runtime
2
+ checkpoint_config = dict(interval=1)
3
+ # yapf:disable
4
+ log_config = dict(
5
+ interval=50,
6
+ hooks=[
7
+ dict(type="TextLoggerHook"),
8
+ # dict(type='TensorboardLoggerHook')
9
+ ],
10
+ )
11
+ # yapf:enable
12
+ custom_hooks = [dict(type="NumClassCheckHook")]
13
+
14
+ dist_params = dict(backend="nccl")
15
+ log_level = "INFO"
16
+ load_from = None
17
+ resume_from = None
18
+ workflow = [("train", 1)]
configs/_base_/models/atss_r50_fpn.py ADDED
@@ -0,0 +1,67 @@
1
+ # model settings
2
+ model = dict(
3
+ type="ATSS",
4
+ backbone=dict(
5
+ type="ResNet",
6
+ depth=50,
7
+ num_stages=4,
8
+ out_indices=(0, 1, 2, 3),
9
+ frozen_stages=1,
10
+ norm_cfg=dict(type="BN", requires_grad=True),
11
+ norm_eval=True,
12
+ style="pytorch",
13
+ init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet50"),
14
+ ),
15
+ neck=dict(
16
+ type="FPN",
17
+ in_channels=[256, 512, 1024, 2048],
18
+ out_channels=256,
19
+ start_level=1,
20
+ add_extra_convs="on_output",
21
+ num_outs=5,
22
+ ),
23
+ bbox_head=dict(
24
+ type="ATSSHead",
25
+ num_classes=10,
26
+ in_channels=256,
27
+ stacked_convs=4,
28
+ feat_channels=256,
29
+ anchor_generator=dict(
30
+ type="AnchorGenerator",
31
+ ratios=[1.0],
32
+ octave_base_scale=8,
33
+ scales_per_octave=1,
34
+ strides=[8, 16, 32, 64, 128],
35
+ ),
36
+ bbox_coder=dict(
37
+ type="DeltaXYWHBBoxCoder",
38
+ target_means=[0.0, 0.0, 0.0, 0.0],
39
+ target_stds=[0.1, 0.1, 0.2, 0.2],
40
+ ),
41
+ loss_cls=dict(
42
+ type="FocalLoss",
43
+ use_sigmoid=True,
44
+ gamma=2.0,
45
+ alpha=0.25,
46
+ loss_weight=1.0,
47
+ ),
48
+ loss_bbox=dict(type="GIoULoss", loss_weight=2.0),
49
+ loss_centerness=dict(
50
+ type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0
51
+ ),
52
+ ),
53
+ # training and testing settings
54
+ train_cfg=dict(
55
+ assigner=dict(type="ATSSAssigner", topk=9),
56
+ allowed_border=-1,
57
+ pos_weight=-1,
58
+ debug=False,
59
+ ),
60
+ test_cfg=dict(
61
+ nms_pre=1000,
62
+ min_bbox_size=0,
63
+ score_thr=0.05,
64
+ nms=dict(type="nms", iou_threshold=0.6),
65
+ max_per_img=100,
66
+ ),
67
+ )
configs/_base_/models/atss_r50_fpn_dyhead.py ADDED
@@ -0,0 +1,70 @@
1
+ # model settings
2
+ model = dict(
3
+ type="ATSS",
4
+ backbone=dict(
5
+ type="ResNet",
6
+ depth=50,
7
+ num_stages=4,
8
+ out_indices=(0, 1, 2, 3),
9
+ frozen_stages=1,
10
+ norm_cfg=dict(type="BN", requires_grad=True),
11
+ norm_eval=True,
12
+ style="pytorch",
13
+ init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet50"),
14
+ ),
15
+ neck=[
16
+ dict(
17
+ type="FPN",
18
+ in_channels=[256, 512, 1024, 2048],
19
+ out_channels=256,
20
+ start_level=1,
21
+ add_extra_convs="on_output",
22
+ num_outs=5,
23
+ ),
24
+ dict(type="DyHead", in_channels=256, out_channels=256, num_blocks=6),
25
+ ],
26
+ bbox_head=dict(
27
+ type="ATSSHead",
28
+ num_classes=10,
29
+ in_channels=256,
30
+ stacked_convs=0,
31
+ feat_channels=256,
32
+ anchor_generator=dict(
33
+ type="AnchorGenerator",
34
+ ratios=[1.0],
35
+ octave_base_scale=8,
36
+ scales_per_octave=1,
37
+ strides=[8, 16, 32, 64, 128],
38
+ ),
39
+ bbox_coder=dict(
40
+ type="DeltaXYWHBBoxCoder",
41
+ target_means=[0.0, 0.0, 0.0, 0.0],
42
+ target_stds=[0.1, 0.1, 0.2, 0.2],
43
+ ),
44
+ loss_cls=dict(
45
+ type="FocalLoss",
46
+ use_sigmoid=True,
47
+ gamma=2.0,
48
+ alpha=0.25,
49
+ loss_weight=1.0,
50
+ ),
51
+ loss_bbox=dict(type="GIoULoss", loss_weight=2.0),
52
+ loss_centerness=dict(
53
+ type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0
54
+ ),
55
+ ),
56
+ # training and testing settings
57
+ train_cfg=dict(
58
+ assigner=dict(type="ATSSAssigner", topk=9),
59
+ allowed_border=-1,
60
+ pos_weight=-1,
61
+ debug=False,
62
+ ),
63
+ test_cfg=dict(
64
+ nms_pre=1000,
65
+ min_bbox_size=0,
66
+ score_thr=0.05,
67
+ nms=dict(type="nms", iou_threshold=0.6),
68
+ max_per_img=100,
69
+ ),
70
+ )
configs/_base_/models/cascade_rcnn_r50_fpn.py ADDED
@@ -0,0 +1,206 @@
1
+ # model settings
2
+ model = dict(
3
+ type="CascadeRCNN",
4
+ backbone=dict(
5
+ type="ResNet",
6
+ depth=50,
7
+ num_stages=4,
8
+ out_indices=(0, 1, 2, 3),
9
+ frozen_stages=1,
10
+ norm_cfg=dict(type="BN", requires_grad=True),
11
+ norm_eval=True,
12
+ style="pytorch",
13
+ init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet50"),
14
+ ),
15
+ neck=dict(
16
+ type="FPN",
17
+ in_channels=[256, 512, 1024, 2048],
18
+ out_channels=256,
19
+ num_outs=5,
20
+ ),
21
+ rpn_head=dict(
22
+ type="RPNHead",
23
+ in_channels=256,
24
+ feat_channels=256,
25
+ anchor_generator=dict(
26
+ type="AnchorGenerator",
27
+ scales=[8],
28
+ ratios=[0.5, 1.0, 2.0],
29
+ strides=[4, 8, 16, 32, 64],
30
+ ),
31
+ bbox_coder=dict(
32
+ type="DeltaXYWHBBoxCoder",
33
+ target_means=[0.0, 0.0, 0.0, 0.0],
34
+ target_stds=[1.0, 1.0, 1.0, 1.0],
35
+ ),
36
+ loss_cls=dict(
37
+ type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0
38
+ ),
39
+ loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, loss_weight=1.0),
40
+ ),
41
+ roi_head=dict(
42
+ type="CascadeRoIHead",
43
+ num_stages=3,
44
+ stage_loss_weights=[1, 0.5, 0.25],
45
+ bbox_roi_extractor=dict(
46
+ type="SingleRoIExtractor",
47
+ roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
48
+ out_channels=256,
49
+ featmap_strides=[4, 8, 16, 32],
50
+ ),
51
+ bbox_head=[
52
+ dict(
53
+ type="Shared2FCBBoxHead",
54
+ in_channels=256,
55
+ fc_out_channels=1024,
56
+ roi_feat_size=7,
57
+ num_classes=10,
58
+ bbox_coder=dict(
59
+ type="DeltaXYWHBBoxCoder",
60
+ target_means=[0.0, 0.0, 0.0, 0.0],
61
+ target_stds=[0.1, 0.1, 0.2, 0.2],
62
+ ),
63
+ reg_class_agnostic=True,
64
+ loss_cls=dict(
65
+ type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
66
+ ),
67
+ loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
68
+ ),
69
+ dict(
70
+ type="Shared2FCBBoxHead",
71
+ in_channels=256,
72
+ fc_out_channels=1024,
73
+ roi_feat_size=7,
74
+ num_classes=10,
75
+ bbox_coder=dict(
76
+ type="DeltaXYWHBBoxCoder",
77
+ target_means=[0.0, 0.0, 0.0, 0.0],
78
+ target_stds=[0.05, 0.05, 0.1, 0.1],
79
+ ),
80
+ reg_class_agnostic=True,
81
+ loss_cls=dict(
82
+ type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
83
+ ),
84
+ loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
85
+ ),
86
+ dict(
87
+ type="Shared2FCBBoxHead",
88
+ in_channels=256,
89
+ fc_out_channels=1024,
90
+ roi_feat_size=7,
91
+ num_classes=10,
92
+ bbox_coder=dict(
93
+ type="DeltaXYWHBBoxCoder",
94
+ target_means=[0.0, 0.0, 0.0, 0.0],
95
+ target_stds=[0.033, 0.033, 0.067, 0.067],
96
+ ),
97
+ reg_class_agnostic=True,
98
+ loss_cls=dict(
99
+ type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
100
+ ),
101
+ loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
102
+ ),
103
+ ],
104
+ ),
105
+ # model training and testing settings
106
+ train_cfg=dict(
107
+ rpn=dict(
108
+ assigner=dict(
109
+ type="MaxIoUAssigner",
110
+ pos_iou_thr=0.7,
111
+ neg_iou_thr=0.3,
112
+ min_pos_iou=0.3,
113
+ match_low_quality=True,
114
+ ignore_iof_thr=-1,
115
+ ),
116
+ sampler=dict(
117
+ type="RandomSampler",
118
+ num=256,
119
+ pos_fraction=0.5,
120
+ neg_pos_ub=-1,
121
+ add_gt_as_proposals=False,
122
+ ),
123
+ allowed_border=0,
124
+ pos_weight=-1,
125
+ debug=False,
126
+ ),
127
+ rpn_proposal=dict(
128
+ nms_pre=2000,
129
+ max_per_img=2000,
130
+ nms=dict(type="nms", iou_threshold=0.7),
131
+ min_bbox_size=0,
132
+ ),
133
+ rcnn=[
134
+ dict(
135
+ assigner=dict(
136
+ type="MaxIoUAssigner",
137
+ pos_iou_thr=0.5,
138
+ neg_iou_thr=0.5,
139
+ min_pos_iou=0.5,
140
+ match_low_quality=False,
141
+ ignore_iof_thr=-1,
142
+ ),
143
+ sampler=dict(
144
+ type="RandomSampler",
145
+ num=512,
146
+ pos_fraction=0.25,
147
+ neg_pos_ub=-1,
148
+ add_gt_as_proposals=True,
149
+ ),
150
+ pos_weight=-1,
151
+ debug=False,
152
+ ),
153
+ dict(
154
+ assigner=dict(
155
+ type="MaxIoUAssigner",
156
+ pos_iou_thr=0.6,
157
+ neg_iou_thr=0.6,
158
+ min_pos_iou=0.6,
159
+ match_low_quality=False,
160
+ ignore_iof_thr=-1,
161
+ ),
162
+ sampler=dict(
163
+ type="RandomSampler",
164
+ num=512,
165
+ pos_fraction=0.25,
166
+ neg_pos_ub=-1,
167
+ add_gt_as_proposals=True,
168
+ ),
169
+ pos_weight=-1,
170
+ debug=False,
171
+ ),
172
+ dict(
173
+ assigner=dict(
174
+ type="MaxIoUAssigner",
175
+ pos_iou_thr=0.7,
176
+ neg_iou_thr=0.7,
177
+ min_pos_iou=0.7,
178
+ match_low_quality=False,
179
+ ignore_iof_thr=-1,
180
+ ),
181
+ sampler=dict(
182
+ type="RandomSampler",
183
+ num=512,
184
+ pos_fraction=0.25,
185
+ neg_pos_ub=-1,
186
+ add_gt_as_proposals=True,
187
+ ),
188
+ pos_weight=-1,
189
+ debug=False,
190
+ ),
191
+ ],
192
+ ),
193
+ test_cfg=dict(
194
+ rpn=dict(
195
+ nms_pre=1000,
196
+ max_per_img=1000,
197
+ nms=dict(type="nms", iou_threshold=0.7),
198
+ min_bbox_size=0,
199
+ ),
200
+ rcnn=dict(
201
+ score_thr=0.05,
202
+ nms=dict(type="nms", iou_threshold=0.5),
203
+ max_per_img=100,
204
+ ),
205
+ ),
206
+ )
configs/_base_/models/faster_rcnn_r50_fpn.py ADDED
@@ -0,0 +1,130 @@
1
+ # model settings
2
+ model = dict(
3
+ type="FasterRCNN",
4
+ backbone=dict(
5
+ type="ResNet",
6
+ depth=50,
7
+ num_stages=4,
8
+ out_indices=(0, 1, 2, 3),
9
+ frozen_stages=1,
10
+ norm_cfg=dict(type="BN", requires_grad=True),
11
+ norm_eval=True,
12
+ style="pytorch",
13
+ init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet50"),
14
+ ),
15
+ neck=dict(
16
+ type="FPN",
17
+ in_channels=[256, 512, 1024, 2048],
18
+ out_channels=256,
19
+ num_outs=5,
20
+ ),
21
+ rpn_head=dict(
22
+ type="RPNHead",
23
+ in_channels=256,
24
+ feat_channels=256,
25
+ anchor_generator=dict(
26
+ type="AnchorGenerator",
27
+ scales=[8],
28
+ ratios=[0.5, 1.0, 2.0],
29
+ strides=[4, 8, 16, 32, 64],
30
+ ),
31
+ bbox_coder=dict(
32
+ type="DeltaXYWHBBoxCoder",
33
+ target_means=[0.0, 0.0, 0.0, 0.0],
34
+ target_stds=[1.0, 1.0, 1.0, 1.0],
35
+ ),
36
+ loss_cls=dict(
37
+ type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0
38
+ ),
39
+ loss_bbox=dict(type="L1Loss", loss_weight=1.0),
40
+ ),
41
+ roi_head=dict(
42
+ type="StandardRoIHead",
43
+ bbox_roi_extractor=dict(
44
+ type="SingleRoIExtractor",
45
+ roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
46
+ out_channels=256,
47
+ featmap_strides=[4, 8, 16, 32],
48
+ ),
49
+ bbox_head=dict(
50
+ type="Shared2FCBBoxHead",
51
+ in_channels=256,
52
+ fc_out_channels=1024,
53
+ roi_feat_size=7,
54
+ num_classes=10,
55
+ bbox_coder=dict(
56
+ type="DeltaXYWHBBoxCoder",
57
+ target_means=[0.0, 0.0, 0.0, 0.0],
58
+ target_stds=[0.1, 0.1, 0.2, 0.2],
59
+ ),
60
+ reg_class_agnostic=False,
61
+ loss_cls=dict(
62
+ type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
63
+ ),
64
+ loss_bbox=dict(type="L1Loss", loss_weight=1.0),
65
+ ),
66
+ ),
67
+ # model training and testing settings
68
+ train_cfg=dict(
69
+ rpn=dict(
70
+ assigner=dict(
71
+ type="MaxIoUAssigner",
72
+ pos_iou_thr=0.7,
73
+ neg_iou_thr=0.3,
74
+ min_pos_iou=0.3,
75
+ match_low_quality=True,
76
+ ignore_iof_thr=-1,
77
+ ),
78
+ sampler=dict(
79
+ type="RandomSampler",
80
+ num=256,
81
+ pos_fraction=0.5,
82
+ neg_pos_ub=-1,
83
+ add_gt_as_proposals=False,
84
+ ),
85
+ allowed_border=-1,
86
+ pos_weight=-1,
87
+ debug=False,
88
+ ),
89
+ rpn_proposal=dict(
90
+ nms_pre=2000,
91
+ max_per_img=1000,
92
+ nms=dict(type="nms", iou_threshold=0.7),
93
+ min_bbox_size=0,
94
+ ),
95
+ rcnn=dict(
96
+ assigner=dict(
97
+ type="MaxIoUAssigner",
98
+ pos_iou_thr=0.5,
99
+ neg_iou_thr=0.5,
100
+ min_pos_iou=0.5,
101
+ match_low_quality=False,
102
+ ignore_iof_thr=-1,
103
+ ),
104
+ sampler=dict(
105
+ type="RandomSampler",
106
+ num=512,
107
+ pos_fraction=0.25,
108
+ neg_pos_ub=-1,
109
+ add_gt_as_proposals=True,
110
+ ),
111
+ pos_weight=-1,
112
+ debug=False,
113
+ ),
114
+ ),
115
+ test_cfg=dict(
116
+ rpn=dict(
117
+ nms_pre=1000,
118
+ max_per_img=1000,
119
+ nms=dict(type="nms", iou_threshold=0.7),
120
+ min_bbox_size=0,
121
+ ),
122
+ rcnn=dict(
123
+ score_thr=0.05,
124
+ nms=dict(type="nms", iou_threshold=0.5),
125
+ max_per_img=100,
126
+ )
127
+ # soft-nms is also supported for rcnn testing
128
+ # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
129
+ ),
130
+ )
configs/_base_/models/fcos_r50_fpn.py ADDED
@@ -0,0 +1,66 @@
1
+ # model settings
2
+ model = dict(
3
+ type="FCOS",
4
+ backbone=dict(
5
+ type="ResNet",
6
+ depth=50,
7
+ num_stages=4,
8
+ out_indices=(0, 1, 2, 3),
9
+ frozen_stages=1,
10
+ norm_cfg=dict(type="BN", requires_grad=False),
11
+ norm_eval=True,
12
+ style="caffe",
13
+ init_cfg=dict(
14
+ type="Pretrained",
15
+ checkpoint="open-mmlab://detectron/resnet50_caffe",
16
+ ),
17
+ ),
18
+ neck=dict(
19
+ type="FPN",
20
+ in_channels=[256, 512, 1024, 2048],
21
+ out_channels=256,
22
+ start_level=1,
23
+ add_extra_convs="on_output", # use P5
24
+ num_outs=5,
25
+ relu_before_extra_convs=True,
26
+ ),
27
+ bbox_head=dict(
28
+ type="FCOSHead",
29
+ num_classes=10,
30
+ in_channels=256,
31
+ stacked_convs=4,
32
+ feat_channels=256,
33
+ strides=[8, 16, 32, 64, 128],
34
+ loss_cls=dict(
35
+ type="FocalLoss",
36
+ use_sigmoid=True,
37
+ gamma=2.0,
38
+ alpha=0.25,
39
+ loss_weight=1.0,
40
+ ),
41
+ loss_bbox=dict(type="IoULoss", loss_weight=1.0),
42
+ loss_centerness=dict(
43
+ type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0
44
+ ),
45
+ ),
46
+ # training and testing settings
47
+ train_cfg=dict(
48
+ assigner=dict(
49
+ type="MaxIoUAssigner",
50
+ pos_iou_thr=0.5,
51
+ neg_iou_thr=0.4,
52
+ min_pos_iou=0,
53
+ ignore_iof_thr=-1,
54
+ ),
55
+ allowed_border=-1,
56
+ pos_weight=-1,
57
+ debug=False,
58
+ ),
59
+ test_cfg=dict(
60
+ nms_pre=1000,
61
+ min_bbox_size=0,
62
+ score_thr=0.05,
63
+ nms=dict(type="nms", iou_threshold=0.5),
64
+ max_per_img=100,
65
+ ),
66
+ )
configs/_base_/models/retinanet_r50_fpn.py ADDED
@@ -0,0 +1,70 @@
1
+ # model settings
2
+ model = dict(
3
+ type="RetinaNet",
4
+ backbone=dict(
5
+ type="ResNet",
6
+ depth=50,
7
+ num_stages=4,
8
+ out_indices=(0, 1, 2, 3),
9
+ frozen_stages=1,
10
+ norm_cfg=dict(type="BN", requires_grad=True),
11
+ norm_eval=True,
12
+ style="pytorch",
13
+ init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet50"),
14
+ ),
15
+ neck=dict(
16
+ type="FPN",
17
+ in_channels=[256, 512, 1024, 2048],
18
+ out_channels=256,
19
+ start_level=1,
20
+ add_extra_convs="on_input",
21
+ num_outs=5,
22
+ ),
23
+ bbox_head=dict(
24
+ type="RetinaHead",
25
+ num_classes=10,
26
+ in_channels=256,
27
+ stacked_convs=4,
28
+ feat_channels=256,
29
+ anchor_generator=dict(
30
+ type="AnchorGenerator",
31
+ octave_base_scale=4,
32
+ scales_per_octave=3,
33
+ ratios=[0.5, 1.0, 2.0],
34
+ strides=[8, 16, 32, 64, 128],
35
+ ),
36
+ bbox_coder=dict(
37
+ type="DeltaXYWHBBoxCoder",
38
+ target_means=[0.0, 0.0, 0.0, 0.0],
39
+ target_stds=[1.0, 1.0, 1.0, 1.0],
40
+ ),
41
+ loss_cls=dict(
42
+ type="FocalLoss",
43
+ use_sigmoid=True,
44
+ gamma=2.0,
45
+ alpha=0.25,
46
+ loss_weight=1.0,
47
+ ),
48
+ loss_bbox=dict(type="L1Loss", loss_weight=1.0),
49
+ ),
50
+ # model training and testing settings
51
+ train_cfg=dict(
52
+ assigner=dict(
53
+ type="MaxIoUAssigner",
54
+ pos_iou_thr=0.5,
55
+ neg_iou_thr=0.4,
56
+ min_pos_iou=0,
57
+ ignore_iof_thr=-1,
58
+ ),
59
+ allowed_border=-1,
60
+ pos_weight=-1,
61
+ debug=False,
62
+ ),
63
+ test_cfg=dict(
64
+ nms_pre=1000,
65
+ min_bbox_size=0,
66
+ score_thr=0.05,
67
+ nms=dict(type="nms", iou_threshold=0.5),
68
+ max_per_img=100,
69
+ ),
70
+ )
configs/_base_/schedules/schedule_1x.py ADDED
@@ -0,0 +1,12 @@
1
+ # optimizer
2
+ optimizer = dict(type="SGD", lr=0.04, momentum=0.9, weight_decay=0.0001)
3
+ optimizer_config = dict(grad_clip=None)
4
+ # learning policy
5
+ lr_config = dict(
6
+ policy="step",
7
+ warmup="linear",
8
+ warmup_iters=500,
9
+ warmup_ratio=0.001,
10
+ step=[8, 11],
11
+ )
12
+ runner = dict(type="EpochBasedRunner", max_epochs=12)
configs/_base_/schedules/schedule_3x.py ADDED
@@ -0,0 +1,12 @@
1
+ # optimizer
2
+ optimizer = dict(type="SGD", lr=0.04, momentum=0.9, weight_decay=0.0001)
3
+ optimizer_config = dict(grad_clip=None)
4
+ # learning policy
5
+ lr_config = dict(
6
+ policy="step",
7
+ warmup="linear",
8
+ warmup_iters=500,
9
+ warmup_ratio=0.001,
10
+ step=[24, 33],
11
+ )
12
+ runner = dict(type="EpochBasedRunner", max_epochs=36)
configs/_base_/schedules/schedule_5x.py ADDED
@@ -0,0 +1,12 @@
1
+ # optimizer
2
+ optimizer = dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=0.0001)
3
+ optimizer_config = dict(grad_clip=None)
4
+ # learning policy
5
+ lr_config = dict(
6
+ policy='step',
7
+ warmup='linear',
8
+ warmup_iters=500,
9
+ warmup_ratio=0.001,
10
+ step=[48, 55],
11
+ )
12
+ runner = dict(type='EpochBasedRunner', max_epochs=60)
configs/det/atss_r101_fpn_3x_det_bdd100k.py ADDED
@@ -0,0 +1,10 @@
1
+ """ATSS with ResNet101-FPN, 3x schedule, MS training."""
2
+
3
+ _base_ = "./atss_r50_fpn_3x_det_bdd100k.py"
4
+ model = dict(
5
+ backbone=dict(
6
+ depth=101,
7
+ init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet101"),
8
+ )
9
+ )
10
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/atss_r101_fpn_3x_det_bdd100k.pth"
configs/det/atss_r101_fpn_dyhead_3x_det_bdd100k.py ADDED
@@ -0,0 +1,10 @@
1
+ """ATSS with ResNet101-FPN and DyHead, 3x schedule, MS training."""
2
+
3
+ _base_ = "./atss_r50_fpn_dyhead_3x_det_bdd100k.py"
4
+ model = dict(
5
+ backbone=dict(
6
+ depth=101,
7
+ init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet101"),
8
+ )
9
+ )
10
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/atss_r101_fpn_dyhead_3x_det_bdd100k.pth"
configs/det/atss_r50_fpn_1x_det_bdd100k.py ADDED
@@ -0,0 +1,11 @@
1
+ """ATSS with ResNet50-FPN, 1x schedule."""
2
+
3
+ _base_ = [
4
+ "../_base_/models/atss_r50_fpn.py",
5
+ "../_base_/datasets/bdd100k.py",
6
+ "../_base_/schedules/schedule_1x.py",
7
+ "../_base_/default_runtime.py",
8
+ ]
9
+ # optimizer
10
+ optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
11
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/atss_r50_fpn_1x_det_bdd100k.pth"
configs/det/atss_r50_fpn_3x_det_bdd100k.py ADDED
@@ -0,0 +1,11 @@
1
+ """ATSS with ResNet50-FPN, 3x schedule, MS training."""
2
+
3
+ _base_ = [
4
+ "../_base_/models/atss_r50_fpn.py",
5
+ "../_base_/datasets/bdd100k_mstrain.py",
6
+ "../_base_/schedules/schedule_3x.py",
7
+ "../_base_/default_runtime.py",
8
+ ]
9
+ # optimizer
10
+ optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
11
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/atss_r50_fpn_3x_det_bdd100k.pth"
configs/det/atss_r50_fpn_dyhead_1x_det_bdd100k.py ADDED
@@ -0,0 +1,11 @@
1
+ """ATSS with ResNet50-FPN and DyHead, 1x schedule."""
2
+
3
+ _base_ = [
4
+ "../_base_/models/atss_r50_fpn_dyhead.py",
5
+ "../_base_/datasets/bdd100k.py",
6
+ "../_base_/schedules/schedule_1x.py",
7
+ "../_base_/default_runtime.py",
8
+ ]
9
+ # optimizer
10
+ optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
11
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/atss_r50_fpn_dyhead_1x_det_bdd100k.pth"
configs/det/atss_r50_fpn_dyhead_3x_det_bdd100k.py ADDED
@@ -0,0 +1,11 @@
1
+ """ATSS with ResNet50-FPN and DyHead, 3x schedule, MS training."""
2
+
3
+ _base_ = [
4
+ "../_base_/models/atss_r50_fpn_dyhead.py",
5
+ "../_base_/datasets/bdd100k_mstrain.py",
6
+ "../_base_/schedules/schedule_3x.py",
7
+ "../_base_/default_runtime.py",
8
+ ]
9
+ # optimizer
10
+ optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
11
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/atss_r50_fpn_dyhead_3x_det_bdd100k.pth"
configs/det/cascade_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.py ADDED
@@ -0,0 +1,21 @@
+ """Cascade RCNN with ConvNeXt-B, 3x schedule, MS training, FP16."""
+
+ _base_ = "./cascade_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.py"
+
+ # please install mmcls>=0.22.0
+ # import mmcls.models to trigger register_module in mmcls
+ custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
+ checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_in21k-pre-3rdparty_32xb128_in1k_20220124-eb2d6ada.pth'  # noqa
+ model = dict(
+     backbone=dict(
+         type='mmcls.ConvNeXt',
+         arch='base',
+         out_indices=[0, 1, 2, 3],
+         drop_path_rate=0.4,
+         layer_scale_init_value=1.0,
+         gap_before_final_norm=False,
+         init_cfg=dict(
+             type='Pretrained', checkpoint=checkpoint_file,
+             prefix='backbone.')),
+     neck=dict(in_channels=[128, 256, 512, 1024]))
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.pth"
configs/det/cascade_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.py ADDED
@@ -0,0 +1,45 @@
+ """Cascade RCNN with ConvNeXt-S, 3x schedule, MS training, FP16."""
+
+ _base_ = [
+     "../_base_/models/cascade_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k_mstrain.py",
+     "../_base_/schedules/schedule_3x.py",
+     "../_base_/default_runtime.py",
+ ]
+
+ # please install mmcls>=0.22.0
+ # import mmcls.models to trigger register_module in mmcls
+ custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
+ checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth'  # noqa
+
+ model = dict(
+     backbone=dict(
+         _delete_=True,
+         type='mmcls.ConvNeXt',
+         arch='small',
+         out_indices=[0, 1, 2, 3],
+         drop_path_rate=0.4,
+         layer_scale_init_value=1.0,
+         gap_before_final_norm=False,
+         init_cfg=dict(
+             type='Pretrained', checkpoint=checkpoint_file,
+             prefix='backbone.')),
+     neck=dict(in_channels=[96, 192, 384, 768]))
+
+ optimizer = dict(
+     _delete_=True,
+     constructor='LearningRateDecayOptimizerConstructor',
+     type='AdamW',
+     lr=0.0001,
+     betas=(0.9, 0.999),
+     weight_decay=0.05,
+     paramwise_cfg={
+         'decay_rate': 0.95,
+         'decay_type': 'layer_wise',
+         'num_layers': 12
+     })
+ lr_config = dict(warmup_iters=1000, step=[27, 33])
+
+ # you need to set mode='dynamic' if you are using pytorch<=1.5.0
+ fp16 = dict(loss_scale=dict(init_scale=512))
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.pth"
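The `LearningRateDecayOptimizerConstructor` used above applies layer-wise LR decay: each shallower backbone block has its AdamW learning rate multiplied by a further factor of `decay_rate`. A rough sketch of the idea only; the exact layer indexing is handled inside mmdet and is not reproduced here:

```python
# Conceptual sketch of layer-wise LR decay (decay_rate=0.95, num_layers=12).
base_lr, decay_rate, num_layers = 1e-4, 0.95, 12

for layer_id in range(num_layers + 1):
    # deeper blocks (larger layer_id) keep an LR close to base_lr,
    # the stem/embedding ends up with the smallest LR
    print(layer_id, base_lr * decay_rate ** (num_layers - layer_id))
```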
configs/det/cascade_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.py ADDED
@@ -0,0 +1,45 @@
+ """Cascade RCNN with ConvNeXt-T, 1x schedule, FP16."""
+
+ _base_ = [
+     "../_base_/models/cascade_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k.py",
+     "../_base_/schedules/schedule_1x.py",
+     "../_base_/default_runtime.py",
+ ]
+
+ # please install mmcls>=0.22.0
+ # import mmcls.models to trigger register_module in mmcls
+ custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
+ checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth'  # noqa
+
+ model = dict(
+     backbone=dict(
+         _delete_=True,
+         type='mmcls.ConvNeXt',
+         arch='tiny',
+         out_indices=[0, 1, 2, 3],
+         drop_path_rate=0.4,
+         layer_scale_init_value=1.0,
+         gap_before_final_norm=False,
+         init_cfg=dict(
+             type='Pretrained', checkpoint=checkpoint_file,
+             prefix='backbone.')),
+     neck=dict(in_channels=[96, 192, 384, 768]))
+
+ optimizer = dict(
+     _delete_=True,
+     constructor='LearningRateDecayOptimizerConstructor',
+     type='AdamW',
+     lr=0.0001,
+     betas=(0.9, 0.999),
+     weight_decay=0.05,
+     paramwise_cfg={
+         'decay_rate': 0.95,
+         'decay_type': 'layer_wise',
+         'num_layers': 6
+     })
+ lr_config = dict(warmup_iters=1000, step=[9, 11])
+
+ # you need to set mode='dynamic' if you are using pytorch<=1.5.0
+ fp16 = dict(loss_scale=dict(init_scale=512))
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.pth"
configs/det/cascade_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.py ADDED
@@ -0,0 +1,45 @@
+ """Cascade RCNN with ConvNeXt-T, 3x schedule, MS training, FP16."""
+
+ _base_ = [
+     "../_base_/models/cascade_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k_mstrain.py",
+     "../_base_/schedules/schedule_3x.py",
+     "../_base_/default_runtime.py",
+ ]
+
+ # please install mmcls>=0.22.0
+ # import mmcls.models to trigger register_module in mmcls
+ custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
+ checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth'  # noqa
+
+ model = dict(
+     backbone=dict(
+         _delete_=True,
+         type='mmcls.ConvNeXt',
+         arch='tiny',
+         out_indices=[0, 1, 2, 3],
+         drop_path_rate=0.4,
+         layer_scale_init_value=1.0,
+         gap_before_final_norm=False,
+         init_cfg=dict(
+             type='Pretrained', checkpoint=checkpoint_file,
+             prefix='backbone.')),
+     neck=dict(in_channels=[96, 192, 384, 768]))
+
+ optimizer = dict(
+     _delete_=True,
+     constructor='LearningRateDecayOptimizerConstructor',
+     type='AdamW',
+     lr=0.0001,
+     betas=(0.9, 0.999),
+     weight_decay=0.05,
+     paramwise_cfg={
+         'decay_rate': 0.95,
+         'decay_type': 'layer_wise',
+         'num_layers': 6
+     })
+ lr_config = dict(warmup_iters=1000, step=[27, 33])
+
+ # you need to set mode='dynamic' if you are using pytorch<=1.5.0
+ fp16 = dict(loss_scale=dict(init_scale=512))
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.pth"
configs/det/cascade_rcnn_r101_fpn_3x_det_bdd100k.py ADDED
@@ -0,0 +1,10 @@
+ """Cascade RCNN with ResNet101-FPN, 3x schedule, MS training."""
+
+ _base_ = "./cascade_rcnn_r50_fpn_3x_det_bdd100k.py"
+ model = dict(
+     backbone=dict(
+         depth=101,
+         init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet101"),
+     )
+ )
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_r101_fpn_3x_det_bdd100k.pth"
configs/det/cascade_rcnn_r50_fpn_1x_det_bdd100k.py ADDED
@@ -0,0 +1,9 @@
+ """Cascade RCNN with ResNet50-FPN, 1x schedule."""
+
+ _base_ = [
+     "../_base_/models/cascade_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k.py",
+     "../_base_/schedules/schedule_1x.py",
+     "../_base_/default_runtime.py",
+ ]
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_r50_fpn_1x_det_bdd100k.pth"
configs/det/cascade_rcnn_r50_fpn_3x_det_bdd100k.py ADDED
@@ -0,0 +1,9 @@
+ """Cascade RCNN with ResNet50-FPN, 3x schedule, MS training."""
+
+ _base_ = [
+     "../_base_/models/cascade_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k_mstrain.py",
+     "../_base_/schedules/schedule_3x.py",
+     "../_base_/default_runtime.py",
+ ]
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_r50_fpn_3x_det_bdd100k.pth"
configs/det/cascade_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.py ADDED
@@ -0,0 +1,28 @@
+ """Cascade RCNN with Swin-B, 3x schedule, MS training."""
+
+ _base_ = "./cascade_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.py"
+
+ pretrained = "https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth"  # noqa
+ model = dict(
+     backbone=dict(
+         _delete_=True,
+         type="SwinTransformer",
+         embed_dims=128,
+         depths=[2, 2, 18, 2],
+         num_heads=[4, 8, 16, 32],
+         window_size=7,
+         mlp_ratio=4,
+         qkv_bias=True,
+         qk_scale=None,
+         drop_rate=0.0,
+         attn_drop_rate=0.0,
+         drop_path_rate=0.2,
+         patch_norm=True,
+         out_indices=(0, 1, 2, 3),
+         with_cp=False,
+         convert_weights=True,
+         init_cfg=dict(type="Pretrained", checkpoint=pretrained),
+     ),
+     neck=dict(in_channels=[128, 256, 512, 1024]),
+ )
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.pth"
configs/det/cascade_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.py ADDED
@@ -0,0 +1,51 @@
+ """Cascade RCNN with Swin-S, 3x schedule, MS training."""
+
+ _base_ = [
+     "../_base_/models/cascade_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k_mstrain.py",
+     "../_base_/schedules/schedule_3x.py",
+     "../_base_/default_runtime.py",
+ ]
+
+ pretrained = "https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth"  # noqa
+ model = dict(
+     backbone=dict(
+         _delete_=True,
+         type="SwinTransformer",
+         embed_dims=96,
+         depths=[2, 2, 18, 2],
+         num_heads=[3, 6, 12, 24],
+         window_size=7,
+         mlp_ratio=4,
+         qkv_bias=True,
+         qk_scale=None,
+         drop_rate=0.0,
+         attn_drop_rate=0.0,
+         drop_path_rate=0.2,
+         patch_norm=True,
+         out_indices=(0, 1, 2, 3),
+         with_cp=False,
+         convert_weights=True,
+         init_cfg=dict(type="Pretrained", checkpoint=pretrained),
+     ),
+     neck=dict(in_channels=[96, 192, 384, 768]),
+ )
+
+ optimizer = dict(
+     _delete_=True,
+     type="AdamW",
+     lr=0.0001,
+     betas=(0.9, 0.999),
+     weight_decay=0.05,
+     paramwise_cfg=dict(
+         custom_keys={
+             "absolute_pos_embed": dict(decay_mult=0.0),
+             "relative_position_bias_table": dict(decay_mult=0.0),
+             "norm": dict(decay_mult=0.0),
+         }
+     ),
+ )
+ lr_config = dict(warmup_iters=1000, step=[27, 33])
+
+ data = dict(samples_per_gpu=2, workers_per_gpu=2)
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.pth"
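The `custom_keys` entries above disable weight decay for any parameter whose name contains the given substring (absolute position embeddings, relative position bias tables, and norm layers), the usual recipe for Swin backbones. A toy sketch of the effect, using a stand-in module rather than the actual detector:

```python
import torch
import torch.nn as nn

class Toy(nn.Module):
    """Stand-in for the detector; only the parameter names matter here."""
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(8, 8)
        self.norm = nn.LayerNorm(8)

no_decay_keys = ("absolute_pos_embed", "relative_position_bias_table", "norm")
decay, no_decay = [], []
for name, param in Toy().named_parameters():
    (no_decay if any(k in name for k in no_decay_keys) else decay).append(param)

optimizer = torch.optim.AdamW(
    [{"params": decay, "weight_decay": 0.05},
     {"params": no_decay, "weight_decay": 0.0}],
    lr=1e-4, betas=(0.9, 0.999))
```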
configs/det/cascade_rcnn_swin-t_fpn_3x_det_bdd100k.py ADDED
@@ -0,0 +1,51 @@
+ """Cascade RCNN with Swin-T, 3x schedule, MS training."""
+
+ _base_ = [
+     "../_base_/models/cascade_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k_mstrain.py",
+     "../_base_/schedules/schedule_3x.py",
+     "../_base_/default_runtime.py",
+ ]
+
+ pretrained = "https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth"  # noqa
+ model = dict(
+     backbone=dict(
+         _delete_=True,
+         type="SwinTransformer",
+         embed_dims=96,
+         depths=[2, 2, 6, 2],
+         num_heads=[3, 6, 12, 24],
+         window_size=7,
+         mlp_ratio=4,
+         qkv_bias=True,
+         qk_scale=None,
+         drop_rate=0.0,
+         attn_drop_rate=0.0,
+         drop_path_rate=0.2,
+         patch_norm=True,
+         out_indices=(0, 1, 2, 3),
+         with_cp=False,
+         convert_weights=True,
+         init_cfg=dict(type="Pretrained", checkpoint=pretrained),
+     ),
+     neck=dict(in_channels=[96, 192, 384, 768]),
+ )
+
+ optimizer = dict(
+     _delete_=True,
+     type="AdamW",
+     lr=0.0001,
+     betas=(0.9, 0.999),
+     weight_decay=0.05,
+     paramwise_cfg=dict(
+         custom_keys={
+             "absolute_pos_embed": dict(decay_mult=0.0),
+             "relative_position_bias_table": dict(decay_mult=0.0),
+             "norm": dict(decay_mult=0.0),
+         }
+     ),
+ )
+ lr_config = dict(warmup_iters=1000, step=[27, 33])
+
+ data = dict(samples_per_gpu=2, workers_per_gpu=2)
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/cascade_rcnn_swin-t_fpn_3x_det_bdd100k.pth"
configs/det/faster_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.py ADDED
@@ -0,0 +1,21 @@
+ """Faster RCNN with ConvNeXt-B, 3x schedule, MS training, FP16."""
+
+ _base_ = "./faster_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.py"
+
+ # please install mmcls>=0.22.0
+ # import mmcls.models to trigger register_module in mmcls
+ custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
+ checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-base_3rdparty_32xb128-noema_in1k_20220301-2a0ee547.pth'  # noqa
+ model = dict(
+     backbone=dict(
+         type='mmcls.ConvNeXt',
+         arch='base',
+         out_indices=[0, 1, 2, 3],
+         drop_path_rate=0.7,
+         layer_scale_init_value=1.0,
+         gap_before_final_norm=False,
+         init_cfg=dict(
+             type='Pretrained', checkpoint=checkpoint_file,
+             prefix='backbone.')),
+     neck=dict(in_channels=[128, 256, 512, 1024]))
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_convnext-b_fpn_fp16_3x_det_bdd100k.pth"
configs/det/faster_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.py ADDED
@@ -0,0 +1,45 @@
+ """Faster RCNN with ConvNeXt-S, 3x schedule, MS training, FP16."""
+
+ _base_ = [
+     "../_base_/models/faster_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k_mstrain.py",
+     "../_base_/schedules/schedule_3x.py",
+     "../_base_/default_runtime.py",
+ ]
+
+ # please install mmcls>=0.22.0
+ # import mmcls.models to trigger register_module in mmcls
+ custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
+ checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth'  # noqa
+
+ model = dict(
+     backbone=dict(
+         _delete_=True,
+         type='mmcls.ConvNeXt',
+         arch='small',
+         out_indices=[0, 1, 2, 3],
+         drop_path_rate=0.6,
+         layer_scale_init_value=1.0,
+         gap_before_final_norm=False,
+         init_cfg=dict(
+             type='Pretrained', checkpoint=checkpoint_file,
+             prefix='backbone.')),
+     neck=dict(in_channels=[96, 192, 384, 768]))
+
+ optimizer = dict(
+     _delete_=True,
+     constructor='LearningRateDecayOptimizerConstructor',
+     type='AdamW',
+     lr=0.0001,
+     betas=(0.9, 0.999),
+     weight_decay=0.05,
+     paramwise_cfg={
+         'decay_rate': 0.95,
+         'decay_type': 'layer_wise',
+         'num_layers': 12
+     })
+ lr_config = dict(warmup_iters=1000, step=[27, 33])
+
+ # you need to set mode='dynamic' if you are using pytorch<=1.5.0
+ fp16 = dict(loss_scale=dict(init_scale=512))
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_convnext-s_fpn_fp16_3x_det_bdd100k.pth"
configs/det/faster_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.py ADDED
@@ -0,0 +1,45 @@
+ """Faster RCNN with ConvNeXt-T, 1x schedule, FP16."""
+
+ _base_ = [
+     "../_base_/models/faster_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k.py",
+     "../_base_/schedules/schedule_1x.py",
+     "../_base_/default_runtime.py",
+ ]
+
+ # please install mmcls>=0.22.0
+ # import mmcls.models to trigger register_module in mmcls
+ custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
+ checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth'  # noqa
+
+ model = dict(
+     backbone=dict(
+         _delete_=True,
+         type='mmcls.ConvNeXt',
+         arch='tiny',
+         out_indices=[0, 1, 2, 3],
+         drop_path_rate=0.4,
+         layer_scale_init_value=1.0,
+         gap_before_final_norm=False,
+         init_cfg=dict(
+             type='Pretrained', checkpoint=checkpoint_file,
+             prefix='backbone.')),
+     neck=dict(in_channels=[96, 192, 384, 768]))
+
+ optimizer = dict(
+     _delete_=True,
+     constructor='LearningRateDecayOptimizerConstructor',
+     type='AdamW',
+     lr=0.0001,
+     betas=(0.9, 0.999),
+     weight_decay=0.05,
+     paramwise_cfg={
+         'decay_rate': 0.95,
+         'decay_type': 'layer_wise',
+         'num_layers': 6
+     })
+ lr_config = dict(warmup_iters=1000, step=[9, 11])
+
+ # you need to set mode='dynamic' if you are using pytorch<=1.5.0
+ fp16 = dict(loss_scale=dict(init_scale=512))
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_convnext-t_fpn_fp16_1x_det_bdd100k.pth"
configs/det/faster_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.py ADDED
@@ -0,0 +1,45 @@
+ """Faster RCNN with ConvNeXt-T, 3x schedule, MS training, FP16."""
+
+ _base_ = [
+     "../_base_/models/faster_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k_mstrain.py",
+     "../_base_/schedules/schedule_3x.py",
+     "../_base_/default_runtime.py",
+ ]
+
+ # please install mmcls>=0.22.0
+ # import mmcls.models to trigger register_module in mmcls
+ custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
+ checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth'  # noqa
+
+ model = dict(
+     backbone=dict(
+         _delete_=True,
+         type='mmcls.ConvNeXt',
+         arch='tiny',
+         out_indices=[0, 1, 2, 3],
+         drop_path_rate=0.4,
+         layer_scale_init_value=1.0,
+         gap_before_final_norm=False,
+         init_cfg=dict(
+             type='Pretrained', checkpoint=checkpoint_file,
+             prefix='backbone.')),
+     neck=dict(in_channels=[96, 192, 384, 768]))
+
+ optimizer = dict(
+     _delete_=True,
+     constructor='LearningRateDecayOptimizerConstructor',
+     type='AdamW',
+     lr=0.0001,
+     betas=(0.9, 0.999),
+     weight_decay=0.05,
+     paramwise_cfg={
+         'decay_rate': 0.95,
+         'decay_type': 'layer_wise',
+         'num_layers': 6
+     })
+ lr_config = dict(warmup_iters=1000, step=[27, 33])
+
+ # you need to set mode='dynamic' if you are using pytorch<=1.5.0
+ fp16 = dict(loss_scale=dict(init_scale=512))
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_convnext-t_fpn_fp16_3x_det_bdd100k.pth"
configs/det/faster_rcnn_hrnetv2p_w18_1x_det_bdd100k.py ADDED
@@ -0,0 +1,17 @@
+ """HRNet18, 1x schedule."""
+
+ _base_ = "./faster_rcnn_hrnetv2p_w32_1x_det_bdd100k.py"
+ model = dict(
+     backbone=dict(
+         extra=dict(
+             stage2=dict(num_channels=(18, 36)),
+             stage3=dict(num_channels=(18, 36, 72)),
+             stage4=dict(num_channels=(18, 36, 72, 144)),
+         ),
+         init_cfg=dict(
+             type="Pretrained", checkpoint="open-mmlab://msra/hrnetv2_w18"
+         ),
+     ),
+     neck=dict(type="HRFPN", in_channels=[18, 36, 72, 144], out_channels=256),
+ )
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_hrnetv2p_w18_1x_det_bdd100k.pth"
configs/det/faster_rcnn_hrnetv2p_w18_3x_det_bdd100k.py ADDED
@@ -0,0 +1,17 @@
+ """HRNet18, 3x schedule, MS training."""
+
+ _base_ = "./faster_rcnn_hrnetv2p_w32_3x_det_bdd100k.py"
+ model = dict(
+     backbone=dict(
+         extra=dict(
+             stage2=dict(num_channels=(18, 36)),
+             stage3=dict(num_channels=(18, 36, 72)),
+             stage4=dict(num_channels=(18, 36, 72, 144)),
+         ),
+         init_cfg=dict(
+             type="Pretrained", checkpoint="open-mmlab://msra/hrnetv2_w18"
+         ),
+     ),
+     neck=dict(type="HRFPN", in_channels=[18, 36, 72, 144], out_channels=256),
+ )
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_hrnetv2p_w18_3x_det_bdd100k.pth"
configs/det/faster_rcnn_hrnetv2p_w32_1x_det_bdd100k.py ADDED
@@ -0,0 +1,51 @@
+ """HRNet32, 1x schedule."""
+
+ _base_ = "./faster_rcnn_r50_fpn_1x_det_bdd100k.py"
+ model = dict(
+     backbone=dict(
+         _delete_=True,
+         type="HRNet",
+         extra=dict(
+             stage1=dict(
+                 num_modules=1,
+                 num_branches=1,
+                 block="BOTTLENECK",
+                 num_blocks=(4,),
+                 num_channels=(64,),
+             ),
+             stage2=dict(
+                 num_modules=1,
+                 num_branches=2,
+                 block="BASIC",
+                 num_blocks=(4, 4),
+                 num_channels=(32, 64),
+             ),
+             stage3=dict(
+                 num_modules=4,
+                 num_branches=3,
+                 block="BASIC",
+                 num_blocks=(4, 4, 4),
+                 num_channels=(32, 64, 128),
+             ),
+             stage4=dict(
+                 num_modules=3,
+                 num_branches=4,
+                 block="BASIC",
+                 num_blocks=(4, 4, 4, 4),
+                 num_channels=(32, 64, 128, 256),
+             ),
+         ),
+         init_cfg=dict(
+             type="Pretrained", checkpoint="open-mmlab://msra/hrnetv2_w32"
+         ),
+     ),
+     neck=dict(
+         _delete_=True,
+         type="HRFPN",
+         in_channels=[32, 64, 128, 256],
+         out_channels=256,
+     ),
+ )
+ data = dict(samples_per_gpu=2, workers_per_gpu=2)
+ optimizer = dict(type="SGD", lr=0.02, momentum=0.9, weight_decay=0.0001)
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_hrnetv2p_w32_1x_det_bdd100k.pth"
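`_delete_=True`, used here for the backbone and neck (and elsewhere in these configs for optimizers), tells MMCV's config merger to discard the inherited entry instead of merging into it; without it, leftover ResNet/FPN keys from the base config would survive alongside the HRNet settings. A plain-dict illustration of that rule, not the actual mmcv implementation:

```python
def merge(base: dict, override: dict) -> dict:
    # If the override asks for deletion, replace the base entry wholesale.
    if override.get("_delete_", False):
        return {k: v for k, v in override.items() if k != "_delete_"}
    out = dict(base)
    for k, v in override.items():
        out[k] = merge(base[k], v) if isinstance(v, dict) and isinstance(base.get(k), dict) else v
    return out

base = {"type": "ResNet", "depth": 50, "num_stages": 4}
print(merge(base, {"depth": 101}))                        # keys merged: still ResNet, depth 101
print(merge(base, {"_delete_": True, "type": "HRNet"}))   # base keys dropped: only HRNet remains
```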
configs/det/faster_rcnn_hrnetv2p_w32_3x_det_bdd100k.py ADDED
@@ -0,0 +1,51 @@
+ """HRNet32, 3x schedule, MS training."""
+
+ _base_ = "./faster_rcnn_r50_fpn_3x_det_bdd100k.py"
+ model = dict(
+     backbone=dict(
+         _delete_=True,
+         type="HRNet",
+         extra=dict(
+             stage1=dict(
+                 num_modules=1,
+                 num_branches=1,
+                 block="BOTTLENECK",
+                 num_blocks=(4,),
+                 num_channels=(64,),
+             ),
+             stage2=dict(
+                 num_modules=1,
+                 num_branches=2,
+                 block="BASIC",
+                 num_blocks=(4, 4),
+                 num_channels=(32, 64),
+             ),
+             stage3=dict(
+                 num_modules=4,
+                 num_branches=3,
+                 block="BASIC",
+                 num_blocks=(4, 4, 4),
+                 num_channels=(32, 64, 128),
+             ),
+             stage4=dict(
+                 num_modules=3,
+                 num_branches=4,
+                 block="BASIC",
+                 num_blocks=(4, 4, 4, 4),
+                 num_channels=(32, 64, 128, 256),
+             ),
+         ),
+         init_cfg=dict(
+             type="Pretrained", checkpoint="open-mmlab://msra/hrnetv2_w32"
+         ),
+     ),
+     neck=dict(
+         _delete_=True,
+         type="HRFPN",
+         in_channels=[32, 64, 128, 256],
+         out_channels=256,
+     ),
+ )
+ data = dict(samples_per_gpu=2, workers_per_gpu=2)
+ optimizer = dict(type="SGD", lr=0.02, momentum=0.9, weight_decay=0.0001)
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_hrnetv2p_w32_3x_det_bdd100k.pth"
configs/det/faster_rcnn_r101_fpn_3x_det_bdd100k.py ADDED
@@ -0,0 +1,10 @@
+ """Faster RCNN with ResNet101-FPN, 3x schedule, MS training."""
+
+ _base_ = "./faster_rcnn_r50_fpn_3x_det_bdd100k.py"
+ model = dict(
+     backbone=dict(
+         depth=101,
+         init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet101"),
+     )
+ )
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r101_fpn_3x_det_bdd100k.pth"
configs/det/faster_rcnn_r101_fpn_5x_det_bdd100k.py ADDED
@@ -0,0 +1,10 @@
+ """Faster RCNN with ResNet101-FPN, 5x schedule, MS training."""
+
+ _base_ = "./faster_rcnn_r50_fpn_5x_det_bdd100k.py"
+ model = dict(
+     backbone=dict(
+         depth=101,
+         init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet101"),
+     )
+ )
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r101_fpn_5x_det_bdd100k.pth"
configs/det/faster_rcnn_r101_fpn_dconv_3x_det_bdd100k.py ADDED
@@ -0,0 +1,10 @@
+ """Deformable Conv Nets with ResNet101-FPN, 3x schedule, MS training."""
+
+ _base_ = "./faster_rcnn_r101_fpn_3x_det_bdd100k.py"
+ model = dict(
+     backbone=dict(
+         dcn=dict(type="DCN", deform_groups=1, fallback_on_stride=False),
+         stage_with_dcn=(False, True, True, True),
+     )
+ )
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r101_fpn_dconv_3x_det_bdd100k.pth"
configs/det/faster_rcnn_r50_fpn_1x_det_bdd100k.py ADDED
@@ -0,0 +1,9 @@
+ """Faster RCNN with ResNet50-FPN, 1x schedule."""
+
+ _base_ = [
+     "../_base_/models/faster_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k.py",
+     "../_base_/schedules/schedule_1x.py",
+     "../_base_/default_runtime.py",
+ ]
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_1x_det_bdd100k.pth"
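Every config ends with a `load_from` URL pointing at the released BDD100K checkpoint, so the models can be used for inference without retraining. A minimal sketch, assuming mmdet 2.x is installed and a local image `demo.jpg` exists:

```python
from mmdet.apis import inference_detector, init_detector

config = "configs/det/faster_rcnn_r50_fpn_1x_det_bdd100k.py"
checkpoint = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_1x_det_bdd100k.pth"

model = init_detector(config, checkpoint, device="cuda:0")
result = inference_detector(model, "demo.jpg")  # per-class list of [x1, y1, x2, y2, score] arrays
```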
configs/det/faster_rcnn_r50_fpn_3x_det_bdd100k.py ADDED
@@ -0,0 +1,9 @@
+ """Faster RCNN with ResNet50-FPN, 3x schedule, MS training."""
+
+ _base_ = [
+     "../_base_/models/faster_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k_mstrain.py",
+     "../_base_/schedules/schedule_3x.py",
+     "../_base_/default_runtime.py",
+ ]
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_3x_det_bdd100k.pth"
configs/det/faster_rcnn_r50_fpn_5x_det_bdd100k.py ADDED
@@ -0,0 +1,9 @@
+ """Faster RCNN with ResNet50-FPN, 5x schedule, MS training."""
+
+ _base_ = [
+     "../_base_/models/faster_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k_mstrain.py",
+     "../_base_/schedules/schedule_5x.py",
+     "../_base_/default_runtime.py",
+ ]
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_5x_det_bdd100k.pth"
configs/det/faster_rcnn_r50_fpn_dconv_1x_det_bdd100k.py ADDED
@@ -0,0 +1,10 @@
+ """Deformable Conv Nets with ResNet50-FPN, 1x schedule."""
+
+ _base_ = "./faster_rcnn_r50_fpn_1x_det_bdd100k.py"
+ model = dict(
+     backbone=dict(
+         dcn=dict(type="DCN", deform_groups=1, fallback_on_stride=False),
+         stage_with_dcn=(False, True, True, True),
+     )
+ )
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_dconv_1x_det_bdd100k.pth"
configs/det/faster_rcnn_r50_fpn_dconv_3x_det_bdd100k.py ADDED
@@ -0,0 +1,10 @@
+ """Deformable Conv Nets with ResNet50-FPN, 3x schedule, MS training."""
+
+ _base_ = "./faster_rcnn_r50_fpn_3x_det_bdd100k.py"
+ model = dict(
+     backbone=dict(
+         dcn=dict(type="DCN", deform_groups=1, fallback_on_stride=False),
+         stage_with_dcn=(False, True, True, True),
+     )
+ )
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_dconv_3x_det_bdd100k.pth"
configs/det/faster_rcnn_r50_fpn_rsb_1x_det_bdd100k.py ADDED
@@ -0,0 +1,26 @@
+ """ResNet strikes back, Faster RCNN with ResNet50-FPN, 1x schedule."""
+
+ _base_ = [
+     "../_base_/models/faster_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k.py",
+     "../_base_/schedules/schedule_1x.py",
+     "../_base_/default_runtime.py",
+ ]
+ checkpoint = "https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth"  # noqa
+ model = dict(
+     backbone=dict(
+         init_cfg=dict(
+             type="Pretrained",
+             prefix="backbone.",
+             checkpoint=checkpoint,
+         )
+     )
+ )
+ optimizer = dict(
+     _delete_=True,
+     type="AdamW",
+     lr=0.0002,
+     weight_decay=0.05,
+     paramwise_cfg=dict(norm_decay_mult=0.0, bypass_duplicate=True),
+ )
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_rsb_1x_det_bdd100k.pth"
configs/det/faster_rcnn_r50_fpn_rsb_3x_det_bdd100k.py ADDED
@@ -0,0 +1,26 @@
+ """ResNet strikes back, Faster RCNN with ResNet50-FPN, 3x schedule, MS training."""
+
+ _base_ = [
+     "../_base_/models/faster_rcnn_r50_fpn.py",
+     "../_base_/datasets/bdd100k_mstrain.py",
+     "../_base_/schedules/schedule_3x.py",
+     "../_base_/default_runtime.py",
+ ]
+ checkpoint = "https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth"  # noqa
+ model = dict(
+     backbone=dict(
+         init_cfg=dict(
+             type="Pretrained",
+             prefix="backbone.",
+             checkpoint=checkpoint,
+         )
+     )
+ )
+ optimizer = dict(
+     _delete_=True,
+     type="AdamW",
+     lr=0.0002,
+     weight_decay=0.05,
+     paramwise_cfg=dict(norm_decay_mult=0.0, bypass_duplicate=True),
+ )
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_r50_fpn_rsb_3x_det_bdd100k.pth"
configs/det/faster_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.py ADDED
@@ -0,0 +1,28 @@
+ """Faster RCNN with Swin-B, 3x schedule, MS training."""
+
+ _base_ = "./faster_rcnn_swin-s_fpn_fp16_3x_det_bdd100k.py"
+
+ pretrained = "https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth"  # noqa
+ model = dict(
+     backbone=dict(
+         _delete_=True,
+         type="SwinTransformer",
+         embed_dims=128,
+         depths=[2, 2, 18, 2],
+         num_heads=[4, 8, 16, 32],
+         window_size=7,
+         mlp_ratio=4,
+         qkv_bias=True,
+         qk_scale=None,
+         drop_rate=0.0,
+         attn_drop_rate=0.0,
+         drop_path_rate=0.2,
+         patch_norm=True,
+         out_indices=(0, 1, 2, 3),
+         with_cp=False,
+         convert_weights=True,
+         init_cfg=dict(type="Pretrained", checkpoint=pretrained),
+     ),
+     neck=dict(in_channels=[128, 256, 512, 1024]),
+ )
+ load_from = "https://dl.cv.ethz.ch/bdd100k/det/models/faster_rcnn_swin-b_fpn_fp16_3x_det_bdd100k.pth"