Commit 976629d4 in nikhil_rayaprolu/food-round2
Authored 5 years ago by Cao Yuhang
Parent: dc341cb8

    support segm evaluation using different score from bbox det
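In short: the mask branch of a detector can now report its own confidence instead of reusing the box score. After this commit, segm2json accepts each per-image result as a pair (det, seg) where seg is either the per-class list of encoded masks (previous behaviour, in which the mask score falls back to the box score) or a 2-tuple of (masks, mask scores). A minimal sketch of the two accepted shapes, using a hypothetical single-class model and placeholder values:

import numpy as np

# det[label] is an (N, 5) array of x1, y1, x2, y2, box_score rows.
det = [np.array([[10., 10., 50., 60., 0.9]])]

# Placeholder RLE-encoded mask, as produced by pycocotools/mmcv.
rle = {'size': [100, 100], 'counts': b'...'}

# Old format: seg[label] is the list of encoded masks for that class;
# the segm JSON entries then reuse the box score (bbox[4]).
seg_old = [[rle]]

# Format added by this commit: a 2-tuple (masks, mask_scores), so the
# segm entries carry a score computed by the mask head itself.
seg_new = ([[rle]], [[0.75]])

results = [(det, seg_new)]  # one (det, seg) pair per image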
Changes: 3 changed files, with 52 additions and 23 deletions

    mmdet/core/evaluation/coco_utils.py   +39  −11
    mmdet/core/evaluation/eval_hooks.py    +7   −6
    tools/test.py                          +6   −6
mmdet/core/evaluation/coco_utils.py  (+39 −11)

@@ -6,7 +6,7 @@ from pycocotools.cocoeval import COCOeval
 from .recall import eval_recalls


-def coco_eval(result_file, result_types, coco, max_dets=(100, 300, 1000)):
+def coco_eval(result_files, result_types, coco, max_dets=(100, 300, 1000)):
     for res_type in result_types:
         assert res_type in [
             'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
@@ -17,16 +17,17 @@ def coco_eval(result_file, result_types, coco, max_dets=(100, 300, 1000)):
     assert isinstance(coco, COCO)

     if result_types == ['proposal_fast']:
-        ar = fast_eval_recall(result_file, coco, np.array(max_dets))
+        ar = fast_eval_recall(result_files, coco, np.array(max_dets))
         for i, num in enumerate(max_dets):
             print('AR@{}\t= {:.4f}'.format(num, ar[i]))
         return

-    assert result_file.endswith('.json')
-    coco_dets = coco.loadRes(result_file)
-
-    img_ids = coco.getImgIds()
     for res_type in result_types:
+        result_file = result_files[res_type]
+        assert result_file.endswith('.json')
+
+        coco_dets = coco.loadRes(result_file)
+        img_ids = coco.getImgIds()
         iou_type = 'bbox' if res_type == 'proposal' else res_type
         cocoEval = COCOeval(coco, coco_dets, iou_type)
         cocoEval.params.imgIds = img_ids
@@ -118,32 +119,59 @@ def det2json(dataset, results):


 def segm2json(dataset, results):
-    json_results = []
+    bbox_json_results = []
+    segm_json_results = []
     for idx in range(len(dataset)):
         img_id = dataset.img_ids[idx]
         det, seg = results[idx]
         for label in range(len(det)):
+            # bbox results
             bboxes = det[label]
-            segms = seg[label]
             for i in range(bboxes.shape[0]):
                 data = dict()
                 data['image_id'] = img_id
                 data['bbox'] = xyxy2xywh(bboxes[i])
                 data['score'] = float(bboxes[i][4])
                 data['category_id'] = dataset.cat_ids[label]
+                bbox_json_results.append(data)
+
+            # segm results
+            # some detectors use different score for det and segm
+            if len(seg) == 2:
+                segms = seg[0][label]
+                mask_score = seg[1][label]
+            else:
+                segms = seg[label]
+                mask_score = [bbox[4] for bbox in bboxes]
+            for i in range(bboxes.shape[0]):
+                data = dict()
+                data['image_id'] = img_id
+                data['score'] = float(mask_score[i])
+                data['category_id'] = dataset.cat_ids[label]
                 segms[i]['counts'] = segms[i]['counts'].decode()
                 data['segmentation'] = segms[i]
-                json_results.append(data)
-    return json_results
+                segm_json_results.append(data)
+    return bbox_json_results, segm_json_results


 def results2json(dataset, results, out_file):
+    result_files = dict()
     if isinstance(results[0], list):
         json_results = det2json(dataset, results)
+        result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
+        result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
+        mmcv.dump(json_results, result_files['bbox'])
     elif isinstance(results[0], tuple):
         json_results = segm2json(dataset, results)
+        result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
+        result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
+        result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
+        mmcv.dump(json_results[0], result_files['bbox'])
+        mmcv.dump(json_results[1], result_files['segm'])
     elif isinstance(results[0], np.ndarray):
         json_results = proposal2json(dataset, results)
+        result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')
+        mmcv.dump(json_results, result_files['proposal'])
     else:
         raise TypeError('invalid type of results')
-    mmcv.dump(json_results, out_file)
+    return result_files
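Taken together, results2json now writes one JSON file per result type and returns their paths, and coco_eval looks up the right file for each requested type instead of taking a single path. A rough usage sketch; the dataset and outputs objects and the output prefix are placeholders, and the import path simply mirrors the file shown above:

from mmdet.core.evaluation.coco_utils import coco_eval, results2json

# out_file is now a prefix, not a finished '.json' path; results2json
# appends '.bbox.json' / '.segm.json' itself and returns the mapping.
result_files = results2json(dataset, outputs, 'work_dir/results')
# e.g. {'bbox': 'work_dir/results.bbox.json',
#       'proposal': 'work_dir/results.bbox.json',
#       'segm': 'work_dir/results.segm.json'}   # segm only for mask models

coco_eval(result_files, ['bbox', 'segm'], dataset.coco)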
mmdet/core/evaluation/eval_hooks.py  (+7 −6)

@@ -135,15 +135,15 @@ class CocoDistEvalRecallHook(DistEvalHook):
 class CocoDistEvalmAPHook(DistEvalHook):

     def evaluate(self, runner, results):
-        tmp_file = osp.join(runner.work_dir, 'temp_0.json')
-        results2json(self.dataset, results, tmp_file)
+        tmp_file = osp.join(runner.work_dir, 'temp_0')
+        result_files = results2json(self.dataset, results, tmp_file)

-        res_types = ['bbox',
-                     'segm'] if runner.model.module.with_mask else ['bbox']
+        res_types = ['bbox',
+                     'segm'] if runner.model.module.with_mask else ['bbox']
         cocoGt = self.dataset.coco
-        cocoDt = cocoGt.loadRes(tmp_file)
         imgIds = cocoGt.getImgIds()
         for res_type in res_types:
+            cocoDt = cocoGt.loadRes(result_files[res_type])
             iou_type = res_type
             cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
             cocoEval.params.imgIds = imgIds
@@ -159,4 +159,5 @@ class CocoDistEvalmAPHook(DistEvalHook):
                 '{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                 '{ap[4]:.3f} {ap[5]:.3f}').format(ap=cocoEval.stats[:6])
         runner.log_buffer.ready = True
-        os.remove(tmp_file)
+        for res_type in res_types:
+            os.remove(result_files[res_type])
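A side effect worth noting: the hook now passes a bare prefix ('temp_0', no '.json') and lets results2json pick the file names, so during evaluation the work dir briefly holds one temp file per result type, and the final loop removes each of them. A short sketch of the paths involved, with a hypothetical work dir:

import os.path as osp

work_dir = 'work_dirs/example'            # stand-in for runner.work_dir
tmp_file = osp.join(work_dir, 'temp_0')   # prefix only, no '.json'

# results2json derives one file per type from that prefix:
#   work_dirs/example/temp_0.bbox.json
#   work_dirs/example/temp_0.segm.json   (only when the model has a mask head)
result_files = {t: '{}.{}.json'.format(tmp_file, t) for t in ('bbox', 'segm')}

# after evaluation, each one is deleted:
#   for res_type in res_types:
#       os.remove(result_files[res_type])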
tools/test.py  (+6 −6)

@@ -184,16 +184,16 @@ def main():
                 coco_eval(result_file, eval_types, dataset.coco)
             else:
                 if not isinstance(outputs[0], dict):
-                    result_file = args.out + '.json'
-                    results2json(dataset, outputs, result_file)
-                    coco_eval(result_file, eval_types, dataset.coco)
+                    result_files = results2json(dataset, outputs, args.out)
+                    coco_eval(result_files, eval_types, dataset.coco)
                 else:
                     for name in outputs[0]:
                         print('\nEvaluating {}'.format(name))
                         outputs_ = [out[name] for out in outputs]
-                        result_file = args.out + '.{}.json'.format(name)
-                        results2json(dataset, outputs_, result_file)
-                        coco_eval(result_file, eval_types, dataset.coco)
+                        result_file = args.out + '.{}'.format(name)
+                        result_files = results2json(dataset, outputs_,
+                                                    result_file)
+                        coco_eval(result_files, eval_types, dataset.coco)


 if __name__ == '__main__':
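In the dict-of-named-outputs branch, the prefix handed to results2json is now args.out plus '.{name}' with no trailing '.json', so each named output gets its own per-type files. A sketch of the resulting names, assuming args.out is 'results.pkl' and a hypothetical output name 'refine':

args_out = 'results.pkl'   # assumed value of args.out
name = 'refine'            # hypothetical key of outputs[0]

result_file = args_out + '.{}'.format(name)   # 'results.pkl.refine'
# results2json then writes and returns (per coco_utils.py above):
#   results.pkl.refine.bbox.json
#   results.pkl.refine.segm.json   (for mask models)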