gotsulyak / food-recognition-benchmark-starter-kit · Commits

Commit e103508f
authored 3 years ago by iggotsul

    test2

Parent: 3c84d5ba
Showing 5 changed files with 1116 additions and 3 deletions:

Dockerfile        +53  −0
aicrowd.json      +3   −2
configs/solo.py   +820 −0
run.sh            +2   −1
test.py           +238 −0
Dockerfile  (new file, 100644)  +53 −0
# This is example dockerfile for Food Recognition Benchmark @ AIcrowd
FROM nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04

ENV DEBIAN_FRONTEND=noninteractive

# Install needed apt packages
COPY apt.txt apt.txt
RUN apt -qq update && xargs -a apt.txt apt -qq install -y --no-install-recommends \
 && rm -rf /var/cache/*

# Create user home directory
ENV USER aicrowd
ENV HOME_DIR /home/$USER

# Replace HOST_UID/HOST_GUID with your user / group id
ENV HOST_UID 1001
ENV HOST_GID 1001

# Use bash as default shell, rather than sh
ENV SHELL /bin/bash

# Set up user
RUN adduser --disabled-password \
    --gecos "Default user" \
    --uid ${HOST_UID} \
    ${USER}

USER ${USER}
WORKDIR ${HOME_DIR}

ENV CONDA_DIR ${HOME_DIR}/.conda
ENV PATH ${CONDA_DIR}/bin:${PATH}

# Download miniconda for python
RUN wget -nv -O miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-py37_4.8.2-Linux-x86_64.sh \
 && bash miniconda.sh -b -p ${CONDA_DIR} \
 && . ${CONDA_DIR}/etc/profile.d/conda.sh \
 && rm -rf miniconda.sh \
 && conda install pytorch==1.5.1 torchvision==0.6.1 cudatoolkit=10.1 -c pytorch \
 && conda clean -a -y

ENV FORCE_CUDA="1"

# Install needed pypi packages
USER ${USER}
RUN pip install numpy cython --no-cache-dir
COPY --chown=1001:1001 requirements.txt requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# (optional)
# (either add below as dependency in requirements.txt or uncomment below line)
# RUN pip install -U git+https://github.com/open-mmlab/mmdetection.git@v2.3.0

# Copy user files
COPY --chown=1001:1001 . ${HOME_DIR}
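For reference, a minimal sketch of building and running this image locally; the image tag, the --gpus flag, and the placeholder predictions path are assumptions, not part of the starter kit:

# Build the submission image from the repository root (tag is illustrative)
docker build -t food-recognition-submission .

# Run the entrypoint roughly the way the evaluator would; the output path
# below is a local placeholder for $AICROWD_PREDICTIONS_OUTPUT_PATH
docker run --rm --gpus all \
    -e AICROWD_PREDICTIONS_OUTPUT_PATH=/tmp/predictions.json \
    food-recognition-submission bash run.sh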
aicrowd.json  +3 −2

@@ -3,5 +3,6 @@
   "authors": ["aicrowd-bot"],
   "description": "Food Recognition Benchmark Submission",
   "license": "MIT",
-  "gpu": false
-}
+  "gpu": true,
+  "debug": true
+}
\ No newline at end of file
configs/solo.py  (new file, 100644)  +820 −0  (diff collapsed; contents not shown)
run.sh  +2 −1

 #!/bin/bash
-python predict.py
+python test.py ./configs/solo.py ./models/epoch_20.pth \
+    --json_out $AICROWD_PREDICTIONS_OUTPUT_PATH
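To exercise the updated entrypoint outside the container, a minimal sketch; the output path is a local placeholder and it assumes models/epoch_20.pth is present:

# Point the expected environment variable at a local file, then run the script
export AICROWD_PREDICTIONS_OUTPUT_PATH=/tmp/predictions.json
bash run.sh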
test.py  (new file, 100644)  +238 −0
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import time
import warnings

import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
                         wrap_fp16_model)

from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
                            replace_ImageToTensor)
from mmdet.models import build_detector


def parse_args():
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir',
        help='the directory to save the file containing evaluation metrics')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function (deprecate), '
        'change to --eval-options instead.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    if args.options and args.eval_options:
        raise ValueError(
            '--options and --eval-options cannot be both '
            'specified, --options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args


def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None

    # in case the test dataset is concatenated
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max(
            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if samples_per_gpu > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    rank, _ = get_dist_info()
    # allows not to create
    if args.work_dir is not None and rank == 0:
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this walkaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show,
                                  args.show_dir, args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            metric = dataset.evaluate(outputs, **eval_kwargs)
            print(metric)
            metric_dict = dict(config=args.config, metric=metric)
            if args.work_dir is not None and rank == 0:
                mmcv.dump(metric_dict, json_file)


if __name__ == '__main__':
    main()
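Based on the argparse options defined above, a hedged usage sketch for running the script directly; the checkpoint path and metric choice are illustrative:

# Run single-GPU inference, evaluate the segm metric, and keep raw outputs
python test.py configs/solo.py models/epoch_20.pth --eval segm --out results.pkl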