Commit c2fa90b7 authored by nikhil_rayaprolu's avatar nikhil_rayaprolu
Browse files

adding changes for submission

parent b54f45c7
......@@ -60,6 +60,9 @@ RUN export uid=${HOST_UID} gid=${HOST_GID} && \
USER ${USER_NAME}
WORKDIR ${HOME_DIR}
# Custom changes for Submission
COPY ./requirements.txt .
RUN pip install -r requirements.txt --user
COPY . .
......
![CrowdAI-Logo](https://github.com/crowdAI/crowdai/raw/master/app/assets/images/misc/crowdai-logo-smile.svg?sanitize=true)
# crowdAI Mapping Challenge : Baseline
# crowdAI Food Challenge : Baseline
This repository contains the details of implementation of the Baseline submission using [Mask RCNN](https://arxiv.org/abs/1703.06870) which obtains a score of `[AP(IoU=0.5)=0.697 ; AR(IoU=0.5)=0.479]` for the [crowdAI Mapping Challenge](https://www.crowdai.org/challenges/mapping-challenge).
This repository contains the details of implementation of the Baseline submission using [Mask RCNN](https://arxiv.org/abs/1703.06870) which obtains a score of `[AP(IoU=0.5)=0.183 ; AR(IoU=0.5)=0.224]` for the [AICrowd Food Challenge](https://www.aicrowd.com/challenges/food-recognition-challenge).
# Installation
```
git clone https://github.com/crowdai/crowdai-mapping-challenge-mask-rcnn
cd crowdai-mapping-challenge-mask-rcnn
git clone https://gitlab.aicrowd.com/nikhil_rayaprolu/food-recognition/
cd food-recognition
# Please ensure that you use python3.6
pip install -r requirements.txt
python setup.py install
```
# Notebooks
Please follow the instructions on the relevant notebooks for the training, prediction and submissions.
Please follow the instructions on the relevant files for the training, prediction and submissions.
* [Training](Training.ipynb)
* [Prediction and Submission](Prediction-and-Submission.ipynb)
* [Training](Training.py)
* [Prediction and Submission](eval.py)
(_pre-trained weights for baseline submission included_)
# Results
![sample_predictions](images/predictions.png)
# How to Submit
As noted at https://github.com/AIcrowd/food-recognition-challenge-starter-kit/ this repository contains an `aicrowd.json` file and custom changes
to the Dockerfile:
```
# Custom changes for Submission
COPY ./requirements.txt .
RUN pip install -r requirements.txt --user
COPY . .
```
Changes to [run.py](run.py#L135) can be seen at line 135, where we call a function from [eval.py](eval.py#L82).
In run.py:
```
evaluate(test_images_path, predictions_output_path)
```
Inside the evaluate function in [eval.py](eval.py#L102), we pass the images into the model for detection.
In eval.py:
```
predictions = model.detect(images, verbose=0)
```
By writing this code, you are allowing the AICrowd auto-evaluators to pass private test data into your model and generate predictions at runtime
for the new data.
* [Training](train.py)
* [Prediction and Submission](eval.py)
(_pre-trained weights for baseline submission included_)
# Citation
```
@misc{crowdAIMappingChallengeBaseline2018,
author = {Mohanty, Sharada Prasanna},
title = {CrowdAI Mapping Challenge 2018 : Baseline with Mask RCNN},
year = {2018},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/crowdai/crowdai-mapping-challenge-mask-rcnn}},
commit = {bac1cf19adbc9d078122c6933da6f808c4ee590d}
}
@misc{AICrowdFoodChallengeBaseline2018,
author = {Nikhil, Rayaprolu},
title = {AICrowd Food Recognition Challenge 2019 : Baseline with Mask RCNN},
year = {2019},
publisher = {Gitlab},
journal = {Gitlab repository},
howpublished = {\url{https://gitlab.aicrowd.com/nikhil_rayaprolu/food-recognition}},
}
```
# Acknowledgements
......@@ -41,4 +66,4 @@ Many thanks to all the contributors of that project.
You are encouraged to checkout [https://github.com/matterport/Mask_RCNN](https://github.com/matterport/Mask_RCNN) for documentation on many other aspects of this code.
# Author
Sharada Mohanty [sharada.mohanty@epfl.ch](sharada.mohanty@epfl.ch)
Nikhil Rayaprolu [nr178@student.london.ac.uk](nr178@student.london.ac.uk)
......@@ -21,7 +21,7 @@ from pycocotools import mask as maskUtils
import coco #a slightly modified version
from mrcnn.evaluate import build_coco_results, evaluate_coco
from mrcnn.dataset import MappingChallengeDataset
from mrcnn.dataset import FoodChallengeDataset
from mrcnn import visualize
......@@ -40,7 +40,7 @@ from mrcnn.config import Config
from mrcnn import model as modellib, utils
PRETRAINED_MODEL_PATH = os.path.join(ROOT_DIR,"models/mask_rcnn_crowdai-mapping-challenge_0060.h5")
PRETRAINED_MODEL_PATH = os.path.join(ROOT_DIR,"models/mask_rcnn_crowdai-food-challenge_0060.h5")
LOGS_DIRECTORY = os.path.join(ROOT_DIR, "logs")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# IMAGE_DIR = os.path.join(ROOT_DIR, "data", "test")
......@@ -54,7 +54,7 @@ class InferenceConfig(coco.CocoConfig):
NUM_CLASSES = 41 # 1 Background + 40 food classes
IMAGE_MAX_DIM=320
IMAGE_MIN_DIM=320
NAME = "crowdai-mapping-challenge"
NAME = "crowdai-food-challenge"
config = InferenceConfig()
config.display()
......
......@@ -7,9 +7,9 @@ from pycocotools import mask as maskUtils
import os
class MappingChallengeDataset(utils.Dataset):
class FoodChallengeDataset(utils.Dataset):
def load_dataset(self, dataset_dir, load_small=False, return_coco=True):
""" Loads dataset released for the crowdAI Mapping Challenge(https://www.crowdai.org/challenges/mapping-challenge)
""" Loads dataset released for the AICrowd Food Challenge
Params:
- dataset_dir : root directory of the dataset (can point to the train/val folder)
- load_small : Boolean value which signals if the annotations for all the images need to be loaded into the memory,
......@@ -37,13 +37,13 @@ class MappingChallengeDataset(utils.Dataset):
# register classes
for _class_id in classIds:
self.add_class("crowdai-mapping-challenge", _class_id, self.coco.loadCats(_class_id)[0]["name"])
self.add_class("crowdai-food-challenge", _class_id, self.coco.loadCats(_class_id)[0]["name"])
# Register Images
for _img_id in image_ids:
assert(os.path.exists(os.path.join(image_dir, self.coco.imgs[_img_id]['file_name'])))
self.add_image(
"crowdai-mapping-challenge", image_id=_img_id,
"crowdai-food-challenge", image_id=_img_id,
path=os.path.join(image_dir, self.coco.imgs[_img_id]['file_name']),
width=self.coco.imgs[_img_id]["width"],
height=self.coco.imgs[_img_id]["height"],
......@@ -70,7 +70,7 @@ class MappingChallengeDataset(utils.Dataset):
"""
image_info = self.image_info[image_id]
assert image_info["source"] == "crowdai-mapping-challenge"
assert image_info["source"] == "crowdai-food-challenge"
instance_masks = []
class_ids = []
......@@ -79,7 +79,7 @@ class MappingChallengeDataset(utils.Dataset):
# of class IDs that correspond to each channel of the mask.
for annotation in annotations:
class_id = self.map_source_class_id(
"crowdai-mapping-challenge.{}".format(annotation['category_id']))
"crowdai-food-challenge.{}".format(annotation['category_id']))
if class_id:
m = self.annToMask(annotation, image_info["height"],
image_info["width"])
......@@ -100,7 +100,7 @@ class MappingChallengeDataset(utils.Dataset):
return mask, class_ids
else:
# Call super class to return an empty mask
return super(MappingChallengeDataset, self).load_mask(image_id)
return super(FoodChallengeDataset, self).load_mask(image_id)
def image_reference(self, image_id):
......@@ -109,7 +109,7 @@ class MappingChallengeDataset(utils.Dataset):
Ideally this function is supposed to return a URL
but in this case, we will simply return the image_id
"""
return "crowdai-mapping-challenge::{}".format(image_id)
return "crowdai-food-challenge::{}".format(image_id)
# The following two functions are from pycocotools with a few changes.
def annToRLE(self, ann, height, width):
......
......@@ -16,7 +16,7 @@ from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
from mrcnn.evaluate import build_coco_results, evaluate_coco
from mrcnn.dataset import MappingChallengeDataset
from mrcnn.dataset import FoodChallengeDataset
import zipfile
import urllib.request
......@@ -32,13 +32,13 @@ from mrcnn import model as modellib, utils
PRETRAINED_MODEL_PATH = os.path.join(ROOT_DIR,"data", "mask_rcnn_coco.h5")
LOGS_DIRECTORY = os.path.join(ROOT_DIR, "logs")
class MappingChallengeConfig(Config):
class FoodChallengeConfig(Config):
"""Configuration for training on data in MS COCO format.
Derives from the base Config class and overrides values specific
to the COCO dataset.
"""
# Give the configuration a recognizable name
NAME = "crowdai-mapping-challenge"
NAME = "crowdai-food-challenge"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
......@@ -57,7 +57,7 @@ class MappingChallengeConfig(Config):
IMAGE_MAX_DIM=256
IMAGE_MIN_DIM=256
config = MappingChallengeConfig()
config = FoodChallengeConfig()
config.display()
import keras.backend
......@@ -69,10 +69,10 @@ model_path = PRETRAINED_MODEL_PATH
model.load_weights(model_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
dataset_train = MappingChallengeDataset()
dataset_train = FoodChallengeDataset()
dataset_train.load_dataset(dataset_dir=os.path.join("data", "train"), load_small=False)
dataset_train.prepare()
dataset_val = MappingChallengeDataset()
dataset_val = FoodChallengeDataset()
val_coco = dataset_val.load_dataset(dataset_dir=os.path.join("data", "val"), load_small=False, return_coco=True)
dataset_val.prepare()
print("Training network heads")
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment