Commit 416c43d5 authored by Jyotish P

Override participant entrypoint

parent 78fe054c
@@ -20,6 +20,9 @@ docker:
 evaluation:
   global:
+    files:
+      predict.py: predict.py
+      run.sh: run.sh
     resources:
       cpu: 1
       memory: 2Gi
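The new files: mapping is what the commit title means by overriding the participant entrypoint: predict.py and run.sh are declared as evaluation files, so they get staged into the evaluation workspace and run.sh takes over as the entrypoint. As a rough sketch of the idea only (stage_and_run and every name in it are hypothetical, not AIcrowd's actual implementation), a runner honoring such a mapping could look like:

import os
import shutil
import subprocess

import yaml  # assumes PyYAML is available


def stage_and_run(config_path, submission_dir, workspace):
    # Hypothetical runner; illustrates the files: override, nothing more.
    with open(config_path) as fp:
        config = yaml.safe_load(fp)
    # evaluation.global.files maps a workspace file name to a submission file.
    files = config["evaluation"]["global"].get("files", {})
    for target, source in files.items():
        shutil.copy(os.path.join(submission_dir, source),
                    os.path.join(workspace, target))
    # With predict.py and run.sh staged, run.sh becomes the entrypoint.
    subprocess.run(["bash", "run.sh"], cwd=workspace, check=True)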
predict.py

import os

import pandas as pd
import timeout_decorator

from config import SubmissionConfig


class Timeouts:
    # Seconds allowed for each stage before timeout_decorator aborts it.
    model_initialization = 10
    predict_batch = 10


class Constants:
    SHARED_DISK = os.getenv("AICROWD_SHARED_DISK", "/shared")
    PREDICTIONS_OUTPUT_PATH = os.path.join(SHARED_DISK, "predictions.csv")
    DATASET_DIR = os.getenv("AICROWD_DATASET_PATH", "/dataset")
    TEST_DATA_PATH = os.path.join(DATASET_DIR, "test.csv")
    DEBUG_RUN = os.getenv("AICROWD_DEBUG_RUN", "false") == "true"


@timeout_decorator.timeout(Timeouts.model_initialization)
def initialize_predictor():
    return SubmissionConfig.predictor()


@timeout_decorator.timeout(Timeouts.predict_batch)
def predict_batch(predictor, batch_input):
    return predictor.predict_batch(batch_input)


def append_results(results):
    # Debug runs skip writing so they cannot pollute the shared predictions file.
    if Constants.DEBUG_RUN:
        return
    with open(Constants.PREDICTIONS_OUTPUT_PATH, "a") as fp:
        for result in results:
            fp.write(f"{result}\n")


def main():
    predictor = initialize_predictor()
    test_data = pd.read_csv(Constants.TEST_DATA_PATH)
    # Rows are fed one at a time, each wrapped as a single-element batch.
    for _, row in test_data.iterrows():
        result = predict_batch(predictor, [row])
        append_results(result)


if __name__ == "__main__":
    main()
run.sh

#!/bin/bash
python predict.py
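predict.py depends on a participant-supplied config.py that exposes SubmissionConfig with a predictor attribute: a zero-argument callable (typically a class) whose instances implement predict_batch. A minimal sketch of that contract (ConstantPredictor and its output are illustrative, not part of this commit):

# config.py -- sketch of the interface predict.py expects.
# ConstantPredictor is a hypothetical example, not part of this commit.


class ConstantPredictor:
    def __init__(self):
        # Model loading must finish within Timeouts.model_initialization
        # seconds, or timeout_decorator aborts the run.
        pass

    def predict_batch(self, batch_input):
        # One prediction per input row; a fixed dummy label here.
        return [0 for _ in batch_input]


class SubmissionConfig:
    predictor = ConstantPredictor

Each predict_batch call must likewise finish within Timeouts.predict_batch seconds; exceeding either budget raises a TimeoutError and fails the run.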
 import os
 import time
 import pandas as pd
+import numpy as np
 from sklearn.metrics import f1_score, log_loss

 class Constants:
-    SHARED_DISK = os.getenv("AICROWD_SHARED_DISK", "test/shared")
+    SHARED_DISK = os.getenv("AICROWD_SHARED_DIR", "test/shared")
     PREDICTIONS_DIR = os.getenv("AICROWD_PREDICTIONS_DIR", "test/predictions")
-    GROUND_TRUTH_DIR = os.getenv("AICROWD_GROUND_TRUTH_PATH", "test")
+    GROUND_TRUTH_DIR = os.getenv("AICROWD_GROUND_TRUTH_DIR", "test")
     PREDICTIONS_FILE_PATH = os.path.join(SHARED_DISK, "predictions.csv")
     GROUND_TRUTH_PATH = os.path.join(GROUND_TRUTH_DIR, "test_ground_truth.csv")
@@ -18,9 +19,6 @@ class AIcrowdEvaluator:
         pass

     def evaluate(self):
-        while not os.path.exists("/tmp/let-go"):
-            time.sleep(10)
         predictions = pd.read_csv(Constants.PREDICTIONS_FILE_PATH, header=None)
         ground_truth = pd.read_csv(Constants.GROUND_TRUTH_PATH, header=None)
@@ -33,7 +31,7 @@ class AIcrowdEvaluator:
             y_pred[index][val] = 1
-        f1 = f1_score(ground_truth, submission, average="micro")
+        f1 = f1_score(ground_truth, predictions, average="micro")
         log_loss_score = log_loss(ground_truth, y_pred, labels=labels)
         return {
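Pieced together from the visible hunks, evaluate() now reads both CSVs directly (the /tmp/let-go polling loop is gone), one-hot encodes the hard class predictions into y_pred so log_loss will accept them, and scores micro-F1 against the predictions column rather than the previously undefined submission name. A hedged reconstruction of the method (the labels derivation, the integer-label assumption, and the returned key names are guesses; the diff truncates before them):

# Hypothetical reconstruction of evaluate(); only the lines visible in
# the diff are certain. labels and the returned keys are assumptions.
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score, log_loss


def evaluate(predictions_path, ground_truth_path):
    predictions = pd.read_csv(predictions_path, header=None)
    ground_truth = pd.read_csv(ground_truth_path, header=None)
    labels = sorted(ground_truth[0].unique())  # assumed: integer labels 0..k-1
    # One-hot encode hard class predictions so log_loss accepts them.
    y_pred = np.zeros((len(predictions), len(labels)))
    for index, val in enumerate(predictions[0]):
        y_pred[index][val] = 1
    f1 = f1_score(ground_truth, predictions, average="micro")
    log_loss_score = log_loss(ground_truth, y_pred, labels=labels)
    return {"f1": f1, "log_loss": log_loss_score}  # key names assumed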
@@ -2,3 +2,5 @@ numpy
 pandas
 minio
 loguru
+sklearn