diff --git a/docker_run.sh b/docker_run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..94c2d2a89b66d5fa570c6f502cb2f98b43112971
--- /dev/null
+++ b/docker_run.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+# This script builds a Docker image from the current directory
+# and runs a container from this image, executing local_evaluation.py
+# with the current directory mounted at /submission inside the container.
+
+# Step 1: Define the name of the Docker image.
+LAST_COMMIT_HASH=$(git rev-parse --short HEAD)
+IMAGE_NAME="aicrowd/meta-kddcup24-crag-submission:${LAST_COMMIT_HASH}"
+
+
+# Check if OPENAI_API_KEY is set
+if [ -z "$OPENAI_API_KEY" ]; then
+    echo "Error: OPENAI_API_KEY is not set."
+    echo "Please set the OPENAI_API_KEY environment variable and try again."
+    exit 1
+fi
+# Check if EVALUATION_MODEL_NAME is set
+if [ -z "$EVALUATION_MODEL_NAME" ]; then
+    echo "Warning: EVALUATION_MODEL_NAME is not set."
+    echo "Using the default model: gpt-4-0125-preview"
+fi
+
+
+# Step 2: Build the Docker image.
+# The '.' at the end specifies that the Docker context is the current directory.
+# This means Docker will look for a Dockerfile in the current directory to build the image.
+START_TIME=$(date +%s)
+DOCKER_BUILDKIT=1 docker build -t "$IMAGE_NAME" .
+BUILD_STATUS=$?
+if [ $BUILD_STATUS -ne 0 ]; then
+    echo "Docker build failed. Exiting..."
+    exit $BUILD_STATUS
+fi
+END_TIME=$(date +%s)
+BUILD_TIME=$((END_TIME - START_TIME))
+echo "Total build time: $BUILD_TIME seconds"
+
+# Step 3: Run the Docker container.
+# -v "$(pwd)":/submission mounts the current directory ($(pwd) outputs the current directory path)
+# to /submission inside the container. This way, the container can access the contents
+# of the current directory as if they were located at /submission inside the container.
+# 'python local_evaluation.py' is the command executed inside the container.
+# The -w flag sets the working directory to /submission.
+# The script is then run using the software runtime set up in the Dockerfile.
+docker run \
+    --gpus all \
+    -v "$(pwd)":/submission \
+    -w /submission \
+    -e OPENAI_API_KEY="$OPENAI_API_KEY" \
+    -e EVALUATION_MODEL_NAME="${EVALUATION_MODEL_NAME:-gpt-4-0125-preview}" \
+    "$IMAGE_NAME" python local_evaluation.py
+
+# Note: We assume you have the NVIDIA Container Toolkit installed and configured,
+# which is required for the --gpus all flag. If you are not using GPUs, you can remove this flag.
+
+
+# Note 1: Please refer to the Dockerfile to understand how the software runtime is set up.
+# The Dockerfile should include all necessary commands to install Python, the necessary
+# dependencies, and any other software required to run local_evaluation.py.
+
+# Note 2: See the .dockerignore file in the root of this repository.
+# In the .dockerignore file, specify any files or directories that should not be included
+# in the Docker context. This typically includes large files, models, or datasets that
+# are not necessary for building the Docker image. Excluding these can significantly
+# speed up the build process by reducing the size of the build context sent to the Docker daemon.
+
+# Ensure your Dockerfile and .dockerignore are properly set up before running this script.
diff --git a/docs/runtime.md b/docs/runtime.md
index 60dd631ecf991cbbc4736631fc974c3295c54967..cbed6ac2158e151895063398e6a84435e7c1d8e2 100644
--- a/docs/runtime.md
+++ b/docs/runtime.md
@@ -25,4 +25,4 @@ These files are used to construct your **AIcrowd submission docker containers**
 
 ----
 
-To test your image builds locally, you can use [repo2docker](https://github.com/jupyterhub/repo2docker)
+To test your image builds locally, you can use the included [docker_run.sh](../docker_run.sh) script.
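+
+For example, a typical local run might look like this (the API key value is a placeholder; `EVALUATION_MODEL_NAME` is optional and falls back to `gpt-4-0125-preview`):
+
+```bash
+# Required: local_evaluation.py calls the OpenAI API for evaluation
+export OPENAI_API_KEY="sk-..."
+
+# Optional: override the evaluation model
+export EVALUATION_MODEL_NAME="gpt-4-0125-preview"
+
+# Build the image and run local_evaluation.py inside the container
+./docker_run.sh
+```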
diff --git a/docs/submission.md b/docs/submission.md
index 3097c749cae74a5eb6e1bdf6bcecbacce9172eee..4dc03ab4aaa44c962a8b646f90f672b7fc83a88f 100644
--- a/docs/submission.md
+++ b/docs/submission.md
@@ -58,9 +58,114 @@ Specify the track by setting the appropriate `challenge_id` in your [aicrowd.jso
 
 | Track Name                        | Challenge ID                                        |
 |-----------------------------------|-----------------------------------------------------|
-| Retrieval Summarization   | `retrieval-summarization` |
-| Knowledge Graph and Web Retrieval      | `knowledge-graph-and-web-retrieval`    |
-| End-to-end Retrieval Augmented Generation           | `end-to-end-retrieval-augmented-generation`         |
+| Retrieval Summarization   | `meta-kdd-cup-24-crag-retrieval-summarization` |
+| Knowledge Graph and Web Retrieval      | `meta-kdd-cup-24-crag-knowledge-graph-and-web-retrieval`    |
+| End-to-end Retrieval Augmented Generation           | `meta-kdd-cup-24-crag-end-to-end-retrieval-augmented-generation`         |
+
+## Submission Entry Point
+
+The evaluation process will instantiate a model from `models/user_config.py` for evaluation. Ensure this configuration is set correctly.
+
+## Setting Up SSH Keys
+
+You will have to add your SSH keys to your GitLab account by going to your profile settings [here](https://gitlab.aicrowd.com/profile/keys). If you do not have an SSH key yet, you will first need to [generate one](https://docs.gitlab.com/ee/ssh/README.html#generating-a-new-ssh-key-pair).
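+
+For example, generating and displaying a new key pair might look like this (an `ed25519` key and the default file location are assumed; replace the email with your own):
+
+```bash
+# Generate a new SSH key pair
+ssh-keygen -t ed25519 -C "you@example.com"
+
+# Print the public key, then paste it into the GitLab SSH Keys settings page
+cat ~/.ssh/id_ed25519.pub
+```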
+
+
+## Managing Large Model Files with Git LFS
+
+When preparing your submission, it's crucial to ensure all necessary models and files required by your inference code are properly saved and included. Due to the potentially large size of model weight files, we highly recommend using Git Large File Storage (Git LFS) to manage these files efficiently.
+
+### Why Use Git LFS?
+
+Git LFS is designed to handle large files more effectively than Git's default storage mechanism. Using it ensures smoother operations and helps avoid common errors associated with large files, such as:
+
+- `fatal: the remote end hung up unexpectedly`
+- `remote: fatal: pack exceeds maximum allowed size`
+
+These errors typically occur when large files are directly checked into the Git repository without Git LFS, leading to challenges in handling and transferring those files.
+
+### Steps to Use Git LFS
+
+1. **Install Git LFS**: If you haven't already, install Git LFS on your machine. Detailed instructions can be found [here](https://git-lfs.github.com/).
+
+2. **Track Large Files**: Use Git LFS to track the large files within your project. You can do this by running `git lfs track "*.model"` (replace `*.model` with your file type).
+
+3. **Add and Commit**: After tracking the large files with Git LFS, add and commit them as you would with any other file. Git LFS will automatically handle these files differently to optimize their storage and transfer.
+
+4. **Push to Repository**: When you push your changes to the repository, Git LFS will manage the large files, ensuring a smooth push process.
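+
+Putting these steps together, a minimal session might look like the following sketch (the `*.model` pattern, file name, and commit message are illustrative; adjust them to your repository):
+
+```bash
+# One-time setup: install the Git LFS hooks for this clone
+git lfs install
+
+# Track large weight files with Git LFS (writes a rule to .gitattributes)
+git lfs track "*.model"
+
+# Commit the tracking rules together with the tracked files
+git add .gitattributes
+git add models/my_model.model   # placeholder file name
+git commit -m "Add model weights via Git LFS"
+
+# Push as usual; Git LFS uploads the tracked files separately
+git push
+```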
+
+### Handling Previously Committed Large Files
+
+If you have already committed large files directly to your Git repository without using Git LFS, you may encounter issues. These files, even if not present in the current working directory, could still be in the Git history, leading to errors.
+
+To resolve this, ensure that the large files are removed from the Git history and then re-add and commit them using Git LFS. This process cleans up the repository's history and avoids the aforementioned errors.
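+
+One way to do this is `git lfs migrate`, which rewrites history so that matching files are stored via Git LFS. A sketch (the `*.model` pattern is illustrative; rewriting history requires a force push, so coordinate with any collaborators first):
+
+```bash
+# Rewrite existing commits so that *.model files are stored via Git LFS
+git lfs migrate import --include="*.model"
+
+# History has been rewritten, so the remote branch must be force-pushed
+git push --force
+```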
+
+For more information on how to upload large files to your submission and detailed guidance on using Git LFS, please refer to [this detailed guide](https://discourse.aicrowd.com/t/how-to-upload-large-files-size-to-your-submission/2304).
+
+**Note**: Properly managing large files not only facilitates smoother operations for you but also ensures that the evaluation process can proceed without hindrances.
+
+# Guide to Making Your First Submission
+
+This document is designed to help you make your first submission smoothly. Below, you'll find step-by-step instructions on specifying your software runtime and dependencies, structuring your code, and, finally, submitting your project.
+
+# Table of Contents
+
+1. [Specifying Software Runtime and Dependencies](#specifying-software-runtime-and-dependencies)
+2. [Code Structure Guidelines](#code-structure-guidelines)
+3. [Submitting to Different Tracks](#submitting-to-different-tracks)
+4. [Submission Entry Point](#submission-entry-point)
+5. [Setting Up SSH Keys](#setting-up-ssh-keys)
+6. [Managing Large Model Files with Git LFS](#managing-large-model-files-with-git-lfs)
+    - [Why Use Git LFS?](#why-use-git-lfs)
+    - [Steps to Use Git LFS](#steps-to-use-git-lfs)
+    - [Handling Previously Committed Large Files](#handling-previously-committed-large-files)
+7. [How to Submit Your Code](#how-to-submit-your-code)
+
+
+## Specifying Software Runtime and Dependencies
+
+Our platform supports custom runtime environments. This means you have the flexibility to choose any libraries or frameworks necessary for your project. Here’s how you can specify your runtime and dependencies:
+
+- **`requirements.txt`**: List any PyPI packages your project needs.
+- **`apt.txt`**: Include any apt packages required.
+- **`Dockerfile`**: Optionally, you can provide your own Dockerfile. An example is located at `utilities/_Dockerfile`, which can serve as a helpful starting point.
+
+For detailed setup instructions regarding runtime dependencies, refer to the documentation in the `docs/runtime.md` file.
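+
+If you want to sanity-check the declared dependencies outside Docker, one possible local check (assuming `apt.txt` lists one package name per line) is:
+
+```bash
+# Install the apt packages listed in apt.txt
+xargs -a apt.txt sudo apt-get install -y
+
+# Install the Python packages listed in requirements.txt
+pip install -r requirements.txt
+```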
+
+## Code Structure Guidelines
+
+Your project should follow the structure outlined in the starter kit. Here’s a brief overview of what each component represents:
+
+```
+.
+├── README.md                       # Project documentation and setup instructions
+├── aicrowd.json                    # Submission meta information - like your username, track name
+├── data
+│   └── development.json            # Development dataset for local testing
+├── docs
+│   └── runtime.md                  # Documentation on the runtime environment setup, dependency configs
+├── local_evaluation.py             # Use this to check your model evaluation flow locally
+├── metrics.py                      # Scripts to calculate evaluation metrics for your model's performance
+├── models
+│   ├── README.md                   # Documentation specific to the implementation of model interfaces
+│   ├── base_model.py               # Base model class 
+│   ├── dummy_model.py              # A simple or placeholder model for demonstration or testing
+│   └── user_config.py              # IMPORTANT: Configuration file to specify your model 
+├── requirements.txt                # Python packages to be installed for model development
+└── Dockerfile                      # Example Dockerfile for specifying runtime via Docker
+```
+
+Remember, **your submission metadata JSON (`aicrowd.json`)** is crucial for mapping your submission to the challenge. Ensure it contains the correct `challenge_id`, `authors`, and other necessary information. To utilize GPUs, set the `"gpu": true` flag in your `aicrowd.json`.
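+
+As an illustration, a minimal `aicrowd.json` covering just the fields mentioned above could be created as follows (the `challenge_id` shown is for the Retrieval Summarization track, the author name is a placeholder, and your actual file may contain additional fields):
+
+```bash
+cat > aicrowd.json <<'EOF'
+{
+  "challenge_id": "meta-kdd-cup-24-crag-retrieval-summarization",
+  "authors": ["your-aicrowd-username"],
+  "gpu": true
+}
+EOF
+```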
+
+## Submitting to Different Tracks
+
+Specify the track by setting the appropriate `challenge_id` in your [aicrowd.json](aicrowd.json). Here are the challenge IDs for various tracks:
+
+| Track Name                        | Challenge ID                                        |
+|-----------------------------------|-----------------------------------------------------|
+| Retrieval Summarization   | `meta-kdd-cup-24-crag-retrieval-summarization` |
+| Knowledge Graph and Web Retrieval      | `meta-kdd-cup-24-crag-knowledge-graph-and-web-retrieval`    |
+| End-to-end Retrieval Augmented Generation           | `meta-kdd-cup-24-crag-end-to-end-retrieval-augmented-generation`         |
 
 ## Submission Entry Point
 
diff --git a/local_evaluation.py b/local_evaluation.py
index 2f040284cae069eb8716621acfa75aff99348a34..f77980978e8522e691cb67043bd15436fb56b41e 100644
--- a/local_evaluation.py
+++ b/local_evaluation.py
@@ -3,7 +3,6 @@ import os
 from datetime import datetime
 
 from loguru import logger
-from models.user_config import UserModel
 from openai import APIConnectionError, OpenAI, RateLimitError
 from prompts.templates import IN_CONTEXT_EXAMPLES, INSTRUCTIONS
 from tqdm.auto import tqdm
@@ -146,6 +145,8 @@ def evaluate_predictions(predictions, evaluation_model_name, openai_client):
 
 
 if __name__ == "__main__":
+    from models.user_config import UserModel
+
     DATASET_PATH = "example_data/"
     EVALUATION_MODEL_NAME = os.getenv(
         "EVALUATION_MODEL_NAME", "gpt-4-0125-preview"
diff --git a/requirements.txt b/requirements.txt
index 39dab0fdd98d55da5ce06ddf1dacbdbda14b1372..41e536ab14609661fafdec1e1171f76ffd69cfc0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,3 @@
 torch
-transformers
\ No newline at end of file
+transformers
+loguru
\ No newline at end of file