diff --git a/README.rst b/README.rst
index 610d202035af31e7b1faa2db25d0772dc252783b..1559be647c840b90c149a59d9ec612fa79ba0704 100644
--- a/README.rst
+++ b/README.rst
@@ -54,6 +54,12 @@ Quick Start
     $ conda install -c anaconda tk  
     $ pip install flatland-rl
 
+* Test that the installation works
+
+.. code-block:: console
+
+    $ flatland-demo
+
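+This opens a window showing a small rail environment in which the agents take random actions, confirming that both the environment and the rendering stack work.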
 
 Basic Usage
 ============
diff --git a/docs/conf.py b/docs/conf.py
index 8223e1c7238176cccaaa116c0776bf80438750c7..2ec79aa8f3288861d82ce2b2f20d4cd1329d323c 100755
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -33,7 +33,7 @@ sys.path.insert(0, os.path.abspath('..'))
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx']
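+
+# sphinx.ext.intersphinx links references to objects documented in other
+# projects. A minimal, illustrative mapping (an assumption; adjust it to the
+# projects you actually cross-reference):
+# intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}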
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
diff --git a/docs/flatland.evaluators.rst b/docs/flatland.evaluators.rst
new file mode 100644
index 0000000000000000000000000000000000000000..022e8586b7933af4f7eac78a01168c27639c65c2
--- /dev/null
+++ b/docs/flatland.evaluators.rst
@@ -0,0 +1,46 @@
+flatland.evaluators package
+===========================
+
+Submodules
+----------
+
+flatland.evaluators.aicrowd\_helpers module
+-------------------------------------------
+
+.. automodule:: flatland.evaluators.aicrowd_helpers
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+flatland.evaluators.client module
+---------------------------------
+
+.. automodule:: flatland.evaluators.client
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+flatland.evaluators.messages module
+-----------------------------------
+
+.. automodule:: flatland.evaluators.messages
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+flatland.evaluators.service module
+----------------------------------
+
+.. automodule:: flatland.evaluators.service
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: flatland.evaluators
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/docs/flatland.rst b/docs/flatland.rst
index fce09a2c0f1f2b442b39d6e51f160fce7981b757..88e8ec93fd4f6c89c1f6e20c55defbddaa9b28fa 100644
--- a/docs/flatland.rst
+++ b/docs/flatland.rst
@@ -8,6 +8,7 @@ Subpackages
 
     flatland.core
     flatland.envs
+    flatland.evaluators
     flatland.utils
 
 Submodules
diff --git a/docs/index.rst b/docs/index.rst
index 47201ffd35914052e3fde0a72a5d393f33c419e1..08158b557b337cd30515fc34eb9b0a58ecfe4684 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -9,6 +9,7 @@ Welcome to flatland's documentation!
    installation
    about_flatland
    gettingstarted
+   localevaluation
    modules
    FAQ
    contributing
diff --git a/docs/localevaluation.rst b/docs/localevaluation.rst
new file mode 100644
index 0000000000000000000000000000000000000000..abfab39aaa33e836981d8e890bd1a9070d3dcae0
--- /dev/null
+++ b/docs/localevaluation.rst
@@ -0,0 +1,66 @@
+================
+Local Evaluation
+================
+
+This document explains how to evaluate your submissions locally before making
+an official submission to the competition.
+
+Requirements
+--------------
+
+* **flatland-rl** : We expect that you have ``flatland-rl`` installed by following the instructions in :doc:`installation`.
+
+* **redis** : You will also need to have `redis installed <https://redis.io/topics/quickstart>`_, and **it should be running in the background** (you can verify this as shown below).
+
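+A quick way to check that redis is up and reachable is to ping it; it should reply with ``PONG``:
+
+.. code-block:: console
+
+    $ redis-cli ping
+    PONG
+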
+Test Data
+--------------
+
+* **test env data** : You can `download and untar the test-env-data <https://www.aicrowd.com/challenges/flatland-challenge/dataset_files>`_
+  at a location of your choice, let's say ``/path/to/test-env-data/``. After untarring the folder, the folder structure should look something like:
+
+
+.. code-block:: console
+
+    .
+    └── test-env-data
+        ├── Test_0
+        │   ├── Level_0.pkl
+        │   └── Level_1.pkl
+        ├── Test_1
+        │   ├── Level_0.pkl
+        │   └── Level_1.pkl
+        ├..................
+        ├..................
+        ├── Test_8
+        │   ├── Level_0.pkl
+        │   └── Level_1.pkl
+        └── Test_9
+            ├── Level_0.pkl
+            └── Level_1.pkl
+
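+The evaluator discovers the test envs by globbing for ``*/*.pkl`` inside this folder, so the env files must sit exactly one folder level below the root, as in the ``Test_*/Level_*.pkl`` layout shown above.
+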
+Evaluation Service
+------------------
+
+* **start evaluation service** : You can then start the evaluator by running:
+
+.. code-block:: console
+
+    flatland-evaluator --tests /path/to/test-env-data/
+
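+The evaluator also accepts an optional ``--service_id`` flag, which defaults to ``FLATLAND_RL_SERVICE_ID`` and has to match the service id used by the client:
+
+.. code-block:: console
+
+    flatland-evaluator --tests /path/to/test-env-data/ --service_id FLATLAND_RL_SERVICE_ID
+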
+RemoteClient
+------------------
+
+* **run client** : Some `sample submission code can be found in the starter-kit <https://github.com/AIcrowd/flatland-challenge-starter-kit/>`_,
+  but before you can run your code locally using ``FlatlandRemoteClient``, you will have to set the ``AICROWD_TESTS_FOLDER``
+  environment variable to the location where you previously untarred the ``test-env-data`` folder:
+
+.. code-block:: console
+
+    export AICROWD_TESTS_FOLDER="/path/to/test-env-data/"
+
+    # or on Windows:
+    #
+    # set AICROWD_TESTS_FOLDER=\path\to\test-env-data\
+
+    # and then finally run your code
+    python run.py
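+
+If you just want to verify that the whole pipeline works end to end, a minimal client loop along the following lines should do. This is only a sketch: it assumes the ``env_create``, ``env_step`` and ``submit`` methods exposed by ``FlatlandRemoteClient`` in the starter kit, and it drives the agents with random actions.
+
+.. code-block:: python
+
+    import numpy as np
+    from flatland.evaluators.client import FlatlandRemoteClient
+
+    # Picks up AICROWD_TESTS_FOLDER unless test_envs_root is passed explicitly
+    remote_client = FlatlandRemoteClient()
+
+    while True:
+        # env_create returns the first observation of the next test env,
+        # or a falsy value once all test envs have been evaluated
+        observation = remote_client.env_create()
+        if not observation:
+            break
+        while True:
+            # Random actions, for illustration only
+            action = {idx: np.random.randint(0, 5)
+                      for idx, _ in enumerate(remote_client.env.agents)}
+            observation, all_rewards, done, info = remote_client.env_step(action)
+            if done['__all__']:
+                break
+
+    # submit() is assumed to report the aggregated results to the evaluator,
+    # as in the starter kit
+    print(remote_client.submit())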
diff --git a/flatland/cli.py b/flatland/cli.py
index f4d4317676d2bf3de0e2f6fe522d5a6e106741f0..32e8d9dc786b0412795694fc985c90aa55fc2e91 100644
--- a/flatland/cli.py
+++ b/flatland/cli.py
@@ -2,18 +2,84 @@
 
 """Console script for flatland."""
 import sys
-
 import click
+import numpy as np
+import time
+from flatland.envs.generators import complex_rail_generator
+from flatland.envs.rail_env import RailEnv
+from flatland.utils.rendertools import RenderTool
+from flatland.evaluators.service import FlatlandRemoteEvaluationService
+import redis
 
 
 @click.command()
-def main(args=None):
-    """Console script for flatland."""
-    click.echo("Replace this message by putting your code into "
-               "flatland.cli.main")
-    click.echo("See click documentation at http://click.pocoo.org/")
+def demo(args=None):
+    """Demo script to check installation"""
+    env = RailEnv(
+            width=15,
+            height=15,
+            rail_generator=complex_rail_generator(
+                                    nr_start_goal=10,
+                                    nr_extra=1,
+                                    min_dist=8,
+                                    max_dist=99999),
+            number_of_agents=5)
+    
+    env._max_episode_steps = int(15 * (env.width + env.height))
+    env_renderer = RenderTool(env)
+
+    while True:
+        obs = env.reset()
+        _done = False
+        # Run a single episode here
+        step = 0
+        while not _done:
+            # Compute Action
+            _action = {}
+            for _idx, _ in enumerate(env.agents):
+                _action[_idx] = np.random.randint(0, 5)
+            obs, all_rewards, done, _ = env.step(_action)
+            _done = done['__all__']
+            step += 1
+            env_renderer.render_env(
+                show=True,
+                frames=False,
+                show_observations=False,
+                show_predictions=False
+            )
+            time.sleep(0.3)
     return 0
 
 
+@click.command()
+@click.option('--tests', 
+              type=click.Path(exists=True),
+              help="Path to folder containing Flatland tests",
+              required=True
+              )
+@click.option('--service_id', 
+              default="FLATLAND_RL_SERVICE_ID",
+              help="Evaluation Service ID. This has to match the service id on the client.",
+              required=False
+              )
+def evaluator(tests, service_id):
+    try:
+        redis_connection = redis.Redis()
+        redis_connection.ping()
+    except redis.exceptions.ConnectionError as e:
+        raise Exception(
+            "\nRedis server does not seem to be running on your localhost.\n"
+            "Please ensure that a redis server is running on your localhost."
+            ) from e
+    
+    grader = FlatlandRemoteEvaluationService(
+                test_env_folder=tests,
+                flatland_rl_service_id=service_id,
+                visualize=False,
+                verbose=False
+                )
+    grader.run()
+
+
 if __name__ == "__main__":
-    sys.exit(main())  # pragma: no cover
+    sys.exit(demo())  # pragma: no cover
diff --git a/flatland/evaluators/client.py b/flatland/evaluators/client.py
index f74af355d8e6233d7569d9a786068586bbf6a089..a4968c0c8e827c060e0e3f7de0cf28cc0658089b 100644
--- a/flatland/evaluators/client.py
+++ b/flatland/evaluators/client.py
@@ -46,6 +46,7 @@ class FlatlandRemoteClient(object):
                 remote_port=6379,
                 remote_db=0,
                 remote_password=None,
+                test_envs_root=None,
                 verbose=False):
 
         self.remote_host = remote_host
@@ -58,14 +59,22 @@ class FlatlandRemoteClient(object):
                                 db=remote_db,
                                 password=remote_password)
         self.namespace = "flatland-rl"
-        try:
-            self.service_id = os.environ['FLATLAND_RL_SERVICE_ID']
-        except KeyError:
-            self.service_id = "FLATLAND_RL_SERVICE_ID"
+        self.service_id = os.getenv(
+                            'FLATLAND_RL_SERVICE_ID',
+                            'FLATLAND_RL_SERVICE_ID'
+                            )
         self.command_channel = "{}::{}::commands".format(
                                     self.namespace,
                                     self.service_id
                                 )
+        if test_envs_root:
+            self.test_envs_root = test_envs_root
+        else:
+            self.test_envs_root = os.getenv(
+                                'AICROWD_TESTS_FOLDER',
+                                '/tmp/flatland_envs'
+                                )
+
         self.verbose = verbose
 
         self.env = None
@@ -161,6 +170,19 @@ class FlatlandRemoteClient(object):
             return observation
 
         test_env_file_path = _response['payload']['env_file_path']
+        print("Received Env : ", test_env_file_path)
+        test_env_file_path = os.path.join(
+            self.test_envs_root,
+            test_env_file_path
+        )
+        if not os.path.exists(test_env_file_path):
+            raise Exception(
+                "\nWe cannot seem to find the env file paths at the required location.\n"
+                "Did you remember to set the AICROWD_TESTS_FOLDER environment variable "
+                "to point to the location of the Tests folder ? \n"
+                "We are currently looking at `{}` for the tests".format(self.test_envs_root)
+                )
+        print("Current env path : ", test_env_file_path)
         self.env = RailEnv(
             width=1,
             height=1,
@@ -192,11 +214,15 @@ class FlatlandRemoteClient(object):
         remote_info = _payload['info']
 
         # Replicate the action in the local env
-        local_observation, local_rewards, local_done, local_info = \
+        local_observation, local_reward, local_done, local_info = \
             self.env.step(action)
         
-        assert are_dicts_equal(remote_reward, local_rewards)
-        assert are_dicts_equal(remote_done, local_done)
+        if not are_dicts_equal(remote_reward, local_reward):
+            raise Exception(
+                "local and remote `reward` are diverging: "
+                "{} != {}".format(remote_reward, local_reward))
+        if not are_dicts_equal(remote_done, local_done):
+            raise Exception("local and remote `done` are diverging")
         
         # Return local_observation instead of remote_observation
         # as the remote_observation is build using a dummy observation
diff --git a/flatland/evaluators/service.py b/flatland/evaluators/service.py
index b03f9692a7123b5dd74fba81072cf5fd240b5363..201a3978e5dcebb01592f97116f9cdd7d8387646 100644
--- a/flatland/evaluators/service.py
+++ b/flatland/evaluators/service.py
@@ -11,6 +11,7 @@ import numpy as np
 import msgpack
 import msgpack_numpy as m
 import os
+import glob
 import shutil
 import timeout_decorator
 import time
@@ -60,9 +61,7 @@ class FlatlandRemoteEvaluationService:
         # Test Env folder Paths
         self.test_env_folder = test_env_folder
         self.video_generation_envs = video_generation_envs
-        self.video_generation_indices = []
         self.env_file_paths = self.get_env_filepaths()
-        print(self.video_generation_indices)
 
         # Logging and Reporting related vars
         self.verbose = verbose
@@ -100,7 +99,7 @@ class FlatlandRemoteEvaluationService:
         self.env = False
         self.env_renderer = False
         self.reward = 0
-        self.simulation_count = 0
+        self.simulation_count = -1
         self.simulation_rewards = []
         self.simulation_percentage_complete = []
         self.simulation_steps = []
@@ -136,29 +135,21 @@ class FlatlandRemoteEvaluationService:
             ├── .......
             ├── .......
             └── Level_99.pkl 
-        """
-        env_paths = []
-        folder_path = self.test_env_folder
-        for root, dirs, files in os.walk(folder_path):
-            for file in files:
-                if file.endswith(".pkl"):
-                    env_paths.append(
-                        os.path.join(root, file)
-                        )
-        env_paths = sorted(env_paths)
-        for _idx, env_path in enumerate(env_paths):
-            """
-            Here we collect the indices of the environments for which
-            we need to generate the videos
-            
-            We increment the simulation count on env_create
-            so the 1st simulation has an index of 1, when comparing in 
-            env_step 
-            """
-            for vg_env in self.video_generation_envs:
-                if vg_env in env_path:
-                    self.video_generation_indices.append(_idx+1)
-        return sorted(env_paths)        
+        """            
+        env_paths = sorted(glob.glob(
+            os.path.join(
+                self.test_env_folder,
+                "*/*.pkl"
+            )
+        ))
+        # Remove the root folder name from the individual
+        # paths, so that we only have the path relative
+        # to the test root folder
+        env_paths = sorted([os.path.relpath(
+            x, self.test_env_folder
+        ) for x in env_paths])
+
+        return env_paths
 
     def instantiate_redis_connection_pool(self):
         """
@@ -278,13 +269,18 @@ class FlatlandRemoteEvaluationService:
-            Add a high level summary of everything thats 
-            hapenning here.
+            Add a high level summary of everything that's
+            happening here.
         """
-        
+        self.simulation_count += 1
         if self.simulation_count < len(self.env_file_paths):
             """
             There are still test envs left that are yet to be evaluated 
             """
 
             test_env_file_path = self.env_file_paths[self.simulation_count]
+            print("Evaluating : {}".format(test_env_file_path))
+            test_env_file_path = os.path.join(
+                self.test_env_folder,
+                test_env_file_path
+            )
             del self.env
             self.env = RailEnv(
                 width=1,
@@ -294,15 +290,13 @@ class FlatlandRemoteEvaluationService:
             )
             if self.visualize:
                 if self.env_renderer:
-                    del self.env_renderer                
+                    del self.env_renderer     
                 self.env_renderer = RenderTool(self.env, gl="PILSVG", )
             
             # Set max episode steps allowed
             self.env._max_episode_steps = \
                 int(1.5 * (self.env.width + self.env.height))
 
-            self.simulation_count += 1
-
             if self.begin_simulation:
                 # If begin simulation has already been initialized 
                 # atleast once
@@ -321,7 +315,7 @@ class FlatlandRemoteEvaluationService:
             _command_response['type'] = messages.FLATLAND_RL.ENV_CREATE_RESPONSE
             _command_response['payload'] = {}
             _command_response['payload']['observation'] = _observation
-            _command_response['payload']['env_file_path'] = test_env_file_path
+            _command_response['payload']['env_file_path'] = self.env_file_paths[self.simulation_count]
         else:
             """
             All test env evaluations are complete
@@ -384,12 +378,17 @@ class FlatlandRemoteEvaluationService:
         
         # Record Frame
         if self.visualize:
-            self.env_renderer.render_env(show=False, show_observations=False, show_predictions=False)
+            self.env_renderer.render_env(
+                                show=False, 
+                                show_observations=False, 
+                                show_predictions=False
+                                )
             """
-            Only save the frames for environments which are separately provided 
-            in video_generation_indices param
+            Only save the frames for environments which are separately provided
+            in the video_generation_envs param
             """
-            if self.simulation_count in self.video_generation_indices:        
+            current_env_path = self.env_file_paths[self.simulation_count]
+            if current_env_path in self.video_generation_envs:
                 self.env_renderer.gl.save_image(
                         os.path.join(
                             self.vizualization_folder_name,
@@ -474,6 +473,13 @@ class FlatlandRemoteEvaluationService:
         self.evaluation_state["score"]["score"] = mean_percentage_complete
         self.evaluation_state["score"]["score_secondary"] = mean_reward
         self.handle_aicrowd_success_event(self.evaluation_state)
+        print("#"*100)
+        print("EVALUATION COMPLETE !!")
+        print("#"*100)
+        print("# Mean Reward : {}".format(mean_reward))
+        print("# Mean Percentage Complete : {}".format(mean_percentage_complete))
+        print("#"*100)
+        print("#"*100)
 
     def report_error(self, error_message, command_response_channel):
         """
@@ -517,7 +523,7 @@ class FlatlandRemoteEvaluationService:
         Main runner function which waits for commands from the client
         and acts accordingly.
         """
-        print("Listening for commands at : ", self.command_channel)
+        print("Listening at : ", self.command_channel)
         while True:
             command = self.get_next_command()
 
@@ -603,7 +609,6 @@ if __name__ == "__main__":
     result = grader.run()
     if result['type'] == messages.FLATLAND_RL.ENV_SUBMIT_RESPONSE:
         cumulative_results = result['payload']
-        print("Results : ", cumulative_results)
     elif result['type'] == messages.FLATLAND_RL.ERROR:
         error = result['payload']
         raise Exception("Evaluation Failed : {}".format(str(error)))
diff --git a/requirements_dev.txt b/requirements_dev.txt
index 3beb1cfb6ebf7ef3606d512c3ff043f98bf4967d..619e276988353631bc098d9b24af56d03ade3545 100644
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -3,6 +3,7 @@ tox>=3.5.2
 twine>=1.12.1
 pytest>=3.8.2
 pytest-runner>=4.2
+Click>=7.0
 crowdai-api>=0.1.21
 boto3>=1.9.194
 numpy>=1.16.2
diff --git a/setup.py b/setup.py
index 22ee05ddb49972e21dda7e8df45ee02dcebae768..131cc983d228abffa22f7e4415edbce0ced19668 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@ def get_all_svg_files(directory='./svg/'):
     ret = []
     for dirpath, subdirs, files in os.walk(directory):
         for f in files:
-            ret.append(os.path.join(dirpath,f))
+            ret.append(os.path.join(dirpath, f))
     return ret
 
 
@@ -24,7 +24,7 @@ def get_all_images_files(directory='./images/'):
     ret = []
     for dirpath, subdirs, files in os.walk(directory):
         for f in files:
-            ret.append(os.path.join(dirpath,f))
+            ret.append(os.path.join(dirpath, f))
     return ret
 
 
@@ -32,7 +32,7 @@ def get_all_notebook_files(directory='./notebooks/'):
     ret = []
     for dirpath, subdirs, files in os.walk(directory):
         for f in files:
-            ret.append(os.path.join(dirpath,f))
+            ret.append(os.path.join(dirpath, f))
     return ret
 
 
@@ -62,7 +62,8 @@ setup(
     description="Multi Agent Reinforcement Learning on Trains",
     entry_points={
         'console_scripts': [
-            'flatland=flatland.cli:main',
+            'flatland-demo=flatland.cli:demo',
+            'flatland-evaluator=flatland.cli:evaluator'
         ],
     },
     install_requires=requirements,