diff --git a/flatland/evaluators/service.py b/flatland/evaluators/service.py
index d658dc9b8db00ec435b64776d9060675869c3846..f4d92468f3eb89cf27db1136a1ddf56e73440fe1 100644
--- a/flatland/evaluators/service.py
+++ b/flatland/evaluators/service.py
@@ -150,6 +150,7 @@ class FlatlandRemoteEvaluationService:
         self.env = False
         self.env_renderer = False
         self.reward = 0
+        self.simulation_done = True  # no simulation in progress yet; cleared on ENV_CREATE, set again when an episode finishes
         self.simulation_count = -1
         self.simulation_env_file_paths = []
         self.simulation_rewards = []
@@ -486,7 +487,14 @@ class FlatlandRemoteEvaluationService:
         Handles a ENV_CREATE command from the client
         TODO: Add a high level summary of everything thats happening here.
         """
+        if not self.simulation_done:
+            # the client tried to create a new env before the previous simulation finished
+            _command_response = self._error_template("CAN'T CREATE NEW ENV BEFORE PREVIOUS IS DONE")
+            self.send_response(_command_response, command)
+            raise Exception(_command_response['payload'])
+
         self.simulation_count += 1
+        self.simulation_done = False  # a new simulation is now in progress
         if self.simulation_count < len(self.env_file_paths):
             """
             There are still test envs left that are yet to be evaluated 
@@ -624,6 +632,8 @@ class FlatlandRemoteEvaluationService:
             )
 
         if done["__all__"]:
+            self.simulation_done = True  # episode finished; the next ENV_CREATE is allowed again
+
             # Compute percentage complete
             complete = 0
             for i_agent in range(self.env.get_num_agents()):
@@ -633,6 +643,12 @@ class FlatlandRemoteEvaluationService:
             percentage_complete = complete * 1.0 / self.env.get_num_agents()
             self.simulation_percentage_complete[-1] = percentage_complete
 
+            print("Evaluation finished in {} timesteps. Percentage agents done: {:.3f}. Normalized reward: {:.3f}.".format(
+                self.simulation_steps[-1],
+                self.simulation_percentage_complete[-1],
+                self.simulation_rewards_normalized[-1]
+            ))
+
         # Record Frame
         if self.visualize:
             """