diff --git a/flatland/envs/distance_map.py b/flatland/envs/distance_map.py
index c6e73b0bdbe752b8d5df9c4a0697bb621e5276ec..2bc1a5117794959cca82d2edad821cb629397f78 100644
--- a/flatland/envs/distance_map.py
+++ b/flatland/envs/distance_map.py
@@ -55,7 +55,6 @@ class DistanceMap:
         self.env_width = rail.width
 
     def _compute(self, agents: List[EnvAgent], rail: GridTransitionMap):
-        print("computing distance map")
         self.agents_previous_computation = self.agents
         self.distance_map = np.inf * np.ones(shape=(len(agents),
                                                     self.env_height,
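
The hunk above drops a stray debug print from DistanceMap._compute; the surrounding context shows the map being allocated as one (height x width) grid per agent, initialized to +inf. A minimal sketch of that layout follows; the sizes and the fill step are illustrative assumptions, not the real _compute logic:

import numpy as np

# One (env_height x env_width) distance grid per agent; +inf marks cells
# not yet reached by the shortest-path fill. Sizes are illustrative.
n_agents, env_height, env_width = 2, 4, 5
distance_map = np.inf * np.ones(shape=(n_agents, env_height, env_width))

# A later fill pass would overwrite reachable cells, e.g. a target cell:
distance_map[0, 3, 4] = 0.0  # hypothetical target of agent 0
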
diff --git a/tests/test_flaltland_rail_agent_status.py b/tests/test_flaltland_rail_agent_status.py
index ec7635ba05c7d7aec3964f9964d1341baef8e96a..099ccce6462f21217df0bd03abacc4716e52993b 100644
--- a/tests/test_flaltland_rail_agent_status.py
+++ b/tests/test_flaltland_rail_agent_status.py
@@ -50,7 +50,7 @@ def test_initial_status():
                 reward=env.step_penalty * 0.5,  # running at speed 0.5
             ),
             Replay(
-                position=(3, 8),
+                position=(3, 9),
                 direction=Grid4TransitionsEnum.WEST,
                 status=RailAgentStatus.ACTIVE,
                 action=RailEnvActions.MOVE_FORWARD,
@@ -64,7 +64,7 @@ def test_initial_status():
                 reward=env.step_penalty * 0.5,  # running at speed 0.5
             ),
             Replay(
-                position=(3, 7),
+                position=(3, 8),
                 direction=Grid4TransitionsEnum.WEST,
                 status=RailAgentStatus.ACTIVE,
                 action=RailEnvActions.MOVE_FORWARD,
@@ -79,7 +79,7 @@ def test_initial_status():
                 status=RailAgentStatus.ACTIVE
             ),
             Replay(
-                position=(3, 6),
+                position=(3, 7),
                 direction=Grid4TransitionsEnum.WEST,
                 action=RailEnvActions.MOVE_RIGHT,
                 reward=env.step_penalty * 0.5,  # wrong action is corrected to forward without penalty!
@@ -93,7 +93,7 @@ def test_initial_status():
                 status=RailAgentStatus.ACTIVE
             ),
             Replay(
-                position=(3, 5),
+                position=(3, 6),
                 direction=Grid4TransitionsEnum.WEST,
                 action=None,
                 reward=env.global_reward,  # already done
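
Every updated Replay expectation shifts one column east, e.g. (3, 8) becomes (3, 9). This is consistent with activation semantics in which the agent is placed on its initial cell when it becomes ACTIVE and only starts moving west on later steps. A hedged sketch of that reading; the initial cell and step count are assumed for illustration, and the speed-0.5 half-steps between cells are ignored:

def expected_positions(initial, n_cells):
    # West-bound agent that spends the activation step standing on its
    # initial cell, then decrements the column once per cell entered.
    row, col = initial
    positions = [initial]
    for _ in range(n_cells - 1):
        col -= 1  # Grid4TransitionsEnum.WEST decreases the column index
        positions.append((row, col))
    return positions

print(expected_positions((3, 9), 4))
# [(3, 9), (3, 8), (3, 7), (3, 6)] -- matching the updated positions above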