diff --git a/examples/training_navigation.py b/examples/training_navigation.py
index 60dc1adbf473382aad4ed0dde3ab9a13e64e7785..975d33fb3139ebc2040b941dbe73d8aeb3b225eb 100644
--- a/examples/training_navigation.py
+++ b/examples/training_navigation.py
@@ -54,7 +54,8 @@ for trials in range(1, n_trials + 1):
 
     # Run episode
     for step in range(100):
-        #env_renderer.renderEnv(show=True)
+        if trials >= 114:
+            env_renderer.renderEnv(show=True)
 
         # Action
         for a in range(env.number_of_agents):
diff --git a/flatland/envs/rail_env.py b/flatland/envs/rail_env.py
index 22c072a838085d40f045a496b1aa6cc8aa778fc3..36040d5cf66f1adc74faaade7e45acbd711676e0 100644
--- a/flatland/envs/rail_env.py
+++ b/flatland/envs/rail_env.py
@@ -547,7 +547,7 @@ class RailEnv(Environment):
         global_reward = 1 * beta
 
         # Reset the step rewards
-        self.rewards_dict = {}
+        self.rewards_dict = dict()
         for handle in self.agents_handles:
             self.rewards_dict[handle] = 0
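
The first hunk above gates debug rendering on a hard-coded trial index. Below is a minimal, self-contained sketch of that pattern with the Flatland environment and renderer replaced by a stub; RENDER_FROM_TRIAL, StubRenderer, and the shortened loop bounds are illustrative assumptions, and only the call shape env_renderer.renderEnv(show=True) is taken from the patch itself.

    # Render only once training has reached a threshold trial, so early,
    # uninteresting episodes run at full speed without a display window.
    RENDER_FROM_TRIAL = 114  # same literal as in the patch


    class StubRenderer:
        """Stand-in for the renderer used in examples/training_navigation.py."""

        def renderEnv(self, show=True):
            print("rendering frame (show=%s)" % show)


    env_renderer = StubRenderer()
    n_trials = 120

    for trials in range(1, n_trials + 1):
        for step in range(3):  # episode shortened for the sketch
            if trials >= RENDER_FROM_TRIAL:
                env_renderer.renderEnv(show=True)

Pulling the literal 114 into a named constant keeps the training loop readable and makes the render warm-up easy to adjust or disable.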