diff --git a/flatland/envs/rail_env.py b/flatland/envs/rail_env.py
index 3fe1edbbda88f9b9f5672ffd54de6446e7d1f054..17e8827fdc6e14646e72493930ec4dc650d8dc8f 100644
--- a/flatland/envs/rail_env.py
+++ b/flatland/envs/rail_env.py
@@ -189,7 +189,7 @@ class RailEnv(Environment):
             self.rewards_dict[iAgent] = 0
 
         if self.dones["__all__"]:
-            self.rewards_dict = [r + global_reward for r in self.rewards_dict]
+            self.rewards_dict = {i: r + global_reward for i, r in self.rewards_dict.items()}
             return self._get_observations(), self.rewards_dict, self.dones, {}
 
         # for i in range(len(self.agents_handles)):
@@ -297,7 +297,7 @@ class RailEnv(Environment):
         # Check for end of episode + add global reward to all rewards!
         if np.all([np.array_equal(agent2.position, agent2.target) for agent2 in self.agents]):
             self.dones["__all__"] = True
-            self.rewards_dict = [0 * r + global_reward for r in self.rewards_dict]
+            self.rewards_dict = {i: 0 * r + global_reward for i, r in self.rewards_dict.items()}
 
         return self._get_observations(), self.rewards_dict, self.dones, {}
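
Note (illustrative, not part of the patch): the removed list comprehensions iterated the rewards dict directly, which yields its keys, and they replaced the per-agent mapping with a plain list, so the handle -> reward contract of step() was broken. A minimal sketch with a toy rewards dict and an illustrative global_reward shows the difference:

    # Toy values for illustration only
    global_reward = 1
    rewards_dict = {0: -0.5, 1: -0.25}

    # Old (buggy): iterating a dict yields its KEYS, and the result is a list,
    # so agent handles get summed with global_reward and the mapping is lost.
    buggy = [r + global_reward for r in rewards_dict]
    print(buggy)  # [1, 2]

    # New (fixed): dict comprehension over .items() keeps handle -> reward intact.
    fixed = {i: r + global_reward for i, r in rewards_dict.items()}
    print(fixed)  # {0: 0.5, 1: 0.75}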