diff --git a/changelog.md b/changelog.md
index 1099488b3c03d3045c0e5cfdc8f661a97a9ff365..faca0cb8cdd822c47ee9db529a8339eca2d21bab 100644
--- a/changelog.md
+++ b/changelog.md
@@ -9,10 +9,31 @@ Changes since Flatland 2.0.0
 
 ### Changes in rail generator and `RailEnv`
 - renaming of `distance_maps` into `distance_map`
-- by default the reset method of RailEnv is not called in the constructor of RailEnv anymore. Therefore the reset method needs to be called after the creation of a RailEnv object
+- by default, the `reset()` method of `RailEnv` is no longer called in the constructor (for compliance with the OpenAI Gym interface). Therefore, `reset()` must be called explicitly after a `RailEnv` object is created, as shown in the sketch below.
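+
+  A minimal sketch of the new calling pattern (constructor arguments shortened for illustration):
+
+  ```python
+  env = RailEnv(width=30, height=30, number_of_agents=2)
+  env.reset()  # reset() is no longer invoked by the constructor and must be called explicitly
+  ```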
 
 ### Changes in schedule generation
-- return value of schedule generator has changed to the named tuple `Schedule`
+- return value of the schedule generator has changed to the named tuple `Schedule`. From a consumer's point of view nothing changes; `Schedule` simply introduces names for the attributes that were already being returned (see the sketch below).
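+
+  A sketch of why consumers are unaffected (the field names below are illustrative, not the actual `Schedule` definition):
+
+  ```python
+  from typing import NamedTuple
+
+  class Schedule(NamedTuple):
+      agent_positions: list
+      agent_targets: list
+
+  schedule = Schedule(agent_positions=[(0, 0)], agent_targets=[(5, 5)])
+  positions, targets = schedule         # positional unpacking keeps working
+  positions = schedule.agent_positions  # attributes can now be accessed by name
+  ```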
 
 Changes since Flatland 1.0.0
 --------------------------
diff --git a/docs/specifications/railway.md b/docs/specifications/railway.md
index 3623ea450f74c5075bf0b1b9500cbddb6e406cb3..e1ee77f498cf9f030830aad048c0742acb2d9da1 100644
--- a/docs/specifications/railway.md
+++ b/docs/specifications/railway.md
@@ -711,3 +711,20 @@ RailEnv._max_episode_steps = timedelay_factor * alpha * (env.width + env.height
 ```
 
 where the following default values are used `timedelay_factor=4`, `alpha=2` and `ratio_nr_agents_to_nr_cities=20`
+
+If participants want to use their own formula, they have to override the method `compute_max_episode_steps()` of the class `RailEnv`, for example as sketched below.
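+
+A minimal sketch of such an override, assuming the signature shown in this section; the replacement formula is purely illustrative:
+
+```python
+from flatland.envs.rail_env import RailEnv
+
+
+class MyRailEnv(RailEnv):
+
+    @staticmethod
+    def compute_max_episode_steps(width: int, height: int,
+                                  ratio_nr_agents_to_nr_cities: float = 20.0) -> int:
+        # illustrative replacement formula
+        return int(8 * (width + height))
+```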
diff --git a/flatland/envs/rail_env.py b/flatland/envs/rail_env.py
index bd1920acc149cc0b991589530ed266617597d285..1dc24affd461ce585bceb6083df4feffe74c9b46 100644
--- a/flatland/envs/rail_env.py
+++ b/flatland/envs/rail_env.py
@@ -250,8 +250,7 @@ class RailEnv(Environment):
         self.agents = EnvAgent.list_from_static(self.agents_static)
 
     @staticmethod
-    def compute_max_episode_steps(width: int, height: int, timedelay_factor: int = 4, alpha: int = 2,
-                                  ratio_nr_agents_to_nr_cities: float = 20.0) -> int:
+    def compute_max_episode_steps(width: int, height: int, ratio_nr_agents_to_nr_cities: float = 20.0) -> int:
         """
-        compute_max_episode_steps(width, height, ratio_nr_agents_to_nr_cities, timedelay_factor, alpha)
+        compute_max_episode_steps(width, height, ratio_nr_agents_to_nr_cities)
 
@@ -265,10 +264,6 @@ class RailEnv(Environment):
             height of environment
         ratio_nr_agents_to_nr_cities : float, optional
             number_of_agents/number_of_cities
-        timedelay_factor : int, optional
-            timedelay_factor
-        alpha : int, optional
-            alpha
 
         Returns
         -------
@@ -276,6 +271,9 @@ class RailEnv(Environment):
             maximum number of episode steps
 
         """
+        timedelay_factor = 4
+        alpha = 2
         return int(timedelay_factor * alpha * (width + height + ratio_nr_agents_to_nr_cities))
 
     def reset(self, regen_rail=True, replace_agents=True, activate_agents=False, random_seed=None):