diff --git a/examples/flatland_2_0_example.py b/examples/flatland_2_0_example.py
index f8a613779c2afcb7955184b80c2098bf76e64a38..a1ad9a85d9a41f57c317a0b4b0bc61796e4e0f4a 100644
--- a/examples/flatland_2_0_example.py
+++ b/examples/flatland_2_0_example.py
@@ -1,5 +1,3 @@
-import time
-
 import numpy as np
 
 from flatland.envs.observations import TreeObsForRailEnv
@@ -114,7 +112,6 @@ for step in range(500):
     next_obs, all_rewards, done, _ = env.step(action_dict)
     env_renderer.render_env(show=True, show_observations=False, show_predictions=False)
     frame_step += 1
-    time.sleep(10.1)
     # Update replay buffer and train agent
     for a in range(env.get_num_agents()):
         agent.step((obs[a], action_dict[a], all_rewards[a], next_obs[a], done[a]))
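
For context: the removed time.sleep(10.1) paused the loop for over ten seconds after every environment step, so a 500-step episode spent nearly all of its wall-clock time idle; dropping it (and the now-unused import time) lets the demo render at full speed. Below is a minimal sketch of the patched inner loop, assuming the env, env_renderer, and agent objects built earlier in the example. The run_episode wrapper, the render_delay parameter, and the agent.act(obs, eps) signature are illustrative assumptions, not part of the example itself.

import time

def run_episode(env, env_renderer, agent, n_steps=500, render_delay=0.0):
    """Sketch of the example's main loop with an opt-in render throttle."""
    obs = env.reset()  # assumes reset() returns the observation dict, as in Flatland 2.0
    frame_step = 0
    for step in range(n_steps):
        # Pick an action per agent; act(obs, eps) is a hypothetical signature here.
        action_dict = {a: agent.act(obs[a], eps=0.0) for a in range(env.get_num_agents())}
        next_obs, all_rewards, done, _ = env.step(action_dict)
        env_renderer.render_env(show=True, show_observations=False, show_predictions=False)
        frame_step += 1
        if render_delay > 0:
            time.sleep(render_delay)  # configurable throttle instead of a hard-coded 10.1 s
        # Update replay buffer and train agent, as in the example
        for a in range(env.get_num_agents()):
            agent.step((obs[a], action_dict[a], all_rewards[a], next_obs[a], done[a]))
        obs = next_obs
        if done['__all__']:  # Flatland reports episode completion under the '__all__' key
            break

With render_delay=0.0 (the default) this matches the patched behavior; passing something like render_delay=0.1 would restore a watchable animation without stalling training for ten seconds per step.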