diff --git a/tests/test_multi_speed.py b/tests/test_multi_speed.py
index 5918c24eb413e57a6b5d9ddb4cd3ff1f461e5c29..561057d81b431dfbb87b904f7a57e6fcbf84f84e 100644
--- a/tests/test_multi_speed.py
+++ b/tests/test_multi_speed.py
@@ -8,6 +8,7 @@ from flatland.envs.rail_generators import sparse_rail_generator, rail_from_grid_
 from flatland.envs.line_generators import sparse_line_generator
 from flatland.utils.simple_rail import make_simple_rail
 from test_utils import ReplayConfig, Replay, run_replay_config, set_penalties_for_replay
+from flatland.envs.agent_utils import RailAgentStatus
 
 
 # Use the sparse_rail_generator to generate feasible network configurations with corresponding tasks
@@ -49,7 +50,7 @@ class RandomAgent:
 def test_multi_speed_init():
     env = RailEnv(width=50, height=50, rail_generator=sparse_rail_generator(seed=2),
                   line_generator=sparse_line_generator(),
-                  random_seed=2,
+                  random_seed=3,
                   number_of_agents=3)
 
     # Initialize the agent with the parameters corresponding to the environment and observation_builder
@@ -62,13 +63,17 @@ def test_multi_speed_init():
     # Reset environment and get initial observations for all agents
     env.reset(False, False)
 
+    for a_idx in range(len(env.agents)):
+        env.agents[a_idx].position = env.agents[a_idx].initial_position
+        env.agents[a_idx].status = RailAgentStatus.ACTIVE
+
     # Here you can also further enhance the provided observation by means of normalization
     # See training navigation example in the baseline repository
     old_pos = []
     for i_agent in range(env.get_num_agents()):
-        env.agents[i_agent].speed_data['speed'] = 1. / (i_agent + 2)
+        env.agents[i_agent].speed_data['speed'] = 1. / (i_agent + 1)
         old_pos.append(env.agents[i_agent].position)
-
+        print(env.agents[i_agent].position)
 
     # Run episode
     for step in range(100):
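
For context, a minimal standalone sketch of the setup this patch converges on, assuming the flatland API exactly as used in the diff (the RailEnv import path flatland.envs.rail_env is an assumption, since the hunks do not show it): after env.reset(False, False) the agents are not yet placed on the grid, so the test pins each agent to its initial_position, marks it RailAgentStatus.ACTIVE, and assigns per-agent speeds of 1, 1/2, and 1/3.

# Minimal sketch, not part of the patch; assumes the flatland API as used
# in the diff above. The RailEnv import path is an assumption (the hunks
# do not show the original import lines for RailEnv).
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.line_generators import sparse_line_generator
from flatland.envs.agent_utils import RailAgentStatus

env = RailEnv(width=50, height=50,
              rail_generator=sparse_rail_generator(seed=2),
              line_generator=sparse_line_generator(),
              random_seed=3,
              number_of_agents=3)
env.reset(False, False)

for a_idx, agent in enumerate(env.agents):
    agent.position = agent.initial_position       # place the agent on its start cell
    agent.status = RailAgentStatus.ACTIVE         # reset leaves agents off the grid
    agent.speed_data['speed'] = 1. / (a_idx + 1)  # speeds 1, 1/2, 1/3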