From 52d6b38b5ae55a9f583dc6dec6bbddcbf75d0bd8 Mon Sep 17 00:00:00 2001
From: "Egli Adrian (IT-SCI-API-PFI)" <adrian.egli@sbb.ch>
Date: Tue, 3 Nov 2020 21:14:12 +0100
Subject: [PATCH] DeadLockAvoidanceAgent -> observation

---
 reinforcement_learning/multi_agent_training.py | 2 +-
 run.py                                         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/reinforcement_learning/multi_agent_training.py b/reinforcement_learning/multi_agent_training.py
index fd85fdc..04c32db 100755
--- a/reinforcement_learning/multi_agent_training.py
+++ b/reinforcement_learning/multi_agent_training.py
@@ -475,7 +475,7 @@ def eval_policy(env, tree_observation, policy, train_params, obs_params):
 if __name__ == "__main__":
     parser = ArgumentParser()
     parser.add_argument("-n", "--n_episodes", help="number of episodes to run", default=2500, type=int)
-    parser.add_argument("-t", "--training_env_config", help="training config id (eg 0 for Test_0)", default=0, type=int)
+    parser.add_argument("-t", "--training_env_config", help="training config id (eg 0 for Test_0)", default=2, type=int)
     parser.add_argument("-e", "--evaluation_env_config", help="evaluation config id (eg 0 for Test_0)", default=0, type=int)
     parser.add_argument("--n_evaluation_episodes", help="number of evaluation episodes", default=5, type=int)

diff --git a/run.py b/run.py
index 637bc79..fb16e53 100644
--- a/run.py
+++ b/run.py
@@ -25,7 +25,7 @@ from reinforcement_learning.dddqn_policy import DDDQNPolicy
 VERBOSE = True

 # Checkpoint to use (remember to push it!)
-checkpoint = "./checkpoints/201103172118-0.pth"
+checkpoint = "./checkpoints/201103180606-1400.pth"

 # Use last action cache
 USE_ACTION_CACHE = True
--
GitLab