diff --git a/reinforcement_learning/multi_agent_training.py b/reinforcement_learning/multi_agent_training.py
index fd85fdc8e77530b0223b4f71a952887b47eb762f..04c32db73f09668f6f65ff51b79548dac6d43c8d 100755
--- a/reinforcement_learning/multi_agent_training.py
+++ b/reinforcement_learning/multi_agent_training.py
@@ -475,7 +475,7 @@ def eval_policy(env, tree_observation, policy, train_params, obs_params):
 if __name__ == "__main__":
     parser = ArgumentParser()
     parser.add_argument("-n", "--n_episodes", help="number of episodes to run", default=2500, type=int)
-    parser.add_argument("-t", "--training_env_config", help="training config id (eg 0 for Test_0)", default=0, type=int)
+    parser.add_argument("-t", "--training_env_config", help="training config id (eg 0 for Test_0)", default=2, type=int)
     parser.add_argument("-e", "--evaluation_env_config", help="evaluation config id (eg 0 for Test_0)", default=0,
                         type=int)
     parser.add_argument("--n_evaluation_episodes", help="number of evaluation episodes", default=5, type=int)
diff --git a/run.py b/run.py
index 637bc7953f2dc9527f23c79809d0eab13c5e5268..fb16e53867b4ef36b229c0c437c5cb6c9b679d07 100644
--- a/run.py
+++ b/run.py
@@ -25,7 +25,7 @@ from reinforcement_learning.dddqn_policy import DDDQNPolicy
 VERBOSE = True
 
 # Checkpoint to use (remember to push it!)
-checkpoint = "./checkpoints/201103172118-0.pth"
+checkpoint = "./checkpoints/201103180606-1400.pth"
 
 # Use last action cache
 USE_ACTION_CACHE = True
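
For context on the checkpoint swap above, a minimal sketch of how the new `./checkpoints/201103180606-1400.pth` file could be sanity-checked locally before committing it (the diff's own comment notes "remember to push it!"). This is an illustrative snippet, not code from run.py; it assumes the `.pth` file was written by the training script and holds either a serialized `state_dict` or a full module, which may not match the repo's exact save format.

```python
import torch

# Assumed path of the checkpoint referenced in run.py after this change.
checkpoint = "./checkpoints/201103180606-1400.pth"

# Load on CPU so the check also works on machines without a GPU.
state = torch.load(checkpoint, map_location="cpu")

# Report what was actually stored; key layout is only illustrative.
if isinstance(state, dict):
    print(f"Loaded state_dict with {len(state)} entries")
else:
    print(f"Loaded serialized object of type {type(state).__name__}")
```

Running this (or an equivalent check) before pushing helps confirm the file committed alongside run.py is the one produced by `multi_agent_training.py` with the updated `--training_env_config` default.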