diff --git a/reinforcement_learning/multi_agent_training.py b/reinforcement_learning/multi_agent_training.py
index 04c32db73f09668f6f65ff51b79548dac6d43c8d..b43cf9480230d9e3ad8eded82a77ffaf897fcc97 100755
--- a/reinforcement_learning/multi_agent_training.py
+++ b/reinforcement_learning/multi_agent_training.py
@@ -474,7 +474,8 @@ def eval_policy(env, tree_observation, policy, train_params, obs_params):
 
 if __name__ == "__main__":
     parser = ArgumentParser()
-    parser.add_argument("-n", "--n_episodes", help="number of episodes to run", default=2500, type=int)
+    parser.add_argument("-n", "--n_episodes", help="number of episodes to run", default=12500, type=int)
     parser.add_argument("-t", "--training_env_config", help="training config id (eg 0 for Test_0)", default=2, type=int)
     parser.add_argument("-e", "--evaluation_env_config", help="evaluation config id (eg 0 for Test_0)", default=0,
                         type=int)
@@ -482,8 +482,11 @@ if __name__ == "__main__":
     parser.add_argument("--checkpoint_interval", help="checkpoint interval", default=100, type=int)
     parser.add_argument("--eps_start", help="max exploration", default=1.0, type=float)
     parser.add_argument("--eps_end", help="min exploration", default=0.01, type=float)
-    parser.add_argument("--eps_decay", help="exploration decay", default=0.99, type=float)
-    parser.add_argument("--buffer_size", help="replay buffer size", default=int(1e5), type=int)
+    parser.add_argument("--eps_decay", help="exploration decay", default=0.998, type=float)
+    parser.add_argument("--buffer_size", help="replay buffer size", default=int(1e7), type=int)
     parser.add_argument("--buffer_min_size", help="min buffer size to start training", default=0, type=int)
     parser.add_argument("--restore_replay_buffer", help="replay buffer to restore", default="", type=str)
     parser.add_argument("--save_replay_buffer", help="save replay buffer at each evaluation interval", default=False,
diff --git a/run.py b/run.py
index fb16e53867b4ef36b229c0c437c5cb6c9b679d07..e9950fcf1335b52db2a3b7a15baa8d164f62c7be 100644
--- a/run.py
+++ b/run.py
@@ -25,7 +25,8 @@ from reinforcement_learning.dddqn_policy import DDDQNPolicy
 VERBOSE = True
 
 # Checkpoint to use (remember to push it!)
-checkpoint = "./checkpoints/201103180606-1400.pth"
+checkpoint = "./checkpoints/201103221432-3000.pth"
 
 # Use last action cache
 USE_ACTION_CACHE = True