diff --git a/sequential_agent/run_test.py b/sequential_agent/run_test.py
index 72b645fcdf909ff6a1a6742e6b41fe65e5903e4c..6e9f7c218eba6d7abef1294a568f64a08842d25f 100644
--- a/sequential_agent/run_test.py
+++ b/sequential_agent/run_test.py
@@ -18,9 +18,9 @@ y_dim = env.height
 
 """
 
-x_dim = 10  # np.random.randint(8, 20)
-y_dim = 10  # np.random.randint(8, 20)
-n_agents = 5  # np.random.randint(3, 8)
+x_dim = 20  # np.random.randint(8, 20)
+y_dim = 20  # np.random.randint(8, 20)
+n_agents = 10  # np.random.randint(3, 8)
 n_goals = n_agents + np.random.randint(0, 3)
 min_dist = int(0.75 * min(x_dim, y_dim))
 
@@ -63,10 +63,10 @@ for trials in range(1, n_trials + 1):
         for a in range(env.get_num_agents()):
             if done[a]:
                 acting_agent += 1
-            if acting_agent == a:
-                action = agent.act(obs[acting_agent], eps=0)
+            if a == acting_agent:
+                action = agent.act(obs[a], eps=0)  # greedy action (eps=0) for the agent whose turn it is
             else:
-                action = 0
+                action = 4  # 4 = STOP_MOVING (was 0 = DO_NOTHING) so non-acting agents stay halted
             action_dict.update({a: action})
 
         # Environment step
diff --git a/torch_training/multi_agent_training.py b/torch_training/multi_agent_training.py
index b5fe86a04e981c7bdae96976bfdfca85d533d789..476066a902242ff1c7a024ed6a9bacee8d370d83 100644
--- a/torch_training/multi_agent_training.py
+++ b/torch_training/multi_agent_training.py
@@ -93,7 +93,7 @@ def main(argv):
 
     # Here you can pre-load an agent
     if True:
-        with path(torch_training.Nets, "avoid_checkpoint53700.pth") as file_in:
+        with path(torch_training.Nets, "avoid_checkpoint2400.pth") as file_in:
             agent.qnetwork_local.load_state_dict(torch.load(file_in))
 
     # Do training over n_episodes