diff --git a/examples/training_example.py b/examples/training_example.py
index 218f133efe4e79064ae94b24c61aa2d6f2ce2d09..dd9ded92510be8ec5fa6c222b7259157db920430 100644
--- a/examples/training_example.py
+++ b/examples/training_example.py
@@ -80,7 +80,7 @@ for trials in range(1, n_trials + 1):
         # Environment step which returns the observations for all agents, their corresponding
         # rewards and whether they are done
         next_obs, all_rewards, done, _ = env.step(action_dict)
-        # TreeObservation.util_print_obs_subtree(next_obs[0], num_features_per_node=8)
+
         # Update replay buffer and train agent
         for a in range(env.get_num_agents()):
             agent.step((obs[a], action_dict[a], all_rewards[a], next_obs[a], done[a]))
diff --git a/flatland/envs/observations.py b/flatland/envs/observations.py
index 4de614db4389556a877f4083fcdded6a7e3debdf..4b0049f655bc03336411f1001813c3471d9845a7 100644
--- a/flatland/envs/observations.py
+++ b/flatland/envs/observations.py
@@ -342,21 +342,18 @@ class TreeObsForRailEnv(ObservationBuilder):
                         for ca in conflicting_agent:
                             if direction != self.predicted_dir[tot_dist][ca[0]]:
                                 potential_conflict = 1
-                                # print("Potential Conflict",position,handle,ca[0],tot_dist,depth)
                     # Look for opposing paths at distance num_step-1
                     elif int_position in np.delete(self.predicted_pos[pre_step], handle):
                         conflicting_agent = np.where(self.predicted_pos[pre_step] == int_position)
                         for ca in conflicting_agent:
                             if direction != self.predicted_dir[pre_step][ca[0]]:
                                 potential_conflict = 1
-                                # print("Potential Conflict", position,handle,ca[0],pre_step,depth)
                     # Look for opposing paths at distance num_step+1
                     elif int_position in np.delete(self.predicted_pos[post_step], handle):
                         conflicting_agent = np.where(np.delete(self.predicted_pos[post_step], handle) == int_position)
                         for ca in conflicting_agent:
                             if direction != self.predicted_dir[post_step][ca[0]]:
                                 potential_conflict = 1
-                                # print("Potential Conflict", position,handle,ca[0],post_step,depth)
 
             if position in self.location_has_target and position != agent.target:
                 if num_steps < other_target_encountered:
diff --git a/notebooks/Scene_Editor.ipynb b/notebooks/Scene_Editor.ipynb
index 250ad48a5d37bd875527abc8e6a01d6c7941827b..db23e0e04ef6baa421f8b1449c519eefc54e5243 100644
--- a/notebooks/Scene_Editor.ipynb
+++ b/notebooks/Scene_Editor.ipynb
@@ -100,7 +100,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "7783247e5f2146e293236d2426248f90",
+       "model_id": "387832d242b44d69a1712a42d9547c05",
        "version_major": 2,
        "version_minor": 0
       },
@@ -134,7 +134,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.8"
+   "version": "3.6.5"
   },
   "latex_envs": {
    "LaTeX_envs_menu_present": true,