diff --git a/examples/complex_scene.py b/examples/complex_scene.py
deleted file mode 100644
index ecd7ee833e3fa93ffd14cafef648e83b802a1b73..0000000000000000000000000000000000000000
--- a/examples/complex_scene.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import random
-
-import numpy as np
-
-from examples.demo import Demo
-
-random.seed(1)
-np.random.seed(1)
-
-if __name__ == "__main__":
-    Demo.run_complex_scene()
diff --git a/examples/demo.py b/examples/demo.py
deleted file mode 100644
index f150ef8521be758c88517e00d50ebe4a6f5b9ffe..0000000000000000000000000000000000000000
--- a/examples/demo.py
+++ /dev/null
@@ -1,174 +0,0 @@
-import os
-import random
-import time
-
-import numpy as np
-
-from flatland.envs.generators import complex_rail_generator
-from flatland.envs.generators import random_rail_generator
-from flatland.envs.rail_env import RailEnv
-from flatland.utils.rendertools import RenderTool
-
-# ensure that every demo run behaves identically (deterministic seeds)
-random.seed(1)
-np.random.seed(1)
-
-__file_dirname__ = os.path.dirname(os.path.realpath(__file__))
-
-
-class Scenario_Generator:
-    @staticmethod
-    def generate_random_scenario(number_of_agents=3):
-        # Relative proportion of each cell type used by the random
-        # rail generator (one entry per transition case)
-        transition_probability = [15,  # empty cell - Case 0
-                                  5,  # Case 1 - straight
-                                  5,  # Case 2 - simple switch
-                                  1,  # Case 3 - diamond crossing
-                                  1,  # Case 4 - single slip
-                                  1,  # Case 5 - double slip
-                                  1,  # Case 6 - symmetrical
-                                  0,  # Case 7 - dead end
-                                  1,  # Case 1b (8)  - simple turn right
-                                  1,  # Case 1c (9)  - simple turn left
-                                  1]  # Case 2b (10) - simple switch mirrored
-
-        # Generate a random rail environment
-
-        env = RailEnv(width=20,
-                      height=20,
-                      rail_generator=random_rail_generator(cell_type_relative_proportion=transition_probability),
-                      number_of_agents=number_of_agents)
-
-        return env
-
-    @staticmethod
-    def generate_complex_scenario(number_of_agents=3):
-        env = RailEnv(width=15,
-                      height=15,
-                      rail_generator=complex_rail_generator(nr_start_goal=6, nr_extra=30, min_dist=10,
-                                                            max_dist=99999, seed=0),
-                      number_of_agents=number_of_agents)
-
-        return env
-
-    @staticmethod
-    def load_scenario(resource, package='env_data.railway', number_of_agents=3):
-        env = RailEnv(width=2 * (1 + number_of_agents),
-                      height=1 + number_of_agents)
-        env.load_resource(package, resource)
-        env.reset(False, False)
-
-        return env
-
-
-class Demo:
-
-    def __init__(self, env):
-        self.env = env
-        self.create_renderer()
-        self.action_size = 4
-        self.max_frame_rate = 60
-        self.record_frames = None
-
-    def set_record_frames(self, record_frames):
-        self.record_frames = record_frames
-
-    def create_renderer(self):
-        self.renderer = RenderTool(self.env)
-        handle = self.env.get_agent_handles()
-        return handle
-
-    def set_max_framerate(self, max_frame_rate):
-        self.max_frame_rate = max_frame_rate
-
-    def run_demo(self, max_nbr_of_steps=30):
-        action_dict = dict()
-
-        # Reset environment
-        _ = self.env.reset(False, False)
-
-        time.sleep(0.0001)  # to satisfy lint...
-
-        for step in range(max_nbr_of_steps):
-
-            # Action
-            for iAgent in range(self.env.get_num_agents()):
-                # always walk straight forward (default; overridden by the random choice below)
-                action = 2
-                action = np.random.choice([0, 1, 2, 3], 1, p=[0.0, 0.5, 0.5, 0.0])[0]
-
-                # update the actions
-                action_dict.update({iAgent: action})
-
-            # render
-            self.renderer.render_env(show=True, show_observations=False)
-
-            # environment step (apply the actions to all agents)
-            next_obs, all_rewards, done, _ = self.env.step(action_dict)
-
-            if done['__all__']:
-                break
-
-            if self.record_frames is not None:
-                self.renderer.gl.save_image(self.record_frames.format(step))
-
-        self.renderer.close_window()
-
-    @staticmethod
-    def run_generate_random_scenario():
-        demo_000 = Demo(Scenario_Generator.generate_random_scenario())
-        demo_000.run_demo()
-
-    @staticmethod
-    def run_generate_complex_scenario():
-        demo_001 = Demo(Scenario_Generator.generate_complex_scenario())
-        demo_001.run_demo()
-
-    @staticmethod
-    def run_example_network_000():
-        demo_000 = Demo(Scenario_Generator.load_scenario('example_network_000.pkl'))
-        demo_000.run_demo()
-
-    @staticmethod
-    def run_example_network_001():
-        demo_001 = Demo(Scenario_Generator.load_scenario('example_network_001.pkl'))
-        demo_001.run_demo()
-
-    @staticmethod
-    def run_example_network_002():
-        demo_002 = Demo(Scenario_Generator.load_scenario('example_network_002.pkl'))
-        demo_002.run_demo()
-
-    @staticmethod
-    def run_example_network_003():
-        demo_flatland_000 = Demo(Scenario_Generator.load_scenario('example_network_003.pkl'))
-        demo_flatland_000.renderer.resize()
-        demo_flatland_000.set_max_framerate(5)
-        demo_flatland_000.run_demo(30)
-
-    @staticmethod
-    def run_example_flatland_000():
-        demo_flatland_000 = Demo(Scenario_Generator.load_scenario('example_flatland_000.pkl'))
-        demo_flatland_000.renderer.resize()
-        demo_flatland_000.set_max_framerate(5)
-        demo_flatland_000.run_demo(60)
-
-    @staticmethod
-    def run_example_flatland_001():
-        demo_flatland_000 = Demo(Scenario_Generator.load_scenario('example_flatland_001.pkl'))
-        demo_flatland_000.renderer.resize()
-        demo_flatland_000.set_max_framerate(5)
-        demo_flatland_000.set_record_frames(os.path.join(__file_dirname__, '..', 'rendering', 'frame_{:04d}.bmp'))
-        demo_flatland_000.run_demo(60)
-
-    @staticmethod
-    def run_complex_scene():
-        demo_001 = Demo(Scenario_Generator.load_scenario('complex_scene.pkl'))
-        demo_001.set_record_frames(os.path.join(__file_dirname__, '..', 'rendering', 'frame_{:04d}.bmp'))
-        demo_001.run_demo(120)
-
-    @staticmethod
-    def run_basic_elements_test():
-        demo_001 = Demo(Scenario_Generator.load_scenario('basic_elements_test.pkl'))
-        demo_001.run_demo(120)
diff --git a/examples/example_basic_elements_test.py b/examples/example_basic_elements_test.py
deleted file mode 100644
index df4b2f834c0d255737e783a8d6319e2f222d842a..0000000000000000000000000000000000000000
--- a/examples/example_basic_elements_test.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import random
-
-import numpy as np
-
-from examples.demo import Demo
-
-random.seed(1)
-np.random.seed(1)
-
-if __name__ == "__main__":
-    Demo.run_basic_elements_test()
diff --git a/examples/example_flatland_000.py b/examples/example_flatland_000.py
deleted file mode 100644
index 504b85b7ac053f8fc43f53dc6df762c788ec1409..0000000000000000000000000000000000000000
--- a/examples/example_flatland_000.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import random
-
-import numpy as np
-
-from examples.demo import Demo
-
-random.seed(1)
-np.random.seed(1)
-
-if __name__ == "__main__":
-    Demo.run_example_flatland_000()
diff --git a/examples/example_flatland_001.py b/examples/example_flatland_001.py
deleted file mode 100644
index 45281512c61914041b4da8ec24628389f3ce2b89..0000000000000000000000000000000000000000
--- a/examples/example_flatland_001.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import random
-
-import numpy as np
-
-from examples.demo import Demo
-
-random.seed(1)
-np.random.seed(1)
-
-if __name__ == "__main__":
-    Demo.run_example_flatland_001()
diff --git a/examples/example_network_000.py b/examples/example_network_000.py
deleted file mode 100644
index 6e0e02b7afbd525d21394af63de63773a48d9d90..0000000000000000000000000000000000000000
--- a/examples/example_network_000.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import random
-
-import numpy as np
-
-from examples.demo import Demo
-
-random.seed(1)
-np.random.seed(1)
-
-if __name__ == "__main__":
-    Demo.run_example_network_000()
diff --git a/examples/example_network_001.py b/examples/example_network_001.py
deleted file mode 100644
index 44775bffcf295a6afee32b1aeafe65b3fcce2663..0000000000000000000000000000000000000000
--- a/examples/example_network_001.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import random
-
-import numpy as np
-
-from examples.demo import Demo
-
-random.seed(1)
-np.random.seed(1)
-
-if __name__ == "__main__":
-    Demo.run_example_network_001()
diff --git a/examples/example_network_002.py b/examples/example_network_002.py
deleted file mode 100644
index 047c2259f7a18219f35d574396acdc4fec281052..0000000000000000000000000000000000000000
--- a/examples/example_network_002.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import random
-
-import numpy as np
-
-from examples.demo import Demo
-
-random.seed(1)
-np.random.seed(1)
-
-if __name__ == "__main__":
-    Demo.run_example_network_002()
diff --git a/examples/example_network_003.py b/examples/example_network_003.py
deleted file mode 100644
index 960e9738459bb0a71c4d11067796548bd40291a3..0000000000000000000000000000000000000000
--- a/examples/example_network_003.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import random
-
-import numpy as np
-
-from examples.demo import Demo
-
-random.seed(1)
-np.random.seed(1)
-
-if __name__ == "__main__":
-    Demo.run_example_network_003()
diff --git a/examples/generate_complex_scenario.py b/examples/generate_complex_scenario.py
deleted file mode 100644
index 75d67aaf9f04acb2fbb73bc9c486ec65bf6bfa7e..0000000000000000000000000000000000000000
--- a/examples/generate_complex_scenario.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import random
-
-import numpy as np
-
-from examples.demo import Demo
-
-random.seed(1)
-np.random.seed(1)
-
-if __name__ == "__main__":
-    Demo.run_generate_complex_scenario()
diff --git a/examples/generate_random_scenario.py b/examples/generate_random_scenario.py
deleted file mode 100644
index 0cf4e5122ca226a1814de6f3e23bd7f5ccd296ad..0000000000000000000000000000000000000000
--- a/examples/generate_random_scenario.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import random
-
-import numpy as np
-
-from examples.demo import Demo
-
-random.seed(1)
-np.random.seed(1)
-
-if __name__ == "__main__":
-    Demo.run_generate_random_scenario()
diff --git a/examples/play_model.py b/examples/play_model.py
deleted file mode 100644
index fdd2bd8d90ab505550996635edf08db1d9af56af..0000000000000000000000000000000000000000
--- a/examples/play_model.py
+++ /dev/null
@@ -1,128 +0,0 @@
-import random
-import time
-from collections import deque
-
-import numpy as np
-
-from flatland.envs.generators import complex_rail_generator
-from flatland.envs.rail_env import RailEnv
-from flatland.utils.rendertools import RenderTool
-
-
-class Player(object):
-    def __init__(self, env):
-        self.env = env
-        self.handle = env.get_agent_handles()
-
-        self.state_size = 105
-        self.action_size = 4
-        self.n_trials = 9999
-        self.eps = 1.
-        self.eps_end = 0.005
-        self.eps_decay = 0.998
-        self.action_dict = dict()
-        self.scores_window = deque(maxlen=100)
-        self.done_window = deque(maxlen=100)
-        self.scores = []
-        self.dones_list = []
-        self.action_prob = [0] * 4
-
-        # Removing refs to a real agent for now.
-        self.iFrame = 0
-        self.tStart = time.time()
-
-        # Reset environment
-        self.env.obs_builder.reset()
-        self.obs = self.env._get_observations()
-        for envAgent in range(self.env.get_num_agents()):
-            norm = max(1, max_lt(self.obs[envAgent], np.inf))
-            self.obs[envAgent] = np.clip(np.array(self.obs[envAgent]) / norm, -1, 1)
-
-        self.score = 0
-        self.env_done = 0
-
-    def reset(self):
-        self.obs = self.env.reset()
-        return self.obs
-
-    def step(self):
-        env = self.env
-
-        # Choose an action for each agent (random policy in place of an agent network)
-        for handle in env.get_agent_handles():
-            # Random actions
-            action = np.random.choice([0, 1, 2, 3], 1, p=[0.2, 0.1, 0.6, 0.1])[0]
-            # Numpy version uses single random sequence
-            self.action_prob[action] += 1
-            self.action_dict.update({handle: action})
-
-        # Environment step - pass the agent actions to the environment,
-        # retrieve the response - observations, rewards, dones
-        next_obs, all_rewards, done, _ = self.env.step(self.action_dict)
-
-        for handle in env.get_agent_handles():
-            norm = max(1, max_lt(next_obs[handle], np.inf))
-            next_obs[handle] = np.clip(np.array(next_obs[handle]) / norm, -1, 1)
-
-        # Update replay buffer and train agent
-        if False:
-            for handle in self.env.get_agent_handles():
-                self.agent.step(self.obs[handle], self.action_dict[handle],
-                                all_rewards[handle], next_obs[handle], done[handle],
-                                train=False)
-                self.score += all_rewards[handle]
-
-        self.iFrame += 1
-
-        self.obs = next_obs.copy()
-        if done['__all__']:
-            self.env_done = 1
-
-
-def max_lt(seq, val):
-    """
-    Return the last item in seq (scanning from the end) that is non-negative
-    and strictly less than val; None is returned if no such item exists.
-    """
-
-    idx = len(seq) - 1
-    while idx >= 0:
-        if seq[idx] < val and seq[idx] >= 0:
-            return seq[idx]
-        idx -= 1
-    return None
-
-
-def main(render=True, delay=0.0, n_trials=3, n_steps=50):
-    random.seed(1)
-    np.random.seed(1)
-
-    # Generate a random rail environment
-    env = RailEnv(width=15, height=15,
-                  rail_generator=complex_rail_generator(nr_start_goal=5, nr_extra=20, min_dist=12),
-                  number_of_agents=5)
-
-    if render:
-        env_renderer = RenderTool(env)
-
-    oPlayer = Player(env)
-
-    for trials in range(1, n_trials + 1):
-
-        # Reset environment
-        oPlayer.reset()
-        env_renderer.set_new_rail()
-
-        # Run episode
-        for step in range(n_steps):
-            oPlayer.step()
-            if render:
-                env_renderer.render_env(show=True, frames=True, episode=trials, step=step)
-                if delay > 0:
-                    time.sleep(delay)
-
-    env_renderer.gl.close_window()
-
-
-if __name__ == "__main__":
-    main(render=True, delay=0)
diff --git a/examples/simple_example_1.py b/examples/simple_example_1.py
index 93d5268840c4980aea40ae3a18b35f2bed31260e..7956c34fd4a5b94859a4b64441450afe2114133c 100644
--- a/examples/simple_example_1.py
+++ b/examples/simple_example_1.py
@@ -1,5 +1,4 @@
 from flatland.envs.generators import rail_from_manual_specifications_generator
-from flatland.envs.observations import TreeObsForRailEnv
 from flatland.envs.rail_env import RailEnv
 from flatland.utils.rendertools import RenderTool
 
@@ -13,12 +12,11 @@ specs = [[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
 env = RailEnv(width=6,
               height=4,
               rail_generator=rail_from_manual_specifications_generator(specs),
-              number_of_agents=1,
-              obs_builder_object=TreeObsForRailEnv(max_depth=2))
+              number_of_agents=1)
 
 env.reset()
 
 env_renderer = RenderTool(env)
-env_renderer.render_env(show=True)
+env_renderer.render_env(show=True, show_predictions=False, show_observations=False)
 
 input("Press Enter to continue...")
diff --git a/examples/simple_example_2.py b/examples/simple_example_2.py
index f8612a392d0c6d57f6077dc988379e927af1e1b1..994c7deda1569b77d4adac8a17fa9ebe14b27ef6 100644
--- a/examples/simple_example_2.py
+++ b/examples/simple_example_2.py
@@ -3,7 +3,6 @@ import random
 import numpy as np
 
 from flatland.envs.generators import random_rail_generator
-from flatland.envs.observations import TreeObsForRailEnv
 from flatland.envs.rail_env import RailEnv
 from flatland.utils.rendertools import RenderTool
 
@@ -27,8 +26,7 @@ transition_probability = [1.0,  # empty cell - Case 0
 env = RailEnv(width=10,
               height=10,
               rail_generator=random_rail_generator(cell_type_relative_proportion=transition_probability),
-              number_of_agents=3,
-              obs_builder_object=TreeObsForRailEnv(max_depth=2))
+              number_of_agents=3)
 
 env.reset()
 
diff --git a/examples/tkplay.py b/examples/tkplay.py
deleted file mode 100644
index 225e113851c0695f5dd0af4045869d8d799aaafc..0000000000000000000000000000000000000000
--- a/examples/tkplay.py
+++ /dev/null
@@ -1,37 +0,0 @@
-try:
-    from examples.play_model import Player
-except ImportError:
-    from play_model import Player
-
-from flatland.envs.generators import complex_rail_generator
-from flatland.envs.rail_env import RailEnv
-from flatland.utils.rendertools import RenderTool
-
-
-def tkmain(n_trials=2, n_steps=50, sGL="PIL"):
-    # Generate a random rail environment
-    env = RailEnv(width=15, height=15,
-                  rail_generator=complex_rail_generator(nr_start_goal=5, nr_extra=20, min_dist=12),
-                  number_of_agents=5)
-
-    env_renderer = RenderTool(env, gl=sGL)
-
-    oPlayer = Player(env)
-    n_trials = 1
-    for trials in range(1, n_trials + 1):
-
-        # Reset environment
-        oPlayer.reset()
-        env_renderer.set_new_rail()
-
-        for step in range(n_steps):
-            oPlayer.step()
-            env_renderer.render_env(show=True, frames=True, episode=trials, step=step,
-                                    action_dict=oPlayer.action_dict)
-
-    env_renderer.close_window()
-
-
-if __name__ == "__main__":
-    tkmain(sGL="PIL")
-    tkmain(sGL="PILSVG")