diff --git a/baselines/Nets/avoid_checkpoint9900.pth b/baselines/Nets/avoid_checkpoint9900.pth
deleted file mode 100644
index 5fae4c2908a10725315f388f7862860cd3ab5a52..0000000000000000000000000000000000000000
Binary files a/baselines/Nets/avoid_checkpoint9900.pth and /dev/null differ
diff --git a/baselines/Nets/dummy b/baselines/Nets/dummy
deleted file mode 100644
index bf453594ba586f318379101f00d73ec29f688e97..0000000000000000000000000000000000000000
--- a/baselines/Nets/dummy
+++ /dev/null
@@ -1 +0,0 @@
-dummy file for empty folder
diff --git a/env-data/railway/example_network_000.pkl b/env-data/railway/example_network_000.pkl
index 280688c2629331621ab2ea80b4b096226464e653..e102e21735416747cb8bd9f231ce6e20fdf514c0 100644
Binary files a/env-data/railway/example_network_000.pkl and b/env-data/railway/example_network_000.pkl differ
diff --git a/env-data/railway/example_network_001.pkl b/env-data/railway/example_network_001.pkl
index 801f95149dec6eb4d47fd14e36d30f2541480188..a9c5cc97c9c4bf4159db2134756f17fa0c4fce87 100644
Binary files a/env-data/railway/example_network_001.pkl and b/env-data/railway/example_network_001.pkl differ
diff --git a/env-data/railway/example_network_002.pkl b/env-data/railway/example_network_002.pkl
index 898d54ebeb823e48790d4661ffe75a6940cd0712..37647ac2871801d2d08fd65276889e2b232c1170 100644
Binary files a/env-data/railway/example_network_002.pkl and b/env-data/railway/example_network_002.pkl differ
diff --git a/examples/demo.py b/examples/demo.py
index 7442779267795231e9bbe7c3cf530b5520ba36ef..3d7aa0a15acdfb341bfeb1093daff8bbfd310231 100644
--- a/examples/demo.py
+++ b/examples/demo.py
@@ -6,7 +6,6 @@ import time
 import numpy as np
 import torch
 
-from flatland.baselines.dueling_double_dqn import Agent
 from flatland.envs.generators import complex_rail_generator
 # from flatland.envs.generators import rail_from_list_of_saved_GridTransitionMap_generator
 from flatland.envs.generators import random_rail_generator
@@ -48,11 +47,7 @@ class Scenario_Generator:
     def generate_complex_scenario(number_of_agents=3):
         env = RailEnv(width=15,
                       height=15,
-                      rail_generator=complex_rail_generator(nr_start_goal=6,
-                                                            nr_extra=30,
-                                                            min_dist=10,
-                                                            max_dist=99999,
-                                                            seed=0),
+                      rail_generator=complex_rail_generator(nr_start_goal=6, nr_extra=30, min_dist=10, max_dist=99999, seed=0),
                       number_of_agents=number_of_agents)
 
         return env
@@ -130,79 +125,35 @@ class Demo:
     def __init__(self, env):
         self.env = env
         self.create_renderer()
-        self.load_agent()
-
-    def load_agent(self):
-        self.state_size = 105 * 2
         self.action_size = 4
-        self.agent = Agent(self.state_size, self.action_size, "FC", 0)
-        self.agent.qnetwork_local.load_state_dict(torch.load('./flatland/baselines/Nets/avoid_checkpoint15000.pth'))
 
     def create_renderer(self):
         self.renderer = RenderTool(self.env, gl="QTSVG")
         handle = self.env.get_agent_handles()
         return handle
 
-    def run_demo(self, max_nbr_of_steps=100):
+    def run_demo(self, max_nbr_of_steps=30):
         action_dict = dict()
-        time_obs = deque(maxlen=2)
-        action_prob = [0] * 4
-        agent_obs = [None] * self.env.get_num_agents()
-        agent_next_obs = [None] * self.env.get_num_agents()
 
         # Reset environment
         obs = self.env.reset(False, False)
 
-        for a in range(self.env.get_num_agents()):
-            data, distance = self.env.obs_builder.split_tree(tree=np.array(obs[a]),
-                                                             num_features_per_node=5,
-                                                             current_depth=0)
-
-            data = norm_obs_clip(data)
-            distance = norm_obs_clip(distance)
-            obs[a] = np.concatenate((data, distance))
-
-        for i in range(2):
-            time_obs.append(obs)
-
-        # env.obs_builder.util_print_obs_subtree(tree=obs[0], num_elements_per_node=5)
-        for a in range(self.env.get_num_agents()):
-            agent_obs[a] = np.concatenate((time_obs[0][a], time_obs[1][a]))
-
         for step in range(max_nbr_of_steps):
-
-            time.sleep(.2)
-
-            # print(step)
             # Action
             for a in range(self.env.get_num_agents()):
-                action = self.agent.act(agent_obs[a])
-                action_prob[action] += 1
+                action = 2  # np.random.choice(self.action_size)  # self.agent.act(agent_obs[a])
                 action_dict.update({a: action})
 
-            self.renderer.renderEnv(show=True, action_dict=action_dict)
+            self.renderer.renderEnv(show=True, action_dict=action_dict)
 
             # Environment step
             next_obs, all_rewards, done, _ = self.env.step(action_dict)
-            for a in range(self.env.get_num_agents()):
-                data, distance = self.env.obs_builder.split_tree(tree=np.array(next_obs[a]), num_features_per_node=5,
-                                                                 current_depth=0)
-                data = norm_obs_clip(data)
-                distance = norm_obs_clip(distance)
-                next_obs[a] = np.concatenate((data, distance))
 
-            # Update replay buffer and train agent
-            for a in range(self.env.get_num_agents()):
-                agent_next_obs[a] = np.concatenate((time_obs[0][a], time_obs[1][a]))
-
-            time_obs.append(next_obs)
-
-            agent_obs = agent_next_obs.copy()
             if done['__all__']:
                 break
 
 
-if True:
+if False:
     demo_000 = Demo(Scenario_Generator.generate_random_scenario())
     demo_000.run_demo()
     demo_000 = None
@@ -211,14 +162,20 @@ if True:
     demo_001.run_demo()
     demo_001 = None
 
-demo_000 = Demo(Scenario_Generator.load_scenario('./env-data/railway/example_network_000.pkl'))
-demo_000.run_demo()
-demo_000 = None
+    demo_000 = Demo(Scenario_Generator.load_scenario('./env-data/railway/example_network_000.pkl'))
+    demo_000.run_demo()
+    demo_000 = None
+
+    demo_001 = Demo(Scenario_Generator.load_scenario('./env-data/railway/example_network_001.pkl'))
+    demo_001.run_demo()
+    demo_001 = None
+
+    demo_002 = Demo(Scenario_Generator.load_scenario('./env-data/railway/example_network_002.pkl'))
+    demo_002.run_demo()
+    demo_002 = None
 
-demo_001 = Demo(Scenario_Generator.load_scenario('./env-data/railway/example_network_001.pkl'))
-demo_001.run_demo()
-demo_001 = None
 
-demo_002 = Demo(Scenario_Generator.load_scenario('./env-data/railway/example_network_002.pkl'))
-demo_002.run_demo()
-demo_002 = None
+demo_flatland_000 = Demo(Scenario_Generator.load_scenario('./env-data/railway/example_flatland_000.pkl'))
+demo_flatland_000.renderer.resize()
+demo_flatland_000.run_demo(100)
+demo_flatland_000 = None
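
With the DQN agent removed, the demo above now drives every agent with a hard-coded action. As a rough, renderer-free sketch of the same control loop, here is a minimal example using only the API calls that appear in this diff (`RailEnv`, `complex_rail_generator`, `env.step(action_dict)`); the random policy stands in for the removed `self.agent.act`, and the signatures may differ in other flatland versions.

```python
import numpy as np

from flatland.envs.generators import complex_rail_generator
from flatland.envs.rail_env import RailEnv

# Build a small environment exactly as Scenario_Generator.generate_complex_scenario does.
env = RailEnv(width=15,
              height=15,
              rail_generator=complex_rail_generator(nr_start_goal=6, nr_extra=30,
                                                    min_dist=10, max_dist=99999, seed=0),
              number_of_agents=3)
env.reset()
action_size = 4

for step in range(30):
    # Random action per agent instead of the hard-coded "2" used in run_demo.
    action_dict = {a: np.random.choice(action_size) for a in range(env.get_num_agents())}
    next_obs, all_rewards, done, _ = env.step(action_dict)
    if done['__all__']:
        break
```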
diff --git a/examples/training_navigation.py b/examples/training_navigation.py
deleted file mode 100644
index 0cb9d275eda2a01932c4f632c1abd4fb662f4037..0000000000000000000000000000000000000000
--- a/examples/training_navigation.py
+++ /dev/null
@@ -1,214 +0,0 @@
-import random
-from collections import deque
-
-import numpy as np
-import torch
-
-from flatland.baselines.dueling_double_dqn import Agent
-from flatland.envs.generators import complex_rail_generator
-from flatland.envs.rail_env import RailEnv
-from flatland.utils.rendertools import RenderTool
-
-random.seed(1)
-np.random.seed(1)
-
-# Example generate a rail given a manual specification,
-# a map of tuples (cell_type, rotation)
-transition_probability = [15,  # empty cell - Case 0
-                          5,  # Case 1 - straight
-                          5,  # Case 2 - simple switch
-                          1,  # Case 3 - diamond crossing
-                          1,  # Case 4 - single slip
-                          1,  # Case 5 - double slip
-                          1,  # Case 6 - symmetrical
-                          0,  # Case 7 - dead end
-                          1,  # Case 1b (8)  - simple turn right
-                          1,  # Case 1c (9)  - simple turn left
-                          1]  # Case 2b (10) - simple switch mirrored
-
-# Example generate a random rail
-"""
-env = RailEnv(width=20,
-              height=20,
-              rail_generator=random_rail_generator(cell_type_relative_proportion=transition_probability),
-              number_of_agents=1)
-"""
-env = RailEnv(width=15,
-              height=15,
-              rail_generator=complex_rail_generator(nr_start_goal=10, nr_extra=10, min_dist=10, max_dist=99999, seed=0),
-              number_of_agents=5)
-
-"""
-env = RailEnv(width=20,
-              height=20,
-              rail_generator=rail_from_list_of_saved_GridTransitionMap_generator(
-                      ['../notebooks/temp.npy']),
-              number_of_agents=3)
-
-"""
-env_renderer = RenderTool(env, gl="QTSVG")
-handle = env.get_agent_handles()
-
-state_size = 105 * 2
-action_size = 4
-n_trials = 15000
-eps = 1.
-eps_end = 0.005
-eps_decay = 0.9995
-action_dict = dict()
-final_action_dict = dict()
-scores_window = deque(maxlen=100)
-done_window = deque(maxlen=100)
-time_obs = deque(maxlen=2)
-scores = []
-dones_list = []
-action_prob = [0] * 4
-agent_obs = [None] * env.get_num_agents()
-agent_next_obs = [None] * env.get_num_agents()
-agent = Agent(state_size, action_size, "FC", 0)
-agent.qnetwork_local.load_state_dict(torch.load('./flatland/baselines/Nets/avoid_checkpoint15000.pth'))
-
-demo = True
-
-
-def max_lt(seq, val):
-    """
-    Return greatest item in seq for which item < val applies.
-    None is returned if seq was empty or all items in seq were >= val.
-    """
-    max = 0
-    idx = len(seq) - 1
-    while idx >= 0:
-        if seq[idx] < val and seq[idx] >= 0 and seq[idx] > max:
-            max = seq[idx]
-        idx -= 1
-    return max
-
-
-def min_lt(seq, val):
-    """
-    Return smallest item in seq for which item > val applies.
-    None is returned if seq was empty or all items in seq were >= val.
-    """
-    min = np.inf
-    idx = len(seq) - 1
-    while idx >= 0:
-        if seq[idx] > val and seq[idx] < min:
-            min = seq[idx]
-        idx -= 1
-    return min
-
-
-def norm_obs_clip(obs, clip_min=-1, clip_max=1):
-    """
-    This function returns the difference between min and max value of an observation
-    :param obs: Observation that should be normalized
-    :param clip_min: min value where observation will be clipped
-    :param clip_max: max value where observation will be clipped
-    :return: returnes normalized and clipped observatoin
-    """
-    max_obs = max(1, max_lt(obs, 1000))
-    min_obs = max(0, min_lt(obs, 0))
-    if max_obs == min_obs:
-        return np.clip(np.array(obs) / max_obs, clip_min, clip_max)
-    norm = np.abs(max_obs - min_obs)
-    if norm == 0:
-        norm = 1.
-    return np.clip((np.array(obs) - min_obs) / norm, clip_min, clip_max)
-
-
-for trials in range(1, n_trials + 1):
-
-    # Reset environment
-    obs = env.reset()
-
-    final_obs = obs.copy()
-    final_obs_next = obs.copy()
-
-    for a in range(env.get_num_agents()):
-        data, distance = env.obs_builder.split_tree(tree=np.array(obs[a]), num_features_per_node=5, current_depth=0)
-
-        data = norm_obs_clip(data)
-        distance = norm_obs_clip(distance)
-        obs[a] = np.concatenate((data, distance))
-
-    for i in range(2):
-        time_obs.append(obs)
-    # env.obs_builder.util_print_obs_subtree(tree=obs[0], num_elements_per_node=5)
-    for a in range(env.get_num_agents()):
-        agent_obs[a] = np.concatenate((time_obs[0][a], time_obs[1][a]))
-
-    score = 0
-    env_done = 0
-    # Run episode
-    for step in range(100):
-        if demo:
-            env_renderer.renderEnv(show=True)
-        # print(step)
-        # Action
-        for a in range(env.get_num_agents()):
-            if demo:
-                eps = 0
-            # action = agent.act(np.array(obs[a]), eps=eps)
-            action = agent.act(agent_obs[a])
-            action_prob[action] += 1
-            action_dict.update({a: action})
-
-        # Environment step
-        next_obs, all_rewards, done, _ = env.step(action_dict)
-        for a in range(env.get_num_agents()):
-            data, distance = env.obs_builder.split_tree(tree=np.array(next_obs[a]), num_features_per_node=5,
-                                                        current_depth=0)
-            data = norm_obs_clip(data)
-            distance = norm_obs_clip(distance)
-            next_obs[a] = np.concatenate((data, distance))
-
-        time_obs.append(next_obs)
-
-        # Update replay buffer and train agent
-        for a in range(env.get_num_agents()):
-            agent_next_obs[a] = np.concatenate((time_obs[0][a], time_obs[1][a]))
-
-            if done[a]:
-                final_obs[a] = agent_obs[a].copy()
-                final_obs_next[a] = agent_next_obs[a].copy()
-                final_action_dict.update({a: action_dict[a]})
-            if not demo and not done[a]:
-                agent.step(agent_obs[a], action_dict[a], all_rewards[a], agent_next_obs[a], done[a])
-            score += all_rewards[a]
-
-        agent_obs = agent_next_obs.copy()
-        if done['__all__']:
-            env_done = 1
-            for a in range(env.get_num_agents()):
-                agent.step(final_obs[a], final_action_dict[a], all_rewards[a], final_obs_next[a], done[a])
-            break
-    # Epsilon decay
-    eps = max(eps_end, eps_decay * eps)  # decrease epsilon
-
-    done_window.append(env_done)
-    scores_window.append(score)  # save most recent score
-    scores.append(np.mean(scores_window))
-    dones_list.append((np.mean(done_window)))
-
-    print('\rTraining {} Agents.\t Episode {}\t Average Score: {:.0f}\tDones: {:.2f}%' +
-          '\tEpsilon: {:.2f} \t Action Probabilities: \t {}'.format(
-              env.get_num_agents(),
-              trials,
-              np.mean(scores_window),
-              100 * np.mean(done_window),
-              eps, action_prob / np.sum(action_prob)), end=" ")
-
-    if trials % 100 == 0:
-        print(
-            '\rTraining {} Agents.\t Episode {}\t Average Score: {:.0f}\tDones: {:.2f}%' +
-            '\tEpsilon: {:.2f} \t Action Probabilities: \t {}'.format(
-                env.get_num_agents(),
-                trials,
-                np.mean(scores_window),
-                100 * np.mean(done_window),
-                eps,
-                action_prob / np.sum(action_prob)))
-        torch.save(agent.qnetwork_local.state_dict(),
-                   '../flatland/baselines/Nets/avoid_checkpoint' + str(trials) + '.pth')
-        action_prob = [1] * 4
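
The deleted training script normalizes every tree observation with `norm_obs_clip`, which rescales values by the observed range (ignoring the sentinel values at or above 1000 and below 0) and then clips to a bounded interval. Below is a compacted, pure-NumPy restatement of the deleted helpers applied to a toy observation, kept as a sketch of what the removed code did rather than a drop-in replacement.

```python
import numpy as np


def max_lt(seq, val):
    # Largest item in seq that is < val and >= 0 (0 if none) -- as in the deleted helper.
    best = 0
    for x in seq:
        if 0 <= x < val and x > best:
            best = x
    return best


def min_lt(seq, val):
    # Smallest item in seq that is > val (np.inf if none).
    best = np.inf
    for x in seq:
        if val < x < best:
            best = x
    return best


def norm_obs_clip(obs, clip_min=-1, clip_max=1):
    # Rescale by the observed range, then clip to [clip_min, clip_max].
    max_obs = max(1, max_lt(obs, 1000))
    min_obs = max(0, min_lt(obs, 0))
    if max_obs == min_obs:
        return np.clip(np.array(obs) / max_obs, clip_min, clip_max)
    norm = np.abs(max_obs - min_obs)
    if norm == 0:
        norm = 1.
    return np.clip((np.array(obs) - min_obs) / norm, clip_min, clip_max)


toy_obs = [3.0, 7.0, np.inf, 0.0, 12.0]   # inf marks an unreachable branch in the tree
print(norm_obs_clip(toy_obs))             # values rescaled and clipped to [-1, 1]; inf becomes 1
```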
diff --git a/flatland/baselines/Nets/avoid_checkpoint15000.pth b/flatland/baselines/Nets/avoid_checkpoint15000.pth
deleted file mode 100644
index 14882a37a86085b137f4422b6bba75f387a2d3b5..0000000000000000000000000000000000000000
Binary files a/flatland/baselines/Nets/avoid_checkpoint15000.pth and /dev/null differ
diff --git a/flatland/baselines/__init__.py b/flatland/baselines/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/flatland/baselines/dueling_double_dqn.py b/flatland/baselines/dueling_double_dqn.py
deleted file mode 100644
index 41a27bf8431df7812f1b4f63e797aa426c17edf1..0000000000000000000000000000000000000000
--- a/flatland/baselines/dueling_double_dqn.py
+++ /dev/null
@@ -1,200 +0,0 @@
-import copy
-import os
-import random
-from collections import namedtuple, deque, Iterable
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torch.optim as optim
-
-from flatland.baselines.model import QNetwork, QNetwork2
-
-BUFFER_SIZE = int(1e5)  # replay buffer size
-BATCH_SIZE = 512  # minibatch size
-GAMMA = 0.99  # discount factor 0.99
-TAU = 1e-3  # for soft update of target parameters
-LR = 0.5e-4  # learning rate 5
-UPDATE_EVERY = 10  # how often to update the network
-double_dqn = True  # If using double dqn algorithm
-input_channels = 5  # Number of Input channels
-
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-device = torch.device("cpu")
-print(device)
-
-
-class Agent:
-    """Interacts with and learns from the environment."""
-
-    def __init__(self, state_size, action_size, net_type, seed, double_dqn=True, input_channels=5):
-        """Initialize an Agent object.
-
-        Params
-        ======
-            state_size (int): dimension of each state
-            action_size (int): dimension of each action
-            seed (int): random seed
-        """
-        self.state_size = state_size
-        self.action_size = action_size
-        self.seed = random.seed(seed)
-        self.version = net_type
-        self.double_dqn = double_dqn
-        # Q-Network
-        if self.version == "Conv":
-            self.qnetwork_local = QNetwork2(state_size, action_size, seed, input_channels).to(device)
-            self.qnetwork_target = copy.deepcopy(self.qnetwork_local)
-        else:
-            self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
-            self.qnetwork_target = copy.deepcopy(self.qnetwork_local)
-
-        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
-
-        # Replay memory
-        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
-        # Initialize time step (for updating every UPDATE_EVERY steps)
-        self.t_step = 0
-
-    def save(self, filename):
-        torch.save(self.qnetwork_local.state_dict(), filename + ".local")
-        torch.save(self.qnetwork_target.state_dict(), filename + ".target")
-
-    def load(self, filename):
-        if os.path.exists(filename + ".local"):
-            self.qnetwork_local.load_state_dict(torch.load(filename + ".local"))
-        if os.path.exists(filename + ".target"):
-            self.qnetwork_target.load_state_dict(torch.load(filename + ".target"))
-
-    def step(self, state, action, reward, next_state, done, train=True):
-        # Save experience in replay memory
-        self.memory.add(state, action, reward, next_state, done)
-
-        # Learn every UPDATE_EVERY time steps.
-        self.t_step = (self.t_step + 1) % UPDATE_EVERY
-        if self.t_step == 0:
-            # If enough samples are available in memory, get random subset and learn
-            if len(self.memory) > BATCH_SIZE:
-                experiences = self.memory.sample()
-                if train:
-                    self.learn(experiences, GAMMA)
-
-    def act(self, state, eps=0.):
-        """Returns actions for given state as per current policy.
-
-        Params
-        ======
-            state (array_like): current state
-            eps (float): epsilon, for epsilon-greedy action selection
-        """
-        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
-        self.qnetwork_local.eval()
-        with torch.no_grad():
-            action_values = self.qnetwork_local(state)
-        self.qnetwork_local.train()
-
-        # Epsilon-greedy action selection
-        if random.random() > eps:
-            return np.argmax(action_values.cpu().data.numpy())
-        else:
-            return random.choice(np.arange(self.action_size))
-
-    def learn(self, experiences, gamma):
-
-        """Update value parameters using given batch of experience tuples.
-
-        Params
-        ======
-            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
-            gamma (float): discount factor
-        """
-        states, actions, rewards, next_states, dones = experiences
-
-        # Get expected Q values from local model
-        Q_expected = self.qnetwork_local(states).gather(1, actions)
-
-        if self.double_dqn:
-            # Double DQN
-            q_best_action = self.qnetwork_local(next_states).max(1)[1]
-            Q_targets_next = self.qnetwork_target(next_states).gather(1, q_best_action.unsqueeze(-1))
-        else:
-            # DQN
-            Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(-1)
-
-            # Compute Q targets for current states
-
-        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
-
-        # Compute loss
-        loss = F.mse_loss(Q_expected, Q_targets)
-        # Minimize the loss
-        self.optimizer.zero_grad()
-        loss.backward()
-        self.optimizer.step()
-
-        # ------------------- update target network ------------------- #
-        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
-
-    def soft_update(self, local_model, target_model, tau):
-        """Soft update model parameters.
-        θ_target = τ*θ_local + (1 - τ)*θ_target
-
-        Params
-        ======
-            local_model (PyTorch model): weights will be copied from
-            target_model (PyTorch model): weights will be copied to
-            tau (float): interpolation parameter
-        """
-        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
-            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
-
-
-class ReplayBuffer:
-    """Fixed-size buffer to store experience tuples."""
-
-    def __init__(self, action_size, buffer_size, batch_size, seed):
-        """Initialize a ReplayBuffer object.
-
-        Params
-        ======
-            action_size (int): dimension of each action
-            buffer_size (int): maximum size of buffer
-            batch_size (int): size of each training batch
-            seed (int): random seed
-        """
-        self.action_size = action_size
-        self.memory = deque(maxlen=buffer_size)
-        self.batch_size = batch_size
-        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
-        self.seed = random.seed(seed)
-
-    def add(self, state, action, reward, next_state, done):
-        """Add a new experience to memory."""
-        e = self.experience(np.expand_dims(state, 0), action, reward, np.expand_dims(next_state, 0), done)
-        self.memory.append(e)
-
-    def sample(self):
-        """Randomly sample a batch of experiences from memory."""
-        experiences = random.sample(self.memory, k=self.batch_size)
-
-        states = torch.from_numpy(self.__v_stack_impr([e.state for e in experiences if e is not None])) \
-            .float().to(device)
-        actions = torch.from_numpy(self.__v_stack_impr([e.action for e in experiences if e is not None])) \
-            .long().to(device)
-        rewards = torch.from_numpy(self.__v_stack_impr([e.reward for e in experiences if e is not None])) \
-            .float().to(device)
-        next_states = torch.from_numpy(self.__v_stack_impr([e.next_state for e in experiences if e is not None])) \
-            .float().to(device)
-        dones = torch.from_numpy(self.__v_stack_impr([e.done for e in experiences if e is not None]).astype(np.uint8)) \
-            .float().to(device)
-
-        return (states, actions, rewards, next_states, dones)
-
-    def __len__(self):
-        """Return the current size of internal memory."""
-        return len(self.memory)
-
-    def __v_stack_impr(self, states):
-        sub_dim = len(states[0][0]) if isinstance(states[0], Iterable) else 1
-        np_states = np.reshape(np.array(states), (len(states), sub_dim))
-        return np_states
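
The deleted agent builds its bootstrap target with double DQN (the local network picks the greedy next action, the target network evaluates it) and keeps the target network close to the local one via the soft update θ_target = τ·θ_local + (1 − τ)·θ_target from the docstring above. A minimal PyTorch sketch of just those two steps, using throwaway linear networks and random tensors instead of the deleted `QNetwork`:

```python
import torch
import torch.nn as nn

GAMMA, TAU = 0.99, 1e-3
state_size, action_size, batch = 8, 4, 32

qnetwork_local = nn.Linear(state_size, action_size)
qnetwork_target = nn.Linear(state_size, action_size)

next_states = torch.randn(batch, state_size)
rewards = torch.randn(batch, 1)
dones = torch.zeros(batch, 1)

# Double DQN target: the online net selects the action, the target net scores it.
q_best_action = qnetwork_local(next_states).max(1)[1]                       # shape (batch,)
q_targets_next = qnetwork_target(next_states).gather(1, q_best_action.unsqueeze(-1))
q_targets = rewards + GAMMA * q_targets_next * (1 - dones)                  # shape (batch, 1)

# Soft update: theta_target <- tau * theta_local + (1 - tau) * theta_target
for t_param, l_param in zip(qnetwork_target.parameters(), qnetwork_local.parameters()):
    t_param.data.copy_(TAU * l_param.data + (1.0 - TAU) * t_param.data)
```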
diff --git a/flatland/baselines/model.py b/flatland/baselines/model.py
deleted file mode 100644
index 7a5b3d613342a4fba8e2c8f1f45df21381e12684..0000000000000000000000000000000000000000
--- a/flatland/baselines/model.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class QNetwork(nn.Module):
-    def __init__(self, state_size, action_size, seed, hidsize1=128, hidsize2=128):
-        super(QNetwork, self).__init__()
-
-        self.fc1_val = nn.Linear(state_size, hidsize1)
-        self.fc2_val = nn.Linear(hidsize1, hidsize2)
-        self.fc3_val = nn.Linear(hidsize2, 1)
-
-        self.fc1_adv = nn.Linear(state_size, hidsize1)
-        self.fc2_adv = nn.Linear(hidsize1, hidsize2)
-        self.fc3_adv = nn.Linear(hidsize2, action_size)
-
-    def forward(self, x):
-        val = F.relu(self.fc1_val(x))
-        val = F.relu(self.fc2_val(val))
-        val = self.fc3_val(val)
-
-        # advantage calculation
-        adv = F.relu(self.fc1_adv(x))
-        adv = F.relu(self.fc2_adv(adv))
-        adv = self.fc3_adv(adv)
-        return val + adv - adv.mean()
-
-
-class QNetwork2(nn.Module):
-    def __init__(self, state_size, action_size, seed, input_channels, hidsize1=128, hidsize2=64):
-        super(QNetwork2, self).__init__()
-        self.conv1 = nn.Conv2d(input_channels, 16, kernel_size=3, stride=1)
-        self.bn1 = nn.BatchNorm2d(16)
-        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=3)
-        self.bn2 = nn.BatchNorm2d(32)
-        self.conv3 = nn.Conv2d(32, 64, kernel_size=5, stride=3)
-        self.bn3 = nn.BatchNorm2d(64)
-
-        self.fc1_val = nn.Linear(6400, hidsize1)
-        self.fc2_val = nn.Linear(hidsize1, hidsize2)
-        self.fc3_val = nn.Linear(hidsize2, 1)
-
-        self.fc1_adv = nn.Linear(6400, hidsize1)
-        self.fc2_adv = nn.Linear(hidsize1, hidsize2)
-        self.fc3_adv = nn.Linear(hidsize2, action_size)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        x = F.relu(self.conv2(x))
-        x = F.relu(self.conv3(x))
-
-        # value function approximation
-        val = F.relu(self.fc1_val(x.view(x.size(0), -1)))
-        val = F.relu(self.fc2_val(val))
-        val = self.fc3_val(val)
-
-        # advantage calculation
-        adv = F.relu(self.fc1_adv(x.view(x.size(0), -1)))
-        adv = F.relu(self.fc2_adv(adv))
-        adv = self.fc3_adv(adv)
-        return val + adv - adv.mean()
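
Both deleted networks aggregate a scalar value stream and a per-action advantage stream as `val + adv - adv.mean()`. A toy sketch of that aggregation on fixed numbers is shown below; note that the commonly used dueling aggregation subtracts the per-state mean over actions (`adv.mean(dim=1, keepdim=True)`), whereas the deleted code takes a global mean, which coincides with it only for a batch of one sample.

```python
import torch

# Two states, three actions: outputs of the value and advantage streams.
val = torch.tensor([[1.0], [2.0]])                      # shape (2, 1)
adv = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]])  # shape (2, 3)

# Aggregation as written in the deleted QNetwork.forward (global advantage mean).
q_global = val + adv - adv.mean()

# Per-state aggregation used in the standard dueling architecture.
q_per_state = val + adv - adv.mean(dim=1, keepdim=True)

print(q_global)     # mean over all six advantages (2.5) subtracted everywhere
print(q_per_state)  # row means (1.0 and 4.0) subtracted per state
```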
diff --git a/flatland/envs/observations.py b/flatland/envs/observations.py
index 7de6327121cf7a650bd36fad6faf54755a86b2d5..4d5fb44d98698072993cb6334e298033c9914b31 100644
--- a/flatland/envs/observations.py
+++ b/flatland/envs/observations.py
@@ -483,11 +483,15 @@ class GlobalObsForRailEnv(ObservationBuilder):
         - transition map array with dimensions (env.height, env.width, 16),
           assuming 16 bits encoding of transitions.
 
-        - Four 2D arrays containing respectively the position of the given agent,
-          the position of its target, the positions of the other agents and of
-          their target.
+        - A 3D array (map_height, map_width, 3) containing respectively the position of the given agent,
+          the position of its target and the positions of the other agents' targets.
+
+        - A 3D array (map_height, map_width, 4) containing the one-hot encoding of the directions
+          of the other agents at their position coordinates.
+
+        - A 4-element array with the one-hot encoding of the direction of the agent of interest.
+
 
-        - A 4 elements array with one of encoding of the direction.
     """
 
     def __init__(self):
@@ -514,21 +518,22 @@ class GlobalObsForRailEnv(ObservationBuilder):
         #     self.targets[target_pos] += 1
 
     def get(self, handle):
-        obs = np.zeros((4, self.env.height, self.env.width))
+        obs_map_state = np.zeros((self.env.height, self.env.width, 3))
+        obs_other_agents_state = np.zeros((self.env.height, self.env.width, 4))
         agents = self.env.agents
         agent = agents[handle]
 
         agent_pos = agents[handle].position
-        obs[0][agent_pos] += 1
-        obs[1][agent.target] += 1
+        obs_map_state[agent_pos][0] += 1
+        obs_map_state[agent.target][1] += 1
 
         for i in range(len(agents)):
             if i != handle:  # TODO: handle used as index...?
                 agent2 = agents[i]
-                obs[3][agent2.position] += 1
-                obs[2][agent2.target] += 1
+                obs_other_agents_state[agent2.position][agent2.direction] = 1
+                obs_map_state[agent2.target][2] += 1
 
         direction = np.zeros(4)
         direction[agent.direction] = 1
 
-        return self.rail_obs, obs, direction
+        return self.rail_obs, obs_map_state, obs_other_agents_state, direction
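
With this change `GlobalObsForRailEnv.get` returns a 4-tuple instead of the previous `(rail_obs, obs, direction)` triple. As a hedged sketch, here is one way a consumer might unpack and flatten the new observation, using dummy arrays with the shapes documented above rather than a live environment:

```python
import numpy as np

height, width = 15, 15

# Dummy observation with the documented shapes.
rail_obs = np.zeros((height, width, 16))               # transition map, 16-bit encoding
obs_map_state = np.zeros((height, width, 3))           # own position, own target, other targets
obs_other_agents_state = np.zeros((height, width, 4))  # one-hot directions of other agents
direction = np.zeros(4)                                # one-hot direction of this agent

observation = (rail_obs, obs_map_state, obs_other_agents_state, direction)

# Flatten everything into one vector, e.g. to feed a fully connected network.
flat = np.concatenate([part.flatten() for part in observation])
print(flat.shape)   # (15*15*16 + 15*15*3 + 15*15*4 + 4,) = (5179,)
```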
diff --git a/flatland/utils/editor.py b/flatland/utils/editor.py
index 7e813d763d7f26a96a1e1ca1f4c3d1bceef68ee3..59a55db5218acabb67904692a0b4cc8207b0df00 100644
--- a/flatland/utils/editor.py
+++ b/flatland/utils/editor.py
@@ -98,9 +98,15 @@ class View(object):
         self.wFilename.observe(self.controller.setFilename, names="value")
 
         # Size of environment when regenerating
-        self.wRegenSize = IntSlider(value=10, min=5, max=100, step=5, description="Regen Size",
+
+
+        self.wRegenSizeWidth = IntSlider(value=10, min=5, max=100, step=5, description="Regen Size (Width)",
+            tip="Click Regenerate after changing this")
+        self.wRegenSizeWidth.observe(self.controller.setRegenSizeWidth, names="value")
+
+        self.wRegenSizeHeight = IntSlider(value=10, min=5, max=100, step=5, description="Regen Size (Height)",
             tip="Click Regenerate after changing this")
-        self.wRegenSize.observe(self.controller.setRegenSize, names="value")
+        self.wRegenSizeHeight.observe(self.controller.setRegenSizeHeight, names="value")
 
         # Number of Agents when regenerating
         self.wRegenNAgents = IntSlider(value=1, min=0, max=20, step=1, description="# Agents",
@@ -115,7 +121,7 @@ class View(object):
             self.wTab.set_title(i, title)
         self.wTab.children = [
             VBox([self.wDebug, self.wDebug_move, self.wShowObs]),
-            VBox([self.wRegenSize, self.wRegenNAgents, self.wRegenMethod, self.wReplaceAgents])]
+            VBox([self.wRegenSizeWidth, self.wRegenSizeHeight, self.wRegenNAgents, self.wRegenMethod, self.wReplaceAgents])]
 
         # Progress bar intended for stepping in the background (not yet working)
         self.wProg_steps = ipywidgets.IntProgress(value=0, min=0, max=20, step=1, description="Step")
@@ -126,6 +132,7 @@ class View(object):
             dict(name="Clear", method=self.controller.clear, tip="Clear rails and agents"),
             dict(name="Reset", method=self.controller.reset,
                  tip="Standard env reset, including regen rail + agents"),
+            dict(name="Rotate Agent", method=self.controller.rotate_agent, tip="Rotate selected agent"),
             dict(name="Restart Agents", method=self.controller.restartAgents,
                  tip="Move agents back to start positions"),
             dict(name="Regenerate", method=self.controller.regenerate,
@@ -314,9 +321,20 @@ class Controller(object):
 
     def reset(self, event):
         self.log("Reset - nAgents:", self.view.wRegenNAgents.value)
+        self.log("Reset - size:", self.model.regen_size_width)
+        self.log("Reset - size:", self.model.regen_size_height)
         self.model.reset(replace_agents=self.view.wReplaceAgents.value,
                          nAgents=self.view.wRegenNAgents.value)
 
+    def rotate_agent(self, event):
+        self.log("Rotate Agent:", self.model.iSelectedAgent)
+        if self.model.iSelectedAgent is not None:
+            agent = self.model.env.agents_static[self.model.iSelectedAgent]
+            if agent is not None:
+                # rotate only the currently selected agent by 90 degrees
+                agent.direction = (agent.direction + 1) % 4
+        self.model.redraw()
+
     def restartAgents(self, event):
         self.log("Restart Agents - nAgents:", self.view.wRegenNAgents.value)
         self.model.restartAgents()
@@ -326,8 +344,11 @@ class Controller(object):
         nAgents = self.view.wRegenNAgents.value
         self.model.regenerate(method, nAgents)
 
-    def setRegenSize(self, event):
-        self.model.setRegenSize(event["new"])
+    def setRegenSizeWidth(self, event):
+        self.model.setRegenSizeWidth(event["new"])
+
+    def setRegenSizeHeight(self, event):
+        self.model.setRegenSizeHeight(event["new"])
 
     def load(self, event):
         self.model.load()
@@ -355,7 +376,8 @@ class EditorModel(object):
     def __init__(self, env):
         self.view = None
         self.env = env
-        self.regen_size = 10
+        self.regen_size_width = 10
+        self.regen_size_height = 10
 
         self.lrcStroke = []
         self.iTransLast = -1
@@ -607,6 +629,13 @@ class EditorModel(object):
             self.log("load file: ", self.env_filename)
             # self.env.rail.load_transition_map(self.env_filename, override_gridsize=True)
             self.env.load(self.env_filename)
+
+            if self.regen_size_height != self.env.height and self.regen_size_width != self.env.width:
+                self.regen_size_height = self.env.height
+                self.regen_size_width = self.env.width
+                self.regenerate(None, 0, self.env)
+                self.env.load(self.env_filename)
+
             self.fix_env()
             self.set_env(self.env)
             self.redraw()
@@ -618,8 +647,10 @@ class EditorModel(object):
         # self.env.rail.save_transition_map(self.env_filename)
         self.env.save(self.env_filename)
 
-    def regenerate(self, method=None, nAgents=0):
-        self.log("Regenerate size", self.regen_size)
+    def regenerate(self, method=None, nAgents=0, env=None):
+        self.log("Regenerate size",
+                 self.regen_size_width,
+                 self.regen_size_height)
 
         if method is None or method == "Empty":
             fnMethod = empty_rail_generator()
@@ -628,12 +659,15 @@ class EditorModel(object):
         else:
             fnMethod = complex_rail_generator(nr_start_goal=5, nr_extra=20, min_dist=12)
 
-        self.env = RailEnv(width=self.regen_size,
-                           height=self.regen_size,
-                           rail_generator=fnMethod,
-                           # number_of_agents=self.env.get_num_agents(),
-                           number_of_agents=nAgents,
-                           obs_builder_object=TreeObsForRailEnv(max_depth=2))
+        if env is None:
+            self.env = RailEnv(width=self.regen_size_width,
+                               height=self.regen_size_height,
+                               rail_generator=fnMethod,
+                               # number_of_agents=self.env.get_num_agents(),
+                               number_of_agents=nAgents,
+                               obs_builder_object=TreeObsForRailEnv(max_depth=2))
+        else:
+            self.env = env
         self.env.reset(regen_rail=True)
         self.fix_env()
         self.set_env(self.env)
@@ -642,8 +676,13 @@ class EditorModel(object):
         # self.view.init_canvas() # Can't do init_canvas - need to keep the same canvas widget!
         self.redraw()
 
-    def setRegenSize(self, size):
-        self.regen_size = size
+
+    def setRegenSizeWidth(self, size):
+        self.regen_size_width = size
+
+    def setRegenSizeHeight(self, size):
+        self.regen_size_height = size
+
 
     def find_agent_at(self, rcCell):
         for iAgent, agent in enumerate(self.env.agents_static):
@@ -666,7 +705,7 @@ class EditorModel(object):
             # No
             if self.iSelectedAgent is None:
                 # Create a new agent and select it.
-                agent_static = EnvAgentStatic(rcCell, 0, rcCell)
+                agent_static = EnvAgentStatic(rcCell, 0, rcCell)
                 self.iSelectedAgent = self.env.add_agent_static(agent_static)
                 self.player = None  # will need to start a new player
             else:
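
The new width and height sliders follow the same ipywidgets pattern used throughout the editor: create an `IntSlider`, observe its `value` trait, and read the updated value from the change dict in the callback. A standalone sketch of that wiring outside the editor classes (the variable and function names here are illustrative, not part of the editor):

```python
from ipywidgets import IntSlider

regen_size_width = 10   # mirrors EditorModel.regen_size_width


def set_regen_size_width(change):
    # ipywidgets passes a change dict; "new" holds the updated slider value.
    global regen_size_width
    regen_size_width = change["new"]


wRegenSizeWidth = IntSlider(value=10, min=5, max=100, step=5,
                            description="Regen Size (Width)")
wRegenSizeWidth.observe(set_regen_size_width, names="value")
# In a notebook, display(wRegenSizeWidth) renders the slider; dragging it
# updates regen_size_width through the observer above.
```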
diff --git a/flatland/utils/graphics_layer.py b/flatland/utils/graphics_layer.py
index 4cfcc64bffb82f91a0f36822188db297bc1ed37e..527944b7831c7b172a7ffe133a9a6d7041918b27 100644
--- a/flatland/utils/graphics_layer.py
+++ b/flatland/utils/graphics_layer.py
@@ -68,3 +68,6 @@ class GraphicsLayer(object):
 
     def setAgentAt(self, iAgent, row, col, iDirIn, iDirOut):
         pass
+
+    def resize(self, env):
+        pass
diff --git a/flatland/utils/render_qt.py b/flatland/utils/render_qt.py
index 73b8ca77a33042bf181097d4b1a0a1afcb48b56e..8de407ac1c1c22cd0c310a17bf2ba4138081708e 100644
--- a/flatland/utils/render_qt.py
+++ b/flatland/utils/render_qt.py
@@ -1,14 +1,15 @@
-from flatland.utils.graphics_qt import QtRenderer
-from numpy import array
-from flatland.utils.graphics_layer import GraphicsLayer
-# from matplotlib import pyplot as plt
-import numpy as np
 import time
-from flatland.utils.svg import Track, Zug
-from flatland.envs.agent_utils import EnvAgent
 
-from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QGridLayout
+# from matplotlib import pyplot as plt
+import numpy as np
 from PyQt5 import QtSvg
+from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QGridLayout
+from numpy import array
+
+from flatland.envs.agent_utils import EnvAgent
+from flatland.utils.graphics_layer import GraphicsLayer
+from flatland.utils.graphics_qt import QtRenderer
+from flatland.utils.svg import Track, Zug
 
 
 def transform_string_svg(sSVG):
@@ -16,6 +17,7 @@ def transform_string_svg(sSVG):
     bySVG = bytearray(sSVG, encoding='utf-8')
     return bySVG
 
+
 def create_QtSvgWidget_from_svg_string(sSVG):
     svgWidget = QtSvg.QSvgWidget()
     ret = svgWidget.renderer().load(transform_string_svg(sSVG))
@@ -45,10 +47,10 @@ class QTGL(GraphicsLayer):
         # use the renderer to scale back to the desired size
         self.qtr.scale(self.tile_size / self.cell_pixels, self.tile_size / self.cell_pixels)
 
-        self.tColBg = (255, 255, 255)     # white background
+        self.tColBg = (255, 255, 255)  # white background
         # self.tColBg = (220, 120, 40)    # background color
-        self.tColRail = (0, 0, 0)         # black rails
-        self.tColGrid = (230,) * 3        # light grey for grid
+        self.tColRail = (0, 0, 0)  # black rails
+        self.tColGrid = (230,) * 3  # light grey for grid
 
         # Draw the background of the in-world cells
         self.qtr.fillRect(0, 0, self.widthPx, self.heightPx, *self.tColBg)
@@ -195,8 +197,8 @@ class QTSVG(GraphicsLayer):
 
                 # We can only reuse the image if noth new and old are straight and the same:
                 if iDirIn == iDirOut and \
-                        agentPrev.direction == iDirIn and \
-                        agentPrev.old_direction == agentPrev.direction:
+                    agentPrev.direction == iDirIn and \
+                    agentPrev.old_direction == agentPrev.direction:
                     return
                 else:
                     # need to load new image
@@ -222,6 +224,13 @@ class QTSVG(GraphicsLayer):
     def show(self, block=False):
         self.wMain.update()
 
+    def resize(self, env):
+        screen_resolution = self.app.desktop().screenGeometry()
+        width, height = screen_resolution.width(), screen_resolution.height()
+        w = int(np.ceil(width * 0.8 / env.width))
+        h = int(np.ceil(height * 0.8 / env.height))
+        self.wWinMain.resize(env.width * w, env.height * h)
+        self.wWinMain.move((width - env.width * w) // 2, (height - env.height * h) // 2)
 
 def main2():
     gl = QTGL(10, 10)
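
The new `QTSVG.resize` picks per-cell pixel sizes so the window covers roughly 80% of the screen in each dimension and then centers it. The arithmetic, separated from Qt, with made-up example numbers for the screen and grid:

```python
import numpy as np

# Example numbers only: a 1920x1080 screen and a 30x20 flatland grid.
screen_width, screen_height = 1920, 1080
env_width, env_height = 30, 20

# Pixels per cell so the grid fills at most ~80% of the screen in each direction.
w = int(np.ceil(screen_width * 0.8 / env_width))     # 52 px per column
h = int(np.ceil(screen_height * 0.8 / env_height))   # 44 px per row

window_width, window_height = env_width * w, env_height * h
x = (screen_width - window_width) // 2               # centering offsets
y = (screen_height - window_height) // 2
print(window_width, window_height, x, y)             # 1560 880 180 100
```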
diff --git a/flatland/utils/rendertools.py b/flatland/utils/rendertools.py
index 713f65f0d726f1b89778982cf0fe1443b4e24282..4a1827f1df44ce8168d4facafc2fe0b686006eac 100644
--- a/flatland/utils/rendertools.py
+++ b/flatland/utils/rendertools.py
@@ -137,6 +137,9 @@ class RenderTool(object):
 
         self.new_rail = True
 
+    def resize(self):
+        self.gl.resize(self.env)
+
     def set_new_rail(self):
         self.new_rail = True
 
@@ -762,18 +765,12 @@ class RenderTool(object):
                 iAction = action_dict[iAgent]
                 new_direction, action_isValid = self.env.check_action(agent, iAction)
 
-
-            # ** TODO ***
-            # why should we only update if the action is valid ?
-            if True:
-                if action_isValid:
-                    self.gl.setAgentAt(iAgent, *agent.position, agent.direction, new_direction, color=oColor)
-                else:
-                    # pass
-                    print("invalid action - agent ", iAgent, " bend ", agent.direction, new_direction)
-                    self.gl.setAgentAt(iAgent, *agent.position, agent.direction, new_direction)
-            else:
+            if action_isValid:
                 self.gl.setAgentAt(iAgent, *agent.position, agent.direction, new_direction, color=oColor)
+            else:
+                # pass
+                print("invalid action - agent ", iAgent, " bend ", agent.direction, new_direction)
+                self.gl.setAgentAt(iAgent, *agent.position, agent.direction, new_direction)
 
         self.gl.show()
         for i in range(3):
diff --git a/notebooks/Editor2.ipynb b/notebooks/Editor2.ipynb
index 5dcfd5595bbc7d15e876397e01f9c369abb91c48..4ac6e9dbd974df1074b90b2f481561d2dbac5b50 100644
--- a/notebooks/Editor2.ipynb
+++ b/notebooks/Editor2.ipynb
@@ -9,7 +9,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 25,
+   "execution_count": 9,
    "metadata": {},
    "outputs": [
     {
@@ -28,7 +28,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 26,
+   "execution_count": 10,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -41,7 +41,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 27,
+   "execution_count": 11,
    "metadata": {},
    "outputs": [
     {
@@ -63,7 +63,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 28,
+   "execution_count": 12,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -72,7 +72,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 29,
+   "execution_count": 13,
    "metadata": {},
    "outputs": [
     {
@@ -106,7 +106,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 30,
+   "execution_count": 14,
    "metadata": {
     "scrolled": false
    },
@@ -114,7 +114,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "47af532101994c36a053e16a9b31dcd6",
+       "model_id": "ece47ccd72af4638b61e9d93a66e9a57",
        "version_major": 2,
        "version_minor": 0
       },
@@ -132,7 +132,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 31,
+   "execution_count": 15,
    "metadata": {
     "scrolled": false
    },
@@ -140,7 +140,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "949dc7440647445e82dd1ca0f250e5ca",
+       "model_id": "86207439e5a94055bb3d837028f195fc",
        "version_major": 2,
        "version_minor": 0
       },
@@ -159,7 +159,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 32,
+   "execution_count": 16,
    "metadata": {},
    "outputs": [
     {
@@ -168,7 +168,7 @@
        "(0, 0)"
       ]
      },
-     "execution_count": 32,
+     "execution_count": 16,
      "metadata": {},
      "output_type": "execute_result"
     }