from flatland.envs.rail_env import *
from flatland.core.env_observation_builder import TreeObsForRailEnv
from flatland.utils.rendertools import *
from flatland.baselines.dueling_double_dqn import Agent
from collections import deque
import numpy as np
import torch
import random

random.seed(1)
np.random.seed(1)

# Relative proportions of the different cell types used by the random
# rail generator (one value per transition case)
transition_probability = [10.0,  # empty cell - Case 0
                          50.0,  # Case 1 - straight
                          1.0,  # Case 2 - simple switch
                          0.3,  # Case 3 - diamond crossing
                          0.5,  # Case 4 - single slip
                          0.5,  # Case 5 - double slip
                          0.2,  # Case 6 - symmetrical
                          0.0]  # Case 7 - dead end

# Example: generate a random rail
env = RailEnv(width=7,
              height=7,
              rail_generator=random_rail_generator(cell_type_relative_proportion=transition_probability),
              number_of_agents=1)
env_renderer = RenderTool(env)
handle = env.get_agent_handles()

state_size = 105
action_size = 4
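# state_size matches the flattened tree observation fed to the network
# (presumably a depth-2 observation tree: 21 nodes x 5 features = 105);
# action_size covers the four navigation actions used in this example.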
n_trials = 5000
eps = 1.
eps_end = 0.005
eps_decay = 0.998
action_dict = dict()
scores_window = deque(maxlen=100)
done_window = deque(maxlen=100)
scores = []
dones_list = []
action_prob = [0]*4
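# eps is annealed multiplicatively from 1.0 towards eps_end after each episode;
# scores_window and done_window keep the last 100 episode scores / completions
# for the moving averages printed below, and action_prob counts how often each
# action is selected.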
agent = Agent(state_size, action_size, "FC", 0)
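# Dueling Double DQN agent from the flatland baselines; "FC" presumably selects
# the fully connected network variant and 0 the seed passed to the agent.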

for trials in range(1, n_trials + 1):

    # Reset environment
    obs = env.reset()
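
    # Scale each agent's observation into [-1, 1]; observations containing
    # np.inf (e.g. unreachable branches of the tree) are left unnormalized.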
    for a in range(env.number_of_agents):
        if np.max(obs[a]) > 0 and np.max(obs[a]) < np.inf:
            obs[a] = np.clip(obs[a] / np.max(obs[a]), -1, 1)

    # env.obs_builder.util_print_obs_subtree(tree=obs[0], num_elements_per_node=5)

    score = 0
    env_done = 0

    # Run episode
    for step in range(100):
        # if trials > 114:
        #     env_renderer.renderEnv(show=True)

        # Action
        for a in range(env.number_of_agents):
            action = agent.act(np.array(obs[a]), eps=eps)
            action_prob[action] += 1
            action_dict.update({a: action})

        # Environment step
        next_obs, all_rewards, done, _ = env.step(action_dict)
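        # Normalize the next observations in the same way before storing the transitions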
        for a in range(env.number_of_agents):
            if np.max(next_obs[a]) > 0 and np.max(next_obs[a]) < np.inf:
                next_obs[a] = np.clip(next_obs[a] / np.max(next_obs[a]), -1, 1)
        # Update replay buffer and train agent
        for a in range(env.number_of_agents):
            agent.step(obs[a], action_dict[a], all_rewards[a], next_obs[a], done[a])
            score += all_rewards[a]

        obs = next_obs.copy()
        if done['__all__']:
            env_done = 1
            break
    # Epsilon decay
    eps = max(eps_end, eps_decay * eps)  # decrease epsilon
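
    # Book-keeping: per-episode completion and 100-episode moving averages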
    done_window.append(env_done)
    scores_window.append(score)  # save most recent score
    scores.append(np.mean(scores_window))
    dones_list.append(np.mean(done_window))

    print(
        '\rTraining {} Agents.\tEpisode {}\tAverage Score: {:.0f}\tDones: {:.2f}%\tEpsilon: {:.2f} \t Action Probabilities: \t {}'.format(
            env.number_of_agents,
            trials,
            np.mean(scores_window),
            100 * np.mean(done_window),
            eps,
            action_prob / np.sum(action_prob)),
        end=" ")
    if trials % 100 == 0:
        print(
            '\rTraining {} Agents.\tEpisode {}\tAverage Score: {:.0f}\tDones: {:.2f}%\tEpsilon: {:.2f} \t Action Probabilities: \t {}'.format(
                env.number_of_agents,
                trials,
                np.mean(scores_window),
                100 * np.mean(done_window),
                eps,
                action_prob / np.sum(action_prob)))
        torch.save(agent.qnetwork_local.state_dict(),
                   '../flatland/baselines/Nets/avoid_checkpoint' + str(trials) + '.pth')