# training_navigation.py
from flatland.envs.rail_env import *
from flatland.envs.generators import *
from flatland.core.env_observation_builder import TreeObsForRailEnv
from flatland.utils.rendertools import *
from flatland.baselines.dueling_double_dqn import Agent
from collections import deque

import numpy as np  # np is used below; don't rely on the star imports to provide it
import torch
import random

random.seed(1)
np.random.seed(1)

# Relative proportions of the cell types used by the random rail generator
# below (index i is the weight of transition-map case i)
transition_probability = [5,   # empty cell - Case 0
                          1,   # Case 1 - straight
                          5,   # Case 2 - simple switch
                          1,   # Case 3 - diamond crossing
                          1,   # Case 4 - single slip
                          1,   # Case 5 - double slip
                          1,   # Case 6 - symmetrical
                          0,   # Case 7 - dead end
                          15,  # Case 1b (8)  - simple turn right
                          15,  # Case 1c (9)  - simple turn left
                          15]  # Case 2b (10) - simple switch mirrored
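
# Note that the weights are relative, not normalized: with the values above,
# simple turns (weight 15) appear far more often than slip switches (weight 1),
# and dead ends (weight 0) never appear at all.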

# Example: generate a random rail
env = RailEnv(width=10,
              height=10,
              rail_generator=random_rail_generator(cell_type_relative_proportion=transition_probability),
              number_of_agents=3)
"""
33
34
35
36
37
env = RailEnv(width=20,
              height=20,
              rail_generator=complex_rail_generator(nr_start_goal=20, min_dist=10, max_dist=99999, seed=0),
              number_of_agents=5)

Erik Nygren's avatar
Erik Nygren committed
38
"""

# This instantiation replaces the random environment above: the rail is
# loaded from a saved GridTransitionMap
env = RailEnv(width=20,
              height=20,
              rail_generator=rail_from_list_of_saved_GridTransitionMap_generator(
                      ['../notebooks/testing_11.npy']),
              number_of_agents=1)

env_renderer = RenderTool(env, gl="QT")
handle = env.get_agent_handles()

# Training hyperparameters
state_size = 105   # flattened tree observation (presumably 21 nodes x 5 features)
action_size = 4
n_trials = 15000
eps = 1.           # epsilon-greedy exploration rate
eps_end = 0.005
eps_decay = 0.998
action_dict = dict()
scores_window = deque(maxlen=100)  # rolling window for the average score
done_window = deque(maxlen=100)    # rolling window for the completion rate
scores = []
dones_list = []
action_prob = [0] * 4
agent = Agent(state_size, action_size, "FC", 0)
agent.qnetwork_local.load_state_dict(torch.load('../flatland/baselines/Nets/avoid_checkpoint14900.pth'))
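
# With eps_decay=0.998 and eps_end=0.005, epsilon anneals from 1.0 down to its
# floor after roughly log(0.005)/log(0.998) ~ 2650 of the 15000 episodes.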

demo = True  # when True: render each step and act greedily (eps is forced to 0 in the loop)


def max_lt(seq, val):
    """
    Return the greatest non-negative item in seq that is strictly less
    than val. 0 is returned if seq is empty or no item qualifies.
    """
    largest = 0  # renamed from `max` to avoid shadowing the builtin
    idx = len(seq) - 1
    while idx >= 0:
        if seq[idx] < val and seq[idx] >= 0 and seq[idx] > largest:
            largest = seq[idx]
        idx -= 1
    return largest
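
# Example: max_lt([0.3, 7.0, np.inf], np.inf) returns 7.0, so the
# max(1, max_lt(obs, np.inf)) normalization below rescales the largest finite
# observation entry to 1 while ignoring np.inf entries (presumably the
# "unreachable branch" markers in the tree observation).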


def min_lt(seq, val):
    """
    Return the smallest item in seq that is strictly greater than val.
    np.inf is returned if seq is empty or no item qualifies.
    """
    smallest = np.inf  # renamed from `min` to avoid shadowing the builtin
    idx = len(seq) - 1
    while idx >= 0:
        if seq[idx] > val and seq[idx] < smallest:
            smallest = seq[idx]
        idx -= 1
    return smallest
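
# Note: min_lt is defined for symmetry with max_lt but is not called anywhere
# in this script.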


for trials in range(1, n_trials + 1):

    # Reset environment and normalize each agent's observation to [-1, 1]
    obs = env.reset()
    for a in range(env.number_of_agents):
        norm = max(1, max_lt(obs[a], np.inf))
        obs[a] = np.clip(np.array(obs[a]) / norm, -1, 1)

    # env.obs_builder.util_print_obs_subtree(tree=obs[0], num_elements_per_node=5)

    score = 0
    env_done = 0

    # Run episode (at most 100 steps)
    for step in range(100):
        if demo:
            env_renderer.renderEnv(show=True)
        # print(step)

        # Choose an epsilon-greedy action for every agent
        for a in range(env.number_of_agents):
            if demo:
                eps = 0  # act greedily when demonstrating
            action = agent.act(np.array(obs[a]), eps=eps)
            action_prob[action] += 1
            action_dict.update({a: action})
            # env.obs_builder.util_print_obs_subtree(tree=obs[a], num_features_per_node=5)
        # Environment step
        next_obs, all_rewards, done, _ = env.step(action_dict)
        for a in range(env.number_of_agents):
            norm = max(1, max_lt(next_obs[a], np.inf))
            next_obs[a] = np.clip(np.array(next_obs[a]) / norm, -1, 1)

        # Update replay buffer and train agent
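        # (agent.step presumably pushes each transition into the replay buffer
        # and periodically runs a Double DQN learning update on sampled
        # minibatches; see flatland.baselines.dueling_double_dqn)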
        for a in range(env.number_of_agents):
            agent.step(obs[a], action_dict[a], all_rewards[a], next_obs[a], done[a])
            score += all_rewards[a]

        obs = next_obs.copy()
        if done['__all__']:
            env_done = 1
            break

    # Epsilon decay
    eps = max(eps_end, eps_decay * eps)  # decrease epsilon
    done_window.append(env_done)
    scores_window.append(score)  # save most recent score
    scores.append(np.mean(scores_window))
    dones_list.append(np.mean(done_window))

    print(
        '\rTraining {} Agents.\tEpisode {}\tAverage Score: {:.0f}\tDones: {:.2f}%\tEpsilon: {:.2f} \t Action Probabilities: \t {}'.format(
            env.number_of_agents,
            trials,
            np.mean(scores_window),
            100 * np.mean(done_window),
            eps,
            action_prob / np.sum(action_prob)),
        end=" ")

    # Every 100 episodes: print a persistent progress line, save a network
    # checkpoint, and reset the action counters (to ones rather than zeros,
    # presumably so the report above never divides by zero)
    if trials % 100 == 0:
        print(
            '\rTraining {} Agents.\tEpisode {}\tAverage Score: {:.0f}\tDones: {:.2f}%\tEpsilon: {:.2f} \t Action Probabilities: \t {}'.format(
                env.number_of_agents,
                trials,
                np.mean(scores_window),
                100 * np.mean(done_window),
                eps,
                action_prob / np.sum(action_prob)))
        torch.save(agent.qnetwork_local.state_dict(),
                   '../flatland/baselines/Nets/avoid_checkpoint' + str(trials) + '.pth')
        action_prob = [1] * 4