From a7cb94ebb383c09c90b0d21c6a618bce5b38b02b Mon Sep 17 00:00:00 2001
From: Erik Nygren <erik.nygren@sbb.ch>
Date: Tue, 2 Jul 2019 15:36:58 -0400
Subject: [PATCH] added new special maps

---
 torch_training/railway/complex_scene_2.pkl | Bin 0 -> 1916 bytes
 torch_training/railway/complex_scene_3.pkl | Bin 0 -> 1837 bytes
 torch_training/railway/flatland.pkl        | Bin 0 -> 2684 bytes
 torch_training/training_navigation.py      |   9 +++++----
 4 files changed, 5 insertions(+), 4 deletions(-)
 create mode 100644 torch_training/railway/complex_scene_2.pkl
 create mode 100644 torch_training/railway/complex_scene_3.pkl
 create mode 100644 torch_training/railway/flatland.pkl

diff --git a/torch_training/railway/complex_scene_2.pkl b/torch_training/railway/complex_scene_2.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..5ebb8ab1715b3c9f01171116ac519128f4d67234
GIT binary patch
literal 1916
[base85-encoded binary pickle data omitted]

literal 0
HcmV?d00001

diff --git a/torch_training/railway/complex_scene_3.pkl b/torch_training/railway/complex_scene_3.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..fddf5794b7292065e0a7e4bf72e616d6087ee179
GIT binary patch
literal 1837
[base85-encoded binary pickle data omitted]

literal 0
HcmV?d00001

diff --git a/torch_training/railway/flatland.pkl b/torch_training/railway/flatland.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..a652becf420b1ec592c9de07681a394085a08bc8
GIT binary patch
literal 2684
[base85-encoded binary pickle data omitted]

literal 0
HcmV?d00001

diff --git a/torch_training/training_navigation.py b/torch_training/training_navigation.py
index de4b792..d085a8b 100644
--- a/torch_training/training_navigation.py
+++ b/torch_training/training_navigation.py
@@ -43,7 +43,7 @@ env = RailEnv(width=15,
 """
 env = RailEnv(width=10,
               height=20,
               obs_builder_object=TreeObsForRailEnv(max_depth=2, predictor=ShortestPathPredictorForRailEnv()))
-env.load("./railway/complex_scene.pkl")
+env.load("./railway/flatland.pkl")
 file_load = True
 """
@@ -79,7 +79,7 @@ agent = Agent(state_size, action_size, "FC", 0)
 agent.qnetwork_local.load_state_dict(torch.load('./Nets/avoid_checkpoint15000.pth'))
 demo = True
-record_images = False
+record_images = True
 
 
 def max_lt(seq, val):
     """
@@ -140,6 +140,7 @@ for trials in range(1, n_trials + 1):
     final_obs = obs.copy()
     final_obs_next = obs.copy()
     for a in range(env.get_num_agents()):
+        print(a)
         data, distance, agent_data = env.obs_builder.split_tree(tree=np.array(obs[a]), num_features_per_node=8,
                                                                 current_depth=0)
         data = norm_obs_clip(data)
@@ -164,14 +165,14 @@ for trials in range(1, n_trials + 1):
         if demo:
             env_renderer.renderEnv(show=True, show_observations=False)
             if record_images:
-                env_renderer.gl.saveImage("./Images/frame_{:04d}.bmp".format(step))
+                env_renderer.gl.saveImage("./Images/flatland_frame_{:04d}.bmp".format(step))
         # print(step)
         # Action
         for a in range(env.get_num_agents()):
             if demo:
                 eps = 0
             # action = agent.act(np.array(obs[a]), eps=eps)
-            action = agent.act(agent_obs[a], eps=eps)
+            action = 2 #agent.act(agent_obs[a], eps=eps)
             action_prob[action] += 1
             action_dict.update({a: action})
         # Environment step
--
GitLab
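For reference, the new maps are consumed the same way as the ./railway/flatland.pkl scene that the patched training_navigation.py now loads. The sketch below is not part of the patch; the import paths and the reset(False, False) call are assumptions based on the flatland-rl version this repository targets, while the RailEnv construction and the env.load call mirror the diff above.

from flatland.envs.observations import TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.envs.rail_env import RailEnv

# Same construction as in training_navigation.py: tree observations with a
# shortest-path predictor; the rail itself comes from the pickled scene.
env = RailEnv(width=10,
              height=20,
              obs_builder_object=TreeObsForRailEnv(max_depth=2,
                                                   predictor=ShortestPathPredictorForRailEnv()))

# Any of the new special maps can be swapped in here:
# complex_scene_2.pkl, complex_scene_3.pkl or flatland.pkl
env.load("./railway/complex_scene_2.pkl")

# Assumed reset arguments (regen_rail=False, replace_agents=False) so the
# loaded rail is kept instead of being regenerated.
obs = env.reset(False, False)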