Neural MMO / neural-mmo-starter-kit / Commits / 4a877e3b

Commit 4a877e3b, authored Jun 25, 2021 by Siddhartha Laghuvarapu

Update rollout and undo neural baseline changes

Parent: 9b812f28
Changes: 5 files
neural-mmo  (deleted symlink, file mode 120000 → 0)
-../../neuralmmo_master/neural-mmo
\ No newline at end of file
players.yaml

@@ -4,9 +4,9 @@
 # Number of opponent agents is exactly 127
 player_agent:
-  file: scripted_baseline_agent
-  agent_class: BaselineCombatAgent
-  agent_type: scripted
+  file: neural_baseline_agent
+  agent_class: NeuralBaselineAgent
+  agent_type: neural
 opponent_agents:
   agent_1:
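For context, rollout.py reads this file through load_agents("players.yaml") (see its __main__ hunk further down); that helper lives in utils.helpers and its implementation is not part of this commit. A minimal sketch of what such a loader might look like, assuming a plain PyYAML parse and an import-by-module-name lookup (every name in the body is an illustrative assumption, not the actual utils.helpers code):

# Hypothetical sketch of a players.yaml loader; utils.helpers.load_agents is not
# shown in this commit, so the module/class lookup below is an assumption.
import importlib
import yaml

def load_agents_sketch(path="players.yaml"):
    with open(path) as f:
        cfg = yaml.safe_load(f)

    def build(spec):
        module = importlib.import_module(spec["file"])   # e.g. neural_baseline_agent
        return getattr(module, spec["agent_class"])()    # e.g. NeuralBaselineAgent

    player_agent = build(cfg["player_agent"])
    opponent_agents = [build(spec) for spec in cfg.get("opponent_agents", {}).values()]
    return player_agent, opponent_agents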
resource  (deleted symlink, file mode 120000 → 0)
-../../neuralmmo_master/neural-mmo/resource
\ No newline at end of file
rollout.py
@@ -52,7 +52,7 @@ def run_episode(player_index, agents, N_TIME_STEPS):
         obs, dones, rewards, _ = env.step(actions, omitDead=True, preprocess=neural_agents)
-        for entid in alive_agents:
+        for entid in sorted(alive_agents):
             if entid not in list(obs.keys()):
                 dead_agents.append(entid)
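The only functional change in this hunk wraps alive_agents in sorted(), so dead entity ids are appended to dead_agents in a deterministic order rather than whatever order the previous step happened to produce. A tiny self-contained illustration (the entity ids are made up):

# Toy example: sorting the ids fixes the order in which dead agents are recorded.
alive_agents = [7, 2, 5]            # hypothetical entity ids from the previous step
obs = {2: {}}                       # only entity 2 is still present after env.step
dead_agents = []
for entid in sorted(alive_agents):  # iterates 2, 5, 7 regardless of input order
    if entid not in list(obs.keys()):
        dead_agents.append(entid)
print(dead_agents)                  # [5, 7]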
@@ -64,26 +64,27 @@ def run_episode(player_index, agents, N_TIME_STEPS):
             actions[entid] = agents[entid_agent_map[entid]].compute_action(obs[entid])
         n_steps += 1
-    for entid in obs:
+    for entid in sorted(list(obs.keys())):
         if entid not in dead_agents:
             dead_agents.append(entid)
-    logs = env.terminal()
-    player_entid = entid_agent_map[player_index]
+    logs = env.terminal()
+    player_entid = agent_entid_map[player_index]
     player_log = {}
-    player_log["Achievement"] = logs['Stats']['Achievement'][player_entid]
-    player_log["Equipment"] = logs['Stats']['Equipment'][player_entid]
-    player_log["Equipment"] = logs['Stats']['Exploration'][player_entid]
-    player_log["PlayerKills"] = logs['Stats']['PlayerKills'][player_entid]
-    player_log["Foraging"] = logs['Stats']['Foraging'][player_entid]
+    player_log["Achievement"] = logs['Stats']['Achievement'][dead_agents.index(player_entid)]
+    player_log["Equipment"] = logs['Stats']['Equipment'][dead_agents.index(player_entid)]
+    player_log["Exploration"] = logs['Stats']['Exploration'][dead_agents.index(player_entid)]
+    player_log["PlayerKills"] = logs['Stats']['PlayerKills'][dead_agents.index(player_entid)]
+    player_log["Foraging"] = logs['Stats']['Foraging'][dead_agents.index(player_entid)]
     return player_log

 def print_statistics(player_statistics, episode):
-    print("======= Episode {} ========".format(episode + 1))
+    print("======= Episode {} =======".format(episode + 1))
     print("Achievement ", player_statistics['Achievement'])
     print("Equipment ", player_statistics['Equipment'])
-    print("Equipment ", player_statistics['Equipment'])
+    print("Exploration ", player_statistics['Exploration'])
     print("PlayerKills ", player_statistics['PlayerKills'])
     print("Foraging ", player_statistics['Foraging'])
     print("=========================")
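Besides the sorted() iteration, this hunk changes two things: the player's entity id is now looked up in agent_entid_map (agent index → entity id) instead of entid_agent_map (entity id → agent index), and each stat is indexed by dead_agents.index(player_entid) rather than by the entity id itself; the duplicated "Equipment" entry also appears to become "Exploration" in both player_log and print_statistics. The positional lookup only works if env.terminal() returns each stat as a sequence ordered consistently with the order entities were recorded in dead_agents; that structure is an assumption here, since the Env implementation is not part of this commit. A toy illustration of the lookup:

# Toy example of the positional lookup; the shape of logs['Stats'] is assumed.
dead_agents = [11, 4, 9]               # order in which entities were recorded as dead
stats = {'PlayerKills': [0, 3, 1]}     # assumed: one value per dead agent, same order
player_entid = 4
print(stats['PlayerKills'][dead_agents.index(player_entid)])  # 3 (position 1)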
@@ -91,7 +92,7 @@ def print_statistics(player_statistics,episode):
 if __name__ == "__main__":
     player_agent, opponent_agents = load_agents("players.yaml")
     N_EPISODES = 10
-    N_TIME_STEPS = 1024
+    N_TIME_STEPS = 102
     for episode in range(N_EPISODES):
         agents, player_index = assign_agents(player_agent, opponent_agents)
         statistics = run_episode(player_index, agents, N_TIME_STEPS)
rollout_update.py  (deleted, file mode 100644 → 0)
import sys
sys.path.append('neural-mmo/')

from forge.ethyr.torch import utils
from forge.trinity.env import Env
import projekt

from utils.helpers import load_agents
import random
import copy


def assign_agents(player_agent, opponent_agents):
    player_index = 0
    if len(opponent_agents) != 127:
        raise Exception("Number of opponent agents should add up to exactly 127")
    random.shuffle(opponent_agents)
    player_index = random.randint(0, 127)
    agents = copy.deepcopy(opponent_agents)
    agents.insert(player_index, player_agent)
    return agents, player_index


def run_episode(player_index, agents, N_TIME_STEPS):
    config = projekt.config.CompetitionRound1()
    env = Env(config)
    n_steps = 0
    neural_agents = set()
    dead_agents = []

    obs = env.reset()
    entids = list(obs.keys())
    agent_entid_map = dict(zip(range(len(agents)), entids))
    entid_agent_map = {x[1]: x[0] for x in agent_entid_map.items()}

    for idx, agent in enumerate(agents):
        if agent.type == 'neural':
            neural_agents.add(agent_entid_map[idx])

    actions = {}
    for entid in entids:
        actions[entid] = agents[entid_agent_map[entid]].register_reset(obs[entid])

    while len(obs.keys()) > 0 and n_steps < N_TIME_STEPS:
        obs, dones, rewards, _ = env.step(actions, omitDead=False)
        alive_agents = list(obs.keys())
        for entid in dones:
            if dones[entid]:
                dead_agents.append(entid)
        actions = {}
        for entid in alive_agents:
            if entid not in entid_agent_map:
                continue
            actions[entid] = agents[entid_agent_map[entid]].compute_action(obs[entid])
        n_steps += 1

    for entid in obs:
        if entid not in dead_agents:
            dead_agents.append(entid)

    logs = env.terminal()
    player_entid = entid_agent_map[player_index]
    player_log = {}
    if player_entid in dead_agents:
        player_log["Achievement"] = logs['Stats']['Achievement'][player_entid]
        player_log["Equipment"] = logs['Stats']['Equipment'][player_entid]
        player_log["Exploration"] = logs['Stats']['Exploration'][player_entid]
        player_log["PlayerKills"] = logs['Stats']['PlayerKills'][player_entid]
        player_log["Foraging"] = logs['Stats']['Foraging'][player_entid]
    return player_log


def print_statistics(player_statistics, episode):
    print(player_statistics, episode)


if __name__ == "__main__":
    player_agent, opponent_agents = load_agents("players.yaml")
    N_EPISODES = 10
    N_TIME_STEPS = 10
    for episode in range(N_EPISODES):
        agents, player_index = assign_agents(player_agent, opponent_agents)
        statistics = run_episode(player_index, agents, N_TIME_STEPS)
        print_statistics(statistics, episode)
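The deleted script built the agent-to-entity mapping with a zip over agent indices and the entity ids returned by env.reset(), then inverted it with a dict comprehension. The same two-line pattern in isolation, with invented entity ids for illustration:

# Standalone version of the mapping built in run_episode above; the ids are made up.
agents = ["scripted_0", "scripted_1", "neural_0"]
entids = [101, 205, 42]                                   # e.g. list(obs.keys()) after reset
agent_entid_map = dict(zip(range(len(agents)), entids))   # {0: 101, 1: 205, 2: 42}
entid_agent_map = {x[1]: x[0] for x in agent_entid_map.items()}  # {101: 0, 205: 1, 42: 2}
print(agent_entid_map, entid_agent_map)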