Neural MMO / neural-mmo-starter-kit

Commit b2ee52e0, authored Jun 16, 2021 by Siddhartha Laghuvarapu

Add scripted baseline agents

Parent: 0b05592d
Changes: 5 files
agents/scripted_baseline_agent.py (new file, mode 0 → 100644)
from utils.base_agent import NeuralMMOAgent
from forge.trinity.scripted import baselines
import projekt


class BaselineForageAgent(NeuralMMOAgent):
    def __init__(self):
        self.agent = getattr(baselines, 'Forage')(projekt.config.SmallMaps())

    def register_reset(self, observations):
        action = self.agent(observations)
        return action

    def compute_action(self, observations, info=None):
        action = self.agent(observations)
        return action


class BaselineCombatAgent(NeuralMMOAgent):
    def __init__(self):
        self.agent = getattr(baselines, 'Combat')(projekt.config.SmallMaps())

    def register_reset(self, observations):
        action = self.agent(observations)
        return action

    def compute_action(self, observations, info=None):
        action = self.agent(observations)
        return action


class BaselineRandomAgent(NeuralMMOAgent):
    def __init__(self):
        self.agent = getattr(baselines, 'Random')(projekt.config.SmallMaps())

    def register_reset(self, observations):
        action = self.agent(observations)
        return action

    def compute_action(self, observations, info=None):
        action = self.agent(observations)
        return action
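The three classes are identical apart from the baseline name passed to getattr. Below is a small sanity-check sketch: it assumes the Neural MMO baselines repository that provides the forge and projekt packages imported above is on the PYTHONPATH, and it only demonstrates the getattr lookup, not a full episode.

# Sketch: resolve each scripted baseline by name, as the wrappers above do.
# Assumes the Neural MMO baselines repo (forge/, projekt/) is importable.
from forge.trinity.scripted import baselines
import projekt

config = projekt.config.SmallMaps()
for name in ('Forage', 'Combat', 'Random'):
    scripted = getattr(baselines, name)(config)
    print(name, type(scripted).__name__)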
env/gym-neuralmmo/gym_neuralmmo/envs/neuralmmo_eval.py
@@ -40,7 +40,7 @@ class NeuralMMOEval(gym.Env):
         self.available_agents = self.eval_agents[:]
         self.assign_agents()
         self.actions = self.get_agent_actions()
-        return self.observations
+        return self.observations[self.player_idx]

     def get_available_agent(self):
         try:

@@ -64,7 +64,7 @@ class NeuralMMOEval(gym.Env):
         return actions

     def step(self, action):
-        self.observations, dones, rewards, _ = self.env.step(self.actions)
+        self.observations, dones, rewards, _ = self.env.step(self.actions, preprocessActions=False)
         for agent in dones:
             if dones[agent] == -1:
                 self.dead_agents.append(agent)
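Read together, the two hunks change what the evaluation wrapper exposes: reset() now returns only the evaluated player's observation (observations[player_idx]) rather than the full observation dict, and step() forwards the collected actions with preprocessActions=False, presumably because scripted baselines already emit actions in the wrapped env's native format. For context, the loop at the end of step() records agents whose done flag is -1 as dead; a tiny self-contained illustration of that bookkeeping (the dict values are made up):

# Illustration only -- mirrors the dead-agent loop in step(); values are made up.
dones = {'agent_3': 0, 'agent_7': -1, 'agent_9': -1}
dead_agents = []
for agent in dones:
    if dones[agent] == -1:
        dead_agents.append(agent)
print(dead_agents)  # ['agent_7', 'agent_9']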
env/gym-neuralmmo/setup.py
 from setuptools import setup

-setup(name="gym_neuralmmo", version="0.0.1", install_requires=["gym"])
+setup(name="gym_neuralmmo", version="0.0.2", install_requires=["gym"])
players.yaml
@@ -4,21 +4,21 @@
 # Max number of opponent agents is 127
 player_agent:
-    file: random_agent
-    agent_class: RandomNeuralMMOAgent
+    file: scripted_baseline_agent
+    agent_class: BaselineForageAgent

 opponent_agents:
     agent_1:
-        file: random_agent
-        agent_class: RandomNeuralMMOAgent
+        file: scripted_baseline_agent
+        agent_class: BaselineForageAgent
         num_agents: 50
     agent_2:
-        file: random_agent
-        agent_class: RandomNeuralMMOAgent
+        file: scripted_baseline_agent
+        agent_class: BaselineForageAgent
         num_agents: 51
     agent_3:
-        file: random_agent
-        agent_class: RandomNeuralMMOAgent
-        num_agents: 101
+        file: scripted_baseline_agent
+        agent_class: BaselineForageAgent
+        num_agents: 26
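The new opponent counts (50 + 51 + 26) sum to exactly the 127-agent cap noted in the file's comment. A hedged sanity-check sketch, assuming PyYAML is installed and players.yaml has the structure shown in the hunk above:

# Sketch: verify the opponent counts stay within the 127-agent cap.
import yaml

with open('players.yaml') as f:
    cfg = yaml.safe_load(f)

total = sum(a['num_agents'] for a in cfg['opponent_agents'].values())
print(total)      # 50 + 51 + 26 = 127
assert total <= 127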
rollout.py
@@ -20,7 +20,7 @@ def main():
     done = False
     while done == False:
         obs, dones, rewards, _ = env.step(action)
-        action = player_agent.compute_action(action)
+        action = player_agent.compute_action(obs["player"])
         total_rewards += rewards["player"]
         done = dones["player"]
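This hunk fixes the rollout loop so compute_action receives the player's observation (obs["player"]) instead of the action from the previous step. A self-contained sketch of the corrected loop shape; StubEnv and StubAgent are made-up stand-ins so the snippet runs without Neural MMO installed:

# Sketch of the corrected loop: the agent is fed obs['player'], not its own action.
class StubAgent:
    def compute_action(self, observation):
        return {'move': 0}  # placeholder action

class StubEnv:
    def __init__(self):
        self.t = 0
    def step(self, action):
        self.t += 1
        obs = {'player': {'tick': self.t}}
        dones = {'player': self.t >= 3}
        rewards = {'player': 1.0}
        return obs, dones, rewards, {}

env, player_agent = StubEnv(), StubAgent()
action, done, total_rewards = {'move': 0}, False, 0.0
while done == False:
    obs, dones, rewards, _ = env.step(action)
    action = player_agent.compute_action(obs['player'])  # the fix in this commit
    total_rewards += rewards['player']
    done = dones['player']
print(total_rewards)  # 3.0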