Commit 3e7e26c3 authored by Siddhartha Laghuvarapu

Reorg and bugfix

parent ecc9887a
@@ -10,8 +10,8 @@ class RandomNeuralMMOAgent(NeuralMMOAgent):
         return action
 
     def compute_action(self, observations, info=None):
-        action = self.agent.compute_actions(observations)
+        action = self.get_action(observations)
         return action
 
-    def get_action(observations):
+    def get_action(self, observations):
         return {}
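
Both fixes in this hunk touch the agent interface: compute_action previously called self.agent.compute_actions, an attribute this random agent does not have, and get_action was missing its self parameter. A minimal sketch of the shape the fixed code implies; the NeuralMMOAgent base class is named in the hunk header, but its body below is an assumption, not code from this repository:

class NeuralMMOAgent:
    # Assumed abstract base; the repository's actual definition is not shown.
    def compute_action(self, observations, info=None):
        raise NotImplementedError


class RandomNeuralMMOAgent(NeuralMMOAgent):
    def compute_action(self, observations, info=None):
        # After the fix: route through get_action instead of the
        # nonexistent self.agent attribute.
        action = self.get_action(observations)
        return action

    def get_action(self, observations):
        # After the fix: a bound method with self; returns an empty
        # action dict, i.e. the random agent takes no action.
        return {}

With this shape, agent.compute_action(obs) resolves to the agent's own get_action hook, which is what the corrected lines rely on.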
@@ -3,21 +3,22 @@ from gym import error, spaces, utils
 from gym.utils import seeding
 from forge.blade.systems import ai
-from forge.trinity import env
+from forge.trinity.env import Env
 import projekt
 
 
 class NeuralMMOEval(gym.Env):
     def __init__(self):
         config = projekt.config.SmallMaps()
-        self.env = env.Env(config)
+        self.env = Env(config)
         self.agents_in_play = {}
         self.available_agents = []
         self.alive_agents = []
 
     def reset(self):
-        self.observations = env.reset()
+        self.observations = self.env.reset()
         self.alive_agents = list(self.observations.keys())
         self.player_idx = self.alive_agents[0]
         self.agents_in_play[self.alive_agents[0]] = self.player_agent
         self.available_agents = self.eval_agents
         self.assign_agents()
@@ -36,18 +37,21 @@ class NeuralMMOEval(gym.Env):
     def get_agent_actions(self):
         actions = {}
+        print(self.observations)
+        print(self.alive_agents)
         for agent in self.observations:
-            actions[agent] = self.alive_agents[agent].compute_action(
+            print(agent)
+            actions[agent] = self.agents_in_play[agent].compute_action(
                 self.observations[agent]
             )
         return actions
 
     def step(self):
-        self.actions = self.get_agent_actions(self.observations)
-        self.observations, dones, rewards, _ = env.step(self.actions)
-        self.alive_agents = list(obs.keys())
+        self.actions = self.get_agent_actions()
+        self.observations, dones, rewards, _ = self.env.step(self.actions)
+        self.alive_agents = list(self.observations.keys())
         self.assign_agents()
-        return self.parse_observations(self, self.observations, dones, rewards, _)
+        return self.parse_observations(self.observations, dones, rewards, _)
 
     def set_player_agent(self, player_agent):
         self.player_agent = player_agent
@@ -58,5 +62,11 @@ class NeuralMMOEval(gym.Env):
     def get_default_agent(self):
         raise NotImplementedError
 
-    def parse_observations(self):
-        raise NotImplementedError
+    def parse_observations(self, obs, dones, rewards, _):
+        parse_obs = {}
+        parse_dones = {}
+        parse_rewards = {}
+        parse_obs["player_agent"] = obs[self.player_idx]
+        parse_dones["player_agent"] = dones[self.player_idx]
+        parse_rewards["player_agent"] = rewards[self.player_idx]
+        return parse_obs, parse_dones, parse_rewards, _
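
parse_observations now filters each per-agent dict down to the controlled player instead of raising NotImplementedError. An illustrative trace with placeholder values, assuming self.player_idx happens to be 2:

# Hypothetical inputs after one environment step:
obs = {1: "obs_for_agent_1", 2: "obs_for_agent_2"}
dones = {1: False, 2: True}
rewards = {1: 0.0, 2: 1.5}

# With self.player_idx == 2, the method returns:
#   ({"player_agent": "obs_for_agent_2"},
#    {"player_agent": True},
#    {"player_agent": 1.5},
#    _)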
import sys
import yaml
from tqdm import trange
import importlib
import gym
import gym_neuralmmo


def get_agent(agent_dict):
    # Agent modules live under agents/; import one by file name and
    # instantiate the named class.
    sys.path.append("agents/")
    module = importlib.import_module(agent_dict["file"])
    agent = getattr(module, agent_dict["agent_class"])()
    return agent


def load_agents(agents_config):
    with open(agents_config, "r") as stream:
        data = yaml.safe_load(stream)
    player_agent = get_agent(data["player_agent"])
    opponent_agents = []
    for agent in data["opponent_agents"]:
        opponent_agents.append(get_agent(agent))
    # Return both values; main() unpacks a (player, opponents) pair.
    return player_agent, opponent_agents
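
load_agents expects a YAML file with one player_agent mapping and a list under opponent_agents, each naming the module (under agents/) and the class to instantiate. A plausible players.yaml, checked inline below; only the keys come from the code above, while the file and class names are illustrative assumptions:

import yaml

example = """
player_agent:
  file: random_agent            # hypothetical agents/random_agent.py
  agent_class: RandomNeuralMMOAgent
opponent_agents:
  - file: random_agent
    agent_class: RandomNeuralMMOAgent
"""
data = yaml.safe_load(example)
assert set(data) == {"player_agent", "opponent_agents"}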
import gym
import gym_neuralmmo  # importing the package registers the NeuralMMO envs with gym
from utils.helpers import load_agents


def main():
    env = gym.make("neuralmmo-v0")  # was "neuralmmo-eval" before this commit
    player_agent, opponent_agents = load_agents("players.yaml")
    env.set_player_agent(player_agent)
    env.set_eval_agents(opponent_agents)
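
The hunk ends once the agents are wired up. A hedged sketch of how the evaluation loop could continue from here; everything after set_eval_agents, including the episode count, is an assumption rather than part of this commit:

from tqdm import trange

def evaluate(env, episodes=10):
    for _ in trange(episodes):
        env.reset()
        done = False
        while not done:
            # step() gathers actions for all live agents internally and
            # returns only the player_agent slice of each dict.
            obs, dones, rewards, _ = env.step()
            done = dones["player_agent"]

Note that this env's step() takes no action argument, unlike the standard gym API: the player's agent is registered up front via set_player_agent and queried inside step().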