Commit ecc9887a authored by Siddhartha Laghuvarapu

Formatting

parent 30f949bd
@@ -2,17 +2,16 @@ from utils.base_agent import NeuralMMOAgent
class RandomNeuralMMOAgent(NeuralMMOAgent):
    def __init__(self):
        # NOTE: self.agent is never assigned here, but register_reset and
        # compute_action delegate to it; it must be set before either is called.
        pass

    def register_reset(self, observations):
        action = self.agent.compute_actions(observations)
        return action

    def compute_action(self, observations, info=None):
        action = self.agent.compute_actions(observations)
        return action

    def get_action(self, observations):
        return {}
from gym.envs.registration import register

register(
    id="neuralmmo-v0", entry_point="gym_neuralmmo.envs:NeuralMMOEval",
)
register(
    id="neuralmmo-v1", entry_point="gym_neuralmmo.envs:NeuralMMOTrain",
)
from gym_neuralmmo.envs.neuralmmo_eval import NeuralMMOEval
from gym_neuralmmo.envs.neuralmmo_train import NeuralMMOTrain
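For context, a minimal sketch of how these registrations would typically be consumed, assuming the register() calls live in gym_neuralmmo/__init__.py as the diff layout suggests:

import gym
import gym_neuralmmo  # importing the package runs the register() calls above

# "neuralmmo-v0" resolves to NeuralMMOEval through gym's registry
env = gym.make("neuralmmo-v0")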
@@ -7,60 +7,56 @@ from forge.trinity import env
import projekt


class NeuralMMOEval(gym.Env):
    def __init__(self):
        config = projekt.config.SmallMaps()
        self.env = env.Env(config)
        self.agents_in_play = {}
        self.available_agents = []
        self.alive_agents = []

    def reset(self):
        self.observations = self.env.reset()
        self.alive_agents = list(self.observations.keys())
        self.agents_in_play[self.alive_agents[0]] = self.player_agent
        # Copy so popping from available_agents does not mutate the
        # configured eval_agents list across episodes.
        self.available_agents = list(self.eval_agents)
        self.assign_agents()
        return self.observations

    def get_available_agent(self):
        try:
            return self.available_agents.pop()
        except IndexError:
            return self.get_default_agent()

    def assign_agents(self):
        for agent in self.alive_agents:
            if agent not in self.agents_in_play:
                self.agents_in_play[agent] = self.get_available_agent()

    def get_agent_actions(self):
        actions = {}
        for agent in self.observations:
            actions[agent] = self.agents_in_play[agent].compute_action(
                self.observations[agent]
            )
        return actions

    def step(self):
        self.actions = self.get_agent_actions()
        self.observations, dones, rewards, info = self.env.step(self.actions)
        self.alive_agents = list(self.observations.keys())
        self.assign_agents()
        return self.parse_observations(self.observations, dones, rewards, info)

    def set_player_agent(self, player_agent):
        self.player_agent = player_agent

    def set_eval_agents(self, eval_agents):
        self.eval_agents = eval_agents

    def get_default_agent(self):
        raise NotImplementedError

    def parse_observations(self, observations, dones, rewards, info):
        raise NotImplementedError
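The two NotImplementedError methods are the extension points a concrete evaluation env must fill in. A minimal sketch under assumed semantics; the class names and fallback behavior below are illustrative, not part of the diff:

class _IdleAgent:
    # Stands in for a real scripted fallback: emits empty action dicts.
    def compute_action(self, observations, info=None):
        return {}


class SimpleNeuralMMOEval(NeuralMMOEval):
    def get_default_agent(self):
        # Used when available_agents runs dry during assign_agents().
        return _IdleAgent()

    def parse_observations(self, observations, dones, rewards, info):
        # Pass results through unchanged; a real subclass might extract
        # only the player's entries here.
        return observations, dones, rewards, info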
from setuptools import setup

setup(name="gym_neuralmmo", version="0.0.1", install_requires=["gym"])
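With this setup.py in place, the package would typically be installed in editable mode with pip install -e . from the repository root, which makes the gym_neuralmmo registrations importable by the evaluation script below.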
@@ -5,26 +5,28 @@ import importlib
import gym


def get_agent(agent_dict):
    sys.path.append("agents/")
    module = importlib.import_module(agent_dict["file"])
    agent = getattr(module, agent_dict["agent_class"])()
    return agent


def load_agents(agents_config):
    with open(agents_config, "r") as stream:
        data = yaml.safe_load(stream)
    player_agent = get_agent(data["player_agent"])
    opponent_agents = []
    for agent in data["opponent_agents"]:
        opponent_agents.append(get_agent(agent))
    # main() unpacks two values, so both must be returned.
    return player_agent, opponent_agents
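For reference, a players.yaml along these lines would satisfy load_agents; the keys mirror the lookups above, while the module name random_agent is a hypothetical placeholder for a file under agents/:

player_agent:
  file: random_agent          # hypothetical module agents/random_agent.py
  agent_class: RandomNeuralMMOAgent
opponent_agents:
  - file: random_agent
    agent_class: RandomNeuralMMOAgent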
def main():
    # The id registered above is "neuralmmo-v0"; "neuralmmo-eval" is not
    # in the registry and would raise an error in gym.make.
    env = gym.make("neuralmmo-v0")
    player_agent, opponent_agents = load_agents("players.yaml")
    env.set_player_agent(player_agent)
    env.set_eval_agents(opponent_agents)
    n_episodes = 100
@@ -33,11 +35,12 @@ def main():
    obs = env.reset()
    done = False
    while not done:
        obs, dones, rewards, _ = env.step()
        total_rewards += rewards["player"]
        done = dones["player"]
    print("The total reward is ", total_rewards)


if __name__ == "__main__":
    main()
from abc import ABC, abstractmethod


class NeuralMMOAgent(ABC):
    @abstractmethod
    def register_reset(self, observations):
        pass

    @abstractmethod
    def compute_action(self, observations, info):
        pass
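Read together with the evaluation loop above, the intended calling convention appears to be that register_reset receives the first observation of an episode and compute_action is called once per subsequent step. A minimal conforming agent, under that reading (the class name is illustrative):

class NoopAgent(NeuralMMOAgent):
    def register_reset(self, observations):
        # First observation of a new episode; nothing to initialize here.
        return {}

    def compute_action(self, observations, info=None):
        # Called every step; an empty dict means "take no action".
        return {}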