Commit ae94980a authored by nilabha

Updated custom model to v2 version and added APEX IL support

parent fdb25dfc
flatland-random-sparse-small-tree-fc-ppo:
    run: APEX
    env: flatland_sparse
    stop:
        timesteps_total: 100000000  # 1e8
    checkpoint_freq: 10
    checkpoint_at_end: True
    keep_checkpoints_num: 5
    checkpoint_score_attr: episode_reward_mean
    config:
        input:
            "/tmp/flatland-out": 0.25
            sampler: 0.75
        num_workers: 2
        num_envs_per_worker: 1
        num_gpus: 0
        env_config:
            observation: tree
            observation_config:
                max_depth: 2
                shortest_path_max_depth: 30
            generator: sparse_rail_generator
            generator_config: small_v0
            wandb:
                project: flatland
                entity: masterscrat
                tags: ["small_v0", "tree_obs", "APEX_DQfD"]  # TODO should be set programmatically
        model:
            custom_model: custom_loss_model
            fcnet_hiddens: [256, 256]
            vf_share_layers: True  # False
            custom_options:
                input_files: /tmp/flatland-out
                lambda1: 1
                lambda2: 1
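
The "input" dict above mixes offline and online experience: RLlib draws roughly 25% of training samples from JSON episode files under /tmp/flatland-out and 75% from live environment rollouts ("sampler"). Those files must be in RLlib's JSON batch format. A minimal sketch of producing such files, assuming ray[rllib] 0.8.x-era APIs and classic gym; CartPole and the random action choice are placeholders for the real Flatland environment and expert demonstrations:

import gym
from ray.rllib.evaluation.sample_batch_builder import SampleBatchBuilder
from ray.rllib.offline.json_writer import JsonWriter

env = gym.make("CartPole-v0")  # stand-in for flatland_sparse (assumption)
builder = SampleBatchBuilder()
writer = JsonWriter("/tmp/flatland-out")

for eps_id in range(10):
    obs = env.reset()
    done, t = False, 0
    while not done:
        action = env.action_space.sample()  # a real expert policy goes here
        new_obs, reward, done, info = env.step(action)
        builder.add_values(
            t=t, eps_id=eps_id, obs=obs, actions=action,
            rewards=reward, dones=done, new_obs=new_obs)
        obs, t = new_obs, t + 1
    writer.write(builder.build_and_reset())  # one JSON batch per episode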
flatland-random-sparse-small-tree-fc-ppo:
    run: APEX
    env: flatland_sparse
    stop:
        timesteps_total: 100000000  # 1e8
    checkpoint_freq: 10
    checkpoint_at_end: True
    keep_checkpoints_num: 5
    checkpoint_score_attr: episode_reward_mean
    config:
        input: /tmp/flatland-out
        input_evaluation: [is, wis, simulation]
        num_workers: 2
        num_envs_per_worker: 1
        num_gpus: 0
        env_config:
            observation: tree
            observation_config:
                max_depth: 2
                shortest_path_max_depth: 30
            generator: sparse_rail_generator
            generator_config: small_v0
            wandb:
                project: flatland
                entity: masterscrat
                tags: ["small_v0", "tree_obs", "apex_IL"]  # TODO should be set programmatically
        model:
            fcnet_activation: relu
            fcnet_hiddens: [256, 256]
            vf_share_layers: True  # False
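
This variant trains purely from the logged experiences, so input_evaluation estimates policy quality off-policy: "is" (importance sampling) and "wis" (weighted importance sampling) score the learned policy against the logged behavior, while "simulation" additionally rolls the policy out in the environment. A quick sanity check of the dataset can reuse the same reader RLlib uses internally for "input" paths (a sketch, same RLlib version assumed):

from ray.rllib.offline import JsonReader

reader = JsonReader("/tmp/flatland-out")
batch = reader.next()  # one SampleBatch of logged transitions
print(batch.count, sorted(batch.keys()))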
flatland-random-sparse-small-tree-fc-ppo:
    run: APEX
    env: flatland_sparse
    stop:
        timesteps_total: 100000000  # 1e8
    checkpoint_freq: 10
    checkpoint_at_end: True
    keep_checkpoints_num: 5
    checkpoint_score_attr: episode_reward_mean
    config:
        input:
            "/tmp/flatland-out": 0.25
            sampler: 0.75
        num_workers: 2
        num_envs_per_worker: 1
        num_gpus: 0
        env_config:
            observation: tree
            observation_config:
                max_depth: 2
                shortest_path_max_depth: 30
            generator: sparse_rail_generator
            generator_config: small_v0
            wandb:
                project: flatland
                entity: masterscrat
                tags: ["small_v0", "tree_obs", "apex_Mixed_IL"]  # TODO should be set programmatically
        model:
            fcnet_activation: relu
            fcnet_hiddens: [256, 256]
            vf_share_layers: True  # False
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.model import restore_original_dimensions
from ray.rllib.utils.annotations import override
from ray.rllib.offline import JsonReader
from ray.rllib.utils import try_import_tf

tf = try_import_tf()


class CustomLossModel(TFModelV2):
    """Custom model that adds an imitation loss on top of the policy loss."""

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super().__init__(obs_space, action_space, num_outputs, model_config,
                         name)
        self.fcnet = FullyConnectedNetwork(
            self.obs_space,
            self.action_space,
            num_outputs,
            model_config,
            name="fcnet")
        self.register_variables(self.fcnet.variables())

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        # Delegate to our FCNet.
        return self.fcnet(input_dict, state, seq_lens)

    @override(ModelV2)
    def custom_loss(self, policy_loss, loss_inputs):
        # Create a new input reader per worker.
        reader = JsonReader(self.model_config["custom_options"]["input_files"])
        input_ops = reader.tf_input_ops()

        # Define a secondary loss by building a graph copy with weight sharing.
        obs = restore_original_dimensions(
            tf.cast(input_ops["obs"], tf.float32), self.obs_space)
        logits, _ = self.forward({"obs": obs}, [], None)

        # You can also add self-supervised losses easily by referencing tensors
        # created during forward(), e.g. an autoencoder-style reconstruction
        # loss over the shared layers.
        print("FYI: You can also use these tensors: {}, ".format(loss_inputs))

        # Compute the IL loss.
        action_dist = Categorical(logits, self.model_config)
        self.policy_loss = policy_loss
        self.imitation_loss = tf.reduce_mean(
            -action_dist.logp(input_ops["actions"]))
        lambda1 = self.model_config["custom_options"]["lambda1"]
        lambda2 = self.model_config["custom_options"]["lambda2"]
        total_loss = policy_loss + lambda1 * policy_loss \
            + lambda2 * self.imitation_loss
        return total_loss
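
For the first experiment above to resolve "custom_model: custom_loss_model", the class has to be registered with RLlib's model catalog before training starts. A hedged sketch of that wiring; the flatland_sparse env registration happens elsewhere in this repo and is assumed here:

import ray
from ray import tune
from ray.rllib.models import ModelCatalog

# Register under the name used by the YAML config above.
ModelCatalog.register_custom_model("custom_loss_model", CustomLossModel)

ray.init()
tune.run(
    "APEX",
    stop={"timesteps_total": 100000000},
    config={
        "env": "flatland_sparse",  # assumed registered by the training script
        "model": {
            "custom_model": "custom_loss_model",
            "fcnet_hiddens": [256, 256],
            "custom_options": {
                "input_files": "/tmp/flatland-out",
                "lambda1": 1,
                "lambda2": 1,
            },
        },
    },
)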