Commit ed021c4d authored by nilabha's avatar nilabha

Merge branch 'flatland-paper-baselines' of...

Merge branch 'flatland-paper-baselines' of gitlab.aicrowd.com:flatland/neurips2020-flatland-baselines into flatland-paper-baselines
parents b25d312c 174a128b
Pipeline #5113 failed with stage
in 2 minutes and 58 seconds
evaluation_num_workers: 2
# Evaluation workers for the evaluation run.
evaluation_interval: 50
# Episodes each time evaluation runs. Reduced episodes for rendering
evaluation_num_episodes: 5
# Override the env config for evaluation.
evaluation_config:
explore: False
env_config:
seed: 100
render: human
\ No newline at end of file
......@@ -7,4 +7,6 @@ evaluation_num_episodes: 2
evaluation_config:
explore: False
env_config:
seed: 100
\ No newline at end of file
seed: 100
render: human
# video_dir: small_tree_video
\ No newline at end of file
......@@ -447,9 +447,10 @@ class FlatlandRenderWrapper(RailEnv,gym.Env):
show_debug=False,
screen_height=600, # Adjust these parameters to fit your resolution
screen_width=800) # Adjust these parameters to fit your resolution
self.renderer.show = False
def update_renderer(self, mode='human'):
    """Render the wrapped Flatland env off-screen and return one RGB frame.

    Parameters
    ----------
    mode : str
        Render mode; accepted for gym-style compatibility. Only 'human' is
        passed by callers visible here — TODO confirm other modes are unused.

    Returns
    -------
    The rendered frame with the alpha channel dropped (H x W x 3).
    """
    # Diff residue removed: the span contained both the old call
    # (show=True) and the new call (show=False). Keep the post-image
    # show=False, consistent with `self.renderer.show = False` set at init —
    # rendering happens off-screen and no window is opened.
    image = self.renderer.render_env(show=False, show_observations=False,
                                     show_predictions=False,
                                     return_image=True)
    # render_env returns an RGBA image; strip the alpha channel.
    return image[:, :, :3]
......@@ -462,12 +463,12 @@ class FlatlandRenderWrapper(RailEnv,gym.Env):
super().close()
if self.renderer:
try:
self.renderer.close_window()
self.renderer = None
if self.renderer.show:
self.renderer.close_window()
except Exception as e:
# This is since the last step(Due to a stopping criteria) is skipped by rllib
# Due to this done is not true and the env does not close
# Finally the env is closed when RLLib exits but at that time there is no window
# and hence the error
print("Could Not close window due to:",e)
self.renderer = None
......@@ -33,7 +33,7 @@ flatland-sparse-small-action-mask-tree-fc-apex:
wandb:
project: neurips2020-flatland-baselines
entity: nilabha2007
entity: aicrowd
tags: ["small_v0", "new_tree_obs", "apex", "skip_no_choice_cells",
"action_mask"] # TODO should be set programmatically
......
......@@ -19,7 +19,7 @@ flatland-render-test:
rollout_fragment_length: 50 # 100
sgd_minibatch_size: 100 # 500
num_sgd_iter: 10
num_workers: 2
num_workers: 1
num_envs_per_worker: 1
batch_mode: truncate_episodes
observation_filter: NoFilter
......@@ -31,20 +31,21 @@ flatland-render-test:
observation: new_tree
skip_no_choice_cells: True
available_actions_obs: True
render: human
# render: human
# For saving videos in custom folder and to wandb.
# By default if not specified folder is flatland
video_dir: small_tree_video
# video_dir: small_tree_video
observation_config:
max_depth: 2
shortest_path_max_depth: 30
generator: sparse_rail_generator
generator_config: small_v0
eval_generator: test_render
wandb:
project: neurips2020-flatland-baselines
entity: nilabha2007
entity: aicrowd
tags: ["small_v0", "tree_obs"] # TODO should be set programmatically
# monitor_gym: True # Wandb video doesn't seem to work
......
......@@ -189,9 +189,14 @@ def run(args, parser):
}
if args.eval:
eval_configs = get_eval_config(exp['config'].get('env_config',\
{}).get('eval_generator',"default"))
eval_configs_file = exp['config'].get('env_config',\
{}).get('eval_generator',"default")
if args.record:
eval_configs_file = exp['config'].get('env_config',\
{}).get('eval_generator',"default_render")
eval_configs = get_eval_config(eval_configs_file)
eval_seed = eval_configs.get('evaluation_config',{}).get('env_config',{}).get('seed')
eval_render = eval_configs.get('evaluation_config',{}).get('env_config',{}).get('render')
# add evaluation config to the current config
exp['config'] = merge_dicts(exp['config'],eval_configs)
......@@ -201,7 +206,13 @@ def run(args, parser):
if eval_seed and eval_env_config:
# We override the env seed from the evaluation config
eval_env_config['seed'] = eval_seed
if eval_render and eval_env_config:
# We override the env render from the evaluation config
eval_env_config['render'] = eval_render
# Set video_dir if it exists
eval_render_dir = eval_configs.get('evaluation_config',{}).get('env_config',{}).get('video_dir')
if eval_render_dir:
eval_env_config['video_dir'] = eval_render_dir
# Remove any wandb related configs
if eval_env_config:
if eval_env_config.get('wandb'):
......
......@@ -105,13 +105,20 @@ def create_parser(parser_creator=None):
"--eval",
action="store_true",
help="Whether to run evaluation. Default evaluation config is default.yaml "
"to use custom evaluation config set (eval_generator:high_eval) under configs")
"to use custom evaluation config set (eval_generator:test_eval) under configs")
parser.add_argument(
"-i",
"--custom-fn",
action="store_true",
help="Whether the experiment uses a custom function for training"
"Default custom function is imitation_ppo_train_fn")
parser.add_argument(
"-r",
"--record",
action="store_true",
help="Whether the experiment requires video recording during evaluation"
"Default evaluation config is default_render.yaml "
"Can also be done via custom evaluation config set (eval_generator:test_render) under configs")
parser.add_argument(
"--bind-all",
action="store_true",
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment