import logging

from ray.rllib.agents import with_common_config

from .custom_torch_ppg import CustomTorchPolicy
# from ray.rllib.agents.trainer_template import build_trainer
from .custom_trainer_template import build_trainer

logger = logging.getLogger(__name__)

# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # Should use a critic as a baseline (otherwise don't use value baseline;
    # required for using GAE).
    "use_critic": True,
    # If true, use the Generalized Advantage Estimator (GAE)
    # with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
    "use_gae": True,
    # The GAE(lambda) parameter.
    "lambda": 1.0,
    # Initial coefficient for KL divergence.
    "kl_coeff": 0.2,
    # Size of batches collected from each worker.
    "rollout_fragment_length": 200,
    # Number of timesteps collected for each SGD round. This defines the size
    # of each SGD epoch.
    "train_batch_size": 4000,
    # Total SGD batch size across all devices for SGD. This defines the
    # minibatch size within each epoch.
    "sgd_minibatch_size": 128,
    # Whether to shuffle sequences in the batch when training (recommended).
    "shuffle_sequences": True,
    # Number of SGD iterations in each outer loop (i.e., number of epochs to
    # execute per train batch).
    "num_sgd_iter": 30,
    # Stepsize of SGD.
    "lr": 5e-5,
    # Learning rate schedule.
    "lr_schedule": None,
    # Share layers for value function. If you set this to True, it's important
    # to tune vf_loss_coeff.
    "vf_share_layers": False,
    # Coefficient of the value function loss. IMPORTANT: you must tune this if
    # you set vf_share_layers: True.
    "vf_loss_coeff": 1.0,
    # Coefficient of the entropy regularizer.
    "entropy_coeff": 0.0,
    # Decay schedule for the entropy regularizer.
    "entropy_coeff_schedule": None,
    # PPO clip parameter.
    "clip_param": 0.3,
    # Clip param for the value function. Note that this is sensitive to the
    # scale of the rewards. If your expected V is large, increase this.
    "vf_clip_param": 10.0,
    # If specified, clip the global norm of gradients by this amount.
    "grad_clip": None,
    # Target value for KL divergence.
    "kl_target": 0.01,
    # Whether to rollout "complete_episodes" or "truncate_episodes".
    "batch_mode": "truncate_episodes",
    # Which observation filter to apply to the observation.
    "observation_filter": "NoFilter",
    # Uses the sync samples optimizer instead of the multi-gpu one. This is
    # usually slower, but you might want to try it if you run into issues with
    # the default optimizer.
    "simple_optimizer": False,
    # Whether to fake GPUs (using CPUs).
    # Set this to True for debugging on non-GPU machines (set `num_gpus` > 0).
    "_fake_gpus": False,
    # Use PyTorch as framework?
    "use_pytorch": True,

    # Custom switches
    "skips": 0,
    "n_pi": 32,
    "num_retunes": 100,
    "retune_epochs": 6,
    "standardize_rewards": True,
    "scale_reward": 1.0,
    "accumulate_train_batches": 1,
    "adaptive_gamma": False,
    "final_lr": 2e-4,
    # NOTE: duplicate key; this string value overrides the `None` set in the
    # common PPO block above.
    "lr_schedule": 'None',
    "final_entropy_coeff": 0.002,
    "entropy_schedule": False,
    "max_minibatch_size": 2048,
    "updates_per_batch": 8,
    "aux_mbsize": 4,
    "augment_buffer": False,
    "reset_returns": True,
    "flattened_buffer": False,
    "augment_randint_num": 6,
    "aux_lr": 5e-4,
    "value_lr": 1e-3,
    "same_lr_everywhere": False,
    "aux_phase_mixed_precision": False,
    "single_optimizer": False,
    "max_time": 7200,
    "pi_phase_mixed_precision": False,
    "aux_num_accumulates": 1,
})
# __sphinx_doc_end__
# yapf: enable

PPGTrainer = build_trainer(
    name="CustomTorchPPGAgent",
    default_config=DEFAULT_CONFIG,
    default_policy=CustomTorchPolicy)
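

# --- Example usage (a minimal sketch, not part of the original module) ---
# Shows how the PPGTrainer built above might be launched with the old RLlib
# trainer API. The environment id "procgen:procgen-coinrun-v0" and the config
# overrides below are illustrative assumptions, not values taken from this
# file; adjust them to your setup.
if __name__ == "__main__":
    import ray

    ray.init()
    trainer = PPGTrainer(
        env="procgen:procgen-coinrun-v0",  # assumed Gym-registered env id
        config={
            "num_workers": 2,   # assumed; scale to available CPUs
            "num_gpus": 0,      # assumed; set > 0 if a GPU is available
        })
    # Run a few training iterations and print the mean episode reward.
    for _ in range(3):
        result = trainer.train()
        print(result["episode_reward_mean"])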