import logging

from ray.rllib.agents import with_common_config
from .custom_torch_policy import CustomTorchPolicy
# from ray.rllib.agents.trainer_template import build_trainer
from .custom_trainer_template import build_trainer

logger = logging.getLogger(__name__)

# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # Should use a critic as a baseline (otherwise don't use value baseline;
    # required for using GAE).
    "use_critic": True,
    # If true, use the Generalized Advantage Estimator (GAE)
    # with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
    "use_gae": True,
    # The GAE(lambda) parameter.
    "lambda": 1.0,
    # Initial coefficient for KL divergence.
    "kl_coeff": 0.2,
    # Size of batches collected from each worker.
    "rollout_fragment_length": 200,
    # Number of timesteps collected for each SGD round. This defines the size
    # of each SGD epoch.
    "train_batch_size": 4000,
    # Total SGD batch size across all devices for SGD. This defines the
    # minibatch size within each epoch.
    "sgd_minibatch_size": 128,
    # Whether to shuffle sequences in the batch when training (recommended).
    "shuffle_sequences": True,
    # Number of SGD iterations in each outer loop (i.e., number of epochs to
    # execute per train batch).
    "num_sgd_iter": 30,
    # Stepsize of SGD.
    "lr": 5e-5,
    # Learning rate schedule.
    "lr_schedule": None,
    # Share layers for value function. If you set this to True, it's important
    # to tune vf_loss_coeff.
    "vf_share_layers": False,
    # Coefficient of the value function loss. IMPORTANT: you must tune this if
    # you set vf_share_layers: True.
    "vf_loss_coeff": 1.0,
    # Coefficient of the entropy regularizer.
    "entropy_coeff": 0.0,
    # Decay schedule for the entropy regularizer.
    "entropy_coeff_schedule": None,
    # PPO clip parameter.
    "clip_param": 0.3,
    # Clip param for the value function. Note that this is sensitive to the
    # scale of the rewards. If your expected V is large, increase this.
    "vf_clip_param": 10.0,
    # If specified, clip the global norm of gradients by this amount.
    "grad_clip": None,
    # Target value for KL divergence.
    "kl_target": 0.01,
    # Whether to rollout "complete_episodes" or "truncate_episodes".
    "batch_mode": "truncate_episodes",
    # Which observation filter to apply to the observation.
    "observation_filter": "NoFilter",
    # Uses the sync samples optimizer instead of the multi-gpu one. This is
    # usually slower, but you might want to try it if you run into issues with
    # the default optimizer.
    "simple_optimizer": False,
    # Whether to fake GPUs (using CPUs).
    # Set this to True for debugging on non-GPU machines (set `num_gpus` > 0).
    "_fake_gpus": False,
    # Use PyTorch as framework?
    "use_pytorch": True,
    
    # Custom swithches
    "retune_skips": 300000,
    "retune_replay_size": 200000,
    "num_retunes": 6,
    "retune_epochs": 3,
    "standardize_rewards": False,
    "accumulate_train_batches": 1,
    "adaptive_gamma": False, 
    "final_lr": 1e-4,
    "lr_schedule": True,
    "final_entropy_coeff": 0.002,
    "entropy_schedule": True,
    
    "max_minibatch_size": 2048,
    "updates_per_batch": 8,
    "scale_reward": 1.0,
    "return_reset": True,
    "aux_phase_mixed_precision": False,
    "max_time": 100000000,
})
# __sphinx_doc_end__
# yapf: enable
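
# Example (illustrative sketch): one way to derive a run config from the
# defaults above. The key names come from DEFAULT_CONFIG / RLlib's common
# config; the values shown are placeholders, not tuned settings.
#
#   run_config = dict(DEFAULT_CONFIG, **{
#       "num_workers": 4,
#       "lr": 5e-4,
#       "final_lr": 1e-4,
#       "entropy_coeff": 0.01,
#       "final_entropy_coeff": 0.002,
#   })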


PPOTrainer = build_trainer(
    name="CustomTorchPPOAgent",
    default_config=DEFAULT_CONFIG,
    default_policy=CustomTorchPolicy)
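
# Minimal usage sketch (assumptions: this module is imported as part of its
# package so the relative imports resolve, the custom trainer template keeps
# the standard RLlib Trainer constructor, and the environment id below is
# only a placeholder):
#
#   import ray
#
#   ray.init()
#   trainer = PPOTrainer(env="procgen:procgen-coinrun-v0",
#                        config=dict(DEFAULT_CONFIG, num_workers=1))
#   for _ in range(5):
#       result = trainer.train()
#       print(result["episode_reward_mean"])
#   ray.shutdown()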