Reinforcement Learning
ml-agents
TensorBoard
ONNX
unity-ml-agents
deep-reinforcement-learning
ML-Agents-Pyramids
Instructions to use Segamboam/ppo-PyramidsTraining with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- ml-agents
How to use Segamboam/ppo-PyramidsTraining with ml-agents:
mlagents-load-from-hf --repo-id="Segamboam/ppo-PyramidsTraining" --local-dir="./downloads"
- Notebooks
- Google Colab
- Kaggle
| { | |
| "name": "root", | |
| "gauges": { | |
| "Pyramids.Policy.Entropy.mean": { | |
| "value": 0.475240021944046, | |
| "min": 0.46541517972946167, | |
| "max": 1.4633564949035645, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.Entropy.sum": { | |
| "value": 14173.55859375, | |
| "min": 13962.455078125, | |
| "max": 44392.3828125, | |
| "count": 33 | |
| }, | |
| "Pyramids.Step.mean": { | |
| "value": 989895.0, | |
| "min": 29895.0, | |
| "max": 989895.0, | |
| "count": 33 | |
| }, | |
| "Pyramids.Step.sum": { | |
| "value": 989895.0, | |
| "min": 29895.0, | |
| "max": 989895.0, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.ExtrinsicValueEstimate.mean": { | |
| "value": 0.5235245227813721, | |
| "min": -0.2340911477804184, | |
| "max": 0.5236735939979553, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.ExtrinsicValueEstimate.sum": { | |
| "value": 143.96923828125, | |
| "min": -55.4796028137207, | |
| "max": 143.96923828125, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.RndValueEstimate.mean": { | |
| "value": 0.03888088837265968, | |
| "min": -0.018090147525072098, | |
| "max": 0.2799306809902191, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.RndValueEstimate.sum": { | |
| "value": 10.692244529724121, | |
| "min": -4.75770902633667, | |
| "max": 73.06190490722656, | |
| "count": 33 | |
| }, | |
| "Pyramids.Losses.PolicyLoss.mean": { | |
| "value": 0.06962337272956834, | |
| "min": 0.06403613343210299, | |
| "max": 0.07349723073442875, | |
| "count": 33 | |
| }, | |
| "Pyramids.Losses.PolicyLoss.sum": { | |
| "value": 0.9747272182139568, | |
| "min": 0.5656785967585228, | |
| "max": 1.0621262683793355, | |
| "count": 33 | |
| }, | |
| "Pyramids.Losses.ValueLoss.mean": { | |
| "value": 0.01322473961530098, | |
| "min": 0.0007025245559779764, | |
| "max": 0.022468483257052, | |
| "count": 33 | |
| }, | |
| "Pyramids.Losses.ValueLoss.sum": { | |
| "value": 0.18514635461421372, | |
| "min": 0.006322721003801788, | |
| "max": 0.314558765598728, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.LearningRate.mean": { | |
| "value": 7.407761816492854e-06, | |
| "min": 7.407761816492854e-06, | |
| "max": 0.00029499525166825, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.LearningRate.sum": { | |
| "value": 0.00010370866543089995, | |
| "min": 0.00010370866543089995, | |
| "max": 0.0036331156889614995, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.Epsilon.mean": { | |
| "value": 0.10246922142857144, | |
| "min": 0.10246922142857144, | |
| "max": 0.19833175, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.Epsilon.sum": { | |
| "value": 1.4345691000000003, | |
| "min": 1.4345691000000003, | |
| "max": 2.6110385000000007, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.Beta.mean": { | |
| "value": 0.00025667522071428565, | |
| "min": 0.00025667522071428565, | |
| "max": 0.009833341825000001, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.Beta.sum": { | |
| "value": 0.003593453089999999, | |
| "min": 0.003593453089999999, | |
| "max": 0.12112274615000002, | |
| "count": 33 | |
| }, | |
| "Pyramids.Losses.RNDLoss.mean": { | |
| "value": 0.010324850678443909, | |
| "min": 0.010324850678443909, | |
| "max": 0.3980882167816162, | |
| "count": 33 | |
| }, | |
| "Pyramids.Losses.RNDLoss.sum": { | |
| "value": 0.14454790949821472, | |
| "min": 0.14454790949821472, | |
| "max": 3.1847057342529297, | |
| "count": 33 | |
| }, | |
| "Pyramids.Environment.EpisodeLength.mean": { | |
| "value": 362.3333333333333, | |
| "min": 342.3, | |
| "max": 999.0, | |
| "count": 33 | |
| }, | |
| "Pyramids.Environment.EpisodeLength.sum": { | |
| "value": 29349.0, | |
| "min": 16821.0, | |
| "max": 32123.0, | |
| "count": 33 | |
| }, | |
| "Pyramids.Environment.CumulativeReward.mean": { | |
| "value": 1.5388740566961558, | |
| "min": -0.9999806972280625, | |
| "max": 1.5910244228111372, | |
| "count": 33 | |
| }, | |
| "Pyramids.Environment.CumulativeReward.sum": { | |
| "value": 124.64879859238863, | |
| "min": -31.998801663517952, | |
| "max": 143.19219805300236, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.ExtrinsicReward.mean": { | |
| "value": 1.5388740566961558, | |
| "min": -0.9999806972280625, | |
| "max": 1.5910244228111372, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.ExtrinsicReward.sum": { | |
| "value": 124.64879859238863, | |
| "min": -31.998801663517952, | |
| "max": 143.19219805300236, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.RndReward.mean": { | |
| "value": 0.03878415141177053, | |
| "min": 0.03878415141177053, | |
| "max": 7.534700820015536, | |
| "count": 33 | |
| }, | |
| "Pyramids.Policy.RndReward.sum": { | |
| "value": 3.141516264353413, | |
| "min": 3.141516264353413, | |
| "max": 135.62461476027966, | |
| "count": 33 | |
| }, | |
| "Pyramids.IsTraining.mean": { | |
| "value": 1.0, | |
| "min": 1.0, | |
| "max": 1.0, | |
| "count": 33 | |
| }, | |
| "Pyramids.IsTraining.sum": { | |
| "value": 1.0, | |
| "min": 1.0, | |
| "max": 1.0, | |
| "count": 33 | |
| } | |
| }, | |
| "metadata": { | |
| "timer_format_version": "0.1.0", | |
| "start_time_seconds": "1674052936", | |
| "python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]", | |
| "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", | |
| "mlagents_version": "0.29.0.dev0", | |
| "mlagents_envs_version": "0.29.0.dev0", | |
| "communication_protocol_version": "1.5.0", | |
| "pytorch_version": "1.8.1+cu102", | |
| "numpy_version": "1.21.6", | |
| "end_time_seconds": "1674054947" | |
| }, | |
| "total": 2011.787828901, | |
| "count": 1, | |
| "self": 0.3963621290001811, | |
| "children": { | |
| "run_training.setup": { | |
| "total": 0.10353333900002326, | |
| "count": 1, | |
| "self": 0.10353333900002326 | |
| }, | |
| "TrainerController.start_learning": { | |
| "total": 2011.2879334329998, | |
| "count": 1, | |
| "self": 1.1178473749951081, | |
| "children": { | |
| "TrainerController._reset_env": { | |
| "total": 6.0697630520000985, | |
| "count": 1, | |
| "self": 6.0697630520000985 | |
| }, | |
| "TrainerController.advance": { | |
| "total": 2004.0146495820045, | |
| "count": 63828, | |
| "self": 1.1605653400238225, | |
| "children": { | |
| "env_step": { | |
| "total": 1367.1529542059798, | |
| "count": 63828, | |
| "self": 1269.5356720280367, | |
| "children": { | |
| "SubprocessEnvManager._take_step": { | |
| "total": 96.92909705092643, | |
| "count": 63828, | |
| "self": 4.049075117918164, | |
| "children": { | |
| "TorchPolicy.evaluate": { | |
| "total": 92.88002193300827, | |
| "count": 62560, | |
| "self": 31.32916417103138, | |
| "children": { | |
| "TorchPolicy.sample_actions": { | |
| "total": 61.550857761976886, | |
| "count": 62560, | |
| "self": 61.550857761976886 | |
| } | |
| } | |
| } | |
| } | |
| }, | |
| "workers": { | |
| "total": 0.6881851270165953, | |
| "count": 63828, | |
| "self": 0.0, | |
| "children": { | |
| "worker_root": { | |
| "total": 2007.6760073899711, | |
| "count": 63828, | |
| "is_parallel": true, | |
| "self": 830.2577266800017, | |
| "children": { | |
| "run_training.setup": { | |
| "total": 0.0, | |
| "count": 0, | |
| "is_parallel": true, | |
| "self": 0.0, | |
| "children": { | |
| "steps_from_proto": { | |
| "total": 0.0018089249999775348, | |
| "count": 1, | |
| "is_parallel": true, | |
| "self": 0.0005876300001546042, | |
| "children": { | |
| "_process_rank_one_or_two_observation": { | |
| "total": 0.0012212949998229305, | |
| "count": 8, | |
| "is_parallel": true, | |
| "self": 0.0012212949998229305 | |
| } | |
| } | |
| }, | |
| "UnityEnvironment.step": { | |
| "total": 0.050259611000001314, | |
| "count": 1, | |
| "is_parallel": true, | |
| "self": 0.0005057099999703496, | |
| "children": { | |
| "UnityEnvironment._generate_step_input": { | |
| "total": 0.0004216110000925255, | |
| "count": 1, | |
| "is_parallel": true, | |
| "self": 0.0004216110000925255 | |
| }, | |
| "communicator.exchange": { | |
| "total": 0.04782815799990203, | |
| "count": 1, | |
| "is_parallel": true, | |
| "self": 0.04782815799990203 | |
| }, | |
| "steps_from_proto": { | |
| "total": 0.0015041320000364067, | |
| "count": 1, | |
| "is_parallel": true, | |
| "self": 0.00039199200000439305, | |
| "children": { | |
| "_process_rank_one_or_two_observation": { | |
| "total": 0.0011121400000320136, | |
| "count": 8, | |
| "is_parallel": true, | |
| "self": 0.0011121400000320136 | |
| } | |
| } | |
| } | |
| } | |
| } | |
| } | |
| }, | |
| "UnityEnvironment.step": { | |
| "total": 1177.4182807099694, | |
| "count": 63827, | |
| "is_parallel": true, | |
| "self": 27.022396464875783, | |
| "children": { | |
| "UnityEnvironment._generate_step_input": { | |
| "total": 21.231541364049235, | |
| "count": 63827, | |
| "is_parallel": true, | |
| "self": 21.231541364049235 | |
| }, | |
| "communicator.exchange": { | |
| "total": 1035.0354641479844, | |
| "count": 63827, | |
| "is_parallel": true, | |
| "self": 1035.0354641479844 | |
| }, | |
| "steps_from_proto": { | |
| "total": 94.12887873305999, | |
| "count": 63827, | |
| "is_parallel": true, | |
| "self": 20.341070800982152, | |
| "children": { | |
| "_process_rank_one_or_two_observation": { | |
| "total": 73.78780793207784, | |
| "count": 510616, | |
| "is_parallel": true, | |
| "self": 73.78780793207784 | |
| } | |
| } | |
| } | |
| } | |
| } | |
| } | |
| } | |
| } | |
| } | |
| } | |
| }, | |
| "trainer_advance": { | |
| "total": 635.7011300360009, | |
| "count": 63828, | |
| "self": 2.0364603400880696, | |
| "children": { | |
| "process_trajectory": { | |
| "total": 138.81790026991234, | |
| "count": 63828, | |
| "self": 138.63125504091272, | |
| "children": { | |
| "RLTrainer._checkpoint": { | |
| "total": 0.18664522899962321, | |
| "count": 2, | |
| "self": 0.18664522899962321 | |
| } | |
| } | |
| }, | |
| "_update_policy": { | |
| "total": 494.8467694260005, | |
| "count": 451, | |
| "self": 183.84266522800044, | |
| "children": { | |
| "TorchPPOOptimizer.update": { | |
| "total": 311.00410419800005, | |
| "count": 22833, | |
| "self": 311.00410419800005 | |
| } | |
| } | |
| } | |
| } | |
| } | |
| } | |
| }, | |
| "trainer_threads": { | |
| "total": 8.609999895270448e-07, | |
| "count": 1, | |
| "self": 8.609999895270448e-07 | |
| }, | |
| "TrainerController._save_models": { | |
| "total": 0.0856725630001165, | |
| "count": 1, | |
| "self": 0.001478554999721382, | |
| "children": { | |
| "RLTrainer._checkpoint": { | |
| "total": 0.08419400800039512, | |
| "count": 1, | |
| "self": 0.08419400800039512 | |
| } | |
| } | |
| } | |
| } | |
| } | |
| } | |
| } |