{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1237443685531616,
"min": 1.1237443685531616,
"max": 2.8504152297973633,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10754.2333984375,
"min": 10754.2333984375,
"max": 29222.45703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.614886283874512,
"min": 0.43102267384529114,
"max": 11.614886283874512,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2264.90283203125,
"min": 83.61840057373047,
"max": 2333.810791015625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06570940507734514,
"min": 0.06106924022081811,
"max": 0.07402926603361837,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26283762030938057,
"min": 0.24427696088327244,
"max": 0.3680490115132439,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18228381214772954,
"min": 0.1403834389764633,
"max": 0.27825765861015694,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7291352485909182,
"min": 0.5615337559058532,
"max": 1.3912882930507846,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.34090909090909,
"min": 3.8636363636363638,
"max": 23.34090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1027.0,
"min": 170.0,
"max": 1268.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.34090909090909,
"min": 3.8636363636363638,
"max": 23.34090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1027.0,
"min": 170.0,
"max": 1268.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673775769",
"python_version": "3.8.13 (default, Mar 28 2022, 11:38:47) \n[GCC 7.5.0]",
"command_line_arguments": "/data1/zhenhuan_liu/anaconda3/envs/RL/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.23.0",
"end_time_seconds": "1673776334"
},
"total": 564.4623995572329,
"count": 1,
"self": 0.3225214555859566,
"children": {
"run_training.setup": {
"total": 0.024620432406663895,
"count": 1,
"self": 0.024620432406663895
},
"TrainerController.start_learning": {
"total": 564.1152576692402,
"count": 1,
"self": 0.47332795709371567,
"children": {
"TrainerController._reset_env": {
"total": 2.1684222407639027,
"count": 1,
"self": 2.1684222407639027
},
"TrainerController.advance": {
"total": 561.4020166806877,
"count": 18203,
"self": 0.2268051691353321,
"children": {
"env_step": {
"total": 561.1752115115523,
"count": 18203,
"self": 475.9460798688233,
"children": {
"SubprocessEnvManager._take_step": {
"total": 85.02812844887376,
"count": 18203,
"self": 1.212802231311798,
"children": {
"TorchPolicy.evaluate": {
"total": 83.81532621756196,
"count": 18203,
"self": 17.013007927685976,
"children": {
"TorchPolicy.sample_actions": {
"total": 66.80231828987598,
"count": 18203,
"self": 66.80231828987598
}
}
}
}
},
"workers": {
"total": 0.20100319385528564,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 562.8091255016625,
"count": 18203,
"is_parallel": true,
"self": 196.09069154784083,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0042130351066589355,
"count": 1,
"is_parallel": true,
"self": 0.0011581815779209137,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003054853528738022,
"count": 10,
"is_parallel": true,
"self": 0.003054853528738022
}
}
},
"UnityEnvironment.step": {
"total": 0.05857975780963898,
"count": 1,
"is_parallel": true,
"self": 0.0009105689823627472,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000627957284450531,
"count": 1,
"is_parallel": true,
"self": 0.000627957284450531
},
"communicator.exchange": {
"total": 0.05426255241036415,
"count": 1,
"is_parallel": true,
"self": 0.05426255241036415
},
"steps_from_proto": {
"total": 0.002778679132461548,
"count": 1,
"is_parallel": true,
"self": 0.000672709196805954,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002105969935655594,
"count": 10,
"is_parallel": true,
"self": 0.002105969935655594
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 366.71843395382166,
"count": 18202,
"is_parallel": true,
"self": 15.889967773109674,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.847616668790579,
"count": 18202,
"is_parallel": true,
"self": 8.847616668790579
},
"communicator.exchange": {
"total": 295.01977460831404,
"count": 18202,
"is_parallel": true,
"self": 295.01977460831404
},
"steps_from_proto": {
"total": 46.96107490360737,
"count": 18202,
"is_parallel": true,
"self": 10.344161704182625,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.616913199424744,
"count": 182020,
"is_parallel": true,
"self": 36.616913199424744
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00019016116857528687,
"count": 1,
"self": 0.00019016116857528687,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 560.2121567800641,
"count": 244995,
"is_parallel": true,
"self": 3.976197514683008,
"children": {
"process_trajectory": {
"total": 326.1053060814738,
"count": 244995,
"is_parallel": true,
"self": 325.2289860062301,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8763200752437115,
"count": 4,
"is_parallel": true,
"self": 0.8763200752437115
}
}
},
"_update_policy": {
"total": 230.13065318390727,
"count": 90,
"is_parallel": true,
"self": 29.199153769761324,
"children": {
"TorchPPOOptimizer.update": {
"total": 200.93149941414595,
"count": 4587,
"is_parallel": true,
"self": 200.93149941414595
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.07130062952637672,
"count": 1,
"self": 0.0007156208157539368,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07058500871062279,
"count": 1,
"self": 0.07058500871062279
}
}
}
}
}
}
}