ppo-Worm / run_logs/timers.json
First training of Worm (commit 38dd613)
{
"name": "root",
"gauges": {
"Worm.Policy.Entropy.mean": {
"value": 0.7546901106834412,
"min": 0.7546901106834412,
"max": 1.418938398361206,
"count": 233
},
"Worm.Policy.Entropy.sum": {
"value": 22640.703125,
"min": 22640.703125,
"max": 42568.15234375,
"count": 233
},
"Worm.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 233
},
"Worm.Environment.EpisodeLength.sum": {
"value": 29970.0,
"min": 29970.0,
"max": 29970.0,
"count": 233
},
"Worm.Step.mean": {
"value": 6989000.0,
"min": 29000.0,
"max": 6989000.0,
"count": 233
},
"Worm.Step.sum": {
"value": 6989000.0,
"min": 29000.0,
"max": 6989000.0,
"count": 233
},
"Worm.Policy.ExtrinsicValueEstimate.mean": {
"value": 238.57986450195312,
"min": -0.030264511704444885,
"max": 238.7449493408203,
"count": 233
},
"Worm.Policy.ExtrinsicValueEstimate.sum": {
"value": 7157.39599609375,
"min": -0.8776708245277405,
"max": 7162.3486328125,
"count": 233
},
"Worm.Environment.CumulativeReward.mean": {
"value": 1188.4943888346354,
"min": 0.17140958830714226,
"max": 1188.6684448242188,
"count": 233
},
"Worm.Environment.CumulativeReward.sum": {
"value": 35654.83166503906,
"min": 5.142287649214268,
"max": 35660.05334472656,
"count": 233
},
"Worm.Policy.ExtrinsicReward.mean": {
"value": 1188.4943888346354,
"min": 0.17140958830714226,
"max": 1188.6684448242188,
"count": 233
},
"Worm.Policy.ExtrinsicReward.sum": {
"value": 35654.83166503906,
"min": 5.142287649214268,
"max": 35660.05334472656,
"count": 233
},
"Worm.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 233
},
"Worm.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 233
},
"Worm.Losses.PolicyLoss.mean": {
"value": 0.01411332649392231,
"min": 0.01047790824668482,
"max": 0.0235713456256365,
"count": 232
},
"Worm.Losses.PolicyLoss.sum": {
"value": 0.01411332649392231,
"min": 0.01047790824668482,
"max": 0.0235713456256365,
"count": 232
},
"Worm.Losses.ValueLoss.mean": {
"value": 11.896761213030134,
"min": 0.0010334085063299252,
"max": 13.696317945207868,
"count": 232
},
"Worm.Losses.ValueLoss.sum": {
"value": 11.896761213030134,
"min": 0.0010334085063299252,
"max": 13.696317945207868,
"count": 232
},
"Worm.Policy.LearningRate.mean": {
"value": 1.7143851428571302e-06,
"min": 1.7143851428571302e-06,
"max": 0.00029871428614285713,
"count": 232
},
"Worm.Policy.LearningRate.sum": {
"value": 1.7143851428571302e-06,
"min": 1.7143851428571302e-06,
"max": 0.00029871428614285713,
"count": 232
},
"Worm.Policy.Epsilon.mean": {
"value": 0.10057142857142858,
"min": 0.10057142857142858,
"max": 0.1995714285714285,
"count": 232
},
"Worm.Policy.Epsilon.sum": {
"value": 0.10057142857142858,
"min": 0.10057142857142858,
"max": 0.1995714285714285,
"count": 232
},
"Worm.Policy.Beta.mean": {
"value": 3.851428571428551e-05,
"min": 3.851428571428551e-05,
"max": 0.004978614285714285,
"count": 232
},
"Worm.Policy.Beta.sum": {
"value": 3.851428571428551e-05,
"min": 3.851428571428551e-05,
"max": 0.004978614285714285,
"count": 232
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683739604",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/home/mark/rl_course/.unit5_venv/bin/mlagents-learn ./ml-agents/config/ppo/Worm.yaml --env=./ml-agents/training-envs-executables/linux/Worm/Worm --run-id=Worm1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683750109"
},
"total": 10504.193586319685,
"count": 1,
"self": 0.3231488047167659,
"children": {
"run_training.setup": {
"total": 0.013392854947596788,
"count": 1,
"self": 0.013392854947596788
},
"TrainerController.start_learning": {
"total": 10503.85704466002,
"count": 1,
"self": 13.384338697884232,
"children": {
"TrainerController._reset_env": {
"total": 5.288313401862979,
"count": 1,
"self": 5.288313401862979
},
"TrainerController.advance": {
"total": 10485.048619326204,
"count": 701000,
"self": 13.016889019403607,
"children": {
"env_step": {
"total": 8093.429603974801,
"count": 701000,
"self": 7228.569016146474,
"children": {
"SubprocessEnvManager._take_step": {
"total": 855.4780502999201,
"count": 701000,
"self": 45.95606778562069,
"children": {
"TorchPolicy.evaluate": {
"total": 809.5219825142995,
"count": 701000,
"self": 809.5219825142995
}
}
},
"workers": {
"total": 9.382537528406829,
"count": 701000,
"self": 0.0,
"children": {
"worker_root": {
"total": 10480.880464751739,
"count": 701000,
"is_parallel": true,
"self": 4168.93715487374,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022225920110940933,
"count": 1,
"is_parallel": true,
"self": 0.0005126679316163063,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001709924079477787,
"count": 2,
"is_parallel": true,
"self": 0.001709924079477787
}
}
},
"UnityEnvironment.step": {
"total": 0.028827323112636805,
"count": 1,
"is_parallel": true,
"self": 0.0005137305706739426,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008109067566692829,
"count": 1,
"is_parallel": true,
"self": 0.0008109067566692829
},
"communicator.exchange": {
"total": 0.026349851861596107,
"count": 1,
"is_parallel": true,
"self": 0.026349851861596107
},
"steps_from_proto": {
"total": 0.0011528339236974716,
"count": 1,
"is_parallel": true,
"self": 0.00031482381746172905,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008380101062357426,
"count": 2,
"is_parallel": true,
"self": 0.0008380101062357426
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 6311.943309877999,
"count": 700999,
"is_parallel": true,
"self": 229.04008141485974,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 382.5745876613073,
"count": 700999,
"is_parallel": true,
"self": 382.5745876613073
},
"communicator.exchange": {
"total": 5163.548799792305,
"count": 700999,
"is_parallel": true,
"self": 5163.548799792305
},
"steps_from_proto": {
"total": 536.7798410095274,
"count": 700999,
"is_parallel": true,
"self": 143.95231797220185,
"children": {
"_process_rank_one_or_two_observation": {
"total": 392.8275230373256,
"count": 1401998,
"is_parallel": true,
"self": 392.8275230373256
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2378.602126332,
"count": 701000,
"self": 17.068648553453386,
"children": {
"process_trajectory": {
"total": 389.50852839881554,
"count": 701000,
"self": 387.6618690909818,
"children": {
"RLTrainer._checkpoint": {
"total": 1.846659307833761,
"count": 14,
"self": 1.846659307833761
}
}
},
"_update_policy": {
"total": 1972.024949379731,
"count": 233,
"self": 1759.5650872197002,
"children": {
"TorchPPOOptimizer.update": {
"total": 212.45986216003075,
"count": 9786,
"self": 212.45986216003075
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2759119272232056e-06,
"count": 1,
"self": 1.2759119272232056e-06
},
"TrainerController._save_models": {
"total": 0.13577195815742016,
"count": 1,
"self": 0.0016271830536425114,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13414477510377765,
"count": 1,
"self": 0.13414477510377765
}
}
}
}
}
}
}
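
Note (appended for illustration, not part of the original log): a minimal Python sketch for loading the timers.json shown above and printing the gauge summaries plus the self/total breakdown of the timer tree. The file path is an assumption based on the repo layout; the key names ("gauges", "total", "count", "self", "children") follow the structure visible in the file, and nothing here is taken from the ML-Agents API itself.

import json

# Illustrative sketch: the path is assumed from the repo layout.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} min={gauge['min']:.4g} "
          f"max={gauge['max']:.4g} count={gauge['count']}")

def walk(node, name="root", depth=0):
    # Recursively print each timer block: total wall time, call count,
    # and "self" time (time not attributed to any child block).
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.1f}s "
          f"count={node.get('count', 0)} self={node.get('self', 0.0):.1f}s")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

# The root object itself carries total/count/self, so start the walk there.
walk(timers)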