{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.287369042634964, "min": 0.2667141854763031, "max": 1.4335803985595703, "count": 36 }, "Pyramids.Policy.Entropy.sum": { "value": 8565.896484375, "min": 8129.4482421875, "max": 43489.09375, "count": 36 }, "Pyramids.Step.mean": { "value": 1079961.0, "min": 29952.0, "max": 1079961.0, "count": 36 }, "Pyramids.Step.sum": { "value": 1079961.0, "min": 29952.0, "max": 1079961.0, "count": 36 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.5470317602157593, "min": -0.0999130979180336, "max": 0.6519434452056885, "count": 36 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 147.69857788085938, "min": -24.079055786132812, "max": 179.93638610839844, "count": 36 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.006864666938781738, "min": -0.004418520722538233, "max": 0.4534786641597748, "count": 36 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 1.8534600734710693, "min": -1.2195117473602295, "max": 107.47444152832031, "count": 36 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.07089045304850775, "min": 0.06409058919803427, "max": 0.0721138468342057, "count": 36 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9924663426791085, "min": 0.48811832188388415, "max": 1.0817077025130855, "count": 36 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.015651139642050976, "min": 0.000885894968271914, "max": 0.015983723027490442, "count": 36 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.21911595498871367, "min": 0.011516634587534881, "max": 0.23051815697616287, "count": 36 }, "Pyramids.Policy.LearningRate.mean": { "value": 9.528876044519475e-06, "min": 9.528876044519475e-06, "max": 0.00029559148198898697, "count": 36 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00013340426462327265, "min": 0.00013340426462327265, "max": 0.0035707995733699088, "count": 36 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10317625974025976, "min": 0.10317625974025976, "max": 0.19853049350649352, "count": 36 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4444676363636366, "min": 1.3897134545454546, "max": 2.590266454545455, "count": 36 }, "Pyramids.Policy.Beta.mean": { "value": 0.00032730834805194794, "min": 0.00032730834805194794, "max": 0.009853196301298703, "count": 36 }, "Pyramids.Policy.Beta.sum": { "value": 0.004582316872727271, "min": 0.004582316872727271, "max": 0.11904761880909089, "count": 36 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.009886262938380241, "min": 0.009681078605353832, "max": 0.4267338216304779, "count": 36 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.13840767741203308, "min": 0.1355351060628891, "max": 2.9871368408203125, "count": 36 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 361.26190476190476, "min": 301.77777777777777, "max": 999.0, "count": 36 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 30346.0, "min": 15984.0, "max": 32568.0, "count": 36 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.519659504649185, "min": -1.0000000521540642, "max": 1.6982222048772706, "count": 36 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 127.65139839053154, "min": -30.415201626718044, "max": 155.36799799650908, "count": 36 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.519659504649185, "min": -1.0000000521540642, "max": 1.6982222048772706, "count": 36 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 127.65139839053154, "min": -30.415201626718044, "max": 155.36799799650908, "count": 36 }, "Pyramids.Policy.RndReward.mean": { "value": 
0.03621241321005592, "min": 0.03196428829461284, "max": 8.495654990896583, "count": 36 }, "Pyramids.Policy.RndReward.sum": { "value": 3.0418427096446976, "min": 2.8650760277814697, "max": 135.93047985434532, "count": 36 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 36 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 36 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1727776843", "python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.4.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1727780588" }, "total": 3744.8172190179994, "count": 1, "self": 0.6511105679992397, "children": { "run_training.setup": { "total": 0.07290274000001773, "count": 1, "self": 0.07290274000001773 }, "TrainerController.start_learning": { "total": 3744.09320571, "count": 1, "self": 2.5731807299316642, "children": { "TrainerController._reset_env": { "total": 2.523525099000153, "count": 1, "self": 2.523525099000153 }, "TrainerController.advance": { "total": 3738.9151695030687, "count": 70468, "self": 2.9955448220648577, "children": { "env_step": { "total": 2475.3934913830053, "count": 70468, "self": 2290.3742062289784, "children": { "SubprocessEnvManager._take_step": { "total": 183.44057437001788, "count": 70468, "self": 7.992111577047808, "children": { "TorchPolicy.evaluate": { "total": 175.44846279297008, "count": 68808, "self": 175.44846279297008 } } }, "workers": { "total": 1.5787107840089902, "count": 70468, "self": 0.0, "children": { "worker_root": { "total": 3736.3162278599502, "count": 70468, "is_parallel": true, "self": 1655.792529376963, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0033808529999532766, "count": 1, "is_parallel": true, "self": 0.0011859959997764236, "children": { "_process_rank_one_or_two_observation": { "total": 0.002194857000176853, "count": 8, "is_parallel": true, "self": 0.002194857000176853 } } }, "UnityEnvironment.step": { "total": 0.06702927700007422, "count": 1, "is_parallel": true, "self": 0.0008099440001387848, "children": { "UnityEnvironment._generate_step_input": { "total": 0.000549130999843328, "count": 1, "is_parallel": true, "self": 0.000549130999843328 }, "communicator.exchange": { "total": 0.06341267700008757, "count": 1, "is_parallel": true, "self": 0.06341267700008757 }, "steps_from_proto": { "total": 0.0022575250000045344, "count": 1, "is_parallel": true, "self": 0.0004763680003634363, "children": { "_process_rank_one_or_two_observation": { "total": 0.001781156999641098, "count": 8, "is_parallel": true, "self": 0.001781156999641098 } } } } } } }, "UnityEnvironment.step": { "total": 2080.523698482987, "count": 70467, "is_parallel": true, "self": 56.09306456202694, "children": { "UnityEnvironment._generate_step_input": { "total": 34.77281887001436, "count": 70467, "is_parallel": true, "self": 34.77281887001436 }, "communicator.exchange": { "total": 1844.6171182019805, "count": 70467, "is_parallel": true, "self": 1844.6171182019805 }, "steps_from_proto": { "total": 145.04069684896535, "count": 70467, "is_parallel": true, "self": 
31.630093902177123, "children": { "_process_rank_one_or_two_observation": { "total": 113.41060294678823, "count": 563736, "is_parallel": true, "self": 113.41060294678823 } } } } } } } } } } }, "trainer_advance": { "total": 1260.5261332979985, "count": 70468, "self": 5.006909977063287, "children": { "process_trajectory": { "total": 193.3450703299311, "count": 70468, "self": 193.11726744693055, "children": { "RLTrainer._checkpoint": { "total": 0.22780288300054963, "count": 2, "self": 0.22780288300054963 } } }, "_update_policy": { "total": 1062.1741529910041, "count": 498, "self": 421.50339530400197, "children": { "TorchPPOOptimizer.update": { "total": 640.6707576870022, "count": 25095, "self": 640.6707576870022 } } } } } } }, "trainer_threads": { "total": 1.184000211651437e-06, "count": 1, "self": 1.184000211651437e-06 }, "TrainerController._save_models": { "total": 0.08132919399940874, "count": 1, "self": 0.0019687249996422906, "children": { "RLTrainer._checkpoint": { "total": 0.07936046899976645, "count": 1, "self": 0.07936046899976645 } } } } } } }