Dataset Preview
Full Screen Viewer
Full Screen
The full dataset viewer is not available (click to read why). Only showing a preview of the rows.
The dataset generation failed because of a cast error
Error code: DatasetGenerationCastError Exception: DatasetGenerationCastError Message: An error occurred while generating the dataset All the data files must have the same columns, but at some point there are 5 new columns ({'experiment_name', 'backend', 'environment', 'launcher', 'benchmark'}) and 1 missing columns ({'forward'}). This happened while the json dataset builder was generating data using hf://datasets/IlyasMoutawwakil/optimum-benchmarks-ci/test_api_push_to_hub_mixin/experiment_config.json (at revision 335baeadc78a580f719398da485c6f12328e814b) Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations) Traceback: Traceback (most recent call last): File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2011, in _prepare_split_single writer.write_table(table) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 585, in write_table pa_table = table_cast(pa_table, self._schema) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2302, in table_cast return cast_table_to_schema(table, schema) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2256, in cast_table_to_schema raise CastError( datasets.table.CastError: Couldn't cast experiment_name: string backend: struct<name: string, version: string, _target_: string, task: string, model: string, library: string, device: string, device_ids: null, seed: int64, inter_op_num_threads: null, intra_op_num_threads: null, hub_kwargs: struct<revision: string, force_download: bool, local_files_only: bool, trust_remote_code: bool>, no_weights: bool, device_map: null, torch_dtype: null, amp_autocast: bool, amp_dtype: null, eval_mode: bool, to_bettertransformer: bool, low_cpu_mem_usage: null, attn_implementation: null, cache_implementation: null, 
torch_compile: bool, torch_compile_config: struct<>, quantization_scheme: null, quantization_config: struct<>, deepspeed_inference: bool, deepspeed_inference_config: struct<>, peft_type: null, peft_config: struct<>> child 0, name: string child 1, version: string child 2, _target_: string child 3, task: string child 4, model: string child 5, library: string child 6, device: string child 7, device_ids: null child 8, seed: int64 child 9, inter_op_num_threads: null child 10, intra_op_num_threads: null child 11, hub_kwargs: struct<revision: string, force_download: bool, local_files_only: bool, trust_remote_code: bool> child 0, revision: string child 1, force_download: bool child 2, local_files_only: bool child 3, trust_remote_code: bool child 12, no_weights: bool child 13, device_map: null child 14, torch_dtype: null child 15, amp_autocast: bool child 16, amp_dtype: null ... nerate_kwargs: struct<> child 11, call_kwargs: struct<> environment: struct<cpu: string, cpu_count: int64, cpu_ram_mb: double, system: string, machine: string, platform: string, processor: string, python_version: string, gpu: list<item: string>, gpu_count: int64, gpu_vram_mb: int64, optimum_benchmark_version: string, optimum_benchmark_commit: string, transformers_version: string, transformers_commit: null, accelerate_version: string, accelerate_commit: null, diffusers_version: string, diffusers_commit: null, optimum_version: null, optimum_commit: null, timm_version: string, timm_commit: null, peft_version: string, peft_commit: null> child 0, cpu: string child 1, cpu_count: int64 child 2, cpu_ram_mb: double child 3, system: string child 4, machine: string child 5, platform: string child 6, processor: string child 7, python_version: string child 8, gpu: list<item: string> child 0, item: string child 9, gpu_count: int64 child 10, gpu_vram_mb: int64 child 11, optimum_benchmark_version: string child 12, optimum_benchmark_commit: string child 13, transformers_version: string child 14, transformers_commit: 
null child 15, accelerate_version: string child 16, accelerate_commit: null child 17, diffusers_version: string child 18, diffusers_commit: null child 19, optimum_version: null child 20, optimum_commit: null child 21, timm_version: string child 22, timm_commit: null child 23, peft_version: string child 24, peft_commit: null to {'forward': {'memory': {'unit': Value(dtype='string', id=None), 'max_ram': Value(dtype='float64', id=None), 'max_vram': Value(dtype='null', id=None), 'max_reserved': Value(dtype='null', id=None), 'max_allocated': Value(dtype='null', id=None)}, 'latency': {'unit': Value(dtype='string', id=None), 'mean': Value(dtype='float64', id=None), 'stdev': Value(dtype='float64', id=None), 'values': Sequence(feature=Value(dtype='float64', id=None), length=-1, id=None)}, 'throughput': {'unit': Value(dtype='string', id=None), 'value': Value(dtype='float64', id=None)}, 'energy': Value(dtype='null', id=None), 'efficiency': Value(dtype='null', id=None)}} because column names don't match During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1321, in compute_config_parquet_and_info_response parquet_operations = convert_to_parquet(builder) File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 935, in convert_to_parquet builder.download_and_prepare( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1027, in download_and_prepare self._download_and_prepare( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1122, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1882, in _prepare_split for job_id, done, content in self._prepare_split_single( File 
"/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2013, in _prepare_split_single raise DatasetGenerationCastError.from_cast_error( datasets.exceptions.DatasetGenerationCastError: An error occurred while generating the dataset All the data files must have the same columns, but at some point there are 5 new columns ({'experiment_name', 'backend', 'environment', 'launcher', 'benchmark'}) and 1 missing columns ({'forward'}). This happened while the json dataset builder was generating data using hf://datasets/IlyasMoutawwakil/optimum-benchmarks-ci/test_api_push_to_hub_mixin/experiment_config.json (at revision 335baeadc78a580f719398da485c6f12328e814b) Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)
Need help making the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
forward (dict) | experiment_name (string) | backend (dict) | launcher (dict) | benchmark (dict) | environment (dict)
---|---|---|---|---|---
{
"memory": {
"unit": "MB",
"max_ram": 1084.637184,
"max_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"mean": 0.039569467664338075,
"stdev": 0.002033212140164305,
"values": [
0.041925519704818726,
0.04176455829292536,
0.041776430793106556,
0.041355191729962826,
0.04157991334795952,
0.04103707615286112,
0.04141983296722174,
0.04134945012629032,
0.04154875408858061,
0.04141546320170164,
0.04227675963193178,
0.03971706051379442,
0.038739739917218685,
0.0426051439717412,
0.0376142505556345,
0.03715469967573881,
0.037393887527287006,
0.03790341317653656,
0.03759381361305714,
0.03753022290766239,
0.03781145066022873,
0.0375981405377388,
0.03772352542728186,
0.03696391265839338,
0.038118516094982624,
0.03688943199813366
]
},
"throughput": {
"unit": "samples/s",
"value": 50.544020884124684
},
"energy": null,
"efficiency": null
} | null | null | null | null | null |
null | test_api_push_to_hub_mixin | {
"name": "pytorch",
"version": "2.2.2+cu118",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "fill-mask",
"model": "google-bert/bert-base-uncased",
"library": "transformers",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"hub_kwargs": {
"revision": "main",
"force_download": false,
"local_files_only": false,
"trust_remote_code": false
},
"no_weights": false,
"device_map": null,
"torch_dtype": null,
"amp_autocast": false,
"amp_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"torch_compile": false,
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
} | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"start_method": "spawn"
} | {
"name": "inference",
"_target_": "optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark",
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 2,
"num_choices": 2,
"sequence_length": 16
},
"new_tokens": null,
"latency": true,
"memory": true,
"energy": false,
"forward_kwargs": {},
"generate_kwargs": {},
"call_kwargs": {}
} | {
"cpu": " AMD EPYC 7742 64-Core Processor",
"cpu_count": 128,
"cpu_ram_mb": 540671.627264,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.4.0-166-generic-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A100-SXM4-80GB",
"NVIDIA A100-SXM4-80GB",
"NVIDIA A100-SXM4-80GB",
"NVIDIA DGX Display",
"NVIDIA A100-SXM4-80GB"
],
"gpu_count": 5,
"gpu_vram_mb": 347892350976,
"optimum_benchmark_version": "0.2.0",
"optimum_benchmark_commit": "379b5ada9deda73c472324db992fcbbba8f48fa4",
"transformers_version": "4.39.3",
"transformers_commit": null,
"accelerate_version": "0.29.1",
"accelerate_commit": null,
"diffusers_version": "0.27.2",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "0.9.16",
"timm_commit": null,
"peft_version": "0.10.0",
"peft_commit": null
} |
No dataset card yet
New: Create and edit this dataset card directly on the website!
Contribute a Dataset Card
Downloads last month
- 17