Dataset Preview
The full dataset viewer is not available (click to read why). Only showing a preview of the rows.
The dataset generation failed because of a cast error
Error code: DatasetGenerationCastError
Exception: DatasetGenerationCastError
Message: An error occurred while generating the dataset
All the data files must have the same columns, but at some point there are 5 new columns ({'backend', 'launcher', 'benchmark', 'experiment_name', 'environment'}) and 3 missing columns ({'decode', 'prefill', 'per_token'}).
This happened while the json dataset builder was generating data using
hf://datasets/davisgao/ennew/benchmarks/experiment_config.json (at revision f414fb59a9264f207859d663cf36838a9085c849)
Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)
Traceback: Traceback (most recent call last):
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2011, in _prepare_split_single
writer.write_table(table)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 585, in write_table
pa_table = table_cast(pa_table, self._schema)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2302, in table_cast
return cast_table_to_schema(table, schema)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2256, in cast_table_to_schema
raise CastError(
datasets.table.CastError: Couldn't cast
experiment_name: string
backend: struct<name: string, version: string, _target_: string, task: string, model: string, library: string, device: string, device_ids: string, seed: int64, inter_op_num_threads: null, intra_op_num_threads: null, hub_kwargs: struct<revision: string, force_download: bool, local_files_only: bool, trust_remote_code: bool>, no_weights: bool, device_map: null, torch_dtype: null, amp_autocast: bool, amp_dtype: null, eval_mode: bool, to_bettertransformer: bool, low_cpu_mem_usage: null, attn_implementation: null, cache_implementation: null, torch_compile: bool, torch_compile_config: struct<>, quantization_scheme: null, quantization_config: struct<>, deepspeed_inference: bool, deepspeed_inference_config: struct<>, peft_type: null, peft_config: struct<>>
child 0, name: string
child 1, version: string
child 2, _target_: string
child 3, task: string
child 4, model: string
child 5, library: string
child 6, device: string
child 7, device_ids: string
child 8, seed: int64
child 9, inter_op_num_threads: null
child 10, intra_op_num_threads: null
child 11, hub_kwargs: struct<revision: string, force_download: bool, local_files_only: bool, trust_remote_code: bool>
child 0, revision: string
child 1, force_download: bool
child 2, local_files_only: bool
child 3, trust_remote_code: bool
child 12, no_weights: bool
child 13, device_map: null
child 14, torch_dtype: null
child 15, amp_autocast: bool
child 16, amp_dtype:
...
>
child 11, generate_kwargs: struct<>
child 12, call_kwargs: struct<>
environment: struct<cpu: string, cpu_count: int64, cpu_ram_mb: double, system: string, machine: string, platform: string, processor: string, python_version: string, gpu: list<item: string>, gpu_count: int64, gpu_vram_mb: int64, optimum_benchmark_version: string, optimum_benchmark_commit: null, transformers_version: string, transformers_commit: null, accelerate_version: string, accelerate_commit: null, diffusers_version: null, diffusers_commit: null, optimum_version: null, optimum_commit: null, timm_version: null, timm_commit: null, peft_version: null, peft_commit: null>
child 0, cpu: string
child 1, cpu_count: int64
child 2, cpu_ram_mb: double
child 3, system: string
child 4, machine: string
child 5, platform: string
child 6, processor: string
child 7, python_version: string
child 8, gpu: list<item: string>
child 0, item: string
child 9, gpu_count: int64
child 10, gpu_vram_mb: int64
child 11, optimum_benchmark_version: string
child 12, optimum_benchmark_commit: null
child 13, transformers_version: string
child 14, transformers_commit: null
child 15, accelerate_version: string
child 16, accelerate_commit: null
child 17, diffusers_version: null
child 18, diffusers_commit: null
child 19, optimum_version: null
child 20, optimum_commit: null
child 21, timm_version: null
child 22, timm_commit: null
child 23, peft_version: null
child 24, peft_commit: null
to
{'prefill': {'memory': {'unit': Value(dtype='string', id=None), 'max_ram': Value(dtype='float64', id=None), 'max_global_vram': Value(dtype='float64', id=None), 'max_process_vram': Value(dtype='float64', id=None), 'max_reserved': Value(dtype='float64', id=None), 'max_allocated': Value(dtype='float64', id=None)}, 'latency': {'unit': Value(dtype='string', id=None), 'count': Value(dtype='int64', id=None), 'total': Value(dtype='float64', id=None), 'mean': Value(dtype='float64', id=None), 'stdev': Value(dtype='float64', id=None), 'p50': Value(dtype='float64', id=None), 'p90': Value(dtype='float64', id=None), 'p95': Value(dtype='float64', id=None), 'p99': Value(dtype='float64', id=None), 'values': Sequence(feature=Value(dtype='float64', id=None), length=-1, id=None)}, 'throughput': {'unit': Value(dtype='string', id=None), 'value': Value(dtype='float64', id=None)}, 'energy': Value(dtype='null', id=None), 'efficiency': Value(dtype='null', id=None)}, 'decode': {'memory': {'unit': Value(dtype='string', id=None), 'max_ram': Value(dtype='float64', id=None), 'max_global_vram': Value(dtype='float64', id=None), 'max_process_vram': Value(dtype='float64', id=None), 'max_reserved': Value(dtype='float64', id=None), 'max_allocated': Value(dtype='float64', id=None)}, 'latency': {'unit': Value(dtype='string', id=None), 'count': Value(dtype='int64', id=None), 'total': Value(dtype='float64', id=None), 'mean': Value(dtype='float64', id=None), 'stdev': Value(dtype='float64', id=None), 'p50': Value(dtype='float64', id=None), 'p90': Value(dtype='float64', id=None), 'p95': Value(dtype='float64', id=None), 'p99': Value(dtype='float64', id=None), 'values': Sequence(feature=Value(dtype='float64', id=None), length=-1, id=None)}, 'throughput': {'unit': Value(dtype='string', id=None), 'value': Value(dtype='float64', id=None)}, 'energy': Value(dtype='null', id=None), 'efficiency': Value(dtype='null', id=None)}, 'per_token': {'memory': Value(dtype='null', id=None), 'latency': {'unit': 
Value(dtype='string', id=None), 'count': Value(dtype='int64', id=None), 'total': Value(dtype='float64', id=None), 'mean': Value(dtype='float64', id=None), 'stdev': Value(dtype='float64', id=None), 'p50': Value(dtype='float64', id=None), 'p90': Value(dtype='float64', id=None), 'p95': Value(dtype='float64', id=None), 'p99': Value(dtype='float64', id=None), 'values': Sequence(feature=Value(dtype='float64', id=None), length=-1, id=None)}, 'throughput': {'unit': Value(dtype='string', id=None), 'value': Value(dtype='float64', id=None)}, 'energy': Value(dtype='null', id=None), 'efficiency': Value(dtype='null', id=None)}}
because column names don't match
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1577, in compute_config_parquet_and_info_response
parquet_operations = convert_to_parquet(builder)
File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1191, in convert_to_parquet
builder.download_and_prepare(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1027, in download_and_prepare
self._download_and_prepare(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1122, in _download_and_prepare
self._prepare_split(split_generator, **prepare_split_kwargs)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1882, in _prepare_split
for job_id, done, content in self._prepare_split_single(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2013, in _prepare_split_single
raise DatasetGenerationCastError.from_cast_error(
datasets.exceptions.DatasetGenerationCastError: An error occurred while generating the dataset
All the data files must have the same columns, but at some point there are 5 new columns ({'backend', 'launcher', 'benchmark', 'experiment_name', 'environment'}) and 3 missing columns ({'decode', 'prefill', 'per_token'}).
This happened while the json dataset builder was generating data using
hf://datasets/davisgao/ennew/benchmarks/experiment_config.json (at revision f414fb59a9264f207859d663cf36838a9085c849)
Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations).

Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
prefill dict | decode dict | per_token dict | experiment_name string | backend dict | launcher dict | benchmark dict | environment dict |
|---|---|---|---|---|---|---|---|
{
"memory": {
"unit": "MB",
"max_ram": 2031.69792,
"max_global_vram": 252287.3856,
"max_process_vram": 249359.761408,
"max_reserved": 244829.913088,
"max_allocated": 243680.131072
},
"latency": {
"unit": "s",
"count": 40,
"total": 242.91761718749993,
"mean": 6.0729404296875... | {
"memory": {
"unit": "MB",
"max_ram": 2031.69792,
"max_global_vram": 302241.54624,
"max_process_vram": 299225.841664,
"max_reserved": 294960.234496,
"max_allocated": 265282.7648
},
"latency": {
"unit": "s",
"count": 40,
"total": 248.41823291015623,
"mean": 6.21045582275390... | {
"memory": null,
"latency": {
"unit": "s",
"count": 3956,
"total": 467.31920429611165,
"mean": 0.11812922252176745,
"stdev": 0.5788143574843364,
"p50": 0.06266777420043945,
"p90": 0.06290176010131837,
"p95": 0.06296499252319336,
"p99": 0.06416163749694821,
"values": [
... | null | null | null | null | null |
null | null | null | pytorch_llama | {
"name": "pytorch",
"version": "2.1.2",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"model": "FlagAlpha/Llama2-Chinese-13b-Chat",
"library": "transformers",
"device": "cuda",
"device_ids": "0,1,2,3",
"seed": 42,
"inter_op_num_threads": null,
... | {
"name": "torchrun",
"_target_": "optimum_benchmark.launchers.torchrun.launcher.TorchrunLauncher",
"device_isolation": false,
"min_nodes": 1,
"max_nodes": 1,
"nproc_per_node": 4,
"role": "benchmark_worker",
"monitor_interval": 30,
"rdzv_id": "3ac12ff4-5dc0-43ee-97e5-2bc7705f9cf9",
"rdzv_backend": "... | {
"name": "inference",
"_target_": "optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark",
"iterations": 10,
"duration": 10,
"warmup_runs": 10,
"input_shapes": {
"batch_size": 8,
"num_choices": 2,
"sequence_length": 2048
},
"new_tokens": null,
"latency": true,
"memory": tr... | {
"cpu": " Intel(R) Xeon(R) Platinum 8378A CPU @ 3.00GHz",
"cpu_count": 128,
"cpu_ram_mb": 1081425.444864,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.4.0-144-generic-x86_64-with-glibc2.31",
"processor": "x86_64",
"python_version": "3.10.14",
"gpu": [
"NVIDIA A800-SXM4-80GB",
... |
README.md exists but content is empty.
- Downloads last month
- 3