{
"instance_0": {
"model_name": "Qwen/Qwen2.5-72B",
"gpu_type": "A100_40GB",
"gpu_count": 4,
"tensor_parallel": 4,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.95,
"enforce_eager": true,
"attention_backend": "FLASH_ATTN"
},
"instance_1": {
"model_name": "Qwen/Qwen2.5-72B",
"gpu_type": "A100_40GB",
"gpu_count": 4,
"tensor_parallel": 4,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.95,
"enforce_eager": true,
"attention_backend": "FLASH_ATTN"
},
"instance_2": {
"model_name": "Qwen/Qwen2.5-14B",
"gpu_type": "V100_16GB",
"gpu_count": 4,
"tensor_parallel": 4,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.85,
"enforce_eager": false,
"attention_backend": "FLASH_ATTN"
},
"instance_3": {
"model_name": "Qwen/Qwen2.5-14B",
"gpu_type": "V100_16GB",
"gpu_count": 4,
"tensor_parallel": 4,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.85,
"enforce_eager": false,
"attention_backend": "FLASH_ATTN"
},
"instance_4": {
"model_name": "Qwen/Qwen2.5-14B",
"gpu_type": "V100_16GB",
"gpu_count": 4,
"tensor_parallel": 4,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.85,
"enforce_eager": false,
"attention_backend": "FLASH_ATTN"
},
"instance_5": {
"model_name": "Qwen/Qwen2.5-7B",
"gpu_type": "A30_24GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "FLASH_ATTN"
},
"instance_6": {
"model_name": "Qwen/Qwen2.5-7B",
"gpu_type": "A30_24GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "FLASH_ATTN"
},
"instance_7": {
"model_name": "Qwen/Qwen2.5-7B",
"gpu_type": "A30_24GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "FLASH_ATTN"
},
"instance_8": {
"model_name": "Qwen/Qwen2.5-7B",
"gpu_type": "A30_24GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "FLASH_ATTN"
},
"instance_9": {
"model_name": "Qwen/Qwen2.5-7B",
"gpu_type": "A30_24GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "FLASH_ATTN"
},
"instance_10": {
"model_name": "Qwen/Qwen2.5-3B",
"gpu_type": "A30_24GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "FLASH_ATTN"
},
"instance_11": {
"model_name": "Qwen/Qwen2.5-3B",
"gpu_type": "A30_24GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "FLASH_ATTN"
},
"instance_12": {
"model_name": "Qwen/Qwen2.5-3B",
"gpu_type": "A30_24GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v1_dev11708",
"engine_version": "v1",
"max_model_len": 8192,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "FLASH_ATTN"
},
"instance_13": {
"model_name": "Qwen/Qwen2.5-3B",
"gpu_type": "P100_12GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v0_dev5353",
"engine_version": "v0",
"max_model_len": 4096,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "XFORMERS"
},
"instance_14": {
"model_name": "Qwen/Qwen2.5-3B",
"gpu_type": "P100_12GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v0_dev5353",
"engine_version": "v0",
"max_model_len": 4096,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "XFORMERS"
},
"instance_15": {
"model_name": "Qwen/Qwen2.5-3B",
"gpu_type": "P100_12GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v0_dev5353",
"engine_version": "v0",
"max_model_len": 4096,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "XFORMERS"
},
"instance_16": {
"model_name": "Qwen/Qwen2.5-3B",
"gpu_type": "P100_12GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v0_dev5353",
"engine_version": "v0",
"max_model_len": 4096,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "XFORMERS"
},
"instance_17": {
"model_name": "Qwen/Qwen2.5-3B",
"gpu_type": "P100_12GB",
"gpu_count": 1,
"tensor_parallel": 1,
"inference_backend": "vllm",
"inference_backend_version": "v0_dev5353",
"engine_version": "v0",
"max_model_len": 4096,
"gpu_memory_utilization": 0.9,
"enforce_eager": false,
"attention_backend": "XFORMERS"
}
}