Adding regression benchmark for the transformers SHA 5ccf343aebdb7c913cc41149c9f8b4fbe37c0028
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0/.config/config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0/.config/hydra.yaml +174 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0/.config/overrides.yaml +2 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0/hydra_config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0/inference_results.csv +2 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0/main.log +26 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1/.config/config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1/.config/hydra.yaml +174 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1/.config/overrides.yaml +2 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1/hydra_config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1/inference_results.csv +2 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1/main.log +26 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2/.config/config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2/.config/hydra.yaml +174 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2/.config/overrides.yaml +2 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2/hydra_config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2/inference_results.csv +2 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2/main.log +26 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3/.config/config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3/.config/hydra.yaml +174 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3/.config/overrides.yaml +2 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3/hydra_config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3/inference_results.csv +2 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3/main.log +26 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0/.config/config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0/.config/hydra.yaml +172 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0/.config/overrides.yaml +1 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0/hydra_config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0/inference_results.csv +2 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0/main.log +20 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1/.config/config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1/.config/hydra.yaml +172 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1/.config/overrides.yaml +1 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1/hydra_config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1/inference_results.csv +2 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1/main.log +20 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0/.config/config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0/.config/hydra.yaml +170 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0/.config/overrides.yaml +1 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0/hydra_config.yaml +66 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0/inference_results.csv +2 -0
- raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0/main.log +20 -0
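Each sweep job below follows the same layout: <experiment_name>/<job num> holds the composed configs under .config/, the resolved hydra_config.yaml, the measurements in inference_results.csv, and the job's main.log. A minimal aggregation sketch (not part of this commit; assumes pandas and PyYAML are installed) that collects every job's CSV under raw_results/ and keys each row by the overrides recorded next to it — fields a job does not sweep or report simply come out as NaN:

# Minimal sketch: one comparison table from all per-job CSVs.
from pathlib import Path

import pandas as pd
import yaml

rows = []
for csv_path in Path("raw_results").glob("*/*/*/inference_results.csv"):
    job_dir = csv_path.parent
    # overrides.yaml is a YAML list like "- backend.torch_dtype=float16"
    overrides = yaml.safe_load((job_dir / ".config" / "overrides.yaml").read_text())
    row = pd.read_csv(csv_path, index_col=0).iloc[0].to_dict()
    row.update(dict(o.split("=", 1) for o in overrides))
    row["experiment"] = job_dir.parent.name
    rows.append(row)

df = pd.DataFrame(rows)
print(df[["experiment", "backend.torch_dtype",
          "benchmark.input_shapes.batch_size",
          "generate.throughput(tokens/s)"]])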
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0/.config/config.yaml
ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float16
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: ${is_inference:${benchmark.name}}
+  eval_mode: ${is_inference:${benchmark.name}}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 1
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: ${infer_task:${model}, ${hub_kwargs.revision}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0/.config/hydra.yaml
ADDED
@@ -0,0 +1,174 @@
+hydra:
+  run:
+    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+  sweep:
+    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params:
+      benchmark.input_shapes.batch_size: 1,16
+      backend.torch_dtype: float16,float32
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: MULTIRUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=MULTIRUN
+    task:
+    - benchmark.input_shapes.batch_size=1
+    - backend.torch_dtype=float16
+  job:
+    name: main
+    chdir: true
+    override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=1
+    id: '0'
+    num: 0
+    config_name: llama2_1gpu_inference
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /home/user/transformers-regression
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /home/user/transformers-regression/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /home/user/transformers-regression/sweeps/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0
+    choices:
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
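The sweep directory above is assembled from two environment variables via OmegaConf's oc.env resolver, which is how the commit date and SHA end up in every path in this commit. A small, self-contained sketch of that resolution (assumes only omegaconf; values mirror this commit's directory name):

# Sketch of the ${oc.env:...} interpolation used in hydra.run.dir / hydra.sweep.dir.
import os

from omegaconf import OmegaConf

os.environ["COMMIT_DATE_GMT"] = "2023-08-16_09:48:24"
os.environ["COMMIT_SHA"] = "5ccf343aebdb7c913cc41149c9f8b4fbe37c0028"

cfg = OmegaConf.create({
    "experiment_name": "llama_1gpu_inference",
    "dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}",
})
print(cfg.dir)
# sweeps/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference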
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0/.config/overrides.yaml
ADDED
@@ -0,0 +1,2 @@
+- benchmark.input_shapes.batch_size=1
+- backend.torch_dtype=float16
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0/hydra_config.yaml
ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float16
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: true
+  eval_mode: true
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 1
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: text-generation
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
+,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+0,15651.96288,0.0309,32.4,5.8,34.5
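This row is self-consistent with the config above (new_tokens: 200, batch_size: 1), assuming generate.throughput is defined as tokens generated per second of generation latency:

# Quick consistency check (assumption about the metric definition).
new_tokens, batch_size = 200, 1
generate_latency_s = 5.8
print(round(new_tokens * batch_size / generate_latency_s, 1))  # -> 34.5, matching the CSV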
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/0/main.log
ADDED
@@ -0,0 +1,26 @@
+[2023-08-17 08:13:34,922][benchmark][INFO] - Configuring inference benchmark
+[2023-08-17 08:13:34,924][benchmark][INFO] - + Setting seed(42)
+[2023-08-17 08:13:36,352][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-17 08:13:36,352][backend][INFO] - Configuring pytorch backend
+[2023-08-17 08:13:36,353][backend][INFO] - + Checking initial device isolation
+[2023-08-17 08:13:36,353][utils][INFO] - device_ids to check: {0}
+[2023-08-17 08:13:36,425][utils][INFO] - os.getpid() 514371
+[2023-08-17 08:13:36,425][utils][INFO] - pids_on_device_id set()
+[2023-08-17 08:13:36,426][backend][INFO] - + Checking contineous device isolation
+[2023-08-17 08:13:36,439][pytorch][INFO] - + Disabling gradients
+[2023-08-17 08:13:36,440][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+[2023-08-17 08:14:58,544][pytorch][INFO] - + Turning on eval mode
+[2023-08-17 08:14:58,546][inference][INFO] - Running inference benchmark
+[2023-08-17 08:15:06,681][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-17 08:15:07,941][memory_tracker][INFO] - Peak memory usage: 15651.96288 MB
+[2023-08-17 08:15:07,941][inference][INFO] - + Forward pass peak memory: 15651.96288 (MB)
+[2023-08-17 08:15:07,942][inference][INFO] - + Warming up the forward pass
+[2023-08-17 08:15:08,264][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-17 08:15:24,763][inference][INFO] - + Forward pass latency: 3.09e-02 (s)
+[2023-08-17 08:15:24,764][inference][INFO] - + Forward pass throughput: 32.40 (samples/s)
+[2023-08-17 08:15:24,765][inference][INFO] - + Warming up the generation pass
+[2023-08-17 08:15:31,241][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-17 08:15:48,632][inference][INFO] - + Generation pass latency: 5.80e+00 (s)
+[2023-08-17 08:15:48,635][inference][INFO] - + Generation pass throughput: 34.50 (tokens/s)
+[2023-08-17 08:15:48,635][inference][INFO] - Saving inference results
+[2023-08-17 08:15:48,712][backend][INFO] - Cleaning backend
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1/.config/config.yaml
ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float32
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: ${is_inference:${benchmark.name}}
+  eval_mode: ${is_inference:${benchmark.name}}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 1
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: ${infer_task:${model}, ${hub_kwargs.revision}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1/.config/hydra.yaml
ADDED
@@ -0,0 +1,174 @@
+hydra:
+  run:
+    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+  sweep:
+    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params:
+      benchmark.input_shapes.batch_size: 1,16
+      backend.torch_dtype: float16,float32
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: MULTIRUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=MULTIRUN
+    task:
+    - benchmark.input_shapes.batch_size=1
+    - backend.torch_dtype=float32
+  job:
+    name: main
+    chdir: true
+    override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=1
+    id: '1'
+    num: 1
+    config_name: llama2_1gpu_inference
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /home/user/transformers-regression
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /home/user/transformers-regression/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /home/user/transformers-regression/sweeps/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1
+    choices:
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1/.config/overrides.yaml
ADDED
@@ -0,0 +1,2 @@
+- benchmark.input_shapes.batch_size=1
+- backend.torch_dtype=float32
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1/hydra_config.yaml
ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float32
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: true
+  eval_mode: true
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 1
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: text-generation
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
+,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+0,29317.005311999998,0.0641,15.6,5.83,34.3
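Read against job 0 (float16, same batch size), this float32 run roughly doubles peak memory and forward latency while generation throughput barely moves; a quick readout of the two CSVs:

# Readout of jobs 0 (float16) and 1 (float32) at batch_size=1.
fp16 = {"mem_mb": 15651.96288, "fwd_s": 0.0309, "gen_tok_s": 34.5}
fp32 = {"mem_mb": 29317.005312, "fwd_s": 0.0641, "gen_tok_s": 34.3}
print(f"peak memory ratio:     {fp32['mem_mb'] / fp16['mem_mb']:.2f}x")  # ~1.87x
print(f"forward latency ratio: {fp32['fwd_s'] / fp16['fwd_s']:.2f}x")    # ~2.07x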
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/1/main.log
ADDED
@@ -0,0 +1,26 @@
+[2023-08-17 08:15:49,285][benchmark][INFO] - Configuring inference benchmark
+[2023-08-17 08:15:49,286][benchmark][INFO] - + Setting seed(42)
+[2023-08-17 08:15:49,903][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-17 08:15:49,903][backend][INFO] - Configuring pytorch backend
+[2023-08-17 08:15:49,903][backend][INFO] - + Checking initial device isolation
+[2023-08-17 08:15:49,903][utils][INFO] - device_ids to check: {0}
+[2023-08-17 08:15:50,027][utils][INFO] - os.getpid() 514371
+[2023-08-17 08:15:50,028][utils][INFO] - pids_on_device_id {514371}
+[2023-08-17 08:15:50,028][backend][INFO] - + Checking contineous device isolation
+[2023-08-17 08:15:50,052][pytorch][INFO] - + Disabling gradients
+[2023-08-17 08:15:50,053][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+[2023-08-17 08:16:07,128][pytorch][INFO] - + Turning on eval mode
+[2023-08-17 08:16:07,130][inference][INFO] - Running inference benchmark
+[2023-08-17 08:16:15,087][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-17 08:16:15,164][memory_tracker][INFO] - Peak memory usage: 29317.005311999998 MB
+[2023-08-17 08:16:15,164][inference][INFO] - + Forward pass peak memory: 29317.005311999998 (MB)
+[2023-08-17 08:16:15,165][inference][INFO] - + Warming up the forward pass
+[2023-08-17 08:16:17,287][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-17 08:17:06,995][inference][INFO] - + Forward pass latency: 6.41e-02 (s)
+[2023-08-17 08:17:06,996][inference][INFO] - + Forward pass throughput: 15.60 (samples/s)
+[2023-08-17 08:17:06,997][inference][INFO] - + Warming up the generation pass
+[2023-08-17 08:17:12,873][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-17 08:17:30,359][inference][INFO] - + Generation pass latency: 5.83e+00 (s)
+[2023-08-17 08:17:30,360][inference][INFO] - + Generation pass throughput: 34.30 (tokens/s)
+[2023-08-17 08:17:30,360][inference][INFO] - Saving inference results
+[2023-08-17 08:17:30,366][backend][INFO] - Cleaning backend
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2/.config/config.yaml
ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float16
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: ${is_inference:${benchmark.name}}
+  eval_mode: ${is_inference:${benchmark.name}}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 16
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: ${infer_task:${model}, ${hub_kwargs.revision}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2/.config/hydra.yaml
ADDED
@@ -0,0 +1,174 @@
+hydra:
+  run:
+    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+  sweep:
+    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params:
+      benchmark.input_shapes.batch_size: 1,16
+      backend.torch_dtype: float16,float32
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: MULTIRUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=MULTIRUN
+    task:
+    - benchmark.input_shapes.batch_size=16
+    - backend.torch_dtype=float16
+  job:
+    name: main
+    chdir: true
+    override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=16
+    id: '2'
+    num: 2
+    config_name: llama2_1gpu_inference
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /home/user/transformers-regression
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /home/user/transformers-regression/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /home/user/transformers-regression/sweeps/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2
+    choices:
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2/.config/overrides.yaml
ADDED
@@ -0,0 +1,2 @@
+- benchmark.input_shapes.batch_size=16
+- backend.torch_dtype=float16
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2/hydra_config.yaml
ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float16
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: true
+  eval_mode: true
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 16
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: text-generation
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
+,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+0,18841.731072,0.0982,163.0,6.21,515.0
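The same consistency check at batch_size: 16 bears out that generate.throughput counts tokens across the whole batch, which is why it scales near-linearly with batch size here:

# Same metric assumption as above, now at batch_size=16 (float16).
new_tokens, batch_size = 200, 16
generate_latency_s = 6.21
print(round(new_tokens * batch_size / generate_latency_s, 1))  # -> 515.3 vs 515.0 in the CSV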
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/2/main.log
ADDED
@@ -0,0 +1,26 @@
+[2023-08-17 08:17:30,952][benchmark][INFO] - Configuring inference benchmark
+[2023-08-17 08:17:30,953][benchmark][INFO] - + Setting seed(42)
+[2023-08-17 08:17:31,453][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-17 08:17:31,453][backend][INFO] - Configuring pytorch backend
+[2023-08-17 08:17:31,453][backend][INFO] - + Checking initial device isolation
+[2023-08-17 08:17:31,453][utils][INFO] - device_ids to check: {0}
+[2023-08-17 08:17:31,572][utils][INFO] - os.getpid() 514371
+[2023-08-17 08:17:31,572][utils][INFO] - pids_on_device_id {514371}
+[2023-08-17 08:17:31,572][backend][INFO] - + Checking contineous device isolation
+[2023-08-17 08:17:31,595][pytorch][INFO] - + Disabling gradients
+[2023-08-17 08:17:31,596][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+[2023-08-17 08:17:42,231][pytorch][INFO] - + Turning on eval mode
+[2023-08-17 08:17:42,232][inference][INFO] - Running inference benchmark
+[2023-08-17 08:17:50,282][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-17 08:17:50,394][memory_tracker][INFO] - Peak memory usage: 18841.731072 MB
+[2023-08-17 08:17:50,394][inference][INFO] - + Forward pass peak memory: 18841.731072 (MB)
+[2023-08-17 08:17:50,394][inference][INFO] - + Warming up the forward pass
+[2023-08-17 08:17:53,039][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-17 08:18:33,597][inference][INFO] - + Forward pass latency: 9.82e-02 (s)
+[2023-08-17 08:18:33,598][inference][INFO] - + Forward pass throughput: 163.00 (samples/s)
+[2023-08-17 08:18:33,598][inference][INFO] - + Warming up the generation pass
+[2023-08-17 08:18:40,767][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-17 08:18:59,405][inference][INFO] - + Generation pass latency: 6.21e+00 (s)
+[2023-08-17 08:18:59,406][inference][INFO] - + Generation pass throughput: 515.00 (tokens/s)
+[2023-08-17 08:18:59,406][inference][INFO] - Saving inference results
+[2023-08-17 08:18:59,413][backend][INFO] - Cleaning backend
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3/.config/config.yaml
ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float32
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: ${is_inference:${benchmark.name}}
+  eval_mode: ${is_inference:${benchmark.name}}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 16
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: ${infer_task:${model}, ${hub_kwargs.revision}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3/.config/hydra.yaml
ADDED
@@ -0,0 +1,174 @@
hydra:
  run:
    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
  sweep:
    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params:
      benchmark.input_shapes.batch_size: 1,16
      backend.torch_dtype: float16,float32
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
          - %(message)s'
        log_colors:
          DEBUG: purple
          INFO: green
          WARNING: yellow
          ERROR: red
          CRITICAL: red
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: MULTIRUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=MULTIRUN
    task:
    - benchmark.input_shapes.batch_size=16
    - backend.torch_dtype=float32
  job:
    name: main
    chdir: true
    override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=16
    id: '3'
    num: 3
    config_name: llama2_1gpu_inference
    env_set: {}
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /home/user/transformers-regression
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: optimum_benchmark
      schema: pkg
      provider: main
    - path: hydra_plugins.hydra_colorlog.conf
      schema: pkg
      provider: hydra-colorlog
    - path: /home/user/transformers-regression/configs
      schema: file
      provider: command-line
    - path: ''
      schema: structured
      provider: schema
    output_dir: /home/user/transformers-regression/sweeps/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3
    choices:
      benchmark: inference
      backend: pytorch
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: colorlog
      hydra/hydra_logging: colorlog
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
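The sweep config above (`sweeper.params`) crosses `batch_size` in {1, 16} with `torch_dtype` in {float16, float32}, which is why `llama_1gpu_inference` has job directories 0-3. A minimal Python sketch of how Hydra's BasicSweeper would number these jobs; the ordering below is inferred from this job's own overrides (num 3 carries batch_size=16 and torch_dtype=float32), not from Hydra documentation:

```python
from itertools import product

# Swept values, copied from sweeper.params in hydra.yaml above.
batch_sizes = [1, 16]                   # benchmark.input_shapes.batch_size: 1,16
torch_dtypes = ["float16", "float32"]   # backend.torch_dtype: float16,float32

# Inferred: batch_size is the outer loop, so job 3 gets (16, float32),
# matching overrides.yaml in this directory.
for num, (bs, dtype) in enumerate(product(batch_sizes, torch_dtypes)):
    print(f"job {num}: batch_size={bs}, torch_dtype={dtype}")
```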
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3/.config/overrides.yaml
ADDED
@@ -0,0 +1,2 @@
- benchmark.input_shapes.batch_size=16
- backend.torch_dtype=float32
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3/hydra_config.yaml
ADDED
@@ -0,0 +1,66 @@
backend:
  name: pytorch
  version: 2.0.1+cu117
  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
  inter_op_num_threads: null
  intra_op_num_threads: null
  initial_isolation_check: true
  continous_isolation_check: true
  delete_cache: false
  no_weights: false
  torch_dtype: float32
  device_map: null
  load_in_8bit: false
  load_in_4bit: false
  bettertransformer: false
  torch_compile: false
  torch_compile_config:
    fullgraph: false
    dynamic: false
    backend: inductor
    mode: null
    options: null
    disable: false
  amp_autocast: false
  amp_dtype: null
  disable_grad: true
  eval_mode: true
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
  seed: 42
  memory: true
  warmup_runs: 10
  benchmark_duration: 15
  input_shapes:
    batch_size: 16
    sequence_length: 200
    num_choices: 1
    width: 64
    height: 64
    num_channels: 3
    point_batch_size: 3
    nb_points_per_image: 2
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 200
experiment_name: llama_1gpu_inference
model: daryl149/llama-2-7b-chat-hf
device: cuda
task: text-generation
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
environment:
  optimum_version: 1.11.1
  transformers_version: 4.32.0.dev0
  accelerate_version: 0.21.0
  diffusers_version: null
  python_version: 3.10.12
  system: Linux
  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
  cpu_count: 96
  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
0,34801.057792,0.683,23.4,12.9,248.0
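The throughput columns in these results files appear to be derived directly from latency and the configured shapes (the same relation holds in every inference_results.csv in this commit). A quick consistency check, using only numbers from the files above:

```python
# Numbers from this run: batch_size=16 and new_tokens=200 come from
# hydra_config.yaml, the latencies from inference_results.csv.
batch_size, new_tokens = 16, 200
forward_latency_s, generate_latency_s = 0.683, 12.9

forward_throughput = batch_size / forward_latency_s                  # ~23.4 samples/s
generate_throughput = batch_size * new_tokens / generate_latency_s   # ~248 tokens/s
print(round(forward_throughput, 1), round(generate_throughput, 1))   # 23.4 248.1
```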
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/llama_1gpu_inference/3/main.log
ADDED
@@ -0,0 +1,26 @@
[2023-08-17 08:19:00,058][benchmark][INFO] - Configuring inference benchmark
[2023-08-17 08:19:00,059][benchmark][INFO] - + Setting seed(42)
[2023-08-17 08:19:00,549][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
[2023-08-17 08:19:00,550][backend][INFO] - Configuring pytorch backend
[2023-08-17 08:19:00,550][backend][INFO] - + Checking initial device isolation
[2023-08-17 08:19:00,550][utils][INFO] - device_ids to check: {0}
[2023-08-17 08:19:00,653][utils][INFO] - os.getpid() 514371
[2023-08-17 08:19:00,653][utils][INFO] - pids_on_device_id {514371}
[2023-08-17 08:19:00,654][backend][INFO] - + Checking contineous device isolation
[2023-08-17 08:19:00,677][pytorch][INFO] - + Disabling gradients
[2023-08-17 08:19:00,678][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
[2023-08-17 08:19:17,837][pytorch][INFO] - + Turning on eval mode
[2023-08-17 08:19:17,839][inference][INFO] - Running inference benchmark
[2023-08-17 08:19:26,146][inference][INFO] - + Tracking forward pass peak memory
[2023-08-17 08:19:26,843][memory_tracker][INFO] - Peak memory usage: 34801.057792 MB
[2023-08-17 08:19:26,844][inference][INFO] - + Forward pass peak memory: 34801.057792 (MB)
[2023-08-17 08:19:26,860][inference][INFO] - + Warming up the forward pass
[2023-08-17 08:19:52,169][inference][INFO] - + Tracking forward pass latency and throughput
[2023-08-17 08:20:49,750][inference][INFO] - + Forward pass latency: 6.83e-01 (s)
[2023-08-17 08:20:49,751][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
[2023-08-17 08:20:49,752][inference][INFO] - + Warming up the generation pass
[2023-08-17 08:21:02,883][inference][INFO] - + Tracking generation latency and throughput
[2023-08-17 08:21:28,717][inference][INFO] - + Generation pass latency: 1.29e+01 (s)
[2023-08-17 08:21:28,718][inference][INFO] - + Generation pass throughput: 248.00 (tokens/s)
[2023-08-17 08:21:28,718][inference][INFO] - Saving inference results
[2023-08-17 08:21:28,726][backend][INFO] - Cleaning backend
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0/.config/config.yaml
ADDED
@@ -0,0 +1,66 @@
backend:
  name: pytorch
  version: 2.0.1+cu117
  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
  inter_op_num_threads: null
  intra_op_num_threads: null
  initial_isolation_check: true
  continous_isolation_check: true
  delete_cache: false
  no_weights: false
  torch_dtype: null
  device_map: null
  load_in_8bit: false
  load_in_4bit: false
  bettertransformer: false
  torch_compile: false
  torch_compile_config:
    fullgraph: false
    dynamic: false
    backend: inductor
    mode: null
    options: null
    disable: false
  amp_autocast: false
  amp_dtype: null
  disable_grad: ${is_inference:${benchmark.name}}
  eval_mode: ${is_inference:${benchmark.name}}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
  seed: 42
  memory: true
  warmup_runs: 10
  benchmark_duration: 15
  input_shapes:
    batch_size: 1
    sequence_length: 16
    num_choices: 1
    width: 64
    height: 64
    num_channels: 3
    point_batch_size: 3
    nb_points_per_image: 2
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 100
experiment_name: pytorch_bert_inference
model: hf-internal-testing/tiny-random-bert
device: cpu
task: text-classification
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
environment:
  optimum_version: 1.11.1
  transformers_version: 4.32.0.dev0
  accelerate_version: 0.21.0
  diffusers_version: null
  python_version: 3.10.12
  system: Linux
  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
  cpu_count: 96
  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0/.config/hydra.yaml
ADDED
@@ -0,0 +1,172 @@
hydra:
  run:
    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
  sweep:
    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params:
      benchmark.input_shapes.batch_size: 1,4
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
          - %(message)s'
        log_colors:
          DEBUG: purple
          INFO: green
          WARNING: yellow
          ERROR: red
          CRITICAL: red
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: MULTIRUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=MULTIRUN
    task:
    - benchmark.input_shapes.batch_size=1
  job:
    name: main
    chdir: true
    override_dirname: benchmark.input_shapes.batch_size=1
    id: '0'
    num: 0
    config_name: bert_cpu_inference
    env_set: {}
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /home/user/transformers-regression
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: optimum_benchmark
      schema: pkg
      provider: main
    - path: hydra_plugins.hydra_colorlog.conf
      schema: pkg
      provider: hydra-colorlog
    - path: /home/user/transformers-regression/configs
      schema: file
      provider: command-line
    - path: ''
      schema: structured
      provider: schema
    output_dir: /home/user/transformers-regression/sweeps/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0
    choices:
      benchmark: inference
      backend: pytorch
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: colorlog
      hydra/hydra_logging: colorlog
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0/.config/overrides.yaml
ADDED
@@ -0,0 +1 @@
- benchmark.input_shapes.batch_size=1
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0/hydra_config.yaml
ADDED
@@ -0,0 +1,66 @@
backend:
  name: pytorch
  version: 2.0.1+cu117
  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
  inter_op_num_threads: null
  intra_op_num_threads: null
  initial_isolation_check: true
  continous_isolation_check: true
  delete_cache: false
  no_weights: false
  torch_dtype: null
  device_map: null
  load_in_8bit: false
  load_in_4bit: false
  bettertransformer: false
  torch_compile: false
  torch_compile_config:
    fullgraph: false
    dynamic: false
    backend: inductor
    mode: null
    options: null
    disable: false
  amp_autocast: false
  amp_dtype: null
  disable_grad: true
  eval_mode: true
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
  seed: 42
  memory: true
  warmup_runs: 10
  benchmark_duration: 15
  input_shapes:
    batch_size: 1
    sequence_length: 16
    num_choices: 1
    width: 64
    height: 64
    num_channels: 3
    point_batch_size: 3
    nb_points_per_image: 2
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 100
experiment_name: pytorch_bert_inference
model: hf-internal-testing/tiny-random-bert
device: cpu
task: text-classification
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
environment:
  optimum_version: 1.11.1
  transformers_version: 4.32.0.dev0
  accelerate_version: 0.21.0
  diffusers_version: null
  python_version: 3.10.12
  system: Linux
  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
  cpu_count: 96
  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
0,466.169856,0.00405,247.0
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/0/main.log
ADDED
@@ -0,0 +1,20 @@
[2023-08-17 08:12:28,164][benchmark][INFO] - Configuring inference benchmark
[2023-08-17 08:12:28,165][benchmark][INFO] - + Setting seed(42)
[2023-08-17 08:12:29,367][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
[2023-08-17 08:12:29,367][backend][INFO] - Configuring pytorch backend
[2023-08-17 08:12:29,368][backend][INFO] - + Checking initial device isolation
[2023-08-17 08:12:29,368][backend][INFO] - + Checking contineous device isolation
[2023-08-17 08:12:29,368][pytorch][INFO] - + Disabling gradients
[2023-08-17 08:12:29,368][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
[2023-08-17 08:12:29,969][pytorch][INFO] - + Turning on eval mode
[2023-08-17 08:12:29,969][inference][INFO] - Running inference benchmark
[2023-08-17 08:12:30,088][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
[2023-08-17 08:12:30,090][inference][INFO] - + Tracking forward pass peak memory
[2023-08-17 08:12:30,147][inference][INFO] - + Forward pass peak memory: 466.169856 (MB)
[2023-08-17 08:12:30,148][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
[2023-08-17 08:12:30,150][inference][INFO] - + Warming up the forward pass
[2023-08-17 08:12:30,193][inference][INFO] - + Tracking forward pass latency and throughput
[2023-08-17 08:12:45,340][inference][INFO] - + Forward pass latency: 4.05e-03 (s)
[2023-08-17 08:12:45,343][inference][INFO] - + Forward pass throughput: 247.00 (samples/s)
[2023-08-17 08:12:45,343][inference][INFO] - Saving inference results
[2023-08-17 08:12:45,359][backend][INFO] - Cleaning backend
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1/.config/config.yaml
ADDED
@@ -0,0 +1,66 @@
backend:
  name: pytorch
  version: 2.0.1+cu117
  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
  inter_op_num_threads: null
  intra_op_num_threads: null
  initial_isolation_check: true
  continous_isolation_check: true
  delete_cache: false
  no_weights: false
  torch_dtype: null
  device_map: null
  load_in_8bit: false
  load_in_4bit: false
  bettertransformer: false
  torch_compile: false
  torch_compile_config:
    fullgraph: false
    dynamic: false
    backend: inductor
    mode: null
    options: null
    disable: false
  amp_autocast: false
  amp_dtype: null
  disable_grad: ${is_inference:${benchmark.name}}
  eval_mode: ${is_inference:${benchmark.name}}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
  seed: 42
  memory: true
  warmup_runs: 10
  benchmark_duration: 15
  input_shapes:
    batch_size: 4
    sequence_length: 16
    num_choices: 1
    width: 64
    height: 64
    num_channels: 3
    point_batch_size: 3
    nb_points_per_image: 2
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 100
experiment_name: pytorch_bert_inference
model: hf-internal-testing/tiny-random-bert
device: cpu
task: text-classification
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
environment:
  optimum_version: 1.11.1
  transformers_version: 4.32.0.dev0
  accelerate_version: 0.21.0
  diffusers_version: null
  python_version: 3.10.12
  system: Linux
  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
  cpu_count: 96
  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1/.config/hydra.yaml
ADDED
@@ -0,0 +1,172 @@
hydra:
  run:
    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
  sweep:
    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params:
      benchmark.input_shapes.batch_size: 1,4
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
          - %(message)s'
        log_colors:
          DEBUG: purple
          INFO: green
          WARNING: yellow
          ERROR: red
          CRITICAL: red
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: MULTIRUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=MULTIRUN
    task:
    - benchmark.input_shapes.batch_size=4
  job:
    name: main
    chdir: true
    override_dirname: benchmark.input_shapes.batch_size=4
    id: '1'
    num: 1
    config_name: bert_cpu_inference
    env_set: {}
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /home/user/transformers-regression
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: optimum_benchmark
      schema: pkg
      provider: main
    - path: hydra_plugins.hydra_colorlog.conf
      schema: pkg
      provider: hydra-colorlog
    - path: /home/user/transformers-regression/configs
      schema: file
      provider: command-line
    - path: ''
      schema: structured
      provider: schema
    output_dir: /home/user/transformers-regression/sweeps/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1
    choices:
      benchmark: inference
      backend: pytorch
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: colorlog
      hydra/hydra_logging: colorlog
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1/.config/overrides.yaml
ADDED
@@ -0,0 +1 @@
- benchmark.input_shapes.batch_size=4
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1/hydra_config.yaml
ADDED
@@ -0,0 +1,66 @@
backend:
  name: pytorch
  version: 2.0.1+cu117
  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
  inter_op_num_threads: null
  intra_op_num_threads: null
  initial_isolation_check: true
  continous_isolation_check: true
  delete_cache: false
  no_weights: false
  torch_dtype: null
  device_map: null
  load_in_8bit: false
  load_in_4bit: false
  bettertransformer: false
  torch_compile: false
  torch_compile_config:
    fullgraph: false
    dynamic: false
    backend: inductor
    mode: null
    options: null
    disable: false
  amp_autocast: false
  amp_dtype: null
  disable_grad: true
  eval_mode: true
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
  seed: 42
  memory: true
  warmup_runs: 10
  benchmark_duration: 15
  input_shapes:
    batch_size: 4
    sequence_length: 16
    num_choices: 1
    width: 64
    height: 64
    num_channels: 3
    point_batch_size: 3
    nb_points_per_image: 2
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 100
experiment_name: pytorch_bert_inference
model: hf-internal-testing/tiny-random-bert
device: cpu
task: text-classification
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
environment:
  optimum_version: 1.11.1
  transformers_version: 4.32.0.dev0
  accelerate_version: 0.21.0
  diffusers_version: null
  python_version: 3.10.12
  system: Linux
  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
  cpu_count: 96
  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
0,467.40684799999997,0.00435,920.0
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_bert_inference/1/main.log
ADDED
@@ -0,0 +1,20 @@
[2023-08-17 08:12:45,751][benchmark][INFO] - Configuring inference benchmark
[2023-08-17 08:12:45,752][benchmark][INFO] - + Setting seed(42)
[2023-08-17 08:12:46,198][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
[2023-08-17 08:12:46,198][backend][INFO] - Configuring pytorch backend
[2023-08-17 08:12:46,199][backend][INFO] - + Checking initial device isolation
[2023-08-17 08:12:46,199][backend][INFO] - + Checking contineous device isolation
[2023-08-17 08:12:46,199][pytorch][INFO] - + Disabling gradients
[2023-08-17 08:12:46,199][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
[2023-08-17 08:12:46,319][pytorch][INFO] - + Turning on eval mode
[2023-08-17 08:12:46,320][inference][INFO] - Running inference benchmark
[2023-08-17 08:12:46,442][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
[2023-08-17 08:12:46,443][inference][INFO] - + Tracking forward pass peak memory
[2023-08-17 08:12:46,484][inference][INFO] - + Forward pass peak memory: 467.40684799999997 (MB)
[2023-08-17 08:12:46,485][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
[2023-08-17 08:12:46,486][inference][INFO] - + Warming up the forward pass
[2023-08-17 08:12:46,530][inference][INFO] - + Tracking forward pass latency and throughput
[2023-08-17 08:13:01,666][inference][INFO] - + Forward pass latency: 4.35e-03 (s)
[2023-08-17 08:13:01,668][inference][INFO] - + Forward pass throughput: 920.00 (samples/s)
[2023-08-17 08:13:01,668][inference][INFO] - Saving inference results
[2023-08-17 08:13:01,678][backend][INFO] - Cleaning backend
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0/.config/config.yaml
ADDED
@@ -0,0 +1,66 @@
backend:
  name: pytorch
  version: 2.0.1+cu117
  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
  inter_op_num_threads: null
  intra_op_num_threads: null
  initial_isolation_check: true
  continous_isolation_check: true
  delete_cache: false
  no_weights: false
  torch_dtype: null
  device_map: null
  load_in_8bit: false
  load_in_4bit: false
  bettertransformer: false
  torch_compile: false
  torch_compile_config:
    fullgraph: false
    dynamic: false
    backend: inductor
    mode: null
    options: null
    disable: false
  amp_autocast: false
  amp_dtype: null
  disable_grad: ${is_inference:${benchmark.name}}
  eval_mode: ${is_inference:${benchmark.name}}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
  seed: 42
  memory: false
  warmup_runs: 10
  benchmark_duration: 10
  input_shapes:
    batch_size: 1
    sequence_length: 16
    num_choices: 1
    width: 64
    height: 64
    num_channels: 3
    point_batch_size: 3
    nb_points_per_image: 2
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 100
experiment_name: pytorch_gpt2_inference
model: hf-internal-testing/tiny-random-gpt2
device: cpu
task: text-generation
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
environment:
  optimum_version: 1.11.1
  transformers_version: 4.32.0.dev0
  accelerate_version: 0.21.0
  diffusers_version: null
  python_version: 3.10.12
  system: Linux
  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
  cpu_count: 96
  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0/.config/hydra.yaml
ADDED
@@ -0,0 +1,170 @@
hydra:
  run:
    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
  sweep:
    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
          - %(message)s'
        log_colors:
          DEBUG: purple
          INFO: green
          WARNING: yellow
          ERROR: red
          CRITICAL: red
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: MULTIRUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=MULTIRUN
    task: []
  job:
    name: main
    chdir: true
    override_dirname: ''
    id: '0'
    num: 0
    config_name: gpt2_cpu_inference
    env_set: {}
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /home/user/transformers-regression
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: optimum_benchmark
      schema: pkg
      provider: main
    - path: hydra_plugins.hydra_colorlog.conf
      schema: pkg
      provider: hydra-colorlog
    - path: /home/user/transformers-regression/configs
      schema: file
      provider: command-line
    - path: ''
      schema: structured
      provider: schema
    output_dir: /home/user/transformers-regression/sweeps/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0
    choices:
      benchmark: inference
      backend: pytorch
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: colorlog
      hydra/hydra_logging: colorlog
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0/.config/overrides.yaml
ADDED
@@ -0,0 +1 @@
[]
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0/hydra_config.yaml
ADDED
@@ -0,0 +1,66 @@
backend:
  name: pytorch
  version: 2.0.1+cu117
  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
  inter_op_num_threads: null
  intra_op_num_threads: null
  initial_isolation_check: true
  continous_isolation_check: true
  delete_cache: false
  no_weights: false
  torch_dtype: null
  device_map: null
  load_in_8bit: false
  load_in_4bit: false
  bettertransformer: false
  torch_compile: false
  torch_compile_config:
    fullgraph: false
    dynamic: false
    backend: inductor
    mode: null
    options: null
    disable: false
  amp_autocast: false
  amp_dtype: null
  disable_grad: true
  eval_mode: true
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
  seed: 42
  memory: false
  warmup_runs: 10
  benchmark_duration: 10
  input_shapes:
    batch_size: 1
    sequence_length: 16
    num_choices: 1
    width: 64
    height: 64
    num_channels: 3
    point_batch_size: 3
    nb_points_per_image: 2
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 100
experiment_name: pytorch_gpt2_inference
model: hf-internal-testing/tiny-random-gpt2
device: cpu
task: text-generation
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
environment:
  optimum_version: 1.11.1
  transformers_version: 4.32.0.dev0
  accelerate_version: 0.21.0
  diffusers_version: null
  python_version: 3.10.12
  system: Linux
  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
  cpu_count: 96
  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
0,0.00375,267.0,0.541,185.0
raw_results/2023-08-16_09:48:24_5ccf343aebdb7c913cc41149c9f8b4fbe37c0028/pytorch_gpt2_inference/0/main.log
ADDED
@@ -0,0 +1,20 @@
[2023-08-17 08:13:06,660][benchmark][INFO] - Configuring inference benchmark
[2023-08-17 08:13:06,661][benchmark][INFO] - + Setting seed(42)
[2023-08-17 08:13:08,198][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
[2023-08-17 08:13:08,198][backend][INFO] - Configuring pytorch backend
[2023-08-17 08:13:08,198][backend][INFO] - + Checking initial device isolation
[2023-08-17 08:13:08,199][backend][INFO] - + Checking contineous device isolation
[2023-08-17 08:13:08,199][pytorch][INFO] - + Disabling gradients
[2023-08-17 08:13:08,199][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
[2023-08-17 08:13:08,834][pytorch][INFO] - + Turning on eval mode
[2023-08-17 08:13:08,835][inference][INFO] - Running inference benchmark
[2023-08-17 08:13:09,032][inference][INFO] - + Warming up the forward pass
[2023-08-17 08:13:09,070][inference][INFO] - + Tracking forward pass latency and throughput
[2023-08-17 08:13:19,165][inference][INFO] - + Forward pass latency: 3.75e-03 (s)
[2023-08-17 08:13:19,167][inference][INFO] - + Forward pass throughput: 267.00 (samples/s)
[2023-08-17 08:13:19,168][inference][INFO] - + Warming up the generation pass
[2023-08-17 08:13:19,755][inference][INFO] - + Tracking generation latency and throughput
[2023-08-17 08:13:30,034][inference][INFO] - + Generation pass latency: 5.41e-01 (s)
[2023-08-17 08:13:30,035][inference][INFO] - + Generation pass throughput: 185.00 (tokens/s)
[2023-08-17 08:13:30,035][inference][INFO] - Saving inference results
[2023-08-17 08:13:30,048][backend][INFO] - Cleaning backend
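All raw_results directories in this commit follow the layout `raw_results/<commit>/<experiment>/<job>/inference_results.csv`. A minimal sketch (assuming pandas is available and the script runs from the repository root) for collecting the per-job CSVs into one table so this SHA can be compared against other commits tracked in the dataset; column sets differ across experiments (the gpt2 files omit forward.peak_memory), so the concatenation leaves NaN where a metric was not recorded:

```python
from pathlib import Path

import pandas as pd

rows = []
for csv_path in Path("raw_results").glob("*/*/*/inference_results.csv"):
    df = pd.read_csv(csv_path, index_col=0)
    df["commit"] = csv_path.parts[1]      # e.g. 2023-08-16_09:48:24_5ccf343...
    df["experiment"] = csv_path.parts[2]  # e.g. llama_1gpu_inference
    df["job"] = csv_path.parts[3]         # Hydra job number within the sweep
    rows.append(df)

results = pd.concat(rows, ignore_index=True)
print(results.sort_values(["experiment", "job"]).to_string())
```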