# Provenance (Hugging Face Hub scrape artifacts, kept as comments):
# uploader: CAIR-HKISI
# commit message: Upload folder using huggingface_hub
# commit: 7fbf85b (verified)
hydra:
  run:
    dir: runs/${name}
  sweep:
    dir: sweeps/${name}
    subdir: ${hydra.job.override_dirname}
  launcher:
    _target_: hydra_plugins.hydra_joblib_launcher.joblib_launcher.JoblibLauncher
    n_jobs: -1
    backend: null
    prefer: threads
    require: null
    verbose: 0
    timeout: null
    pre_dispatch: 2*n_jobs
    batch_size: '1'
    temp_folder: null
    max_nbytes: null
    mmap_mode: r
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params:
      backend.library: diffusers
      backend.task: text-to-image
      backend.model: hf-internal-testing/tiny-stable-diffusion-torch
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][HYDRA] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    loggers:
      logging_example:
        level: DEBUG
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: MULTIRUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra/launcher=joblib
    - hydra.launcher.n_jobs=-1
    - hydra.launcher.batch_size=1
    - hydra.launcher.prefer=threads
    task: []
  job:
    name: cli
    chdir: true
    override_dirname: ''
    id: ???
    num: ???
    config_name: cuda_inference_pytorch_diffusers
    env_set:
      OVERRIDE_BENCHMARKS: '1'
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /home/user01/optimum-benchmark
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: optimum_benchmark
      schema: pkg
      provider: main
    - path: /home/user01/optimum-benchmark/tests/configs
      schema: file
      provider: command-line
    - path: ''
      schema: structured
      provider: schema
    output_dir: ???
    choices:
      scenario: inference
      launcher: process
      backend: pytorch
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: default
      hydra/hydra_logging: default
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: joblib
      hydra/output: default
  verbose: false
name: cuda_inference_pytorch_diffusers
backend:
  name: pytorch
  version: 2.8.0
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  model: null
  processor: null
  task: null
  library: null
  model_type: null
  device: cuda
  device_ids: '0'
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  model_kwargs: {}
  processor_kwargs: {}
  no_weights: false
  tp_plan: null
  device_map: null
  torch_dtype: null
  eval_mode: true
  to_bettertransformer: false
  low_cpu_mem_usage: null
  attn_implementation: null
  cache_implementation: null
  allow_tf32: false
  autocast_enabled: false
  autocast_dtype: null
  torch_compile: false
  torch_compile_target: forward
  torch_compile_config: {}
  quantization_scheme: null
  quantization_config: {}
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_type: null
  peft_config: {}
scenario:
  name: inference
  _target_: optimum_benchmark.scenarios.inference.scenario.InferenceScenario
  iterations: 1
  duration: 1
  warmup_runs: 1
  input_shapes:
    batch_size: 1
    sequence_length: 16
  new_tokens: null
  memory: true
  latency: true
  energy: false
  forward_kwargs: {}
  generate_kwargs:
    max_new_tokens: 16
    min_new_tokens: 16
  call_kwargs:
    num_inference_steps: 4
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  device_isolation_action: warn
  numactl: false
  numactl_kwargs: {}
  start_method: spawn
environment:
  cpu: ' Intel(R) Xeon(R) Platinum 8480C'
  cpu_count: 224
  cpu_ram_mb: 2164176.613376
  system: Linux
  machine: x86_64
  platform: Linux-6.8.0-87-generic-x86_64-with-glibc2.39
  processor: x86_64
  python_version: 3.12.13
  gpu:
  - NVIDIA H800
  - NVIDIA H800
  - NVIDIA H800
  - NVIDIA H800
  - NVIDIA H800
  - NVIDIA H800
  - NVIDIA H800
  - NVIDIA H800
  gpu_count: 8
  gpu_vram_mb: 684166479872
  optimum_benchmark_version: 0.6.0
  optimum_benchmark_commit: 7fa7b6797d7eb7c132a0b3c249cf63e95510db31
  transformers_version: 4.57.6
  transformers_commit: 7fa7b6797d7eb7c132a0b3c249cf63e95510db31
  accelerate_version: 1.13.0
  accelerate_commit: 7fa7b6797d7eb7c132a0b3c249cf63e95510db31
  diffusers_version: null
  diffusers_commit: null
  optimum_version: null
  optimum_commit: null
  timm_version: 1.0.25
  timm_commit: 7fa7b6797d7eb7c132a0b3c249cf63e95510db31
  peft_version: 0.18.1
  peft_commit: 7fa7b6797d7eb7c132a0b3c249cf63e95510db31
print_report: true
log_report: true