file_name
large_stringlengths
6
73
file_path
large_stringlengths
8
117
type
large_stringclasses
3 values
name
large_stringlengths
1
91
start_line
int64
1
6.25k
end_line
int64
5
6.27k
content
large_stringlengths
18
250k
docstring
large_stringlengths
0
41.1k
embedding
listlengths
768
768
embedding_model
large_stringclasses
1 value
conftest.py
conftest.py
function
pytest_configure
84
95
def pytest_configure(config): config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested") config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment") config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate") config.addinivalue_line("markers", "not_device_test: mark the tests always running on cpu") config.addinivalue_line("markers", "torch_compile_test: mark test which tests torch compile functionality") config.addinivalue_line("markers", "torch_export_test: mark test which tests torch export functionality") config.addinivalue_line("markers", "flash_attn_test: mark test which tests flash attention functionality") config.addinivalue_line("markers", "flash_attn_3_test: mark test which tests flash attention 3 functionality") config.addinivalue_line("markers", "training_ci: mark test for training CI validation") os.environ["DISABLE_SAFETENSORS_CONVERSION"] = "true"
null
[ -0.006010689772665501, -0.01654973439872265, -0.010188373737037182, 0.026173338294029236, 0.005266346037387848, 0.030715258792042732, -0.051606494933366776, 0.01947873644530773, 0.025859253481030464, -0.0019574088510125875, -0.036790698766708374, 0.028762491419911385, -0.011060409247875214, ...
Snowflake/snowflake-arctic-embed-m
conftest.py
conftest.py
function
pytest_collection_modifyitems
98
101
def pytest_collection_modifyitems(items): for item in items: if any(test_name in item.nodeid for test_name in NOT_DEVICE_TESTS): item.add_marker(pytest.mark.not_device_test)
null
[ -0.0018598344177007675, -0.01407221145927906, 0.0098698316141963, 0.011602354235947132, 0.04331904277205467, 0.021711518988013268, -0.043311864137649536, 0.010872561484575272, -0.0018784116255119443, -0.03966418653726578, 0.005111444275826216, 0.032855890691280365, -0.06387723237276077, 0....
Snowflake/snowflake-arctic-embed-m
conftest.py
conftest.py
function
pytest_addoption
104
107
def pytest_addoption(parser): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(parser)
null
[ 0.02109464816749096, -0.015749573707580566, 0.003046265570446849, 0.006348882336169481, -0.04745549336075783, 0.018048813566565514, -0.02918238379061222, -0.007588644977658987, 0.01914861798286438, 0.0010566345881670713, -0.03797975927591324, -0.0010851150145754218, -0.012139503844082355, ...
Snowflake/snowflake-arctic-embed-m
conftest.py
conftest.py
function
pytest_terminal_summary
110
115
def pytest_terminal_summary(terminalreporter): from transformers.testing_utils import pytest_terminal_summary_main make_reports = terminalreporter.config.getoption("--make-reports") if make_reports: pytest_terminal_summary_main(terminalreporter, id=make_reports)
null
[ -0.0030817899387329817, -0.03523353114724159, -0.0006435674149543047, -0.024024685844779015, -0.007272584363818169, 0.008798833936452866, -0.03417857363820076, 0.01577366515994072, 0.0157461054623127, -0.028448527678847313, -0.05114568769931793, 0.03233640640974045, -0.011357183568179607, ...
Snowflake/snowflake-arctic-embed-m
conftest.py
conftest.py
function
pytest_sessionfinish
118
121
def pytest_sessionfinish(session, exitstatus): # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: session.exitstatus = 0
null
[ 0.009401445277035236, -0.06394325941801071, 0.015375849790871143, 0.05399767681956291, 0.012109951116144657, 0.019510721787810326, 0.01785055175423622, -0.017624689266085625, 0.059219278395175934, -0.023134123533964157, -0.019102929159998894, 0.0022015392314642668, 0.0017136919777840376, 0...
Snowflake/snowflake-arctic-embed-m
conftest.py
conftest.py
class
CustomOutputChecker
130
134
class CustomOutputChecker(OutputChecker): def check_output(self, want, got, optionflags): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self, want, got, optionflags)
null
[ -0.0015353856142610312, 0.03293216601014137, 0.04843374341726303, 0.016692599281668663, 0.00967515166848898, 0.008680326864123344, -0.03601459786295891, -0.01175685040652752, -0.03537588566541672, -0.017700154334306717, 0.00032964692218229175, 0.05489371716976166, -0.0347469300031662, 0.05...
Snowflake/snowflake-arctic-embed-m
conftest.py
conftest.py
function
check_output
131
134
def check_output(self, want, got, optionflags): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self, want, got, optionflags)
null
[ -0.0050439084880054, 0.043086420744657516, 0.02921714261174202, 0.009142640046775341, 0.02405254729092121, 0.008396622724831104, -0.042850691825151443, -0.02747061289846897, -0.02396944724023342, -0.005572537891566753, -0.04155103489756584, 0.04938877746462822, -0.024951333180069923, 0.067...
Snowflake/snowflake-arctic-embed-m
conftest.py
conftest.py
consecutive_lines
lines_1-50
1
50
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import os import sys import warnings from os.path import abspath, dirname, join import _pytest import pytest from transformers.testing_utils import ( HfDoctestModule, HfDocTestParser, is_torch_available, patch_testing_methods_to_collect_info, patch_torch_compile_force_graph, ) from transformers.utils import enable_tf32 NOT_DEVICE_TESTS = { "test_tokenization", "test_tokenization_mistral_common", "test_processing", "test_beam_constraints", "test_configuration_utils", "test_data_collator", "test_trainer_callback", "test_trainer_utils", "test_feature_extraction", "test_image_processing", "test_image_processor", "test_image_transforms", "test_optimization",
null
[ -0.006435020361095667, -0.0145240044221282, -0.0035980921238660812, -0.013333169743418694, -0.004700838588178158, 0.0468064546585083, -0.03010897897183895, -0.009453308768570423, 0.010752719826996326, 0.005468118004500866, -0.08877290040254593, -0.0037409691140055656, -0.0323854461312294, ...
Snowflake/snowflake-arctic-embed-m
conftest.py
conftest.py
consecutive_lines
lines_41-90
41
90
"test_beam_constraints", "test_configuration_utils", "test_data_collator", "test_trainer_callback", "test_trainer_utils", "test_feature_extraction", "test_image_processing", "test_image_processor", "test_image_transforms", "test_optimization", "test_retrieval", "test_config", "test_from_pretrained_no_checkpoint", "test_keep_in_fp32_modules", "test_gradient_checkpointing_backward_compatibility", "test_gradient_checkpointing_enable_disable", "test_torch_save_load", "test_forward_signature", "test_model_get_set_embeddings", "test_model_main_input_name", "test_correct_missing_keys", "test_can_use_safetensors", "test_load_save_without_tied_weights", "test_tied_weights_keys", "test_model_weights_reload_no_missing_tied_weights", "test_can_load_ignoring_mismatched_shapes", "test_model_is_small", "ModelTest::test_pipeline_", # None of the pipeline tests from PipelineTesterMixin (of which XxxModelTest inherits from) are running on device "ModelTester::test_pipeline_", "/repo_utils/", "/utils/", } # allow having multiple repository checkouts and not needing to remember to rerun # `pip install -e '.[dev]'` when switching between checkouts and running tests. git_repo_path = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. 
the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def pytest_configure(config): config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested") config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment") config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate") config.addinivalue_line("markers", "not_device_test: mark the tests always running on cpu") config.addinivalue_line("markers", "torch_compile_test: mark test which tests torch compile functionality") config.addinivalue_line("markers", "torch_export_test: mark test which tests torch export functionality")
null
[ -0.03938659280538559, -0.005469099618494511, 0.00622708210721612, 0.004732238128781319, -0.0029964945279061794, 0.041295234113931656, -0.04239056259393692, 0.0011579382698982954, 0.004169244784861803, 0.0023165540769696236, -0.05699987709522247, -0.00005019697346142493, -0.0304555781185627, ...
Snowflake/snowflake-arctic-embed-m
conftest.py
conftest.py
consecutive_lines
lines_81-130
81
130
warnings.simplefilter(action="ignore", category=FutureWarning) def pytest_configure(config): config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested") config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment") config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate") config.addinivalue_line("markers", "not_device_test: mark the tests always running on cpu") config.addinivalue_line("markers", "torch_compile_test: mark test which tests torch compile functionality") config.addinivalue_line("markers", "torch_export_test: mark test which tests torch export functionality") config.addinivalue_line("markers", "flash_attn_test: mark test which tests flash attention functionality") config.addinivalue_line("markers", "flash_attn_3_test: mark test which tests flash attention 3 functionality") config.addinivalue_line("markers", "training_ci: mark test for training CI validation") os.environ["DISABLE_SAFETENSORS_CONVERSION"] = "true" def pytest_collection_modifyitems(items): for item in items: if any(test_name in item.nodeid for test_name in NOT_DEVICE_TESTS): item.add_marker(pytest.mark.not_device_test) def pytest_addoption(parser): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(parser) def pytest_terminal_summary(terminalreporter): from transformers.testing_utils import pytest_terminal_summary_main make_reports = terminalreporter.config.getoption("--make-reports") if make_reports: pytest_terminal_summary_main(terminalreporter, id=make_reports) def pytest_sessionfinish(session, exitstatus): # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: session.exitstatus = 0 # Doctest custom flag to ignore output. IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT") OutputChecker = doctest.OutputChecker class CustomOutputChecker(OutputChecker):
null
[ -0.008784526027739048, 0.0026440047658979893, -0.007121329195797443, 0.002730834996327758, 0.0043590981513261795, 0.01764943078160286, -0.037327203899621964, 0.020638270303606987, 0.026348697021603584, 0.012500341981649399, -0.04141455143690109, 0.02316303551197052, -0.013417514972388744, ...
Snowflake/snowflake-arctic-embed-m
conftest.py
conftest.py
consecutive_lines
lines_121-152
121
152
session.exitstatus = 0 # Doctest custom flag to ignore output. IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT") OutputChecker = doctest.OutputChecker class CustomOutputChecker(OutputChecker): def check_output(self, want, got, optionflags): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self, want, got, optionflags) doctest.OutputChecker = CustomOutputChecker _pytest.doctest.DoctestModule = HfDoctestModule doctest.DocTestParser = HfDocTestParser if is_torch_available(): # The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True. # We set it to `False` for CI. See https://github.com/pytorch/pytorch/issues/157274#issuecomment-3090791615 enable_tf32(False) # patch `torch.compile`: if `TORCH_COMPILE_FORCE_FULLGRAPH=1` (or values considered as true, e.g. yes, y, etc.), # the patched version will always run with `fullgraph=True`. patch_torch_compile_force_graph() if os.environ.get("PATCH_TESTING_METHODS_TO_COLLECT_OUTPUTS", "").lower() in ("yes", "true", "on", "y", "1"): patch_testing_methods_to_collect_info()
null
[ -0.015207345597445965, -0.02301223762333393, 0.026528535410761833, 0.05873050540685654, -0.023370616137981415, -0.007472406141459942, -0.04254474490880966, 0.005331215914338827, 0.04482090845704079, -0.03774423524737358, -0.0482192300260067, 0.04052731767296791, 0.005130629520863295, 0.065...
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
function
deps_list
172
173
def deps_list(*pkgs): return [deps[pkg] for pkg in pkgs]
null
[ -0.0494125671684742, 0.05173264443874359, -0.001569700543768704, 0.010296719148755074, -0.0363311693072319, 0.039914052933454514, -0.06757320463657379, -0.05152059718966484, -0.023872748017311096, 0.020926237106323242, -0.05433335527777672, 0.003762018634006381, -0.07259820401668549, 0.018...
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
class
DepsTableUpdateCommand
277
312
class DepsTableUpdateCommand(Command): """ A custom distutils command that updates the dependency table. usage: python setup.py deps_table_update """ description = "build runtime dependency table" user_options = [ # format: (long option, short option, description). ("dep-table-update", None, "updates src/transformers/dependency_versions_table.py"), ] def initialize_options(self): pass def finalize_options(self): pass def run(self): if SUPPORTED_PYTHON_VERSIONS[0] != PYTHON_MINOR_VERSION: print(f"Table updated only when running 3.{SUPPORTED_PYTHON_VERSIONS[0]}.x") return entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()]) content = [ "# THIS FILE HAS BEEN AUTOGENERATED. To update:", "# 1. modify the `_deps` dict in setup.py", "# 2. run `make fix-repo``", "deps = {", entries, "}", "", ] target = "src/transformers/dependency_versions_table.py" with open(target, "w", encoding="utf-8", newline="\n") as f: f.write("\n".join(content))
A custom distutils command that updates the dependency table. usage: python setup.py deps_table_update
[ 0.0003546095686033368, 0.03550786152482033, 0.01892395131289959, -0.0034275336656719446, 0.00503761600703001, 0.05853080004453659, -0.04706244915723801, 0.0046277279034256935, 0.0164207573980093, -0.02841908670961857, -0.06216130778193474, 0.03282711282372475, -0.05664120987057686, 0.03374...
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
function
initialize_options
289
290
def initialize_options(self): pass
null
[ -0.024165118113160133, 0.021849283948540688, 0.023639781400561333, 0.0015931295929476619, -0.0032167902681976557, 0.04274918511509895, -0.021249718964099884, -0.02620035596191883, -0.006917029153555632, -0.012468605302274227, -0.0592234767973423, 0.010160677134990692, -0.05194012075662613, ...
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
function
finalize_options
292
293
def finalize_options(self): pass
null
[ -0.022006697952747345, 0.023513589054346085, 0.05579311400651932, -0.0070504131726920605, -0.01680196076631546, 0.022442953661084175, -0.012919694185256958, -0.02578042261302471, -0.009584043174982071, -0.009190605022013187, -0.058432772755622864, 0.0038528284057974815, -0.03867136314511299,...
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
function
run
295
312
def run(self): if SUPPORTED_PYTHON_VERSIONS[0] != PYTHON_MINOR_VERSION: print(f"Table updated only when running 3.{SUPPORTED_PYTHON_VERSIONS[0]}.x") return entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()]) content = [ "# THIS FILE HAS BEEN AUTOGENERATED. To update:", "# 1. modify the `_deps` dict in setup.py", "# 2. run `make fix-repo``", "deps = {", entries, "}", "", ] target = "src/transformers/dependency_versions_table.py" with open(target, "w", encoding="utf-8", newline="\n") as f: f.write("\n".join(content))
null
[ 0.03488852456212044, 0.017486775293946266, 0.039844974875450134, -0.013792885467410088, 0.0030936356633901596, 0.0674070492386818, -0.05074533820152283, 0.009066112339496613, -0.01284620352089405, -0.003205622313544154, -0.028680840507149696, 0.03235523775219917, -0.05674942955374718, 0.05...
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
consecutive_lines
lines_1-50
1
50
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/main/setup.py To create the package for pypi. 1. Create the release branch named: v<RELEASE>-release, for example v4.19-release. For a patch release checkout the current release branch. If releasing on a special branch, copy the updated README.md on the main branch for the commit you will make for the post-release and run `make fix-repo` on the main branch as well. 2. Run `make pre-release` (or `make pre-patch` for a patch release) and commit these changes with the message: "Release: <VERSION>" and push. 3. Go back to the main branch and run `make post-release` then `make fix-repo`. Commit these changes with the message "v<NEXT_VERSION>.dev.0" and push to main. # If you were just cutting the branch in preparation for a release, you can stop here for now. 4. Wait for the tests on the release branch to be completed and be green (otherwise revert and fix bugs) 5. On the release branch, add a tag in git to mark the release: "git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi' " Push the tag to git: git push --tags origin v<RELEASE>-release 6. Have a core maintainer review and approve the deployment to pypi. """ import re import shutil import sys from pathlib import Path from setuptools import Command, find_packages, setup # Supported Python version range (min, max)
null
[ -0.021190518513321877, -0.01070372760295868, 0.00047013198491185904, -0.01109621673822403, 0.019207606092095375, 0.06440595537424088, 0.009705429896712303, 0.019465047866106033, 0.016532836481928825, -0.0153293302282691, -0.0636087954044342, -0.003959989175200462, -0.02864387445151806, -0....
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
consecutive_lines
lines_41-90
41
90
import re import shutil import sys from pathlib import Path from setuptools import Command, find_packages, setup # Supported Python version range (min, max) SUPPORTED_PYTHON_VERSIONS = (10, 14) # 3.10 to 3.14 PYTHON_MINOR_VERSION = sys.version_info.minor # Remove stale transformers.egg-info directory to avoid https://github.com/pypa/pip/issues/5466 stale_egg_info = Path(__file__).parent / "transformers.egg-info" if stale_egg_info.exists(): print( f"Warning: {stale_egg_info} exists.\n\n" "If you recently updated transformers to 3.0 or later, this is expected,\n" "but it may prevent transformers from installing in editable mode.\n\n" "This directory is automatically generated by Python's packaging tools.\n" "I will remove it now.\n\n" "See https://github.com/pypa/pip/issues/5466 for details.\n" ) shutil.rmtree(stale_egg_info) # IMPORTANT: # 1. all dependencies should be listed here with their version requirements if any # 2. once modified, run: `make fix-repo` to update src/transformers/dependency_versions_table.py _deps = [ "Pillow>=10.0.1,<=15.0", "accelerate>=1.1.0", "av", "beautifulsoup4", "blobfile", "codecarbon>=2.8.1", "datasets>=2.15.0", # We need either this pin or pyarrow<21.0.0 "deepspeed>=0.9.3", "diffusers", "dill<0.3.5", "evaluate>=0.4.6", "faiss-cpu", "fastapi", "filelock", "fugashi>=1.0", "GitPython<3.1.19", "hf-doc-builder>=0.3.0", "huggingface-hub>=1.3.0,<2.0",
null
[ 0.040829550474882126, -0.011246595531702042, 0.04940060153603554, -0.020002515986561775, 0.010064659640192986, 0.05196117237210274, -0.02557339146733284, 0.02126607671380043, -0.00658151600509882, -0.016165943816304207, -0.038523949682712555, 0.014755217358469963, -0.028853993862867355, -0...
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
consecutive_lines
lines_81-130
81
130
"diffusers", "dill<0.3.5", "evaluate>=0.4.6", "faiss-cpu", "fastapi", "filelock", "fugashi>=1.0", "GitPython<3.1.19", "hf-doc-builder>=0.3.0", "huggingface-hub>=1.3.0,<2.0", "importlib_metadata", "ipadic>=1.0.0,<2.0", "jinja2>=3.1.0", "jmespath>=1.0.1", "kenlm", "kernels>=0.10.2,<0.11", "librosa", "mistral-common[image]>=1.8.8", "nltk<=3.8.1", "num2words", "numpy>=1.17", "openai>=1.98.0", "opencv-python", "optimum-benchmark>=0.3.0", "optuna", "pandas<2.3.0", # `datasets` requires `pandas` while `pandas==2.3.0` has issues with CircleCI on 2025/06/05 "packaging>=20.0", "parameterized>=0.9", # older version of parameterized cause pytest collection to fail on .expand "peft>=0.18.0", "phonemizer", "protobuf", "psutil", "pyyaml>=5.1", "pydantic>=2", "pytest>=7.2.0,<9.0.0", "pytest-asyncio>=1.2.0", "pytest-random-order", "pytest-rerunfailures<16.0", "pytest-timeout", "pytest-env", "pytest-xdist", "pytest-order", "python>=3.10.0", "regex!=2019.12.17", "rhoknp>=1.1.0,<1.3.1", "rjieba", "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff==0.14.10", # `sacrebleu` not used in `transformers`. However, it is needed in several tests, when a test calls # `evaluate.load("sacrebleu")`. This metric is used in the examples that we use to test the `Trainer` with, in the
null
[ -0.034407664090394974, 0.0139927938580513, 0.03974436968564987, 0.03343360498547554, 0.0013277073157951236, 0.0538610965013504, 0.005976351443678141, 0.0020637623965740204, 0.013219726271927357, -0.0092038968577981, -0.06456366181373596, -0.02924901247024536, -0.0444999523460865, 0.0234994...
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
consecutive_lines
lines_121-170
121
170
"pytest-xdist", "pytest-order", "python>=3.10.0", "regex!=2019.12.17", "rhoknp>=1.1.0,<1.3.1", "rjieba", "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff==0.14.10", # `sacrebleu` not used in `transformers`. However, it is needed in several tests, when a test calls # `evaluate.load("sacrebleu")`. This metric is used in the examples that we use to test the `Trainer` with, in the # `Trainer` tests (see references to `run_translation.py`). "sacrebleu>=1.4.12,<2.0.0", "sacremoses", "safetensors>=0.4.3", "sagemaker>=2.31.0", "schedulefree>=1.2.6", "scikit-learn", "scipy", "sentencepiece>=0.1.91,!=0.1.92", "starlette", "sudachipy>=0.6.6", "sudachidict_core>=20220729", "tensorboard", "timeout-decorator", "tiktoken", "timm>=1.0.23", "tokenizers>=0.22.0,<=0.23.0", "torch>=2.4", "torchaudio", "torchvision", "pyctcdecode>=0.4.0", "tqdm>=4.27", "typer-slim", "unidic>=1.0.2", "unidic_lite>=1.0.7", "urllib3<2.0.0", "uvicorn", "pytest-rich", "libcst", "rich", "ray[tune]>=2.7.0", "opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk", ] # This is a lookup table with items like: {"tokenizers": "tokenizers==0.9.4", "packaging": "packaging"}, i.e. # some of the values are versioned whereas others aren't. deps = {b: a for a, b in (re.findall(r"^(([^!=<>~ ]+)(?:[!=<>~ ].*)?$)", x)[0] for x in _deps)}
null
[ -0.06943200528621674, -0.03045819140970707, 0.005081091541796923, -0.004298980347812176, -0.0034129968844354153, 0.06551002711057663, -0.03538213297724724, -0.009902535937726498, 0.011264191940426826, -0.005174879450351, -0.05344909429550171, -0.0059850383549928665, -0.050699371844530106, ...
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
consecutive_lines
lines_161-210
161
210
"ray[tune]>=2.7.0", "opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk", ] # This is a lookup table with items like: {"tokenizers": "tokenizers==0.9.4", "packaging": "packaging"}, i.e. # some of the values are versioned whereas others aren't. deps = {b: a for a, b in (re.findall(r"^(([^!=<>~ ]+)(?:[!=<>~ ].*)?$)", x)[0] for x in _deps)} def deps_list(*pkgs): return [deps[pkg] for pkg in pkgs] extras = {} extras["torch"] = deps_list("torch", "accelerate") extras["vision"] = deps_list("torchvision", "Pillow") extras["audio"] = deps_list("torchaudio", "librosa", "pyctcdecode", "phonemizer") if PYTHON_MINOR_VERSION < 13: extras["audio"] += deps_list("kenlm") extras["video"] = deps_list("av") extras["timm"] = deps_list("timm") extras["quality"] = deps_list("datasets", "ruff", "GitPython", "urllib3", "libcst", "rich") extras["kernels"] = deps_list("kernels") extras["sentencepiece"] = deps_list("sentencepiece", "protobuf") extras["tiktoken"] = deps_list("tiktoken", "blobfile") if PYTHON_MINOR_VERSION < 14: extras["mistral-common"] = deps_list("mistral-common[image]") extras["chat_template"] = deps_list("jinja2", "jmespath") extras["sklearn"] = deps_list("scikit-learn") extras["accelerate"] = deps_list("accelerate") extras["retrieval"] = deps_list("faiss-cpu", "datasets") extras["sagemaker"] = deps_list("sagemaker") extras["deepspeed"] = deps_list("deepspeed", "accelerate") extras["optuna"] = deps_list("optuna") extras["integrations"] = deps_list("kernels", "optuna", "codecarbon") if PYTHON_MINOR_VERSION < 14: extras["ray"] = deps_list("ray[tune]") extras["integrations"] += extras["ray"] extras["codecarbon"] = deps_list("codecarbon") extras["serving"] = deps_list("openai", "pydantic", "uvicorn", "fastapi", "starlette", "rich") + extras["torch"] extras["num2words"] = deps_list("num2words") extras["benchmark"] = deps_list("optimum-benchmark") extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic", "rhoknp") if PYTHON_MINOR_VERSION < 14: 
extras["ja"] += deps_list("sudachipy", "sudachidict_core") # OpenTelemetry dependencies for metrics collection in continuous batching extras["open-telemetry"] = deps_list("opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk")
null
[ -0.026470033451914787, 0.032014377415180206, 0.026803143322467804, -0.016841517761349678, -0.006080410908907652, 0.06403461843729019, -0.05822994187474251, -0.0022063199430704117, 0.021291326731443405, 0.009912814013659954, -0.046166256070137024, -0.009303241968154907, -0.06119201332330704, ...
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
consecutive_lines
lines_201-250
201
250
extras["integrations"] += extras["ray"] extras["codecarbon"] = deps_list("codecarbon") extras["serving"] = deps_list("openai", "pydantic", "uvicorn", "fastapi", "starlette", "rich") + extras["torch"] extras["num2words"] = deps_list("num2words") extras["benchmark"] = deps_list("optimum-benchmark") extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic", "rhoknp") if PYTHON_MINOR_VERSION < 14: extras["ja"] += deps_list("sudachipy", "sudachidict_core") # OpenTelemetry dependencies for metrics collection in continuous batching extras["open-telemetry"] = deps_list("opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk") extras["testing"] = ( deps_list( "pytest", "pytest-asyncio", "pytest-random-order", "pytest-rich", "pytest-xdist", "pytest-order", "pytest-rerunfailures", "pytest-timeout", "pytest-env", "timeout-decorator", "parameterized", "psutil", "dill", "evaluate", "rouge-score", "nltk", "sacremoses", "rjieba", "beautifulsoup4", "tensorboard", "sacrebleu", # needed in trainer tests, see references to `run_translation.py` "filelock", # filesystem locks, e.g., to prevent parallel downloads ) + extras["quality"] + extras["retrieval"] + extras["sentencepiece"] + extras["serving"] ) if PYTHON_MINOR_VERSION < 14: extras["testing"] += extras["mistral-common"] extras["deepspeed-testing"] = extras["deepspeed"] + extras["testing"] + extras["optuna"] + extras["sentencepiece"] extras["all"] = ( extras["torch"] + extras["vision"] + extras["audio"] + extras["video"]
null
[ -0.02883412502706051, 0.030369536951184273, 0.005553541239351034, 0.006687094923108816, -0.010925378650426865, 0.0567903108894825, -0.035264648497104645, -0.0004146939900238067, -0.025301771238446236, 0.004842041991651058, -0.052424076944589615, -0.014127975329756737, -0.05733286589384079, ...
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
consecutive_lines
lines_241-290
241
290
) if PYTHON_MINOR_VERSION < 14: extras["testing"] += extras["mistral-common"] extras["deepspeed-testing"] = extras["deepspeed"] + extras["testing"] + extras["optuna"] + extras["sentencepiece"] extras["all"] = ( extras["torch"] + extras["vision"] + extras["audio"] + extras["video"] + extras["kernels"] + extras["timm"] + extras["sentencepiece"] + extras["tiktoken"] + extras["chat_template"] + extras["num2words"] ) if PYTHON_MINOR_VERSION < 14: extras["all"] += extras["mistral-common"] extras["dev"] = extras["all"] + extras["testing"] + extras["ja"] + extras["sklearn"] # Those define the hard dependencies of `transformers` install_requires = [ deps["huggingface-hub"], deps["numpy"], deps["packaging"], # utilities from PyPA to e.g., compare versions deps["pyyaml"], # used for the model cards metadata deps["regex"], # for OpenAI GPT deps["tokenizers"], deps["typer-slim"], # CLI utilities. In practice, already a dependency of huggingface_hub but we use it as well deps["safetensors"], deps["tqdm"], # progress bars in model download and training scripts ] class DepsTableUpdateCommand(Command): """ A custom distutils command that updates the dependency table. usage: python setup.py deps_table_update """ description = "build runtime dependency table" user_options = [ # format: (long option, short option, description). ("dep-table-update", None, "updates src/transformers/dependency_versions_table.py"), ] def initialize_options(self): pass
null
[ -0.003867183346301317, 0.010774005204439163, 0.02821751870214939, -0.033399950712919235, -0.02579066902399063, 0.042633116245269775, -0.033125102519989014, 0.004837960936129093, -0.0048879096284508705, -0.020743921399116516, -0.0545378252863884, 0.0238000750541687, -0.03694702312350273, 0....
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
consecutive_lines
lines_281-330
281
330
""" description = "build runtime dependency table" user_options = [ # format: (long option, short option, description). ("dep-table-update", None, "updates src/transformers/dependency_versions_table.py"), ] def initialize_options(self): pass def finalize_options(self): pass def run(self): if SUPPORTED_PYTHON_VERSIONS[0] != PYTHON_MINOR_VERSION: print(f"Table updated only when running 3.{SUPPORTED_PYTHON_VERSIONS[0]}.x") return entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()]) content = [ "# THIS FILE HAS BEEN AUTOGENERATED. To update:", "# 1. modify the `_deps` dict in setup.py", "# 2. run `make fix-repo``", "deps = {", entries, "}", "", ] target = "src/transformers/dependency_versions_table.py" with open(target, "w", encoding="utf-8", newline="\n") as f: f.write("\n".join(content)) if __name__ == "__main__": # Generate python_requires from supported version range min_version, max_version = SUPPORTED_PYTHON_VERSIONS python_requires = f">=3.{min_version}.0" # Generate Python version classifiers dynamically python_classifiers = ["Programming Language :: Python :: 3"] for minor in range(min_version, max_version + 1): python_classifiers.append(f"Programming Language :: Python :: 3.{minor}") setup( name="transformers", version="5.0.1.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/transformers/graphs/contributors)", author_email="transformers@huggingface.co", description="Transformers: the model-definition framework for state-of-the-art machine learning models in text, vision, audio, and multimodal models, for both inference and training.",
null
[ -0.01886608451604843, 0.016481587663292885, 0.026901600882411003, -0.023684898391366005, 0.011880775913596153, 0.055174730718135834, -0.057990673929452896, -0.006224957760423422, 0.021271375939249992, -0.023626726120710373, -0.07416518777608871, 0.03566010668873787, -0.0507105253636837, 0....
Snowflake/snowflake-arctic-embed-m
setup.py
setup.py
consecutive_lines
lines_321-357
321
357
python_classifiers = ["Programming Language :: Python :: 3"] for minor in range(min_version, max_version + 1): python_classifiers.append(f"Programming Language :: Python :: 3.{minor}") setup( name="transformers", version="5.0.1.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/transformers/graphs/contributors)", author_email="transformers@huggingface.co", description="Transformers: the model-definition framework for state-of-the-art machine learning models in text, vision, audio, and multimodal models, for both inference and training.", long_description=open("README.md", "r", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="machine-learning nlp python pytorch transformer llm vlm deep-learning inference training model-hub pretrained-models llama gemma qwen", license="Apache 2.0 License", url="https://github.com/huggingface/transformers", package_dir={"": "src"}, packages=find_packages("src"), include_package_data=True, package_data={"": ["**/*.cu", "**/*.cpp", "**/*.cuh", "**/*.h", "**/*.pyx", "py.typed"]}, zip_safe=False, extras_require=extras, entry_points={"console_scripts": ["transformers=transformers.cli.transformers:main"]}, python_requires=python_requires, install_requires=list(install_requires), classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "Operating System :: OS Independent", ] + python_classifiers + [ "Topic :: Scientific/Engineering :: Artificial Intelligence", ], cmdclass={"deps_table_update": DepsTableUpdateCommand}, )
null
[ 0.02460513450205326, -0.021483566612005234, 0.016735754907131195, -0.018360253423452377, -0.008298270404338837, 0.05959960073232651, -0.011417793110013008, -0.001943541574291885, -0.020660359412431717, -0.004528346937149763, -0.03553372621536255, 0.015644704923033714, -0.057541489601135254, ...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
function
checkout_commit
43
57
def checkout_commit(repo: Repo, commit_id: str): """ Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). commit_id (`str`): The commit reference to checkout inside the context manager. """ current_head = repo.head.commit if repo.head.is_detached else repo.head.ref try: repo.git.checkout(commit_id) yield finally: repo.git.checkout(current_head)
Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). commit_id (`str`): The commit reference to checkout inside the context manager.
[ -0.009754015132784843, -0.023715849965810776, -0.020338615402579308, 0.007231442723423243, -0.02734859474003315, 0.022617282345891, -0.02306985855102539, -0.0014686277136206627, 0.011301232501864433, 0.018442431464791298, -0.02470654435455799, 0.0325886569917202, 0.022480720654129982, 0.11...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
function
summarize
60
146
def summarize(run_dir, metrics, expand_metrics=False): """Produce a summary for each optimum-benchmark launched job's output directory found in `run_dir`. Each summary's format is as follows (for `expand_metrics=False`): ``` { "model": "google/gemma-2b", "commit": "3cd6ed22e4d49219f300f5055e71e3929aba20d7", "config": "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5", "metrics": { "decode.latency.mean": 1.624666809082031, "per_token.latency.mean": 0.012843788806628804, "per_token.throughput.value": 77.85864553330948 } } ``` """ reports = glob.glob(os.path.join(run_dir, "**/benchmark_report.json"), recursive=True) report_dirs = [str(Path(report).parent) for report in reports] summaries = [] for report_dir in report_dirs: commit = re.search(r"/commit=([^/]+)", report_dir).groups()[0] if not os.path.isfile(os.path.join(report_dir, "benchmark.json")): continue benchmark = Benchmark.from_json(os.path.join(report_dir, "benchmark.json")) report = benchmark.report model = benchmark.config.backend["model"] # This looks like `benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5`. # (we rely on the usage of hydra's `${hydra.job.override_dirname}`.) benchmark_name = re.sub(f"backend.model={model},*", "", report_dir) benchmark_name = str(Path(benchmark_name).parts[-1]) if benchmark_name.startswith("commit="): benchmark_name = benchmark.config.name metrics_values = {} # post-processing of report: show a few selected/important metric for metric in metrics: keys = metric.split(".") value = report.to_dict() current = metrics_values for key in keys: # Avoid KeyError when a user's specified metric has typo. # TODO: Give warnings. 
if key not in value: continue value = value[key] if expand_metrics: if isinstance(value, dict): if key not in current: current[key] = {} current = current[key] else: current[key] = value if not expand_metrics: metrics_values[metric] = value # show some config information print(f"model: {model}") print(f"commit: {commit}") print(f"config: {benchmark_name}") if len(metrics_values) > 0: print("metrics:") if expand_metrics: print(metrics_values) else: for metric, value in metrics_values.items(): print(f" - {metric}: {value}") print("-" * 80) summary = { "model": model, "commit": commit, "config": benchmark_name, "metrics": metrics_values, } summaries.append(summary) with open(os.path.join(report_dir, "summary.json"), "w") as fp: json.dump(summary, fp, indent=4) return summaries
Produce a summary for each optimum-benchmark launched job's output directory found in `run_dir`. Each summary's format is as follows (for `expand_metrics=False`): ``` { "model": "google/gemma-2b", "commit": "3cd6ed22e4d49219f300f5055e71e3929aba20d7", "config": "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5", "metrics": { "decode.latency.mean": 1.624666809082031, "per_token.latency.mean": 0.012843788806628804, "per_token.throughput.value": 77.85864553330948 } } ```
[ 0.002560693072155118, 0.02421804890036583, -0.015506088733673096, -0.02109089121222496, -0.014389337971806526, 0.05925861373543739, -0.07285057753324509, -0.022945398464798927, 0.011294636875391006, -0.01962871663272381, -0.01005898043513298, 0.0019913522992283106, -0.031007330864667892, 0...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
function
combine_summaries
149
195
def combine_summaries(summaries): """Combine a list of summary obtained from the function `summarize`. The combined summary's format is as follows: ``` "google/gemma-2b": { "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5": { "3cd6ed22e4d49219f300f5055e71e3929aba20d7": { "metrics": {"decode.latency.mean": 1.624666809082031} }, "c97ee28b117c0abe8e08891f402065e4df6d72aa": { "metrics": {"decode.latency.mean": 1.6278163452148438} } }, "benchmark.input_shapes.batch_size=2,benchmark.input_shapes.sequence_length=5": { "3cd6ed22e4d49219f300f5055e71e3929aba20d7": { "metrics": {"decode.latency.mean": 1.6947791748046876} }, "c97ee28b117c0abe8e08891f402065e4df6d72aa": { "metrics": { "decode.latency.mean": 1.6980519409179688} } } } ``` """ combined = {} for summary in summaries: model = summary["model"] config = summary["config"] commit = summary["commit"] if model not in combined: combined[model] = {} if config not in combined[model]: combined[model][config] = {} if commit not in combined[model][config]: combined[model][config][commit] = {"metrics": summary["metrics"]} with open(os.path.join(exp_run_dir, "summary.json"), "w") as fp: json.dump(combined, fp, indent=4) print(json.dumps(combined, indent=4)) return combined
Combine a list of summary obtained from the function `summarize`. The combined summary's format is as follows: ``` "google/gemma-2b": { "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5": { "3cd6ed22e4d49219f300f5055e71e3929aba20d7": { "metrics": {"decode.latency.mean": 1.624666809082031} }, "c97ee28b117c0abe8e08891f402065e4df6d72aa": { "metrics": {"decode.latency.mean": 1.6278163452148438} } }, "benchmark.input_shapes.batch_size=2,benchmark.input_shapes.sequence_length=5": { "3cd6ed22e4d49219f300f5055e71e3929aba20d7": { "metrics": {"decode.latency.mean": 1.6947791748046876} }, "c97ee28b117c0abe8e08891f402065e4df6d72aa": { "metrics": { "decode.latency.mean": 1.6980519409179688} } } } ```
[ -0.0036142992321401834, 0.023074334487318993, 0.004391972906887531, -0.014860550872981548, 0.01937716454267502, 0.058210115879774094, -0.06021858751773834, 0.002744358265772462, 0.0005321243079379201, 0.0009724865667521954, -0.015606437809765339, 0.005734809208661318, -0.05541108548641205, ...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
function
list_str
200
201
def list_str(values): return values.split(",")
null
[ -0.006914547644555569, 0.04027152806520462, 0.025814734399318695, 0.02651987224817276, -0.011783893220126629, 0.08176975697278976, -0.021656224504113197, -0.022267144173383713, -0.03858897089958191, 0.04398009926080704, -0.043262824416160583, 0.015179529786109924, -0.06538361310958862, 0.0...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
consecutive_lines
lines_1-50
1
50
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Run benchmark using the `optimum-benchmark` library with some customization in `transformers`. Assume we are under `transformers` root directory: (make sure the commits are valid commits) ```bash python benchmark/benchmark.py --config-dir benchmark/config --config-name generation --commit=9b9c7f03da625b13643e99205c691fe046461724 --metrics=decode.latency.mean,per_token.latency.mean,per_token.throughput.value backend.model=google/gemma-2b benchmark.input_shapes.sequence_length=5,7 benchmark.input_shapes.batch_size=1,2 --multirun ``` """ import argparse import glob import json import os.path import re import tempfile from contextlib import contextmanager from pathlib import Path from git import Repo from huggingface_hub import HfApi from optimum_benchmark import Benchmark from optimum_benchmark_wrapper import main PATH_TO_REPO = Path(__file__).parent.parent.resolve() @contextmanager def checkout_commit(repo: Repo, commit_id: str): """ Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). commit_id (`str`): The commit reference to checkout inside the context manager. """ current_head = repo.head.commit if repo.head.is_detached else repo.head.ref
null
[ -0.0016044833464547992, -0.008274110965430737, -0.017481429502367973, -0.027536757290363312, -0.004528880584985018, 0.0560777373611927, -0.02417074143886566, -0.0034409055951982737, 0.0209346991032362, -0.013068385422229767, -0.06709206104278564, -0.010346188209950924, -0.014744529500603676,...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
consecutive_lines
lines_41-90
41
90
@contextmanager def checkout_commit(repo: Repo, commit_id: str): """ Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). commit_id (`str`): The commit reference to checkout inside the context manager. """ current_head = repo.head.commit if repo.head.is_detached else repo.head.ref try: repo.git.checkout(commit_id) yield finally: repo.git.checkout(current_head) def summarize(run_dir, metrics, expand_metrics=False): """Produce a summary for each optimum-benchmark launched job's output directory found in `run_dir`. Each summary's format is as follows (for `expand_metrics=False`): ``` { "model": "google/gemma-2b", "commit": "3cd6ed22e4d49219f300f5055e71e3929aba20d7", "config": "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5", "metrics": { "decode.latency.mean": 1.624666809082031, "per_token.latency.mean": 0.012843788806628804, "per_token.throughput.value": 77.85864553330948 } } ``` """ reports = glob.glob(os.path.join(run_dir, "**/benchmark_report.json"), recursive=True) report_dirs = [str(Path(report).parent) for report in reports] summaries = [] for report_dir in report_dirs: commit = re.search(r"/commit=([^/]+)", report_dir).groups()[0] if not os.path.isfile(os.path.join(report_dir, "benchmark.json")): continue benchmark = Benchmark.from_json(os.path.join(report_dir, "benchmark.json")) report = benchmark.report model = benchmark.config.backend["model"]
null
[ -0.019116414710879326, -0.005309815984219313, -0.013017912395298481, -0.0025706011801958084, -0.008477672003209591, 0.042580775916576385, -0.026571601629257202, 0.007086823228746653, 0.013712447136640549, 0.0031323260627686977, -0.02850494720041752, 0.02940072864294052, 0.010219148360192776,...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
consecutive_lines
lines_81-130
81
130
for report_dir in report_dirs: commit = re.search(r"/commit=([^/]+)", report_dir).groups()[0] if not os.path.isfile(os.path.join(report_dir, "benchmark.json")): continue benchmark = Benchmark.from_json(os.path.join(report_dir, "benchmark.json")) report = benchmark.report model = benchmark.config.backend["model"] # This looks like `benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5`. # (we rely on the usage of hydra's `${hydra.job.override_dirname}`.) benchmark_name = re.sub(f"backend.model={model},*", "", report_dir) benchmark_name = str(Path(benchmark_name).parts[-1]) if benchmark_name.startswith("commit="): benchmark_name = benchmark.config.name metrics_values = {} # post-processing of report: show a few selected/important metric for metric in metrics: keys = metric.split(".") value = report.to_dict() current = metrics_values for key in keys: # Avoid KeyError when a user's specified metric has typo. # TODO: Give warnings. if key not in value: continue value = value[key] if expand_metrics: if isinstance(value, dict): if key not in current: current[key] = {} current = current[key] else: current[key] = value if not expand_metrics: metrics_values[metric] = value # show some config information print(f"model: {model}") print(f"commit: {commit}") print(f"config: {benchmark_name}") if len(metrics_values) > 0: print("metrics:") if expand_metrics: print(metrics_values) else:
null
[ 0.018845152109861374, 0.031464833766222, -0.02614469639956951, 0.014954560436308384, -0.011491812765598297, 0.06476447731256485, -0.026098215952515602, -0.023622781038284302, -0.0179144199937582, 0.0098127331584692, -0.03749355673789978, 0.04966438561677933, -0.03415229544043541, 0.0621186...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
consecutive_lines
lines_121-170
121
170
# show some config information print(f"model: {model}") print(f"commit: {commit}") print(f"config: {benchmark_name}") if len(metrics_values) > 0: print("metrics:") if expand_metrics: print(metrics_values) else: for metric, value in metrics_values.items(): print(f" - {metric}: {value}") print("-" * 80) summary = { "model": model, "commit": commit, "config": benchmark_name, "metrics": metrics_values, } summaries.append(summary) with open(os.path.join(report_dir, "summary.json"), "w") as fp: json.dump(summary, fp, indent=4) return summaries def combine_summaries(summaries): """Combine a list of summary obtained from the function `summarize`. The combined summary's format is as follows: ``` "google/gemma-2b": { "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5": { "3cd6ed22e4d49219f300f5055e71e3929aba20d7": { "metrics": {"decode.latency.mean": 1.624666809082031} }, "c97ee28b117c0abe8e08891f402065e4df6d72aa": { "metrics": {"decode.latency.mean": 1.6278163452148438} } }, "benchmark.input_shapes.batch_size=2,benchmark.input_shapes.sequence_length=5": { "3cd6ed22e4d49219f300f5055e71e3929aba20d7": { "metrics": {"decode.latency.mean": 1.6947791748046876} }, "c97ee28b117c0abe8e08891f402065e4df6d72aa": { "metrics": { "decode.latency.mean": 1.6980519409179688} }
null
[ -0.008801553398370743, 0.03687671199440956, 0.003338991431519389, -0.024376748129725456, 0.021134188398718834, 0.057953186333179474, -0.04225175082683563, 0.012571552768349648, -0.0266206543892622, 0.012296398170292377, 0.003885685233399272, 0.008030529133975506, -0.06399288028478622, 0.03...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
consecutive_lines
lines_161-210
161
210
} }, "benchmark.input_shapes.batch_size=2,benchmark.input_shapes.sequence_length=5": { "3cd6ed22e4d49219f300f5055e71e3929aba20d7": { "metrics": {"decode.latency.mean": 1.6947791748046876} }, "c97ee28b117c0abe8e08891f402065e4df6d72aa": { "metrics": { "decode.latency.mean": 1.6980519409179688} } } } ``` """ combined = {} for summary in summaries: model = summary["model"] config = summary["config"] commit = summary["commit"] if model not in combined: combined[model] = {} if config not in combined[model]: combined[model][config] = {} if commit not in combined[model][config]: combined[model][config][commit] = {"metrics": summary["metrics"]} with open(os.path.join(exp_run_dir, "summary.json"), "w") as fp: json.dump(combined, fp, indent=4) print(json.dumps(combined, indent=4)) return combined if __name__ == "__main__": def list_str(values): return values.split(",") parser = argparse.ArgumentParser() parser.add_argument("--config-dir", type=str, required=True, help="The path to the config directory.") parser.add_argument("--config-name", type=str, required=True, help="The config name.") # arguments specific to this wrapper for our own customization parser.add_argument("--ensure_empty", type=bool, default=True, help="If to create a temporary directory.") parser.add_argument(
null
[ -0.009088689461350441, 0.016749007627367973, 0.04501064866781235, 0.003189319046214223, -0.0025986176915466785, 0.07429319620132446, -0.03367904946208, -0.004785722587257624, -0.016318725422024727, 0.01849721558392048, -0.041932620108127594, -0.005562487058341503, -0.029662063345313072, -0...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
consecutive_lines
lines_201-250
201
250
return values.split(",") parser = argparse.ArgumentParser() parser.add_argument("--config-dir", type=str, required=True, help="The path to the config directory.") parser.add_argument("--config-name", type=str, required=True, help="The config name.") # arguments specific to this wrapper for our own customization parser.add_argument("--ensure_empty", type=bool, default=True, help="If to create a temporary directory.") parser.add_argument( "--commit", type=list_str, default="", help="Comma-separated list of branch names and/or commit sha values on which the benchmark will run. If `diff` is specified, it will run on both the current head and the `main` branch.", ) parser.add_argument("--metrics", type=str, help="The metrics to be included in the summary.") parser.add_argument("--repo_id", type=str, default=None, help="The repository to which the file will be uploaded.") parser.add_argument("--path_in_repo", type=str, default=None, help="Relative filepath in the repo.") parser.add_argument("--token", type=str, default=None, help="A valid user access token (string).") args, optimum_benchmark_args = parser.parse_known_args() repo = Repo(PATH_TO_REPO) metrics = [ "prefill.latency.mean", "prefill.throughput.value", "decode.latency.mean", "decode.throughput.value", "per_token.latency.mean", "per_token.throughput.value", ] if args.metrics is not None: metrics = args.metrics.split(",") # Get `backend.model` in a hacky way: We want to control the experiment flow manually. models = [""] for idx, arg in enumerate(optimum_benchmark_args): if arg.startswith("backend.model="): models = arg[len("backend.model=") :] models = models.split(",") break optimum_benchmark_args = [arg for arg in optimum_benchmark_args if not arg.startswith("backend.model=")] # Get the commit(s) current_head = str(repo.head.commit) if repo.head.is_detached else str(repo.head.ref) commits = [x for x in args.commit if x != ""] if len(commits) == 0: commits = [current_head]
null
[ 0.027882635593414307, 0.01531918253749609, 0.025968967005610466, 0.003253509057685733, -0.008747553452849388, 0.0906260535120964, -0.04928727075457573, 0.0032387899700552225, 0.00888626929372549, 0.0005282326601445675, -0.0016849066596478224, 0.02663387730717659, -0.01074427179992199, 0.00...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
consecutive_lines
lines_241-290
241
290
models = arg[len("backend.model=") :] models = models.split(",") break optimum_benchmark_args = [arg for arg in optimum_benchmark_args if not arg.startswith("backend.model=")] # Get the commit(s) current_head = str(repo.head.commit) if repo.head.is_detached else str(repo.head.ref) commits = [x for x in args.commit if x != ""] if len(commits) == 0: commits = [current_head] elif len(commits) == 1 and commits[0] == "diff": # compare to `main` commits = ["main", current_head] # Get the specified run directory run_dir_arg_idx, run_dir = -1, None sweep_dir_arg_idx, sweep_dir = -1, None for idx, arg in enumerate(optimum_benchmark_args): if arg.startswith("hydra.run.dir="): run_dir = arg[len("hydra.run.dir=") :] run_dir_arg_idx = idx elif arg.startswith("hydra.sweep.dir="): sweep_dir = arg[len("hydra.sweep.dir=") :] sweep_dir_arg_idx = idx exp_run_dir, arg_dix, arg_name = ( (sweep_dir, sweep_dir_arg_idx, "hydra.sweep.dir") if "--multirun" in optimum_benchmark_args else (run_dir, run_dir_arg_idx, "hydra.run.dir") ) # TODO: not hardcoded if exp_run_dir is None and args.ensure_empty: exp_run_dir = "_benchmark" if args.ensure_empty: os.makedirs(exp_run_dir, exist_ok=True) exp_run_dir = tempfile.mkdtemp(dir=exp_run_dir) run_summaries = [] for commit in commits: with checkout_commit(repo, commit): commit = str(repo.head.commit) commit_run_dir = exp_run_dir if exp_run_dir is not None: commit_run_dir = os.path.join(exp_run_dir, rf"commit\={commit}") print(f"Run benchmark on commit: {commit}") for model in models:
null
[ -0.018963495269417763, 0.008789286017417908, -0.014030827209353447, 0.01203567162156105, -0.03946371003985405, 0.061384789645671844, -0.03190089762210846, -0.034685052931308746, -0.0051489220932126045, 0.021518411114811897, -0.03397250175476074, 0.04129396378993988, 0.0032239684369415045, ...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
consecutive_lines
lines_281-324
281
324
with checkout_commit(repo, commit): commit = str(repo.head.commit) commit_run_dir = exp_run_dir if exp_run_dir is not None: commit_run_dir = os.path.join(exp_run_dir, rf"commit\={commit}") print(f"Run benchmark on commit: {commit}") for model in models: model_arg = [f"backend.model={model}"] if model != "" else [] dir_args = [] if commit_run_dir is not None: if arg_dix > -1: optimum_benchmark_args[arg_dix] = f"{arg_name}={commit_run_dir}" else: dir_args = [ f"hydra.sweep.dir={commit_run_dir}", f"hydra.run.dir={commit_run_dir}/" + "${hydra.job.override_dirname}", ] main(args.config_dir, args.config_name, model_arg + dir_args + optimum_benchmark_args) if commit_run_dir is not None: # Need to remove the `\` character summaries = summarize(commit_run_dir.replace("\\", ""), metrics) run_summaries.extend(summaries) # aggregate the information across the commits if exp_run_dir is not None: with open(os.path.join(exp_run_dir, "summaries.json"), "w") as fp: json.dump(run_summaries, fp, indent=4) combined_summary = combine_summaries(run_summaries) if args.repo_id is not None and args.path_in_repo is not None: # Upload to Hub api = HfApi() api.upload_folder( folder_path=exp_run_dir, path_in_repo=args.path_in_repo, repo_id=args.repo_id, repo_type="dataset", token=args.token, )
null
[ -0.007672429084777832, 0.013719524256885052, -0.018926125019788742, -0.01540476456284523, -0.02227802947163582, 0.051034241914749146, -0.04278167337179184, -0.019822534173727036, -0.0030236048623919487, 0.010418230667710304, -0.0370509959757328, 0.04972655326128006, -0.01825573481619358, 0...
Snowflake/snowflake-arctic-embed-m
benchmark.py
benchmark/benchmark.py
consecutive_lines
lines_321-324
321
324
repo_id=args.repo_id, repo_type="dataset", token=args.token, )
null
[ -0.010100586339831352, -0.02442842163145542, 0.011189697310328484, 0.013033409602940083, -0.024469764903187752, 0.0710294172167778, -0.023639973253011703, -0.026124492287635803, -0.022313576191663742, 0.01825130172073841, -0.054962072521448135, -0.025043148547410965, -0.017641104757785797, ...
Snowflake/snowflake-arctic-embed-m
optimum_benchmark_wrapper.py
benchmark/optimum_benchmark_wrapper.py
function
main
5
10
def main(config_dir, config_name, args): subprocess.run( ["optimum-benchmark", "--config-dir", f"{config_dir}", "--config-name", f"{config_name}"] + ["hydra/job_logging=disabled", "hydra/hydra_logging=disabled"] + args )
null
[ 0.0009669924038462341, 0.011063403449952602, 0.00720402505248785, -0.04228461533784866, -0.005709083750844002, 0.09096977114677429, -0.08167360723018646, -0.05616800859570503, -0.010058552958071232, -0.046998828649520874, -0.03389132767915726, 0.014847813174128532, -0.01333061046898365, 0....
Snowflake/snowflake-arctic-embed-m
optimum_benchmark_wrapper.py
benchmark/optimum_benchmark_wrapper.py
consecutive_lines
lines_1-20
1
20
import argparse import subprocess def main(config_dir, config_name, args): subprocess.run( ["optimum-benchmark", "--config-dir", f"{config_dir}", "--config-name", f"{config_name}"] + ["hydra/job_logging=disabled", "hydra/hydra_logging=disabled"] + args ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config-dir", type=str, required=True, help="The path to the config directory.") parser.add_argument("--config-name", type=str, required=True, help="The config name.") args, unknown = parser.parse_known_args() main(args.config_dir, args.config_name, unknown)
null
[ 0.00568745331838727, 0.019097652286291122, 0.025561274960637093, -0.04309668764472008, -0.013265259563922882, 0.11229579150676727, -0.07884515821933746, -0.06486554443836212, 0.0067429011687636375, -0.026068588718771935, -0.014078804291784763, 0.020390799269080162, -0.022213613614439964, 0...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
class
ImportModuleException
36
37
class ImportModuleException(Exception): pass
null
[ -0.05781903490424156, 0.014218623749911785, 0.004986193962395191, 0.013684508390724659, 0.02670780010521412, -0.010159513913094997, -0.0005481906700879335, -0.014442273415625095, -0.027850577607750893, 0.018465286120772362, -0.06218217685818672, -0.007638738490641117, -0.031649187207221985, ...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
class
MetricsRecorder
40
307
class MetricsRecorder: def __init__( self, connection, logger: logging.Logger, repository: str, branch: str, commit_id: str, commit_msg: str, collect_csv_data: bool = True, ): self.conn = connection self.use_database = connection is not None if self.use_database: self.conn.autocommit = True self.logger = logger self.repository = repository self.branch = branch self.commit_id = commit_id self.commit_msg = commit_msg self.collect_csv_data = collect_csv_data # For CSV export - store all data in pandas DataFrames (only if CSV collection is enabled) if self.collect_csv_data: # Initialize empty DataFrames with proper schemas self.benchmarks_df = pd.DataFrame( columns=[ "benchmark_id", "repository", "branch", "commit_id", "commit_message", "metadata", "created_at", ] ) self.device_measurements_df = pd.DataFrame( columns=["benchmark_id", "cpu_util", "mem_megabytes", "gpu_util", "gpu_mem_megabytes", "time"] ) self.model_measurements_df = pd.DataFrame( columns=[ "benchmark_id", "time", "model_load_time", "first_eager_forward_pass_time_secs", "second_eager_forward_pass_time_secs", "first_eager_generate_time_secs", "second_eager_generate_time_secs", "time_to_first_token_secs", "time_to_second_token_secs", "time_to_third_token_secs", "time_to_next_token_mean_secs", "first_compile_generate_time_secs", "second_compile_generate_time_secs", "third_compile_generate_time_secs", "fourth_compile_generate_time_secs", ] ) else: self.benchmarks_df = None self.device_measurements_df = None self.model_measurements_df = None def initialise_benchmark(self, metadata: dict[str, str]) -> str: """ Creates a new benchmark, returns the benchmark id (UUID) """ # Generate a unique UUID for this benchmark benchmark_id = str(uuid.uuid4()) if self.use_database: with self.conn.cursor() as cur: cur.execute( "INSERT INTO benchmarks (benchmark_id, repository, branch, commit_id, commit_message, metadata) VALUES (%s, %s, %s, %s, %s, %s)", (benchmark_id, self.repository, self.branch, self.commit_id, 
self.commit_msg, metadata), ) self.logger.debug(f"initialised benchmark #{benchmark_id}") # Store benchmark data for CSV export (if enabled) if self.collect_csv_data: # Add row to pandas DataFrame new_row = pd.DataFrame( [ { "benchmark_id": benchmark_id, "repository": self.repository, "branch": self.branch, "commit_id": self.commit_id, "commit_message": self.commit_msg, "metadata": json.dumps(metadata), "created_at": datetime.utcnow().isoformat(), } ] ) self.benchmarks_df = pd.concat([self.benchmarks_df, new_row], ignore_index=True) mode_info = [] if self.use_database: mode_info.append("database") if self.collect_csv_data: mode_info.append("CSV") mode_str = " + ".join(mode_info) if mode_info else "no storage" self.logger.debug(f"initialised benchmark #{benchmark_id} ({mode_str} mode)") return benchmark_id def collect_device_measurements(self, benchmark_id: str, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes): """ Collect device metrics, such as CPU & GPU usage. These are "static", as in you cannot pass arbitrary arguments to the function. 
""" # Store device measurements for CSV export (if enabled) if self.collect_csv_data: # Add row to pandas DataFrame new_row = pd.DataFrame( [ { "benchmark_id": benchmark_id, "cpu_util": cpu_util, "mem_megabytes": mem_megabytes, "gpu_util": gpu_util, "gpu_mem_megabytes": gpu_mem_megabytes, "time": datetime.utcnow().isoformat(), } ] ) self.device_measurements_df = pd.concat([self.device_measurements_df, new_row], ignore_index=True) # Store in database if available if self.use_database: with self.conn.cursor() as cur: cur.execute( "INSERT INTO device_measurements (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes) VALUES (%s, %s, %s, %s, %s)", (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes), ) self.logger.debug( f"collected device measurements for benchmark #{benchmark_id} [CPU util: {cpu_util}, mem MBs: {mem_megabytes}, GPU util: {gpu_util}, GPU mem MBs: {gpu_mem_megabytes}]" ) def collect_model_measurements(self, benchmark_id: str, measurements: dict[str, float]): # Store model measurements for CSV export (if enabled) if self.collect_csv_data: # Add row to pandas DataFrame with flattened measurements row_data = {"benchmark_id": benchmark_id, "time": datetime.utcnow().isoformat()} # Flatten the measurements dict into the row row_data.update(measurements) new_row = pd.DataFrame([row_data]) self.model_measurements_df = pd.concat([self.model_measurements_df, new_row], ignore_index=True) # Store in database if available if self.use_database: with self.conn.cursor() as cur: cur.execute( """ INSERT INTO model_measurements ( benchmark_id, measurements ) VALUES (%s, %s) """, ( benchmark_id, measurements, ), ) self.logger.debug(f"collected model measurements for benchmark #{benchmark_id}: {measurements}") def export_to_csv(self, output_dir: str = "benchmark_results"): """ Export all collected data to CSV files using pandas DataFrames """ if not self.collect_csv_data: self.logger.warning("CSV data collection is disabled - no CSV files will 
be generated") return if not os.path.exists(output_dir): os.makedirs(output_dir) self.logger.info(f"Created output directory: {output_dir}") timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") files_created = [] # Export using pandas DataFrames self._export_pandas_data(output_dir, timestamp, files_created) self.logger.info(f"CSV export complete! Created {len(files_created)} files in {output_dir}") def _export_pandas_data(self, output_dir: str, timestamp: str, files_created: list): """ Export CSV files using pandas DataFrames """ # Export benchmarks benchmarks_file = os.path.join(output_dir, f"benchmarks_{timestamp}.csv") self.benchmarks_df.to_csv(benchmarks_file, index=False) files_created.append(benchmarks_file) self.logger.info(f"Exported {len(self.benchmarks_df)} benchmark records to {benchmarks_file}") # Export device measurements device_file = os.path.join(output_dir, f"device_measurements_{timestamp}.csv") self.device_measurements_df.to_csv(device_file, index=False) files_created.append(device_file) self.logger.info(f"Exported {len(self.device_measurements_df)} device measurement records to {device_file}") # Export model measurements (already flattened) model_file = os.path.join(output_dir, f"model_measurements_{timestamp}.csv") self.model_measurements_df.to_csv(model_file, index=False) files_created.append(model_file) self.logger.info(f"Exported {len(self.model_measurements_df)} model measurement records to {model_file}") # Create comprehensive summary using pandas operations summary_file = os.path.join(output_dir, f"benchmark_summary_{timestamp}.csv") self._create_summary(summary_file) files_created.append(summary_file) def _create_summary(self, summary_file: str): """ Create a comprehensive summary CSV using pandas operations """ if len(self.benchmarks_df) == 0: # Create empty summary file summary_df = pd.DataFrame() summary_df.to_csv(summary_file, index=False) self.logger.info(f"Created empty benchmark summary at {summary_file}") return # Start with 
benchmarks as the base summary_df = self.benchmarks_df.copy() # Add model measurements (join on benchmark_id) if len(self.model_measurements_df) > 0: # Drop 'time' column from model measurements to avoid conflicts model_df = self.model_measurements_df.drop(columns=["time"], errors="ignore") summary_df = summary_df.merge(model_df, on="benchmark_id", how="left") # Calculate device measurement aggregates using pandas groupby if len(self.device_measurements_df) > 0: device_agg = ( self.device_measurements_df.groupby("benchmark_id") .agg( { "cpu_util": ["mean", "max", "std", "count"], "mem_megabytes": ["mean", "max", "std"], "gpu_util": ["mean", "max", "std"], "gpu_mem_megabytes": ["mean", "max", "std"], } ) .round(3) ) # Flatten column names device_agg.columns = [f"{col[0]}_{col[1]}" for col in device_agg.columns] device_agg = device_agg.reset_index() # Rename count column to be more descriptive if "cpu_util_count" in device_agg.columns: device_agg = device_agg.rename(columns={"cpu_util_count": "device_measurement_count"}) # Merge with summary summary_df = summary_df.merge(device_agg, on="benchmark_id", how="left") # Export the comprehensive summary summary_df.to_csv(summary_file, index=False) self.logger.info(f"Created comprehensive benchmark summary with {len(summary_df)} records at {summary_file}") def close(self): if self.use_database and self.conn: self.conn.close()
null
[ 0.008694285526871681, 0.015967058017849922, 0.005724742542952299, 0.01997293531894684, 0.027765804901719093, 0.022211536765098572, -0.03950253501534462, 0.020631322637200356, 0.019773414358496666, -0.004126252606511116, -0.016829635947942734, 0.02418738789856434, -0.023739317432045937, 0.0...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
function
__init__
41
101
def __init__( self, connection, logger: logging.Logger, repository: str, branch: str, commit_id: str, commit_msg: str, collect_csv_data: bool = True, ): self.conn = connection self.use_database = connection is not None if self.use_database: self.conn.autocommit = True self.logger = logger self.repository = repository self.branch = branch self.commit_id = commit_id self.commit_msg = commit_msg self.collect_csv_data = collect_csv_data # For CSV export - store all data in pandas DataFrames (only if CSV collection is enabled) if self.collect_csv_data: # Initialize empty DataFrames with proper schemas self.benchmarks_df = pd.DataFrame( columns=[ "benchmark_id", "repository", "branch", "commit_id", "commit_message", "metadata", "created_at", ] ) self.device_measurements_df = pd.DataFrame( columns=["benchmark_id", "cpu_util", "mem_megabytes", "gpu_util", "gpu_mem_megabytes", "time"] ) self.model_measurements_df = pd.DataFrame( columns=[ "benchmark_id", "time", "model_load_time", "first_eager_forward_pass_time_secs", "second_eager_forward_pass_time_secs", "first_eager_generate_time_secs", "second_eager_generate_time_secs", "time_to_first_token_secs", "time_to_second_token_secs", "time_to_third_token_secs", "time_to_next_token_mean_secs", "first_compile_generate_time_secs", "second_compile_generate_time_secs", "third_compile_generate_time_secs", "fourth_compile_generate_time_secs", ] ) else: self.benchmarks_df = None self.device_measurements_df = None self.model_measurements_df = None
null
[ 0.017283370718359947, 0.0068805404007434845, -0.0011268005473539233, 0.015035035088658333, 0.03065400756895542, 0.03644357621669769, -0.05171908065676689, 0.002703252015635371, 0.008114938624203205, -0.0027489298954606056, -0.027921799570322037, 0.01749047264456749, -0.02790527231991291, 0...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
function
initialise_benchmark
103
144
def initialise_benchmark(self, metadata: dict[str, str]) -> str: """ Creates a new benchmark, returns the benchmark id (UUID) """ # Generate a unique UUID for this benchmark benchmark_id = str(uuid.uuid4()) if self.use_database: with self.conn.cursor() as cur: cur.execute( "INSERT INTO benchmarks (benchmark_id, repository, branch, commit_id, commit_message, metadata) VALUES (%s, %s, %s, %s, %s, %s)", (benchmark_id, self.repository, self.branch, self.commit_id, self.commit_msg, metadata), ) self.logger.debug(f"initialised benchmark #{benchmark_id}") # Store benchmark data for CSV export (if enabled) if self.collect_csv_data: # Add row to pandas DataFrame new_row = pd.DataFrame( [ { "benchmark_id": benchmark_id, "repository": self.repository, "branch": self.branch, "commit_id": self.commit_id, "commit_message": self.commit_msg, "metadata": json.dumps(metadata), "created_at": datetime.utcnow().isoformat(), } ] ) self.benchmarks_df = pd.concat([self.benchmarks_df, new_row], ignore_index=True) mode_info = [] if self.use_database: mode_info.append("database") if self.collect_csv_data: mode_info.append("CSV") mode_str = " + ".join(mode_info) if mode_info else "no storage" self.logger.debug(f"initialised benchmark #{benchmark_id} ({mode_str} mode)") return benchmark_id
Creates a new benchmark, returns the benchmark id (UUID)
[ 0.029360275715589523, 0.013118775561451912, 0.005065503995865583, 0.03849106281995773, 0.021660974249243736, 0.05667739734053612, -0.05019465833902359, 0.025874603539705276, 0.011635730974376202, -0.019519876688718796, 0.02169799990952015, -0.027592647820711136, -0.04647168517112732, 0.048...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
function
collect_device_measurements
146
177
def collect_device_measurements(self, benchmark_id: str, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes): """ Collect device metrics, such as CPU & GPU usage. These are "static", as in you cannot pass arbitrary arguments to the function. """ # Store device measurements for CSV export (if enabled) if self.collect_csv_data: # Add row to pandas DataFrame new_row = pd.DataFrame( [ { "benchmark_id": benchmark_id, "cpu_util": cpu_util, "mem_megabytes": mem_megabytes, "gpu_util": gpu_util, "gpu_mem_megabytes": gpu_mem_megabytes, "time": datetime.utcnow().isoformat(), } ] ) self.device_measurements_df = pd.concat([self.device_measurements_df, new_row], ignore_index=True) # Store in database if available if self.use_database: with self.conn.cursor() as cur: cur.execute( "INSERT INTO device_measurements (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes) VALUES (%s, %s, %s, %s, %s)", (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes), ) self.logger.debug( f"collected device measurements for benchmark #{benchmark_id} [CPU util: {cpu_util}, mem MBs: {mem_megabytes}, GPU util: {gpu_util}, GPU mem MBs: {gpu_mem_megabytes}]" )
Collect device metrics, such as CPU & GPU usage. These are "static", as in you cannot pass arbitrary arguments to the function.
[ 0.023159004747867584, 0.0394592322409153, 0.022979486733675003, 0.037306006997823715, 0.018386706709861755, 0.02520129829645157, -0.059724777936935425, 0.013860123232007027, 0.00813355017453432, -0.006578096188604832, 0.04422362521290779, -0.031888198107481, -0.039132311940193176, 0.039687...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
function
collect_model_measurements
179
206
def collect_model_measurements(self, benchmark_id: str, measurements: dict[str, float]): # Store model measurements for CSV export (if enabled) if self.collect_csv_data: # Add row to pandas DataFrame with flattened measurements row_data = {"benchmark_id": benchmark_id, "time": datetime.utcnow().isoformat()} # Flatten the measurements dict into the row row_data.update(measurements) new_row = pd.DataFrame([row_data]) self.model_measurements_df = pd.concat([self.model_measurements_df, new_row], ignore_index=True) # Store in database if available if self.use_database: with self.conn.cursor() as cur: cur.execute( """ INSERT INTO model_measurements ( benchmark_id, measurements ) VALUES (%s, %s) """, ( benchmark_id, measurements, ), ) self.logger.debug(f"collected model measurements for benchmark #{benchmark_id}: {measurements}")
null
[ 0.04194438457489014, 0.03829462081193924, 0.023362068459391594, 0.034225545823574066, 0.02583235688507557, 0.055542778223752975, -0.04736415669322014, 0.013125963509082794, 0.003663780866190791, -0.050512947142124176, 0.05349886789917946, -0.02518615871667862, -0.04001324251294136, 0.01990...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
function
export_to_csv
208
226
def export_to_csv(self, output_dir: str = "benchmark_results"): """ Export all collected data to CSV files using pandas DataFrames """ if not self.collect_csv_data: self.logger.warning("CSV data collection is disabled - no CSV files will be generated") return if not os.path.exists(output_dir): os.makedirs(output_dir) self.logger.info(f"Created output directory: {output_dir}") timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") files_created = [] # Export using pandas DataFrames self._export_pandas_data(output_dir, timestamp, files_created) self.logger.info(f"CSV export complete! Created {len(files_created)} files in {output_dir}")
Export all collected data to CSV files using pandas DataFrames
[ 0.03275853767991066, -0.0009786784648895264, 0.024603266268968582, 0.02905936725437641, 0.036490414291620255, 0.007311932276934385, -0.08181025832891464, 0.024032030254602432, -0.023274894803762436, -0.01891208253800869, 0.002460715128108859, -0.00745326979085803, -0.03468770533800125, 0.0...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
function
_export_pandas_data
228
253
def _export_pandas_data(self, output_dir: str, timestamp: str, files_created: list): """ Export CSV files using pandas DataFrames """ # Export benchmarks benchmarks_file = os.path.join(output_dir, f"benchmarks_{timestamp}.csv") self.benchmarks_df.to_csv(benchmarks_file, index=False) files_created.append(benchmarks_file) self.logger.info(f"Exported {len(self.benchmarks_df)} benchmark records to {benchmarks_file}") # Export device measurements device_file = os.path.join(output_dir, f"device_measurements_{timestamp}.csv") self.device_measurements_df.to_csv(device_file, index=False) files_created.append(device_file) self.logger.info(f"Exported {len(self.device_measurements_df)} device measurement records to {device_file}") # Export model measurements (already flattened) model_file = os.path.join(output_dir, f"model_measurements_{timestamp}.csv") self.model_measurements_df.to_csv(model_file, index=False) files_created.append(model_file) self.logger.info(f"Exported {len(self.model_measurements_df)} model measurement records to {model_file}") # Create comprehensive summary using pandas operations summary_file = os.path.join(output_dir, f"benchmark_summary_{timestamp}.csv") self._create_summary(summary_file) files_created.append(summary_file)
Export CSV files using pandas DataFrames
[ 0.01819860376417637, 0.020771557465195656, 0.013791389763355255, 0.02383110299706459, 0.029595892876386642, 0.04015464708209038, -0.09756304323673248, 0.005118447355926037, -0.02029913105070591, -0.021057656034827232, -0.013252923265099525, -0.033098045736551285, -0.056146685034036636, 0.0...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
function
_create_summary
255
303
def _create_summary(self, summary_file: str): """ Create a comprehensive summary CSV using pandas operations """ if len(self.benchmarks_df) == 0: # Create empty summary file summary_df = pd.DataFrame() summary_df.to_csv(summary_file, index=False) self.logger.info(f"Created empty benchmark summary at {summary_file}") return # Start with benchmarks as the base summary_df = self.benchmarks_df.copy() # Add model measurements (join on benchmark_id) if len(self.model_measurements_df) > 0: # Drop 'time' column from model measurements to avoid conflicts model_df = self.model_measurements_df.drop(columns=["time"], errors="ignore") summary_df = summary_df.merge(model_df, on="benchmark_id", how="left") # Calculate device measurement aggregates using pandas groupby if len(self.device_measurements_df) > 0: device_agg = ( self.device_measurements_df.groupby("benchmark_id") .agg( { "cpu_util": ["mean", "max", "std", "count"], "mem_megabytes": ["mean", "max", "std"], "gpu_util": ["mean", "max", "std"], "gpu_mem_megabytes": ["mean", "max", "std"], } ) .round(3) ) # Flatten column names device_agg.columns = [f"{col[0]}_{col[1]}" for col in device_agg.columns] device_agg = device_agg.reset_index() # Rename count column to be more descriptive if "cpu_util_count" in device_agg.columns: device_agg = device_agg.rename(columns={"cpu_util_count": "device_measurement_count"}) # Merge with summary summary_df = summary_df.merge(device_agg, on="benchmark_id", how="left") # Export the comprehensive summary summary_df.to_csv(summary_file, index=False) self.logger.info(f"Created comprehensive benchmark summary with {len(summary_df)} records at {summary_file}")
Create a comprehensive summary CSV using pandas operations
[ 0.0010054168524220586, -0.009801075793802738, 0.010145257227122784, 0.007295302581042051, 0.02989283949136734, 0.029903437942266464, -0.062316279858350754, 0.01900031417608261, -0.01651553250849247, -0.023490000516176224, 0.022027555853128433, -0.017283733934164047, -0.05260699614882469, 0...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
function
close
305
307
def close(self): if self.use_database and self.conn: self.conn.close()
null
[ -0.012645638547837734, 0.052728816866874695, 0.01223049871623516, -0.022947821766138077, 0.034942056983709335, 0.016765335574746132, -0.025307120755314827, -0.0281510166823864, -0.011912310495972633, -0.0005703065544366837, -0.04602914676070213, 0.03912098705768585, -0.014697831124067307, ...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
function
parse_arguments
320
364
def parse_arguments() -> tuple[str, str, str, str, bool, str]: """ Parse command line arguments for the benchmarking CLI. """ parser = argparse.ArgumentParser(description="CLI for benchmarking the huggingface/transformers.") parser.add_argument( "repository", type=str, help="The repository name on which the benchmarking is performed.", ) parser.add_argument( "branch", type=str, help="The branch name on which the benchmarking is performed.", ) parser.add_argument( "commit_id", type=str, help="The commit hash on which the benchmarking is performed.", ) parser.add_argument( "commit_msg", type=str, help="The commit message associated with the commit, truncated to 70 characters.", ) parser.add_argument("--csv", action="store_true", default=False, help="Enable CSV output files generation.") parser.add_argument( "--csv-output-dir", type=str, default="benchmark_results", help="Directory for CSV output files (default: benchmark_results).", ) args = parser.parse_args() # CSV is disabled by default, only enabled when --csv is used generate_csv = args.csv return args.repository, args.branch, args.commit_id, args.commit_msg, generate_csv, args.csv_output_dir
Parse command line arguments for the benchmarking CLI.
[ 0.017497960478067398, 0.016872957348823547, 0.02130076475441456, 0.00019930617418140173, -0.027973858639597893, 0.074654720723629, -0.05193706601858139, -0.003305753692984581, 0.011043662205338478, 0.02734459936618805, -0.031255077570676804, 0.02865077555179596, -0.030622851103544235, 0.02...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
function
import_from_path
367
375
def import_from_path(module_name, file_path): try: spec = importlib.util.spec_from_file_location(module_name, file_path) module = importlib.util.module_from_spec(spec) sys.modules[module_name] = module spec.loader.exec_module(module) return module except Exception as e: raise ImportModuleException(f"failed to load python module: {e}")
null
[ 0.04814441502094269, -0.012820378877222538, -0.04276156425476074, 0.016470514237880707, 0.01263735257089138, 0.014522255398333073, -0.057431288063526154, -0.02535339817404747, 0.011193382553756237, -0.02715764380991459, -0.05316155403852463, -0.0024706085678189993, -0.009507360868155956, 0...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
function
create_database_connection
378
394
def create_database_connection(): """ Try to create a database connection. Returns None if connection fails. """ if not PSYCOPG2_AVAILABLE: logger.warning("psycopg2 not available - running in CSV-only mode") return None try: import psycopg2 conn = psycopg2.connect("dbname=metrics") logger.info("Successfully connected to database") return conn except Exception as e: logger.warning(f"Failed to connect to database: {e}. Running in CSV-only mode") return None
Try to create a database connection. Returns None if connection fails.
[ 0.016297345981001854, 0.015240315347909927, -0.043378796428442, -0.002504634903743863, 0.04668141528964043, 0.038146212697029114, -0.016042770817875862, 0.014693251810967922, -0.0049621243961155415, -0.024941472336649895, -0.016908112913370132, -0.01611418090760708, -0.03968065604567528, 0...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
function
create_global_metrics_recorder
397
419
def create_global_metrics_recorder( repository: str, branch: str, commit_id: str, commit_msg: str, generate_csv: bool = False ) -> MetricsRecorder: """ Create a global metrics recorder that will be used across all benchmarks. """ connection = create_database_connection() recorder = MetricsRecorder(connection, logger, repository, branch, commit_id, commit_msg, generate_csv) # Log the storage mode storage_modes = [] if connection is not None: storage_modes.append("database") if generate_csv: storage_modes.append("CSV") if not storage_modes: logger.warning("Running benchmarks with NO data storage (no database connection, CSV disabled)") logger.warning("Use --csv flag to enable CSV output when database is unavailable") else: logger.info(f"Running benchmarks with: {' + '.join(storage_modes)} storage") return recorder
Create a global metrics recorder that will be used across all benchmarks.
[ -0.0062996926717460155, -0.0008421704987995327, 0.02956637553870678, 0.02909734472632408, 0.02205200493335724, 0.04791173338890076, -0.022021548822522163, 0.04983392357826233, 0.0022903375793248415, -0.02854972332715988, 0.010536757297813892, 0.00647822255268693, -0.02986852265894413, 0.06...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_1-50
1
50
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import importlib.util import json import logging import os import sys import uuid from datetime import datetime import pandas as pd try: from psycopg2.extensions import register_adapter from psycopg2.extras import Json register_adapter(dict, Json) PSYCOPG2_AVAILABLE = True except ImportError: PSYCOPG2_AVAILABLE = False class ImportModuleException(Exception): pass class MetricsRecorder: def __init__( self, connection, logger: logging.Logger, repository: str, branch: str, commit_id: str, commit_msg: str, collect_csv_data: bool = True, ):
null
[ -0.021062105894088745, -0.014424330554902554, -0.0007810659590177238, -0.024477021768689156, -0.0029447246342897415, 0.06757794320583344, -0.021213069558143616, 0.0037636859342455864, -0.00036637784796766937, 0.0037746764719486237, -0.07544974982738495, -0.04524252191185951, -0.0363292954862...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_41-90
41
90
def __init__( self, connection, logger: logging.Logger, repository: str, branch: str, commit_id: str, commit_msg: str, collect_csv_data: bool = True, ): self.conn = connection self.use_database = connection is not None if self.use_database: self.conn.autocommit = True self.logger = logger self.repository = repository self.branch = branch self.commit_id = commit_id self.commit_msg = commit_msg self.collect_csv_data = collect_csv_data # For CSV export - store all data in pandas DataFrames (only if CSV collection is enabled) if self.collect_csv_data: # Initialize empty DataFrames with proper schemas self.benchmarks_df = pd.DataFrame( columns=[ "benchmark_id", "repository", "branch", "commit_id", "commit_message", "metadata", "created_at", ] ) self.device_measurements_df = pd.DataFrame( columns=["benchmark_id", "cpu_util", "mem_megabytes", "gpu_util", "gpu_mem_megabytes", "time"] ) self.model_measurements_df = pd.DataFrame( columns=[ "benchmark_id", "time", "model_load_time", "first_eager_forward_pass_time_secs", "second_eager_forward_pass_time_secs", "first_eager_generate_time_secs", "second_eager_generate_time_secs", "time_to_first_token_secs", "time_to_second_token_secs", "time_to_third_token_secs",
null
[ 0.017711404711008072, 0.0041735186241567135, -0.0016314525855705142, 0.014945424161851406, 0.03183572366833687, 0.036731187254190445, -0.05946919322013855, 0.0032615431118756533, 0.004796238616108894, -0.006301076151430607, -0.027340032160282135, 0.012181258760392666, -0.02497803047299385, ...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_81-130
81
130
"benchmark_id", "time", "model_load_time", "first_eager_forward_pass_time_secs", "second_eager_forward_pass_time_secs", "first_eager_generate_time_secs", "second_eager_generate_time_secs", "time_to_first_token_secs", "time_to_second_token_secs", "time_to_third_token_secs", "time_to_next_token_mean_secs", "first_compile_generate_time_secs", "second_compile_generate_time_secs", "third_compile_generate_time_secs", "fourth_compile_generate_time_secs", ] ) else: self.benchmarks_df = None self.device_measurements_df = None self.model_measurements_df = None def initialise_benchmark(self, metadata: dict[str, str]) -> str: """ Creates a new benchmark, returns the benchmark id (UUID) """ # Generate a unique UUID for this benchmark benchmark_id = str(uuid.uuid4()) if self.use_database: with self.conn.cursor() as cur: cur.execute( "INSERT INTO benchmarks (benchmark_id, repository, branch, commit_id, commit_message, metadata) VALUES (%s, %s, %s, %s, %s, %s)", (benchmark_id, self.repository, self.branch, self.commit_id, self.commit_msg, metadata), ) self.logger.debug(f"initialised benchmark #{benchmark_id}") # Store benchmark data for CSV export (if enabled) if self.collect_csv_data: # Add row to pandas DataFrame new_row = pd.DataFrame( [ { "benchmark_id": benchmark_id, "repository": self.repository, "branch": self.branch, "commit_id": self.commit_id, "commit_message": self.commit_msg, "metadata": json.dumps(metadata), "created_at": datetime.utcnow().isoformat(),
null
[ 0.014735515229403973, 0.0012112378608435392, 0.014619716443121433, 0.028658611699938774, 0.015364539809525013, 0.04803289473056793, -0.06613773107528687, 0.009837644174695015, 0.015617093071341515, -0.01457142923027277, 0.01829368621110916, -0.017079787328839302, -0.029957478865981102, 0.0...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_121-170
121
170
new_row = pd.DataFrame( [ { "benchmark_id": benchmark_id, "repository": self.repository, "branch": self.branch, "commit_id": self.commit_id, "commit_message": self.commit_msg, "metadata": json.dumps(metadata), "created_at": datetime.utcnow().isoformat(), } ] ) self.benchmarks_df = pd.concat([self.benchmarks_df, new_row], ignore_index=True) mode_info = [] if self.use_database: mode_info.append("database") if self.collect_csv_data: mode_info.append("CSV") mode_str = " + ".join(mode_info) if mode_info else "no storage" self.logger.debug(f"initialised benchmark #{benchmark_id} ({mode_str} mode)") return benchmark_id def collect_device_measurements(self, benchmark_id: str, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes): """ Collect device metrics, such as CPU & GPU usage. These are "static", as in you cannot pass arbitrary arguments to the function. """ # Store device measurements for CSV export (if enabled) if self.collect_csv_data: # Add row to pandas DataFrame new_row = pd.DataFrame( [ { "benchmark_id": benchmark_id, "cpu_util": cpu_util, "mem_megabytes": mem_megabytes, "gpu_util": gpu_util, "gpu_mem_megabytes": gpu_mem_megabytes, "time": datetime.utcnow().isoformat(), } ] ) self.device_measurements_df = pd.concat([self.device_measurements_df, new_row], ignore_index=True) # Store in database if available if self.use_database: with self.conn.cursor() as cur: cur.execute(
null
[ 0.008969052694737911, 0.017428096383810043, 0.022808702662587166, 0.03946952149271965, 0.025932850316166878, 0.015641575679183006, -0.06931862235069275, 0.011581637896597385, -0.009512954391539097, -0.027197351679205894, 0.030236147344112396, -0.03771767020225525, -0.04885007441043854, 0.0...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_161-210
161
210
"time": datetime.utcnow().isoformat(), } ] ) self.device_measurements_df = pd.concat([self.device_measurements_df, new_row], ignore_index=True) # Store in database if available if self.use_database: with self.conn.cursor() as cur: cur.execute( "INSERT INTO device_measurements (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes) VALUES (%s, %s, %s, %s, %s)", (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes), ) self.logger.debug( f"collected device measurements for benchmark #{benchmark_id} [CPU util: {cpu_util}, mem MBs: {mem_megabytes}, GPU util: {gpu_util}, GPU mem MBs: {gpu_mem_megabytes}]" ) def collect_model_measurements(self, benchmark_id: str, measurements: dict[str, float]): # Store model measurements for CSV export (if enabled) if self.collect_csv_data: # Add row to pandas DataFrame with flattened measurements row_data = {"benchmark_id": benchmark_id, "time": datetime.utcnow().isoformat()} # Flatten the measurements dict into the row row_data.update(measurements) new_row = pd.DataFrame([row_data]) self.model_measurements_df = pd.concat([self.model_measurements_df, new_row], ignore_index=True) # Store in database if available if self.use_database: with self.conn.cursor() as cur: cur.execute( """ INSERT INTO model_measurements ( benchmark_id, measurements ) VALUES (%s, %s) """, ( benchmark_id, measurements, ), ) self.logger.debug(f"collected model measurements for benchmark #{benchmark_id}: {measurements}") def export_to_csv(self, output_dir: str = "benchmark_results"): """ Export all collected data to CSV files using pandas DataFrames
null
[ 0.023678431287407875, 0.025894872844219208, 0.015543116256594658, 0.03440757095813751, 0.04176205024123192, 0.04361122474074364, -0.056690990924835205, 0.013381056487560272, 0.004350495059043169, -0.039729438722133636, 0.051821086555719376, -0.034484658390283585, -0.04494944587349892, 0.02...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_201-250
201
250
benchmark_id, measurements, ), ) self.logger.debug(f"collected model measurements for benchmark #{benchmark_id}: {measurements}") def export_to_csv(self, output_dir: str = "benchmark_results"): """ Export all collected data to CSV files using pandas DataFrames """ if not self.collect_csv_data: self.logger.warning("CSV data collection is disabled - no CSV files will be generated") return if not os.path.exists(output_dir): os.makedirs(output_dir) self.logger.info(f"Created output directory: {output_dir}") timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") files_created = [] # Export using pandas DataFrames self._export_pandas_data(output_dir, timestamp, files_created) self.logger.info(f"CSV export complete! Created {len(files_created)} files in {output_dir}") def _export_pandas_data(self, output_dir: str, timestamp: str, files_created: list): """ Export CSV files using pandas DataFrames """ # Export benchmarks benchmarks_file = os.path.join(output_dir, f"benchmarks_{timestamp}.csv") self.benchmarks_df.to_csv(benchmarks_file, index=False) files_created.append(benchmarks_file) self.logger.info(f"Exported {len(self.benchmarks_df)} benchmark records to {benchmarks_file}") # Export device measurements device_file = os.path.join(output_dir, f"device_measurements_{timestamp}.csv") self.device_measurements_df.to_csv(device_file, index=False) files_created.append(device_file) self.logger.info(f"Exported {len(self.device_measurements_df)} device measurement records to {device_file}") # Export model measurements (already flattened) model_file = os.path.join(output_dir, f"model_measurements_{timestamp}.csv") self.model_measurements_df.to_csv(model_file, index=False) files_created.append(model_file) self.logger.info(f"Exported {len(self.model_measurements_df)} model measurement records to {model_file}") # Create comprehensive summary using pandas operations
null
[ 0.019224466755986214, 0.011724358424544334, 0.011800957843661308, 0.02257268689572811, 0.03772973641753197, 0.016905365511775017, -0.058647576719522476, 0.020533662289381027, -0.006040878593921661, -0.02098933421075344, 0.005558081436902285, -0.009285572916269302, -0.03828142583370209, 0.0...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_241-290
241
290
files_created.append(device_file) self.logger.info(f"Exported {len(self.device_measurements_df)} device measurement records to {device_file}") # Export model measurements (already flattened) model_file = os.path.join(output_dir, f"model_measurements_{timestamp}.csv") self.model_measurements_df.to_csv(model_file, index=False) files_created.append(model_file) self.logger.info(f"Exported {len(self.model_measurements_df)} model measurement records to {model_file}") # Create comprehensive summary using pandas operations summary_file = os.path.join(output_dir, f"benchmark_summary_{timestamp}.csv") self._create_summary(summary_file) files_created.append(summary_file) def _create_summary(self, summary_file: str): """ Create a comprehensive summary CSV using pandas operations """ if len(self.benchmarks_df) == 0: # Create empty summary file summary_df = pd.DataFrame() summary_df.to_csv(summary_file, index=False) self.logger.info(f"Created empty benchmark summary at {summary_file}") return # Start with benchmarks as the base summary_df = self.benchmarks_df.copy() # Add model measurements (join on benchmark_id) if len(self.model_measurements_df) > 0: # Drop 'time' column from model measurements to avoid conflicts model_df = self.model_measurements_df.drop(columns=["time"], errors="ignore") summary_df = summary_df.merge(model_df, on="benchmark_id", how="left") # Calculate device measurement aggregates using pandas groupby if len(self.device_measurements_df) > 0: device_agg = ( self.device_measurements_df.groupby("benchmark_id") .agg( { "cpu_util": ["mean", "max", "std", "count"], "mem_megabytes": ["mean", "max", "std"], "gpu_util": ["mean", "max", "std"], "gpu_mem_megabytes": ["mean", "max", "std"], } ) .round(3) ) # Flatten column names
null
[ 0.014381919987499714, 0.0006920935702510178, 0.02662508562207222, 0.0028566692490130663, 0.03910576179623604, 0.02877449057996273, -0.06472790986299515, 0.006901935208588839, -0.02075066976249218, -0.034794002771377563, 0.03460955619812012, -0.02263317070901394, -0.04410409554839134, 0.044...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_281-330
281
330
"cpu_util": ["mean", "max", "std", "count"], "mem_megabytes": ["mean", "max", "std"], "gpu_util": ["mean", "max", "std"], "gpu_mem_megabytes": ["mean", "max", "std"], } ) .round(3) ) # Flatten column names device_agg.columns = [f"{col[0]}_{col[1]}" for col in device_agg.columns] device_agg = device_agg.reset_index() # Rename count column to be more descriptive if "cpu_util_count" in device_agg.columns: device_agg = device_agg.rename(columns={"cpu_util_count": "device_measurement_count"}) # Merge with summary summary_df = summary_df.merge(device_agg, on="benchmark_id", how="left") # Export the comprehensive summary summary_df.to_csv(summary_file, index=False) self.logger.info(f"Created comprehensive benchmark summary with {len(summary_df)} records at {summary_file}") def close(self): if self.use_database and self.conn: self.conn.close() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.INFO) formatter = logging.Formatter("[%(levelname)s - %(asctime)s] %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) def parse_arguments() -> tuple[str, str, str, str, bool, str]: """ Parse command line arguments for the benchmarking CLI. """ parser = argparse.ArgumentParser(description="CLI for benchmarking the huggingface/transformers.") parser.add_argument( "repository", type=str, help="The repository name on which the benchmarking is performed.", )
null
[ -0.015881385654211044, -0.012778548523783684, 0.03989246115088463, -0.012133441865444183, 0.02357024885714054, 0.04867453873157501, -0.06734713912010193, -0.006498313508927822, -0.00024307081184815615, -0.004240423906594515, 0.036275990307331085, -0.03505629301071167, -0.0424962192773819, ...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_321-370
321
370
""" Parse command line arguments for the benchmarking CLI. """ parser = argparse.ArgumentParser(description="CLI for benchmarking the huggingface/transformers.") parser.add_argument( "repository", type=str, help="The repository name on which the benchmarking is performed.", ) parser.add_argument( "branch", type=str, help="The branch name on which the benchmarking is performed.", ) parser.add_argument( "commit_id", type=str, help="The commit hash on which the benchmarking is performed.", ) parser.add_argument( "commit_msg", type=str, help="The commit message associated with the commit, truncated to 70 characters.", ) parser.add_argument("--csv", action="store_true", default=False, help="Enable CSV output files generation.") parser.add_argument( "--csv-output-dir", type=str, default="benchmark_results", help="Directory for CSV output files (default: benchmark_results).", ) args = parser.parse_args() # CSV is disabled by default, only enabled when --csv is used generate_csv = args.csv return args.repository, args.branch, args.commit_id, args.commit_msg, generate_csv, args.csv_output_dir def import_from_path(module_name, file_path): try: spec = importlib.util.spec_from_file_location(module_name, file_path) module = importlib.util.module_from_spec(spec)
null
[ 0.018684333190321922, -0.006143227685242891, 0.011610149405896664, 0.000216627013287507, -0.022635744884610176, 0.07033980637788773, -0.053336698561906815, -0.0027672552969306707, 0.012249325402081013, 0.008991978131234646, -0.02111753821372986, 0.009120797738432884, -0.017819462344050407, ...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_361-410
361
410
# CSV is disabled by default, only enabled when --csv is used generate_csv = args.csv return args.repository, args.branch, args.commit_id, args.commit_msg, generate_csv, args.csv_output_dir def import_from_path(module_name, file_path): try: spec = importlib.util.spec_from_file_location(module_name, file_path) module = importlib.util.module_from_spec(spec) sys.modules[module_name] = module spec.loader.exec_module(module) return module except Exception as e: raise ImportModuleException(f"failed to load python module: {e}") def create_database_connection(): """ Try to create a database connection. Returns None if connection fails. """ if not PSYCOPG2_AVAILABLE: logger.warning("psycopg2 not available - running in CSV-only mode") return None try: import psycopg2 conn = psycopg2.connect("dbname=metrics") logger.info("Successfully connected to database") return conn except Exception as e: logger.warning(f"Failed to connect to database: {e}. Running in CSV-only mode") return None def create_global_metrics_recorder( repository: str, branch: str, commit_id: str, commit_msg: str, generate_csv: bool = False ) -> MetricsRecorder: """ Create a global metrics recorder that will be used across all benchmarks. """ connection = create_database_connection() recorder = MetricsRecorder(connection, logger, repository, branch, commit_id, commit_msg, generate_csv) # Log the storage mode storage_modes = [] if connection is not None: storage_modes.append("database") if generate_csv:
null
[ 0.011723248288035393, 0.003245830535888672, -0.01472481433302164, 0.03338159993290901, 0.037017080932855606, 0.009181274101138115, -0.050886236131191254, 0.03773878142237663, 0.013408021070063114, -0.02792607992887497, -0.02083652652800083, -0.011896357871592045, -0.029307197779417038, 0.0...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_401-450
401
450
Create a global metrics recorder that will be used across all benchmarks. """ connection = create_database_connection() recorder = MetricsRecorder(connection, logger, repository, branch, commit_id, commit_msg, generate_csv) # Log the storage mode storage_modes = [] if connection is not None: storage_modes.append("database") if generate_csv: storage_modes.append("CSV") if not storage_modes: logger.warning("Running benchmarks with NO data storage (no database connection, CSV disabled)") logger.warning("Use --csv flag to enable CSV output when database is unavailable") else: logger.info(f"Running benchmarks with: {' + '.join(storage_modes)} storage") return recorder if __name__ == "__main__": benchmarks_folder_path = os.path.dirname(os.path.realpath(__file__)) benches_folder_path = os.path.join(benchmarks_folder_path, "benches") repository, branch, commit_id, commit_msg, generate_csv, csv_output_dir = parse_arguments() # Create a global metrics recorder global_metrics_recorder = create_global_metrics_recorder(repository, branch, commit_id, commit_msg, generate_csv) successful_benchmarks = 0 failed_benchmarks = 0 # Automatically discover all benchmark modules in benches/ folder benchmark_modules = [] if os.path.exists(benches_folder_path): logger.debug(f"Scanning for benchmarks in: {benches_folder_path}") for entry in os.scandir(benches_folder_path): if not entry.name.endswith(".py"): continue if entry.name.startswith("__"): # Skip __init__.py, __pycache__, etc. continue # Check if the file has a run_benchmark function try: logger.debug(f"checking if benches/{entry.name} has run_benchmark function") module = import_from_path(entry.name.split(".")[0], entry.path) if hasattr(module, "run_benchmark"): benchmark_modules.append(entry.name)
null
[ 0.0007174232741817832, 0.00982474721968174, 0.018888290971517563, 0.04023450240492821, 0.02192861959338188, 0.035891998559236526, -0.028393594548106194, 0.02836161106824875, -0.016562921926379204, -0.06256970763206482, 0.01855567656457424, -0.017532823607325554, -0.02132343500852585, 0.052...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_441-490
441
490
continue if entry.name.startswith("__"): # Skip __init__.py, __pycache__, etc. continue # Check if the file has a run_benchmark function try: logger.debug(f"checking if benches/{entry.name} has run_benchmark function") module = import_from_path(entry.name.split(".")[0], entry.path) if hasattr(module, "run_benchmark"): benchmark_modules.append(entry.name) logger.debug(f"discovered benchmark: {entry.name}") else: logger.debug(f"skipping {entry.name} - no run_benchmark function found") except Exception as e: logger.debug(f"failed to check benches/{entry.name}: {e}") else: logger.warning(f"Benches directory not found: {benches_folder_path}") if benchmark_modules: logger.info(f"Discovered {len(benchmark_modules)} benchmark(s): {benchmark_modules}") else: logger.warning("No benchmark modules found in benches/ directory") for module_name in benchmark_modules: module_path = os.path.join(benches_folder_path, module_name) try: logger.debug(f"loading: {module_name}") module = import_from_path(module_name.split(".")[0], module_path) logger.info(f"running benchmarks in: {module_name}") # Check if the module has an updated run_benchmark function that accepts metrics_recorder try: # Try the new signature first module.run_benchmark(logger, repository, branch, commit_id, commit_msg, global_metrics_recorder) except TypeError: # Fall back to the old signature for backward compatibility logger.warning( f"Module {module_name} using old run_benchmark signature - database connection will be created per module" ) module.run_benchmark(logger, repository, branch, commit_id, commit_msg) successful_benchmarks += 1 except ImportModuleException as e: logger.error(e) failed_benchmarks += 1 except Exception as e: logger.error(f"error running benchmarks for {module_name}: {e}") failed_benchmarks += 1 # Export CSV results at the end (if enabled)
null
[ 0.024649344384670258, -0.00603968370705843, -0.026207247748970985, 0.018985938280820847, 0.007834629155695438, 0.024220947176218033, -0.015755262225866318, -0.020868835970759392, -0.019664796069264412, -0.048938993364572525, 0.0016542276134714484, 0.0010421016486361623, -0.009333650581538677...
Snowflake/snowflake-arctic-embed-m
benchmarks_entrypoint.py
benchmark/benchmarks_entrypoint.py
consecutive_lines
lines_481-502
481
502
successful_benchmarks += 1 except ImportModuleException as e: logger.error(e) failed_benchmarks += 1 except Exception as e: logger.error(f"error running benchmarks for {module_name}: {e}") failed_benchmarks += 1 # Export CSV results at the end (if enabled) try: if generate_csv: global_metrics_recorder.export_to_csv(csv_output_dir) logger.info(f"CSV reports have been generated and saved to the {csv_output_dir} directory") else: logger.info("CSV generation disabled - no CSV files created (use --csv to enable)") logger.info(f"Benchmark run completed. Successful: {successful_benchmarks}, Failed: {failed_benchmarks}") except Exception as e: logger.error(f"Failed to export CSV results: {e}") finally: global_metrics_recorder.close()
null
[ 0.008198205381631851, 0.009278691373765469, 0.016326753422617912, -0.03309663385152817, 0.03369678184390068, 0.0037660773377865553, -0.02126617729663849, 0.011247134767472744, -0.029795199632644653, -0.05214228853583336, 0.018863264471292496, -0.005580400116741657, -0.012691563926637173, 0...
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
function
collect_metrics
52
64
def collect_metrics(benchmark_id, continue_metric_collection, metrics_recorder): p = psutil.Process(os.getpid()) while not continue_metric_collection.is_set(): with p.oneshot(): cpu_util = p.cpu_percent() mem_megabytes = p.memory_info().rss / (1024 * 1024) gpu_stats = gpustat.GPUStatCollection.new_query() gpu_util = gpu_stats[0]["utilization.gpu"] gpu_mem_megabytes = gpu_stats[0]["memory.used"] metrics_recorder.collect_device_measurements( benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes ) sleep(0.01)
null
[ 0.020633313804864883, 0.022031765431165695, 0.004411373753100634, 0.030387042090296745, 0.014625882729887962, 0.027647795155644417, -0.05545102804899216, 0.04227292165160179, -0.022372467443346977, -0.030897460877895355, 0.030104901641607285, -0.0046200137585401535, -0.013005988672375679, ...
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
function
run_benchmark
67
353
def run_benchmark( logger: Logger, repository: str, branch: str, commit_id: str, commit_msg: str, metrics_recorder=None, num_tokens_to_generate=100, ): # Check if required ML dependencies are available if not TRANSFORMERS_AVAILABLE: logger.error("Transformers and torch are required to run the LLaMA benchmark. Please install them with:") logger.error("pip install torch transformers") logger.error("Skipping LLaMA benchmark due to missing dependencies.") return continue_metric_collection = Event() metrics_thread = None model_id = "meta-llama/Llama-2-7b-hf" # If no metrics_recorder is provided, create one for backward compatibility if metrics_recorder is None: try: metrics_recorder = MetricsRecorder( psycopg2.connect("dbname=metrics"), logger, repository, branch, commit_id, commit_msg, True ) should_close_recorder = True except Exception as e: logger.error(f"Failed to create metrics recorder: {e}") return else: should_close_recorder = False try: gpu_stats = gpustat.GPUStatCollection.new_query() gpu_name = gpu_stats[0]["name"] benchmark_id = metrics_recorder.initialise_benchmark({"gpu_name": gpu_name, "model_id": model_id}) logger.info(f"running benchmark #{benchmark_id} on {gpu_name} for {model_id}") metrics_thread = Thread( target=collect_metrics, args=[benchmark_id, continue_metric_collection, metrics_recorder], ) metrics_thread.start() logger.info("started background thread to fetch device metrics") os.environ["TOKENIZERS_PARALLELISM"] = "false" # silence warnings when compiling device = "cuda" logger.info("downloading weights") # This is to avoid counting download in model load time measurement model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16) gen_config = GenerationConfig(do_sample=False, top_p=1, temperature=1) logger.info("loading model") start = perf_counter() model = AutoModelForCausalLM.from_pretrained( model_id, dtype=torch.float16, generation_config=gen_config ).eval() model.to(device) torch.cuda.synchronize() end = perf_counter() 
model_load_time = end - start logger.info(f"loaded model in: {model_load_time}s") tokenizer = AutoTokenizer.from_pretrained(model_id) prompt = "Why dogs are so cute?" inputs = tokenizer(prompt, return_tensors="pt").to(device) # Specify the max length (including both the prompt and the response) # When calling `generate` with `cache_implementation="static" later, this is also used to create a `StaticCache` object # with sequence length = `max_length`. The longer the more you will re-use it seq_length = inputs["input_ids"].shape[1] model.generation_config.max_length = seq_length + num_tokens_to_generate batch_size = inputs["input_ids"].shape[0] # Copied from the gpt-fast repo def multinomial_sample_one_no_sync(probs_sort): # Does multinomial sampling without a cuda synchronization q = torch.empty_like(probs_sort).exponential_(1) return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int) def logits_to_probs(logits, temperature: float = 1.0, top_k: int | None = None): logits = logits / max(temperature, 1e-5) if top_k is not None: v, _ = torch.topk(logits, min(top_k, logits.size(-1))) pivot = v.select(-1, -1).unsqueeze(-1) logits = torch.where(logits < pivot, -float("Inf"), logits) probs = torch.nn.functional.softmax(logits, dim=-1) return probs def sample(logits, temperature: float = 1.0, top_k: int | None = None): probs = logits_to_probs(logits[0, -1], temperature, top_k) idx_next = multinomial_sample_one_no_sync(probs) return idx_next, probs # First eager forward pass logger.info("running first eager forward pass") start = perf_counter() _ = model(**inputs) torch.cuda.synchronize() end = perf_counter() first_eager_fwd_pass_time = end - start logger.info(f"completed first eager forward pass in: {first_eager_fwd_pass_time}s") # Second eager forward pass (should be faster) logger.info("running second eager forward pass") start = perf_counter() _ = model(**inputs) torch.cuda.synchronize() end = perf_counter() second_eager_fwd_pass_time = end - start 
logger.info(f"completed second eager forward pass in: {second_eager_fwd_pass_time}s") # First eager generation logger.info("running first eager generation") start = perf_counter() output = model.generate(**inputs) torch.cuda.synchronize() end = perf_counter() first_eager_generate_time = end - start logger.info(f"completed first eager generation in: {first_eager_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") # Second eager generation (should be faster) logger.info("running second eager generation") start = perf_counter() output = model.generate(**inputs) torch.cuda.synchronize() end = perf_counter() second_eager_generate_time = end - start logger.info(f"completed second eager generation in: {second_eager_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") logger.info("running generation timing loop") input_pos = torch.arange(0, seq_length, device=device) inputs = inputs["input_ids"] start = perf_counter() with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): logits = model(inputs, position_ids=input_pos).logits next_token, probs = sample(logits, temperature=0.6, top_k=5) torch.cuda.synchronize() end = perf_counter() time_to_first_token = end - start input_pos = torch.tensor([seq_length], device=device, dtype=torch.int) next_token = next_token.clone() start = perf_counter() with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): logits = model(next_token, position_ids=input_pos).logits next_token, probs = sample(logits, temperature=0.6, top_k=5) torch.cuda.synchronize() end = perf_counter() time_to_second_token = end - start input_pos = torch.tensor([seq_length + 1], device=device, dtype=torch.int) next_token = next_token.clone() start = perf_counter() with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): logits = model(next_token, position_ids=input_pos).logits next_token, probs = sample(logits, temperature=0.6, top_k=5) 
torch.cuda.synchronize() end = perf_counter() time_to_third_token = end - start logger.info("running longer generation timing loop") total_time = 0 for i in range(20): input_pos = torch.tensor([seq_length + 2 + i], device=device, dtype=torch.int) next_token = next_token.clone() start = perf_counter() with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): logits = model(next_token, position_ids=input_pos).logits next_token, probs = sample(logits, temperature=0.6, top_k=5) torch.cuda.synchronize() end = perf_counter() total_time += end - start mean_time_to_next_token = total_time / 20 logger.info("running compilation benchmarks") # Now compile the model model = torch.compile(model, mode="max-autotune", fullgraph=True) # StaticCache for generation with torch.device(device): model.setup_caches(max_batch_size=batch_size, max_seq_len=seq_length + num_tokens_to_generate) input_pos = torch.arange(0, seq_length, device=device) inputs = tokenizer(prompt, return_tensors="pt").to(device)["input_ids"] logger.info("compiling model") model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16, generation_config=gen_config) model.to(device) model = torch.compile(model, mode="max-autotune", fullgraph=True) past_key_values = StaticCache( model.config, max_batch_size=batch_size, device=device, dtype=torch.float16, max_cache_len=seq_length + 128, ) # 1st call start = perf_counter() output = model.generate(**inputs, past_key_values=past_key_values) end = perf_counter() first_compile_generate_time = end - start logger.info(f"completed first compile generation in: {first_compile_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") past_key_values = StaticCache( model.config, max_batch_size=batch_size, device=device, dtype=torch.float16, max_cache_len=seq_length + 128, ) # 2nd call start = perf_counter() output = model.generate(**inputs, past_key_values=past_key_values) end = perf_counter() 
second_compile_generate_time = end - start logger.info(f"completed second compile generation in: {second_compile_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") past_key_values = StaticCache( model.config, max_batch_size=batch_size, device=device, dtype=torch.float16, max_cache_len=seq_length + 128, ) # 3rd call start = perf_counter() output = model.generate(**inputs, past_key_values=past_key_values) end = perf_counter() third_compile_generate_time = end - start logger.info(f"completed third compile generation in: {third_compile_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") past_key_values = StaticCache( model.config, max_batch_size=batch_size, device=device, dtype=torch.float16, max_cache_len=seq_length + 128, ) # 4th call start = perf_counter() output = model.generate(**inputs, past_key_values=past_key_values) end = perf_counter() fourth_compile_generate_time = end - start logger.info(f"completed fourth compile generation in: {fourth_compile_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") metrics_recorder.collect_model_measurements( benchmark_id, { "model_load_time": model_load_time, "first_eager_forward_pass_time_secs": first_eager_fwd_pass_time, "second_eager_forward_pass_time_secs": second_eager_fwd_pass_time, "first_eager_generate_time_secs": first_eager_generate_time, "second_eager_generate_time_secs": second_eager_generate_time, "time_to_first_token_secs": time_to_first_token, "time_to_second_token_secs": time_to_second_token, "time_to_third_token_secs": time_to_third_token, "time_to_next_token_mean_secs": mean_time_to_next_token, "first_compile_generate_time_secs": first_compile_generate_time, "second_compile_generate_time_secs": second_compile_generate_time, "third_compile_generate_time_secs": third_compile_generate_time, "fourth_compile_generate_time_secs": fourth_compile_generate_time, }, ) except Exception as e: 
logger.error(f"Caught exception: {e}") continue_metric_collection.set() if metrics_thread is not None: metrics_thread.join() # Only close the recorder if we created it locally if should_close_recorder: metrics_recorder.close()
null
[ -0.0030753505416214466, -0.017772525548934937, -0.002925154287368059, 0.005388313438743353, -0.00862713623791933, 0.02712530642747879, -0.016920633614063263, 0.02729303017258644, -0.004165332764387131, 0.01346078421920538, -0.020891569554805756, 0.012489098124206066, 0.013916180469095707, ...
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
function
multinomial_sample_one_no_sync
143
145
def multinomial_sample_one_no_sync(probs_sort): # Does multinomial sampling without a cuda synchronization q = torch.empty_like(probs_sort).exponential_(1) return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
null
[ -0.04756438732147217, -0.009014633484184742, 0.04640206694602966, 0.020962750539183617, -0.07151485979557037, 0.015379147604107857, -0.06442846357822418, 0.012705513276159763, -0.02348281443119049, -0.013511168770492077, -0.004179376643151045, 0.029651246964931488, 0.008598174899816513, 0....
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
function
logits_to_probs
147
155
def logits_to_probs(logits, temperature: float = 1.0, top_k: int | None = None): logits = logits / max(temperature, 1e-5) if top_k is not None: v, _ = torch.topk(logits, min(top_k, logits.size(-1))) pivot = v.select(-1, -1).unsqueeze(-1) logits = torch.where(logits < pivot, -float("Inf"), logits) probs = torch.nn.functional.softmax(logits, dim=-1) return probs
null
[ -0.03376872092485428, -0.009587706997990608, 0.048309892416000366, -0.010795296169817448, -0.00659869285300374, 0.035563163459300995, -0.045141272246837616, -0.01963721588253975, -0.014257922768592834, -0.04462446644902229, -0.01050804927945137, 0.03400900959968567, -0.022735539823770523, ...
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
function
sample
157
160
def sample(logits, temperature: float = 1.0, top_k: int | None = None): probs = logits_to_probs(logits[0, -1], temperature, top_k) idx_next = multinomial_sample_one_no_sync(probs) return idx_next, probs
null
[ -0.013427005149424076, -0.005928056314587593, 0.025011394172906876, 0.02269723080098629, 0.016583912074565887, 0.03230254724621773, -0.02840346284210682, -0.007256873417645693, -0.03131993114948273, -0.017054148018360138, 0.008739684708416462, 0.004900046158581972, -0.056435342878103256, 0...
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
consecutive_lines
lines_1-50
1
50
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys from logging import Logger from threading import Event, Thread from time import perf_counter, sleep # Add the parent directory to Python path to import benchmarks_entrypoint sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import gpustat import psutil import psycopg2 from benchmarks_entrypoint import MetricsRecorder # Optional heavy ML dependencies - only required when actually running the benchmark try: import torch from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, StaticCache TRANSFORMERS_AVAILABLE = True except ImportError: TRANSFORMERS_AVAILABLE = False torch = None AutoModelForCausalLM = None AutoTokenizer = None GenerationConfig = None StaticCache = None os.environ["HF_XET_HIGH_PERFORMANCE"] = "1" os.environ["TOKENIZERS_PARALLELISM"] = "1" # Only set torch precision if torch is available if TRANSFORMERS_AVAILABLE: torch.set_float32_matmul_precision("high")
null
[ -0.032811857759952545, -0.033608753234148026, 0.02028663456439972, -0.023541860282421112, -0.022622715681791306, 0.044548846781253815, -0.0255027636885643, -0.01302326750010252, -0.01795409806072712, -0.002691576723009348, -0.07740379869937897, -0.05770477280020714, -0.018825478851795197, ...
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
consecutive_lines
lines_41-90
41
90
GenerationConfig = None StaticCache = None os.environ["HF_XET_HIGH_PERFORMANCE"] = "1" os.environ["TOKENIZERS_PARALLELISM"] = "1" # Only set torch precision if torch is available if TRANSFORMERS_AVAILABLE: torch.set_float32_matmul_precision("high") def collect_metrics(benchmark_id, continue_metric_collection, metrics_recorder): p = psutil.Process(os.getpid()) while not continue_metric_collection.is_set(): with p.oneshot(): cpu_util = p.cpu_percent() mem_megabytes = p.memory_info().rss / (1024 * 1024) gpu_stats = gpustat.GPUStatCollection.new_query() gpu_util = gpu_stats[0]["utilization.gpu"] gpu_mem_megabytes = gpu_stats[0]["memory.used"] metrics_recorder.collect_device_measurements( benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes ) sleep(0.01) def run_benchmark( logger: Logger, repository: str, branch: str, commit_id: str, commit_msg: str, metrics_recorder=None, num_tokens_to_generate=100, ): # Check if required ML dependencies are available if not TRANSFORMERS_AVAILABLE: logger.error("Transformers and torch are required to run the LLaMA benchmark. Please install them with:") logger.error("pip install torch transformers") logger.error("Skipping LLaMA benchmark due to missing dependencies.") return continue_metric_collection = Event() metrics_thread = None model_id = "meta-llama/Llama-2-7b-hf" # If no metrics_recorder is provided, create one for backward compatibility if metrics_recorder is None: try: metrics_recorder = MetricsRecorder(
null
[ -0.026812579482793808, -0.017946116626262665, 0.005562379024922848, 0.0054807416163384914, -0.008601082488894463, 0.012480337172746658, -0.03990432620048523, 0.02666519582271576, -0.003724657464772463, -0.012953600846230984, -0.04326978698372841, -0.03382144123315811, -0.0030485710594803095,...
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
consecutive_lines
lines_81-130
81
130
return continue_metric_collection = Event() metrics_thread = None model_id = "meta-llama/Llama-2-7b-hf" # If no metrics_recorder is provided, create one for backward compatibility if metrics_recorder is None: try: metrics_recorder = MetricsRecorder( psycopg2.connect("dbname=metrics"), logger, repository, branch, commit_id, commit_msg, True ) should_close_recorder = True except Exception as e: logger.error(f"Failed to create metrics recorder: {e}") return else: should_close_recorder = False try: gpu_stats = gpustat.GPUStatCollection.new_query() gpu_name = gpu_stats[0]["name"] benchmark_id = metrics_recorder.initialise_benchmark({"gpu_name": gpu_name, "model_id": model_id}) logger.info(f"running benchmark #{benchmark_id} on {gpu_name} for {model_id}") metrics_thread = Thread( target=collect_metrics, args=[benchmark_id, continue_metric_collection, metrics_recorder], ) metrics_thread.start() logger.info("started background thread to fetch device metrics") os.environ["TOKENIZERS_PARALLELISM"] = "false" # silence warnings when compiling device = "cuda" logger.info("downloading weights") # This is to avoid counting download in model load time measurement model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16) gen_config = GenerationConfig(do_sample=False, top_p=1, temperature=1) logger.info("loading model") start = perf_counter() model = AutoModelForCausalLM.from_pretrained( model_id, dtype=torch.float16, generation_config=gen_config ).eval() model.to(device) torch.cuda.synchronize() end = perf_counter() model_load_time = end - start logger.info(f"loaded model in: {model_load_time}s") tokenizer = AutoTokenizer.from_pretrained(model_id)
null
[ -0.0009849709458649158, 0.017275938764214516, -0.009096222929656506, 0.0036732542794197798, -0.023082874715328217, 0.029056565836071968, -0.01568129099905491, 0.05695103853940964, 0.01186277810484171, -0.010955194011330605, -0.04147773236036301, 0.01588381454348564, 0.010192228481173515, 0...
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
consecutive_lines
lines_121-170
121
170
model = AutoModelForCausalLM.from_pretrained( model_id, dtype=torch.float16, generation_config=gen_config ).eval() model.to(device) torch.cuda.synchronize() end = perf_counter() model_load_time = end - start logger.info(f"loaded model in: {model_load_time}s") tokenizer = AutoTokenizer.from_pretrained(model_id) prompt = "Why dogs are so cute?" inputs = tokenizer(prompt, return_tensors="pt").to(device) # Specify the max length (including both the prompt and the response) # When calling `generate` with `cache_implementation="static" later, this is also used to create a `StaticCache` object # with sequence length = `max_length`. The longer the more you will re-use it seq_length = inputs["input_ids"].shape[1] model.generation_config.max_length = seq_length + num_tokens_to_generate batch_size = inputs["input_ids"].shape[0] # Copied from the gpt-fast repo def multinomial_sample_one_no_sync(probs_sort): # Does multinomial sampling without a cuda synchronization q = torch.empty_like(probs_sort).exponential_(1) return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int) def logits_to_probs(logits, temperature: float = 1.0, top_k: int | None = None): logits = logits / max(temperature, 1e-5) if top_k is not None: v, _ = torch.topk(logits, min(top_k, logits.size(-1))) pivot = v.select(-1, -1).unsqueeze(-1) logits = torch.where(logits < pivot, -float("Inf"), logits) probs = torch.nn.functional.softmax(logits, dim=-1) return probs def sample(logits, temperature: float = 1.0, top_k: int | None = None): probs = logits_to_probs(logits[0, -1], temperature, top_k) idx_next = multinomial_sample_one_no_sync(probs) return idx_next, probs # First eager forward pass logger.info("running first eager forward pass") start = perf_counter() _ = model(**inputs) torch.cuda.synchronize() end = perf_counter() first_eager_fwd_pass_time = end - start logger.info(f"completed first eager forward pass in: {first_eager_fwd_pass_time}s")
null
[ -0.014203784987330437, -0.01948946714401245, 0.015036721713840961, 0.010288489051163197, -0.011819861829280853, 0.05402989313006401, -0.05903792381286621, 0.002503552008420229, -0.00805695727467537, -0.003312874585390091, -0.03209143131971359, -0.026311075314879417, -0.01901869848370552, 0...
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
consecutive_lines
lines_161-210
161
210
# First eager forward pass logger.info("running first eager forward pass") start = perf_counter() _ = model(**inputs) torch.cuda.synchronize() end = perf_counter() first_eager_fwd_pass_time = end - start logger.info(f"completed first eager forward pass in: {first_eager_fwd_pass_time}s") # Second eager forward pass (should be faster) logger.info("running second eager forward pass") start = perf_counter() _ = model(**inputs) torch.cuda.synchronize() end = perf_counter() second_eager_fwd_pass_time = end - start logger.info(f"completed second eager forward pass in: {second_eager_fwd_pass_time}s") # First eager generation logger.info("running first eager generation") start = perf_counter() output = model.generate(**inputs) torch.cuda.synchronize() end = perf_counter() first_eager_generate_time = end - start logger.info(f"completed first eager generation in: {first_eager_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") # Second eager generation (should be faster) logger.info("running second eager generation") start = perf_counter() output = model.generate(**inputs) torch.cuda.synchronize() end = perf_counter() second_eager_generate_time = end - start logger.info(f"completed second eager generation in: {second_eager_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") logger.info("running generation timing loop") input_pos = torch.arange(0, seq_length, device=device) inputs = inputs["input_ids"] start = perf_counter() with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): logits = model(inputs, position_ids=input_pos).logits next_token, probs = sample(logits, temperature=0.6, top_k=5) torch.cuda.synchronize() end = perf_counter()
null
[ -0.004166800063103437, 0.02308001182973385, 0.011992204934358597, 0.0030430806800723076, -0.01787760481238365, 0.019764719530940056, -0.06768085062503815, 0.007311366498470306, 0.028092017397284508, 0.011947919614613056, -0.01905657909810543, -0.021282454952597618, 0.026448778808116913, 0....
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
consecutive_lines
lines_201-250
201
250
input_pos = torch.arange(0, seq_length, device=device) inputs = inputs["input_ids"] start = perf_counter() with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): logits = model(inputs, position_ids=input_pos).logits next_token, probs = sample(logits, temperature=0.6, top_k=5) torch.cuda.synchronize() end = perf_counter() time_to_first_token = end - start input_pos = torch.tensor([seq_length], device=device, dtype=torch.int) next_token = next_token.clone() start = perf_counter() with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): logits = model(next_token, position_ids=input_pos).logits next_token, probs = sample(logits, temperature=0.6, top_k=5) torch.cuda.synchronize() end = perf_counter() time_to_second_token = end - start input_pos = torch.tensor([seq_length + 1], device=device, dtype=torch.int) next_token = next_token.clone() start = perf_counter() with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): logits = model(next_token, position_ids=input_pos).logits next_token, probs = sample(logits, temperature=0.6, top_k=5) torch.cuda.synchronize() end = perf_counter() time_to_third_token = end - start logger.info("running longer generation timing loop") total_time = 0 for i in range(20): input_pos = torch.tensor([seq_length + 2 + i], device=device, dtype=torch.int) next_token = next_token.clone() start = perf_counter() with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): logits = model(next_token, position_ids=input_pos).logits next_token, probs = sample(logits, temperature=0.6, top_k=5) torch.cuda.synchronize() end = perf_counter() total_time += end - start mean_time_to_next_token = total_time / 20 logger.info("running compilation benchmarks")
null
[ -0.005462292116135359, 0.022068459540605545, -0.010079331696033478, -0.008407813496887684, -0.04113483428955078, 0.034934863448143005, -0.06047486141324043, -0.010097031481564045, 0.010795402340590954, 0.008855153806507587, -0.00809742696583271, -0.01643909141421318, -0.005502169486135244, ...
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
consecutive_lines
lines_241-290
241
290
logits = model(next_token, position_ids=input_pos).logits next_token, probs = sample(logits, temperature=0.6, top_k=5) torch.cuda.synchronize() end = perf_counter() total_time += end - start mean_time_to_next_token = total_time / 20 logger.info("running compilation benchmarks") # Now compile the model model = torch.compile(model, mode="max-autotune", fullgraph=True) # StaticCache for generation with torch.device(device): model.setup_caches(max_batch_size=batch_size, max_seq_len=seq_length + num_tokens_to_generate) input_pos = torch.arange(0, seq_length, device=device) inputs = tokenizer(prompt, return_tensors="pt").to(device)["input_ids"] logger.info("compiling model") model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16, generation_config=gen_config) model.to(device) model = torch.compile(model, mode="max-autotune", fullgraph=True) past_key_values = StaticCache( model.config, max_batch_size=batch_size, device=device, dtype=torch.float16, max_cache_len=seq_length + 128, ) # 1st call start = perf_counter() output = model.generate(**inputs, past_key_values=past_key_values) end = perf_counter() first_compile_generate_time = end - start logger.info(f"completed first compile generation in: {first_compile_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") past_key_values = StaticCache( model.config, max_batch_size=batch_size, device=device, dtype=torch.float16, max_cache_len=seq_length + 128, ) # 2nd call start = perf_counter()
null
[ 0.0014855064218863845, -0.0012903983006253839, 0.0024382059928029776, -0.022714046761393547, -0.019089365378022194, 0.04544864222407341, -0.0462946780025959, -0.0005886674043722451, 0.0017544892616569996, -0.007322025019675493, -0.02887030690908432, -0.03770274668931961, -0.00449798442423343...
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
consecutive_lines
lines_281-330
281
330
past_key_values = StaticCache( model.config, max_batch_size=batch_size, device=device, dtype=torch.float16, max_cache_len=seq_length + 128, ) # 2nd call start = perf_counter() output = model.generate(**inputs, past_key_values=past_key_values) end = perf_counter() second_compile_generate_time = end - start logger.info(f"completed second compile generation in: {second_compile_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") past_key_values = StaticCache( model.config, max_batch_size=batch_size, device=device, dtype=torch.float16, max_cache_len=seq_length + 128, ) # 3rd call start = perf_counter() output = model.generate(**inputs, past_key_values=past_key_values) end = perf_counter() third_compile_generate_time = end - start logger.info(f"completed third compile generation in: {third_compile_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") past_key_values = StaticCache( model.config, max_batch_size=batch_size, device=device, dtype=torch.float16, max_cache_len=seq_length + 128, ) # 4th call start = perf_counter() output = model.generate(**inputs, past_key_values=past_key_values) end = perf_counter() fourth_compile_generate_time = end - start logger.info(f"completed fourth compile generation in: {fourth_compile_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") metrics_recorder.collect_model_measurements( benchmark_id, { "model_load_time": model_load_time,
null
[ -0.013111117295920849, 0.01637001521885395, 0.022726381197571754, -0.0187248382717371, -0.025974992662668228, 0.035660434514284134, -0.05507495999336243, 0.02105242758989334, -0.009299248456954956, 0.004822114482522011, -0.004676888231188059, -0.029282039031386375, 0.010168015956878662, 0....
Snowflake/snowflake-arctic-embed-m
llama.py
benchmark/benches/llama.py
consecutive_lines
lines_321-353
321
353
output = model.generate(**inputs, past_key_values=past_key_values) end = perf_counter() fourth_compile_generate_time = end - start logger.info(f"completed fourth compile generation in: {fourth_compile_generate_time}s") logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") metrics_recorder.collect_model_measurements( benchmark_id, { "model_load_time": model_load_time, "first_eager_forward_pass_time_secs": first_eager_fwd_pass_time, "second_eager_forward_pass_time_secs": second_eager_fwd_pass_time, "first_eager_generate_time_secs": first_eager_generate_time, "second_eager_generate_time_secs": second_eager_generate_time, "time_to_first_token_secs": time_to_first_token, "time_to_second_token_secs": time_to_second_token, "time_to_third_token_secs": time_to_third_token, "time_to_next_token_mean_secs": mean_time_to_next_token, "first_compile_generate_time_secs": first_compile_generate_time, "second_compile_generate_time_secs": second_compile_generate_time, "third_compile_generate_time_secs": third_compile_generate_time, "fourth_compile_generate_time_secs": fourth_compile_generate_time, }, ) except Exception as e: logger.error(f"Caught exception: {e}") continue_metric_collection.set() if metrics_thread is not None: metrics_thread.join() # Only close the recorder if we created it locally if should_close_recorder: metrics_recorder.close()
null
[ -0.00909113697707653, -0.0006025779293850064, 0.04237090423703194, -0.0317564494907856, -0.012976441532373428, 0.04391933232545853, -0.010673432610929012, 0.004561536479741335, -0.005857329349964857, 0.024434220045804977, -0.034379515796899796, -0.03481247276067734, -0.0016554315807297826, ...
Snowflake/snowflake-arctic-embed-m
run_benchmarks.py
benchmark_v2/run_benchmarks.py
consecutive_lines
lines_1-50
1
50
#!/usr/bin/env python3 # Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Top-level benchmarking script that automatically discovers and runs all benchmarks in the ./benches directory, organizing outputs into model-specific subfolders. """ import argparse import json import logging import sys import uuid from framework.benchmark_config import BenchmarkConfig, adapt_configs, get_config_by_level from framework.benchmark_runner import BenchmarkRunner if __name__ == "__main__": # Parse arguments parser = argparse.ArgumentParser() parser.add_argument("--output-dir", type=str, default=None, help="Output dir for benchmark results") parser.add_argument("--log-level", type=str, choices=["DEBUG", "INFO", "WARNING", "ERROR"], default="WARNING") parser.add_argument("--model-id", type=str, help="Specific model ID to benchmark (if supported by benchmarks)") parser.add_argument("--warmup", "-w", type=int, default=3, help="Number of warmup iterations") parser.add_argument("--iterations", "-i", type=int, default=10, help="Number of measurement iterations") parser.add_argument("--batch-size", "-b", type=int, nargs="+", help="Batch size") parser.add_argument("--sequence-length", "-s", type=int, nargs="+", help="Sequence length") parser.add_argument("--num-tokens-to-generate", "-n", type=int, nargs="+", help="Number of tokens to generate") parser.add_argument( "--level", type=int, default=1, help="Level of coverage for the benchmark. 
0: only the main config, 1: a few important configs, 2: a config for" " each attn implementation an option, 3: cross-generate all combinations of configs, 4: cross-generate all" " combinations of configs w/ all compile modes",
null
[ -0.02422455884516239, -0.008018515072762966, 0.016739843413233757, -0.022459790110588074, -0.006196336355060339, 0.0751875638961792, -0.007618805859237909, -0.025401398539543152, 0.0026982359122484922, -0.015015797689557076, -0.05267728120088577, -0.012172107584774494, -0.03831087797880173, ...
Snowflake/snowflake-arctic-embed-m
run_benchmarks.py
benchmark_v2/run_benchmarks.py
consecutive_lines
lines_41-90
41
90
parser.add_argument("--sequence-length", "-s", type=int, nargs="+", help="Sequence length") parser.add_argument("--num-tokens-to-generate", "-n", type=int, nargs="+", help="Number of tokens to generate") parser.add_argument( "--level", type=int, default=1, help="Level of coverage for the benchmark. 0: only the main config, 1: a few important configs, 2: a config for" " each attn implementation an option, 3: cross-generate all combinations of configs, 4: cross-generate all" " combinations of configs w/ all compile modes", ) parser.add_argument("--config-file", type=str, help="Path to a config file stored as a json or jsonl format") parser.add_argument("--num-tokens-to-profile", "-p", type=int, default=0, help="Number of tokens to profile") parser.add_argument("--branch-name", type=str, help="Git branch name") parser.add_argument("--commit-id", type=str, help="Git commit ID (if not provided, will auto-detect from git)") parser.add_argument("--commit-message", type=str, help="Git commit message") parser.add_argument( "--no-gpu-monitoring", action="store_true", help="Disables GPU monitoring during benchmark runs" ) parser.add_argument( "--push-result-to-dataset", type=str, default=None, help="Name of the dataset to push results to. 
If not provided, results are not pushed to the Hub.", ) args = parser.parse_args() # Setup logging benchmark_run_uuid = str(uuid.uuid4())[:8] numeric_level = getattr(logging, args.log_level.upper()) handlers = [logging.StreamHandler(sys.stdout)] logging.basicConfig( level=numeric_level, format="[%(levelname)s - %(asctime)s] %(name)s: %(message)s", handlers=handlers ) logger = logging.getLogger("benchmark_v2") logger.info("Starting benchmark discovery and execution") logger.info(f"Benchmark run UUID: {benchmark_run_uuid}") logger.info(f"Output directory: {args.output_dir}") # Error out if one of the arguments is not provided if any(arg is None for arg in [args.batch_size, args.sequence_length, args.num_tokens_to_generate]): raise ValueError( "All of the arguments --batch-size, --sequence-length, and --num-tokens-to-generate are required" )
null
[ -0.010521241463720798, 0.01855650544166565, 0.0030235613230615854, -0.0023667775094509125, -0.02244582772254944, 0.06006699427962303, -0.038493480533361435, -0.0005719236214645207, 0.03283891826868057, -0.005157230421900749, -0.009066972881555557, 0.01213825773447752, -0.019854819402098656, ...
Snowflake/snowflake-arctic-embed-m
run_benchmarks.py
benchmark_v2/run_benchmarks.py
consecutive_lines
lines_81-128
81
128
logger.info("Starting benchmark discovery and execution") logger.info(f"Benchmark run UUID: {benchmark_run_uuid}") logger.info(f"Output directory: {args.output_dir}") # Error out if one of the arguments is not provided if any(arg is None for arg in [args.batch_size, args.sequence_length, args.num_tokens_to_generate]): raise ValueError( "All of the arguments --batch-size, --sequence-length, and --num-tokens-to-generate are required" ) # We cannot compute ITL if we don't have at least two measurements if any(n <= 1 for n in args.num_tokens_to_generate): raise ValueError("--num_tokens_to_generate arguments should be larger than 1") # If a config file is provided, read it and use the configs therein. They will still be adapted to the given arguments. if args.config_file is not None: if args.config_file.endswith(".json"): with open(args.config_file, "r") as f: config_as_dicts = [json.load(f)] elif args.config_file.endswith(".jsonl"): with open(args.config_file, "r") as f: config_as_dicts = [json.loads(line) for line in f if line.startswith("{")] else: raise ValueError(f"Unsupported config file format: {args.config_file}") configs = [BenchmarkConfig.from_dict(config) for config in config_as_dicts] else: # Otherwise, get the configs for the given coverage level configs = get_config_by_level(args.level) # Adapt the configs to the given arguments configs = adapt_configs( configs, args.warmup, args.iterations, args.batch_size, args.sequence_length, args.num_tokens_to_generate, not args.no_gpu_monitoring, ) runner = BenchmarkRunner(logger, args.output_dir, args.branch_name, args.commit_id, args.commit_message) timestamp, results = runner.run_benchmarks( args.model_id, configs, args.num_tokens_to_profile, pretty_print_summary=True ) dataset_id = args.push_result_to_dataset if dataset_id is not None and len(results) > 0: runner.push_results_to_hub(dataset_id, results, timestamp)
null
[ -0.02845197543501854, -0.022650670260190964, 0.017097512260079384, 0.005795626435428858, 0.020992368459701538, 0.05780598893761635, -0.030709240585565567, -0.03960694745182991, -0.002440394600853324, -0.01762004755437374, 0.00553748058155179, -0.009999038651585579, -0.03962742164731026, 0....
Snowflake/snowflake-arctic-embed-m
run_benchmarks.py
benchmark_v2/run_benchmarks.py
consecutive_lines
lines_121-128
121
128
runner = BenchmarkRunner(logger, args.output_dir, args.branch_name, args.commit_id, args.commit_message) timestamp, results = runner.run_benchmarks( args.model_id, configs, args.num_tokens_to_profile, pretty_print_summary=True ) dataset_id = args.push_result_to_dataset if dataset_id is not None and len(results) > 0: runner.push_results_to_hub(dataset_id, results, timestamp)
null
[ -0.009402602910995483, -0.06102782487869263, 0.008455201052129269, 0.0027041067369282246, 0.000935781339649111, 0.04708591476082802, -0.0612715445458889, 0.0020897253416478634, -0.01596449688076973, -0.030081426724791527, -0.016172539442777634, -0.01080890092998743, -0.02072884328663349, 0...
Snowflake/snowflake-arctic-embed-m
continuous_batching_overall.py
benchmark_v2/benchmark_scripts/continuous_batching_overall.py
function
run_and_parse_cb_example
13
32
def run_and_parse_cb_example(args: str) -> dict:
    """Run the continuous-batching example script with the given CLI args and parse its output.

    Returns a dict containing the args plus timing/throughput fields, or the
    ERROR_OUTPUT sentinel values when the run aborted or its output could not
    be parsed.
    """
    print(f"Benchmarking with args: {args}")
    completed = subprocess.run(
        ["python", SCRIPT_LOCATION] + args.split() + COMMON_ARGS,
        stdout=subprocess.PIPE,
    )
    stdout_text = completed.stdout.decode("utf-8")
    # The example script prints this marker when generation terminated unexpectedly.
    if "generate_batch despite unexpected termination" in stdout_text:
        return {"args": args, **ERROR_OUTPUT}
    match = re.search(r"CB generation took: ([\d.]+) seconds for (\d+) tokens\. ([\d.]+)tok/s", stdout_text)
    if match is None:
        return {"args": args, **ERROR_OUTPUT}
    duration, n_tokens, throughput = match.groups()
    return {
        "args": args,
        "time_seconds": float(duration),
        "num_tokens": int(n_tokens),
        "throughput_tok_per_sec": float(throughput),
    }
null
[ 0.027736829593777657, 0.02923174947500229, -0.01593787409365177, -0.03287407010793686, -0.0242815800011158, 0.054807621985673904, -0.053624387830495834, -0.05163806304335594, -0.011632172390818596, 0.022133296355605125, -0.017887767404317856, -0.04108639806509018, 0.005097236018627882, 0.0...
Snowflake/snowflake-arctic-embed-m
continuous_batching_overall.py
benchmark_v2/benchmark_scripts/continuous_batching_overall.py
consecutive_lines
lines_1-50
1
50
import re import subprocess from pathlib import Path from tabulate import tabulate SCRIPT_LOCATION = (Path(__file__).parent.parent.parent / "examples/pytorch/continuous_batching.py").as_posix() COMMON_ARGS = "--log-level WARNING --seed 0".split() ERROR_OUTPUT = {"time_seconds": "X", "num_tokens": "X", "throughput_tok_per_sec": "ERROR"} def run_and_parse_cb_example(args: str) -> dict: print(f"Benchmarking with args: {args}") output = subprocess.run( ["python", SCRIPT_LOCATION] + args.split() + COMMON_ARGS, stdout=subprocess.PIPE, ) output = output.stdout.decode("utf-8") if "generate_batch despite unexpected termination" in output: return {"args": args, **ERROR_OUTPUT} pattern = r"CB generation took: ([\d.]+) seconds for (\d+) tokens\. ([\d.]+)tok/s" match = re.search(pattern, output) if match is not None: return { "args": args, "time_seconds": float(match.group(1)), "num_tokens": int(match.group(2)), "throughput_tok_per_sec": float(match.group(3)), } else: return {"args": args, **ERROR_OUTPUT} if __name__ == "__main__": results = [ { "args": "Arguments", "time_seconds": "Duration (s)", "num_tokens": "Generated tokens", "throughput_tok_per_sec": "Throughput (tok/s)", } ] # Benchmark with low number of samples results.append(run_and_parse_cb_example("--samples 10")) results.append(run_and_parse_cb_example("--samples 20 --num-blocks 20")) # and low number of blocks results.append(run_and_parse_cb_example("--samples 50")) # Benchmark with compile: default, flash attention 2 and sdpa
null
[ 0.01665991172194481, 0.027298925444483757, -0.010032632388174534, -0.003300301730632782, -0.007413820363581181, 0.041480183601379395, -0.0798339769244194, -0.01940973475575447, 0.013037473894655704, -0.012965093366801739, -0.026607725769281387, -0.018757522106170654, -0.0008746840176172554, ...
Snowflake/snowflake-arctic-embed-m
continuous_batching_overall.py
benchmark_v2/benchmark_scripts/continuous_batching_overall.py
consecutive_lines
lines_41-66
41
66
"throughput_tok_per_sec": "Throughput (tok/s)", } ] # Benchmark with low number of samples results.append(run_and_parse_cb_example("--samples 10")) results.append(run_and_parse_cb_example("--samples 20 --num-blocks 20")) # and low number of blocks results.append(run_and_parse_cb_example("--samples 50")) # Benchmark with compile: default, flash attention 2 and sdpa results.append(run_and_parse_cb_example("--samples 100")) results.append(run_and_parse_cb_example("--samples 100 --attn flash_attention_2")) results.append(run_and_parse_cb_example("--samples 100 --attn sdpa")) # Benchmark with high number of samples results.append(run_and_parse_cb_example("--samples 500")) # Benchmark with prefix sharing and compile (best performance, but not reproducible due to compilation) results.append(run_and_parse_cb_example("--samples 500 --add-prefix --compile")) # Benchmark with parallel decoding results.append(run_and_parse_cb_example("--samples 50 --num-return-sequences 8 --do-sample")) results.append(run_and_parse_cb_example("--samples 100 --num-return-sequences 4 --do-sample")) print() print(tabulate(results, tablefmt="github"))
null
[ 0.043327003717422485, 0.051072705537080765, -0.005328958388417959, -0.011973503977060318, -0.021200550720095634, 0.01324229035526514, -0.059541188180446625, 0.041347675025463104, 0.04678291082382202, 0.01980862207710743, -0.020029060542583466, -0.04513924568891525, -0.02445385418832302, -0...
Snowflake/snowflake-arctic-embed-m
hardware_metrics.py
benchmark_v2/framework/hardware_metrics.py
function
get_device_name_and_memory_total
30
36
def get_device_name_and_memory_total() -> tuple[str, float]:
    """Returns the name and memory total of GPU 0."""
    # Resolve the accelerator backend module (e.g. torch.cuda, torch.xpu), defaulting to CUDA.
    if is_torch_accelerator_available():
        accelerator_type = torch.accelerator.current_accelerator().type
    else:
        accelerator_type = "cuda"
    backend = getattr(torch, accelerator_type, torch.cuda)
    properties = backend.get_device_properties(0)
    # total_memory is in bytes; convert to GiB
    return properties.name, properties.total_memory / 1024**3
Returns the name and memory total of GPU 0.
[ -0.01431269571185112, 0.030482498928904533, 0.03144766390323639, -0.004073668271303177, -0.034434378147125244, 0.03978166729211807, -0.0813736692070961, 0.020165495574474335, -0.01734699122607708, 0.004411689937114716, -0.012815726920962334, 0.013513936661183834, 0.00600734306499362, 0.067...
Snowflake/snowflake-arctic-embed-m
hardware_metrics.py
benchmark_v2/framework/hardware_metrics.py
class
HardwareInfo
39
65
class HardwareInfo: """A class to hold information about the hardware.""" def __init__(self) -> None: # Retrieve GPU stats try: self.gpu_name, self.gpu_memory_total_gb = get_device_name_and_memory_total() except Exception: self.gpu_name, self.gpu_memory_total_gb = None, None # Retrieve python, torch and CUDA version self.python_version = f"{sys.version.split()[0]}" self.torch_version = torch.__version__ if hasattr(torch, "cuda") and torch.cuda.is_available(): self.cuda_version = torch.version.cuda else: self.cuda_version = None # Retrieve general hardware information self.cpu_count = psutil.cpu_count() self.memory_total_mb = int(psutil.virtual_memory().total / (1024 * 1024)) def to_dict(self) -> dict[str, None | int | float | str]: return { "gpu_name": self.gpu_name, "gpu_memory_total_gb": self.gpu_memory_total_gb, "python_version": self.python_version, "torch_version": self.torch_version, }
A class to hold information about the hardware.
[ -0.011061293072998524, 0.03778177499771118, 0.032762881368398666, 0.009522675536572933, -0.010806472972035408, 0.046850770711898804, -0.042287953197956085, 0.021816246211528778, -0.02092062309384346, 0.013997770845890045, -0.03687632828950882, 0.003095389576628804, -0.021759402006864548, 0...
Snowflake/snowflake-arctic-embed-m
hardware_metrics.py
benchmark_v2/framework/hardware_metrics.py
function
__init__
42
57
def __init__(self) -> None:
    """Collect GPU, software-version and general host information."""
    # GPU stats; both fields are left as None when no GPU can be queried
    try:
        gpu_stats = get_device_name_and_memory_total()
    except Exception:
        gpu_stats = (None, None)
    self.gpu_name, self.gpu_memory_total_gb = gpu_stats
    # Python / torch / CUDA versions
    self.python_version = sys.version.split()[0]
    self.torch_version = torch.__version__
    self.cuda_version = torch.version.cuda if hasattr(torch, "cuda") and torch.cuda.is_available() else None
    # Host CPU count and total RAM (in MB)
    self.cpu_count = psutil.cpu_count()
    self.memory_total_mb = int(psutil.virtual_memory().total / (1024 * 1024))
null
[ -0.007655762601643801, 0.01559926476329565, 0.03419027104973793, -0.004734327085316181, -0.031942665576934814, 0.0418933629989624, -0.030738331377506256, 0.02821524254977703, -0.03636491298675537, 0.00025307104806415737, -0.06326693296432495, 0.008091350086033344, 0.001332740532234311, 0.0...
Snowflake/snowflake-arctic-embed-m
hardware_metrics.py
benchmark_v2/framework/hardware_metrics.py
function
to_dict
59
65
def to_dict(self) -> dict[str, None | int | float | str]: return { "gpu_name": self.gpu_name, "gpu_memory_total_gb": self.gpu_memory_total_gb, "python_version": self.python_version, "torch_version": self.torch_version, }
null
[ -0.02214222028851509, 0.03637317940592766, 0.02420167252421379, 0.012774880975484848, -0.017831852659583092, 0.07756204158067703, -0.0682983323931694, 0.03284493833780289, -0.018614796921610832, 0.019055120646953583, -0.0327591598033905, 0.0007911151042208076, -0.04876936227083206, 0.05653...
Snowflake/snowflake-arctic-embed-m
hardware_metrics.py
benchmark_v2/framework/hardware_metrics.py
function
get_amd_gpu_stats
69
73
def get_amd_gpu_stats(device_handle) -> tuple[int, float]:
    """Get AMD GPU stats using amdsmi library."""
    activity = amdsmi.amdsmi_get_gpu_activity(device_handle)
    vram = amdsmi.amdsmi_get_gpu_vram_usage(device_handle)
    # vram_used is reported in bytes; convert to GB
    return int(activity["gfx_activity"]), float(vram["vram_used"]) / 1024**3
Get AMD GPU stats using amdsmi library.
[ 0.022420603781938553, 0.03164549916982651, 0.053748663514852524, -0.0370505154132843, -0.010513866320252419, 0.03583803027868271, -0.03638668730854988, 0.004177081398665905, 0.024020951241254807, 0.007032595109194517, -0.021228639408946037, 0.02846933715045452, -0.05421413853764534, 0.0790...
Snowflake/snowflake-arctic-embed-m
hardware_metrics.py
benchmark_v2/framework/hardware_metrics.py
function
get_intel_xpu_stats
76
103
def get_intel_xpu_stats() -> tuple[int, float]:
    """Returns the utilization and memory used of an Intel XPU"""
    # xpu-smi outputs CSV format: Timestamp, DeviceId, GPU Memory Utilization (%), GPU Memory Used (MiB)
    raw = subprocess.check_output(["xpu-smi", "dump", "-m", "5,18", "-n", "1"]).decode("utf-8")
    per_card = []
    # First line is the CSV header; every following line describes one card.
    for row in raw.strip().split("\n")[1:]:
        fields = [f.strip() for f in row.split(",")]
        if len(fields) < 4:
            continue
        card_id, util_field, mem_field = fields[1], fields[2], fields[3]
        # Cards that report "N/A" for either value are ignored.
        if util_field == "N/A" or mem_field == "N/A":
            continue
        per_card.append((card_id, int(float(util_field)), float(mem_field)))
    if not per_card:
        return 0, 0.0
    # Report the busiest card, with memory converted from MiB to GB.
    _, utilization, memory_used_mib = max(per_card, key=lambda entry: entry[1])
    return utilization, memory_used_mib / 1024
Returns the utilization and memory used of an Intel XPU
[ 0.0015542542096227407, 0.04538413882255554, 0.024296559393405914, 0.0002470207691658288, -0.00037209101719781756, 0.04093250632286072, -0.08060602098703384, -0.0062730200588703156, 0.002545187482610345, 0.032279614359140396, 0.004485410172492266, 0.007935311645269394, -0.04284776374697685, ...
Snowflake/snowflake-arctic-embed-m
hardware_metrics.py
benchmark_v2/framework/hardware_metrics.py
function
get_nvidia_gpu_stats
106
111
def get_nvidia_gpu_stats(device_handle) -> tuple[int, float]:
    """Returns the utilization and memory used of an NVIDIA GPU using pynvml."""
    gpu_util = pynvml.nvmlDeviceGetUtilizationRates(device_handle).gpu
    used_bytes = pynvml.nvmlDeviceGetMemoryInfo(device_handle).used
    # used memory is reported in bytes; convert to GB
    return int(gpu_util), float(used_bytes / 1024**3)
Returns the utilization and memory used of an NVIDIA GPU using pynvml.
[ 0.01788456365466118, 0.01576312445104122, 0.03451571986079216, -0.03998270258307457, -0.030276048928499222, 0.03311420977115631, -0.05140240862965584, 0.021868910640478134, -0.035031504929065704, 0.01604416035115719, 0.006398307159543037, 0.01755739189684391, -0.006562385242432356, 0.07371...
Snowflake/snowflake-arctic-embed-m
hardware_metrics.py
benchmark_v2/framework/hardware_metrics.py
class
GPUMonitoringStatus
115
121
class GPUMonitoringStatus(Enum):
    """Status of GPU monitoring."""

    SUCCESS = "success"  # at least one sample was collected
    FAILED = "failed"  # monitoring process crashed or timed out
    NO_GPUS_AVAILABLE = "no_gpus_available"  # no (supported) GPU was detected, monitoring never started
    NO_SAMPLES_COLLECTED = "no_samples_collected"  # monitoring ran but produced no samples
Status of GPU monitoring.
[ -0.0018276707269251347, 0.02079337276518345, 0.03558380529284477, -0.01004907675087452, -0.001567718805745244, 0.03114824742078781, -0.001290209125727415, 0.011843829415738583, -0.0028768666088581085, -0.018099866807460785, -0.04125954955816269, 0.025171035900712013, -0.06647743284702301, ...
Snowflake/snowflake-arctic-embed-m
hardware_metrics.py
benchmark_v2/framework/hardware_metrics.py
class
GPURawMetrics
125
152
class GPURawMetrics:
    """Raw values for GPU utilization and memory used."""

    utilization: list[float]  # in percent
    memory_used: list[float]  # in GB
    timestamps: list[float]  # in seconds
    timestamp_0: float  # in seconds
    monitoring_status: GPUMonitoringStatus

    def to_dict(self) -> dict[str, None | int | float | str]:
        """Serialize to a plain dict, storing the status enum as its string value."""
        payload = {
            name: getattr(self, name)
            for name in ("utilization", "memory_used", "timestamps", "timestamp_0")
        }
        payload["monitoring_status"] = self.monitoring_status.value
        return payload

    @classmethod
    def from_dict(cls, data: dict[str, None | int | float | str]) -> "GPURawMetrics":
        """Create a GPURawMetrics instance from a dictionary."""
        status = GPUMonitoringStatus(data["monitoring_status"])
        field_names = ("utilization", "memory_used", "timestamps", "timestamp_0")
        kwargs = {name: data[name] for name in field_names}
        return cls(monitoring_status=status, **kwargs)
Raw values for GPU utilization and memory used.
[ 0.00567447068169713, 0.03663041070103645, 0.035171523690223694, 0.010924815200269222, -0.02695278450846672, 0.06742826849222183, -0.055064693093299866, 0.009721719659864902, -0.005160825792700052, 0.0224408321082592, -0.0184852946549654, -0.008715687319636345, -0.07578865438699722, 0.05580...
Snowflake/snowflake-arctic-embed-m
hardware_metrics.py
benchmark_v2/framework/hardware_metrics.py
function
to_dict
134
141
def to_dict(self) -> dict[str, None | int | float | str]:
    """Serialize the raw metrics, replacing the status enum by its string value."""
    payload = {
        name: getattr(self, name)
        for name in ("utilization", "memory_used", "timestamps", "timestamp_0")
    }
    payload["monitoring_status"] = self.monitoring_status.value
    return payload
null
[ -0.005747748073190451, 0.03845261037349701, 0.019746731966733932, 0.0009678352507762611, -0.004683018662035465, 0.0744384303689003, -0.05971790477633476, 0.005172761622816324, -0.011368032544851303, -0.0019506997196003795, 0.0007714577950537205, -0.006590948905795813, -0.0770568922162056, ...
Snowflake/snowflake-arctic-embed-m
hardware_metrics.py
benchmark_v2/framework/hardware_metrics.py
function
from_dict
144
152
def from_dict(cls, data: dict[str, None | int | float | str]) -> "GPURawMetrics":
    """Create a GPURawMetrics instance from a dictionary."""
    status = GPUMonitoringStatus(data["monitoring_status"])
    field_names = ("utilization", "memory_used", "timestamps", "timestamp_0")
    kwargs = {name: data[name] for name in field_names}
    return cls(monitoring_status=status, **kwargs)
Create a GPURawMetrics instance from a dictionary.
[ 0.008930206298828125, 0.015133150853216648, 0.023329276591539383, -0.007464095950126648, -0.03441700339317322, 0.09880095720291138, -0.06564753502607346, 0.010484336875379086, -0.003942989744246006, 0.017373833805322647, -0.021834278479218483, -0.004231780301779509, -0.074624203145504, 0.0...
Snowflake/snowflake-arctic-embed-m
hardware_metrics.py
benchmark_v2/framework/hardware_metrics.py
class
GPUMonitor
156
325
class GPUMonitor:
    """Monitor GPU utilization during benchmark execution using a separate process.

    A daemon worker process samples utilization and memory at a fixed interval
    and streams the collected series back over a multiprocessing Pipe when the
    parent asks it to stop.
    """

    def __init__(self, sample_interval_sec: float = 0.05, logger: Logger | None = None):
        # sample_interval_sec: time between two consecutive GPU samples, in seconds.
        # logger: optional logger; falls back to the module-level logger.
        self.sample_interval_sec = sample_interval_sec
        self.logger = logger if logger is not None else _logger
        # gpu_type stays None when no supported GPU is found; start() is then a no-op.
        self.gpu_type = None
        self.process = None
        device_type = torch.accelerator.current_accelerator().type if is_torch_accelerator_available() else "cuda"
        torch_accelerator_module = getattr(torch, device_type, torch.cuda)
        self.num_available_gpus = torch_accelerator_module.device_count()
        if self.num_available_gpus == 0:
            self.logger.warning(f"No GPUs detected by torch.{device_type}.device_count().")
            return
        # Determine GPU type by vendor substring in the device name
        device_name, _ = get_device_name_and_memory_total()
        if "amd" in device_name.lower():
            self.gpu_type = "amd"
        elif "nvidia" in device_name.lower():
            self.gpu_type = "nvidia"
        elif "intel" in device_name.lower() or device_type == "xpu":
            self.gpu_type = "intel"
        else:
            self.logger.warning(f"Unsupported GPU for monitoring: {device_name}")

    @staticmethod
    def _monitor_worker(gpu_type: str, sample_interval_sec: float, connection: Connection):
        """Worker process for GPU monitoring.

        Protocol: sends one ready signal after vendor-library init, samples
        until any message arrives on `connection`, then sends back the
        collected (utilization, memory_used, timestamps) lists and closes.
        """
        gpu_utilization = []
        gpu_memory_used = []
        timestamps = []
        device_handle = None
        # Initialize GPU-specific monitoring (Intel needs no handle: stats come from the xpu-smi CLI)
        if gpu_type == "amd":
            amdsmi.amdsmi_init()
            device_handle = amdsmi.amdsmi_get_processor_handles()[0]
        elif gpu_type == "nvidia":
            pynvml.nvmlInit()
            device_handle = pynvml.nvmlDeviceGetHandleByIndex(0)
        # Signal ready
        try:
            connection.send(0)
        except Exception:
            return
        # Monitoring loop: poll() doubles as the inter-sample sleep and the stop-signal check
        stop = False
        while not stop:
            try:
                if gpu_type == "amd":
                    utilization, memory_used = get_amd_gpu_stats(device_handle)
                elif gpu_type == "nvidia":
                    utilization, memory_used = get_nvidia_gpu_stats(device_handle)
                elif gpu_type == "intel":
                    utilization, memory_used = get_intel_xpu_stats()
                else:
                    break
                gpu_utilization.append(utilization)
                gpu_memory_used.append(memory_used)
                timestamps.append(time.time())
            except Exception as e:
                # Skips failed measurements
                _logger.debug(f"Failed to collect GPU metrics sample: {e}")
            stop = connection.poll(sample_interval_sec)
        # Cleanup vendor libraries; failures here are only logged
        if gpu_type == "amd":
            try:
                amdsmi.amdsmi_shut_down()
            except Exception as e:
                _logger.debug(f"Failed to shutdown AMD GPU monitoring: {e}")
        elif gpu_type == "nvidia":
            try:
                pynvml.nvmlShutdown()
            except Exception as e:
                _logger.debug(f"Failed to shutdown NVIDIA GPU monitoring: {e}")
        # Send results back
        try:
            connection.send((gpu_utilization, gpu_memory_used, timestamps))
        except Exception as e:
            _logger.error(f"Failed to send GPU monitoring results: {e}")
        connection.close()

    def start(self):
        """Start monitoring GPU metrics in a separate process."""
        if self.gpu_type is None:
            self.logger.debug("GPU monitoring skipped (no supported GPU)")
            return
        self.child_connection, self.parent_connection = Pipe()
        # daemon=True so a crashed benchmark does not leave the monitor process running
        self.process = Process(
            target=GPUMonitor._monitor_worker,
            args=(self.gpu_type, self.sample_interval_sec, self.child_connection),
            daemon=True,
        )
        self.process.start()
        # Wait for worker to signal ready
        if self.process.is_alive():
            self.parent_connection.recv()
        self.logger.debug("GPU monitoring started (multiprocessing)")

    def stop_and_collect(self) -> GPURawMetrics:
        """Stop monitoring and return collected metrics."""
        # No GPU available or unsupported GPU
        if self.process is None:
            return GPURawMetrics(
                utilization=[],
                memory_used=[],
                timestamps=[],
                timestamp_0=0.0,
                monitoring_status=GPUMonitoringStatus.NO_GPUS_AVAILABLE,
            )
        # Process crashed before we could collect results
        process_failed = False
        if not self.process.is_alive():
            process_failed = True
            gpu_utilization, gpu_memory_used, timestamps = [], [], []
        else:
            # Signal stop
            self.parent_connection.send(0)
            # Get results
            try:
                gpu_utilization, gpu_memory_used, timestamps = self.parent_connection.recv()
            except Exception:
                process_failed = True
                gpu_utilization, gpu_memory_used, timestamps = [], [], []
            # NOTE(review): indentation reconstructed from flattened source — the
            # close/join/terminate sequence is assumed to belong to the live-process
            # branch; confirm against upstream.
            self.parent_connection.close()
            self.process.join(timeout=2.0)
            if self.process.is_alive():
                self.process.terminate()
        if gpu_utilization:
            # Normalize timestamps so the series starts at 0
            timestamp_0 = timestamps[0]
            metrics = GPURawMetrics(
                utilization=gpu_utilization,
                memory_used=gpu_memory_used,
                timestamps=[t - timestamp_0 for t in timestamps],
                timestamp_0=timestamp_0,
                monitoring_status=GPUMonitoringStatus.SUCCESS,
            )
            self.logger.debug(f"GPU monitoring completed: {len(gpu_utilization)} samples collected")
        elif process_failed:
            metrics = GPURawMetrics(
                utilization=[],
                memory_used=[],
                timestamps=[],
                timestamp_0=0.0,
                monitoring_status=GPUMonitoringStatus.FAILED,
            )
            self.logger.warning("GPU monitoring failed (process crashed or timed out)")
        else:
            metrics = GPURawMetrics(
                utilization=[],
                memory_used=[],
                timestamps=[],
                timestamp_0=0.0,
                monitoring_status=GPUMonitoringStatus.NO_SAMPLES_COLLECTED,
            )
        return metrics
Monitor GPU utilization during benchmark execution using a separate process.
[ -0.011124425567686558, 0.007926194928586483, 0.005850772839039564, -0.00036644478677771986, -0.01040649600327015, 0.03467322513461113, -0.03949446603655815, 0.01842268742620945, 0.017637554556131363, 0.0038092948962002993, -0.009923582896590233, 0.030442193150520325, -0.02603154070675373, ...
Snowflake/snowflake-arctic-embed-m