diff --git a/.gitattributes b/.gitattributes index 2b2e1f180d8b67fd4edbdedd16608b3b0cf6e835..1f0617e0b2a8f9dad4a3bf75b498488ca68a9b7b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -803,3 +803,4 @@ evalkit_eagle/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/base. evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so filter=lfs diff=lfs merge=lfs -text evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda124.so filter=lfs diff=lfs merge=lfs -text +evalkit_eagle/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/evalkit_eagle/lib/python3.10/site-packages/openai-1.59.7.dist-info/RECORD b/evalkit_eagle/lib/python3.10/site-packages/openai-1.59.7.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..e839aba92731d46dbf2046501fde60019207974a --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/openai-1.59.7.dist-info/RECORD @@ -0,0 +1,808 @@ +../../../bin/openai,sha256=BPJMcG5Ul8vWwBvgskLtldG0MRWeiGZZSOaSfIRGf5o,228 +openai-1.59.7.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +openai-1.59.7.dist-info/METADATA,sha256=6HlD_z7wNgkm6HpVwwCLjKH6ZN0pGliuzqGYPbkMm8E,27223 +openai-1.59.7.dist-info/RECORD,, +openai-1.59.7.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +openai-1.59.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 +openai-1.59.7.dist-info/entry_points.txt,sha256=kAYhQEmziJwsKs5raYAIOvJ2LWmbz5dulEXOzsY71ro,43 +openai-1.59.7.dist-info/licenses/LICENSE,sha256=1xHtN7sZrnJJr40JO4_G6nWP01VLkqxhUAwa08wOP7k,11336 +openai/__init__.py,sha256=UZfk6nnPAGguY3XX7QQfqa4kZjzvFEGp-TUyxrBcTlI,10296 +openai/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30 
+openai/__pycache__/__init__.cpython-310.pyc,, +openai/__pycache__/__main__.cpython-310.pyc,, +openai/__pycache__/_base_client.cpython-310.pyc,, +openai/__pycache__/_client.cpython-310.pyc,, +openai/__pycache__/_compat.cpython-310.pyc,, +openai/__pycache__/_constants.cpython-310.pyc,, +openai/__pycache__/_exceptions.cpython-310.pyc,, +openai/__pycache__/_files.cpython-310.pyc,, +openai/__pycache__/_legacy_response.cpython-310.pyc,, +openai/__pycache__/_models.cpython-310.pyc,, +openai/__pycache__/_module_client.cpython-310.pyc,, +openai/__pycache__/_qs.cpython-310.pyc,, +openai/__pycache__/_resource.cpython-310.pyc,, +openai/__pycache__/_response.cpython-310.pyc,, +openai/__pycache__/_streaming.cpython-310.pyc,, +openai/__pycache__/_types.cpython-310.pyc,, +openai/__pycache__/_version.cpython-310.pyc,, +openai/__pycache__/pagination.cpython-310.pyc,, +openai/__pycache__/version.cpython-310.pyc,, +openai/_base_client.py,sha256=dp8TJR8ZBuS0RbjnNKVkZC--tbstwz33Q_P_UB7dKCE,69238 +openai/_client.py,sha256=FJRGkrdpHAFV2TOs04tO5uyKCA-cudlk4BlvCX3KI3Q,23355 +openai/_compat.py,sha256=Mtzi28qOK99ZBPcGcQqdjoUFk2MzzpqjaafjuwQ4NO0,6982 +openai/_constants.py,sha256=L1pfEhuz_wM2w2_U9P_9JZzTbrN4pbLo207l96rtKcQ,469 +openai/_exceptions.py,sha256=2BEuXwqce9z7X6lWLLXRqg1vOay_q-OdLz9lcj6Pluw,4798 +openai/_extras/__init__.py,sha256=LZbJLZ7aFHRcI7uiY4-wFQTdMp-BF6FER1QMhKVFkWk,107 +openai/_extras/__pycache__/__init__.cpython-310.pyc,, +openai/_extras/__pycache__/_common.cpython-310.pyc,, +openai/_extras/__pycache__/numpy_proxy.cpython-310.pyc,, +openai/_extras/__pycache__/pandas_proxy.cpython-310.pyc,, +openai/_extras/_common.py,sha256=NWWtgbdJsO3hQGQxaXGfVk0LjeIE5AFZ8VS_795hhMc,364 +openai/_extras/numpy_proxy.py,sha256=hwZXa_JBAPD5taRhor1tGxK26g5IaK52JclQDl-dky0,799 +openai/_extras/pandas_proxy.py,sha256=NCEt1Dqwc_0H85YdsWPDE3lPDJtYnBT8G-gJE_BCeEc,637 +openai/_files.py,sha256=WEf6hxJN1u3pVkdnPCpinhxCUnOV2olt4J6vLoJ_k48,3616 
+openai/_legacy_response.py,sha256=YBL2OTX7W139lVpcVHnNTsHRPNJxWHBAw6ZZHqnL2fs,16046 +openai/_models.py,sha256=9AQDXMPMGn0BM-MjcKL6AZYXItM2OJPgdhgZPJiHpUA,30413 +openai/_module_client.py,sha256=gF_2bbdosIwUt29sQgrQRJOgNREvXF-IDxe4XKGhHjY,2523 +openai/_qs.py,sha256=AOkSz4rHtK4YI3ZU_kzea-zpwBUgEY8WniGmTPyEimc,4846 +openai/_resource.py,sha256=IQihFzFLhGOiGSlT2dO1ESWSTg2XypgbtAldtGdTOqU,1100 +openai/_response.py,sha256=Juwnj0AMWnHc8HDjtdcQQpMIDyX170hzZPXaAK1e9Qw,29387 +openai/_streaming.py,sha256=t1UZrg53fVJB5Rs6k2sT9PBbvjp-IGrQzUq_5nlxKG4,13102 +openai/_types.py,sha256=GxKqy9_2_AUqbaRROzqhCJ47a7c-q_T6Bu8kV9a2qhA,6242 +openai/_utils/__init__.py,sha256=WnJrKMH-HJifY1H9sSTocSjuVSm4s2W_2QnIm3-wxZI,2222 +openai/_utils/__pycache__/__init__.cpython-310.pyc,, +openai/_utils/__pycache__/_logs.cpython-310.pyc,, +openai/_utils/__pycache__/_proxy.cpython-310.pyc,, +openai/_utils/__pycache__/_reflection.cpython-310.pyc,, +openai/_utils/__pycache__/_streams.cpython-310.pyc,, +openai/_utils/__pycache__/_sync.cpython-310.pyc,, +openai/_utils/__pycache__/_transform.cpython-310.pyc,, +openai/_utils/__pycache__/_typing.cpython-310.pyc,, +openai/_utils/__pycache__/_utils.cpython-310.pyc,, +openai/_utils/_logs.py,sha256=IC5iwPflwelNpJEpWsvK3up-pol5hR8k_VL9fSukk_Y,1351 +openai/_utils/_proxy.py,sha256=z3zsateHtb0EARTWKk8QZNHfPkqJbqwd1lM993LBwGE,1902 +openai/_utils/_reflection.py,sha256=aTXm-W0Kww4PJo5LPkUnQ92N-2UvrK1-D67cJVBlIgw,1426 +openai/_utils/_streams.py,sha256=SMC90diFFecpEg_zgDRVbdR3hSEIgVVij4taD-noMLM,289 +openai/_utils/_sync.py,sha256=03JeD-UR_e2O8dJEtD-v4zcyhlEpFkrcH8bgrSJMrxI,2437 +openai/_utils/_transform.py,sha256=Dkkyr7OveGmOolepcvXmVJWE3kqim4b0nM0h7yWbgeY,13468 +openai/_utils/_typing.py,sha256=nTJz0jcrQbEgxwy4TtAkNxuU0QHHlmc6mQtA6vIR8tg,4501 +openai/_utils/_utils.py,sha256=MiRKO6s2cFkNzeBUwBc7x1MQiH_3s2-uG1WYySqwveg,12419 +openai/_version.py,sha256=7awlvvOt0N1yQ0bQTOoi1bid6H4qFs_SXBZJdn7IFYA,159 +openai/cli/__init__.py,sha256=soGgtqyomgddl92H0KJRqHqGuaXIaghq86qkzLuVp7U,31 
+openai/cli/__pycache__/__init__.cpython-310.pyc,, +openai/cli/__pycache__/_cli.cpython-310.pyc,, +openai/cli/__pycache__/_errors.cpython-310.pyc,, +openai/cli/__pycache__/_models.cpython-310.pyc,, +openai/cli/__pycache__/_progress.cpython-310.pyc,, +openai/cli/__pycache__/_utils.cpython-310.pyc,, +openai/cli/_api/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58 +openai/cli/_api/__pycache__/__init__.cpython-310.pyc,, +openai/cli/_api/__pycache__/_main.cpython-310.pyc,, +openai/cli/_api/__pycache__/audio.cpython-310.pyc,, +openai/cli/_api/__pycache__/completions.cpython-310.pyc,, +openai/cli/_api/__pycache__/files.cpython-310.pyc,, +openai/cli/_api/__pycache__/image.cpython-310.pyc,, +openai/cli/_api/__pycache__/models.cpython-310.pyc,, +openai/cli/_api/_main.py,sha256=5yyfLURqCEaAN8B61gHaqVAaYgtyb9Xq0ncQ3P2BAh0,451 +openai/cli/_api/audio.py,sha256=IPbABMwryQ0CQTF4gi6VS3hJi6qFjoyj6IDV2ZoPT6A,3787 +openai/cli/_api/chat/__init__.py,sha256=MhFUQH9F6QCtbPMlbsU_DWTd7wc5DSCZ7Wy3FBGVij0,300 +openai/cli/_api/chat/__pycache__/__init__.cpython-310.pyc,, +openai/cli/_api/chat/__pycache__/completions.cpython-310.pyc,, +openai/cli/_api/chat/completions.py,sha256=9Ztetyz7rm0gP5SOPWEcpzFJnJKuIEQit626vOq42bE,5363 +openai/cli/_api/completions.py,sha256=ysOmnbXpFz3VB5N_5USPdObiYew62vEn6rMtNFwTJGQ,6412 +openai/cli/_api/files.py,sha256=6nKXFnsC2QE0bGnVUAG7BTLSu6K1_MhPE0ZJACmzgRY,2345 +openai/cli/_api/image.py,sha256=ovBExdn8oUK9ImOpsPafesfAlmcftLP2p7d37hcUtKU,5062 +openai/cli/_api/models.py,sha256=pGmIGZToj3raGGpKvPSq_EVUR-dqg4Vi0PNfZH98D2E,1295 +openai/cli/_cli.py,sha256=o6zWCnq84u-DIGZuR9YoOUxTGTpx-oCU5mgAKDi555c,6779 +openai/cli/_errors.py,sha256=nejlu1HnOyAIr2n7uqpFtWn8XclWj_9N8FwgfT3BPK8,471 +openai/cli/_models.py,sha256=tgsldjG216KpwgAZ5pS0sV02FQvONDJU2ElA4kCCiIU,491 +openai/cli/_progress.py,sha256=aMLssU9jh-LoqRYH3608jNos7r6vZKnHTRlHxFznzv4,1406 +openai/cli/_tools/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58 
+openai/cli/_tools/__pycache__/__init__.cpython-310.pyc,, +openai/cli/_tools/__pycache__/_main.cpython-310.pyc,, +openai/cli/_tools/__pycache__/fine_tunes.cpython-310.pyc,, +openai/cli/_tools/__pycache__/migrate.cpython-310.pyc,, +openai/cli/_tools/_main.py,sha256=pakjEXHRHqYlTml-RxV7fNrRtRXzmZBinoPi1AJipFY,467 +openai/cli/_tools/fine_tunes.py,sha256=RQgYMzifk6S7Y1I1K6huqco2QxmXa7gVUlHl6SrKTSU,1543 +openai/cli/_tools/migrate.py,sha256=o-iomzhtC6N6X5H5GDlgQ_QOaIovE2YA9oHc_tIAUj8,4497 +openai/cli/_utils.py,sha256=oiTc9MnxQh_zxAZ1OIHPkoDpCll0NF9ZgkdFHz4T-Bs,848 +openai/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224 +openai/lib/__init__.py,sha256=BMTfMnlbugMgDA1STDIAlx4bI4t4l_8bQmJxd0th0n8,126 +openai/lib/__pycache__/__init__.cpython-310.pyc,, +openai/lib/__pycache__/_old_api.cpython-310.pyc,, +openai/lib/__pycache__/_pydantic.cpython-310.pyc,, +openai/lib/__pycache__/_tools.cpython-310.pyc,, +openai/lib/__pycache__/_validators.cpython-310.pyc,, +openai/lib/__pycache__/azure.cpython-310.pyc,, +openai/lib/_old_api.py,sha256=XZnXBrEKuTd70iJirj5mGW35fZoqruJobbBTq6bvg10,1947 +openai/lib/_parsing/__init__.py,sha256=wS3BYvMGj9TqiPqOe3rO1sleaAJqHVuCaQuCE5rZIUw,539 +openai/lib/_parsing/__pycache__/__init__.cpython-310.pyc,, +openai/lib/_parsing/__pycache__/_completions.cpython-310.pyc,, +openai/lib/_parsing/_completions.py,sha256=I1KpjdI9p8Me-nsLF2szjEYF_7x4k28WGH5GdZeKpzI,9138 +openai/lib/_pydantic.py,sha256=Lvd-6S5WiEPvwewOqNarDiGJ_ZPtkez9W28ZLcB-K_c,5336 +openai/lib/_tools.py,sha256=xrzM7jNgehZGsRQ9kSgn1q33z9cHrgf0b8UMo5wrTFw,1501 +openai/lib/_validators.py,sha256=cXJXFuaAl7jeJcYHXXnFa4NHGtHs-_zt3Zs1VVCmQo4,35288 +openai/lib/azure.py,sha256=8rGDip2BVCTvZnvaq_fT8pGQZ3479-JP6oL9WtI5NpM,23563 +openai/lib/streaming/__init__.py,sha256=kD3LpjsqU7caDQDhB-YjTUl9qqbb5sPnGGSI2yQYC70,379 +openai/lib/streaming/__pycache__/__init__.cpython-310.pyc,, +openai/lib/streaming/__pycache__/_assistants.cpython-310.pyc,, 
+openai/lib/streaming/__pycache__/_deltas.cpython-310.pyc,, +openai/lib/streaming/_assistants.py,sha256=LUWSinmYopQIkQ5xSg73b6BWbkRkQS5JvX62w_V9xSw,40692 +openai/lib/streaming/_deltas.py,sha256=I7B_AznXZwlBmE8Puau7ayTQUx6hMIEVE8FYTQm2fjs,2502 +openai/lib/streaming/chat/__init__.py,sha256=7krL_atOvvpQkY_byWSglSfDsMs5hdoxHmz4Ulq7lcc,1305 +openai/lib/streaming/chat/__pycache__/__init__.cpython-310.pyc,, +openai/lib/streaming/chat/__pycache__/_completions.cpython-310.pyc,, +openai/lib/streaming/chat/__pycache__/_events.cpython-310.pyc,, +openai/lib/streaming/chat/__pycache__/_types.cpython-310.pyc,, +openai/lib/streaming/chat/_completions.py,sha256=icXzr6TwaQvOOEZHRLIfw106YVUT9mLGjQt6QJ1ObKI,29944 +openai/lib/streaming/chat/_events.py,sha256=lstVmM6YR2Cs9drikzrY9JCZn9Nbfym0aKIPtNpxL6w,2618 +openai/lib/streaming/chat/_types.py,sha256=-SYVBNhGkOUoJ-8dotxpCRqPJpfyOQ8hwR2_HrsQCRI,739 +openai/pagination.py,sha256=B9ejXEAR_hYGLHfqb9xEEsE0u5dCUMjvplOce5dpY7M,2760 +openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +openai/resources/__init__.py,sha256=eYonVyf6AAmk-b8JYSYmo5EEMv89ovxiAY5A83ti8J8,4533 +openai/resources/__pycache__/__init__.cpython-310.pyc,, +openai/resources/__pycache__/batches.cpython-310.pyc,, +openai/resources/__pycache__/completions.cpython-310.pyc,, +openai/resources/__pycache__/embeddings.cpython-310.pyc,, +openai/resources/__pycache__/files.cpython-310.pyc,, +openai/resources/__pycache__/images.cpython-310.pyc,, +openai/resources/__pycache__/models.cpython-310.pyc,, +openai/resources/__pycache__/moderations.cpython-310.pyc,, +openai/resources/audio/__init__.py,sha256=YM7FHvPKVlj_v6EIgfpUQsb6q4hS2hVQ3gfkgic0sP0,1687 +openai/resources/audio/__pycache__/__init__.cpython-310.pyc,, +openai/resources/audio/__pycache__/audio.cpython-310.pyc,, +openai/resources/audio/__pycache__/speech.cpython-310.pyc,, +openai/resources/audio/__pycache__/transcriptions.cpython-310.pyc,, +openai/resources/audio/__pycache__/translations.cpython-310.pyc,, 
+openai/resources/audio/audio.py,sha256=MMJHbfXmyYmQU7dF8XsD0YOIqdlG3gtxUqTihOuVx8o,5499 +openai/resources/audio/speech.py,sha256=yPoi_Xozv0Yuikbf2dxhAyRdN2q_sWDQoHNCxUayC-E,8903 +openai/resources/audio/transcriptions.py,sha256=4X71pe1lvelNRPSlHy2jAtIMyETYwWieLShBdr12MN0,18507 +openai/resources/audio/translations.py,sha256=4Y-ognKnSi72qhwX8FCKB-5JhvaAS2Wnq2ivTFmpUoU,15711 +openai/resources/batches.py,sha256=8wb-oy81IkxpABjT_11JKP7nzTmGmP35lD6WGecWmn8,19578 +openai/resources/beta/__init__.py,sha256=nXoV4P8WCrbEZuNMtptbIuy_LqlVafY9lJ2qfW35GFc,1636 +openai/resources/beta/__pycache__/__init__.cpython-310.pyc,, +openai/resources/beta/__pycache__/assistants.cpython-310.pyc,, +openai/resources/beta/__pycache__/beta.cpython-310.pyc,, +openai/resources/beta/assistants.py,sha256=j1BE3q4aCGzridJ8wyhzn0FeI3Gvy56jRK57EA-SuXk,40533 +openai/resources/beta/beta.py,sha256=D9mhIg_Qc0tUq23AVRUI6Z1WRF_ekeNG5sHeRYyhFXk,6602 +openai/resources/beta/chat/__init__.py,sha256=d_fpyFMAG3iRAPIXANPfRG4HtEm6U_uMUYep7Skj2uY,263 +openai/resources/beta/chat/__pycache__/__init__.cpython-310.pyc,, +openai/resources/beta/chat/__pycache__/chat.cpython-310.pyc,, +openai/resources/beta/chat/__pycache__/completions.cpython-310.pyc,, +openai/resources/beta/chat/chat.py,sha256=sNvU8Fi_o3dWkD_X4Mobafv9XWBP6Y2dJxng-NdFXUs,597 +openai/resources/beta/chat/completions.py,sha256=Z_x_hxpemrmROMrfyx6dUALppPuqNgswgW9YQ3ngHYI,28553 +openai/resources/beta/realtime/__init__.py,sha256=0TBjHlLRsG-hudbiE8f-EXETNkDRAxqkCVAgODiUnYo,862 +openai/resources/beta/realtime/__pycache__/__init__.cpython-310.pyc,, +openai/resources/beta/realtime/__pycache__/realtime.cpython-310.pyc,, +openai/resources/beta/realtime/__pycache__/sessions.cpython-310.pyc,, +openai/resources/beta/realtime/realtime.py,sha256=iRKjT29BT2LEbc909_wtQ8mmr9lKstibKd4DZm0BWEM,37482 +openai/resources/beta/realtime/sessions.py,sha256=i53-QVMaqK3sGP22gh250kANFlRbP4V-g8uffWzKHS8,16093 
+openai/resources/beta/threads/__init__.py,sha256=fQ_qdUVSfouVS5h47DlTb5mamChT4K-v-siPuuAB6do,1177 +openai/resources/beta/threads/__pycache__/__init__.cpython-310.pyc,, +openai/resources/beta/threads/__pycache__/messages.cpython-310.pyc,, +openai/resources/beta/threads/__pycache__/threads.cpython-310.pyc,, +openai/resources/beta/threads/messages.py,sha256=LBjgJAK-0g_lkhIX2WG6qNT0RzSTknO0nRlqkVQw-B8,27372 +openai/resources/beta/threads/runs/__init__.py,sha256=2FfDaqwmJJCd-IVpY_CrzWcFvw0KFyQ3cm5jnTfI-DQ,771 +openai/resources/beta/threads/runs/__pycache__/__init__.cpython-310.pyc,, +openai/resources/beta/threads/runs/__pycache__/runs.cpython-310.pyc,, +openai/resources/beta/threads/runs/__pycache__/steps.cpython-310.pyc,, +openai/resources/beta/threads/runs/runs.py,sha256=7sPjaxa8Th6aXDeils1G8VKA9_2wsyjGUs5kJh3M50I,142593 +openai/resources/beta/threads/runs/steps.py,sha256=VlGD9NXtNqOt3uwlnepCavW7v3uVlvvyi0X1h9WZ_-E,15817 +openai/resources/beta/threads/threads.py,sha256=qGh4H0-42NhJHwPpyAYZlGx1ZgssFARJ45fhEDCyDQU,94238 +openai/resources/beta/vector_stores/__init__.py,sha256=11Xn1vhgndWiI0defJHv31vmbtbDgh2GwZT3gX8GgHk,1296 +openai/resources/beta/vector_stores/__pycache__/__init__.cpython-310.pyc,, +openai/resources/beta/vector_stores/__pycache__/file_batches.cpython-310.pyc,, +openai/resources/beta/vector_stores/__pycache__/files.cpython-310.pyc,, +openai/resources/beta/vector_stores/__pycache__/vector_stores.cpython-310.pyc,, +openai/resources/beta/vector_stores/file_batches.py,sha256=EomxymvX4oCIRXUAfKGShAYWqnv1vlAahcp_Wa7Kt7Y,31985 +openai/resources/beta/vector_stores/files.py,sha256=LjN6Zazb4dGV-xeQ-XRKAVciXsFj7LXh90AKJgVQ-Cw,29724 +openai/resources/beta/vector_stores/vector_stores.py,sha256=OnzaEjKov8npQQf9YSYljPOTNBzjfwmxfW_D7f7fLkQ,28916 +openai/resources/chat/__init__.py,sha256=8Q9ODRo1wIpFa34VaNwuaWFmxqFxagDtUhIAkQNvxEU,849 +openai/resources/chat/__pycache__/__init__.cpython-310.pyc,, +openai/resources/chat/__pycache__/chat.cpython-310.pyc,, 
+openai/resources/chat/__pycache__/completions.cpython-310.pyc,, +openai/resources/chat/chat.py,sha256=hvYn24it5ARq8BYloSWn5kqqSlBEcYvVdQTf3ujxuV0,3360 +openai/resources/chat/completions.py,sha256=VL61UVRPoI7JuNj6b4k4G2g8Ew0mu2WfLJbtUbW_XuM,99603 +openai/resources/completions.py,sha256=5W3UuTH0V-vpTIkb8-r7gyS0Qp7tx3JZMWZkHBGIjPY,59460 +openai/resources/embeddings.py,sha256=PfwI3PKKPkmLs7wHijO-1pOwW6Fjs5Rqzpy0ALLYgAs,11655 +openai/resources/files.py,sha256=PL7b1lM7s3uJD7CvZcM_9f54kAlhBo913o31z1uXt-0,30093 +openai/resources/fine_tuning/__init__.py,sha256=s6uoq7gM4gwoywdOOZQkPeYiSbUl-OwpeuMhwJJk0lc,837 +openai/resources/fine_tuning/__pycache__/__init__.cpython-310.pyc,, +openai/resources/fine_tuning/__pycache__/fine_tuning.cpython-310.pyc,, +openai/resources/fine_tuning/fine_tuning.py,sha256=yfXXcR8IMRHkS-xnoT_nF7WEa2fjprDO-0ND-juPqhk,3394 +openai/resources/fine_tuning/jobs/__init__.py,sha256=_smlrwijZOCcsDWqKnofLxQM2QLucZzXgboL9zJBPHw,849 +openai/resources/fine_tuning/jobs/__pycache__/__init__.cpython-310.pyc,, +openai/resources/fine_tuning/jobs/__pycache__/checkpoints.cpython-310.pyc,, +openai/resources/fine_tuning/jobs/__pycache__/jobs.cpython-310.pyc,, +openai/resources/fine_tuning/jobs/checkpoints.py,sha256=LIJUhxb8hgxEgHdTFKdyb0Q-hnV4ccIprvFpQJI97ho,7474 +openai/resources/fine_tuning/jobs/jobs.py,sha256=kZLZaWRW6ynhLknoOaK64LW9XifzsSOpFHWX8VPjJcs,29392 +openai/resources/images.py,sha256=PS7PIe1X8tccsqLtd-4kx1OTzCow0S-C-L29bmVyV4c,25634 +openai/resources/models.py,sha256=qJj0Cpy_Ok9ELag8VxqTefX8tw7RPgIZ8-a6qllxl8w,11240 +openai/resources/moderations.py,sha256=H9tygVKuT1c25LW_XyrhpK9nlT72SsEYDiPolQBP7hs,7805 +openai/resources/uploads/__init__.py,sha256=HmY3WQgvUI2bN3CjfWHWQOk7UUC6Ozna97_lHhrrRSA,810 +openai/resources/uploads/__pycache__/__init__.cpython-310.pyc,, +openai/resources/uploads/__pycache__/parts.cpython-310.pyc,, +openai/resources/uploads/__pycache__/uploads.cpython-310.pyc,, 
+openai/resources/uploads/parts.py,sha256=NEMRVCqOOYJV2zTmBau9UtY2qXuB_yDJzzXTJ1XubUY,8150 +openai/resources/uploads/uploads.py,sha256=ft7cVZuDxphjdCV6BcS6Zs2qE3zD1RB57udvaGUR9HY,24918 +openai/types/__init__.py,sha256=GxEEa9qy8CKZVCU1wY4PokDUCq-fD_GwZxRsBxzC_-s,3177 +openai/types/__pycache__/__init__.cpython-310.pyc,, +openai/types/__pycache__/audio_model.cpython-310.pyc,, +openai/types/__pycache__/audio_response_format.cpython-310.pyc,, +openai/types/__pycache__/batch.cpython-310.pyc,, +openai/types/__pycache__/batch_create_params.cpython-310.pyc,, +openai/types/__pycache__/batch_error.cpython-310.pyc,, +openai/types/__pycache__/batch_list_params.cpython-310.pyc,, +openai/types/__pycache__/batch_request_counts.cpython-310.pyc,, +openai/types/__pycache__/chat_model.cpython-310.pyc,, +openai/types/__pycache__/completion.cpython-310.pyc,, +openai/types/__pycache__/completion_choice.cpython-310.pyc,, +openai/types/__pycache__/completion_create_params.cpython-310.pyc,, +openai/types/__pycache__/completion_usage.cpython-310.pyc,, +openai/types/__pycache__/create_embedding_response.cpython-310.pyc,, +openai/types/__pycache__/embedding.cpython-310.pyc,, +openai/types/__pycache__/embedding_create_params.cpython-310.pyc,, +openai/types/__pycache__/embedding_model.cpython-310.pyc,, +openai/types/__pycache__/file_content.cpython-310.pyc,, +openai/types/__pycache__/file_create_params.cpython-310.pyc,, +openai/types/__pycache__/file_deleted.cpython-310.pyc,, +openai/types/__pycache__/file_list_params.cpython-310.pyc,, +openai/types/__pycache__/file_object.cpython-310.pyc,, +openai/types/__pycache__/file_purpose.cpython-310.pyc,, +openai/types/__pycache__/image.cpython-310.pyc,, +openai/types/__pycache__/image_create_variation_params.cpython-310.pyc,, +openai/types/__pycache__/image_edit_params.cpython-310.pyc,, +openai/types/__pycache__/image_generate_params.cpython-310.pyc,, +openai/types/__pycache__/image_model.cpython-310.pyc,, 
+openai/types/__pycache__/images_response.cpython-310.pyc,, +openai/types/__pycache__/model.cpython-310.pyc,, +openai/types/__pycache__/model_deleted.cpython-310.pyc,, +openai/types/__pycache__/moderation.cpython-310.pyc,, +openai/types/__pycache__/moderation_create_params.cpython-310.pyc,, +openai/types/__pycache__/moderation_create_response.cpython-310.pyc,, +openai/types/__pycache__/moderation_image_url_input_param.cpython-310.pyc,, +openai/types/__pycache__/moderation_model.cpython-310.pyc,, +openai/types/__pycache__/moderation_multi_modal_input_param.cpython-310.pyc,, +openai/types/__pycache__/moderation_text_input_param.cpython-310.pyc,, +openai/types/__pycache__/upload.cpython-310.pyc,, +openai/types/__pycache__/upload_complete_params.cpython-310.pyc,, +openai/types/__pycache__/upload_create_params.cpython-310.pyc,, +openai/types/__pycache__/websocket_connection_options.cpython-310.pyc,, +openai/types/audio/__init__.py,sha256=sR9_rMb-gO0stG4ozTq6XJs714C_BfjB3KCgFvyhXVA,1050 +openai/types/audio/__pycache__/__init__.cpython-310.pyc,, +openai/types/audio/__pycache__/speech_create_params.cpython-310.pyc,, +openai/types/audio/__pycache__/speech_model.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_create_params.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_create_response.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_segment.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_verbose.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_word.cpython-310.pyc,, +openai/types/audio/__pycache__/translation.cpython-310.pyc,, +openai/types/audio/__pycache__/translation_create_params.cpython-310.pyc,, +openai/types/audio/__pycache__/translation_create_response.cpython-310.pyc,, +openai/types/audio/__pycache__/translation_verbose.cpython-310.pyc,, 
+openai/types/audio/speech_create_params.py,sha256=-iUZ3a-BGlg46IFsP_vcJBTRuK_pXruF0KJsbNn0mgU,1300 +openai/types/audio/speech_model.py,sha256=RUimvc__LYAxwEEmfrf-lj18O3EWrU1OlWZXEXN2AKY,218 +openai/types/audio/transcription.py,sha256=FP9QMwwwdqgvP3xY9P-40gBiFmMwFKxXM5yv5x8xPVk,230 +openai/types/audio/transcription_create_params.py,sha256=OP8fXaYYsi5HWi0E7HR5HIRihglsuBqeJWglxkNxLts,2264 +openai/types/audio/transcription_create_response.py,sha256=-PLGH8he9EdJtvBXV-ZrE31CLVnk4bc0VQ1ixRoN8Ck,378 +openai/types/audio/transcription_segment.py,sha256=-pPAGolwIIXUBMic-H5U7aR0u_Aq-pipSA4xTtn_viA,1153 +openai/types/audio/transcription_verbose.py,sha256=tlVK8JzyvkslQOvpAb19PmsfiRBqmbne0l-GqFmVIMU,758 +openai/types/audio/transcription_word.py,sha256=sNDdtjoqIiba6qKsD_lI2Ffs1Lr7qP9HyS59AFh5cTc,368 +openai/types/audio/translation.py,sha256=5l-Zk9Cg7AZti-TTn2-4ydsoZj2zdvDwyzzVjVp9W0g,194 +openai/types/audio/translation_create_params.py,sha256=lFQEh5IRG5XT-Z3TV7FDSNbIRqAt6yA3EsSvSsb0wsU,1585 +openai/types/audio/translation_create_response.py,sha256=x6H0yjTbZR3vd3d7LdABcn9nrMDNdeMjepcjW1oUfVc,362 +openai/types/audio/translation_verbose.py,sha256=ic6h7_fAKlyrJuCgbd4Vtr0pk9OnynQK_uobD9lAGZo,613 +openai/types/audio_model.py,sha256=pxBVwf1HGd6mW-_jd-TDVMRZtTvvCUn_rL8Pt1BXzuo,208 +openai/types/audio_response_format.py,sha256=EEItnQdwXinG8bOe1We2039Z7lp2Z8wSXXvTlFlkXzM,259 +openai/types/batch.py,sha256=Dq7btfgIT4b2yfh0knZTzAL4yFx_l95H5KLfDPO8iig,2788 +openai/types/batch_create_params.py,sha256=VXpg3mK2xwsUAIbYcFHFgRgLMrN3iBgW8l5rslk0gvQ,1441 +openai/types/batch_error.py,sha256=Xxl-gYm0jerpYyI-mKSSVxRMQRubkoLUiOP9U3v72EM,622 +openai/types/batch_list_params.py,sha256=X1_sfRspuIMSDyXWVh0YnJ9vJLeOOH66TrvgEHueC84,705 +openai/types/batch_request_counts.py,sha256=GHHrJKdJwJ3foBa1j9v5Vece_zzkdXXXgOcne8W1E30,409 +openai/types/beta/__init__.py,sha256=CbOOxDPXvdK5RInCcEiBihJ2XgaUhdm3NMBBwx90OHc,3462 +openai/types/beta/__pycache__/__init__.cpython-310.pyc,, 
+openai/types/beta/__pycache__/assistant.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_create_params.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_deleted.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_list_params.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_response_format_option.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_response_format_option_param.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_stream_event.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_choice.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_choice_function.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_choice_function_param.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_choice_option.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_choice_option_param.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_choice_param.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_param.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_update_params.cpython-310.pyc,, +openai/types/beta/__pycache__/auto_file_chunking_strategy_param.cpython-310.pyc,, +openai/types/beta/__pycache__/code_interpreter_tool.cpython-310.pyc,, +openai/types/beta/__pycache__/code_interpreter_tool_param.cpython-310.pyc,, +openai/types/beta/__pycache__/file_chunking_strategy.cpython-310.pyc,, +openai/types/beta/__pycache__/file_chunking_strategy_param.cpython-310.pyc,, +openai/types/beta/__pycache__/file_search_tool.cpython-310.pyc,, +openai/types/beta/__pycache__/file_search_tool_param.cpython-310.pyc,, +openai/types/beta/__pycache__/function_tool.cpython-310.pyc,, +openai/types/beta/__pycache__/function_tool_param.cpython-310.pyc,, +openai/types/beta/__pycache__/other_file_chunking_strategy_object.cpython-310.pyc,, 
+openai/types/beta/__pycache__/static_file_chunking_strategy.cpython-310.pyc,, +openai/types/beta/__pycache__/static_file_chunking_strategy_object.cpython-310.pyc,, +openai/types/beta/__pycache__/static_file_chunking_strategy_param.cpython-310.pyc,, +openai/types/beta/__pycache__/thread.cpython-310.pyc,, +openai/types/beta/__pycache__/thread_create_and_run_params.cpython-310.pyc,, +openai/types/beta/__pycache__/thread_create_params.cpython-310.pyc,, +openai/types/beta/__pycache__/thread_deleted.cpython-310.pyc,, +openai/types/beta/__pycache__/thread_update_params.cpython-310.pyc,, +openai/types/beta/__pycache__/vector_store.cpython-310.pyc,, +openai/types/beta/__pycache__/vector_store_create_params.cpython-310.pyc,, +openai/types/beta/__pycache__/vector_store_deleted.cpython-310.pyc,, +openai/types/beta/__pycache__/vector_store_list_params.cpython-310.pyc,, +openai/types/beta/__pycache__/vector_store_update_params.cpython-310.pyc,, +openai/types/beta/assistant.py,sha256=3w8FpWceagZoKuEQrGeitoosTrz-Z24IPiL-viWC4I4,4936 +openai/types/beta/assistant_create_params.py,sha256=Y5LoiGU9ZTWQ87KaYyrqN1TsMFT4iYsBvMNeDgciRd4,5986 +openai/types/beta/assistant_deleted.py,sha256=bTTUl5FPHTBI5nRm7d0sGuR9VCSBDZ-IbOn9G_IpmJQ,301 +openai/types/beta/assistant_list_params.py,sha256=yW-lj6AUkG0IRZQKre0veEr9p4VMN-9YdELFMYs74Cw,1222 +openai/types/beta/assistant_response_format_option.py,sha256=yNeoAWxM-_8Sjmwqu8exqyKRFhVZIKeTypetPY55VFA,561 +openai/types/beta/assistant_response_format_option_param.py,sha256=dyPMhwRSLBZ0ltpxiD7KM-9X6BzWnbGeG-nT_3SenuQ,628 +openai/types/beta/assistant_stream_event.py,sha256=vP4LDqYWzSKGcZ1JAfyNw7YqC__XsVPe0nqZ2qdn93E,6930 +openai/types/beta/assistant_tool.py,sha256=_0FC7Db4Ctq_0yLaKJ93zNTB5HthuJWEAHx3fadDRlw,506 +openai/types/beta/assistant_tool_choice.py,sha256=Hy4HIfPQCkWD8VruHHicuTkomNwljGHviQHk36prKhg,544 +openai/types/beta/assistant_tool_choice_function.py,sha256=aYMlVrZdX2JxmehDlyGALRK2PIEkO7VFEfsvY3VH6T4,270 
+openai/types/beta/assistant_tool_choice_function_param.py,sha256=-O38277LhSaqOVhTp0haHP0ZnVTLpEBvcLJa5MRo7wE,355 +openai/types/beta/assistant_tool_choice_option.py,sha256=jrXMd_IYIQ1pt8Lkc-KrPd4CR3lR8sFV4m7_lpG8A4Y,362 +openai/types/beta/assistant_tool_choice_option_param.py,sha256=VcatO5Nej9e5eqfrwetG4uM1vFoewnBEcFz47IxAK2E,424 +openai/types/beta/assistant_tool_choice_param.py,sha256=NOWx9SzZEwYaHeAyFZTQlG3pmogMNXzjPJDGQUlbv7Q,572 +openai/types/beta/assistant_tool_param.py,sha256=6DcaU3nMjurur2VkVIYcCaRAY1QLQscXXjCd0ZHHGho,501 +openai/types/beta/assistant_update_params.py,sha256=XsLdjYNx7dbPr1aqDu0_ZGuXjgU0JVuM0waJo1NskyI,4684 +openai/types/beta/auto_file_chunking_strategy_param.py,sha256=hbBtARkJXSJE7_4RqC-ZR3NiztUp9S4WuG3s3W0GpqY,351 +openai/types/beta/chat/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122 +openai/types/beta/chat/__pycache__/__init__.cpython-310.pyc,, +openai/types/beta/code_interpreter_tool.py,sha256=7mgQc9OtD_ZUnZeNhoobMFcmmvtZPFCNYGB-PEnNnfs,333 +openai/types/beta/code_interpreter_tool_param.py,sha256=X6mwzFyZx1RCKEYbBCPs4kh_tZkxFxydPMK4yFNJkLs,389 +openai/types/beta/file_chunking_strategy.py,sha256=6nRvYetBl_BHgN8biTyTut-tw8G13YttgxSKtJsJLeM,560 +openai/types/beta/file_chunking_strategy_param.py,sha256=P0x4I2hB_ylbSxFFEmRqgwto3HQQsHIokX3U0is_a9s,498 +openai/types/beta/file_search_tool.py,sha256=5aNU8RZj-UNdmuqqpjCXNaa1pI9GzSP5qCPtvVSJ1oQ,1769 +openai/types/beta/file_search_tool_param.py,sha256=o6sWPrzRYY8wtNaVuF8h3D1sAQV3N0L3dbdiiaMisW0,1765 +openai/types/beta/function_tool.py,sha256=oYGJfcfPpUohKw2ikgshDjOI1HXCK-5pAWyegYNezeU,397 +openai/types/beta/function_tool_param.py,sha256=hCclpGO4Re-TxiGy_QxX75g1kcN6_ElubicO6SdJ_YI,471 +openai/types/beta/other_file_chunking_strategy_object.py,sha256=hJz1OeSkvvcWJVftPfvz2pB5ujdawWEEa3v38E6tt7g,311 +openai/types/beta/realtime/__init__.py,sha256=OJOsvJMLlDqJEJClien1XwN8K6vhnyVtNgN1qolZeW0,6167 +openai/types/beta/realtime/__pycache__/__init__.cpython-310.pyc,, 
+openai/types/beta/realtime/__pycache__/conversation_created_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_content.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_content_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_create_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_create_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_created_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_delete_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_delete_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_deleted_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_input_audio_transcription_completed_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_input_audio_transcription_failed_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_truncate_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_truncate_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_truncated_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/error_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_append_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_append_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_clear_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_clear_event_param.cpython-310.pyc,, 
+openai/types/beta/realtime/__pycache__/input_audio_buffer_cleared_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_commit_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_commit_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_committed_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_speech_started_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_speech_stopped_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/rate_limits_updated_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_client_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_client_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_connect_params.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_response.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_response_status.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_response_usage.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_server_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_audio_delta_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_audio_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_audio_transcript_delta_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_audio_transcript_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_cancel_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_cancel_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_content_part_added_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_content_part_done_event.cpython-310.pyc,, 
+openai/types/beta/realtime/__pycache__/response_create_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_create_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_created_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_function_call_arguments_delta_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_function_call_arguments_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_output_item_added_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_output_item_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_text_delta_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_text_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session_create_params.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session_create_response.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session_created_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session_update_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session_update_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session_updated_event.cpython-310.pyc,, +openai/types/beta/realtime/conversation_created_event.py,sha256=U4-nesN8rAep2_25E2DrkXUMafQejj3NE_0llXKj5Y8,752 +openai/types/beta/realtime/conversation_item.py,sha256=av6WCjWVuRxBjccmxv4j26cd3TCKURj2a7cf8uS3P3s,2297 +openai/types/beta/realtime/conversation_item_content.py,sha256=dj0XAEPqj4UPVb3E2nIgb8bZBA-PRNK-E7o3des6wmw,1005 +openai/types/beta/realtime/conversation_item_content_param.py,sha256=CKEwY9j6ApnvfsLKrdkEFfOW1CtxUWyY9OL-rIMUNaw,927 +openai/types/beta/realtime/conversation_item_create_event.py,sha256=PNdOLjWMB2uc0tCm7QdWANXt7FWqKpgocnej2OiEjxw,976 
+openai/types/beta/realtime/conversation_item_create_event_param.py,sha256=L9e8U-3LITXlBuJ_FQfGhSDX3Jj7R3uWN1UiG7qDTec,996 +openai/types/beta/realtime/conversation_item_created_event.py,sha256=DIeG7YQ5HdKrnbnorklB1Zfsz42yRdPKDOx5TPzfvw0,722 +openai/types/beta/realtime/conversation_item_delete_event.py,sha256=p-O6R1Ku5pxZvaxhSi4YTPqLXS1SHhdLGgJuPQyPcHY,549 +openai/types/beta/realtime/conversation_item_delete_event_param.py,sha256=a17h8Hd8MxUbXT6NQg8YpTr1ICt1ztRecpfukHw4g34,569 +openai/types/beta/realtime/conversation_item_deleted_event.py,sha256=uWHSqX5ig550romSdhtROwrdQmdeN31Oz1Vpr9IuQFI,492 +openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py,sha256=7tX1hI3g0SbrXGHcaC_Y1xAzhsoziReYwlqyA8ycB3E,764 +openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py,sha256=xYNSBIyERQJ4P-5YoFF1VptfPa8JnJ0sWaH6LGsPow0,1077 +openai/types/beta/realtime/conversation_item_param.py,sha256=x12A5-yjNWodFNJEnbHKY1WJzSzX9s7EQr2c5FuYKBQ,2177 +openai/types/beta/realtime/conversation_item_truncate_event.py,sha256=1c2_BamaTkgD26eyGZJU5xwbz7lRHupqU2HqcK0VniI,943 +openai/types/beta/realtime/conversation_item_truncate_event_param.py,sha256=hSnVOSMMtLf16nn4ISHkevYCfEsiN9kNcgxXRtHa8Kc,983 +openai/types/beta/realtime/conversation_item_truncated_event.py,sha256=K4S35U85J-UNRba9nkm-7G1ReZu8gA8Sa1z0-Vlozc0,704 +openai/types/beta/realtime/error_event.py,sha256=goNkorKXUHKiYVsVunEsnaRa6_3dsDKVtrxXQtzZCmk,877 +openai/types/beta/realtime/input_audio_buffer_append_event.py,sha256=lTKWd_WFbtDAy6AdaCjeQYBV0dgHuVNNt_PbrtPB8tg,662 +openai/types/beta/realtime/input_audio_buffer_append_event_param.py,sha256=XmN2bE6jBRrkKGVPJdnPjJql5dqMPqwbmFnxo-z22JE,682 +openai/types/beta/realtime/input_audio_buffer_clear_event.py,sha256=7AfCQfMxZQ-UoQXF9edYKw5GcTELPcfvvJWWpuLS41c,489 +openai/types/beta/realtime/input_audio_buffer_clear_event_param.py,sha256=y-zfWqJsh1n6r2i0MgLDpnNC4g1dq3GCS66Twfkng38,499 
+openai/types/beta/realtime/input_audio_buffer_cleared_event.py,sha256=j9gpm7aGVmrUt48wqtvBMN8NOgtvqHciegjXjOnWm7A,429 +openai/types/beta/realtime/input_audio_buffer_commit_event.py,sha256=SLZR2xxRd6uO3IQL6-LuozkjROXiGyblKoHYQjwXk4I,493 +openai/types/beta/realtime/input_audio_buffer_commit_event_param.py,sha256=B8agXC-rUl-D-RijJ5MeTLgw43qVYzmf2_2oAVokhLY,503 +openai/types/beta/realtime/input_audio_buffer_committed_event.py,sha256=wXMxuXLw1jmT4e-FmTp6rSxcSc_4l55zO3gT7jI1Mp4,628 +openai/types/beta/realtime/input_audio_buffer_speech_started_event.py,sha256=NVp60RUsLFtte9Ilknmu_5lRk2dZp_1fXCgGHd4EvSM,861 +openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py,sha256=gszRuYQtAW8upIhd7CJZ7pxboDk-K7sqidjqxgf47q4,779 +openai/types/beta/realtime/rate_limits_updated_event.py,sha256=kBnf_p-49Q_LNdJsj0R1Szi8R4TGYAAJ_KifLuuyFZw,949 +openai/types/beta/realtime/realtime_client_event.py,sha256=TD_qJi1hNgvurWTUzG-xb27thuvUT2-2AK_pouAY3vc,1249 +openai/types/beta/realtime/realtime_client_event_param.py,sha256=qNStVbW_imzF0F8qfEHHE07AZoPIQLvjcTw9mXu4mFY,1294 +openai/types/beta/realtime/realtime_connect_params.py,sha256=AvTypkFCYmDn9qMeektVqij6cqzgovr3PpgpMalJoJ4,290 +openai/types/beta/realtime/realtime_response.py,sha256=C-3ZTF_gy40eT1eaeWIfpBS3pQC5lv3XNM_mqiLtTWg,1505 +openai/types/beta/realtime/realtime_response_status.py,sha256=gU-59Pr_58TRfMZqFzdCloc53e1qOnU4aaHY3yURUK8,1326 +openai/types/beta/realtime/realtime_response_usage.py,sha256=6XOFjCjPWioHoICZ0Q8KXuUzktQugx6WuTz0O5UvzZg,1541 +openai/types/beta/realtime/realtime_server_event.py,sha256=j8s9jdl5cARv3fVM5jEjo04f83FmNELPRS_lq5Ao_Q0,3512 +openai/types/beta/realtime/response_audio_delta_event.py,sha256=UjbnK4u_WSNTOColZj8SmJgHnAc2H8iRXD76ZnPbz7E,742 +openai/types/beta/realtime/response_audio_done_event.py,sha256=1XEWBPh1JiOgyr6V03mRt_3sLm0YFUq5ft1AhfFlNEg,679 +openai/types/beta/realtime/response_audio_transcript_delta_event.py,sha256=HEVNQ_R2_Nyo6BvNvsliMnN__b17eVd2Jx5udRHg0Hg,773 
+openai/types/beta/realtime/response_audio_transcript_done_event.py,sha256=Cn5l4mJnKK3LeSN9qFL4LLqs1WOWg4kt1SaYThB-5c0,787 +openai/types/beta/realtime/response_cancel_event.py,sha256=EKx8IZUISJHdl-_3tCdHtz2BINQ85Tq_ocadnsEGPSk,637 +openai/types/beta/realtime/response_cancel_event_param.py,sha256=nidzBL83liHwyImiNGiz9Ad0V34EtFAQDw1utqcF6ns,630 +openai/types/beta/realtime/response_content_part_added_event.py,sha256=a8-rm1NAwX685fk7GdT6Xi0Yr-JfeAkyUr94-RoFe34,1232 +openai/types/beta/realtime/response_content_part_done_event.py,sha256=jO2TZygxPabbnEG9E1AfNP-JYJv1QtCMnCzgcZ_3n18,1190 +openai/types/beta/realtime/response_create_event.py,sha256=rMqjpCY5C6-new7fmlnciUriLZv3GzgJdgRPWlaX58k,4493 +openai/types/beta/realtime/response_create_event_param.py,sha256=zgKTR7n_0nPkxWPG0Og5DzlUZegYbIne1SvIsdW9WMU,4338 +openai/types/beta/realtime/response_created_event.py,sha256=zZtHx-1YjehXxX6aNE88SFINDaKOBzpzejo6sTNjq9g,506 +openai/types/beta/realtime/response_done_event.py,sha256=_yUPoECCli89iHLtV3NQkXQOW6Lc1JlxVPFw04ziBGY,494 +openai/types/beta/realtime/response_function_call_arguments_delta_event.py,sha256=Yh2mQZDucfnTLiO8LRyG9r7zeS1sjwLcMF1JPMdTFJc,793 +openai/types/beta/realtime/response_function_call_arguments_done_event.py,sha256=kxSPK6nbNWL6pxveY7zaNGgCkCXqyBFJPVYJrw9cbOw,793 +openai/types/beta/realtime/response_output_item_added_event.py,sha256=-_BZjvAqcgv3NIz-EMhvYMxIwvcXTt68FVNp0pw09dI,713 +openai/types/beta/realtime/response_output_item_done_event.py,sha256=0ClNVMZmeIxKghlEid9VGoWiZ97wp00hIdNnev4qBD8,709 +openai/types/beta/realtime/response_text_delta_event.py,sha256=B1yyuc6iMOMoG5Wh6W5KoQNYtVD1vEm2cKqHnl2CuFQ,721 +openai/types/beta/realtime/response_text_done_event.py,sha256=mPgVG6nWxwkZ3aZOX-JkVF7CpaWP5-bvtbxFrr4fK7g,724 +openai/types/beta/realtime/session.py,sha256=OqdK0L7ugOYV0PT2XlixRERimneHIF7-oHUh1JWtK70,5388 +openai/types/beta/realtime/session_create_params.py,sha256=ALVf2hYtcdZjl2A5LJyjiyPfqSFEAeTIVkELbeSaH-g,5161 
+openai/types/beta/realtime/session_create_response.py,sha256=iyovJfORab-aDJJKE8PN--VQeCBR3VlnyV1tf4qE-K0,5411 +openai/types/beta/realtime/session_created_event.py,sha256=rTElnBlE7z1htmkdmpdPN4q_dUYS6Su4BkmsqO65hUc,489 +openai/types/beta/realtime/session_update_event.py,sha256=VwRvNgu-otI5_0xnXso1gqlCEWFnqzrGq9-Kar_o71Q,5751 +openai/types/beta/realtime/session_update_event_param.py,sha256=NmDaFhVTohrHi-yRd1x883NUjGH3N6ZWyfpfJ0tEpTQ,5573 +openai/types/beta/realtime/session_updated_event.py,sha256=HyR-Pz3U9finVO-bUCvnmeqsANw-fceNvVqEIF6ey10,489 +openai/types/beta/static_file_chunking_strategy.py,sha256=nHaLv70q1rencY2u8mqS7mW7X7enzHrc-zM9mg22dHw,597 +openai/types/beta/static_file_chunking_strategy_object.py,sha256=aOPxudte299F0j3bzniXcKJ7j-w4ZfQpgFHTa3CFyZ8,425 +openai/types/beta/static_file_chunking_strategy_param.py,sha256=kCMmgyOxO0XIF2wjCWjUXtyn9S6q_7mNmyUCauqrjsg,692 +openai/types/beta/thread.py,sha256=9wxx6M26S7cilx5SKWjZnkHc7g222AIOhikd0WTJfwI,2014 +openai/types/beta/thread_create_and_run_params.py,sha256=NHkj-IMm2WEqH82i9zxqgJqYkOVCBVXSpZcpl-SVznY,13175 +openai/types/beta/thread_create_params.py,sha256=U0gNXfSltPqYF3GIGQ7dloolkz6nzuDimXF-V9wjzvo,4970 +openai/types/beta/thread_deleted.py,sha256=MaYG_jZIjSiB9h_ZBiTtpMsRSwFKkCY83ziM5GO_oUk,292 +openai/types/beta/thread_update_params.py,sha256=olIjwn1eD0H2AkjdDZC38lPdT5dp2ORSjavPA7pB_08,1751 +openai/types/beta/threads/__init__.py,sha256=0WsJo0tXp08CgayozR7Tqc3b8sqzotWzvBun19CEIWc,3066 +openai/types/beta/threads/__pycache__/__init__.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/annotation.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/annotation_delta.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/file_citation_annotation.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/file_citation_delta_annotation.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/file_path_annotation.cpython-310.pyc,, 
+openai/types/beta/threads/__pycache__/file_path_delta_annotation.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_file.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_file_content_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_file_content_block_param.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_file_delta.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_file_delta_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_file_param.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_url.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_url_content_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_url_content_block_param.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_url_delta.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_url_delta_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_url_param.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_content.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_content_delta.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_content_part_param.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_create_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_deleted.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_delta.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_delta_event.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_list_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_update_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/refusal_content_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/refusal_delta_block.cpython-310.pyc,, 
+openai/types/beta/threads/__pycache__/required_action_function_tool_call.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/run.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/run_create_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/run_list_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/run_status.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/run_submit_tool_outputs_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/run_update_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/text.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/text_content_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/text_content_block_param.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/text_delta.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/text_delta_block.cpython-310.pyc,, +openai/types/beta/threads/annotation.py,sha256=Ce3Y0mSodmYRkoqyhtyIdep6WfWew6KJJgtrENOnfek,462 +openai/types/beta/threads/annotation_delta.py,sha256=iNsE-1Gn1yU0TlTHoxqKbOvPRUxWuXsF72qY_mMnWGY,510 +openai/types/beta/threads/file_citation_annotation.py,sha256=0Rs1Sr-eCLQpLsu8-WwHG7kv5Ihud4kiHO1NL7xHO0s,595 +openai/types/beta/threads/file_citation_delta_annotation.py,sha256=R87tcXkJ0RiH5UJo0Qknwk7X_c4qF1qvGsu2spOPx-I,873 +openai/types/beta/threads/file_path_annotation.py,sha256=hNc4ebprJynqMG1yk0gLvgzTpjtVzgEbXriMZftkgew,552 +openai/types/beta/threads/file_path_delta_annotation.py,sha256=RW9dgDF9Ggf357fPZ-vUu2ge3U-Hf11DVTr-ecklsBY,755 +openai/types/beta/threads/image_file.py,sha256=QVXLiplb-CigZqdMZtXlmebXKt6tF74kI-3vHxe_qUE,707 +openai/types/beta/threads/image_file_content_block.py,sha256=31I5trSERP2qLZpJ4ugZtIyta4DDoBhBvxkM4LovL3w,363 +openai/types/beta/threads/image_file_content_block_param.py,sha256=3ryZ6AV-DLwWYVP2XSK11UHkvutTUollxn6z8BZ4rSA,445 +openai/types/beta/threads/image_file_delta.py,sha256=nUJoSuP-3YyqqwBsmPJ0AqiQydz2FymVDCXQVkNYwOk,734 
+openai/types/beta/threads/image_file_delta_block.py,sha256=XJ2YVX_cq0OiNcGbNmXO0_dca1IvPockOvvoM7pDvbI,492 +openai/types/beta/threads/image_file_param.py,sha256=BaKD31JPxQ5CjRfZ_0RcOG3lDTZeW_k85XCvwyctD54,717 +openai/types/beta/threads/image_url.py,sha256=EzEK-CYoO0YyqFmejIPu7pMfTEgMmp5NFscsRd2pCos,592 +openai/types/beta/threads/image_url_content_block.py,sha256=_sg3BWrtVGw-8XtAh15Rs4co6NCBB9Y3zCp_XOAz4U8,365 +openai/types/beta/threads/image_url_content_block_param.py,sha256=RWzo5KkBiwvgJSviZl6JUlsfv3VQKIFr6cp9lhkLu8E,447 +openai/types/beta/threads/image_url_delta.py,sha256=MXCp-OmuNT4njbWA9DWAbocP7pD3VpdcUy2wgeOjwm4,582 +openai/types/beta/threads/image_url_delta_block.py,sha256=Jjdfub4g9ceNKF8GuuTIghOmYba2vEeX3320mg5PWIA,484 +openai/types/beta/threads/image_url_param.py,sha256=VRLaxZf-wxnvAOcKGwyF_o6KEvwktBfE3B6KmYE5LZo,602 +openai/types/beta/threads/message.py,sha256=aGWe0kiNv5sXUYheJ0o1KpTds4oTaeDmqot1PMStJCE,3295 +openai/types/beta/threads/message_content.py,sha256=b8IC_EG28hcXk28z09EABfJwPkYZ7U-lTp_9ykdoxvU,630 +openai/types/beta/threads/message_content_delta.py,sha256=o4Edlx9BtdH2Z4OMwGWWXex8wiijknNRihJ-wu8PDUQ,615 +openai/types/beta/threads/message_content_part_param.py,sha256=RXrnoDP2-UMQHoR2jJvaT3JHrCeffLi6WzXzH05cDGI,550 +openai/types/beta/threads/message_create_params.py,sha256=WYfc_-kc7lxcxdpwKCVT2Ei-5Jl_132uqOHMtXL92OE,1957 +openai/types/beta/threads/message_deleted.py,sha256=DNnrSfGZ3kWEazmo4mVTdLhiKlIHxs-D8Ef5sNdHY1o,303 +openai/types/beta/threads/message_delta.py,sha256=-kaRyvnIA8Yr2QV5jKRn15BU2Ni068a_WtWJ4PqlLfE,570 +openai/types/beta/threads/message_delta_event.py,sha256=7SpE4Dd3Lrc_cm97SzBwZzGGhfLqiFViDeTRQz-5YmQ,579 +openai/types/beta/threads/message_list_params.py,sha256=iuwzDccnViooUxHlq-WoE1FEJArNy5-zrYCoaNgVS8k,1296 +openai/types/beta/threads/message_update_params.py,sha256=jTM_WDKDuPVJKlNKlT6J_UqQjgM2vrrD03ZhvHI5bSY,630 +openai/types/beta/threads/refusal_content_block.py,sha256=qB9jrS2Wv9UQ7XXaIVKe62dTAU1WOnN3qenR_E43mhg,310 
+openai/types/beta/threads/refusal_delta_block.py,sha256=ZhgFC8KqA9LIwo_CQIX-w3VVg3Vj0h71xC1Hh1bwmnU,423 +openai/types/beta/threads/required_action_function_tool_call.py,sha256=XsR4OBbxI-RWteLvhcLEDBan6eUUGvhLORFRKjPbsLg,888 +openai/types/beta/threads/run.py,sha256=GR469hvbAlWTHL17MieCYxQfASyxaY1ZOe6Qbf0ORMI,8218 +openai/types/beta/threads/run_create_params.py,sha256=KgltVibs_KnKsL3UaZyVJgb-6aUxct7CXUtqMdkTXTM,9670 +openai/types/beta/threads/run_list_params.py,sha256=TgepSLrupUUtuQV2kbVcoGH1YA0FVUX9ESkszKuwyHY,1210 +openai/types/beta/threads/run_status.py,sha256=OU1hzoyYXaRJ3lupX4YcZ-HZkTpctNE4tzAcp6X8Q9U,351 +openai/types/beta/threads/run_submit_tool_outputs_params.py,sha256=cKiyD374BsZN_Oih5o5n5gOf_DYsxErVrbgxveNhmPI,1643 +openai/types/beta/threads/run_update_params.py,sha256=EDYJO3YuH1IKjfR1xAaBtWFonNnyXJDYAnlaMnwyXo8,622 +openai/types/beta/threads/runs/__init__.py,sha256=mg_roY9yL1bClJ8isizkQgHOAkN17iSdVr2m65iyBrs,1653 +openai/types/beta/threads/runs/__pycache__/__init__.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/code_interpreter_logs.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/code_interpreter_output_image.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/code_interpreter_tool_call.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/code_interpreter_tool_call_delta.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/file_search_tool_call.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/file_search_tool_call_delta.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/function_tool_call.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/function_tool_call_delta.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/message_creation_step_details.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/run_step.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/run_step_delta.cpython-310.pyc,, 
+openai/types/beta/threads/runs/__pycache__/run_step_delta_event.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/run_step_delta_message_delta.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/run_step_include.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/step_list_params.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/step_retrieve_params.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/tool_call.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/tool_call_delta.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/tool_call_delta_object.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/tool_calls_step_details.cpython-310.pyc,, +openai/types/beta/threads/runs/code_interpreter_logs.py,sha256=7wXZpUE9I-oZJ0K3mFG0Nwmfm2bKGiSpWJyBeo7txwo,482 +openai/types/beta/threads/runs/code_interpreter_output_image.py,sha256=8o99k0ZHMHpqH0taXkOkYR9WaDUpCN-G0Ifd5XsJpb8,613 +openai/types/beta/threads/runs/code_interpreter_tool_call.py,sha256=ekiIuH1kVCN51hCzY3AYr5i3_a4vlgUiZHJ59pl17oY,1810 +openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py,sha256=Qr2cen-bKyXTW2NDEUHnmJRE0jY-nkLcnO4NzCbBPDo,1479 +openai/types/beta/threads/runs/file_search_tool_call.py,sha256=XBgsM_USVr3ZrwTZx4L1-YG94Qv8c8GXI19ZHtDrZq8,1897 +openai/types/beta/threads/runs/file_search_tool_call_delta.py,sha256=Gx8c7GSgGYuOvGadcAr3ZIspEFMZS3e2OY7vBo_MYnM,655 +openai/types/beta/threads/runs/function_tool_call.py,sha256=aOq5yOtKOi6C5Q1FIQRxqtJJR1AcSW_K5PvRiKISNCI,920 +openai/types/beta/threads/runs/function_tool_call_delta.py,sha256=VFRtCJkj4PHX97upM1cXpJAk9-JvJSgyngie06fBIjQ,1076 +openai/types/beta/threads/runs/message_creation_step_details.py,sha256=tRFMNF2Rf4DekVliUKkoujItiOjjAE9EG9bbxJvpVPA,506 +openai/types/beta/threads/runs/run_step.py,sha256=L_CiwlW9y7NEOTumv1RyoQrQ_oCaNowRmraUHiAgJEc,3469 +openai/types/beta/threads/runs/run_step_delta.py,sha256=FNYDTddRrTO3PT_fgi7AsJ1PeMtyWsVzcxoihjbBzAw,663 
+openai/types/beta/threads/runs/run_step_delta_event.py,sha256=rkDyvHSXt-hc1LngB41f9vglkn6t03kS62bsn0iGaxU,585 +openai/types/beta/threads/runs/run_step_delta_message_delta.py,sha256=UIo6oPH8STLjPHiWL-A4CtKfYe49uptvIAHWNnZ3Ums,564 +openai/types/beta/threads/runs/run_step_include.py,sha256=u-9Cw1hruRiWr70f_hw4XG0w1cwOAYfRJYKva2dEacs,264 +openai/types/beta/threads/runs/step_list_params.py,sha256=zorF5juogCzLMsZLjzMZTs_iIBcPj9WUur5HcrXuH8M,1752 +openai/types/beta/threads/runs/step_retrieve_params.py,sha256=aJ7l8RDJLPyEmqjfO4XsTV54VZOOqyb_gKSUvqp33ZI,815 +openai/types/beta/threads/runs/tool_call.py,sha256=1rwq4IbLgjQAQ-ORXYkNpmJyi9SREDnqA57nJbj_NiU,537 +openai/types/beta/threads/runs/tool_call_delta.py,sha256=t5wF8ndW3z99lHF981FL-IN5xXBS9p7eonH9bxvKu_c,600 +openai/types/beta/threads/runs/tool_call_delta_object.py,sha256=eK20VsIswEyT48XbkGu60HUrE7OD3fhpn1fbXrVauM4,615 +openai/types/beta/threads/runs/tool_calls_step_details.py,sha256=bDa-yybVF3a8H6VqhDGmFZMkpn-0gtPQM2jWWsmUvYo,574 +openai/types/beta/threads/text.py,sha256=9gjmDCqoptnxQ8Jhym87pECyd6m1lB3daCxKNzSFp4Y,319 +openai/types/beta/threads/text_content_block.py,sha256=pdGlKYM1IF9PjTvxjxo1oDg1XeGCFdJdl0kJVpZ7jIs,319 +openai/types/beta/threads/text_content_block_param.py,sha256=feQr0muF845tc1q3FJrzgYOhXeuKLU3x1x5DGFTN2Q0,407 +openai/types/beta/threads/text_delta.py,sha256=2EFeQCkg_cc8nYEJ6BtYAA3_TqgMTbmEXoMvLjzaB34,389 +openai/types/beta/threads/text_delta_block.py,sha256=pkHkVBgNsmHi9JURzs5ayPqxQXSkex3F0jH0MqJXik0,448 +openai/types/beta/vector_store.py,sha256=R8M70uuGWVKt4t0ef__Py-MPw33Ljx4sh5ddihJMbIU,2354 +openai/types/beta/vector_store_create_params.py,sha256=rvvYUSDBbc5L6PAiMGSFQD85ugyR9mLdvZMxjap0fnk,1600 +openai/types/beta/vector_store_deleted.py,sha256=Yq0E1orRLShseLwZ1deiBdDEUgEw_tcYVxGYa5gbIrM,308 +openai/types/beta/vector_store_list_params.py,sha256=KeSeQaEdqO2EiPEVtq1Nun-uRRdkfwW0P8aHeCmL5zA,1226 +openai/types/beta/vector_store_update_params.py,sha256=6OEP1IvilrGoPhHQPXOMQA0TwmCubeo7rB_ik5GQSrY,1115 
+openai/types/beta/vector_stores/__init__.py,sha256=gXfm8V5Ad0iueaC_VoHDUQvSdwSfBzk2cQNwZldvY0s,671 +openai/types/beta/vector_stores/__pycache__/__init__.cpython-310.pyc,, +openai/types/beta/vector_stores/__pycache__/file_batch_create_params.cpython-310.pyc,, +openai/types/beta/vector_stores/__pycache__/file_batch_list_files_params.cpython-310.pyc,, +openai/types/beta/vector_stores/__pycache__/file_create_params.cpython-310.pyc,, +openai/types/beta/vector_stores/__pycache__/file_list_params.cpython-310.pyc,, +openai/types/beta/vector_stores/__pycache__/vector_store_file.cpython-310.pyc,, +openai/types/beta/vector_stores/__pycache__/vector_store_file_batch.cpython-310.pyc,, +openai/types/beta/vector_stores/__pycache__/vector_store_file_deleted.cpython-310.pyc,, +openai/types/beta/vector_stores/file_batch_create_params.py,sha256=lV4t5kikvEhl431RZgGDyQdFKTl-zXI-Q7YnbM0Qmv8,798 +openai/types/beta/vector_stores/file_batch_list_files_params.py,sha256=FPpQvCQI2skyLB8YCuwdCj7RbO9ba1UjaHAtvrWxAbs,1451 +openai/types/beta/vector_stores/file_create_params.py,sha256=kwSqe-le2UaYrcXGPxlP41QhH2OGvLXBbntAGlmK288,748 +openai/types/beta/vector_stores/file_list_params.py,sha256=AIzmNH1oFuy-qlpRhj9eXu9yyTA-2z_IppLYFclMtZw,1385 +openai/types/beta/vector_stores/vector_store_file.py,sha256=X8aQg4jYlK7iQumxn7B-eammIKVjUbu4lapPeq9jDWo,1788 +openai/types/beta/vector_stores/vector_store_file_batch.py,sha256=ubvj8z95EOdRGAp0rgI94g5uFQx0ob8hLgwOWHKda4E,1457 +openai/types/beta/vector_stores/vector_store_file_deleted.py,sha256=37J7oL2WYCgOd7Rhg2jX6IavaZT63vgUf3u6LC6C3Hs,322 +openai/types/chat/__init__.py,sha256=coi_C98uX9XhThMVJ0GgjPVpzOYOMgj-ZmCWulEE3EA,3849 +openai/types/chat/__pycache__/__init__.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_assistant_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_audio.cpython-310.pyc,, 
+openai/types/chat/__pycache__/chat_completion_audio_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_chunk.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_image_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_input_audio_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_refusal_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_text_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_developer_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_function_call_option_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_function_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_message.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_message_tool_call.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_message_tool_call_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_modality.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_named_tool_choice_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_prediction_content_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_reasoning_effort.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_role.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_stream_options_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_system_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_token_logprob.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_tool_choice_option_param.cpython-310.pyc,, 
+openai/types/chat/__pycache__/chat_completion_tool_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_tool_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_user_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/completion_create_params.cpython-310.pyc,, +openai/types/chat/__pycache__/parsed_chat_completion.cpython-310.pyc,, +openai/types/chat/__pycache__/parsed_function_tool_call.cpython-310.pyc,, +openai/types/chat/chat_completion.py,sha256=MaTVOMwtbzqGyHgyP4DP41ESEDKhv_XOM8L_fx3uoQE,2689 +openai/types/chat/chat_completion_assistant_message_param.py,sha256=E6ZrsjEN_JHOHO-wC7Uk90Fa7Qz7bfgx8jea0z6g30s,2421 +openai/types/chat/chat_completion_audio.py,sha256=vzWeaAAAbomkvbFksXQu6qpw1RVJiuFytJZswO6h6vI,656 +openai/types/chat/chat_completion_audio_param.py,sha256=MnY4PNK8-OOaODkHNhBbSbzH4HmqykKvwftsOjVpOAE,801 +openai/types/chat/chat_completion_chunk.py,sha256=aQXFY4gq9YEIrr7YBM68D5XyWGT9kKo0JO8n-55IjEA,5032 +openai/types/chat/chat_completion_content_part_image_param.py,sha256=Gqv98qyD8jB81THZp49c8v2tHrId_iQp4NzciT9SKI0,797 +openai/types/chat/chat_completion_content_part_input_audio_param.py,sha256=r1EXNEtjJo5oJ9AnP3omaJzACE1gSfdmob5Q0HKsOm4,704 +openai/types/chat/chat_completion_content_part_param.py,sha256=7lCk-fZB5iT5keHLWw9eM-Hd5jsnPh2IIHICIUpoEXk,686 +openai/types/chat/chat_completion_content_part_refusal_param.py,sha256=TV1vu-IgrvKa5IBlPSIdBxUaW8g1zDhMOOBOEmhU2w0,467 +openai/types/chat/chat_completion_content_part_text_param.py,sha256=4IpiXMKM9AuTyop5PRptPBbBhh9s93xy2vjg4Yw6NIw,429 +openai/types/chat/chat_completion_developer_message_param.py,sha256=OCFKdTWkff94VtgY7AaDUUFiZLT8LBn7WWxjbcIq2OM,830 +openai/types/chat/chat_completion_function_call_option_param.py,sha256=M-IqWHyBLkvYBcwFxxp4ydCIxbPDaMlNl4bik9UoFd4,365 +openai/types/chat/chat_completion_function_message_param.py,sha256=jIaZbBHHbt4v4xHCIyvYtYLst_X4jOznRjYNcTf0MF0,591 
+openai/types/chat/chat_completion_message.py,sha256=AH7JpjgKfphxBRJyI4PhwHCMREy_-D-a4_4u4NHjSfc,1674 +openai/types/chat/chat_completion_message_param.py,sha256=aLrz_cX_CYymFdW9cMIPZpv0Z4zM50RECV3SH6QNZsc,1019 +openai/types/chat/chat_completion_message_tool_call.py,sha256=XlIe2vhSYvrt8o8Yol5AQqnacI1xHqpEIV26G4oNrZY,900 +openai/types/chat/chat_completion_message_tool_call_param.py,sha256=XNhuUpGr5qwVTo0K8YavJwleHYSdwN_urK51eKlqC24,1009 +openai/types/chat/chat_completion_modality.py,sha256=8Ga0kruwJc43WD2OIqNudn7KrVRTPDQaalVkh_8bp9I,236 +openai/types/chat/chat_completion_named_tool_choice_param.py,sha256=JsxfSJYpOmF7zIreQ0JrXRSLp07OGCBSycRRcF6OZmg,569 +openai/types/chat/chat_completion_prediction_content_param.py,sha256=Xw4K_4F379LsXENOpZvREDn55cCnbmZ69xa4fw9w3bg,868 +openai/types/chat/chat_completion_reasoning_effort.py,sha256=Bs4xRaukXpM-_NW-QSKKnUyIPDw1ffSqnWaHU-rMdIE,258 +openai/types/chat/chat_completion_role.py,sha256=Rdzg4deI1uZmqgkwnMrLHvbV2fPRqKcHLQrVmKVk9Dw,262 +openai/types/chat/chat_completion_stream_options_param.py,sha256=7-R2mYh7dbtX9qDOL3UkeyVH6FNWC_4aTCLtHYObMbs,628 +openai/types/chat/chat_completion_system_message_param.py,sha256=WYtzmsNP8ZI3Ie8cd-oU7RuNoaBF6-bBR3mOzST9hMw,815 +openai/types/chat/chat_completion_token_logprob.py,sha256=6-ipUFfsXMf5L7FDFi127NaVkDtmEooVgGBF6Ts965A,1769 +openai/types/chat/chat_completion_tool_choice_option_param.py,sha256=ef71WSM9HMQhIQUocRgVJUVW-bSRwK2_1NjFSB5TPiI,472 +openai/types/chat/chat_completion_tool_message_param.py,sha256=5K7jfKpwTuKNi1PTFabq_LHH-7wun8CUsLDh90U8zQE,730 +openai/types/chat/chat_completion_tool_param.py,sha256=J9r2TAWygkIBDInWEKx29gBE0wiCgc7HpXFyQhxSkAU,503 +openai/types/chat/chat_completion_user_message_param.py,sha256=mik-MRkwb543C5FSJ52LtTkeA2E_HdLUgtoHEdO73XQ,792 +openai/types/chat/completion_create_params.py,sha256=CGwTjckVhpxaQfA9zRKmrMCHvnYk-eaPFVmSVoA5Nls,13926 +openai/types/chat/parsed_chat_completion.py,sha256=KwcwCtj0yexl6gB7yuOnyETRW-uUvNRYbVzPMkwCe5Q,1437 
+openai/types/chat/parsed_function_tool_call.py,sha256=hJzcKOpzf1tnXC6RGbPhaeCawq8EFdnLK_MfRITkW1U,920 +openai/types/chat_model.py,sha256=k9Ic_l5usRyY6xSHnqe4dBMKM5R4klTGuANg6z88WFk,1107 +openai/types/completion.py,sha256=yuYVEVkJcMVUINNLglkxOJqCx097HKCYFeJun3Js73A,1172 +openai/types/completion_choice.py,sha256=PUk77T3Cp34UJSXoMfSzTKGWDK0rQQwq84X_PSlOUJo,965 +openai/types/completion_create_params.py,sha256=TWNRWlGAcvirzY3Piy6AeYKyNxG7ktmtwjS27Q4bTi8,7535 +openai/types/completion_usage.py,sha256=uf5n0vzlCkGAU67BBn_h7yhjd_G4OHpQbJnvzz0eO2A,1735 +openai/types/create_embedding_response.py,sha256=lTAu_Pym76kFljDnnDRoDB2GNQSzWmwwlqf5ff7FNPM,798 +openai/types/embedding.py,sha256=2pV6RTSf5UV6E86Xeud5ZwmjQjMS93m_4LrQ0GN3fho,637 +openai/types/embedding_create_params.py,sha256=C9Tm1C_m96QtjyNc8fiy6wzs9HkM2GUF8CSTSS6V7ks,1850 +openai/types/embedding_model.py,sha256=0dDL87len4vZ4DR6eCp7JZJCJpgwWphRmJhMK3Se8f4,281 +openai/types/file_content.py,sha256=qLlM4J8kgu1BfrtlmYftPsQVCJu4VqYeiS1T28u8EQ8,184 +openai/types/file_create_params.py,sha256=N1I3rER1se27usx46fhkvdtn-blJ6Y9ECT7Wwzve37Q,913 +openai/types/file_deleted.py,sha256=H_r9U7XthT5xHAo_4ay1EGGkc21eURt8MkkIBRYiQcw,277 +openai/types/file_list_params.py,sha256=TmmqvM7droAJ49YlgpeFzrhPv5uVkSZDxqlG6hhumPo,960 +openai/types/file_object.py,sha256=ESuRYCTLbDtHxyuhzybKTF_TztIcq_F7TzCTQ6JToE0,1309 +openai/types/file_purpose.py,sha256=o1TzR-41XsNsQ0791GTGPe3DLkU9FEODucKdP6Q6sPc,243 +openai/types/fine_tuning/__init__.py,sha256=SZvjq_22oY9E4zcnrvVd0ul9U4sk_IBeOd0MsNALu5s,806 +openai/types/fine_tuning/__pycache__/__init__.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/fine_tuning_job.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/fine_tuning_job_event.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/fine_tuning_job_integration.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/fine_tuning_job_wandb_integration.cpython-310.pyc,, 
+openai/types/fine_tuning/__pycache__/fine_tuning_job_wandb_integration_object.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/job_create_params.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/job_list_events_params.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/job_list_params.cpython-310.pyc,, +openai/types/fine_tuning/fine_tuning_job.py,sha256=bu-afb1RZqgNmpUQ7MoXymTjFs3i5JSsBLMV4TKHhi8,6473 +openai/types/fine_tuning/fine_tuning_job_event.py,sha256=POxSD7-WxAtJV2KuEpA9EmZi7W_u0PikOUtUzxIXii4,854 +openai/types/fine_tuning/fine_tuning_job_integration.py,sha256=c3Uy7RMVJ32Xlat-6s9eG-5vZLl4w66COXc0B3pWk4g,242 +openai/types/fine_tuning/fine_tuning_job_wandb_integration.py,sha256=YnBeiz14UuhUSpnD0KBj5V143qLvJbDIMcUVWOCBLXY,1026 +openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py,sha256=7vEc2uEV2c_DENBjhq0Qy5X8B-rzxsKvGECjnvF1Wdw,804 +openai/types/fine_tuning/job_create_params.py,sha256=TwQlyQrZfxrgqD7nmJDWE8pwklsdUUmkYaitvB7LY34,7222 +openai/types/fine_tuning/job_list_events_params.py,sha256=4xOED4H2ky2mI9sIDytjmfJz5bNAdNWb70WIb_0bBWs,400 +openai/types/fine_tuning/job_list_params.py,sha256=yjxaEnESVTRpJ9ItvjKq30KcD_xz_trqKMIxG2eAriE,396 +openai/types/fine_tuning/jobs/__init__.py,sha256=nuWhOUsmsoVKTKMU35kknmr8sfpTF-kkIzyuOlRbJj0,295 +openai/types/fine_tuning/jobs/__pycache__/__init__.cpython-310.pyc,, +openai/types/fine_tuning/jobs/__pycache__/checkpoint_list_params.cpython-310.pyc,, +openai/types/fine_tuning/jobs/__pycache__/fine_tuning_job_checkpoint.cpython-310.pyc,, +openai/types/fine_tuning/jobs/checkpoint_list_params.py,sha256=XoDLkkKCWmf5an5rnoVEpNK8mtQHq1fHw9EqmezfrXM,415 +openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py,sha256=Z_sUhebJY9nWSssZU7QoOJwe5sez76sCAuVeSO63XhY,1347 +openai/types/image.py,sha256=9No-8GHesOUbjchemY1jqtMwh_s22oBmLVFlLn2KoQo,607 +openai/types/image_create_variation_params.py,sha256=PvvPvHXvz0etrRrzVIyvRjvDvNbjGspPu85hOq2fLII,1477 
+openai/types/image_edit_params.py,sha256=cxpBybs5peY0DJMTWHgoIx3dWIXj0Y0YmvgxrjGmWjo,1837 +openai/types/image_generate_params.py,sha256=bD2AEIetbt37YDp65vEFfGxkLndOFCwhzJol1I63wfA,2132 +openai/types/image_model.py,sha256=W4YchkhJT2wZdlNDUpVkEKg8zdDDfp9S3oTf4D8Wr8g,219 +openai/types/images_response.py,sha256=EJ4qxYZ8CPGh2SZdRsyw6I0FnUvlgwxwc4NgPovJrvk,274 +openai/types/model.py,sha256=DMw8KwQx8B6S6sAI038D0xdzkmYdY5-r0oMhCUG4l6w,532 +openai/types/model_deleted.py,sha256=tXZybg03DunoOSYvwhT7zKj7KTN42R0VEs_-3PRliMo,229 +openai/types/moderation.py,sha256=6CZmxhZiafnT50gKa7BeybrTSoYfCAk7wvD5CQHvBP0,6789 +openai/types/moderation_create_params.py,sha256=EaZ2cej25g5WbRB2kIY7JFCXQPKSQQ95iyoUAAelGr4,992 +openai/types/moderation_create_response.py,sha256=e6SVfWX2_JX25Za0C6KojcnbMTtDB2A7cjUm6cFMKcs,484 +openai/types/moderation_image_url_input_param.py,sha256=t1r9WD3c-CK2Al1lpB4-DjfzLFSwgETR0g8nsRdoL0Y,622 +openai/types/moderation_model.py,sha256=BFeqSyel2My2WKC6MCa_mAIHJx4uXU3-p8UNudJANeM,319 +openai/types/moderation_multi_modal_input_param.py,sha256=RFdiEPsakWIscutX896ir5_rnEA2TLX5xQkjO5QR2vs,483 +openai/types/moderation_text_input_param.py,sha256=ardCbBcdaULf8bkFuzkSKukV9enrINSjNWvb7m0LjZg,406 +openai/types/shared/__init__.py,sha256=34RJ2IUXj0f3B73a6rqeHILu8AH5-sC8npTbEx_bnk8,551 +openai/types/shared/__pycache__/__init__.cpython-310.pyc,, +openai/types/shared/__pycache__/error_object.cpython-310.pyc,, +openai/types/shared/__pycache__/function_definition.cpython-310.pyc,, +openai/types/shared/__pycache__/function_parameters.cpython-310.pyc,, +openai/types/shared/__pycache__/response_format_json_object.cpython-310.pyc,, +openai/types/shared/__pycache__/response_format_json_schema.cpython-310.pyc,, +openai/types/shared/__pycache__/response_format_text.cpython-310.pyc,, +openai/types/shared/error_object.py,sha256=G7SGPZ9Qw3gewTKbi3fK69eM6L2Ur0C2D57N8iEapJA,305 +openai/types/shared/function_definition.py,sha256=8a5uHoIKrkrwTgfwTyE9ly4PgsZ3iLA_yRUAjubTb7Y,1447 
+openai/types/shared/function_parameters.py,sha256=Dkc_pm98zCKyouQmYrl934cK8ZWX7heY_IIyunW8x7c,236 +openai/types/shared/response_format_json_object.py,sha256=15KTCXJ0o1W4c5V1vAcOQAx-u0eoIfAjxrHLoN3NuE4,344 +openai/types/shared/response_format_json_schema.py,sha256=rZS7diOPeqK48O_R6OYMJ6AtSGy_88PKTxzha6_56Fo,1399 +openai/types/shared/response_format_text.py,sha256=GX0u_40OLmDdSyawDrUcUk4jcrz1qWsKmmAMP4AD7hc,318 +openai/types/shared_params/__init__.py,sha256=GcNBmK_EPlGE-xPFmSQjlOq7SuNYd2nwDswX4ExHwoU,498 +openai/types/shared_params/__pycache__/__init__.cpython-310.pyc,, +openai/types/shared_params/__pycache__/function_definition.cpython-310.pyc,, +openai/types/shared_params/__pycache__/function_parameters.cpython-310.pyc,, +openai/types/shared_params/__pycache__/response_format_json_object.cpython-310.pyc,, +openai/types/shared_params/__pycache__/response_format_json_schema.cpython-310.pyc,, +openai/types/shared_params/__pycache__/response_format_text.cpython-310.pyc,, +openai/types/shared_params/function_definition.py,sha256=ciMXqn1tFXnp1tg9weJW0uvtyvMLrnph3WXMg4IG1Vk,1482 +openai/types/shared_params/function_parameters.py,sha256=UvxKz_3b9b5ECwWr8RFrIH511htbU2JZsp9Z9BMkF-o,272 +openai/types/shared_params/response_format_json_object.py,sha256=QT4uJCK7RzN3HK17eGjEo36jLKOIBBNGjiX-zIa9iT4,390 +openai/types/shared_params/response_format_json_schema.py,sha256=Uu2ioeSbI64bm-jJ61OY8Lr3PpofTR4d2LNBcaYxlec,1360 +openai/types/shared_params/response_format_text.py,sha256=SjHeZAfgM1-HXAoKLrkiH-VZEnQ73XPTk_RgtJmEbU4,364 +openai/types/upload.py,sha256=mEeQTGS0uqFkxbDpJzgBUvuDhGVPw9cQxhRJjPBVeLo,1186 +openai/types/upload_complete_params.py,sha256=7On-iVAlA9p_nksLSFPBPR4QbB0xEtAW-skyh7S9gR0,504 +openai/types/upload_create_params.py,sha256=ZiZr1yC6g2VqL7KEnw7lhE4kZvU-F3DfTAc2TPk-XBo,889 +openai/types/uploads/__init__.py,sha256=fDsmd3L0nIWbFldbViOLvcQavsFA4SL3jsXDfAueAck,242 +openai/types/uploads/__pycache__/__init__.cpython-310.pyc,, 
+openai/types/uploads/__pycache__/part_create_params.cpython-310.pyc,, +openai/types/uploads/__pycache__/upload_part.cpython-310.pyc,, +openai/types/uploads/part_create_params.py,sha256=pBByUzngaj70ov1knoSo_gpeBjaWP9D5EdiHwiG4G7U,362 +openai/types/uploads/upload_part.py,sha256=U9953cr9lJJLWEfhTiwHphRzLKARq3gWAWqrjxbhTR4,590 +openai/types/websocket_connection_options.py,sha256=4cAWpv1KKp_9pvnez7pGYzO3s8zh1WvX2xpBhpe-96k,1840 +openai/version.py,sha256=cjbXKO8Ut3aiv4YlQnugff7AdC48MpSndcx96q88Yb8,62 diff --git a/evalkit_eagle/lib/python3.10/site-packages/openai-1.59.7.dist-info/REQUESTED b/evalkit_eagle/lib/python3.10/site-packages/openai-1.59.7.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_eagle/lib/python3.10/site-packages/openai-1.59.7.dist-info/entry_points.txt b/evalkit_eagle/lib/python3.10/site-packages/openai-1.59.7.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..989993968b142c5540dedfd048fef222a6c29b15 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/openai-1.59.7.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +openai = openai.cli:main diff --git a/evalkit_eagle/lib/python3.10/site-packages/openai-1.59.7.dist-info/licenses/LICENSE b/evalkit_eagle/lib/python3.10/site-packages/openai-1.59.7.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f011417af6f72ab5e00e7f48931028fa202ca7cb --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/openai-1.59.7.dist-info/licenses/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2025 OpenAI + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/evalkit_eagle/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so b/evalkit_eagle/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d34bf528238e8368238be924ee9c138ba717535f --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29dd755c12af2aebb0e796b4bdd3878ddecf511ecd096f2a0e534c6ad6860f2c +size 2549016 diff --git a/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..111f284d188809c06a339ecddfa3fba2b853d25b --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/__init__.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not +# use this file except in compliance with the License. A copy of the License +# is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed on +# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +__description__ = "Hassle-free computation of shareable, comparable, and reproducible BLEU, chrF, and TER scores" + + +# Backward compatibility functions for old style API access (<= 1.4.10) +from .compat import ( + corpus_bleu, + corpus_chrf, + corpus_ter, + raw_corpus_bleu, + sentence_bleu, + sentence_chrf, + sentence_ter, +) +from .dataset import DATASETS +from .metrics import BLEU, CHRF, TER +from .metrics.helpers import extract_char_ngrams, extract_word_ngrams +from .utils import ( + SACREBLEU_DIR, + download_test_set, + get_available_testsets, + get_langpairs_for_testset, + get_reference_files, + get_source_file, + smart_open, +) +from .version import __version__ + +__all__ = [ + "smart_open", + "SACREBLEU_DIR", + "download_test_set", + "get_source_file", + "get_reference_files", + "get_available_testsets", + "get_langpairs_for_testset", + "extract_word_ngrams", + "extract_char_ngrams", + "DATASETS", + "BLEU", + "CHRF", + "TER", + "corpus_bleu", + "raw_corpus_bleu", + "sentence_bleu", + "corpus_chrf", + "sentence_chrf", + "corpus_ter", + "sentence_ter", + "__version__", +] diff --git a/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/__main__.py b/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..3833741e8076dbac776a383f8e05ce8cafaac92e --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/__main__.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not +# use this file except in compliance with the License. A copy of the License +# is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed on +# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. 
See the License for the specific language governing +# permissions and limitations under the License. + +""" +SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores. +Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text. +It also knows all the standard test sets and handles downloading, processing, and tokenization for you. + +See the [README.md] file for more information. +""" +from .sacrebleu import main + +if __name__ == '__main__': + main() diff --git a/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/compat.py b/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..573596037928ddc5b8c5b8df99202c13f0681943 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/compat.py @@ -0,0 +1,205 @@ +from typing import Sequence, Optional + +from .metrics import BLEU, CHRF, TER, BLEUScore, CHRFScore, TERScore + + +###################################################################### +# Backward compatibility functions for old style API access (< 1.4.11) +###################################################################### +def corpus_bleu(hypotheses: Sequence[str], + references: Sequence[Sequence[str]], + smooth_method='exp', + smooth_value=None, + force=False, + lowercase=False, + tokenize=BLEU.TOKENIZER_DEFAULT, + use_effective_order=False) -> BLEUScore: + """Computes BLEU for a corpus against a single (or multiple) reference(s). + This is the main CLI entry point for computing BLEU between a system output + and a reference sentence. + + :param hypotheses: A sequence of hypothesis strings. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. 
+ :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none') + :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value. + :param force: Ignore data that looks already tokenized + :param lowercase: Lowercase the data + :param tokenize: The tokenizer to use + :param use_effective_order: Don't take into account n-gram orders without any match. + :return: a `BLEUScore` object + """ + metric = BLEU( + lowercase=lowercase, force=force, tokenize=tokenize, + smooth_method=smooth_method, smooth_value=smooth_value, + effective_order=use_effective_order) + + return metric.corpus_score(hypotheses, references) + + +def raw_corpus_bleu(hypotheses: Sequence[str], + references: Sequence[Sequence[str]], + smooth_value: Optional[float] = BLEU.SMOOTH_DEFAULTS['floor']) -> BLEUScore: + """Computes BLEU for a corpus against a single (or multiple) reference(s). + This convenience function assumes a particular set of arguments i.e. + it disables tokenization and applies a `floor` smoothing with value `0.1`. + + This convenience call does not apply any tokenization at all, + neither to the system output nor the reference. It just computes + BLEU on the "raw corpus" (hence the name). + + :param hypotheses: A sequence of hypothesis strings. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. + :param smooth_value: The smoothing value for `floor`. If not given, the default of 0.1 is used. + :return: Returns a `BLEUScore` object. 
+ + """ + return corpus_bleu( + hypotheses, references, smooth_method='floor', + smooth_value=smooth_value, force=True, tokenize='none', + use_effective_order=True) + + +def sentence_bleu(hypothesis: str, + references: Sequence[str], + smooth_method: str = 'exp', + smooth_value: Optional[float] = None, + lowercase: bool = False, + tokenize=BLEU.TOKENIZER_DEFAULT, + use_effective_order: bool = True) -> BLEUScore: + """ + Computes BLEU for a single sentence against a single (or multiple) reference(s). + + Disclaimer: Computing BLEU at the sentence level is not its intended use as + BLEU is a corpus-level metric. + + :param hypothesis: A single hypothesis string. + :param references: A sequence of reference strings. + :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none') + :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value. + :param lowercase: Lowercase the data + :param tokenize: The tokenizer to use + :param use_effective_order: Don't take into account n-gram orders without any match. + :return: Returns a `BLEUScore` object. + """ + metric = BLEU( + lowercase=lowercase, tokenize=tokenize, force=False, + smooth_method=smooth_method, smooth_value=smooth_value, + effective_order=use_effective_order) + + return metric.sentence_score(hypothesis, references) + + +def corpus_chrf(hypotheses: Sequence[str], + references: Sequence[Sequence[str]], + char_order: int = CHRF.CHAR_ORDER, + word_order: int = CHRF.WORD_ORDER, + beta: int = CHRF.BETA, + remove_whitespace: bool = True, + eps_smoothing: bool = False) -> CHRFScore: + """ + Computes chrF for a corpus against a single (or multiple) reference(s). + If `word_order` equals to 2, the metric is referred to as chrF++. + + :param hypotheses: A sequence of hypothesis strings. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. + :param char_order: Character n-gram order. 
+ :param word_order: Word n-gram order. If equals to 2, the metric is referred to as chrF++. + :param beta: Determine the importance of recall w.r.t precision. + :param eps_smoothing: If `True`, applies epsilon smoothing similar + to reference chrF++.py, NLTK and Moses implementations. Otherwise, + it takes into account effective match order similar to sacreBLEU < 2.0.0. + :param remove_whitespace: If `True`, removes whitespaces prior to character n-gram extraction. + :return: A `CHRFScore` object. + """ + metric = CHRF( + char_order=char_order, + word_order=word_order, + beta=beta, + whitespace=not remove_whitespace, + eps_smoothing=eps_smoothing) + return metric.corpus_score(hypotheses, references) + + +def sentence_chrf(hypothesis: str, + references: Sequence[str], + char_order: int = CHRF.CHAR_ORDER, + word_order: int = CHRF.WORD_ORDER, + beta: int = CHRF.BETA, + remove_whitespace: bool = True, + eps_smoothing: bool = False) -> CHRFScore: + """ + Computes chrF for a single sentence against a single (or multiple) reference(s). + If `word_order` equals to 2, the metric is referred to as chrF++. + + :param hypothesis: A single hypothesis string. + :param references: A sequence of reference strings. + :param char_order: Character n-gram order. + :param word_order: Word n-gram order. If equals to 2, the metric is referred to as chrF++. + :param beta: Determine the importance of recall w.r.t precision. + :param eps_smoothing: If `True`, applies epsilon smoothing similar + to reference chrF++.py, NLTK and Moses implementations. Otherwise, + it takes into account effective match order similar to sacreBLEU < 2.0.0. + :param remove_whitespace: If `True`, removes whitespaces prior to character n-gram extraction. + :return: A `CHRFScore` object. 
+ """ + metric = CHRF( + char_order=char_order, + word_order=word_order, + beta=beta, + whitespace=not remove_whitespace, + eps_smoothing=eps_smoothing) + return metric.sentence_score(hypothesis, references) + + +def corpus_ter(hypotheses: Sequence[str], + references: Sequence[Sequence[str]], + normalized: bool = False, + no_punct: bool = False, + asian_support: bool = False, + case_sensitive: bool = False) -> TERScore: + """ + Computes TER for a corpus against a single (or multiple) reference(s). + + :param hypotheses: A sequence of hypothesis strings. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. + :param normalized: Enable character normalization. + :param no_punct: Remove punctuation. + :param asian_support: Enable special treatment of Asian characters. + :param case_sensitive: Enables case-sensitivity. + :return: A `TERScore` object. + """ + metric = TER( + normalized=normalized, + no_punct=no_punct, + asian_support=asian_support, + case_sensitive=case_sensitive) + return metric.corpus_score(hypotheses, references) + + +def sentence_ter(hypothesis: str, + references: Sequence[str], + normalized: bool = False, + no_punct: bool = False, + asian_support: bool = False, + case_sensitive: bool = False) -> TERScore: + """ + Computes TER for a single hypothesis against a single (or multiple) reference(s). + + :param hypothesis: A single hypothesis string. + :param references: A sequence of reference strings. + :param normalized: Enable character normalization. + :param no_punct: Remove punctuation. + :param asian_support: Enable special treatment of Asian characters. + :param case_sensitive: Enable case-sensitivity. + :return: A `TERScore` object. 
+ """ + metric = TER( + normalized=normalized, + no_punct=no_punct, + asian_support=asian_support, + case_sensitive=case_sensitive) + return metric.sentence_score(hypothesis, references) diff --git a/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/sacrebleu.py b/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/sacrebleu.py new file mode 100644 index 0000000000000000000000000000000000000000..6b7cd9e798e5a8baf3eb9d7e50b056ede59b1e0b --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/sacrebleu.py @@ -0,0 +1,576 @@ +#!/usr/bin/env python3 + +# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not +# use this file except in compliance with the License. A copy of the License +# is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed on +# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. + +""" +SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores. +Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text. +It also knows all the standard test sets and handles downloading, processing, and tokenization for you. + +See the [README.md] file for more information. 
+""" + +import io +import os +import sys +import logging +import pathlib +import argparse +from collections import defaultdict + + +# Allows calling the script as a standalone utility +# See: https://github.com/mjpost/sacrebleu/issues/86 +if __package__ is None and __name__ == '__main__': + parent = pathlib.Path(__file__).absolute().parents[1] + sys.path.insert(0, str(parent)) + __package__ = 'sacrebleu' + +from .dataset import DATASETS +from .metrics import METRICS +from .utils import smart_open, filter_subset, get_langpairs_for_testset, get_available_testsets +from .utils import print_test_set, print_subset_results, get_reference_files, download_test_set +from .utils import args_to_dict, sanity_check_lengths, print_results_table, print_single_results +from .utils import get_available_testsets_for_langpair, Color + +from . import __version__ as VERSION + +sacrelogger = logging.getLogger('sacrebleu') + +try: + # SIGPIPE is not available on Windows machines, throwing an exception. + from signal import SIGPIPE # type: ignore + + # If SIGPIPE is available, change behaviour to default instead of ignore. 
+ from signal import signal, SIG_DFL + signal(SIGPIPE, SIG_DFL) +except ImportError: + pass + + +def parse_args(): + arg_parser = argparse.ArgumentParser( + description='sacreBLEU: Hassle-free computation of shareable BLEU scores.\n' + 'Quick usage: score your detokenized output against WMT\'14 EN-DE:\n' + ' cat output.detok.de | sacrebleu -t wmt14 -l en-de', + formatter_class=argparse.RawDescriptionHelpFormatter) + + arg_parser.add_argument('--citation', '--cite', default=False, action='store_true', + help='Dump the bibtex citation and quit.') + arg_parser.add_argument('--list', default=False, action='store_true', + help='Print a list of all available test sets.') + arg_parser.add_argument('--test-set', '-t', type=str, default=None, + help='The test set to use (see also --list) or a comma-separated list of test sets to be concatenated.') + arg_parser.add_argument('--language-pair', '-l', dest='langpair', default=None, + help='Source-target language pair (2-char ISO639-1 codes).') + arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=None, + help='Use a subset of sentences with a given original language (2-char ISO639-1 codes), "non-" prefix means negation.') + arg_parser.add_argument('--subset', dest='subset', default=None, + help='Use a subset of sentences whose document annotation matches a given regex (see SUBSETS in the source code).') + arg_parser.add_argument('--download', type=str, default=None, + help='Download a test set and quit.') + arg_parser.add_argument('--echo', nargs="+", type=str, default=None, + help='Output the source (src), reference (ref), or other available field (docid, ref:A, ref:1 for example) to STDOUT and quit. ' + 'You can get available fields with options `--list` and `-t`' 'For example: `sacrebleu -t wmt21 --list`. ' + 'If multiple fields are given, they are outputted with tsv format in the order they are given.' 
+ 'You can also use `--echo all` to output all available fields.') + + # I/O related arguments + # Multiple input files can be provided for significance testing for example + arg_parser.add_argument('--input', '-i', type=str, nargs='*', default=None, + help='Read input from file(s) instead of STDIN.') + arg_parser.add_argument('refs', nargs='*', default=[], + help='Optional list of references. If given, it should preceed the -i/--input argument.') + arg_parser.add_argument('--num-refs', '-nr', type=int, default=1, + help='Split the reference stream on tabs, and expect this many references. (Default: %(default)s)') + arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8', + help='Open text files with specified encoding (Default: %(default)s)') + + # Metric selection + avail_metrics = [m.lower() for m in METRICS] + arg_parser.add_argument('--metrics', '-m', choices=avail_metrics, nargs='+', default=['bleu'], + help='Space-delimited list of metrics to compute (Default: bleu)') + arg_parser.add_argument('--sentence-level', '-sl', action='store_true', help='Compute metric for each sentence.') + + # BLEU-related arguments + # since sacreBLEU had only support for BLEU initially, the argument names + # are not prefixed with 'bleu' as in chrF arguments for example. + # Let's do that manually here through dest= options, as otherwise + # things will get quite hard to maintain when other metrics are added. + bleu_args = arg_parser.add_argument_group('BLEU related arguments') + + bleu_args.add_argument('--smooth-method', '-s', choices=METRICS['BLEU'].SMOOTH_DEFAULTS.keys(), default='exp', + dest='bleu_smooth_method', + help='Smoothing method: exponential decay, floor (increment zero counts), add-k (increment num/denom by k for n>1), or none. (Default: %(default)s)') + bleu_args.add_argument('--smooth-value', '-sv', type=float, default=None, + dest='bleu_smooth_value', + help='The smoothing value. Only valid for floor and add-k. 
' + f"(Defaults: floor: {METRICS['BLEU'].SMOOTH_DEFAULTS['floor']}, " + f"add-k: {METRICS['BLEU'].SMOOTH_DEFAULTS['add-k']})") + bleu_args.add_argument('--tokenize', '-tok', choices=METRICS['BLEU'].TOKENIZERS, default=None, + dest='bleu_tokenize', + help='Tokenization method to use for BLEU. If not provided, defaults to `zh` for Chinese, ' + '`ja-mecab` for Japanese, `ko-mecab` for Korean and `13a` (mteval) otherwise.') + bleu_args.add_argument('--lowercase', '-lc', dest='bleu_lowercase', action='store_true', default=False, + help='If True, enables case-insensitivity. (Default: %(default)s)') + bleu_args.add_argument('--force', default=False, action='store_true', + dest='bleu_force', help='Insist that your tokenized input is actually detokenized.') + + # ChrF-related arguments + chrf_args = arg_parser.add_argument_group('chrF related arguments') + chrf_args.add_argument('--chrf-char-order', '-cc', type=int, default=METRICS['CHRF'].CHAR_ORDER, + help='Character n-gram order. (Default: %(default)s)') + chrf_args.add_argument('--chrf-word-order', '-cw', type=int, default=METRICS['CHRF'].WORD_ORDER, + help='Word n-gram order (Default: %(default)s). If equals to 2, the metric is referred to as chrF++.') + chrf_args.add_argument('--chrf-beta', type=int, default=METRICS['CHRF'].BETA, + help='Determine the importance of recall w.r.t precision. (Default: %(default)s)') + chrf_args.add_argument('--chrf-whitespace', action='store_true', default=False, + help='Include whitespaces when extracting character n-grams. (Default: %(default)s)') + chrf_args.add_argument('--chrf-lowercase', action='store_true', default=False, + help='Enable case-insensitivity. (Default: %(default)s)') + chrf_args.add_argument('--chrf-eps-smoothing', action='store_true', default=False, + help='Enables epsilon smoothing similar to chrF++.py, NLTK and Moses; instead of effective order smoothing. 
(Default: %(default)s)') + + # TER related arguments + ter_args = arg_parser.add_argument_group("TER related arguments (The defaults replicate TERCOM's behavior)") + ter_args.add_argument('--ter-case-sensitive', action='store_true', + help='Enables case sensitivity. (Default: %(default)s)') + ter_args.add_argument('--ter-asian-support', action='store_true', + help='Enables special treatment of Asian characters. (Default: %(default)s)') + ter_args.add_argument('--ter-no-punct', action='store_true', + help='Removes punctuation. (Default: %(default)s)') + ter_args.add_argument('--ter-normalized', action='store_true', + help='Applies basic normalization and tokenization. (Default: %(default)s)') + + # Bootstrap resampling for confidence intervals + sign_args = arg_parser.add_argument_group('Confidence interval (CI) estimation for single-system evaluation') + sign_args.add_argument('--confidence', '-ci', action='store_true', + help='Report confidence interval using bootstrap resampling.') + sign_args.add_argument('--confidence-n', '-cin', type=int, default=1000, + help='Set the number of bootstrap resamples for CI estimation (Default: %(default)s).') + + # Paired significance testing + pair_args = arg_parser.add_argument_group('Paired significance testing for multi-system evaluation') + pair_args_choice = pair_args.add_mutually_exclusive_group() + + pair_args_choice.add_argument('--paired-ar', '-par', action='store_true', + help='Perform paired test using approximate randomization (AR). This option is ' + 'mutually exclusive with --paired-bs (Default: %(default)s).') + pair_args_choice.add_argument('--paired-bs', '-pbs', action='store_true', + help='Perform paired test using bootstrap resampling. 
This option is ' + 'mutually exclusive with --paired-ar (Default: %(default)s).') + + pair_args.add_argument('--paired-ar-n', '-parn', type=int, default=10000, + help='Number of trials for approximate randomization test (Default: %(default)s).') + + pair_args.add_argument('--paired-bs-n', '-pbsn', type=int, default=1000, + help='Number of bootstrap resamples for paired bootstrap resampling test (Default: %(default)s).') + + pair_args.add_argument('--paired-jobs', '-j', type=int, default=1, + help='If 0, launches as many workers as the number of systems. If > 0, sets the number of workers manually. ' + 'This feature is currently not supported on Windows.') + + # Reporting related arguments + report_args = arg_parser.add_argument_group('Reporting related arguments') + report_args.add_argument('--quiet', '-q', default=False, action='store_true', + help='Suppress verbose messages.') + report_args.add_argument('--short', '-sh', default=False, action='store_true', + help='Produce a shorter (less human readable) signature.') + report_args.add_argument('--score-only', '-b', default=False, action='store_true', + help='Print only the computed score.') + report_args.add_argument('--width', '-w', type=int, default=1, + help='Floating point width (Default: %(default)s).') + report_args.add_argument('--detail', '-d', default=False, action='store_true', + help='Print detailed information (split test sets based on origlang).') + report_args.add_argument('--no-color', '-nc', action='store_true', + help='Disable the occasional use of terminal colors.') + + output_formats = ['json', 'text', 'latex'] + report_args.add_argument('--format', '-f', default='json', choices=output_formats, + help='Set the output format. `latex` is only valid for multi-system mode whereas ' + '`json` and `text` apply to single-system mode only. 
This flag is overridden if the ' + 'SACREBLEU_FORMAT environment variable is set to one of the valid choices (Default: %(default)s).') + + arg_parser.add_argument('--version', '-V', action='version', version='%(prog)s {}'.format(VERSION)) + + args = arg_parser.parse_args() + + # Override the format from the environment, if any + if 'SACREBLEU_FORMAT' in os.environ: + _new_value = os.environ['SACREBLEU_FORMAT'].lower() + if _new_value in output_formats: + args.format = _new_value + + return args + + +def main(): + args = parse_args() + + # Is paired test requested? + paired_test_mode = args.paired_bs or args.paired_ar + + # Explicitly set the encoding + sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8', buffering=True, newline="\n") + sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8', buffering=True) + + if os.environ.get('NO_COLOR', False) or args.no_color: + Color.ENABLE_COLORS = False + else: + # These should come after all stdout manipulations otherwise cause + # issues esp. 
on Windows + import colorama + colorama.init() + + if not args.quiet: + logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s') + + if args.download: + download_test_set(args.download, args.langpair) + sys.exit(0) + + if args.list: + if args.test_set: + for pair in [args.langpair] if args.langpair else get_langpairs_for_testset(args.test_set): + fields = DATASETS[args.test_set].fieldnames(pair) + print(f'{pair}: {", ".join(fields)}') + else: + if args.langpair: + print(f'The available test sets for {args.langpair} are:') + testsets = get_available_testsets_for_langpair(args.langpair) + else: + print('The available test sets are:') + testsets = get_available_testsets() + for testset in sorted(testsets): + desc = DATASETS[testset].description.strip() + print(f'{testset:<30}: {desc}') + sys.exit(0) + + if args.sentence_level and len(args.metrics) > 1: + sacrelogger.error('Only one metric can be used in sentence-level mode.') + sys.exit(1) + + if args.citation: + if not args.test_set: + sacrelogger.error('I need a test set (-t).') + sys.exit(1) + for test_set in args.test_set.split(','): + if 'citation' not in DATASETS[test_set]: + sacrelogger.error(f'No citation found for {test_set}') + else: + print(DATASETS[test_set].citation) + sys.exit(0) + + if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1): + sacrelogger.error('The --num-refs argument allows you to provide any number of tab-delimited references in a single file.') + sacrelogger.error('You can only use it with externally provided references, however (i.e., not with `-t`),') + sacrelogger.error('and you cannot then provide multiple reference files.') + sys.exit(1) + + if args.test_set is not None: + for test_set in args.test_set.split(','): + if test_set not in DATASETS: + sacrelogger.error(f'Unknown test set {test_set!r}') + sacrelogger.error('Please run with --list to see the available test sets.') + sys.exit(1) + + if args.test_set is None: + if len(args.refs) == 0: + 
sacrelogger.error('If manual references given, make sure to provide them ' + 'before the -i/--input argument to avoid confusion.') + sacrelogger.error('Otherwise, I need a predefined test set (-t) from the following list:') + sacrelogger.error(get_available_testsets()) + sys.exit(1) + elif len(args.refs) > 0: + sacrelogger.error('I need exactly one of (a) a predefined test set (-t) or (b) a list of references') + sys.exit(1) + elif args.langpair is None: + sacrelogger.error('I need a language pair (-l). Use --list to see available language pairs for this test set.') + sys.exit(1) + else: + for test_set in args.test_set.split(','): + langpairs = get_langpairs_for_testset(test_set) + if args.langpair not in langpairs: + sacrelogger.error(f'No such language pair {args.langpair!r}') + sacrelogger.error(f'Available language pairs for {test_set!r} are:') + for lp in langpairs: + sacrelogger.error(f' > {lp}') + sys.exit(1) + + if args.echo: + if args.langpair is None or args.test_set is None: + sacrelogger.warning("--echo requires a test set (--t) and a language pair (-l)") + sys.exit(1) + for test_set in args.test_set.split(','): + print_test_set(test_set, args.langpair, args.echo, args.origlang, args.subset) + sys.exit(0) + + # Hack: inject target language info for BLEU, so that it can + # select the tokenizer based on it + if args.langpair: + args.bleu_trg_lang = args.langpair.split('-')[1] + + if args.test_set is not None and args.bleu_tokenize == 'none': + sacrelogger.warning( + "You are turning off BLEU's internal tokenizer " + "presumably to supply your own tokenized files.") + sacrelogger.warning( + "Published numbers will not be comparable to other papers.") + + # concat_ref_files is a list of list of reference filenames + # (concatenation happens if multiple test sets are given through -t) + # Example: [[testset1_refA, testset1_refB], [testset2_refA, testset2_refB]] + concat_ref_files = [] + if args.test_set is None: + concat_ref_files.append(args.refs) + else: 
+ # Multiple test sets can be given + for test_set in args.test_set.split(','): + ref_files = get_reference_files(test_set, args.langpair) + if len(ref_files) == 0: + sacrelogger.warning( + f'No references found for test set {test_set}/{args.langpair}.') + concat_ref_files.append(ref_files) + + ################# + # Read references + ################# + full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.num_refs))] + for ref_files in concat_ref_files: + for refno, ref_file in enumerate(ref_files): + for lineno, line in enumerate(smart_open(ref_file, encoding=args.encoding), 1): + line = line.rstrip() + if args.num_refs == 1: + full_refs[refno].append(line) + else: + refs = line.split(sep='\t', maxsplit=args.num_refs - 1) + # We are strict in fixed number of references through CLI + # But the API supports having variable refs per each segment + # by simply having '' or None's as dummy placeholders + if len(refs) != args.num_refs: + sacrelogger.error(f'FATAL: line {lineno}: expected {args.num_refs} fields, but found {len(refs)}.') + sys.exit(17) + for refno, ref in enumerate(refs): + full_refs[refno].append(ref) + + # Decide on the number of final references, override the argument + args.num_refs = len(full_refs) + + # Read hypotheses + # Can't tokenize yet as each metric has its own way of tokenizing things + full_systems, sys_names = [], [] + + if args.input is None: + # Read from STDIN + inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding) + + # guess the number of systems by looking at the first line + fields = inputfh.readline().rstrip().split('\t') + + # Set number of systems + num_sys = len(fields) + + # place the first lines already + full_systems = [[s] for s in fields] + + # Enumerate the systems + sys_names = [f'System {i + 1}' for i in range(num_sys)] + + # Read the rest + for line in inputfh: + fields = line.rstrip().split('\t') + if len(fields) != num_sys: + sacrelogger.error('FATAL: the number of tab-delimited fields in 
the input stream differ across lines.') + sys.exit(17) + # Place systems into the list + for sys_idx, sent in enumerate(fields): + full_systems[sys_idx].append(sent.rstrip()) + else: + # Separate files are given for each system output + # Ex: --input smt.txt nmt.txt + for fname in args.input: + sys_name = fname + + if sys_name in sys_names: + if paired_test_mode and sys_name == sys_names[0]: + # We skip loading a system, if it was already the baseline + sacrelogger.info(f'Ignoring {sys_name!r} as it was also given as the baseline.') + continue + else: + # To avoid ambiguities, we fail if two systems have same names + sacrelogger.error(f"{sys_name!r} already used to name a system.") + sacrelogger.error("Make sure to have a different basename for each system.") + sys.exit(1) + + # Read the system + lines = [] + for line in smart_open(fname, encoding=args.encoding): + lines.append(line.rstrip()) + full_systems.append(lines) + sys_names.append(sys_name) + + # Set final number of systems + num_sys = len(sys_names) + + # Add baseline prefix to the first system for clarity + if paired_test_mode: + if args.input is None: + # STDIN mode, no explicit system names + sys_names = ['Baseline'] + [f'System {i + 1}' for i in range(num_sys - 1)] + else: + # --input mode, we have names for the systems, just change the 1st one + sys_names[0] = f'Baseline: {sys_names[0]}' + + if args.sentence_level: + if num_sys > 1: + sacrelogger.error('Only one system can be evaluated in sentence-level mode.') + sys.exit(1) + if args.confidence or paired_test_mode: + sacrelogger.error('Statistical tests are unavailable in sentence-level mode.') + sys.exit(1) + + # >=2.0.0: effective_order is now part of BLEU class. For sentence-BLEU + # we now need to explicitly enable it without user's intervention + # for backward compatibility. 
+ args.bleu_effective_order = True + + if paired_test_mode and num_sys == 1: + sacrelogger.error('Paired tests require multiple input systems given to --input (-i).') + sys.exit(1) + + if num_sys > 1 and args.confidence: + sacrelogger.error('Use paired tests (--paired) for multiple systems.') + sys.exit(1) + + # Filter subsets if requested + outputs = filter_subset( + [*full_systems, *full_refs], args.test_set, args.langpair, + args.origlang, args.subset) + + # Unpack systems & references back + systems, refs = outputs[:num_sys], outputs[num_sys:] + + # Perform some sanity checks + for system in systems: + if len(system) == 0: + message = f'Test set {args.test_set!r} contains no sentence' + if args.origlang is not None or args.subset is not None: + message += ' with' + if args.origlang: + message += f' origlang={args.origlang}' + if args.subset: + message += f' subset={args.subset}' + args.subset + sacrelogger.error(message) + sys.exit(1) + + # Check lengths + sanity_check_lengths(system, refs, test_set=args.test_set) + + # Create the metrics + metrics = {} + for name in args.metrics: + # Each metric's specific arguments are prefixed with `metricname_` + # for grouping. Filter accordingly and strip the prefixes prior to + # metric object construction. 
+ metric_args = args_to_dict(args, name.lower(), strip_prefix=True) + + # This will cache reference stats for faster re-computation if required + metric_args['references'] = refs + + # Make it uppercase for the rest of the code + name = name.upper() + metrics[name] = METRICS[name](**metric_args) + + # Handle sentence level and quit + if args.sentence_level: + # one metric and one system in use for sentence-level + metric, system = list(metrics.values())[0], systems[0] + + for hypothesis, *references in zip(system, *refs): + score = metric.sentence_score(hypothesis, references) + sig = metric.get_signature().format(args.short) + print(score.format(args.width, args.score_only, sig)) + + sys.exit(0) + + if args.detail and args.format == 'json': + # The translationese info will interfere with JSON output, disable + args.format = 'text' + + ############################## + # Corpus level evaluation mode + ############################## + if num_sys == 1: + # Single system evaluation mode + results = [] + for name in sorted(metrics): + # compute the score + score = metrics[name].corpus_score( + system, references=None, + n_bootstrap=args.confidence_n if args.confidence else 1) + # get the signature + sig = metrics[name].get_signature().format( + args.short if args.format != 'json' else False) + results.append( + score.format(args.width, args.score_only, sig, args.format == 'json')) + + print_single_results(results, args) + + # Prints detailed information for translationese effect experiments + if args.detail: + print_subset_results(metrics, full_systems[0], full_refs, args) + else: + # Multi-system evaluation mode + named_systems = [(sys_names[i], systems[i]) for i in range(num_sys)] + sacrelogger.info(f'Found {num_sys} systems.') + + if not paired_test_mode: + # Bootstrap resampling or the usual single score computation mode + sigs = {} + scores = defaultdict(list) + scores['System'] = sys_names + + for sys_name, system in named_systems: + for name in sorted(metrics): + 
score = metrics[name].corpus_score(system, references=None) + sigs[score.name] = metrics[name].get_signature().format(args.short) + scores[score.name].append(score.format(args.width, True)) + + else: + # Paired significance testing mode + from .significance import PairedTest + + # Set params + test_type = 'bs' if args.paired_bs else 'ar' + n_samples = args.paired_bs_n if args.paired_bs else args.paired_ar_n + + ps = PairedTest(named_systems, metrics, references=None, + test_type=test_type, n_samples=n_samples, + n_jobs=args.paired_jobs) + + # Set back the number of trials + args.paired_n = ps.n_samples + + # Run the test + sigs, scores = ps() + + # Get signature strings + sigs = {k: v.format(args.short) for k, v in sigs.items()} + + # Dump the results + print_results_table(scores, sigs, args) + + +if __name__ == '__main__': + main() diff --git a/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/significance.py b/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/significance.py new file mode 100644 index 0000000000000000000000000000000000000000..a9c71d0ab935d0eb195b68126bcad4bdf0facd6e --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/significance.py @@ -0,0 +1,435 @@ +import os +import logging +import multiprocessing as mp +from typing import Sequence, Dict, Optional, Tuple, List, Union, Any, Mapping + +import numpy as np + +from .metrics.base import Metric, Score, Signature + +IS_WINDOWS = os.name == 'nt' + + +sacrelogger = logging.getLogger('sacrebleu') + + +class Result: + """A container to represent results from a particular statistical + significance test. + :param score: The floating point score for the system at hand. + :param p_value: If exists, represents the p-value when the system at + hand is compared to a baseline using a paired test. + :param mean: When paired bootstrap test is applied, this represents + the true mean score estimated from bootstrap resamples of the system. 
+ :param ci: When paired bootstrap test is applied, this represents
+ the 95% confidence interval around the true mean score `mean`.
+ """
+ def __init__(self, score: float, p_value: Optional[float] = None,
+ mean: Optional[float] = None, ci: Optional[float] = None):
+ self.score = score
+ self.p_value = p_value
+ self.mean = mean
+ self.ci = ci
+
+ def __repr__(self):
+ return ','.join([f'{k}={str(v)}' for k, v in self.__dict__.items()])
+
+
+def estimate_ci(scores: np.ndarray) -> Tuple[float, float]:
+ """Takes a list of scores and returns mean and 95% confidence
+ interval around the mean.
+
+ :param scores: A list of floating point scores.
+ :return: A tuple of mean and the 95% CI.
+ """
+ # Sort the scores
+ scores = np.sort(scores)
+ n = len(scores)
+
+ # Get CI bounds (95%, i.e. 1/40 from left)
+ lower_idx = n // 40
+ upper_idx = n - lower_idx - 1
+ lower, upper = scores[lower_idx], scores[upper_idx]
+ ci = 0.5 * (upper - lower)
+ return (scores.mean(), ci)
+
+
+def _bootstrap_resample(stats: List[List[Union[int, float]]],
+ metric: Metric, n_samples: int = 1000) -> Tuple[str, List[Score]]:
+ """Performs bootstrap resampling for a single system to estimate
+ a confidence interval around the true mean.
+ :param stats: A list of statistics extracted from the system's hypotheses.
+ :param metric: The `Metric` instance to be used for score computation.
+ :param n_samples: Number of bootstrap resamples to use.
+
+ :return: A tuple of the seed choice as string and the list of `Score`
+ instances for all bootstrap resamples.
+ """
+
+ # Set numpy RNG's seed
+ # If given -> Fix to the given value
+ # If given but =='[Nn]one', don't fix the seed i.e.
pull entropy from OS + seed = os.environ.get('SACREBLEU_SEED', '12345') + _seed = None if seed.lower() == 'none' else int(seed) + rng = np.random.default_rng(_seed) + + # The indices that'll produce all bootstrap resamples at once + idxs = rng.choice(len(stats), size=(n_samples, len(stats)), replace=True) + + # convert to numpy array. float32 is more efficient + stats_np = np.array(stats, dtype='float32') + + # recompute scores for all resamples + scores = [ + metric._compute_score_from_stats(_s.sum(0)) for _s in stats_np[idxs]] + + return str(seed).lower(), scores + + +def _compute_p_value(stats: np.ndarray, real_difference: float) -> float: + """Computes the p-value given the sample statistics and the real statistic. + :param stats: A numpy array with the sample statistics. + :real_difference: The real statistic. + :return: The p-value. + """ + # Taken from: significance/StratifiedApproximateRandomizationTest.java + # https://github.com/jhclark/multeval.git + + # "the != is important. if we want to score the same system against itself + # having a zero difference should not be attributed to chance." + + c = np.sum(stats > real_difference).item() + + # "+1 applies here, though it only matters for small numbers of shufflings, + # which we typically never do. it's necessary to ensure the probability of + # falsely rejecting the null hypothesis is no greater than the rejection + # level of the test (see william and morgan on significance tests) + p = (c + 1) / (len(stats) + 1) + + return p + + +def _paired_ar_test(baseline_info: Dict[str, Tuple[np.ndarray, Result]], + sys_name: str, + hypotheses: Sequence[str], + references: Optional[Sequence[Sequence[str]]], + metrics: Dict[str, Metric], + n_samples: int = 10000, + n_ar_confidence: int = -1, + seed: Optional[int] = None) -> Tuple[str, Dict[str, Result]]: + """Paired two-sided approximate randomization (AR) test for MT evaluation. 
+ + :param baseline_info: A dictionary with `Metric` instances as the keys, + that contains sufficient statistics and a `Result` instance for the baseline system. + :param sys_name: The name of the system to be evaluated. + :param hypotheses: A sequence of string hypotheses for the system. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If `None`, references + will be used through each metric's internal cache. + :param metrics: A dictionary of `Metric` instances that will be computed + for each system. + :param n_samples: The number of AR trials. + :param n_ar_confidence: The number of bootstrap resamples to use for + confidence estimation. A value of -1 disables confidence estimation. + :param seed: The seed value for the RNG. If `None`, the RNG will not be + fixed to a particular seed. + + :return: A tuple with first element being the system name and the second + being a `Result` namedtuple. + """ + # Seed the RNG + rng = np.random.default_rng(seed) + + # Generate indices that'll select stats + pos_sel = rng.integers(2, size=(n_samples, len(hypotheses)), dtype=bool) + + # Flip mask to obtain selectors for system hypotheses + neg_sel = ~pos_sel + + if n_ar_confidence > 0: + # Perform confidence estimation as well + bs_idxs = rng.choice( + len(hypotheses), size=(n_ar_confidence, len(hypotheses)), replace=True) + + results = {} + + for name, metric in metrics.items(): + # Use pre-computed match stats for the baseline + bl_stats, bl_result = baseline_info[name] + + # Compute system's stats and score + sacrelogger.info(f'Computing {name} for {sys_name!r} and extracting sufficient statistics') + sys_stats = metric._extract_corpus_statistics(hypotheses, references) + sys_score = metric._aggregate_and_compute(sys_stats) + + # original test statistic: absolute difference between baseline and the system + diff = abs(bl_result.score - sys_score.score) + + sacrelogger.info(f' > Performing approximate 
randomization test (# trials: {n_samples})') + # get shuffled pseudo systems + shuf_a = pos_sel @ bl_stats + neg_sel @ sys_stats + shuf_b = neg_sel @ bl_stats + pos_sel @ sys_stats + + # Aggregate trial stats and compute scores for each + scores_a = np.array( + [metric._aggregate_and_compute(x).score for x in shuf_a[:, None]]) + scores_b = np.array( + [metric._aggregate_and_compute(x).score for x in shuf_b[:, None]]) + + # Count the statistical difference and compute the p-value + p = _compute_p_value( + np.abs(np.array(scores_a) - np.array(scores_b)), diff) + + res = Result(sys_score.score, p) + + if n_ar_confidence > 0: + sacrelogger.info(f' > Performing bootstrap resampling for confidence interval (# resamples: {n_ar_confidence})') + sys_stats = np.array(sys_stats, dtype='float32') + # recompute scores for all resamples + sys_scores = np.array([ + metric._compute_score_from_stats(_s.sum(0)).score for _s in sys_stats[bs_idxs] + ]) + res.mean, res.ci = estimate_ci(sys_scores) + + # Store the result + results[name] = res + + return sys_name, results + + +def _paired_bs_test(baseline_info: Dict[str, Tuple[np.ndarray, Result]], + sys_name: str, + hypotheses: Sequence[str], + references: Optional[Sequence[Sequence[str]]], + metrics: Dict[str, Metric], + n_samples: int = 1000, + n_ar_confidence: int = -1, + seed: Optional[int] = None) -> Tuple[str, Dict[str, Result]]: + """Paired bootstrap resampling test for MT evaluation. This function + replicates the behavior of the Moses script called + `bootstrap-hypothesis-difference-significance.pl`. + + :param baseline_info: A dictionary with `Metric` instances as the keys, + that contains sufficient statistics and a `Result` instance for the baseline system. + :param sys_name: The name of the system to be evaluated. + :param hypotheses: A sequence of string hypotheses for the system. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. 
If `None`, references + will be used through each metric's internal cache.
+ :param metrics: A dictionary of `Metric` instances that will be computed
+ for each system.
+ :param n_samples: The number of bootstrap resamples.
+ :param n_ar_confidence: This parameter is not used for this function but
+ is there for signature compatibility in the API.
+ :param seed: The seed value for the RNG. If `None`, the RNG will not be
+ fixed to a particular seed.
+
+ :return: A tuple with first element being the system name and the second
+ being a `Result` namedtuple.
+ """
+ # Seed the RNG
+ rng = np.random.default_rng(seed)
+
+ results = {}
+
+ # It takes ~10ms to generate the indices
+ idxs = rng.choice(
+ len(hypotheses), size=(n_samples, len(hypotheses)), replace=True)
+
+ for name, metric in metrics.items():
+ # Use pre-computed match stats for the baseline
+ bl_stats, bl_result = baseline_info[name]
+
+ # Compute system's stats and score
+ sacrelogger.info(f'Computing {name} for {sys_name!r} and extracting sufficient statistics')
+ sys_stats = metric._extract_corpus_statistics(hypotheses, references)
+ sys_score = metric._aggregate_and_compute(sys_stats)
+
+ # Convert to numpy arrays for efficient indexing
+ sys_stats = np.array(sys_stats, dtype='float32')
+ bl_stats = np.array(bl_stats, dtype='float32')
+
+ # original test statistic: absolute difference between baseline and the system
+ diff = abs(bl_result.score - sys_score.score)
+
+ sacrelogger.info(f' > Performing paired bootstrap resampling test (# resamples: {n_samples})')
+ scores_bl = np.array(
+ [metric._compute_score_from_stats(_s.sum(0)).score for _s in bl_stats[idxs]])
+ scores_sys = np.array(
+ [metric._compute_score_from_stats(_s.sum(0)).score for _s in sys_stats[idxs]])
+
+ # Compute CI as well
+ sys_mean, sys_ci = estimate_ci(scores_sys)
+
+ # Compute the statistics
+ sample_diffs = np.abs(scores_sys - scores_bl)
+ stats = sample_diffs - sample_diffs.mean()
+
+ # Count the statistical difference and
compute the p-value
+ p = _compute_p_value(stats, diff)
+
+ results[name] = Result(sys_score.score, p, sys_mean, sys_ci)
+
+ return sys_name, results
+
+
+class PairedTest:
+ """This is the manager class that will call the actual standalone implementation
+ for approximate randomization or paired bootstrap resampling, based on the
+ `test_type` argument.
+
+ :param named_systems: A list of (system_name, system_hypotheses) tuples on
+ which the test will be applied.
+ :param metrics: A dictionary of `Metric` instances that will be computed
+ for each system.
+ :param references: A sequence of reference documents with document being
+ defined as a sequence of reference strings. If `None`, already cached references
+ will be used through each metric's internal cache.
+ :param test_type: `ar` for approximate randomization, `bs` for paired bootstrap.
+ :param n_samples: The number of AR trials (for `ar`) or bootstrap resamples (for `bs`).
+ The defaults (10000 or 1000 respectively) will be used if 0 is passed.
+ :param n_ar_confidence: If `approximate randomization` is selected, the number
+ of bootstrap resamples to use for confidence estimation. A value of -1 disables
+ confidence estimation. 0 will use the default of 1000.
+ :param n_jobs: If 0, a worker process will be spawned for each system variant.
+ If > 0, the number of workers will be set accordingly. The default of 1
+ does not use multi-processing.
+ """ + _DEFAULT_SAMPLES = { + 'ar': 10000, + 'bs': 1000, + } + + def __init__(self, named_systems: List[Tuple[str, Sequence[str]]], + metrics: Mapping[str, Metric], + references: Optional[Sequence[Sequence[str]]], + test_type: str = 'ar', + n_samples: int = 0, + n_ar_confidence: int = -1, + n_jobs: int = 1): + assert test_type in ('ar', 'bs'), f"Unknown test type {test_type!r}" + self.test_type = test_type + + # Set method + if self.test_type == 'ar': + self._fn = _paired_ar_test + elif self.test_type == 'bs': + self._fn = _paired_bs_test + + # Set numpy RNG's seed + # If given -> Fix to the given value + # If given but =='[Nn]one', don't fix the seed i.e. pull entropy from OS + seed = os.environ.get('SACREBLEU_SEED', '12345') + self._seed = None if seed.lower() == 'none' else int(seed) + self.n_jobs = n_jobs + self.references = references + self.named_systems = named_systems + + # Set the defaults if requested + self.n_ar_confidence = n_ar_confidence if n_ar_confidence != 0 else \ + self._DEFAULT_SAMPLES['bs'] + + self.n_samples = n_samples if n_samples > 0 else \ + self._DEFAULT_SAMPLES[self.test_type] + + # Number of systems (excluding the baseline) + self.n_systems = len(named_systems) - 1 + + # Decide on number of workers + if IS_WINDOWS: + sacrelogger.warning('Parallel tests are not supported on Windows.') + self.n_jobs = 1 + elif self.n_jobs == 0: + # Decide automatically + # Divide by two to ignore hyper-threading + n_max_jobs = mp.cpu_count() // 2 + if n_max_jobs == 0: + self.n_jobs = 1 + else: + # Don't use more workers than the number of CPUs + self.n_jobs = min(n_max_jobs, self.n_systems) + + self._signatures: Dict[str, Signature] = {} + self._baseline_info: Dict[str, Tuple[Any, Result]] = {} + + ################################################## + # Pre-compute and cache baseline system statistics + ################################################## + self.metrics = {} + + bl_name, bl_hyps = self.named_systems[0] + + for name, metric in 
metrics.items(): + sacrelogger.info(f'Pre-computing {name} statistics for {bl_name!r}') + bl_stats = metric._extract_corpus_statistics(bl_hyps, self.references) + bl_score = metric._aggregate_and_compute(bl_stats) + + # Compute CI for the baseline here once + confidence_n = self.n_samples if self.test_type == 'bs' \ + else self.n_ar_confidence + + bl_mean, bl_ci = None, None + if confidence_n > 0: + _, bl_scores = _bootstrap_resample(bl_stats, metric, confidence_n) + bl_mean, bl_ci = estimate_ci(np.array([x.score for x in bl_scores])) + + result = Result(bl_score.score, mean=bl_mean, ci=bl_ci) + # Use updated name for the metric + self._baseline_info[bl_score.name] = (bl_stats, result) + self.metrics[bl_score.name] = metric + + # Update metric signature as well + sig = metric.get_signature() + sig.update('seed', str(self._seed).lower()) + + # Num samples for bs, num trials for AR + sig.update(self.test_type, self.n_samples) + if self.n_ar_confidence > 0: + # Bootstrap is used for AR CI as well + sig.update('bs', self.n_ar_confidence) + self._signatures[bl_score.name] = sig + + def __call__(self) -> Tuple[Dict[str, Signature], Dict[str, List[Union[str, Result]]]]: + """Runs the paired test either on single or multiple worker processes.""" + tasks = [] + scores: Dict[str, List[Union[str, Result]]] = {} + + # Add the name column + scores['System'] = [ns[0] for ns in self.named_systems] + + # Store baseline results as the first position + for metric, (_, result) in self._baseline_info.items(): + scores[metric] = [result] + + # Prepare list of arguments for each comparison + # Skip the baseline (pos: 0) + for idx, (name, hyps) in enumerate(self.named_systems[1:]): + seed = self._seed if self._seed else None + + tasks.append( + (self._baseline_info, name, hyps, self.references, + self.metrics, self.n_samples, self.n_ar_confidence, seed)) + + # Run the test(s) + if self.n_jobs == 1: + results = [self._fn(*args) for args in tasks] + else: + # NOTE: The overhead of worker 
creation is not negligible + # but if you have many systems and TER enabled, this significantly + # speeds up the test. + # NOTE: This only works on Linux/Mac OS X but not Windows. Windows only + # supports `spawn` backend which requires things to be called + # from within __main__. + sacrelogger.info(f'Launching {self.n_jobs} parallel workers.') + with mp.get_context('fork').Pool(self.n_jobs) as pool: + jobs = [pool.apply_async(self._fn, args) for args in tasks] + + # wait for completion + results = [j.get() for j in jobs] + + # Keep the order deterministic + for sys_name, sys_results in results: + for metric, _result in sys_results.items(): + scores[metric].append(_result) + + return self._signatures, scores diff --git a/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/utils.py b/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..34d286c48c02f0ba8fcf5fcbba48a3e609582590 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/sacrebleu/utils.py @@ -0,0 +1,639 @@ +import itertools +import json +import os +import re +import sys +import gzip +import math +import hashlib +import logging +import portalocker +from collections import defaultdict +from typing import List, Optional, Sequence, Dict +from argparse import Namespace + +from tabulate import tabulate +import colorama + + +# Where to store downloaded test sets. +# Define the environment variable $SACREBLEU, or use the default of ~/.sacrebleu. +# +# Querying for a HOME environment variable can result in None (e.g., on Windows) +# in which case the os.path.join() throws a TypeError. Using expanduser() is +# a safe way to get the user's home folder. 
# Use expanduser() rather than $HOME directly: HOME can be unset (e.g. on
# Windows), which would make os.path.join() raise a TypeError.
USERHOME = os.path.expanduser("~")
# Root folder for cached/downloaded test sets; override with $SACREBLEU.
SACREBLEU_DIR = os.environ.get('SACREBLEU', os.path.join(USERHOME, '.sacrebleu'))

sacrelogger = logging.getLogger('sacrebleu')


class Color:
    """Small helper around `colorama` for optional ANSI-colored terminal output."""

    # Global switch: set to False to disable coloring entirely.
    ENABLE_COLORS = True

    @staticmethod
    def format(msg: str, color: str) -> str:
        """Returns a colored version of the given message string.

        :param msg: The string to Color.format.
        :param color: The color specifier i.e. 'red', 'blue', 'green', etc.
        :return: A colored version of the string if the output is a terminal.
        """
        if not Color.ENABLE_COLORS:
            return msg
        # Unknown color names fall through and leave the message unmodified.
        _ansi_str = getattr(colorama.Fore, color.upper(), None)
        if _ansi_str:
            return f'{_ansi_str}{msg}{colorama.Style.RESET_ALL}'

        return msg


def _format_score_lines(scores: dict,
                        width: int = 2,
                        multiline: bool = True) -> Dict[str, List[str]]:
    """Formats the scores prior to tabulating them.

    NOTE: This mutates the input dict by popping its 'System' key.

    :param scores: Mapping of column name -> list of `Result` objects or
        pre-formatted strings; must contain a 'System' key.
    :param width: Number of decimal digits for floats.
    :param multiline: If True, p-values are placed on their own line.
    :return: A new mapping of column name -> list of formatted strings.
    """
    new_scores = {'System': scores.pop('System')}
    p_val_break_char = '\n' if multiline else ' '
    is_bootstrap = False

    def _color_p_value(p: float):
        # Significant results (p <= 0.05) get a '*' marker; others are red.
        msg = f'(p = {p:.4f})'
        if p > 0.05:
            return Color.format(msg, 'red')
        return msg + '*'

    for metric, vals in scores.items():
        new_vals = []

        for result in vals:
            if not isinstance(result, str):
                # Format Result instances: score, optional "mean ± CI", p-value.
                _str = f'{result.score:.{width}f}'
                if result.mean is not None:
                    is_bootstrap = True
                    _str += f' ({result.mean:.{width}f} ± {result.ci:.{width}f})'
                if result.p_value is not None:
                    _str += p_val_break_char + _color_p_value(result.p_value)
            else:
                # Already formatted in non paired-test mode
                _str = result

            new_vals.append(_str)

        if is_bootstrap:
            # Change titles
            # NOTE(review): `is_bootstrap` is never reset per metric, so once
            # any metric reports a mean, every *subsequent* metric title also
            # gets the suffix — confirm this is intended.
            metric += ' (μ ± 95% CI)'

        new_scores[metric] = new_vals

    return new_scores
def print_results_table(results: dict, signatures: dict, args: Namespace):
    """Prints out a nicely formatted table for multi-system evaluation mode.

    :param results: Mapping of column name -> list of `Result` objects or
        pre-formatted strings; must contain a 'System' key in first position.
    :param signatures: Mapping of metric name -> `Signature` to print at the end.
    :param args: Parsed CLI arguments; `format`, `width`, `paired_bs`,
        `paired_ar`, `paired_bs_n` and `paired_ar_n` are consulted.
    """
    if args.format == 'json':
        proper_json = []
        dict_keys = list(results.keys())
        # Emit one JSON object per system; assumes 'System' is the first key.
        for i in range(len(results['System'])):
            value = {}
            value['system'] = results['System'][i]
            # parse metrics
            for j in range(1, len(dict_keys)):
                if isinstance(results[dict_keys[j]][i], str):
                    value[dict_keys[j]] = results[dict_keys[j]][i]
                else:
                    # Values inside object as dict
                    value[dict_keys[j]] = results[dict_keys[j]][i].__dict__
            proper_json.append(value)

        print(json.dumps(proper_json, indent=4))
        return

    tablefmt = args.format
    # BUG FIX: `tablefmt in ('text')` tested substring membership in the
    # *string* 'text' (e.g. 'te' would match); compare for equality instead.
    if tablefmt == 'text':
        tablefmt = 'fancy_grid'
    elif tablefmt == 'latex':
        # Use booktabs
        tablefmt = 'latex_booktabs'

    # If paired testing has been given, this'll format the score lines
    results = _format_score_lines(
        results, args.width, multiline=tablefmt == 'fancy_grid')

    new_dict = {}

    # Color the column names and the baseline system name and scores
    has_baseline = False
    baseline_name = ''
    for name in results.keys():
        val = results[name]
        if val[0].startswith('Baseline:') or has_baseline:
            if val[0].startswith('Baseline:'):
                baseline_name = val[0]
            has_baseline = True
            val[0] = Color.format(val[0], 'yellow')
        new_dict[Color.format(name, 'cyan')] = results[name]

    # Finally tabulate
    table = tabulate(
        new_dict, headers='keys', tablefmt=tablefmt,
        colalign=('right', ),
        stralign='center',
        numalign='center',
        floatfmt=f'.{args.width}f')

    print(table)
    print()

    is_paired = args.paired_bs or args.paired_ar

    if is_paired:
        test_type = 'bootstrap resampling' if args.paired_bs else 'approximate randomization'
        n_samples_or_trials = args.paired_bs_n if args.paired_bs else args.paired_ar_n
        test_sample_type = 'resampling trials' if args.paired_bs else 'trials'
        msg = f'Paired {test_type} test with {n_samples_or_trials} {test_sample_type}'

        bline = Color.format('baseline', 'yellow')
        bline_name = Color.format(baseline_name, 'yellow')
        null_hyp = Color.format('Null hypothesis', 'green')
        pval_color = Color.format('highlighted in red', 'red')

        # Print fancy header
        print('-' * len(msg) + '\n' + msg + '\n' + '-' * len(msg))
        print(f' - Each system is pairwise compared to {bline_name}.')
        if args.paired_bs:
            print('   Actual system score / bootstrap estimated true mean / 95% CI are provided for each metric.')
        else:
            print('   Actual system score is provided for each metric.')
        print()
        print(f' - {null_hyp}: the system and the {bline} translations are essentially')
        print(f'   generated by the same underlying process. For a given system and the {bline},')
        print('   the p-value is roughly the probability of the absolute score difference (delta)')
        print(f'   or higher occurring due to chance, under the assumption that the {null_hyp.lower()} is correct.')
        print()
        print(f' - Assuming a significance threshold of 0.05, the {null_hyp.lower()} can be rejected')
        print('   for p-values < 0.05 (marked with "*"). This means that the delta is unlikely to be attributed')
        print(f'   to chance, hence the system is significantly "different" than the {bline}.')
        print(f'   Otherwise, the p-values are {pval_color}.')
        print()
        print(f' - NOTE: Significance does not tell whether a system is "better" than the {bline} but rather')
        print('   emphasizes the "difference" of the systems in terms of the replicability of the delta.')
        print()

    print('-----------------')
    print('Metric signatures')
    print('-----------------')
    for name, sig in signatures.items():
        print(f' - {name:<10} {sig}')


def print_single_results(results: List[str], args: Namespace):
    """Re-process metric strings to align them nicely.

    :param results: Pre-formatted per-metric score strings.
    :param args: Parsed CLI arguments; only `format` is consulted.
    """
    if args.format == 'json':
        if len(results) > 1:
            proper_json = '[\n' + ',\n'.join(results) + '\n]'
            print(proper_json)
        else:
            print(results[0])
        return

    # Color confidence strings for emphasis
    if 'μ' in results[0]:
        color_re = re.compile(r'(\(μ = [0-9\.]+ ± [0-9\.]+\))')
        for idx in range(len(results)):
            results[idx] = color_re.sub(
                lambda m: Color.format(m.group(), 'cyan'), results[idx])

    if len(results) == 1:
        # Just one system, nothing to align.
        print(results[0])
        return

    # Align by '=' character.
    # BUG FIX: lines without '=' used to be printed but still left `lens`
    # shorter than `results`, so the subsequent zip() paired widths with the
    # wrong lines. Track (width, line) pairs together instead.
    aligned = []
    for line in results:
        # If not score_only, split lines from '=' for re-alignment
        try:
            aligned.append((line.index('=') - 1, line))
        except ValueError:
            # No '=' in this line: print it unaligned.
            print(line)

    if len(aligned) > 0:
        w = max(_len for _len, _ in aligned)
        for (_len, line) in aligned:
            left, right = line[:_len], line[_len:]
            print(f'{left:>{w}}{right}')
def sanity_check_lengths(system: Sequence[str],
                         refs: Sequence[Sequence[str]],
                         test_set: Optional[str] = None):
    """Verifies that each reference stream has as many segments as the
    system output; logs an explanatory error and exits otherwise.

    :param system: The system hypotheses, one string per segment.
    :param refs: One or more reference streams, each a sequence of strings.
    :param test_set: Optional test set name, used to suggest a cache cleanup.
    """
    expected = len(system)
    if all(len(stream) == expected for stream in refs):
        return

    sacrelogger.error("System and reference streams have different lengths.")
    if test_set:
        sacrelogger.error("This could be an issue with your system output "
                          "or with sacreBLEU's reference database if -t is given.")
        sacrelogger.error("For the latter, try cleaning out the cache by typing:\n")
        sacrelogger.error(f"  rm -r {SACREBLEU_DIR}/{test_set}\n")
        sacrelogger.error("The test sets will be re-downloaded the next time you run sacreBLEU.")
    sys.exit(1)


def smart_open(file, mode='rt', encoding='utf-8'):
    """Convenience function for reading compressed or plain text files.

    :param file: The file to read.
    :param mode: The file mode (read, write).
    :param encoding: The file encoding.
    """
    # Dispatch on the extension: gzip-compressed files get gzip.open(),
    # everything else the plain built-in open(). Both share the same
    # keyword arguments, so a single call site suffices.
    opener = gzip.open if file.endswith('.gz') else open
    return opener(file, mode=mode, encoding=encoding, newline="\n")
def my_log(num: float) -> float:
    """
    Floors the log function

    :param num: the number
    :return: log(num) floored to a very low number
    """
    # log(0) is -inf; clamp to a large negative sentinel instead so that
    # downstream arithmetic stays finite.
    if num == 0.0:
        return -9999999999
    return math.log(num)


def sum_of_lists(lists):
    """Aggregates a list of numeric lists by element-wise summing.

    :param lists: A list of equally-sized numeric lists.
    :return: A single list with the element-wise sums; the numeric type of
        the first row's elements is preserved. An empty input yields an
        empty list. A single-row input is returned as-is (same object).
    """
    # Robustness fix: an empty input previously raised IndexError.
    if not lists:
        return []

    if len(lists) == 1:
        return lists[0]

    # Preserve datatype (e.g. int vs. float) by seeding the accumulator
    # with a zero of the first element's type.
    size = len(lists[0])
    init_val = type(lists[0][0])(0.0)
    total = [init_val] * size
    for ll in lists:
        for i in range(size):
            total[i] += ll[i]
    return total


def args_to_dict(args, prefix: str, strip_prefix: bool = False):
    """Filters argparse's `Namespace` into a dictionary with arguments
    beginning with the given prefix.

    :param args: The `Namespace` instance.
    :param prefix: The prefix (without the trailing underscore).
    :param strip_prefix: If True, the leading prefix is removed from the keys.
    :return: A dict of matching argument names to their values.
    """
    prefix += '_'
    d = {}
    for k, v in args.__dict__.items():
        if k.startswith(prefix):
            # Bug fix: strip only the *leading* prefix. The previous
            # k.replace(prefix, '') also removed later occurrences of the
            # prefix inside the name.
            if strip_prefix:
                k = k[len(prefix):]
            d[k] = v
    return d
def print_test_set(test_set, langpair, requested_fields, origlang=None, subset=None):
    """Prints to STDOUT the specified side of the specified test set.

    :param test_set: the test set to print
    :param langpair: the language pair
    :param requested_fields: the fields to print
    :param origlang: print only sentences with a given original language (2-char ISO639-1 code), "non-" prefix means negation
    :param subset: print only sentences whose document annotation matches a given regex
    """
    if test_set not in DATASETS:
        raise Exception(f"No such test set {test_set}")

    # Resolve the dataset's field names and local file paths; get_files()
    # may download the data as a side effect.
    fieldnames = DATASETS[test_set].fieldnames(langpair)
    all_files = DATASETS[test_set].get_files(langpair)

    # "all" is exclusive: it expands to every field and cannot be mixed
    # with explicit field names.
    if "all" in requested_fields and len(requested_fields) != 1:
        sacrelogger.error("Cannot use --echo all with other fields")
        sys.exit(1)
    elif "all" in requested_fields:
        requested_fields = fieldnames

    # backwards compatibility: allow "ref" even if not present (choose first)
    if "ref" in requested_fields and "ref" not in fieldnames:
        replacement_ref = min([f for f in fieldnames if f.startswith("ref")])
        requested_fields = [f if f != "ref" else replacement_ref for f in requested_fields]

    # Map each requested field to its file, bailing out with a helpful
    # message on unknown fields.
    files = []
    for field in requested_fields:
        if field not in fieldnames:
            sacrelogger.error(f"No such field {field} in test set {test_set} for language pair {langpair}.")
            sacrelogger.error(f"available fields for {test_set}/{langpair}: {', '.join(fieldnames)}")
            if "ref" not in fieldnames:
                subref = min([f for f in fieldnames if f.startswith("ref")])
                sacrelogger.error(f"'ref' also allowed for backwards compatibility (will return {subref})")
            sys.exit(1)
        index = fieldnames.index(field)
        files.append(all_files[index])

    # Open all selected streams and optionally narrow them down to a given
    # original language and/or document subset, then print them side by side.
    streams = [smart_open(file) for file in files]
    streams = filter_subset(streams, test_set, langpair, origlang, subset)
    for lines in zip(*streams):
        print('\t'.join(map(lambda x: x.rstrip(), lines)))
def get_source_file(test_set: str, langpair: str) -> str:
    """
    Returns the source file for a given testset/langpair.
    Downloads it first if it is not already local.

    :param test_set: The test set (e.g., "wmt19")
    :param langpair: The language pair (e.g., "de-en")
    :return: the path to the requested source file
    """
    if test_set not in DATASETS:
        raise Exception(f"No such test set {test_set}")

    # Download and path resolution are delegated to the dataset object.
    return DATASETS[test_set].get_source_file(langpair)


def get_reference_files(test_set: str, langpair: str) -> List[str]:
    """
    Returns a list of one or more reference file paths for the given testset/langpair.
    Downloads the references first if they are not already local.

    :param test_set: The test set (e.g., "wmt19")
    :param langpair: The language pair (e.g., "de-en")
    :return: a list of one or more reference file paths
    """
    if test_set not in DATASETS:
        raise Exception(f"No such test set {test_set}")
    # Download and path resolution are delegated to the dataset object.
    return DATASETS[test_set].get_reference_files(langpair)
def get_files(test_set, langpair) -> List[str]:
    """
    Returns the path of the source file and all reference files for
    the provided test set / language pair.
    Downloads the references first if they are not already local.

    :param test_set: The test set (e.g., "wmt19")
    :param langpair: The language pair (e.g., "de-en")
    :return: a list of the source file and all reference files
    """
    if test_set not in DATASETS:
        raise Exception(f"No such test set {test_set}")
    return DATASETS[test_set].get_files(langpair)


def extract_tarball(filepath, destdir):
    """Extracts a .tar.gz/.tgz or .zip archive into `destdir`.

    Archives with any other extension are silently ignored (only a log
    line is emitted).

    :param filepath: Path to the archive.
    :param destdir: Directory to extract into.
    """
    sacrelogger.info(f'Extracting {filepath} to {destdir}')
    if filepath.endswith('.tar.gz') or filepath.endswith('.tgz'):
        import tarfile
        with tarfile.open(filepath) as tar:
            tar.extractall(path=destdir)
    elif filepath.endswith('.zip'):
        import zipfile
        # Bug fix: the context variable previously shadowed the just-imported
        # `zipfile` module ("as zipfile"); use a distinct name.
        with zipfile.ZipFile(filepath, 'r') as zf:
            zf.extractall(path=destdir)


def get_md5sum(dest_path):
    """Returns the hex MD5 digest of the file at `dest_path`.

    The file is read incrementally to avoid loading it fully into memory.

    :param dest_path: Path of the file to hash.
    :return: Hexadecimal MD5 digest string.
    """
    md5 = hashlib.md5()
    with open(dest_path, 'rb') as infile:
        for line in infile:
            md5.update(line)
    return md5.hexdigest()


def download_file(source_path, dest_path, extract_to=None, expected_md5=None):
    """Downloading utility.

    Downloads the specified test to the system location specified by the SACREBLEU environment variable.

    :param source_path: the remote uri to download
    :param dest_path: where to save the file
    :param extract_to: for tarballs, where to extract to
    :param expected_md5: the MD5 sum
    :return: the set of processed file names
    """
    import urllib.request
    import ssl

    outdir = os.path.dirname(dest_path)
    os.makedirs(outdir, exist_ok=True)

    # Serialize concurrent sacrebleu processes racing on the same download
    # via a sibling lock file.
    lockfile = f"{dest_path}.lock"
    with portalocker.Lock(lockfile, timeout=60):

        # Skip the download when a non-empty copy already exists.
        if not os.path.exists(dest_path) or os.path.getsize(dest_path) == 0:
            sacrelogger.info(f"Downloading {source_path} to {dest_path}")

            try:
                with urllib.request.urlopen(source_path) as f, open(dest_path, 'wb') as out:
                    out.write(f.read())
            except ssl.SSLError:
                sacrelogger.error('An SSL error was encountered in downloading the files. If you\'re on a Mac, '
                                  'you may need to run the "Install Certificates.command" file located in the '
                                  '"Python 3" folder, often found under /Applications')
                sys.exit(1)

        if expected_md5 is not None:
            cur_md5 = get_md5sum(dest_path)
            if cur_md5 != expected_md5:
                sacrelogger.error(f'Fatal: MD5 sum of downloaded file was incorrect (got {cur_md5}, expected {expected_md5}).')
                sacrelogger.error(f'Please manually delete {dest_path!r} and rerun the command.')
                sacrelogger.error('If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.')
                sys.exit(1)

        # Extract the tarball
        if extract_to is not None:
            extract_tarball(dest_path, extract_to)
def download_test_set(test_set, langpair=None):
    """Downloads the specified test to the system location specified by the SACREBLEU environment variable.

    :param test_set: the test set to download
    :param langpair: the language pair (needed for some datasets)
    :return: the set of processed file names
    """
    if test_set not in DATASETS:
        raise Exception(f"No such test set {test_set}")
    dataset = DATASETS[test_set]
    # get_files() performs the actual download as a side effect when needed.
    file_paths = dataset.get_files(langpair)
    return file_paths


def get_langpairs_for_testset(testset: str) -> List[str]:
    """Return a list of language pairs for a given test set."""
    if testset not in DATASETS:
        # Unknown test sets yield an empty list rather than raising.
        return []
    return list(DATASETS[testset].langpairs.keys())


def get_available_testsets() -> List[str]:
    """Return a list of available test sets."""
    # Reverse sort so the most recent editions (e.g. wmt21 before wmt14)
    # come first.
    return sorted(DATASETS.keys(), reverse=True)


def get_available_testsets_for_langpair(langpair: str) -> List[str]:
    """Return a list of available test sets for a given language pair

    :param langpair: The language pair (e.g., "de-en"); both directions
        are considered a match.
    """
    parts = langpair.split('-')
    srclang = parts[0]
    trglang = parts[1]

    testsets = []
    for dataset in DATASETS.values():
        # A dataset qualifies if it covers the pair in either direction.
        if f'{srclang}-{trglang}' in dataset.langpairs \
                or f'{trglang}-{srclang}' in dataset.langpairs:
            testsets.append(dataset.name)

    return testsets
get_available_origlangs(test_sets, langpair) -> List[str]: + """Return a list of origlang values according to the raw XML/SGM files.""" + if test_sets is None: + return [] + + origlangs = set() + for test_set in test_sets.split(','): + dataset = DATASETS[test_set] + rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', dataset.langpairs[langpair][0]) + from .dataset.wmt_xml import WMTXMLDataset + if isinstance(dataset, WMTXMLDataset): + for origlang in dataset._unwrap_wmt21_or_later(rawfile)['origlang']: + origlangs.add(origlang) + if rawfile.endswith('.sgm'): + with smart_open(rawfile) as fin: + for line in fin: + if line.startswith(' List[str]: + """Return a list of domain values according to the raw XML files and domain/country values from the SGM files.""" + if test_sets is None: + return [] + + subsets = set() + for test_set in test_sets.split(','): + dataset = DATASETS[test_set] + from .dataset.wmt_xml import WMTXMLDataset + if isinstance(dataset, WMTXMLDataset): + rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', dataset.langpairs[langpair][0]) + fields = dataset._unwrap_wmt21_or_later(rawfile) + if 'domain' in fields: + subsets |= set(fields['domain']) + elif test_set in SUBSETS: + subsets |= set("country:" + v.split("-")[0] for v in SUBSETS[test_set].values()) + subsets |= set(v.split("-")[1] for v in SUBSETS[test_set].values()) + return sorted(list(subsets)) + +def filter_subset(systems, test_sets, langpair, origlang, subset=None): + """Filter sentences with a given origlang (or subset) according to the raw SGM files.""" + if origlang is None and subset is None: + return systems + if test_sets is None or langpair is None: + raise ValueError('Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).') + + if subset is not None and subset.startswith('country:'): + subset = subset[8:] + + re_origlang = re.compile(r'.* origlang="([^"]+)".*\n') + re_id = re.compile(r'.* docid="([^"]+)".*\n') + + indices_to_keep = [] + for test_set 
in test_sets.split(','): + dataset = DATASETS[test_set] + rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', dataset.langpairs[langpair][0]) + from .dataset.wmt_xml import WMTXMLDataset + if isinstance(dataset, WMTXMLDataset): + fields = dataset._unwrap_wmt21_or_later(rawfile) + domains = fields['domain'] if 'domain' in fields else itertools.repeat(None) + for doc_origlang, doc_domain in zip(fields['origlang'], domains): + if origlang is None: + include_doc = True + else: + if origlang.startswith('non-'): + include_doc = doc_origlang != origlang[4:] + else: + include_doc = doc_origlang == origlang + if subset is not None and (doc_domain is None or not re.search(subset, doc_domain)): + include_doc = False + indices_to_keep.append(include_doc) + elif rawfile.endswith('.sgm'): + doc_to_tags = {} + if subset is not None: + if test_set not in SUBSETS: + raise Exception('No subset annotation available for test set ' + test_set) + doc_to_tags = SUBSETS[test_set] + with smart_open(rawfile) as fin: + include_doc = False + for line in fin: + if line.startswith('