[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 330,
"logprob": -0.09289551,
"special": false,
"text": " A"
},
{
"id": 13088,
"logprob": -0.6743164,
"special": false,
"text": " chicken"
},
{
"id": 349,
"logprob": -0.31396484,
"special": false,
"text": " is"
},
{
"id": 6398,
"logprob": -0.051727295,
"special": false,
"text": " sitting"
},
{
"id": 356,
"logprob": -0.34448242,
"special": false,
"text": " on"
},
{
"id": 264,
"logprob": -0.1194458,
"special": false,
"text": " a"
},
{
"id": 17972,
"logprob": -0.03237915,
"special": false,
"text": " pile"
},
{
"id": 302,
"logprob": -0.00018751621,
"special": false,
"text": " of"
},
{
"id": 2445,
"logprob": -0.07043457,
"special": false,
"text": " money"
},
{
"id": 28723,
"logprob": -0.00422287,
"special": false,
"text": "."
}
],
"top_tokens": null
},
"generated_text": " A chicken is sitting on a pile of money."
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 330,
"logprob": -0.09448242,
"special": false,
"text": " A"
},
{
"id": 13088,
"logprob": -0.6743164,
"special": false,
"text": " chicken"
},
{
"id": 349,
"logprob": -0.31201172,
"special": false,
"text": " is"
},
{
"id": 6398,
"logprob": -0.051635742,
"special": false,
"text": " sitting"
},
{
"id": 356,
"logprob": -0.34033203,
"special": false,
"text": " on"
},
{
"id": 264,
"logprob": -0.1194458,
"special": false,
"text": " a"
},
{
"id": 17972,
"logprob": -0.032562256,
"special": false,
"text": " pile"
},
{
"id": 302,
"logprob": -0.00018763542,
"special": false,
"text": " of"
},
{
"id": 2445,
"logprob": -0.07122803,
"special": false,
"text": " money"
},
{
"id": 28723,
"logprob": -0.0041007996,
"special": false,
"text": "."
}
],
"top_tokens": null
},
"generated_text": " A chicken is sitting on a pile of money."
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 330,
"logprob": -0.09448242,
"special": false,
"text": " A"
},
{
"id": 13088,
"logprob": -0.6743164,
"special": false,
"text": " chicken"
},
{
"id": 349,
"logprob": -0.31201172,
"special": false,
"text": " is"
},
{
"id": 6398,
"logprob": -0.051635742,
"special": false,
"text": " sitting"
},
{
"id": 356,
"logprob": -0.34033203,
"special": false,
"text": " on"
},
{
"id": 264,
"logprob": -0.1194458,
"special": false,
"text": " a"
},
{
"id": 17972,
"logprob": -0.032562256,
"special": false,
"text": " pile"
},
{
"id": 302,
"logprob": -0.00018787384,
"special": false,
"text": " of"
},
{
"id": 2445,
"logprob": -0.07122803,
"special": false,
"text": " money"
},
{
"id": 28723,
"logprob": -0.0041007996,
"special": false,
"text": "."
}
],
"top_tokens": null
},
"generated_text": " A chicken is sitting on a pile of money."
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 330,
"logprob": -0.09448242,
"special": false,
"text": " A"
},
{
"id": 13088,
"logprob": -0.6743164,
"special": false,
"text": " chicken"
},
{
"id": 349,
"logprob": -0.31201172,
"special": false,
"text": " is"
},
{
"id": 6398,
"logprob": -0.051635742,
"special": false,
"text": " sitting"
},
{
"id": 356,
"logprob": -0.34033203,
"special": false,
"text": " on"
},
{
"id": 264,
"logprob": -0.1194458,
"special": false,
"text": " a"
},
{
"id": 17972,
"logprob": -0.032562256,
"special": false,
"text": " pile"
},
{
"id": 302,
"logprob": -0.00018763542,
"special": false,
"text": " of"
},
{
"id": 2445,
"logprob": -0.07122803,
"special": false,
"text": " money"
},
{
"id": 28723,
"logprob": -0.0041007996,
"special": false,
"text": "."
}
],
"top_tokens": null
},
"generated_text": " A chicken is sitting on a pile of money."
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_load.json",
"repo_id": "text-generation-inference",
"token_count": 4039
} | 313 |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -0.83984375,
"text": " is"
},
{
"id": 18147,
"logprob": -12.8125,
"text": " Deep"
},
{
"id": 20727,
"logprob": -2.84375,
"text": " Learning"
},
{
"id": 32,
"logprob": -1.25,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 187,
"logprob": -0.37890625,
"special": false,
"text": "\n"
},
{
"id": 187,
"logprob": -0.4296875,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.078125,
"special": false,
"text": "Deep"
},
{
"id": 4715,
"logprob": -0.515625,
"special": false,
"text": " learning"
},
{
"id": 310,
"logprob": -0.6015625,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.65625,
"special": false,
"text": " a"
},
{
"id": 747,
"logprob": -2.109375,
"special": false,
"text": " new"
},
{
"id": 1511,
"logprob": -2.328125,
"special": false,
"text": " type"
},
{
"id": 273,
"logprob": -0.0032653809,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.28125,
"special": false,
"text": " machine"
}
],
"top_tokens": null
},
"generated_text": "\n\nDeep learning is a new type of machine"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -0.80078125,
"text": " is"
},
{
"id": 18147,
"logprob": -13.25,
"text": " Deep"
},
{
"id": 20727,
"logprob": -2.828125,
"text": " Learning"
},
{
"id": 32,
"logprob": -1.1953125,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 187,
"logprob": -0.296875,
"special": false,
"text": "\n"
},
{
"id": 187,
"logprob": -0.3359375,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.2578125,
"special": false,
"text": "Deep"
},
{
"id": 4715,
"logprob": -0.5546875,
"special": false,
"text": " learning"
},
{
"id": 310,
"logprob": -0.62890625,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.64453125,
"special": false,
"text": " a"
},
{
"id": 747,
"logprob": -2.078125,
"special": false,
"text": " new"
},
{
"id": 1511,
"logprob": -2.28125,
"special": false,
"text": " type"
},
{
"id": 273,
"logprob": -0.0030670166,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.3125,
"special": false,
"text": " machine"
}
],
"top_tokens": null
},
"generated_text": "\n\nDeep learning is a new type of machine"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -0.80078125,
"text": " is"
},
{
"id": 18147,
"logprob": -13.25,
"text": " Deep"
},
{
"id": 20727,
"logprob": -2.828125,
"text": " Learning"
},
{
"id": 32,
"logprob": -1.1953125,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 187,
"logprob": -0.296875,
"special": false,
"text": "\n"
},
{
"id": 187,
"logprob": -0.3359375,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.2578125,
"special": false,
"text": "Deep"
},
{
"id": 4715,
"logprob": -0.5546875,
"special": false,
"text": " learning"
},
{
"id": 310,
"logprob": -0.62890625,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.64453125,
"special": false,
"text": " a"
},
{
"id": 747,
"logprob": -2.078125,
"special": false,
"text": " new"
},
{
"id": 1511,
"logprob": -2.28125,
"special": false,
"text": " type"
},
{
"id": 273,
"logprob": -0.0030670166,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.3125,
"special": false,
"text": " machine"
}
],
"top_tokens": null
},
"generated_text": "\n\nDeep learning is a new type of machine"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -0.80078125,
"text": " is"
},
{
"id": 18147,
"logprob": -13.25,
"text": " Deep"
},
{
"id": 20727,
"logprob": -2.828125,
"text": " Learning"
},
{
"id": 32,
"logprob": -1.1953125,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 187,
"logprob": -0.296875,
"special": false,
"text": "\n"
},
{
"id": 187,
"logprob": -0.3359375,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.2578125,
"special": false,
"text": "Deep"
},
{
"id": 4715,
"logprob": -0.5546875,
"special": false,
"text": " learning"
},
{
"id": 310,
"logprob": -0.62890625,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.64453125,
"special": false,
"text": " a"
},
{
"id": 747,
"logprob": -2.078125,
"special": false,
"text": " new"
},
{
"id": 1511,
"logprob": -2.28125,
"special": false,
"text": " type"
},
{
"id": 273,
"logprob": -0.0030670166,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.3125,
"special": false,
"text": " machine"
}
],
"top_tokens": null
},
"generated_text": "\n\nDeep learning is a new type of machine"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json",
"repo_id": "text-generation-inference",
"token_count": 5458
} | 314 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 7,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 3,
"logprob": -0.7001953,
"special": false,
"text": " "
},
{
"id": 18,
"logprob": -1.1943359,
"special": false,
"text": "-"
},
{
"id": 26937,
"logprob": -1.2099609,
"special": false,
"text": "196"
},
{
"id": 3,
"logprob": -1.2451172,
"special": false,
"text": " "
},
{
"id": 1956,
"logprob": -0.3322754,
"special": false,
"text": "°"
},
{
"id": 254,
"logprob": -0.19213867,
"special": false,
"text": "C"
},
{
"id": 1,
"logprob": -0.030151367,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "-196 °C"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json",
"repo_id": "text-generation-inference",
"token_count": 680
} | 315 |
{
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": "The image is a blank white space with no visible objects or features.",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1743861909,
"id": "",
"model": "ll-re/Llama-4-Scout-17B-16E-Instruct",
"object": "chat.completion",
"system_fingerprint": "3.2.1-dev0-native",
"usage": {
"completion_tokens": 15,
"prompt_tokens": 166,
"total_tokens": 181
}
}
| text-generation-inference/integration-tests/models/__snapshots__/test_transformers_llama4/test_flash_llama4_image_base64_rgb_png.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_transformers_llama4/test_flash_llama4_image_base64_rgb_png.json",
"repo_id": "text-generation-inference",
"token_count": 283
} | 316 |
import pytest
import requests
@pytest.fixture(scope="module")
def llama_continue_final_message_handle(launcher):
with launcher("TinyLlama/TinyLlama-1.1B-Chat-v1.0") as handle:
yield handle
@pytest.fixture(scope="module")
async def llama_continue_final_message(llama_continue_final_message_handle):
await llama_continue_final_message_handle.health(300)
return llama_continue_final_message_handle.client
def test_llama_completion_single_prompt(
llama_continue_final_message, response_snapshot
):
response = requests.post(
f"{llama_continue_final_message.base_url}/v1/chat/completions",
json={
"model": "tgi",
"messages": [
{"role": "system", "content": "system message"},
{"role": "user", "content": "Which is bigger an elephant or a mouse?"},
],
"max_tokens": 30,
"stream": False,
"seed": 1337,
},
headers=llama_continue_final_message.headers,
stream=False,
)
response = response.json()
print(response)
assert len(response["choices"]) == 1
content = response["choices"][0]["message"]["content"]
assert (
content
== "Both an elephant and a mouse are mammals. However, the differences between elephants and mice are:\n\n1"
)
assert response == response_snapshot
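# When the final message in the conversation has role "assistant", the chat template
# leaves that message open, so the model continues it rather than starting a new turn.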
def test_llama_completion_single_prompt_continue(
llama_continue_final_message, response_snapshot
):
response = requests.post(
f"{llama_continue_final_message.base_url}/v1/chat/completions",
json={
"model": "tgi",
"messages": [
{"role": "system", "content": "system message"},
{"role": "user", "content": "Which is bigger an elephant or a mouse?"},
{
"role": "assistant",
"content": "the elephant, but have you heard about",
},
],
"max_tokens": 30,
"stream": False,
"seed": 1337,
},
headers=llama_continue_final_message.headers,
stream=False,
)
response = response.json()
print(response)
assert len(response["choices"]) == 1
content = response["choices"][0]["message"]["content"]
assert (
content
== " the royal mouse? It is a little more slender and only weighs around 1.5 pounds for males and 1.3 pounds"
)
assert response == response_snapshot
| text-generation-inference/integration-tests/models/test_continue_final_message.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_continue_final_message.py",
"repo_id": "text-generation-inference",
"token_count": 1102
} | 317 |
import pytest
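# Note: Marlin is a fast FP16xINT4 matmul kernel; these tests load a checkpoint that
# was quantized ahead of time and serve it with quantize="marlin".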
@pytest.fixture(scope="module")
def flash_llama_marlin_handle(launcher):
with launcher(
"neuralmagic/llama-2-7b-chat-marlin", num_shard=2, quantize="marlin"
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_marlin(flash_llama_marlin_handle):
await flash_llama_marlin_handle.health(300)
return flash_llama_marlin_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_marlin(flash_llama_marlin, response_snapshot):
response = await flash_llama_marlin.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_marlin_all_params(flash_llama_marlin, response_snapshot):
response = await flash_llama_marlin.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_marlin_load(
flash_llama_marlin, generate_load, response_snapshot
):
responses = await generate_load(
flash_llama_marlin, "Test request", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_llama_marlin.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_llama_marlin.py",
"repo_id": "text-generation-inference",
"token_count": 748
} | 318 |
import pytest
@pytest.fixture(scope="module")
def flash_qwen2_5_vl_handle(launcher):
with launcher("Qwen/Qwen2.5-VL-3B-Instruct") as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_qwen2_5(flash_qwen2_5_vl_handle):
await flash_qwen2_5_vl_handle.health(300)
return flash_qwen2_5_vl_handle.client
@pytest.mark.private
async def test_flash_qwen2_5_vl_simple(flash_qwen2_5, response_snapshot):
response = await flash_qwen2_5.chat(
seed=42,
messages=[
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"
},
},
{"type": "text", "text": "Describe the image"},
],
},
],
)
assert (
response.choices[0].message.content
== "The image depicts an anthropomorphic rabbit character wearing an intricate space suit, which includes a helmet with a starry face pattern and multiple suitors. The rabbit's ears are significantly large and upright, and it has a hitchhiker-like star antennas on its chest. The background is a reddish-orange, rocky landscape, suggesting a Martian environment. The suit has various buttons, a red button on the chest, and a reflective or illuminated dome on the head. The overall color scheme is dominated by shades of red, orange, and gray, giving a sense of a rugged, otherworldly setting."
)
assert response == response_snapshot
@pytest.mark.private
async def test_flash_qwen2_5_vl_simple_streaming(flash_qwen2_5, response_snapshot):
responses = await flash_qwen2_5.chat(
seed=42,
messages=[
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"
},
},
{"type": "text", "text": "Describe the image"},
],
},
],
stream=True,
)
count = 0
generated = ""
last_response = None
async for response in responses:
count += 1
generated += response.choices[0].delta.content
last_response = response
assert (
generated
== "The image depicts an anthropomorphic rabbit character wearing an intricate space suit, which includes a helmet with a starry face pattern and multiple suitors. The rabbit's ears are significantly large and upright, and it has a hitchhiker-like star antennas on its chest. The background is a reddish-orange, rocky landscape, suggesting a Martian environment. The suit has various buttons, a red button on the chest, and a reflective or illuminated dome on the head. The overall color scheme is dominated by shades of red, orange, and gray, giving a sense of a rugged, otherworldly setting."
)
assert count == 121
assert last_response == response_snapshot
@pytest.mark.private
async def test_flash_qwen2_5_vl_bay(flash_qwen2_5, response_snapshot):
response = await flash_qwen2_5.chat(
seed=42,
messages=[
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
},
},
{"type": "text", "text": "Describe the image"},
],
},
],
)
assert response == response_snapshot
@pytest.mark.private
async def test_flash_qwen2_5_vl_inpaint(flash_qwen2_5, response_snapshot):
response = await flash_qwen2_5.chat(
seed=42,
messages=[
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-inpaint.png"
},
},
{"type": "text", "text": "Describe the image"},
],
},
],
)
assert response == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_qwen2_5_vl.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_qwen2_5_vl.py",
"repo_id": "text-generation-inference",
"token_count": 2258
} | 319 |
import pytest
import asyncio
@pytest.fixture(scope="module")
def mllama_handle(launcher):
with launcher(
"unsloth/Llama-3.2-11B-Vision-Instruct",
num_shard=2,
) as handle:
yield handle
@pytest.fixture(scope="module")
async def mllama(mllama_handle):
await mllama_handle.health(300)
return mllama_handle.client
@pytest.mark.asyncio
async def test_mllama_simple(mllama, response_snapshot):
response = await mllama.chat(
max_tokens=10,
temperature=0.0,
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": "Describe the image in 10 words.",
},
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/huggingface/text-generation-inference/main/integration-tests/images/chicken_on_money.png"
},
},
],
},
],
)
assert response.usage == {
"completion_tokens": 10,
"prompt_tokens": 45,
"total_tokens": 55,
}
assert (
response.choices[0].message.content
== "A chicken sits on a pile of money, looking"
)
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_mllama_load(mllama, generate_load, response_snapshot):
futures = [
mllama.chat(
max_tokens=10,
temperature=0.0,
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": "Describe the image in 10 words.",
},
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/huggingface/text-generation-inference/main/integration-tests/images/chicken_on_money.png"
},
},
],
},
],
)
        # TODO: with v3, 4 breaks here. Nothing accounts for the image VRAM
        # because mllama is the only model doing its own thing here.
        for _ in range(2)
]
responses = await asyncio.gather(*futures)
generated_texts = [response.choices[0].message.content for response in responses]
# XXX: TODO: Fix this test.
assert generated_texts[0] == "A chicken sits on a pile of money, looking"
assert len(generated_texts) == 2
    assert all(text == generated_texts[0] for text in generated_texts)
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_mllama.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_mllama.py",
"repo_id": "text-generation-inference",
"token_count": 1584
} | 320 |
{
buildPythonPackage,
poetry-core,
aiohttp,
huggingface-hub,
pydantic,
}:
buildPythonPackage {
name = "text-generation";
src = ../clients/python;
pyproject = true;
build-system = [ poetry-core ];
dependencies = [
aiohttp
huggingface-hub
pydantic
];
}
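# Minimal usage sketch (assumed, not part of this file):
#   python3Packages.callPackage ./client.nix { }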
| text-generation-inference/nix/client.nix/0 | {
"file_path": "text-generation-inference/nix/client.nix",
"repo_id": "text-generation-inference",
"token_count": 111
} | 321 |
use crate::infer::Infer;
use crate::{
default_parameters,
server::{generate_internal, ComputeType},
Deserialize, ErrorResponse, GenerateParameters, GenerateRequest, Serialize, ToSchema,
};
use axum::extract::{Extension, Path};
use axum::http::{HeaderMap, StatusCode};
use axum::response::IntoResponse;
use axum::Json;
use futures::stream::FuturesUnordered;
use futures::TryStreamExt;
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct OutputChunk {
pub name: String,
pub shape: Vec<usize>,
pub datatype: String,
pub data: Vec<u8>,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct InferenceOutput {
pub id: String,
pub outputs: Vec<OutputChunk>,
}
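// Example request body (illustrative; "data" carries the UTF-8 bytes of the prompt,
// here "Hello"):
// {
//   "id": "42",
//   "inputs": [{ "name": "input0", "shape": [1, 5], "datatype": "BYTES", "data": [72, 101, 108, 108, 111] }],
//   "outputs": [{ "name": "output0" }]
// }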
#[derive(Debug, Deserialize, ToSchema)]
pub(crate) struct InferenceRequest {
pub id: String,
#[serde(default = "default_parameters")]
pub parameters: GenerateParameters,
pub inputs: Vec<Input>,
pub outputs: Vec<Output>,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub(crate) struct Input {
pub name: String,
pub shape: Vec<usize>,
pub datatype: String,
pub data: Vec<u8>,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub(crate) struct Output {
pub name: String,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct LiveResponse {
pub live: bool,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct ReadyResponse {
pub live: bool,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct MetadataServerResponse {
pub name: String,
pub version: String,
pub extensions: Vec<String>,
}
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/v2/health/live",
responses(
(status = 200, description = "Service is live", body = LiveReponse),
(status = 404, description = "Service not found", body = ErrorResponse,
example = json!({"error": "No response"}))
)
)]
pub async fn kserve_health_live() -> Json<LiveResponse> {
let data = LiveResponse { live: true };
Json(data)
}
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/v2/health/ready",
responses(
(status = 200, description = "Service is ready", body = ReadyResponse),
(status = 404, description = "Service not found", body = ErrorResponse,
example = json!({"error": "No response"}))
)
)]
pub async fn kserve_health_ready() -> Json<ReadyResponse> {
let data = ReadyResponse { live: true };
Json(data)
}
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/v2",
responses(
(status = 200, description = "Metadata retrieved", body = MetadataServerResponse),
(status = 404, description = "Service not found", body = ErrorResponse,
example = json!({"error": "No response"}))
)
)]
pub async fn kserve_server_metadata() -> Json<MetadataServerResponse> {
let data = MetadataServerResponse {
name: "text-generation-inference".to_string(),
version: env!("CARGO_PKG_VERSION").to_string(),
extensions: vec![
"health".to_string(),
"models".to_string(),
"metrics".to_string(),
],
};
Json(data)
}
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/v2/models/{model_name}/versions/{model_version}",
responses(
(status = 200, description = "Model version metadata retrieved", body = MetadataServerResponse),
(status = 404, description = "Model or version not found", body = ErrorResponse,
example = json!({"error": "No response"}))
)
)]
pub async fn kserve_model_metadata(
Path((model_name, model_version)): Path<(String, String)>,
) -> Json<MetadataServerResponse> {
let data = MetadataServerResponse {
name: model_name,
version: model_version,
extensions: vec!["infer".to_string(), "ready".to_string()],
};
Json(data)
}
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/v2/models/{model_name}/versions/{model_version}/ready",
responses(
(status = 200, description = "Model version is ready", body = ReadyResponse),
(status = 404, description = "Model or version not found", body = ErrorResponse,
example = json!({"error": "No response"}))
)
)]
pub async fn kserve_model_metadata_ready(
Path((_model_name, _model_version)): Path<(String, String)>,
) -> Json<ReadyResponse> {
let data = ReadyResponse { live: true };
Json(data)
}
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/v2/models/{model_name}/versions/{model_version}/infer",
request_body = Json<InferenceRequest>,
responses(
(status = 200, description = "Inference executed successfully", body = InferenceOutput),
(status = 404, description = "Model or version not found", body = ErrorResponse,
example = json!({"error": "No response"}))
)
)]
pub async fn kserve_model_infer(
infer: Extension<Infer>,
Extension(compute_type): Extension<ComputeType>,
Json(payload): Json<InferenceRequest>,
) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
let id = payload.id.clone();
let str_inputs = payload
.inputs
.iter()
.map(|input| {
std::str::from_utf8(&input.data).map_err(|e| {
(
StatusCode::UNPROCESSABLE_ENTITY,
Json(ErrorResponse {
error: e.to_string(),
error_type: "utf8".to_string(),
}),
)
})
})
.collect::<Result<Vec<_>, _>>()?;
if str_inputs.len() != payload.outputs.len() {
return Err((
StatusCode::UNPROCESSABLE_ENTITY,
Json(ErrorResponse {
error: "Inputs and outputs length mismatch".to_string(),
error_type: "length mismatch".to_string(),
}),
));
}
let output_chunks = str_inputs
.iter()
.zip(&payload.outputs)
.map(|(str_input, output)| {
let generate_request = GenerateRequest {
inputs: str_input.to_string(),
parameters: payload.parameters.clone(),
add_special_tokens: true,
};
let infer = infer.clone();
let compute_type = compute_type.clone();
let span = tracing::Span::current();
async move {
generate_internal(infer, compute_type, Json(generate_request), span)
.await
.map(|(_, _, Json(generation))| {
let generation_as_bytes = generation.generated_text.as_bytes().to_vec();
OutputChunk {
name: output.name.clone(),
shape: vec![1, generation_as_bytes.len()],
datatype: "BYTES".to_string(),
data: generation_as_bytes,
}
})
.map_err(|_| {
(
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: "Incomplete generation".into(),
error_type: "Incomplete generation".into(),
}),
)
})
}
})
.collect::<FuturesUnordered<_>>()
.try_collect::<Vec<_>>()
.await?;
let inference_output = InferenceOutput {
id: id.clone(),
outputs: output_chunks,
};
Ok((HeaderMap::new(), Json(inference_output)))
}
| text-generation-inference/router/src/kserve.rs/0 | {
"file_path": "text-generation-inference/router/src/kserve.rs",
"repo_id": "text-generation-inference",
"token_count": 3533
} | 322 |
flash_att_v2_commit_cuda := v2.6.1
flash_att_v2_commit_rocm := 47bd46e0204a95762ae48712fd1a3978827c77fd
build-flash-attention-v2-cuda:
pip install -U packaging wheel
pip install flash-attn==$(flash_att_v2_commit_cuda)
install-flash-attention-v2-cuda: build-flash-attention-v2-cuda
echo "Flash v2 installed"
build-flash-attention-v2-rocm:
if [ ! -d 'flash-attention-v2' ]; then \
pip install -U packaging ninja --no-cache-dir && \
git clone https://github.com/mht-sharma/flash-attention.git flash-attention-v2 && \
cd flash-attention-v2 && git fetch && git checkout $(flash_att_v2_commit_rocm) && \
git submodule update --init --recursive && GPU_ARCHS="gfx90a;gfx942" PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py build; \
fi
install-flash-attention-v2-rocm: build-flash-attention-v2-rocm
cd flash-attention-v2 && \
GPU_ARCHS="gfx90a;gfx942" PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py install
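# Typical usage from the server directory (assumed): `make install-flash-attention-v2-cuda`
# on NVIDIA systems, or `make install-flash-attention-v2-rocm` on AMD systems.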
| text-generation-inference/server/Makefile-flash-att-v2/0 | {
"file_path": "text-generation-inference/server/Makefile-flash-att-v2",
"repo_id": "text-generation-inference",
"token_count": 397
} | 323 |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#include <ATen/cuda/CUDAContext.h>
#include "q4_matrix.cuh"
#include <vector>
#include "../util.cuh"
#include "../matrix.cuh"
using namespace std;
const int UNSHUF_BLOCKSIZE_X = 64;
const int RECONS_THREADS_X = 64; // Block size and thread count along columns in out, each thread converts 1 column
const int RECONS_THREADS_Y = 1; // Block size and thread count along rows in x and out, each thread converts 8 rows
vector<Q4Matrix*> g_q4_matrices;
void g_q4_keep_matrix(Q4Matrix* m)
{
g_q4_matrices.push_back(m);
}
void g_q4_free_matrices()
{
for (const auto& m : g_q4_matrices) delete m;
g_q4_matrices.clear();
}
Q4Matrix::Q4Matrix
(
const int _height,
const int _width,
const int _groups,
uint32_t* _qweight,
uint32_t* _qzeros,
half* _scales,
uint32_t* _g_idx,
const int _device
) :
height(_height),
width(_width),
groups(_groups),
device(_device)
{
cudaSetDevice(device);
cuda_qweight = _qweight;
cuda_qzeros = _qzeros;
cuda_scales = _scales;
groupsize = height / groups;
if (_g_idx) make_sequential(_g_idx);
}
Q4Matrix::~Q4Matrix()
{
}
// Make sequential
__global__ void make_sequential_kernel
(
const uint32_t* __restrict__ w,
uint32_t* __restrict__ w_new,
const uint32_t* __restrict__ x_map,
const int w_height,
const int w_width
)
{
const uint64_t* w2 = (uint64_t*) w;
uint64_t* w_new2 = (uint64_t*) w_new;
int w2_stride = w_width >> 1;
int w2_column = UNSHUF_BLOCKSIZE_X * blockIdx.x + threadIdx.x;
int w_new2_row = blockIdx.y;
int x_map_idx = w_new2_row << 3;
uint64_t dst = 0;
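    // Each uint64_t spans two adjacent 32-bit columns, and each 32-bit word packs
    // eight 4-bit weights. The mask 0x0000000f0000000f extracts one nibble from each
    // column per iteration, so eight iterations assemble one fully remapped word pair.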
#pragma unroll
for (int i = 0; i < 8; i++)
{
int source_row = x_map[x_map_idx++];
int w2_row = source_row >> 3;
int w2_subrow = source_row & 0x07;
int w2_row_shift = w2_subrow << 2;
int wnew2_row_shift = i << 2;
uint64_t src = w2[w2_row * w2_stride + w2_column];
src >>= w2_row_shift;
src &= 0x0000000f0000000f;
src <<= wnew2_row_shift;
dst |= src;
}
w_new2[w_new2_row * w2_stride + w2_column] = dst;
}
void Q4Matrix::make_sequential(const uint32_t* cpu_g_idx)
{
uint32_t* cuda_new_qweight = NULL;
cudaMalloc(&cuda_new_qweight, height / 8 * width * sizeof(uint32_t));
cudaMalloc(&cuda_x_map, height * sizeof(uint32_t)); // TODO: Should probably be allocated in PyTorch
uint32_t* cpu_g_idx_map = (uint32_t*) calloc(groups, sizeof(uint32_t));
uint32_t* cpu_x_map = (uint32_t*) malloc(height * sizeof(uint32_t));
uint32_t* cpu_x_map_inv = (uint32_t*) malloc(height * sizeof(uint32_t));
// Group histogram
for (int i = 0; i < height; i++) cpu_g_idx_map[cpu_g_idx[i]]++;
// Group map
for (int i = 0, acc = 0; i < groups; i++)
{
short tmp = cpu_g_idx_map[i];
cpu_g_idx_map[i] = acc;
acc += tmp;
}
// X map (inverse)
for (int row = 0; row < height; row++)
{
uint32_t target_group = cpu_g_idx[row];
uint32_t target_row = cpu_g_idx_map[target_group];
cpu_g_idx_map[target_group]++;
cpu_x_map_inv[row] = target_row;
}
// X map
for (int row = 0; row < height; row++) cpu_x_map[cpu_x_map_inv[row]] = row;
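    // Illustrative example (hypothetical values): with cpu_g_idx = {1, 0, 1, 0} and
    // two groups, the histogram is {2, 2} and the prefix sums give offsets {0, 2}.
    // Rows of group 0 land at positions 0..1 and rows of group 1 at positions 2..3,
    // so cpu_x_map_inv = {2, 0, 3, 1} and cpu_x_map = {1, 3, 0, 2}.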
// Move to CUDA
cudaMemcpyAsync(cuda_x_map, cpu_x_map, height * sizeof(uint32_t), cudaMemcpyHostToDevice);
// Rearrange rows in w
dim3 threads(UNSHUF_BLOCKSIZE_X, 1, 1);
dim3 blocks(width / UNSHUF_BLOCKSIZE_X / 2, height / 8, 1);
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
make_sequential_kernel<<<blocks, threads, 0, stream>>>(cuda_qweight, cuda_new_qweight, cuda_x_map, height / 8, width);
// Replace qweights
cudaMemcpyAsync(cuda_qweight, cuda_new_qweight, height / 8 * width * sizeof(uint32_t), cudaMemcpyDeviceToDevice);
// Cleanup
cudaDeviceSynchronize();
cudaFree(cuda_new_qweight);
free(cpu_g_idx_map);
free(cpu_x_map);
free(cpu_x_map_inv);
}
__global__ void reconstruct_kernel
(
const uint32_t* __restrict__ w,
half* __restrict__ out, // (y)
const half* __restrict__ w_scales,
const uint32_t* __restrict__ w_zeros,
const int height,
const int width,
const int groupsize
)
{
// Start of block
int column = RECONS_THREADS_X * blockIdx.x + threadIdx.x;
int row = (RECONS_THREADS_Y * blockIdx.y + threadIdx.y) * 8;
// Views
MatrixView_q4_column w_(w, height, width);
MatrixView_half_rw out_(out, height, width);
MatrixView_half w_scales_(w_scales, height / groupsize, width);
MatrixView_q4_row w_zeros_(w_zeros, height / groupsize, width);
// Groupsize version
int group = row / groupsize;
half w_scale = w_scales_.item(group, column);
uint32_t w_zero = (w_zeros_.item(group, column) + 1) & 0x0F;
uint32_t w_read = w_.item_uint32_t(row, column);
half* out_ptr = out_.item_ptr(row, column);
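    // Each 32-bit word packs eight 4-bit weights: unpack one nibble at a time,
    // subtract the (offset-by-one) zero point, and multiply by the group scale.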
#pragma unroll
for (int s = 0; s < 32; s += 4)
{
half w_item = __hmul(__int2half_rn((int)((w_read >> s) & 0x0f) - w_zero), w_scale);
*out_ptr = w_item; out_ptr += out_.width;
}
}
void Q4Matrix::reconstruct(half* out)
{
dim3 threads(RECONS_THREADS_X, RECONS_THREADS_Y, 1);
dim3 blocks
(
(width + threads.x - 1) / threads.x,
(height / 8 + threads.y - 1) / threads.y,
1
);
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
reconstruct_kernel<<<blocks, threads, 0, stream>>>(cuda_qweight, out, cuda_scales, cuda_qzeros, height / 8, width, groupsize);
}
| text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu",
"repo_id": "text-generation-inference",
"token_count": 2592
} | 324 |
#include "q_matrix.cuh"
#include "matrix_view.cuh"
#include "util.cuh"
#include "quant/qdq_2.cuh"
#include "quant/qdq_3.cuh"
#include "quant/qdq_4.cuh"
#include "quant/qdq_5.cuh"
#include "quant/qdq_6.cuh"
#include "quant/qdq_8.cuh"
#define BLOCK_KN_SIZE 128
#define THREADS_X 32
#define THREADS_Y 32
// Shuffle quantized data on load
__global__ void shuffle_kernel
(
uint32_t* __restrict__ b_q_weight,
const int size_k,
const int size_n,
const int rows_8,
const int rows_6,
const int rows_5,
const int rows_4,
const int rows_3,
const int rows_2
)
{
int n = blockIdx.x * THREADS_X + threadIdx.x;
if (n >= size_n) return;
int k = 0;
uint32_t* b_ptr = b_q_weight + n;
while (k < rows_8) { shuffle_8bit_4 (b_ptr, size_n); b_ptr += 1 * size_n; k += 4; }
while (k < rows_6) { shuffle_6bit_16(b_ptr, size_n); b_ptr += 3 * size_n; k += 16; }
while (k < rows_5) { shuffle_5bit_32(b_ptr, size_n); b_ptr += 5 * size_n; k += 32; }
while (k < rows_4) { shuffle_4bit_8 (b_ptr, size_n); b_ptr += 1 * size_n; k += 8; }
while (k < rows_3) { shuffle_3bit_32(b_ptr, size_n); b_ptr += 3 * size_n; k += 32; }
while (k < rows_2) { shuffle_2bit_16(b_ptr, size_n); b_ptr += 1 * size_n; k += 16; }
}
// QMatrix constructor
QMatrix::QMatrix
(
const int _device,
const int _height,
const int _width,
const int _groups,
uint32_t* _q_weight,
uint16_t* _q_perm,
uint16_t* _q_invperm,
uint32_t* _q_scale,
half* _q_scale_max,
uint16_t* _q_groups,
uint16_t* _q_group_map,
uint32_t* _gptq_qzeros,
half* _gptq_scales,
uint32_t* _gptq_g_idx,
half* _temp_dq
) :
device(_device),
height(_height),
width(_width),
groups(_groups),
temp_dq(_temp_dq)
{
cudaSetDevice(device);
failed = false;
cuda_q_weight = _q_weight;
cuda_q_perm = _q_perm;
cuda_q_invperm = _q_invperm;
cuda_q_scale = _q_scale;
cuda_q_scale_max = _q_scale_max;
cuda_q_groups = _q_groups;
cuda_q_group_map = _q_group_map;
cuda_gptq_qzeros = _gptq_qzeros;
cuda_gptq_scales = _gptq_scales;
is_gptq = (_gptq_qzeros != NULL);
if (is_gptq)
{
gptq_groupsize = 1;
while (gptq_groupsize * groups < height) gptq_groupsize *= 2;
}
// Create group map
rows_8 = 0;
rows_6 = 0;
rows_5 = 0;
rows_4 = 0;
rows_3 = 0;
rows_2 = 0;
if (!is_gptq)
{
uint16_t* cpu_q_groups = (uint16_t*)calloc(groups * 2, sizeof(uint16_t));
cudaMemcpy(cpu_q_groups, cuda_q_groups, groups * 2 * sizeof(uint16_t), cudaMemcpyDeviceToHost);
int row = 0;
for (int i = 0; i < groups; i++)
{
int bits = cpu_q_groups[i * 2];
int rows;
if (i < groups - 1)
{
int qrows = cpu_q_groups[i * 2 + 3] - cpu_q_groups[i * 2 + 1];
rows = qrows * 32 / bits;
}
else rows = height - row;
if (bits == 8) rows_8 += rows;
if (bits == 6) rows_6 += rows;
if (bits == 5) rows_5 += rows;
if (bits == 4) rows_4 += rows;
if (bits == 3) rows_3 += rows;
if (bits == 2) rows_2 += rows;
row += rows;
}
free(cpu_q_groups);
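        // Convert per-width row counts into cumulative upper boundaries so the
        // kernels can locate a row's bit width with simple "k < rows_N" tests.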
rows_6 += rows_8;
rows_5 += rows_6;
rows_4 += rows_5;
rows_3 += rows_4;
rows_2 += rows_3;
}
else
{
rows_4 = height;
rows_3 = height;
rows_2 = height;
if (_gptq_g_idx)
{
if (!make_sequential(_gptq_g_idx))
{
failed = true;
//printf("FAIL\n");
return;
}
}
}
// DBGI(rows_8);
// DBGI(rows_6);
// DBGI(rows_5);
// DBGI(rows_4);
// DBGI(rows_3);
// DBGI(rows_2);
// Shuffle quantized data
dim3 blockDim, gridDim;
blockDim.x = THREADS_X;
blockDim.y = 1;
gridDim.x = DIVIDE(width, THREADS_X);
gridDim.y = 1;
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
shuffle_kernel<<<gridDim, blockDim, 0, stream>>>(cuda_q_weight, height, width, rows_8, rows_6, rows_5, rows_4, rows_3, rows_2);
}
QMatrix::~QMatrix()
{
}
// Reconstruct b[k,n] (GPTQ)
__global__ void reconstruct_gptq_kernel
(
const uint32_t* __restrict__ b_q_weight,
const uint16_t* __restrict__ b_q_perm,
const uint32_t* __restrict__ b_gptq_qzeros,
const half* __restrict__ b_gptq_scales,
//const uint16_t* __restrict__ b_q_groups,
const int size_k,
const int size_n,
const int groupsize,
const int groups,
half* __restrict__ b,
const int rows_4
)
{
MatrixView_half_rw b_(b, size_k, size_n);
MatrixView_q4_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n);
MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n);
int offset_k = BLOCK_KN_SIZE * blockIdx.y;
int offset_n = BLOCK_KN_SIZE * blockIdx.x * 4;
int end_k = min(offset_k + BLOCK_KN_SIZE, size_k);
// Preload remapping table
__shared__ uint16_t perm[BLOCK_KN_SIZE];
int t = threadIdx.x;
if (b_q_perm)
{
if (offset_k + t < size_k)
perm[t] = b_q_perm[offset_k + t];
}
// Column
int n = offset_n + t * 4;
if (n >= size_n) return;
// Find initial group
int group = offset_k / groupsize;
int nextgroup = offset_k + groupsize;
// b offset
int qk = offset_k / (32 / 4);
const uint32_t* b_ptr = b_q_weight + qk * size_n + n;
// Initial zeros/scale
int zeros[4];
half2 scales[4];
half2 z1z16[4][2];
half2 y1y16[4][2];
b_gptq_qzeros_.item4(zeros, group, n);
b_gptq_scales_.item4_h2(scales, group, n);
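    // GPTQ stores zero points offset by one, hence the "+ 1" when preparing the
    // dequantization constants below.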
dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]);
dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]);
dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]);
dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]);
__syncthreads();
int k = offset_k;
int lk = 0;
while (k < end_k)
{
if (k == nextgroup)
{
group++;
nextgroup += groupsize;
b_gptq_qzeros_.item4(zeros, group, n);
b_gptq_scales_.item4_h2(scales, group, n);
dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]);
dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]);
dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]);
dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]);
}
for (int p = 0; p < 4; p++)
{
half2 dq[4][4];
const int4* b_ptr4 = (int4*) b_ptr;
int4 load_int4 = *b_ptr4;
dequant_4bit_8_gptq(load_int4.x, dq[0], z1z16[0], y1y16[0], size_n, false);
dequant_4bit_8_gptq(load_int4.y, dq[1], z1z16[1], y1y16[1], size_n, false);
dequant_4bit_8_gptq(load_int4.z, dq[2], z1z16[2], y1y16[2], size_n, false);
dequant_4bit_8_gptq(load_int4.w, dq[3], z1z16[3], y1y16[3], size_n, false);
b_ptr += size_n;
//half* dqh = (half*)dq;
if (b_q_perm)
{
for (int j = 0; j < 4; j++)
{
for (int v = 0; v < 4; v++) dq[v][j] = __hmul2(scales[v], dq[v][j]);
b_.set4(perm[lk++], n, __low2half(dq[0][j]), __low2half(dq[1][j]), __low2half(dq[2][j]), __low2half(dq[3][j]));
b_.set4(perm[lk++], n, __high2half(dq[0][j]), __high2half(dq[1][j]), __high2half(dq[2][j]), __high2half(dq[3][j]));
}
}
else
{
for (int j = 0; j < 4; j++)
{
for (int v = 0; v < 4; v++) dq[v][j] = __hmul2(scales[v], dq[v][j]);
b_.set4(offset_k + lk++, n, __low2half(dq[0][j]), __low2half(dq[1][j]), __low2half(dq[2][j]), __low2half(dq[3][j]));
b_.set4(offset_k + lk++, n, __high2half(dq[0][j]), __high2half(dq[1][j]), __high2half(dq[2][j]), __high2half(dq[3][j]));
}
}
}
k += 32;
}
}
// Reconstruct b[k,n]
__global__ void reconstruct_kernel
(
const uint32_t* __restrict__ b_q_weight,
const uint16_t* __restrict__ b_q_perm,
const uint32_t* __restrict__ b_q_scale,
const half* __restrict__ b_q_scale_max,
const uint16_t* __restrict__ b_q_group_map,
const int size_k,
const int size_n,
//const int groupsize,
const int groups,
half* __restrict__ b,
const int rows_8,
const int rows_6,
const int rows_5,
const int rows_4,
const int rows_3,
const int rows_2
)
{
MatrixView_half_rw b_(b, size_k, size_n);
MatrixView_q4_row b_q_scale_(b_q_scale, groups, size_n);
int offset_k = BLOCK_KN_SIZE * blockIdx.y;
int offset_n = BLOCK_KN_SIZE * blockIdx.x;
// Preload remapping table
int t = threadIdx.x;
__shared__ uint16_t perm[BLOCK_KN_SIZE];
if (offset_k + t < size_k)
perm[t] = b_q_perm[offset_k + t];
// Column
int n = offset_n + t;
if (n >= size_n) return;
// Find initial group
// int group = offset_k / groupsize;
int group = b_q_group_map[offset_k * 2];
int pre_rows_8 = min(rows_8, offset_k);
int pre_rows_6 = offset_k > rows_8 ? min(rows_6, offset_k) - rows_8 : 0;
int pre_rows_5 = offset_k > rows_6 ? min(rows_5, offset_k) - rows_6 : 0;
int pre_rows_4 = offset_k > rows_5 ? min(rows_4, offset_k) - rows_5 : 0;
int pre_rows_3 = offset_k > rows_4 ? min(rows_3, offset_k) - rows_4 : 0;
int pre_rows_2 = offset_k > rows_3 ? min(rows_2, offset_k) - rows_3 : 0;
int qk = 0;
qk += pre_rows_8 / 32 * 8;
qk += pre_rows_6 / 32 * 6;
qk += pre_rows_5 / 32 * 5;
qk += pre_rows_4 / 32 * 4;
qk += pre_rows_3 / 32 * 3;
qk += pre_rows_2 / 32 * 2;
const uint32_t* b_ptr = b_q_weight + qk * size_n + n;
half qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]);
half2 qs_h2 = __halves2half2(qs_h, qs_h);
int nextgroup = offset_k + b_q_group_map[offset_k * 2 + 1];
int end_k = min(offset_k + BLOCK_KN_SIZE, size_k);
int k = offset_k;
int lk = 0;
__syncthreads();
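    // The while loops below handle the row ranges in descending bit width
    // (8, 6, 5, 4, 3, 2); rows_8..rows_2 are the cumulative boundaries computed
    // in the constructor.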
while (k < rows_8 && k < end_k)
{
if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); }
for (int p = 0; p < 4; p++)
{
half2 dq[4];
uint32_t q_0 = *b_ptr; b_ptr += size_n;
uint32_t q_1 = *b_ptr; b_ptr += size_n;
dequant_8bit_8(q_0, q_1, dq, size_n);
for (int j = 0; j < 4; j++) dq[j] = __hmul2(dq[j], qs_h2);
half* dqh = (half*) dq;
for (int j = 0; j < 8; j++) b_.set(perm[lk++], n, dqh[j]);
}
k += 32;
}
while (k < rows_6 && k < end_k)
{
if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); }
for (int p = 0; p < 2; p++)
{
half2 dq[8];
uint32_t q_0 = *b_ptr; b_ptr += size_n;
uint32_t q_1 = *b_ptr; b_ptr += size_n;
uint32_t q_2 = *b_ptr; b_ptr += size_n;
dequant_6bit_16(q_0, q_1, q_2, dq, size_n);
for (int j = 0; j < 8; j++) dq[j] = __hmul2(dq[j], qs_h2);
half* dqh = (half*) dq;
for (int j = 0; j < 16; j++) b_.set(perm[lk++], n, dqh[j]);
}
k += 32;
}
while (k < rows_5 && k < end_k)
{
if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); }
for (int p = 0; p < 1; p++)
{
half2 dq[16];
uint32_t q_0 = *b_ptr; b_ptr += size_n;
uint32_t q_1 = *b_ptr; b_ptr += size_n;
uint32_t q_2 = *b_ptr; b_ptr += size_n;
uint32_t q_3 = *b_ptr; b_ptr += size_n;
uint32_t q_4 = *b_ptr; b_ptr += size_n;
dequant_5bit_32(q_0, q_1, q_2, q_3, q_4, dq, size_n);
for (int j = 0; j < 16; j++) dq[j] = __hmul2(dq[j], qs_h2);
half* dqh = (half*) dq;
for (int j = 0; j < 32; j++) b_.set(perm[lk++], n, dqh[j]);
}
k += 32;
}
while (k < rows_4 && k < end_k)
{
if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); }
for (int p = 0; p < 4; p++)
{
half2 dq[4];
uint32_t q_0 = *b_ptr; b_ptr += size_n;
dequant_4bit_8(q_0, dq, size_n);
for (int j = 0; j < 4; j++) dq[j] = __hmul2(dq[j], qs_h2);
half* dqh = (half*) dq;
for (int j = 0; j < 8; j++) b_.set(perm[lk++], n, dqh[j]);
}
k += 32;
}
while (k < rows_3 && k < end_k)
{
if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); }
for (int p = 0; p < 1; p++)
{
half2 dq[16];
uint32_t q_0 = *b_ptr; b_ptr += size_n;
uint32_t q_1 = *b_ptr; b_ptr += size_n;
uint32_t q_2 = *b_ptr; b_ptr += size_n;
dequant_3bit_32(q_0, q_1, q_2, dq, size_n);
for (int j = 0; j < 16; j++) dq[j] = __hmul2(dq[j], qs_h2);
half* dqh = (half*) dq;
for (int j = 0; j < 32; j++) b_.set(perm[lk++], n, dqh[j]);
}
k += 32;
}
while (k < rows_2 && k < end_k)
{
if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); }
for (int p = 0; p < 1; p++)
{
half2 dq[8];
uint32_t q_0 = *b_ptr; b_ptr += size_n;
dequant_2bit_16(q_0, dq, size_n);
for (int j = 0; j < 8; j++) dq[j] = __hmul2(dq[j], qs_h2);
half* dqh = (half*) dq;
for (int j = 0; j < 16; j++) b_.set(perm[lk++], n, dqh[j]);
}
k += 16;
}
}
void QMatrix::reconstruct(half* out)
{
dim3 blockDim, gridDim;
blockDim.x = BLOCK_KN_SIZE;
blockDim.y = 1;
gridDim.y = DIVIDE(height, BLOCK_KN_SIZE);
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (!is_gptq)
{
gridDim.x = DIVIDE(width, BLOCK_KN_SIZE);
reconstruct_kernel<<<gridDim, blockDim, 0, stream>>>
(
cuda_q_weight,
cuda_q_perm,
cuda_q_scale,
cuda_q_scale_max,
cuda_q_group_map,
height,
width,
//groupsize,
groups,
out,
rows_8,
rows_6,
rows_5,
rows_4,
rows_3,
rows_2
);
}
else
{
gridDim.x = DIVIDE(width, BLOCK_KN_SIZE * 4);
reconstruct_gptq_kernel<<<gridDim, blockDim, 0, stream>>>
(
cuda_q_weight,
cuda_q_perm,
cuda_gptq_qzeros,
cuda_gptq_scales,
//const uint16_t* __restrict__ b_q_groups,
height,
width,
gptq_groupsize,
groups,
out,
rows_4
);
}
}
__global__ void make_sequential_kernel
(
const uint32_t* __restrict__ w,
uint32_t* __restrict__ w_new,
const uint16_t* __restrict__ q_perm,
const int w_height,
const int w_width
)
{
const uint64_t* w2 = (uint64_t*) w;
uint64_t* w_new2 = (uint64_t*) w_new;
int w2_stride = w_width >> 1;
int w2_column = THREADS_X * blockIdx.x + threadIdx.x;
if (w2_column >= w2_stride) return;
int w_new2_row = blockIdx.y;
int q_perm_idx = w_new2_row << 3;
uint64_t dst = 0;
#pragma unroll
for (int i = 0; i < 8; i++)
{
int source_row = q_perm[q_perm_idx++];
int w2_row = source_row >> 3;
int w2_subrow = source_row & 0x07;
int w2_row_shift = w2_subrow << 2;
int wnew2_row_shift = i << 2;
uint64_t src = w2[w2_row * w2_stride + w2_column];
src >>= w2_row_shift;
src &= 0x0000000f0000000f;
src <<= wnew2_row_shift;
dst |= src;
}
w_new2[w_new2_row * w2_stride + w2_column] = dst;
}
bool QMatrix::make_sequential(const uint32_t* cpu_g_idx)
{
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
uint32_t* cuda_new_qweight = NULL;
cudaError_t err = cudaMalloc(&cuda_new_qweight, height / 8 * width * sizeof(uint32_t));
if (err != cudaSuccess) {
        (void) cudaGetLastError(); // Clear the sticky CUDA error state
return false;
}
uint32_t* cpu_g_idx_map = (uint32_t*) calloc(groups, sizeof(uint32_t));
uint32_t* cpu_x_map = (uint32_t*) malloc(height * sizeof(uint32_t));
uint32_t* cpu_x_map_inv = (uint32_t*) malloc(height * sizeof(uint32_t));
// Group histogram
for (int i = 0; i < height; i++) cpu_g_idx_map[cpu_g_idx[i]]++;
// Group map
for (int i = 0, acc = 0; i < groups; i++)
{
short tmp = cpu_g_idx_map[i];
cpu_g_idx_map[i] = acc;
acc += tmp;
}
// X map (inverse)
for (int row = 0; row < height; row++)
{
uint32_t target_group = cpu_g_idx[row];
uint32_t target_row = cpu_g_idx_map[target_group];
cpu_g_idx_map[target_group]++;
cpu_x_map_inv[row] = target_row;
}
// X map
for (int row = 0; row < height; row++) cpu_x_map[cpu_x_map_inv[row]] = row;
// Reduce to uint16_t
uint16_t* cpu_x_map16 = (uint16_t*)cpu_x_map;
uint16_t* cpu_x_map_inv16 = (uint16_t*)cpu_x_map_inv;
for (int row = 0; row < height; row++) cpu_x_map16[row] = (uint16_t) cpu_x_map[row];
for (int row = 0; row < height; row++) cpu_x_map_inv16[row] = (uint16_t) cpu_x_map_inv[row];
// Move to CUDA
cudaMemcpyAsync(cuda_q_perm, cpu_x_map16, height * sizeof(uint16_t), cudaMemcpyHostToDevice);
cudaMemcpyAsync(cuda_q_invperm, cpu_x_map_inv16, height * sizeof(uint16_t), cudaMemcpyHostToDevice);
// Rearrange rows in w
dim3 blockDim, gridDim;
blockDim.x = THREADS_X;
blockDim.y = 1;
gridDim.x = DIVIDE(width, THREADS_X);
gridDim.y = height / 8;
make_sequential_kernel<<<gridDim, blockDim, 0, stream>>>
(
cuda_q_weight,
cuda_new_qweight,
cuda_q_perm,
height / 8,
width
);
// Replace qweights
cudaMemcpyAsync(cuda_q_weight, cuda_new_qweight, height / 8 * width * sizeof(uint32_t), cudaMemcpyDeviceToDevice);
// Cleanup
cudaDeviceSynchronize();
cudaFree(cuda_new_qweight);
free(cpu_g_idx_map);
free(cpu_x_map);
free(cpu_x_map_inv);
return true;
}
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu",
"repo_id": "text-generation-inference",
"token_count": 10524
} | 325 |
import os
from typing import Optional
import torch
from text_generation_server.layers.attention.kv_cache import KVCache, KVScales
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.layers.attention import Seqlen
from text_generation_server.utils.log import log_master
from text_generation_server.models.globals import (
ATTENTION,
BLOCK_SIZE,
)
from loguru import logger
import vllm._custom_ops as ops
major, minor = torch.cuda.get_device_capability()
is_sm75 = major == 7 and minor == 5
_PARTITION_SIZE_V1V2 = 1024
_PARTITION_SIZE_CUSTOM = 256
_GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName
_ON_MI250_MI300 = any(
arch in _GPU_ARCH for arch in ["gfx90a", "gfx940", "gfx941", "gfx942"]
)
use_triton = os.getenv("ROCM_USE_FLASH_ATTN_V2_TRITON", "").lower() in {"true", "1"}
ENGINE = "triton" if use_triton else "ck"
use_rocm_custom_paged_attn = os.getenv("ROCM_USE_CUSTOM_PAGED_ATTN", "1") != "0"
def _use_rocm_custom_paged_attention(
qtype: torch.dtype,
head_size: int,
block_size: int,
gqa_ratio: int,
max_seq_len: int,
) -> bool:
    # The ROCm custom paged attention kernel is not supported on Navi (gfx1*) GPUs.
return (
use_rocm_custom_paged_attn
and _ON_MI250_MI300
and (qtype == torch.half or qtype == torch.bfloat16)
and (head_size == 64 or head_size == 128)
and (block_size == 16 or block_size == 32)
and (gqa_ratio >= 1 and gqa_ratio <= 16)
and max_seq_len <= 131072
)
def paged_attention(
query: torch.Tensor,
kv_cache: KVCache,
kv_head_mapping: torch.Tensor,
softmax_scale: float,
block_tables: torch.Tensor,
seqlen: Seqlen,
max_s: int,
*,
kv_scales: KVScales,
softcap: Optional[float] = None,
window_size_left: Optional[int] = -1,
):
# Adapted from: https://github.com/vllm-project/vllm/blob/f8a1e39fae05ca610be8d5a78be9d40f5274e5fc/vllm/model_executor/layers/attention.py
# Copyright 2023 The vLLM team. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
if ATTENTION == "flashdecoding":
max_q = 1
max_k = max_s
import flash_attn_2_cuda
window_size_right = -1 if window_size_left == -1 else 0
if softcap is None:
softcap = 0.0
out = flash_attn_2_cuda.varlen_fwd(
query,
kv_cache.key,
kv_cache.value,
None,
seqlen.cu_seqlen_q,
seqlen.cu_seqlen_k,
None, # pad_k
None,
block_tables,
None,
max_q,
max_k,
0.0, # dropout
softmax_scale,
False, # zero_tensors
True, # causal
window_size_left, # Window_left
window_size_right, # Window right
softcap,
False, # return softmax
None, # generator
)
return out[0]
if softcap is not None:
raise RuntimeError("Paged attention doesn't support softcapping")
# value_cache => [num_blocks, num_heads, head_size, block_size]
# block_size = kv_cache.value.shape[3]
block_size = BLOCK_SIZE
num_seqs, num_heads, head_size = query.shape
num_kv_heads = kv_cache.key.shape[1]
gqa_ratio = num_heads // num_kv_heads
use_custom = _use_rocm_custom_paged_attention(
query.dtype, head_size, block_size, gqa_ratio, max_s
)
if not use_custom:
_PARTITION_SIZE = _PARTITION_SIZE_V1V2
else:
_PARTITION_SIZE = _PARTITION_SIZE_CUSTOM
max_num_partitions = (max_s + _PARTITION_SIZE - 1) // _PARTITION_SIZE
input_lengths = seqlen.input_lengths + seqlen.cache_lengths
out = torch.empty_like(query)
if kv_cache.dtype == torch.float8_e4m3fn:
key = kv_cache.key.view(torch.uint8)
value = kv_cache.value.view(torch.uint8)
kv_cache_dtype = "fp8"
else:
key = kv_cache.key
value = kv_cache.value
kv_cache_dtype = "auto"
# NOTE(woosuk): We use a simple heuristic to decide whether to use
# PagedAttention V1 or V2. If the number of partitions is 1, we use
# V1 to avoid the overhead of reduction. Also, if the number of
# sequences or heads is large, we use V1 since there is enough work
# to parallelize.
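    # Worked example (illustrative): max_s = 4096 with _PARTITION_SIZE = 1024 gives
    # max_num_partitions = 4; if num_seqs * num_heads = 1024 > 512 there is enough
    # work to parallelize, so V1 is chosen (assuming use_custom is False).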
use_v1 = (
max_s <= 8192
and (max_num_partitions == 1 or num_seqs * num_heads > 512)
and not use_custom
)
if use_v1:
ops.paged_attention_v1(
out,
query,
key,
value,
num_kv_heads,
softmax_scale,
block_tables,
input_lengths,
block_size,
max_s,
None,
kv_cache_dtype,
kv_scales.key_scale_cpu,
kv_scales.value_scale_cpu,
)
else:
# Run PagedAttention V2.
assert _PARTITION_SIZE % block_size == 0
tmp_output = torch.zeros(
size=(num_seqs, num_heads, max_num_partitions, head_size),
dtype=out.dtype,
device=out.device,
)
exp_sums = torch.zeros(
size=(num_seqs, num_heads, max_num_partitions),
dtype=torch.float32,
device=out.device,
)
max_logits = torch.zeros_like(exp_sums)
if not use_custom:
ops.paged_attention_v2(
out,
exp_sums,
max_logits,
tmp_output,
query,
key,
value,
num_kv_heads,
softmax_scale,
block_tables,
input_lengths,
block_size,
max_s,
None,
kv_cache_dtype,
kv_scales.key_scale_cpu,
kv_scales.value_scale_cpu,
)
else:
ops.paged_attention_rocm(
out,
exp_sums,
max_logits,
tmp_output,
query,
key,
value,
num_kv_heads,
softmax_scale,
block_tables,
input_lengths,
block_size,
max_s,
None,
kv_cache_dtype,
kv_scales.key_scale_cpu,
kv_scales.value_scale_cpu,
None,
_PARTITION_SIZE,
)
return out
if ENGINE != "triton":
try:
import flash_attn_2_cuda
log_master(
logger.info,
"ROCm: using Flash Attention 2 Composable Kernel implementation.",
)
except ImportError as e:
if major >= 8:
architecture_suffix = f"-{SYSTEM}"
raise ImportError(
"Flash Attention V2 is not installed.\n"
"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
f"or install flash attention v2 with `cd server && make install install-flash-attention-v2{architecture_suffix}`"
)
elif is_sm75:
raise ImportError(
"Flash Attention is not installed.\n"
"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
"or install flash attention with `cd server && make install install-flash-attention`"
) from e
else:
for idx in range(torch.cuda.device_count()):
name = torch.cuda.get_device_name(idx)
if "MI210" not in name and "MI250" not in name:
raise ImportError(
f"AMD GPU {torch.cuda.get_device_name(idx)} does not support flash-attention"
)
raise ImportError(
f"AMD GPU with ROCm capability {major} {minor} is not supported"
) from e
SUPPORTS_WINDOWING = False
def attention(
*,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
kv_cache: KVCache,
kv_scales: KVScales,
seqlen: Seqlen,
block_tables: torch.Tensor,
softmax_scale: float,
window_size_left: int = -1,
causal: bool = True,
softcap: Optional[float] = None,
):
if ENGINE == "ck":
if window_size_left <= 0 and window_size_left != -1:
raise ValueError("`window_size_left` must be > 0 or -1")
out = torch.empty_like(query)
if softcap is None:
softcap = 0.0
# We do not need to check window_size_left (not supported) here, as it is already checked ahead of time at model load.
return flash_attn_2_cuda.varlen_fwd(
query,
# flashdecoding: pass the whole KV cache tensors; paged: pass only the current key/value.
kv_cache.key if ATTENTION == "flashdecoding" else key,
kv_cache.value if ATTENTION == "flashdecoding" else value,
out,
seqlen.cu_seqlen_q,
seqlen.cu_seqlen_k,
None,
None,
block_tables if ATTENTION == "flashdecoding" else None,
None,
seqlen.max_q,
seqlen.max_k,
0.0,
softmax_scale,
False,
causal,
window_size_left,
0,
softcap,
False,
None,
)[0]
elif ENGINE == "triton":
from .flash_attn_triton import triton_attention
if softcap is not None:
raise NotImplementedError("softcap is only available with CK flash attn")
out = torch.empty_like(query)
# We do not need to check window_size_left (not supported) here, as it is already checked ahead of time at model load.
output, _ = triton_attention(
query,
key,
value,
out,
seqlen.cu_seqlen_q,
seqlen.cu_seqlen_q,
seqlen.max_q,
seqlen.max_k,
causal,
softmax_scale,
)
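# NOTE: cu_seqlen_q is passed for both the query and key offsets above, i.e.
# this prefill path assumes query and key share the same sequence layout.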
return output
else:
raise RuntimeError(f"Unknown attention engine {ENGINE}")
__all__ = [
"SUPPORTS_WINDOWING",
"attention",
"paged_attention",
]
| text-generation-inference/server/text_generation_server/layers/attention/rocm.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/attention/rocm.py",
"repo_id": "text-generation-inference",
"token_count": 5552
} | 326 |
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import torch
from loguru import logger
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils.log import log_once
from text_generation_server.utils.weights import (
Weight,
Weights,
WeightsLoader,
DefaultWeightsLoader,
)
import math
@dataclass
class GPTQWeight(Weight):
qweight: torch.Tensor
qzeros: torch.Tensor
scales: torch.Tensor
g_idx: Optional[torch.Tensor]
bits: int
groupsize: int
use_awq_kernel: bool
use_exllama: bool
def __post_init__(self):
if self.scales.dtype == torch.float:
self.scales = self.scales.half()
@property
def device(self) -> torch.device:
return self.qweight.device
def get_linear(self, bias: torch.Tensor):
if self.use_awq_kernel:
if SYSTEM == "rocm":
raise NotImplementedError(
"AWQ GEMM kernel can't be used on ROCm systems, please use `--quantize gptq` instead "
"to use Exllama/GPTQ kernels for AWQ inference."
)
try:
from text_generation_server.layers.awq.quantize import WQLinear
return WQLinear(
w_bit=self.bits,
group_size=self.groupsize,
qweight=self.qweight,
qzeros=self.qzeros,
scales=self.scales,
bias=bias,
)
except ImportError:
raise NotImplementedError(
"You do not seem to have awq installed, either install it (cd server && make install-awq), or try using GPTQ `---quantize gptq` a conversion AWQ->GPTQ will happen on the fly"
)
elif self.use_exllama:
try:
from text_generation_server.layers.gptq import ExllamaQuantLinear
except ImportError:
raise NotImplementedError(
"Exllama gptq kernels are not installed. Install them `cd server/exllama_kernels && python setup.py install && cd ../exllamav2_kernels && python setup.py install`"
)
return ExllamaQuantLinear(self, bias)
else:
if SYSTEM == "ipex" and not (
self.device.type == "xpu"
and (
self.bits != 4
or math.ceil(
(self.qweight.shape[0] * 32 // self.bits) / self.groupsize
)
!= self.scales.shape[0]
)
):
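# qweight packs 32 // self.bits quantized values per int32 along dim 0, so
# qweight.shape[0] * 32 // self.bits recovers the unpacked input-feature count.
# On XPU devices the IPEX kernel additionally requires 4-bit weights whose
# group count (ceil-division by groupsize) matches scales.shape[0].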
from .ipex import QuantLinear
else:
from .triton import QuantLinear
return QuantLinear(
self.qweight,
self.qzeros,
self.scales,
self.g_idx,
bias,
self.bits,
self.groupsize,
)
class GPTQWeightsLoader(WeightsLoader):
"""
Loader for GPTQ- and AWQ-quantized weights.
"""
def __init__(
self,
*,
bits: int,
desc_act: bool,
groupsize: int,
quant_method: str,
quantize: str,
sym: bool,
modules_to_not_convert: List[str],
):
self.bits = bits
self.desc_act = desc_act
self.groupsize = groupsize
self.quant_method = quant_method
self.quantize = quantize
self.sym = sym
self.modules_to_not_convert = modules_to_not_convert
def get_weights(self, weights: Weights, prefix: str):
self._get_gptq_params(weights)
use_exllama = True
if self.bits != 4:
use_exllama = False
if self.desc_act:
log_once(logger.warning, "Disabling exllama because desc_act=True")
use_exllama = False
if self.is_layer_skipped_quantization(prefix, self.modules_to_not_convert):
return DefaultWeightsLoader.get_weights(weights, prefix)
try:
qweight = weights.get_tensor(f"{prefix}.qweight")
except RuntimeError:
raise RuntimeError(
"Cannot load `gptq` weight, make sure the model is already quantized, or quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`"
)
if self.quantize == "gptq" and self.quant_method == "gptq":
g_idx = weights.get_tensor(f"{prefix}.g_idx")
else:
g_idx = None
from text_generation_server.layers.gptq import (
HAS_EXLLAMA,
CAN_EXLLAMA,
GPTQWeight,
)
if use_exllama:
if not HAS_EXLLAMA:
if CAN_EXLLAMA:
log_once(
logger.warning,
"Exllama GPTQ cuda kernels (which are faster) could have been used, but are not currently installed, try using BUILD_EXTENSIONS=True",
)
use_exllama = False
else:
log_once(logger.info, f"Using exllama kernels v{HAS_EXLLAMA}")
qzeros = weights.get_tensor(f"{prefix}.qzeros")
scales = weights.get_tensor(f"{prefix}.scales")
if use_exllama and g_idx is not None:
g_idx = g_idx - g_idx[0]
if self.quantize == "gptq" and self.quant_method == "awq":
log_once(
logger.info, "Converting AWQ model to Exllama/GPTQ packing format."
)
from text_generation_server.layers.awq.conversion_utils import (
fast_awq_to_gptq,
)
qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
if use_exllama:
g_idx = None
else:
g_idx = (
torch.arange(
qweight.shape[0] * (32 // self.bits),
device=qweight.device,
)
// self.groupsize
).to(dtype=torch.int32)
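# The synthesized g_idx above assigns each of the qweight.shape[0] * (32 // bits)
# unpacked rows to its quantization group, e.g. with groupsize=128 rows 0..127
# map to group 0, rows 128..255 to group 1, and so on.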
return GPTQWeight(
qweight=qweight,
qzeros=qzeros,
scales=scales,
g_idx=g_idx,
bits=self.bits,
groupsize=self.groupsize,
use_awq_kernel=self.quantize == "awq",
use_exllama=use_exllama,
)
def is_layer_skipped_quantization(
self, prefix: str, modules_to_not_convert: List[str]
):
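# Substring match on the weight prefix, e.g. modules_to_not_convert=["lm_head"]
# leaves any module whose prefix contains "lm_head" unquantized.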
return any(module_name in prefix for module_name in modules_to_not_convert)
def get_weights_col_packed(
self,
weights: Weights,
prefix: str,
block_sizes: Union[int, List[int]],
):
if self.is_layer_skipped_quantization(prefix, self.modules_to_not_convert):
return DefaultWeightsLoader.get_weights_col_packed(
weights, prefix, block_sizes
)
try:
qweight = weights.get_packed_sharded(
f"{prefix}.qweight", dim=1, block_sizes=block_sizes
)
except RuntimeError:
raise RuntimeError(
f"Cannot load `{self.quantize}` weight, make sure the model is already quantized."
)
scales = weights.get_packed_sharded(
f"{prefix}.scales", dim=1, block_sizes=block_sizes
)
scales = scales.to(dtype=weights.dtype)
self._get_gptq_params(weights)
qzeros = weights.get_packed_sharded(
f"{prefix}.qzeros", dim=1, block_sizes=block_sizes
)
if self.quantize == "gptq" and self.quant_method == "gptq":
g_idx = weights.get_tensor(f"{prefix}.g_idx")
elif self.quantize == "gptq" and self.quant_method == "awq":
log_once(
logger.info, "Converting AWQ model to Exllama/GPTQ packing format."
)
from text_generation_server.layers.awq.conversion_utils import (
fast_awq_to_gptq,
)
qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
g_idx = (
torch.arange(
qweight.shape[0] * (32 // self.bits),
device=qweight.device,
)
// self.groupsize
).to(dtype=torch.int32)
else:
g_idx = None
return GPTQWeight(
qweight=qweight,
qzeros=qzeros,
scales=scales,
g_idx=g_idx,
bits=self.bits,
groupsize=self.groupsize,
use_awq_kernel=self.quantize == "awq",
use_exllama=False,
)
def get_multi_weights_col(self, weights: Weights, prefixes: List[str], dim: int):
if self.is_layer_skipped_quantization(prefixes[0], self.modules_to_not_convert):
return DefaultWeightsLoader.get_multi_weights_col(weights, prefixes, dim)
try:
qweight = torch.cat(
[weights.get_sharded(f"{p}.qweight", dim=1) for p in prefixes], dim=1
)
except RuntimeError:
raise RuntimeError(
f"Cannot load `{self.quantize}` weight, make sure the model is already quantized"
)
scales = torch.cat(
[weights.get_sharded(f"{p}.scales", dim=1) for p in prefixes], dim=1
)
self._get_gptq_params(weights)
qzeros = torch.cat(
[weights.get_sharded(f"{p}.qzeros", dim=1) for p in prefixes], dim=1
)
from text_generation_server.layers.gptq import HAS_EXLLAMA
use_exllama = (
self.bits == 4
and HAS_EXLLAMA
and self.quantize == "gptq"
and not self.desc_act
)
if self.quantize == "gptq" and self.quant_method == "gptq":
w = [weights.get_tensor(f"{p}.g_idx") for p in prefixes]
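# All column shards must share one activation ordering, so the g_idx tensors
# are checked for equality before keeping a single copy.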
for w2 in w[1:]:
torch.testing.assert_close(w2, w[0])
g_idx = w[0]
elif self.quantize == "gptq" and self.quant_method == "awq":
log_once(
logger.info, "Converting AWQ model to Exllama/GPTQ packing format."
)
from text_generation_server.layers.awq.conversion_utils import (
fast_awq_to_gptq,
)
qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
if use_exllama:
g_idx = None
else:
g_idx = (
torch.arange(
qweight.shape[0] * (32 // self.bits),
device=qweight.device,
)
// self.groupsize
).to(dtype=torch.int32)
else:
g_idx = None
return GPTQWeight(
qweight=qweight,
qzeros=qzeros,
scales=scales,
g_idx=g_idx,
bits=self.bits,
groupsize=self.groupsize,
use_awq_kernel=self.quantize == "awq",
use_exllama=use_exllama,
)
def get_weights_row(self, weights: Weights, prefix: str):
self._get_gptq_params(weights)
use_exllama = True
desc_act = self.desc_act
if self.bits != 4:
use_exllama = False
if self.desc_act:
log_once(logger.warning, "Disabling exllama because desc_act=True")
use_exllama = False
if self.is_layer_skipped_quantization(prefix, self.modules_to_not_convert):
return DefaultWeightsLoader.get_weights_row(weights, prefix)
try:
qweight = weights.get_sharded(f"{prefix}.qweight", dim=0)
except RuntimeError:
raise RuntimeError(
"Cannot load `gptq` weight, make sure the model is already quantized, or quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`"
)
if self.quantize == "gptq" and self.quant_method == "gptq":
g_idx = weights.get_sharded(f"{prefix}.g_idx", dim=0)
else:
g_idx = None
if weights.process_group.size() > 1:
if g_idx is not None:
if (
not torch.equal(
# Remove g_idx[0] to adapt the check with TP>1.
(g_idx - g_idx[0]).cpu(),
torch.tensor(
[i // self.groupsize for i in range(g_idx.shape[0])],
dtype=torch.int32,
),
)
and not (g_idx == 0).all()
):
# The exllama implementation does not support row tensor parallelism with act-order, as
# it would require reordering the input activations that are split across several GPUs
use_exllama = False
desc_act = True
from text_generation_server.layers.gptq import (
CAN_EXLLAMA,
HAS_EXLLAMA,
GPTQWeight,
)
if use_exllama:
if not HAS_EXLLAMA:
if CAN_EXLLAMA:
log_once(
logger.warning,
"Exllama GPTQ cuda kernels (which are faster) could have been used, but are not currently installed, try using BUILD_EXTENSIONS=True",
)
use_exllama = False
else:
log_once(logger.info, f"Using exllama kernels v{HAS_EXLLAMA}")
if not desc_act and self.groupsize != -1:
qzeros = weights.get_sharded(f"{prefix}.qzeros", dim=0)
scales = weights.get_sharded(f"{prefix}.scales", dim=0)
if g_idx is not None:
# qzeros and scales are sharded, so g_idx must be adjusted accordingly
g_idx = g_idx - g_idx[0]
else:
qzeros = weights.get_tensor(f"{prefix}.qzeros")
scales = weights.get_tensor(f"{prefix}.scales")
if self.quantize == "gptq" and self.quant_method == "awq":
log_once(
logger.info, "Converting AWQ model to Exllama/GPTQ packing format."
)
from text_generation_server.layers.awq.conversion_utils import (
fast_awq_to_gptq,
)
qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
if use_exllama:
g_idx = None
else:
g_idx = (
torch.arange(
qweight.shape[0] * (32 // self.bits),
device=qweight.device,
)
// self.groupsize
).to(dtype=torch.int32)
return GPTQWeight(
qweight=qweight,
qzeros=qzeros,
scales=scales,
g_idx=g_idx,
bits=self.bits,
groupsize=self.groupsize,
use_awq_kernel=self.quantize == "awq",
use_exllama=use_exllama,
)
def _get_gptq_params(self, weights: Weights):
if weights.has_tensor("gptq_bits") and weights.has_tensor("gptq_groupsize"):
self.bits = weights.get_tensor("gptq_bits").item()
self.groupsize = weights.get_tensor("gptq_groupsize").item()
self.desc_act = False
# `server quantize` used asymmetric quantization unconditionally
# before the `gptq_sym` setting tensor was added.
self.sym = (
weights.get_tensor("gptq_sym").item()
if weights.has_tensor("gptq_sym")
else False
)
self.quant_method = "gptq"
# Needs to be at the end of the module because of a circular import.
try:
major, _minor = torch.cuda.get_device_capability()
except Exception:
major = 1
HAS_EXLLAMA = False
CAN_EXLLAMA = major >= 8 or SYSTEM == "rocm"
V2 = os.getenv("EXLLAMA_VERSION", "2") == "2"
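# EXLLAMA_VERSION defaults to "2"; any other value (e.g. "1") selects the v1 kernels.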
if os.getenv("DISABLE_EXLLAMA") == "True":
HAS_EXLLAMA = False
elif CAN_EXLLAMA:
try:
if V2:
from text_generation_server.layers.gptq.exllamav2 import (
QuantLinear as ExllamaQuantLinear, # noqa: F401
create_exllama_buffers, # noqa: F401
set_device, # noqa: F401
)
HAS_EXLLAMA = "2"
else:
from text_generation_server.layers.gptq.exllama import (
Ex4bitLinear as ExllamaQuantLinear, # noqa: F401
create_exllama_buffers, # noqa: F401
set_device, # noqa: F401
)
HAS_EXLLAMA = "1"
except ImportError:
pass
| text-generation-inference/server/text_generation_server/layers/gptq/__init__.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/gptq/__init__.py",
"repo_id": "text-generation-inference",
"token_count": 9078
} | 327 |
import torch
from torch import nn
from typing import Tuple, Optional
from text_generation_server.utils.speculate import get_speculate
from text_generation_server.layers.linear import FastLinear
from text_generation_server.layers.tensor_parallel import (
TensorParallelHead,
TensorParallelColumnLinear,
)
class ResBlock(torch.nn.Module):
def __init__(self, config, prefix, weights):
super().__init__()
self.linear = FastLinear.load(
config, prefix=f"{prefix}.linear", weights=weights, bias=True
)
self.act = torch.nn.SiLU()
def forward(self, x):
return x + self.act(self.linear(x))
class MedusaModel(torch.nn.Module):
def __init__(self, config, medusa_config, weights):
super().__init__()
self.heads = torch.nn.ModuleList(
[
MedusaHead(config, medusa_config, prefix=f"{i}", weights=weights)
for i in range(get_speculate())
]
)
def forward(self, x):
if not self.heads:
return None
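# Each head maps the hidden states to its own logits; stacking on dim=1 yields
# roughly (batch, n_heads, vocab_size), one speculative distribution per head.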
speculative_logits = torch.stack([head(x) for head in self.heads], dim=1)
return speculative_logits
class MedusaHead(torch.nn.Module):
def __init__(self, config, medusa_config, prefix, weights):
super().__init__()
self.blocks = torch.nn.ModuleList(
[
ResBlock(config, prefix=f"{prefix}.{i}", weights=weights)
for i in range(medusa_config["medusa_num_layers"])
]
)
n = len(self.blocks)
self.out = FastLinear.load(
config, prefix=f"{prefix}.{n}", weights=weights, bias=False
)
def forward(self, x):
for block in self.blocks:
x = block(x)
x = self.out(x)
return x
class MedusaHeadV1(nn.Module):
def __init__(self, lm_head, medusa):
super().__init__()
self.lm_head = lm_head
self.medusa = medusa
@staticmethod
def load(config, prefix: str, weights):
from pathlib import Path
from safetensors import safe_open
import json
speculator = config.speculator
path = speculator["path"]
medusa_config = str(Path(path) / "config.json")
for fname in speculator["model_paths"]:
filename = str(Path(path) / fname)
with open(medusa_config, "r") as f:
medusa_config = json.load(f)
routing = weights.routing
with safe_open(filename, framework="pytorch") as f:
for k in f.keys():
if k in routing and routing[k] != filename:
raise RuntimeError(
f"Key {k} was found in multiple files: {filename} and {routing[k]}"
)
routing[k] = filename
medusa = MedusaModel(config, medusa_config, weights)
lm_head = TensorParallelHead.load(config, prefix, weights)
return MedusaHeadV1(lm_head, medusa)
def forward(
self, input: torch.Tensor
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
logits = self.lm_head(input)
# If we have too many tokens, we skip speculative logits
if input.shape[0] > 128:
return logits, None
speculative_logits = self.medusa(input)
return logits, speculative_logits
class MedusaHeadV2(nn.Module):
def __init__(self, config, prefix, weights):
super().__init__()
from pathlib import Path
from safetensors import safe_open
import json
speculator_path = config.speculator["path"]
medusa_config = str(Path(speculator_path) / "config.json")
filename = str(Path(speculator_path) / "medusa_lm_head.safetensors")
with open(medusa_config, "r") as f:
medusa_config = json.load(f)
routing = weights.routing
with safe_open(filename, framework="pytorch") as f:
for k in f.keys():
if k in routing and routing[k] != filename:
raise RuntimeError(
f"Key {k} was found in multiple files: {filename} and {routing[k]}"
)
routing[k] = filename
self.n_medusa_heads = get_speculate()
assert medusa_config["medusa_num_layers"] == 1
self.linear = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{i}.0.linear" for i in range(self.n_medusa_heads)],
dim=0,
weights=weights,
bias=True,
)
self.process_group = weights.process_group
self.world_size = self.process_group.size()
self.rank = self.process_group.rank()
self.act = torch.nn.SiLU()
self.lm_head = TensorParallelHead.load(config, prefix, weights)
def forward(self, x):
# If we have too many tokens, we skip speculative logits
if x.shape[0] > 128:
logits = self.lm_head(x)
return logits, None
size = x.shape[-1]
block_size = (size + self.world_size - 1) // self.world_size
start = self.rank * block_size
stop = (self.rank + 1) * block_size
x_block = x[:, start:stop]
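# Each tensor-parallel rank keeps only its contiguous slice of the hidden dim,
# e.g. with size=4096 and world_size=2, rank 0 takes columns 0..2047 and
# rank 1 columns 2048..4095.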
# Compute all medusa heads at the same time, then reshape so n_medusa_heads gets its own dimension
medusa_res = self.act(self.linear(x)).reshape(
*x_block.shape[:-1], self.n_medusa_heads, x_block.shape[-1]
)
# Apply all residual medusa heads
output = x[:, start:stop].unsqueeze(-2) + medusa_res
# Gather medusa heads
world_output = [
torch.empty_like(output) for _ in range(self.process_group.size())
]
torch.distributed.all_gather(world_output, output, group=self.process_group)
world_output = torch.cat(world_output, dim=-1)
# Concatenate x and the medusa residual x
stacked_x = torch.cat([x.unsqueeze(-2), world_output], dim=-2)
# Compute lm head on x + medusa residual x
logits = self.lm_head(stacked_x)
# Finally, split logits from speculative logits
logits, speculative_logits = torch.split(
logits, [1, self.n_medusa_heads], dim=-2
)
# Squeeze added dimension
logits = logits.squeeze(-2)
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/layers/medusa.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/medusa.py",
"repo_id": "text-generation-inference",
"token_count": 2975
} | 328 |
# coding=utf-8
# Copyright 2024 Cohere team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from typing import Optional, List, Tuple
from text_generation_server.layers.attention import (
paged_attention,
attention,
Seqlen,
)
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
get_linear,
)
from text_generation_server.layers.layernorm import (
FastLayerNorm,
)
from text_generation_server.layers.rotary import (
PositionRotaryEmbedding,
)
from text_generation_server.utils.weights import UnquantizedWeight
if SYSTEM == "cuda":
import dropout_layer_norm
else:
dropout_layer_norm = None
class CohereRotary(PositionRotaryEmbedding):
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
cos: torch.Tensor,
sin: torch.Tensor,
):
# Such control-flow branching may add some overhead.
if SYSTEM == "cuda":
from text_generation_server.utils.kernels import load_kernel
rotary = load_kernel(module="rotary", repo_id="kernels-community/rotary")
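# Interleaved (even/odd) rotary pairing: q1/q2 and k1/k2 below are views on
# the even and odd feature columns, and the kernel rotates them in place.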
q1 = query[..., ::2]
q2 = query[..., 1::2]
rotary.apply_rotary(q1, q2, cos, sin, q1, q2, False)
k1 = key[..., ::2]
k2 = key[..., 1::2]
rotary.apply_rotary(k1, k2, cos, sin, k1, k2, False)
elif SYSTEM == "rocm":
import vllm._custom_ops as ops
# NOTE: On ROCm systems, we use a RoPE implementation adapted from vLLM which launches a single kernel for both query/key, contrary to the flash-attn implementation used on NVIDIA systems.
# Compiling flash-attn rotary on ROCm, it appears hipcc is unable to unroll loops, resulting in even slower inference compared to eager: https://github.com/pytorch/pytorch/issues/113773
head_size = query.shape[-1]
# Inplace operation, updating query and key.
ops.rotary_embedding(query, key, head_size, cos, sin, False)
elif SYSTEM == "ipex":
import intel_extension_for_pytorch as ipex
ipex.llm.functional.rotary_embedding(
query, key, sin, cos, query.size(-1), False
)
else:
raise ValueError(
"Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction."
)
class CohereLayerNorm(nn.Module):
def __init__(self, prefix, weights, eps):
super().__init__()
weight = weights.get_sharded(f"{prefix}.weight", dim=0)
self.weight = nn.Parameter(weight)
# Fake weights
self.ones = weight.new_ones(weight.shape[1])
self.eps = eps
def forward(self, hidden_states):
if hidden_states.shape[-1] > 8192 or SYSTEM != "cuda":
hidden_states = hidden_states.reshape(
-1, self.weight.shape[0], self.weight.shape[1]
)
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
mean = hidden_states.mean(-1, keepdim=True)
hidden_states_minus_mean = hidden_states - mean
variance = hidden_states_minus_mean.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states_minus_mean * torch.rsqrt(variance + self.eps)
hidden_states = self.weight.to(torch.float32) * hidden_states
hidden_states = hidden_states.view(-1, self.weight.shape[1])
return hidden_states.to(input_dtype)
(
hidden_states,
*rest,
) = dropout_layer_norm.dropout_add_ln_fwd(
hidden_states,
None,
self.ones,
None,
None,
None,
None,
None,
0.0,
self.eps,
1.0,
0,
None,
False,
False,
)
# Required to apply one weight matrix per head
hidden_states = hidden_states.view(
-1, self.weight.shape[0], self.weight.shape[1]
)
hidden_states = self.weight * hidden_states
hidden_states = hidden_states.view(-1, self.weight.shape[1])
return hidden_states
def load_attention(config, prefix, weights):
if config.num_attention_heads != config.num_key_value_heads:
return _load_gqa(config, prefix, weights)
else:
return TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=config.attention_bias,
)
def _load_gqa(config, prefix: str, weights):
assert config.hidden_size % config.num_attention_heads == 0
assert config.num_attention_heads % weights.process_group.size() == 0
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
)
if isinstance(weight, UnquantizedWeight):
weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.hidden_size // config.num_attention_heads
num_heads = config.num_attention_heads // weights.process_group.size()
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
assert list(weight.weight.shape) == [
(num_heads + 2 * num_key_value_heads) * head_size,
config.hidden_size,
], f"{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
if config.attention_bias:
w = [
weights.get_sharded(f"{p}.bias", dim=0)
for p in [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"]
]
bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device)
else:
bias = None
return TensorParallelColumnLinear(get_linear(weight, bias=bias))
class FlashCohereAttention(torch.nn.Module):
def __init__(
self,
prefix: str,
config,
weights,
):
super().__init__()
self.num_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
self.rotary_emb = CohereRotary.static(
config=config,
dim=self.head_size,
base=config.rope_theta,
device=weights.device,
)
self.softmax_scale = self.head_size**-0.5
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
self.use_qk_norm = config.use_qk_norm
if self.use_qk_norm:
self.q_norm = CohereLayerNorm(
prefix=f"{prefix}.q_norm",
weights=weights,
eps=config.layer_norm_eps,
)
self.k_norm = CohereLayerNorm(
prefix=f"{prefix}.k_norm",
weights=weights,
eps=config.layer_norm_eps,
)
else:
self.q_norm = None
self.k_norm = None
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=config.attention_bias,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
qkv = self.query_key_value(hidden_states)
query, key, value = qkv.split(
[
self.head_size * self.num_heads,
self.head_size * self.num_key_value_heads,
self.head_size * self.num_key_value_heads,
],
dim=1,
)
if self.use_qk_norm:
query = query.reshape(-1, self.head_size)
key = key.reshape(-1, self.head_size)
query = self.q_norm(query.contiguous())
key = self.k_norm(key.contiguous())
query = query.view(-1, self.num_heads, self.head_size)
key = key.view(-1, self.num_key_value_heads, self.head_size)
value = value.view(-1, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, key, cos, sin)
kv_cache.store(
key=key,
value=value,
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query=query,
key=key,
value=value,
kv_cache=kv_cache,
kv_scales=self.kv_scales,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=self.kv_scales,
)
return self.o_proj(
attn_output.view(-1, self.num_heads * self.head_size), reduce=False
)
class CohereMLP(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
act = config.hidden_act
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
# Fuse gate and up proj
self.gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=False,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=False,
)
self.intermediate_size = (
config.intermediate_size // weights.process_group.size()
)
def forward(self, hidden_states):
gate_up_states = self.gate_up_proj(hidden_states)
gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
return self.down_proj(
self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], reduce=False
)
class FlashCohereLayer(nn.Module):
def __init__(self, prefix: str, layer_id, config, weights):
super().__init__()
prefix = f"{prefix}.layers.{layer_id}"
self.self_attn = FlashCohereAttention(
prefix=f"{prefix}.self_attn", config=config, weights=weights
)
self.mlp = CohereMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
self.input_layernorm = FastLayerNorm.load_no_bias(
prefix=f"{prefix}.input_layernorm",
weights=weights,
eps=config.layer_norm_eps,
)
self.process_group = weights.process_group
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
mlp_output = self.mlp(normed_hidden_states)
output = attn_output + mlp_output
if self.process_group.size() > 1:
torch.distributed.all_reduce(output, group=self.process_group)
return output, res
class FlashCohereModel(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
process_group = weights.process_group
self.tp_rank = process_group.rank()
self.tp_world_size = process_group.size()
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.embed_tokens", weights=weights
)
self.layers = nn.ModuleList(
[
FlashCohereLayer(
prefix,
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.norm = FastLayerNorm.load_no_bias(
prefix=f"{prefix}.norm", weights=weights, eps=config.layer_norm_eps
)
self.gradient_checkpointing = False
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: torch.Tensor,
max_s: int,
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
# Get rotary cos and sin for this forward
# Avoid to index in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashCohereForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
if not prefix:
prefix = "model"
else:
prefix = f"{prefix}.model"
self.model = FlashCohereModel(prefix, config, weights)
try:
self.lm_head = SpeculativeHead.load(
config,
prefix="lm_head",
weights=weights,
)
except RuntimeError:
self.lm_head = SpeculativeHead.load(
config,
prefix=f"{prefix}.embed_tokens",
weights=weights,
)
self.logit_scale = config.logit_scale
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.lm_head(hidden_states)
logits *= self.logit_scale
if speculative_logits is not None:
speculative_logits *= self.logit_scale
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 8966
} | 329 |
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from typing import Optional, List, Tuple
from text_generation_server.layers.attention import (
paged_attention,
attention,
Seqlen,
)
from text_generation_server.layers import (
TensorParallelMultiAdapterLinear,
TensorParallelAdapterRowLinear,
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
)
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.layers.rotary import PositionRotaryEmbedding
from text_generation_server.layers.layernorm import (
FastRMSNorm,
)
def load_attention(config, prefix, weights, layer_id):
prefixes = [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"]
head_size = config.hidden_size // config.num_attention_heads
sizes = [
head_size * config.num_attention_heads,
head_size * config.num_key_value_heads,
head_size * config.num_key_value_heads,
]
if config.num_attention_heads != config.num_key_value_heads:
base_layer = _load_gqa(config, prefix, weights)
else:
base_layer = TensorParallelColumnLinear.load_multi(
config,
prefixes=prefixes,
dim=0,
weights=weights,
bias=True,
)
return TensorParallelMultiAdapterLinear.load(
base_layer=base_layer,
layer_id=layer_id,
layer_names=prefixes,
sizes=sizes,
process_group=weights.process_group,
)
def _load_gqa(config, prefix: str, weights):
assert config.hidden_size % config.num_attention_heads == 0
assert config.num_attention_heads % weights.process_group.size() == 0
return TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=True,
)
class Qwen2Attention(torch.nn.Module):
def __init__(
self,
index: int,
prefix: str,
config,
weights,
):
super().__init__()
self.window_size = (
config.sliding_window if config.sliding_window is not None else -1
)
self.num_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,
base=config.rope_theta,
device=weights.device,
)
self.softmax_scale = self.head_size**-0.5
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights, index)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=False,
)
self.o_proj = TensorParallelAdapterRowLinear.load(
o_proj,
index,
"o_proj",
process_group=weights.process_group,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
adapter_data,
):
qkv = self.query_key_value(hidden_states, adapter_data)
query, kv = qkv.split(
[
self.head_size * self.num_heads,
2 * self.head_size * self.num_key_value_heads,
],
dim=1,
)
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)
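# Rotary embeddings are applied in place to the query and to the key slice
# kv[:, 0]; the value slice kv[:, 1] is left untouched.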
if prefill_cache_indices is not None:
kv_to_cache = kv[prefill_cache_indices]
else:
kv_to_cache = kv
kv_cache.store(
key=kv_to_cache[:, 0],
value=kv_to_cache[:, 1],
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query=query,
key=kv_to_cache[:, 0],
value=kv_to_cache[:, 1],
kv_cache=kv_cache,
kv_scales=self.kv_scales,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
window_size_left=self.window_size,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=self.kv_scales,
window_size_left=self.window_size,
)
return self.o_proj(
attn_output.view(-1, self.num_heads * self.head_size), adapter_data
)
class Qwen2MLP(nn.Module):
def __init__(self, prefix, config, weights, index):
super().__init__()
act = config.hidden_act
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
# Fuse gate and up proj
prefixes = [f"{prefix}.gate_proj", f"{prefix}.up_proj"]
sizes = [
config.intermediate_size,
config.intermediate_size,
]
gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=prefixes,
weights=weights,
dim=0,
bias=False,
)
self.gate_up_proj = TensorParallelMultiAdapterLinear.load(
gate_up_proj,
layer_id=index,
layer_names=prefixes,
sizes=sizes,
process_group=weights.process_group,
)
down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=False,
)
self.down_proj = TensorParallelAdapterRowLinear.load(
down_proj,
index,
"down_proj",
process_group=weights.process_group,
)
self.intermediate_size = (
config.intermediate_size // weights.process_group.size()
)
def forward(self, hidden_states, adapter_data):
gate_up_states = self.gate_up_proj(hidden_states, adapter_data)
gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
return self.down_proj(
self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], adapter_data
)
class Qwen2Layer(nn.Module):
def __init__(self, prefix, layer_id, config, weights):
super().__init__()
prefix = f"{prefix}.layers.{layer_id}"
self.self_attn = Qwen2Attention(
index=layer_id, prefix=f"{prefix}.self_attn", config=config, weights=weights
)
self.mlp = Qwen2MLP(
prefix=f"{prefix}.mlp", config=config, weights=weights, index=layer_id
)
self.input_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
adapter_data,
):
normed_hidden_states, residual = self.input_layernorm(hidden_states)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
adapter_data,
)
hidden_states = attn_output + residual
# faster post attention rms norm
hidden_states, residual = self.post_attention_layernorm(hidden_states)
mlp_output = self.mlp(hidden_states, adapter_data)
hidden_states = mlp_output + residual
return hidden_states
class Qwen2Model(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
prefix = f"{prefix}.model" if prefix else "model"
process_group = weights.process_group
self.tp_rank = process_group.rank()
self.tp_world_size = process_group.size()
self.layers = nn.ModuleList(
[
Qwen2Layer(
prefix,
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.norm = FastRMSNorm.load(
prefix=f"{prefix}.norm", weights=weights, eps=config.rms_norm_eps
)
self.gradient_checkpointing = False
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
inputs_embeds: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
true_max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
adapter_data,
) -> torch.Tensor:
hidden_states = inputs_embeds
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids,
true_max_s,
hidden_states.dtype,
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
adapter_data,
)
hidden_states, _ = self.norm(hidden_states)
return hidden_states
class Qwen2ForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
self.model = Qwen2Model(prefix, config, weights)
if config.tie_word_embeddings:
suffix = "model.embed_tokens"
else:
suffix = "lm_head"
self.lm_head = SpeculativeHead.load(
config,
prefix=f"{prefix}.{suffix}" if prefix else suffix,
weights=weights,
)
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.embed_tokens" if prefix else "model.embed_tokens",
weights=weights,
)
self.window_size = config.sliding_window
self.window_size_tensor = (
torch.tensor(config.sliding_window, device=weights.device)
if self.window_size is not None
else None
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor] = None,
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> torch.Tensor:
true_max_s = max_s
if prefill_cache_indices is not None:
# Slots also need to be sliced as it has the same size as the whole kv tensor
slots = slots[prefill_cache_indices]
elif self.window_size is not None:
# Clamp in decode mode as paged attention requires clamped values whereas the flash attention
# kernel requires the true values
seqlen = seqlen.clamp(max=self.window_size_tensor)
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = self.model(
inputs_embeds,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
true_max_s,
prefill_cache_indices,
adapter_data,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits = self.lm_head(hidden_states)
return logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 7370
} | 330 |
# coding=utf-8
# Copyright 2024 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Llava-NeXT model."""
from typing import List, Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from transformers.activations import ACT2FN
from transformers.image_processing_utils import select_best_resolution
from text_generation_server.layers.attention import Seqlen
from text_generation_server.models.custom_modeling.vlm import (
load_text_model,
load_vision_model,
)
from text_generation_server.layers import (
TensorParallelColumnLinear,
TensorParallelRowLinear,
)
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
"""
Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
Args:
image_size (`tuple`):
The size of the input image in the format (height, width).
grid_pinpoints (`List`):
A list containing possible resolutions. Each item in the list should be a tuple or list
of the form `(height, width)`.
patch_size (`int`):
The size of each image patch.
Returns:
tuple: The shape of the image patch grid in the format (height, width).
"""
if not isinstance(grid_pinpoints, list):
raise ValueError("grid_pinpoints should be a list of tuples or lists")
height, width = select_best_resolution(image_size, grid_pinpoints)
return height // patch_size, width // patch_size
def unpad_image(tensor, original_size):
"""
Unpads a PyTorch tensor of a padded and resized image.
Args:
tensor (`torch.Tensor`):
The image tensor, assumed to be of shape (num_channels, height, width).
original_size (`tuple`):
The original size of the image (height, width).
Returns:
`torch.Tensor`: The unpadded image tensor.
"""
original_height, original_width = original_size
current_height, current_width = tensor.shape[1:]
original_aspect_ratio = original_width / original_height
current_aspect_ratio = current_width / current_height
if original_aspect_ratio > current_aspect_ratio:
scale_factor = current_width / original_width
new_height = int(original_height * scale_factor)
padding = (current_height - new_height) // 2
unpadded_tensor = tensor[:, padding : current_height - padding, :]
else:
scale_factor = current_height / original_height
new_width = int(original_width * scale_factor)
padding = (current_width - new_width) // 2
unpadded_tensor = tensor[:, :, padding : current_width - padding]
return unpadded_tensor
# Copied from transformers.models.llava.modeling_llava.LlavaMultiModalProjector with Llava->LlavaNext
class LlavaNextMultiModalProjector(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
self.linear_1 = TensorParallelColumnLinear.load(
prefix=f"{prefix}.linear_1", config=config, weights=weights, bias=True
)
self.act = ACT2FN[config.projector_hidden_act]
self.linear_2 = TensorParallelRowLinear.load(
prefix=f"{prefix}.linear_2", config=config, weights=weights, bias=True
)
def forward(self, image_features):
hidden_states = self.linear_1(image_features)
hidden_states = self.act(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
class LlavaNextForConditionalGeneration(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
config.vision_config.quantize = config.quantize
vision_config = config.vision_config
# Instead of running every layer and then selecting hidden_states[vision_feature_layer],
# instantiate only the layers up to that feature layer and skip the final pooling.
if config.vision_feature_layer < 0:
vision_config.num_hidden_layers += config.vision_feature_layer + 1
else:
vision_config.num_hidden_layers = config.vision_feature_layer + 1
self.vision_tower = load_vision_model(
prefix="vision_tower" if not prefix else f"{prefix}.vision_tower",
config=config.vision_config,
weights=weights,
)
self.multi_modal_projector = LlavaNextMultiModalProjector(
prefix="multi_modal_projector", config=config, weights=weights
)
self.image_newline = weights.get_tensor("image_newline")
self.vocab_size = config.text_config.vocab_size
self.config = config
config.text_config.quantize = config.quantize
config.text_config.speculator = config.speculator
self.text_model = load_text_model(
prefix="language_model" if not prefix else f"{prefix}.language_model",
config=config.text_config,
weights=weights,
)
self.pad_token_id = (
config.pad_token_id if config.pad_token_id is not None else -1
)
def _merge_input_ids_with_image_features(
self,
input_ids: torch.Tensor,
inputs_embeds: torch.Tensor,
image_features: torch.Tensor,
):
"""In place merges in vision_embeddings with inputs_embeds."""
mask = input_ids == self.config.image_token_index
# Let's pray we have enabled enough slots!
try:
inputs_embeds[mask] = image_features.view(-1, image_features.shape[-1])
except Exception as e:
raise RuntimeError(
f"Cannot fill images right now. If error happens at warmup, make sure you have enough `--max-input-tokens` to handle images. If error happens at regular runtime, please fill in an issue: {e}"
)
return inputs_embeds
def get_vision_embeds(
self,
pixel_values: torch.FloatTensor,
pixel_attention_mask: Optional[torch.FloatTensor] = None,
image_sizes: Optional[torch.Tensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
):
# num_special_image_tokens = (input_ids == self.config.image_token_index).sum()
# assert num_special_image_tokens == len(pixel_values), f"Received {num_special_image_tokens} for {len(pixel_values)} images, this is invalid"
# 1. Extract the input embeddings
# 2. Merge text and images
num_images, num_patches, channels, height, width = pixel_values.shape
pixel_values = pixel_values.view(
num_images * num_patches, channels, height, width
)
image_features = self.vision_tower(pixel_values)
# selected_image_feature = image_features.hidden_states[self.config.vision_feature_layer]
# Already done within the clip model
selected_image_feature = image_features.last_hidden_state
if self.config.vision_feature_select_strategy == "default":
selected_image_feature = selected_image_feature[:, 1:]
elif self.config.vision_feature_select_strategy == "full":
selected_image_feature = selected_image_feature
else:
raise RuntimeError(
f"Strategy `{self.config.vision_feature_select_strategy}` is not supported/valid."
)
image_features = self.multi_modal_projector(selected_image_feature)
# split up image_features for each of the individual images
# hence we get a list of image_features, each of shape (5, num_patches, hidden_size)
# if we assume each image has 5 image features (base image + 4 patches)
split_sizes = [num_patches] * num_images
image_features = torch.split(image_features, split_sizes, dim=0)
# NOTE we only support multimodal_patch_merge_type == "spatial_unpad"
height = width = (
self.config.vision_config.image_size // self.config.vision_config.patch_size
)
new_image_features = []
for image_idx, image_feature in enumerate(image_features):
if image_feature.shape[0] > 1:
base_image_feature = image_feature[0]
image_feature = image_feature[1:]
if height * width != base_image_feature.shape[0]:
raise ValueError(
"The number of patches is not consistent with the image size."
)
# Dimensions are intentionally swapped to be bug-compatible with
# upstream: https://github.com/LLaVA-VL/LLaVA-NeXT/issues/59
num_patch_width, num_patch_height = get_anyres_image_grid_shape(
image_sizes[image_idx],
self.config.image_grid_pinpoints,
self.config.vision_config.image_size,
)
image_feature = image_feature.view(
num_patch_height, num_patch_width, height, width, -1
)
image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
image_feature = image_feature.flatten(1, 2).flatten(2, 3)
image_feature = unpad_image(image_feature, image_sizes[image_idx])
image_feature = torch.cat(
(
image_feature,
self.image_newline[:, None, None].expand(
*image_feature.shape[:-1], 1
),
),
dim=-1,
)
image_feature = image_feature.flatten(1, 2).transpose(0, 1)
image_feature = torch.cat((base_image_feature, image_feature), dim=0)
else:
image_feature = image_feature[0]
image_feature = torch.cat(
(image_feature, self.image_newline[None]), dim=0
)
new_image_features.append(image_feature)
image_features = torch.stack(new_image_features, dim=0)
return image_features.view(-1, image_features.shape[-1])
def get_inputs_embeds(
self,
input_ids: torch.Tensor,
vision_embeds: torch.Tensor = None,
pixel_values: torch.FloatTensor = None,
image_sizes: Optional[torch.LongTensor] = None,
):
inputs_embeds = self.text_model.embed_tokens(input_ids)
if vision_embeds is not None:
# During generation we must not replace an image_token_id that the model itself
# produced, since there is no corresponding image for it
inputs_embeds = self._merge_input_ids_with_image_features(
input_ids, inputs_embeds, vision_embeds
)
return inputs_embeds
def forward(
self,
inputs_embeds: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
# Unused for this model
attention_mask: Optional[torch.BoolTensor] = None,
adapter_data: Optional[torch.Tensor] = None,
):
hidden_states = self.text_model.model(
inputs_embeds=inputs_embeds,
position_ids=position_ids,
cu_seqlen_prefill=cu_seqlen_prefill,
kv_cache=kv_cache,
block_tables=block_tables,
slots=slots,
seqlen=seqlen,
max_s=max_s,
true_max_s=max_s,
prefill_cache_indices=None,
adapter_data=adapter_data,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.text_model.lm_head(hidden_states)
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/llava_next.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/llava_next.py",
"repo_id": "text-generation-inference",
"token_count": 5362
} | 331 |
import torch
import torch.distributed
from transformers import AutoTokenizer, PreTrainedTokenizerBase
from typing import Optional, Union
from text_generation_server.models.custom_modeling.mamba_modeling import (
MambaConfig,
)
from loguru import logger
from text_generation_server.pb import generate_pb2
from text_generation_server.utils import (
initialize_torch_distributed,
weight_files,
Weights,
)
from text_generation_server.models.globals import CUDA_GRAPHS, MEM_POOL
import time
from text_generation_server.models.custom_modeling.mamba_modeling import (
MambaModel,
InferenceParams,
)
from text_generation_server.models import Model
from typing import Any, List, Tuple, Type, Dict
from text_generation_server.models.types import (
Batch,
Tokens,
Generation,
GeneratedText,
)
from text_generation_server.utils.chunks import concat_text_chunks
from text_generation_server.utils.quantization import get_loader
from text_generation_server.utils.tokens import batch_top_tokens, Sampling
from dataclasses import dataclass
from text_generation_server.utils import NextTokenChooser, StoppingCriteria
def new_inference_params(
n_blocks: int,
batch_size: int,
d_inner: int,
d_conv: int,
d_state: int,
seqlen_offset: int,
dtype: torch.dtype,
device: torch.device,
):
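# Allocates zero-initialized per-layer states: a depthwise-conv window of
# width d_conv and an SSM state of size d_state for each of the batch_size
# sequences, across n_blocks layers.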
max_seqlen = 0
conv_states = torch.zeros(
(
n_blocks,
batch_size,
d_inner,
d_conv,
),
device=device,
dtype=dtype,
)
ssm_states = torch.zeros(
(
n_blocks,
batch_size,
d_inner,
d_state,
),
device=device,
dtype=dtype,
)
inference_params = InferenceParams(
max_seqlen=max_seqlen,
max_batch_size=batch_size,
seqlen_offset=seqlen_offset,
conv_states=conv_states,
ssm_states=ssm_states,
)
return inference_params
@dataclass
class MambaBatch(Batch):
batch_id: int
requests: List[generate_pb2.Request]
requests_idx_mapping: Dict[int, int]
# Decoder values
input_ids: torch.Tensor
# All tokens
all_input_ids: List[torch.Tensor]
# Lengths of all generations present in the batch
input_lengths: List[int]
prefix_offsets: List[int]
read_offsets: List[int]
# Generation helpers
next_token_choosers: List[NextTokenChooser]
stopping_criterias: List[StoppingCriteria]
top_n_tokens: List[int]
top_n_tokens_tensor: torch.Tensor
# Metadata used for padding
max_input_length: int
padding_right_offset: int
# Maximum number of tokens this batch will grow to
max_tokens: int
# Past metadata
keys_head_dim_last: bool = True
# Inference params
inference_params: Optional[Dict[str, Any]] = None
def to_pb(self) -> generate_pb2.CachedBatch:
return generate_pb2.CachedBatch(
id=self.batch_id,
request_ids=[r.id for r in self.requests],
size=len(self),
max_tokens=self.max_tokens,
current_tokens=len(self),
)
@classmethod
def from_pb(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
dtype: torch.dtype,
device: torch.device,
) -> "MambaBatch":
inputs = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
prefix_offsets = []
read_offsets = []
requests_idx_mapping = {}
# Parse batch
max_truncation = 0
padding_right_offset = 0
max_decode_tokens = 0
for i, r in enumerate(pb.requests):
requests_idx_mapping[r.id] = i
inputs.append(concat_text_chunks(r.input_chunks.chunks))
next_token_choosers.append(
NextTokenChooser.from_pb(r.parameters, device, tokenizer)
)
stopping_criteria = StoppingCriteria.from_pb(
r.stopping_parameters, tokenizer
)
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(r.top_n_tokens)
max_truncation = max(max_truncation, r.truncate)
max_decode_tokens += stopping_criteria.max_new_tokens
padding_right_offset = max(
padding_right_offset, stopping_criteria.max_new_tokens
)
tokenized_inputs = tokenizer(
inputs,
return_tensors="pt",
padding=True,
return_token_type_ids=False,
truncation=True,
max_length=max_truncation,
).to(device)
for _ in pb.requests:
input_len = tokenized_inputs["input_ids"].shape[1]
prefix_offsets.append(input_len - 5)
read_offsets.append(input_len)
input_lengths = tokenized_inputs["attention_mask"].sum(1)
max_input_length = input_lengths.max()
input_ids = tokenized_inputs["input_ids"]
all_input_ids = tokenized_inputs["input_ids"].T.split(1, dim=1)
top_n_tokens_tensor = torch.tensor(
top_n_tokens, device=device, dtype=torch.int64
)
max_tokens = len(inputs) * (max_input_length + max_decode_tokens)
return cls(
batch_id=pb.id,
requests=pb.requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=input_ids,
# past_input_ids=None,
all_input_ids=list(all_input_ids),
input_lengths=input_lengths.tolist(),
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length.item(),
padding_right_offset=padding_right_offset,
max_tokens=max_tokens,
)
def filter(self, request_ids: List[int]) -> Optional["MambaBatch"]:
if len(request_ids) == 0:
raise ValueError("Batch must have at least one request")
if len(request_ids) == len(self):
return self
keep_indices = []
# New values after filtering
requests_idx_mapping = {}
requests = []
input_lengths = []
prefix_offsets = []
read_offsets = []
all_input_ids = []
max_input_length = 0
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
total_remaining_decode_tokens = 0
new_padding_right_offset = 0
indices = []
for i, request_id in enumerate(request_ids):
idx = self.requests_idx_mapping[request_id]
requests_idx_mapping[request_id] = i
keep_indices.append(idx)
requests.append(self.requests[idx])
prefix_offsets.append(self.prefix_offsets[idx])
read_offsets.append(self.read_offsets[idx])
all_input_ids.append(self.all_input_ids[idx])
request_input_length = self.input_lengths[idx]
input_lengths.append(request_input_length)
max_input_length = max(max_input_length, request_input_length)
indices.append(idx)
next_token_choosers.append(self.next_token_choosers[idx])
stopping_criteria = self.stopping_criterias[idx]
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(self.top_n_tokens[idx])
remaining_decode_tokens = (
stopping_criteria.max_new_tokens - stopping_criteria.current_tokens
)
total_remaining_decode_tokens += remaining_decode_tokens
new_padding_right_offset = max(
new_padding_right_offset, remaining_decode_tokens
)
# Apply indices to input_ids, attention mask, past key values and other items that need to be cached
input_ids = self.input_ids[keep_indices]
top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices]
max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens
self.requests = requests
self.requests_idx_mapping = requests_idx_mapping
self.input_ids = input_ids
self.all_input_ids = all_input_ids
self.input_lengths = input_lengths
self.prefix_offsets = prefix_offsets
self.read_offsets = read_offsets
self.next_token_choosers = next_token_choosers
self.stopping_criterias = stopping_criterias
self.top_n_tokens = top_n_tokens
self.top_n_tokens_tensor = top_n_tokens_tensor
self.max_input_length = max_input_length
self.padding_right_offset = new_padding_right_offset
self.max_tokens = max_tokens
# TODO
# Kept simple by only updating the GPU states; updating the other CPU-side values may also be necessary.
self.inference_params.conv_states = self.inference_params.conv_states[
:, indices
]
self.inference_params.ssm_states = self.inference_params.ssm_states[:, indices]
return self
@classmethod
def concatenate(cls, batches: List["MambaBatch"]) -> "MambaBatch":
# Used for padding
total_batch_size = 0
max_input_length = 0
padding_right_offset = 0
for batch in batches:
total_batch_size += len(batch)
max_input_length = max(max_input_length, batch.max_input_length)
padding_right_offset = max(padding_right_offset, batch.padding_right_offset)
# Batch attributes
requests = []
requests_idx_mapping = {}
input_lengths = []
prefix_offsets = []
read_offsets = []
all_input_ids = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
max_tokens = 0
seqlen_offset = 0
(n_blocks, _, d_inner, d_conv) = batches[0].inference_params.conv_states.shape
(_, _, _, d_state) = batches[0].inference_params.ssm_states.shape
dtype = batches[0].inference_params.conv_states.dtype
device = batches[0].inference_params.conv_states.device
inference_params = new_inference_params(
n_blocks=n_blocks,
batch_size=total_batch_size,
d_state=d_state,
d_conv=d_conv,
d_inner=d_inner,
seqlen_offset=seqlen_offset,
device=device,
dtype=dtype,
)
# Batch tensors
input_ids = None
top_n_tokens_tensor = None
# Used for slicing correctly inside the tensors
# Equivalent to a cumsum on batch sizes
start_index = 0
for i, batch in enumerate(batches):
requests.extend(batch.requests)
input_lengths.extend(batch.input_lengths)
prefix_offsets.extend(batch.prefix_offsets)
read_offsets.extend(batch.read_offsets)
all_input_ids.extend(batch.all_input_ids)
next_token_choosers.extend(batch.next_token_choosers)
stopping_criterias.extend(batch.stopping_criterias)
top_n_tokens.extend(batch.top_n_tokens)
if i == 0:
requests_idx_mapping = batch.requests_idx_mapping
else:
# We need to offset the mapping for each batch by the cumulative batch size
for k, v in batch.requests_idx_mapping.items():
requests_idx_mapping[k] = v + start_index
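# e.g. if the first batch held 2 requests, a mapping {id: 0} coming from
# the second batch becomes {id: 2} in the merged batch.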
# Slicing end index for this batch
end_index = start_index + len(batch)
# Create empty tensor
# input_ids is always of shape [batch_size, 1]
# We do not need to pad it
if input_ids is None:
input_ids = batch.input_ids.new_empty((total_batch_size, 1))
# Copy to correct indices
input_ids[start_index:end_index] = batch.input_ids
if top_n_tokens_tensor is None:
top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(
total_batch_size,
)
top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor
# Account for padding tokens that may have been added while concatenating
max_tokens += batch.max_tokens + (
max_input_length - batch.max_input_length
) * len(batch)
inference_params.max_seqlen = max(
inference_params.max_seqlen, batch.inference_params.max_seqlen
)
assert batch.inference_params.seqlen_offset != 0, "Invalid seqlen offset"
inference_params.seqlen_offset = max(
inference_params.seqlen_offset, batch.inference_params.seqlen_offset
)
inference_params.conv_states[:, start_index:end_index] = (
batch.inference_params.conv_states
)
inference_params.ssm_states[:, start_index:end_index] = (
batch.inference_params.ssm_states
)
start_index = end_index
return cls(
batch_id=batches[0].batch_id,
requests=requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=input_ids,
all_input_ids=all_input_ids,
input_lengths=input_lengths,
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length,
padding_right_offset=padding_right_offset,
keys_head_dim_last=batches[0].keys_head_dim_last,
max_tokens=max_tokens,
inference_params=inference_params,
)
def __len__(self):
return len(self.requests)
class Mamba(Model):
def __init__(
self,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
speculator: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
self.quantize = quantize
self.process_group, _rank, world_size = initialize_torch_distributed()
if world_size > 1:
raise RuntimeError("Mamba does not support Tensor Parallelism (TP)")
self.cuda_graphs = {}
if torch.cuda.is_available():
device = torch.device("cuda")
# Bf16 is important: in f16, accumulations in the matmul cause
# differences while the server is under load.
# This is detectable by the integration load test.
dtype = torch.bfloat16 if dtype is None else dtype
else:
if quantize:
raise ValueError("quantization is not available on CPU")
device = torch.device("cpu")
dtype = torch.float32 if dtype is None else dtype
tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
config = MambaConfig.from_pretrained(
model_id, revision=revision, trust_remote_code=trust_remote_code
)
tokenizer.bos_token_id = config.bos_token_id
tokenizer.eos_token_id = config.eos_token_id
tokenizer.pad_token = tokenizer.eos_token
config.quantize = quantize
config.speculator = speculator
torch.distributed.barrier(group=self.process_group)
weights_loader = get_loader(
quantize=quantize, model_id=model_id, revision=revision
)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
weights = Weights(
filenames,
device,
dtype,
process_group=self.process_group,
weights_loader=weights_loader,
)
model = MambaModel(config, weights)
torch.distributed.barrier(group=self.process_group)
super(Mamba, self).__init__(
model_id=model_id,
model=model,
tokenizer=tokenizer,
requires_padding=True,
dtype=dtype,
device=device,
)
@property
def batch_type(self) -> Type[MambaBatch]:
return MambaBatch
def warmup(
self, batch, max_input_tokens: Optional[int], max_total_tokens: Optional[int]
) -> Tuple[Optional[int], Optional[int], Optional[int]]:
# TODO: implement warmup for Mamba if needed
if CUDA_GRAPHS:
if self.speculate is None or self.speculate == 0:
try:
logger.info(f"Cuda Graphs are enabled for sizes {CUDA_GRAPHS}")
# Warmup cuda graphs
for bs in CUDA_GRAPHS:
self.cuda_graph_warmup(bs)
except Exception:
logger.exception("Decode cuda graph warmup failed")
else:
logger.info(f"Cuda Graphs are disabled (CUDA_GRAPHS={CUDA_GRAPHS}).")
if max_total_tokens is None:
max_total_tokens = min(self.tokenizer.model_max_length, 4096)
if max_input_tokens is None:
max_input_tokens = max_total_tokens - 1
return None, max_input_tokens, max_total_tokens
def cuda_graph_warmup(self, batch_size: int):
input_ids = torch.zeros((batch_size, 1), dtype=torch.int64, device=self.device)
n_blocks = len(self.model.blocks)
d_state = self.model.config.d_state
d_conv = self.model.config.d_conv
# Inner takes the expand multiplication
d_inner = self.model.config.d_inner
# Important: seqlen_offset must be non-zero to go through the update mechanism with the state
seqlen_offset = 1
inference_params = new_inference_params(
n_blocks=n_blocks,
batch_size=batch_size,
d_state=d_state,
d_conv=d_conv,
d_inner=d_inner,
seqlen_offset=seqlen_offset,
device=self.device,
dtype=self.dtype,
)
graph = torch.cuda.CUDAGraph()
torch.cuda.synchronize()
# Run once outside to warmup
self.model.forward(input_ids=input_ids, inference_params=inference_params)
torch.cuda.synchronize()
with torch.cuda.graph(graph, pool=MEM_POOL):
logits, speculative_logits = self.model.forward(
input_ids=input_ids, inference_params=inference_params
)
torch.cuda.synchronize()
graph_dict = {
"input_ids": input_ids,
"inference_params": inference_params,
"graph": graph,
"logits": logits,
"speculative_logits": speculative_logits,
}
self.cuda_graphs[batch_size] = graph_dict
def tunableop_warmup(self, batch_size: int, seqlen: int):
input_ids = torch.zeros((batch_size, 1), dtype=torch.int64, device=self.device)
n_blocks = len(self.model.blocks)
d_state = self.model.config.d_state
d_conv = self.model.config.d_conv
# Inner takes the expand multiplication
d_inner = self.model.config.d_inner
# Important: seqlen_offset must be non-zero to go through the update mechanism with the state
seqlen_offset = 1
inference_params = new_inference_params(
n_blocks=n_blocks,
# The state batch dimension must match the input batch dimension.
batch_size=batch_size,
d_state=d_state,
d_conv=d_conv,
d_inner=d_inner,
seqlen_offset=seqlen_offset,
device=self.device,
dtype=self.dtype,
)
self.model.forward(input_ids=input_ids, inference_params=inference_params)
def forward(
self, input_ids: torch.Tensor, inference_params: Any
) -> Tuple[torch.Tensor, torch.Tensor]:
bs = input_ids.shape[0]
padded_bs = bs
if bs == 3:
padded_bs = 4
elif 3 < bs <= 8:
padded_bs = 8
elif bs > 8:
padded_bs = (bs + 7) // 8 * 8
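# Illustrative bucket values: bs=1 -> 1, bs=3 -> 4, bs=5 -> 8, bs=17 -> 24.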
# Try to find an associated cuda graph
cuda_graph = self.cuda_graphs.get(padded_bs, None)
is_prefill = inference_params is None or inference_params.seqlen_offset == 0
if is_prefill or cuda_graph is None:
return self.model(
input_ids,
inference_params=inference_params,
)
# Copy inputs to the static inputs of the cuda graph
# Static inputs are potentially padded
cuda_graph["input_ids"][:bs] = input_ids
cuda_graph["inference_params"].conv_states[
:, :bs
] = inference_params.conv_states
cuda_graph["inference_params"].ssm_states[:, :bs] = inference_params.ssm_states
# Replay the graph
cuda_graph["graph"].replay()
inference_params.conv_states.copy_(
cuda_graph["inference_params"].conv_states[:, :bs]
)
inference_params.ssm_states.copy_(
cuda_graph["inference_params"].ssm_states[:, :bs]
)
# Slice output to the correct shape
speculative_logits = (
cuda_graph["speculative_logits"][:bs]
if cuda_graph["speculative_logits"] is not None
else None
)
logits = cuda_graph["logits"][:bs]
return logits, speculative_logits
def generate_token(self, batch) -> Tuple[List[Any], Optional[Any], Tuple[int, int]]:
start = time.time_ns()
input_ids = (
batch.input_ids
) # batch.past_input_ids if batch.past_input_ids is not None else batch.input_ids
batch_size, max_seqlen = input_ids.shape
# Inference params
if batch.inference_params is None:
# 0 is important here
seqlen_offset = 0
n_blocks = len(self.model.blocks)
d_state = self.model.config.d_state
d_conv = self.model.config.d_conv
d_inner = self.model.config.d_inner
inference_params = new_inference_params(
n_blocks=n_blocks,
batch_size=batch_size,
d_state=d_state,
d_conv=d_conv,
d_inner=d_inner,
seqlen_offset=seqlen_offset,
device=self.device,
dtype=self.dtype,
)
batch.inference_params = inference_params
# Forward pass
logits, speculative_logits = self.forward(
input_ids, inference_params=batch.inference_params
)
# batch.inference_params = new_inference_params
# Results
generations: List[Generation] = []
stopped = True
# Speculation is not active for causal
accepted_ids = torch.ones_like(batch.input_ids)[:, 0]
batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
batch.top_n_tokens,
batch.top_n_tokens_tensor,
torch.log_softmax(logits[:, -1], -1),
accepted_ids,
)
start_decode = time.time_ns()
# Zipped iterator
iterator = zip(
batch.requests,
batch.input_lengths,
batch.prefix_offsets,
batch.read_offsets,
logits,
batch.next_token_choosers,
batch.stopping_criterias,
batch.all_input_ids,
batch.top_n_tokens,
batch_top_token_ids,
batch_top_token_logprobs,
)
# For each member of the batch
for i, (
request,
input_length,
prefix_offset,
read_offset,
logits,
next_token_chooser,
stopping_criteria,
all_input_ids,
top_n_tokens,
top_token_ids,
top_token_logprobs,
) in enumerate(iterator):
# Select next token
next_token_id, logprobs = next_token_chooser(
all_input_ids.view(1, -1), logits[-1:, :]
)
# Append next token to all tokens
all_input_ids = torch.cat([all_input_ids, next_token_id])
new_input_length = input_length + 1
# Generated token
next_token_logprob = logprobs[-1, next_token_id]
next_token_id_squeezed = next_token_id.squeeze()
next_token_text, prefix_offset, read_offset = self.decode_token(
all_input_ids[:, 0], prefix_offset, read_offset
)
# Evaluate stopping criteria
stop, reason = stopping_criteria(
next_token_id_squeezed,
next_token_text,
)
if not stop:
stopped = False
# Shard generations
# All generations will be appended in the rust sharded client
if i % self.world_size == self.rank:
if stop:
# Decode generated tokens
output_text, _, _ = self.decode_token(
all_input_ids[:, 0],
prefix_offset=len(all_input_ids)
- stopping_criteria.current_tokens
- 1,
read_offset=len(all_input_ids)
- stopping_criteria.current_tokens,
skip_special_tokens=True,
)
# Get seed
if isinstance(next_token_chooser.choice, Sampling):
seed = next_token_chooser.choice.seed
else:
seed = None
generated_text = GeneratedText(
output_text, stopping_criteria.current_tokens, reason, seed
)
else:
generated_text = None
if stopping_criteria.current_tokens == 1 and request.prefill_logprobs:
# Remove generated token to only have prefill and add nan for first prompt token
prefill_logprobs = [float("nan")] + torch.log_softmax(
logits, -1
).gather(1, all_input_ids[1:]).squeeze(1)[
-new_input_length:-1
].tolist()
prefill_token_ids = all_input_ids[-new_input_length:-1]
prefill_texts = self.tokenizer.batch_decode(
prefill_token_ids,
clean_up_tokenization_spaces=False,
skip_special_tokens=False,
)
prefill_tokens = Tokens(
prefill_token_ids,
prefill_logprobs,
prefill_texts,
is_special=[],
)
else:
prefill_tokens = None
if top_n_tokens > 0:
toptoken_texts = self.tokenizer.batch_decode(
top_token_ids,
clean_up_tokenization_spaces=False,
skip_special_tokens=False,
)
special_toptokens = [
token_id in self.all_special_ids for token_id in top_token_ids
]
top_tokens = Tokens(
top_token_ids,
top_token_logprobs,
toptoken_texts,
special_toptokens,
)
else:
top_tokens = None
generation = Generation(
request.id,
prefill_tokens,
Tokens(
[next_token_id_squeezed],
[next_token_logprob],
[next_token_text],
[next_token_id_squeezed.item() in self.all_special_ids],
),
generated_text,
top_tokens,
)
generations.append(generation)
# Update values
batch.next_token_choosers[i] = batch.next_token_choosers[
i
].advance_grammar(next_token_id_squeezed.item())
batch.input_ids[i, 0] = next_token_id
batch.all_input_ids[i] = all_input_ids
batch.input_lengths[i] = new_input_length
batch.prefix_offsets[i] = prefix_offset
batch.read_offsets[i] = read_offset
batch.max_input_length = max(batch.max_input_length, new_input_length)
# We finished all generations in the batch; there is no next batch
if stopped:
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, None, (forward_ns, decode_ns)
# Slice unused values from prefill
batch.input_ids = batch.input_ids[:, :1]
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, batch, (forward_ns, decode_ns)
| text-generation-inference/server/text_generation_server/models/mamba.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/mamba.py",
"repo_id": "text-generation-inference",
"token_count": 15065
} | 332 |
import os
import torch
from torch.distributed import ProcessGroup
from datetime import timedelta
from loguru import logger
from text_generation_server.utils.import_utils import SYSTEM
# Tensor Parallelism settings
RANK = int(os.getenv("RANK", "0"))
WORLD_SIZE = int(os.getenv("WORLD_SIZE", "1"))
# CUDA memory fraction
MEMORY_FRACTION = float(os.getenv("CUDA_MEMORY_FRACTION", "1.0"))
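# e.g. CUDA_MEMORY_FRACTION=0.8 caps each process at 80% of its visible GPU's
# memory (applied below via torch.cuda.set_per_process_memory_fraction).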
class FakeBarrier:
def wait(self):
pass
class FakeGroup(ProcessGroup):
def __init__(self, rank, size):
self._rank = rank
self._size = size
super().__init__(rank, size)
def allreduce(self, *args, **kwargs):
return FakeBarrier()
def allgather(self, inputs, local_tensor, **kwargs):
assert (
len(inputs[0]) == len(local_tensor) == 1
), f"{len(inputs[0])} != {len(local_tensor)} != 1, and the FakeGroup is supposed to join on simple tensors"
for input_ in inputs:
input_[0].data = local_tensor[0].data
return FakeBarrier()
def barrier(self, *args, **kwargs):
return FakeBarrier()
def size(self):
return self._size
def rank(self):
return self._rank
def initialize_torch_distributed():
if torch.cuda.is_available():
from torch.distributed import ProcessGroupNCCL
# Set the device id.
assert WORLD_SIZE <= torch.cuda.device_count(), "Each process is one gpu"
device = RANK % torch.cuda.device_count()
torch.cuda.set_device(device)
torch.cuda.set_per_process_memory_fraction(MEMORY_FRACTION, device)
backend = "nccl"
options = ProcessGroupNCCL.Options()
options.is_high_priority_stream = True
options._timeout = timedelta(seconds=120)
else:
backend = "gloo"
options = None
if WORLD_SIZE == 1:
return FakeGroup(RANK, WORLD_SIZE), RANK, WORLD_SIZE
else:
if os.getenv("DEBUG", None) == "1":
return FakeGroup(RANK, WORLD_SIZE), RANK, WORLD_SIZE
if not torch.distributed.is_initialized():
# Call the init process.
if SYSTEM == "ipex":
import intel_extension_for_pytorch as ipex
if torch.xpu.is_available():
assert (
WORLD_SIZE <= torch.xpu.device_count()
), "Each process is one xpu"
device = RANK % torch.xpu.device_count()
torch.xpu.set_device(device)
device_id = torch.device(f"xpu:{RANK}")
torch.distributed.init_process_group(
backend="xccl",
world_size=WORLD_SIZE,
rank=RANK,
timeout=timedelta(seconds=120),
pg_options=options,
device_id=device_id,
)
else:
ipex.distributed.init_process_group(
backend="ccl",
world_size=WORLD_SIZE,
rank=RANK,
timeout=timedelta(seconds=120),
pg_options=options,
)
else:
device = torch.device(f"cuda:{RANK}")
torch.distributed.init_process_group(
backend=backend,
world_size=WORLD_SIZE,
rank=RANK,
timeout=timedelta(seconds=120),
pg_options=options,
device_id=device,
)
else:
logger.warning("torch.distributed is already initialized.")
return torch.distributed.group.WORLD, RANK, WORLD_SIZE
| text-generation-inference/server/text_generation_server/utils/dist.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/dist.py",
"repo_id": "text-generation-inference",
"token_count": 1916
} | 333 |
[package]
authors = ["Nicolas Patry <nicolas@huggingface.co>"]
edition = "2021"
name = "node"
version = "0.21.4-dev.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["cdylib"]
[dependencies]
napi = "2"
napi-derive = "2"
serde = { version = "1.0.163", features = ["derive"] }
tokenizers = { path = "../../tokenizers/" }
ahash = { version = "0.8.11", features = ["serde"] }
[build-dependencies]
napi-build = "2"
[profile.release]
lto = true
| tokenizers/bindings/node/Cargo.toml/0 | {
"file_path": "tokenizers/bindings/node/Cargo.toml",
"repo_id": "tokenizers",
"token_count": 221
} | 334 |
import { prependNormalizer, stripAccentsNormalizer, stripNormalizer } from '../../'
describe('stripNormalizer', () => {
it('instantiates with no parameters', () => {
const normalizer = stripNormalizer()
expect(normalizer.constructor.name).toEqual('Normalizer')
})
it('accepts `undefined` as first parameter', () => {
expect(stripNormalizer(undefined)).toBeDefined()
})
it('accepts `undefined` as second parameter', () => {
expect(stripNormalizer(false, undefined)).toBeDefined()
})
it('instantiates with one parameter', () => {
const normalizer = stripNormalizer(false)
expect(normalizer.constructor.name).toEqual('Normalizer')
})
it('instantiates with two parameters', () => {
const normalizer = stripNormalizer(false, true)
expect(normalizer.constructor.name).toEqual('Normalizer')
})
it('prepend instantiates with one parameter', () => {
const normalizer = prependNormalizer('_')
expect(normalizer.constructor.name).toEqual('Normalizer')
expect(normalizer.normalizeString('Hello')).toEqual('_Hello')
})
it('can normalize strings', () => {
const normalizer = stripNormalizer()
expect(normalizer.normalizeString(' Hello there ')).toEqual('Hello there')
})
})
describe('stripAccentsNormalizer', () => {
it('initialize', () => {
const normalizer = stripAccentsNormalizer()
expect(normalizer.constructor.name).toEqual('Normalizer')
})
})
| tokenizers/bindings/node/lib/bindings/normalizers.test.ts/0 | {
"file_path": "tokenizers/bindings/node/lib/bindings/normalizers.test.ts",
"repo_id": "tokenizers",
"token_count": 468
} | 335 |
{
"name": "tokenizers-linux-arm-gnueabihf",
"version": "0.13.4-rc1",
"os": [
"linux"
],
"cpu": [
"arm"
],
"main": "tokenizers.linux-arm-gnueabihf.node",
"files": [
"tokenizers.linux-arm-gnueabihf.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
} | tokenizers/bindings/node/npm/linux-arm-gnueabihf/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/linux-arm-gnueabihf/package.json",
"repo_id": "tokenizers",
"token_count": 278
} | 336 |
tab_spaces = 2
| tokenizers/bindings/node/rustfmt.toml/0 | {
"file_path": "tokenizers/bindings/node/rustfmt.toml",
"repo_id": "tokenizers",
"token_count": 7
} | 337 |
export type TextInputSequence = string
export type PreTokenizedInputSequence = string[]
export type InputSequence = TextInputSequence | PreTokenizedInputSequence
export type TextEncodeInput = TextInputSequence | [TextInputSequence, TextInputSequence]
export type PreTokenizedEncodeInput = PreTokenizedInputSequence | [PreTokenizedInputSequence, PreTokenizedInputSequence]
export type EncodeInput = TextEncodeInput | PreTokenizedEncodeInput
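// Illustrative (assumed) values for these aliases:
//   const single: InputSequence = 'Hello world'
//   const pair: TextEncodeInput = ['A question?', 'Some context.']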
| tokenizers/bindings/node/types.ts/0 | {
"file_path": "tokenizers/bindings/node/types.ts",
"repo_id": "tokenizers",
"token_count": 114
} | 338 |
<jupyter_start><jupyter_code>!wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt -O /tmp/bert-base-uncased-vocab.txt
from tokenizers import BertWordPieceTokenizer
from tokenizers.tools import EncodingVisualizer
EncodingVisualizer.unk_token_regex.search("aaa[udsnk]aaa")
text = """Mathias Bynens 'Z͑ͫ̓ͪ̂ͫ̽͏̴̙̤̞͉͚̯̞̠͍A̴̵̜̰͔ͫ͗͢L̠ͨͧͩ͘G̴̻͈͍͔̹̑͗̎̅͛́Ǫ̵̹̻̝̳͂̌̌͘!͖̬̰̙̗̿̋ͥͥ̂ͣ̐́́͜͞': Whenever you’re working on a piece of JavaScript code that deals with strings or regular expressions in some way, just add a unit test that contains a pile of poo (💩) in a string, 💩💩💩💩💩💩💩💩💩💩💩💩 and see if anything breaks. It’s a quick, fun, and easy way to see if your code supports astral symbols. Once you’ve found a Unicode-related bug in your code, all you need to do is apply the techniques discussed in this post to fix it."""
tokenizer = BertWordPieceTokenizer("/tmp/bert-base-uncased-vocab.txt", lowercase=True)
visualizer = EncodingVisualizer(tokenizer=tokenizer)<jupyter_output><empty_output><jupyter_text>Visualizing Tokens With No Annotations<jupyter_code>visualizer(text)<jupyter_output><empty_output><jupyter_text>Visualizing Tokens With Aligned AnnotationsFirst we make some annotations with the Annotation class<jupyter_code>from tokenizers.tools import Annotation
anno1 = Annotation(start=0, end=2, label="foo")
anno2 = Annotation(start=2, end=4, label="bar")
anno3 = Annotation(start=6, end=8, label="poo")
anno4 = Annotation(start=9, end=12, label="shoe")
annotations = [
anno1,
anno2,
anno3,
anno4,
Annotation(start=23, end=30, label="random tandem bandem sandem landem fandom"),
Annotation(start=63, end=70, label="foo"),
Annotation(start=80, end=95, label="bar"),
Annotation(start=120, end=128, label="bar"),
Annotation(start=152, end=155, label="poo"),
]
visualizer(text, annotations=annotations)<jupyter_output><empty_output><jupyter_text>Using A Custom Annotation FormatEvery system has its own representation of annotations. That's why we can instantiate the EncodingVisualizer with a conversion function.<jupyter_code>funnyAnnotations = [dict(startPlace=i, endPlace=i + 3, theTag=str(i)) for i in range(0, 20, 4)]
funnyAnnotations
def converter(funny):
return Annotation(start=funny["startPlace"], end=funny["endPlace"], label=funny["theTag"])
visualizer = EncodingVisualizer(tokenizer=tokenizer, default_to_notebook=True, annotation_converter=converter)
visualizer(text, annotations=funnyAnnotations)<jupyter_output><empty_output><jupyter_text>Trying with Roberta<jupyter_code>!wget "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json" -O /tmp/roberta-base-vocab.json
!wget "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt" -O /tmp/roberta-base-merges.txt
from tokenizers import ByteLevelBPETokenizer
roberta_tokenizer = ByteLevelBPETokenizer.from_file("/tmp/roberta-base-vocab.json", "/tmp/roberta-base-merges.txt")
roberta_visualizer = EncodingVisualizer(tokenizer=roberta_tokenizer, default_to_notebook=True)
roberta_visualizer(text, annotations=annotations)<jupyter_output><empty_output> | tokenizers/bindings/python/examples/using_the_visualizer.ipynb/0 | {
"file_path": "tokenizers/bindings/python/examples/using_the_visualizer.ipynb",
"repo_id": "tokenizers",
"token_count": 1222
} | 339 |
# Generated content DO NOT EDIT
from .. import pre_tokenizers
PreTokenizer = pre_tokenizers.PreTokenizer
BertPreTokenizer = pre_tokenizers.BertPreTokenizer
ByteLevel = pre_tokenizers.ByteLevel
CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit
Digits = pre_tokenizers.Digits
FixedLength = pre_tokenizers.FixedLength
Metaspace = pre_tokenizers.Metaspace
Punctuation = pre_tokenizers.Punctuation
Sequence = pre_tokenizers.Sequence
Split = pre_tokenizers.Split
UnicodeScripts = pre_tokenizers.UnicodeScripts
Whitespace = pre_tokenizers.Whitespace
WhitespaceSplit = pre_tokenizers.WhitespaceSplit
| tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py",
"repo_id": "tokenizers",
"token_count": 188
} | 340 |
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::types::*;
use tk::tokenizer::{Offsets, PaddingDirection};
use tk::utils::truncation::TruncationDirection;
use tokenizers as tk;
use crate::error::{deprecation_warning, PyError};
/// The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`.
#[pyclass(dict, module = "tokenizers", name = "Encoding")]
#[repr(transparent)]
pub struct PyEncoding {
pub encoding: tk::tokenizer::Encoding,
}
impl From<tk::tokenizer::Encoding> for PyEncoding {
fn from(v: tk::tokenizer::Encoding) -> Self {
Self { encoding: v }
}
}
#[pymethods]
impl PyEncoding {
#[new]
#[pyo3(text_signature = None)]
fn new() -> Self {
Self {
encoding: tk::tokenizer::Encoding::default(),
}
}
fn __getstate__(&self, py: Python) -> PyResult<PyObject> {
let data = serde_json::to_string(&self.encoding).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to pickle Encoding: {e}"
))
})?;
Ok(PyBytes::new(py, data.as_bytes()).into())
}
fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> {
match state.extract::<&[u8]>(py) {
Ok(s) => {
self.encoding = serde_json::from_slice(s).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to unpickle Encoding: {e}"
))
})?;
Ok(())
}
Err(e) => Err(e),
}
}
fn __repr__(&self) -> PyResult<String> {
Ok(format!(
"Encoding(num_tokens={}, attributes=[ids, type_ids, tokens, offsets, \
attention_mask, special_tokens_mask, overflowing])",
self.encoding.get_ids().len()
))
}
fn __len__(&self) -> PyResult<usize> {
Ok(self.encoding.len())
}
/// Merge the list of encodings into one final :class:`~tokenizers.Encoding`
///
/// Args:
/// encodings (A :obj:`List` of :class:`~tokenizers.Encoding`):
/// The list of encodings that should be merged in one
///
/// growing_offsets (:obj:`bool`, defaults to :obj:`True`):
/// Whether the offsets should accumulate while merging
///
/// Returns:
/// :class:`~tokenizers.Encoding`: The resulting Encoding
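    ///
    /// Example (an illustrative sketch; ``enc_a`` and ``enc_b`` are assumed
    /// to be pre-built encodings)::
    ///
    ///     merged = Encoding.merge([enc_a, enc_b], growing_offsets=True)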
#[staticmethod]
#[pyo3(signature = (encodings, growing_offsets = true))]
#[pyo3(text_signature = "(encodings, growing_offsets=True)")]
fn merge(encodings: Vec<PyRef<PyEncoding>>, growing_offsets: bool) -> PyEncoding {
tk::tokenizer::Encoding::merge(
encodings.into_iter().map(|e| e.encoding.clone()),
growing_offsets,
)
.into()
}
/// The number of sequences represented
///
/// Returns:
/// :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding`
#[getter]
fn get_n_sequences(&self) -> usize {
self.encoding.n_sequences()
}
/// Set the given sequence index
///
/// Set the given sequence index for the whole range of tokens contained in this
/// :class:`~tokenizers.Encoding`.
#[pyo3(text_signature = "(self, sequence_id)")]
fn set_sequence_id(&mut self, sequence_id: usize) {
self.encoding.set_sequence_id(sequence_id);
}
/// The generated IDs
///
/// The IDs are the main input to a Language Model. They are the token indices,
/// the numerical representations that a LM understands.
///
/// Returns:
/// :obj:`List[int]`: The list of IDs
#[getter]
fn get_ids(&self) -> Vec<u32> {
self.encoding.get_ids().to_vec()
}
/// The generated tokens
///
/// They are the string representation of the IDs.
///
/// Returns:
/// :obj:`List[str]`: The list of tokens
#[getter]
fn get_tokens(&self) -> Vec<String> {
self.encoding.get_tokens().to_vec()
}
/// The generated word indices.
///
/// .. warning::
/// This is deprecated and will be removed in a future version.
/// Please use :obj:`~tokenizers.Encoding.word_ids` instead.
///
/// They represent the index of the word associated to each token.
/// When the input is pre-tokenized, they correspond to the ID of the given input label,
/// otherwise they correspond to the word indices as defined by the
/// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
///
/// For special tokens and such (any token that was generated from something that was
/// not part of the input), the output is :obj:`None`
///
/// Returns:
/// A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
#[getter]
fn get_words(&self, py: Python<'_>) -> PyResult<Vec<Option<u32>>> {
deprecation_warning(
py,
"0.9.4",
"Encoding.words is deprecated, please use Encoding.word_ids instead.",
)?;
Ok(self.get_word_ids())
}
/// The generated word indices.
///
/// They represent the index of the word associated to each token.
/// When the input is pre-tokenized, they correspond to the ID of the given input label,
/// otherwise they correspond to the words indices as defined by the
/// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
///
/// For special tokens and such (any token that was generated from something that was
/// not part of the input), the output is :obj:`None`
///
/// Returns:
/// A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
#[getter]
fn get_word_ids(&self) -> Vec<Option<u32>> {
self.encoding.get_word_ids().to_vec()
}
/// The generated sequence indices.
///
/// They represent the index of the input sequence associated to each token.
/// The sequence id can be None if the token is not related to any input sequence,
/// like for example with special tokens.
///
/// Returns:
/// A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index.
#[getter]
fn get_sequence_ids(&self) -> Vec<Option<usize>> {
self.encoding.get_sequence_ids()
}
/// The generated type IDs
///
/// Generally used for tasks like sequence classification or question answering,
/// these tokens let the LM know which input sequence corresponds to each token.
///
/// Returns:
/// :obj:`List[int]`: The list of type ids
#[getter]
fn get_type_ids(&self) -> Vec<u32> {
self.encoding.get_type_ids().to_vec()
}
/// The offsets associated to each token
///
/// These offsets let you slice the input string, and thus retrieve the original
/// part that led to producing the corresponding token.
///
/// Returns:
/// A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets
#[getter]
fn get_offsets(&self) -> Vec<(usize, usize)> {
self.encoding.get_offsets().to_vec()
}
/// The special token mask
///
/// This indicates which tokens are special tokens, and which are not.
///
/// Returns:
/// :obj:`List[int]`: The special tokens mask
#[getter]
fn get_special_tokens_mask(&self) -> Vec<u32> {
self.encoding.get_special_tokens_mask().to_vec()
}
/// The attention mask
///
/// This indicates to the LM which tokens should be attended to, and which should not.
/// This is especially important when batching sequences, where we need to apply
/// padding.
///
/// Returns:
/// :obj:`List[int]`: The attention mask
#[getter]
fn get_attention_mask(&self) -> Vec<u32> {
self.encoding.get_attention_mask().to_vec()
}
/// A :obj:`List` of overflowing :class:`~tokenizers.Encoding`
///
/// When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting
/// the output into as many pieces as required to match the specified maximum length.
/// This field lets you retrieve all the subsequent pieces.
///
/// When you use pairs of sequences, the overflowing pieces will contain enough
/// variations to cover all the possible combinations, while respecting the provided
/// maximum length.
#[getter]
fn get_overflowing(&self) -> Vec<PyEncoding> {
self.encoding
.get_overflowing()
.clone()
.into_iter()
.map(|e| e.into())
.collect()
}
/// Get the encoded tokens corresponding to the word at the given index
/// in one of the input sequences.
///
/// Args:
/// word_index (:obj:`int`):
/// The index of a word in one of the input sequences.
/// sequence_index (:obj:`int`, defaults to :obj:`0`):
/// The index of the sequence that contains the target word
///
/// Returns:
/// :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)`
#[pyo3(signature = (word_index, sequence_index = 0))]
#[pyo3(text_signature = "(self, word_index, sequence_index=0)")]
fn word_to_tokens(&self, word_index: u32, sequence_index: usize) -> Option<(usize, usize)> {
self.encoding.word_to_tokens(word_index, sequence_index)
}
/// Get the offsets of the word at the given index in one of the input sequences.
///
/// Args:
/// word_index (:obj:`int`):
/// The index of a word in one of the input sequences.
/// sequence_index (:obj:`int`, defaults to :obj:`0`):
/// The index of the sequence that contains the target word
///
/// Returns:
/// :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)`
#[pyo3(signature = (word_index, sequence_index = 0))]
#[pyo3(text_signature = "(self, word_index, sequence_index=0)")]
fn word_to_chars(&self, word_index: u32, sequence_index: usize) -> Option<Offsets> {
self.encoding.word_to_chars(word_index, sequence_index)
}
/// Get the index of the sequence represented by the given token.
///
/// In the general use case, this method returns :obj:`0` for a single sequence or
/// the first sequence of a pair, and :obj:`1` for the second sequence of a pair
///
/// Args:
/// token_index (:obj:`int`):
/// The index of a token in the encoded sequence.
///
/// Returns:
/// :obj:`int`: The sequence id of the given token
#[pyo3(text_signature = "(self, token_index)")]
fn token_to_sequence(&self, token_index: usize) -> Option<usize> {
self.encoding.token_to_sequence(token_index)
}
/// Get the offsets of the token at the given index.
///
/// The returned offsets are related to the input sequence that contains the
/// token. In order to determine in which input sequence it belongs, you
/// must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
///
/// Args:
/// token_index (:obj:`int`):
/// The index of a token in the encoded sequence.
///
/// Returns:
/// :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)`
#[pyo3(text_signature = "(self, token_index)")]
fn token_to_chars(&self, token_index: usize) -> Option<Offsets> {
let (_, offsets) = self.encoding.token_to_chars(token_index)?;
Some(offsets)
}
/// Get the index of the word that contains the token in one of the input sequences.
///
/// The returned word index is related to the input sequence that contains
/// the token. In order to determine in which input sequence it belongs, you
/// must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
///
/// Args:
/// token_index (:obj:`int`):
/// The index of a token in the encoded sequence.
///
/// Returns:
/// :obj:`int`: The index of the word in the relevant input sequence.
#[pyo3(text_signature = "(self, token_index)")]
fn token_to_word(&self, token_index: usize) -> Option<u32> {
let (_, word_idx) = self.encoding.token_to_word(token_index)?;
Some(word_idx)
}
/// Get the token that contains the char at the given position in the input sequence.
///
/// Args:
/// char_pos (:obj:`int`):
/// The position of a char in the input string
/// sequence_index (:obj:`int`, defaults to :obj:`0`):
/// The index of the sequence that contains the target char
///
/// Returns:
/// :obj:`int`: The index of the token that contains this char in the encoded sequence
#[pyo3(signature = (char_pos, sequence_index = 0))]
#[pyo3(text_signature = "(self, char_pos, sequence_index=0)")]
fn char_to_token(&self, char_pos: usize, sequence_index: usize) -> Option<usize> {
self.encoding.char_to_token(char_pos, sequence_index)
}
/// Get the word that contains the char at the given position in the input sequence.
///
/// Args:
/// char_pos (:obj:`int`):
/// The position of a char in the input string
/// sequence_index (:obj:`int`, defaults to :obj:`0`):
/// The index of the sequence that contains the target char
///
/// Returns:
/// :obj:`int`: The index of the word that contains this char in the input sequence
#[pyo3(signature = (char_pos, sequence_index = 0))]
#[pyo3(text_signature = "(self, char_pos, sequence_index=0)")]
fn char_to_word(&self, char_pos: usize, sequence_index: usize) -> Option<u32> {
self.encoding.char_to_word(char_pos, sequence_index)
}
/// Pad the :class:`~tokenizers.Encoding` at the given length
///
/// Args:
/// length (:obj:`int`):
/// The desired length
///
/// direction: (:obj:`str`, defaults to :obj:`right`):
/// The expected padding direction. Can be either :obj:`right` or :obj:`left`
///
/// pad_id (:obj:`int`, defaults to :obj:`0`):
/// The ID corresponding to the padding token
///
/// pad_type_id (:obj:`int`, defaults to :obj:`0`):
/// The type ID corresponding to the padding token
///
/// pad_token (:obj:`str`, defaults to `[PAD]`):
/// The pad token to use
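    ///
    /// Example (illustrative; the keyword values here are assumptions)::
    ///
    ///     encoding.pad(128, direction="left", pad_id=0, pad_token="[PAD]")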
#[pyo3(signature = (length, **kwargs))]
#[pyo3(
text_signature = "(self, length, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]')"
)]
fn pad(&mut self, length: usize, kwargs: Option<&Bound<'_, PyDict>>) -> PyResult<()> {
let mut pad_id = 0;
let mut pad_type_id = 0;
let mut pad_token = "[PAD]".to_string();
let mut direction = PaddingDirection::Right;
if let Some(kwargs) = kwargs {
for (key, value) in kwargs {
let key: String = key.extract()?;
match key.as_ref() {
"direction" => {
let value: String = value.extract()?;
direction = match value.as_ref() {
"left" => Ok(PaddingDirection::Left),
"right" => Ok(PaddingDirection::Right),
other => Err(PyError(format!(
"Unknown `direction`: `{other}`. Use \
one of `left` or `right`"
))
.into_pyerr::<exceptions::PyValueError>()),
}?;
}
"pad_id" => pad_id = value.extract()?,
"pad_type_id" => pad_type_id = value.extract()?,
"pad_token" => pad_token = value.extract()?,
_ => println!("Ignored unknown kwarg option {key}"),
}
}
}
self.encoding
.pad(length, pad_id, pad_type_id, &pad_token, direction);
Ok(())
}
/// Truncate the :class:`~tokenizers.Encoding` at the given length
///
/// If this :class:`~tokenizers.Encoding` represents multiple sequences, this
/// information is lost when truncating: the result is considered as representing a single sequence.
///
/// Args:
/// max_length (:obj:`int`):
/// The desired length
///
/// stride (:obj:`int`, defaults to :obj:`0`):
/// The length of previous content to be included in each overflowing piece
///
/// direction (:obj:`str`, defaults to :obj:`right`):
/// Truncate direction
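    ///
    /// Example (illustrative)::
    ///
    ///     encoding.truncate(512, stride=0, direction="right")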
#[pyo3(signature = (max_length, stride = 0, direction = "right"))]
#[pyo3(text_signature = "(self, max_length, stride=0, direction='right')")]
fn truncate(&mut self, max_length: usize, stride: usize, direction: &str) -> PyResult<()> {
let tdir = match direction {
"left" => Ok(TruncationDirection::Left),
"right" => Ok(TruncationDirection::Right),
_ => Err(
PyError(format!("Invalid truncation direction value : {direction}"))
.into_pyerr::<exceptions::PyValueError>(),
),
}?;
self.encoding.truncate(max_length, stride, tdir);
Ok(())
}
}
| tokenizers/bindings/python/src/encoding.rs/0 | {
"file_path": "tokenizers/bindings/python/src/encoding.rs",
"repo_id": "tokenizers",
"token_count": 7328
} | 341 |
import argparse
import inspect
import os
from pathlib import Path
INDENT = " " * 4
GENERATED_COMMENT = "# Generated content DO NOT EDIT\n"
def do_indent(text: str, indent: str):
return text.replace("\n", f"\n{indent}")
def function(obj, indent, text_signature=None):
if text_signature is None:
text_signature = obj.__text_signature__
string = ""
string += f"{indent}def {obj.__name__}{text_signature}:\n"
indent += INDENT
string += f'{indent}"""\n'
string += f"{indent}{do_indent(obj.__doc__, indent)}\n"
string += f'{indent}"""\n'
string += f"{indent}pass\n"
string += "\n"
string += "\n"
return string
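# For example (an illustrative sketch), a builtin `foo` with text signature
# "(self)" and docstring "Do foo." renders as:
#     def foo(self):
#         """
#         Do foo.
#         """
#         pass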
def member_sort(member):
if inspect.isclass(member):
value = 10 + len(inspect.getmro(member))
else:
value = 1
return value
def fn_predicate(obj):
value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj)
if value:
return obj.__doc__ and obj.__text_signature__ and not obj.__name__.startswith("_")
if inspect.isgetsetdescriptor(obj):
return obj.__doc__ and not obj.__name__.startswith("_")
return False
def get_module_members(module):
members = [
member
for name, member in inspect.getmembers(module)
if not name.startswith("_") and not inspect.ismodule(member)
]
members.sort(key=member_sort)
return members
def pyi_file(obj, indent=""):
string = ""
if inspect.ismodule(obj):
string += GENERATED_COMMENT
members = get_module_members(obj)
for member in members:
string += pyi_file(member, indent)
elif inspect.isclass(obj):
indent += INDENT
mro = inspect.getmro(obj)
if len(mro) > 2:
inherit = f"({mro[1].__name__})"
else:
inherit = ""
string += f"class {obj.__name__}{inherit}:\n"
body = ""
if obj.__doc__:
body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n'
fns = inspect.getmembers(obj, fn_predicate)
# Init
if obj.__text_signature__:
body += f"{indent}def __init__{obj.__text_signature__}:\n"
body += f"{indent+INDENT}pass\n"
body += "\n"
for name, fn in fns:
body += pyi_file(fn, indent=indent)
if not body:
body += f"{indent}pass\n"
string += body
string += "\n\n"
elif inspect.isbuiltin(obj):
string += f"{indent}@staticmethod\n"
string += function(obj, indent)
elif inspect.ismethoddescriptor(obj):
string += function(obj, indent)
elif inspect.isgetsetdescriptor(obj):
# TODO it would be interesting to add the setter maybe ?
string += f"{indent}@property\n"
string += function(obj, indent, text_signature="(self)")
else:
raise Exception(f"Object {obj} is not supported")
return string
def py_file(module, origin):
members = get_module_members(module)
string = GENERATED_COMMENT
string += f"from .. import {origin}\n"
string += "\n"
for member in members:
name = member.__name__
string += f"{name} = {origin}.{name}\n"
return string
import subprocess
def do_ruff(code, is_pyi: bool):
command = ["ruff", "format", "--config", "pyproject.toml", "--silent", "-"]
if is_pyi:
command.extend(["--stdin-filename", "test.pyi"])
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, _ = process.communicate(input=code.encode("utf-8"))
return stdout.decode("utf-8")
def write(module, directory, origin, check=False):
submodules = [(name, member) for name, member in inspect.getmembers(module) if inspect.ismodule(member)]
filename = os.path.join(directory, "__init__.pyi")
pyi_content = pyi_file(module)
pyi_content = do_ruff(pyi_content, is_pyi=True)
os.makedirs(directory, exist_ok=True)
if check:
with open(filename, "r") as f:
data = f.read()
assert data == pyi_content, f"The content of {filename} seems outdated, please run `python stub.py`"
else:
with open(filename, "w") as f:
f.write(pyi_content)
filename = os.path.join(directory, "__init__.py")
py_content = py_file(module, origin)
py_content = do_ruff(py_content, is_pyi=False)
os.makedirs(directory, exist_ok=True)
is_auto = False
if not os.path.exists(filename):
is_auto = True
else:
with open(filename, "r") as f:
line = f.readline()
if line == GENERATED_COMMENT:
is_auto = True
if is_auto:
if check:
with open(filename, "r") as f:
data = f.read()
assert data == py_content, f"The content of {filename} seems outdated, please run `python stub.py`"
else:
with open(filename, "w") as f:
f.write(py_content)
for name, submodule in submodules:
write(submodule, os.path.join(directory, name), f"{name}", check=check)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--check", action="store_true")
args = parser.parse_args()
import tokenizers
write(tokenizers.tokenizers, "py_src/tokenizers/", "tokenizers", check=args.check)
| tokenizers/bindings/python/stub.py/0 | {
"file_path": "tokenizers/bindings/python/stub.py",
"repo_id": "tokenizers",
"token_count": 2392
} | 342 |
# Models
<tokenizerslangcontent>
<python>
## BPE
[[autodoc]] tokenizers.models.BPE
## Model
[[autodoc]] tokenizers.models.Model
## Unigram
[[autodoc]] tokenizers.models.Unigram
## WordLevel
[[autodoc]] tokenizers.models.WordLevel
## WordPiece
[[autodoc]] tokenizers.models.WordPiece
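A minimal usage sketch (illustrative; the `"[UNK]"` unknown token is an
assumed choice, not a requirement):

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE

# Wrap an (initially empty) BPE model in a Tokenizer, ready for training.
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
```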
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/api/models.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/api/models.mdx",
"repo_id": "tokenizers",
"token_count": 179
} | 343 |
Installation with npm
----------------------------------------------------------------------------------------------------
You can simply install 🤗 Tokenizers with npm using::
npm install tokenizers
| tokenizers/docs/source/installation/node.inc/0 | {
"file_path": "tokenizers/docs/source/installation/node.inc",
"repo_id": "tokenizers",
"token_count": 31
} | 344 |
#[macro_use]
extern crate criterion;
mod common;
use common::{iter_bench_encode, iter_bench_encode_batch, iter_bench_train};
use criterion::{Criterion, Throughput};
use std::hint::black_box;
use tokenizers::{
models::{bpe::BpeTrainerBuilder, TrainerWrapper},
EncodeInput, Tokenizer,
};
static BATCH_SIZE: usize = 1_000;
pub fn llama3(c: &mut Criterion) {
let data = std::fs::read_to_string("data/big.txt").unwrap();
let mut group = c.benchmark_group("llama3-encode");
group.throughput(Throughput::Bytes(data.len() as u64));
let mut lines: Vec<EncodeInput> = vec![];
let mut batches: Vec<Vec<EncodeInput>> = vec![vec![]];
for line in data.lines() {
let line: EncodeInput = line.into();
lines.push(line.clone());
if batches.last().unwrap().len() >= BATCH_SIZE {
batches.push(vec![]);
}
batches.last_mut().unwrap().push(line);
}
let tokenizer = Tokenizer::from_file("data/llama-3-tokenizer.json").unwrap();
group.bench_function("llama3-offsets", |b| {
let data: Vec<_> = data.lines().collect();
let add_special_tokens = false;
b.iter(|| {
tokenizer
.encode_batch_char_offsets(black_box(data.clone()), add_special_tokens)
.unwrap()
})
});
group.bench_function("llama3-encode", |b| {
b.iter_custom(|iters| iter_bench_encode(iters, &tokenizer, &lines))
});
group.bench_function("llama3-batch", |b| {
b.iter_custom(|iters| iter_bench_encode_batch(iters, &tokenizer, &batches))
});
let mut trainer: TrainerWrapper = BpeTrainerBuilder::default()
.show_progress(false)
.build()
.into();
let mut tokenizer = Tokenizer::from_file("data/llama-3-tokenizer.json").unwrap();
group.bench_function("BPE Train vocabulary (big)", |b| {
b.iter_custom(|iters| {
iter_bench_train(
iters,
&mut tokenizer,
&mut trainer,
vec!["data/big.txt".to_string()],
)
})
});
group.finish();
}
criterion_group! {
name = llama_3;
config = Criterion::default().sample_size(10);
targets = llama3
}
criterion_main!(llama_3);
| tokenizers/tokenizers/benches/llama3_benchmark.rs/0 | {
"file_path": "tokenizers/tokenizers/benches/llama3_benchmark.rs",
"repo_id": "tokenizers",
"token_count": 1034
} | 345 |
// A dependency graph that contains any wasm must be imported
// asynchronously. This `bootstrap.js` file does the single async import, so
// that no one else needs to worry about it again.
import("./index.js")
.catch(e => console.error("Error importing `index.js`:", e));
| tokenizers/tokenizers/examples/unstable_wasm/www/bootstrap.js/0 | {
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/bootstrap.js",
"repo_id": "tokenizers",
"token_count": 79
} | 346 |
//! [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model.
use std::{iter, mem};
mod model;
mod serialization;
pub mod trainer;
mod word;
type Pair = (u32, u32);
/// Errors that can be encountered while using or constructing a `BPE` model.
#[derive(thiserror::Error, Debug)]
pub enum Error {
/// An error encountered while reading files mainly.
#[error("IoError: {0}")]
Io(#[from] std::io::Error),
/// An error forwarded from Serde, while parsing JSON
#[error("JsonError: {0}")]
JsonError(#[from] serde_json::Error),
/// When the vocab.json file is in the wrong format
#[error("Bad vocabulary json file")]
BadVocabulary,
/// When the merges.txt file is in the wrong format. This error holds the line
/// number of the line that caused the error.
#[error("Merges text file invalid at line {0}")]
BadMerges(usize),
/// If a token found in merges, is not in the vocab
#[error("Token `{0}` out of vocabulary")]
MergeTokenOutOfVocabulary(String),
/// If the provided unk token is out of vocabulary
#[error("Unk token `{0}` not found in the vocabulary")]
UnkTokenOutOfVocabulary(String),
/// Dropout not between 0 and 1.
#[error("Dropout should be between 0 and 1, inclusive")]
InvalidDropout,
}
/// Provides access to the `FirstLastIterator` for any Iterator
pub(crate) trait WithFirstLastIterator: Iterator + Sized {
fn with_first_and_last(self) -> FirstLastIterator<Self>;
}
impl<I> WithFirstLastIterator for I
where
I: Iterator,
{
fn with_first_and_last(self) -> FirstLastIterator<Self> {
FirstLastIterator {
first: true,
iter: self.peekable(),
}
}
}
/// Provides information about whether an item is the first and/or the last of the iterator
pub(crate) struct FirstLastIterator<I>
where
I: Iterator,
{
first: bool,
iter: iter::Peekable<I>,
}
impl<I> Iterator for FirstLastIterator<I>
where
I: Iterator,
{
/// (is_first, is_last, item)
type Item = (bool, bool, I::Item);
fn next(&mut self) -> Option<Self::Item> {
let first = mem::replace(&mut self.first, false);
self.iter
.next()
.map(|e| (first, self.iter.peek().is_none(), e))
}
}
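// A sketch of the behaviour (illustrative): iterating `["a", "b", "c"]`
// through `with_first_and_last()` yields (true, false, "a"),
// (false, false, "b"), (false, true, "c").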
// Re-export
pub use model::*;
pub use trainer::*;
use word::*;
| tokenizers/tokenizers/src/models/bpe/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/bpe/mod.rs",
"repo_id": "tokenizers",
"token_count": 893
} | 347 |
use super::{super::OrderedVocabIter, WordPiece, WordPieceBuilder};
use ahash::{AHashMap, AHashSet};
use serde::{
de::{MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
impl Serialize for WordPiece {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut model = serializer.serialize_struct("WordPiece", 5)?;
// Small fields first
model.serialize_field("type", "WordPiece")?;
model.serialize_field("unk_token", &self.unk_token)?;
model.serialize_field("continuing_subword_prefix", &self.continuing_subword_prefix)?;
model.serialize_field("max_input_chars_per_word", &self.max_input_chars_per_word)?;
// Then large ones
let ordered_vocab = OrderedVocabIter::new(&self.vocab_r);
model.serialize_field("vocab", &ordered_vocab)?;
model.end()
}
}
impl<'de> Deserialize<'de> for WordPiece {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_struct(
"WordPiece",
&[
"type",
"unk_token",
"continuing_subword_prefix",
"max_input_chars_per_word",
"vocab",
],
WordPieceVisitor,
)
}
}
struct WordPieceVisitor;
impl<'de> Visitor<'de> for WordPieceVisitor {
type Value = WordPiece;
fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "struct WordPiece")
}
fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error>
where
V: MapAccess<'de>,
{
let mut builder = WordPieceBuilder::new();
let mut missing_fields = vec![
// for backward compatibility, the "type" field is not mandatory
"unk_token",
"continuing_subword_prefix",
"max_input_chars_per_word",
"vocab",
]
.into_iter()
.collect::<AHashSet<_>>();
while let Some(key) = map.next_key::<String>()? {
match key.as_ref() {
"unk_token" => builder = builder.unk_token(map.next_value()?),
"continuing_subword_prefix" => {
builder = builder.continuing_subword_prefix(map.next_value()?)
}
"max_input_chars_per_word" => {
builder = builder.max_input_chars_per_word(map.next_value()?)
}
"vocab" => {
let vocab: AHashMap<String, u32> = map.next_value()?;
builder = builder.vocab(vocab)
}
"type" => match map.next_value()? {
"WordPiece" => {}
u => {
return Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(u),
&"WordPiece",
))
}
},
_ => {}
}
missing_fields.remove::<str>(&key);
}
if !missing_fields.is_empty() {
Err(serde::de::Error::missing_field(
missing_fields.iter().next().unwrap(),
))
} else {
Ok(builder.build().map_err(serde::de::Error::custom)?)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn serde() {
let wp = WordPiece::default();
let wp_s = "{\
\"type\":\"WordPiece\",\
\"unk_token\":\"[UNK]\",\
\"continuing_subword_prefix\":\"##\",\
\"max_input_chars_per_word\":100,\
\"vocab\":{}\
}";
assert_eq!(serde_json::to_string(&wp).unwrap(), wp_s);
assert_eq!(serde_json::from_str::<WordPiece>(wp_s).unwrap(), wp);
}
#[test]
fn deserialization_should_fail() {
let missing_unk = "{\
\"type\":\"WordPiece\",\
\"continuing_subword_prefix\":\"##\",\
\"max_input_chars_per_word\":100,\
\"vocab\":{}\
}";
assert!(serde_json::from_str::<WordPiece>(missing_unk)
.unwrap_err()
.to_string()
.starts_with("missing field `unk_token`"));
let wrong_type = "{\
\"type\":\"WordLevel\",\
\"unk_token\":\"[UNK]\",\
\"vocab\":{}\
}";
assert!(serde_json::from_str::<WordPiece>(wrong_type)
.unwrap_err()
.to_string()
.starts_with("invalid value: string \"WordLevel\", expected WordPiece"));
}
}
| tokenizers/tokenizers/src/models/wordpiece/serialization.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/wordpiece/serialization.rs",
"repo_id": "tokenizers",
"token_count": 2534
} | 348 |
use crate::tokenizer::{Decoder, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use serde::{de, Deserialize, Deserializer, Serialize};
/// Enum representing options for the metaspace prepending scheme.
#[derive(Debug, Clone, PartialEq, Serialize, Eq, Deserialize, Copy)]
#[serde(rename_all = "snake_case")]
pub enum PrependScheme {
    /// Specifies that the replacement character should be prepended only once, on the first split.
    First,
    /// Specifies that the replacement character should never be prepended.
    Never,
    /// Specifies that the replacement character should always be prepended.
    Always,
}
impl std::fmt::Display for PrependScheme {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.serialize(f)
}
}
#[derive(Debug, Clone, PartialEq, Serialize, Eq)]
/// Replaces all whitespace with the provided meta character and then
/// splits on this character
#[serde(tag = "type")]
pub struct Metaspace {
replacement: char,
pub prepend_scheme: PrependScheme,
pub split: bool,
#[serde(skip)]
str_rep: String,
}
impl<'de> Deserialize<'de> for Metaspace {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
enum Type {
Metaspace,
}
fn default_prepend_scheme_value() -> PrependScheme {
PrependScheme::Always
}
#[derive(Deserialize)]
pub struct MetaspaceHelper {
#[serde(rename = "type")]
_type: Type,
replacement: char,
pub add_prefix_space: Option<bool>,
#[serde(default = "default_prepend_scheme_value")]
pub prepend_scheme: PrependScheme,
pub split: Option<bool>,
#[serde(rename = "str_rep")]
_str_rep: Option<String>,
}
let mut helper = MetaspaceHelper::deserialize(deserializer)?;
if let Some(false) = helper.add_prefix_space {
if helper.prepend_scheme != PrependScheme::Never {
return Err(de::Error::custom(
"add_prefix_space does not match declared prepend_scheme",
));
}
helper.prepend_scheme = PrependScheme::Never;
}
let instance = Self::new(
helper.replacement,
helper.prepend_scheme,
helper.split.unwrap_or(true),
);
Ok(instance)
}
}
impl Metaspace {
pub fn new(replacement: char, prepend_scheme: PrependScheme, split: bool) -> Self {
Self {
replacement,
str_rep: replacement.to_string(),
prepend_scheme,
split,
}
}
pub fn get_replacement(&self) -> char {
self.replacement
}
pub fn set_replacement(&mut self, replacement: char) {
self.replacement = replacement;
self.str_rep = replacement.to_string();
}
pub fn get_split(&self) -> bool {
self.split
}
pub fn set_split(&mut self, split: bool) {
self.split = split;
}
pub fn get_prepend_scheme(&self) -> PrependScheme {
self.prepend_scheme
}
pub fn set_prepend_scheme(&mut self, scheme: PrependScheme) {
self.prepend_scheme = scheme;
}
}
impl Default for Metaspace {
fn default() -> Self {
Self::new('▁', PrependScheme::Always, true)
}
}
impl PreTokenizer for Metaspace {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, mut normalized| {
normalized.replace(' ', &self.str_rep)?;
match self.prepend_scheme {
PrependScheme::Always => {
if !normalized.get().starts_with(self.replacement) {
normalized.prepend(&self.str_rep);
}
}
PrependScheme::First => {
if !normalized.get().starts_with(self.replacement)
&& normalized.offsets_original().0 == 0
{
normalized.prepend(&self.str_rep);
}
}
PrependScheme::Never => {}
};
if self.split {
normalized.split(self.replacement, SplitDelimiterBehavior::MergedWithNext)
} else {
Ok(vec![normalized])
}
})
}
}
impl Decoder for Metaspace {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
Ok(tokens
.iter()
.enumerate()
.map(|(i, token)| {
token
.chars()
.flat_map(|c| {
if c == self.replacement {
if i == 0 && self.prepend_scheme != PrependScheme::Never {
None
} else {
Some(' ')
}
} else {
Some(c)
}
})
.collect::<String>()
})
.collect())
}
}
#[cfg(test)]
mod tests {
use regex::Regex;
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn serialization() {
let metaspace = Metaspace::new('_', PrependScheme::Always, true);
let metaspace_s =
r#"{"type":"Metaspace","replacement":"_","prepend_scheme":"always","split":true}"#;
assert_eq!(serde_json::to_string(&metaspace).unwrap(), metaspace_s);
assert_eq!(
serde_json::from_str::<Metaspace>(metaspace_s).unwrap(),
metaspace
);
// Also check it can deserialize previous versions
let metaspace_s = r#"{"type":"Metaspace","replacement":"_","add_prefix_space":false,"prepend_scheme":"always"}"#;
        assert!(serde_json::from_str::<Metaspace>(metaspace_s).is_err());
let metaspace = Metaspace::new('_', PrependScheme::Always, true);
let metaspace_s = r#"{"type":"Metaspace","str_rep":"_","replacement":"_","add_prefix_space":true,"prepend_scheme":"always"}"#;
assert_eq!(
serde_json::from_str::<Metaspace>(metaspace_s).unwrap(),
metaspace
);
let metaspace_parsed: Metaspace = serde_json::from_str(
r#"{"type":"Metaspace","replacement":"_","add_prefix_space":true}"#,
)
.unwrap();
assert_eq!(metaspace_parsed, metaspace);
}
#[test]
fn basic() {
let pretok = Metaspace::new('▁', PrependScheme::Always, true);
let mut pretokenized = PreTokenizedString::from("Hey friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("▁Hey", (0, 6)), ("▁friend!", (6, 16))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("▁Hey", (0, 3)), ("▁friend!", (3, 11))]
);
}
#[test]
fn multiple_spaces() {
let pretok = Metaspace::new('▁', PrependScheme::Always, true);
let mut pretokenized = PreTokenizedString::from("Hey friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 6)),
("▁", (6, 9)),
("▁", (9, 12)),
("▁friend!", (12, 22)),
]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 3)),
("▁", (3, 4)),
("▁", (4, 5)),
("▁friend!", (5, 13)),
]
);
}
#[test]
fn non_legacy_meta_space() {
let mut pretok = Metaspace::new('▁', PrependScheme::Always, true);
pretok.set_prepend_scheme(PrependScheme::Always);
assert_eq!(pretok, Metaspace::new('▁', PrependScheme::Always, true));
pretok.set_prepend_scheme(PrependScheme::Never);
assert_eq!(pretok, Metaspace::new('▁', PrependScheme::Never, true));
pretok.set_prepend_scheme(PrependScheme::First);
assert_eq!(pretok, Metaspace::new('▁', PrependScheme::First, true));
let pretok = Metaspace::new('▁', PrependScheme::First, false);
let mut pretokenized = PreTokenizedString::from("Hey my friend <s>how▁are you");
let re_ref = Regex::new(r"(<s>)").unwrap();
pretokenized
.split(|_, sequence| sequence.split(&re_ref, SplitDelimiterBehavior::Isolated))
.expect("Bad split");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey▁my▁friend▁", (0, 23)),
("<s>", (23, 26)),
("how▁are▁you", (26, 41))
]
);
let pretok = Metaspace::new('▁', PrependScheme::Always, true);
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 6)),
("▁my", (6, 11)),
("▁friend", (11, 20)),
("▁", (20, 23)),
("▁<s>", (23, 29)),
("▁how", (29, 35)),
("▁are", (35, 41)),
("▁you", (41, 47))
]
);
let pretok = Metaspace::new('▁', PrependScheme::First, false);
let mut pretokenized = PreTokenizedString::from(" Hey <s>how"); // test with prefix
pretokenized
.split(|_, sequence| sequence.split(&re_ref, SplitDelimiterBehavior::Isolated))
.expect("Bad split");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("▁Hey▁", (0, 9)), ("<s>", (9, 12)), ("how", (12, 15))]
);
let mut pretokenized = PreTokenizedString::from(" Hey <s>how <s>are <s> you"); // test with many splits
pretokenized
.split(|_, sequence| sequence.split(&re_ref, SplitDelimiterBehavior::Isolated))
.expect("Bad split");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey▁", (0, 9)),
("<s>", (9, 12)),
("how▁", (12, 18)),
("<s>", (18, 21)),
("are▁", (21, 27)),
("<s>", (27, 30)),
("▁you", (30, 36))
]
);
}
#[test]
fn decode() {
let decoder = Metaspace::new('▁', PrependScheme::Always, true);
let res = decoder
.decode_chain(vec!["▁Hey".into(), "▁friend!".into()])
.unwrap();
assert_eq!(res, vec!["Hey", " friend!"]);
let decoder = Metaspace::new('▁', PrependScheme::Never, true);
let res = decoder
.decode_chain(vec!["▁Hey".into(), "▁friend!".into()])
.unwrap();
assert_eq!(res, vec![" Hey", " friend!"]);
}
}
| tokenizers/tokenizers/src/pre_tokenizers/metaspace.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/metaspace.rs",
"repo_id": "tokenizers",
"token_count": 6687
} | 349 |
//! Represents a tokenization pipeline.
//!
//! A [`Tokenizer`](struct.Tokenizer.html) is composed of some of the following parts.
//! - [`Normalizer`](trait.Normalizer.html): Takes care of the text normalization (like unicode normalization).
//! - [`PreTokenizer`](trait.PreTokenizer.html): Takes care of the pre-tokenization (i.e. how to split tokens and pre-process
//! them).
//! - [`Model`](trait.Model.html): A model encapsulates the tokenization algorithm (like BPE, word-based, character-based,
//! ...).
//! - [`PostProcessor`](trait.PostProcessor.html): Takes care of the processing after tokenization (like truncating, padding,
//! ...).
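//!
//! As a minimal sketch of wiring these parts together (illustrative only: the default
//! `BPE` model below has an empty vocabulary, so real use would load a trained model):
//!
//! ```no_run
//! use tokenizers::Tokenizer;
//! use tokenizers::models::bpe::BPE;
//!
//! // A bare tokenizer with just a model; normalizer, pre-tokenizer, post-processor
//! // and decoder are all optional and can be set on the instance.
//! let tokenizer = Tokenizer::new(BPE::default());
//! let encoding = tokenizer.encode("Hello there!", false).unwrap();
//! println!("{:?}", encoding.get_tokens());
//! ```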
use ahash::AHashMap;
use std::{
fs::{read_to_string, File},
io::{prelude::*, BufReader},
ops::{Deref, DerefMut},
path::{Path, PathBuf},
};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use crate::utils::iter::ResultShunt;
use crate::utils::parallelism::*;
use crate::utils::progress::{ProgressBar, ProgressStyle};
mod added_vocabulary;
mod encoding;
pub mod normalizer;
pub mod pattern;
pub mod pre_tokenizer;
mod serialization;
// Re-export wrappers
pub use crate::decoders::DecoderWrapper;
pub use crate::models::ModelWrapper;
pub use crate::normalizers::NormalizerWrapper;
pub use crate::pre_tokenizers::PreTokenizerWrapper;
pub use crate::processors::PostProcessorWrapper;
// And some other types
pub use crate::utils::iter::LinesWithEnding;
pub use crate::utils::padding::{pad_encodings, PaddingDirection, PaddingParams, PaddingStrategy};
pub use crate::utils::truncation::{
truncate_encodings, TruncationDirection, TruncationParams, TruncationStrategy,
};
pub use added_vocabulary::*;
pub use encoding::*;
pub use normalizer::{NormalizedString, OffsetReferential, SplitDelimiterBehavior};
pub use pre_tokenizer::*;
pub type Error = Box<dyn std::error::Error + Send + Sync>;
pub type Result<T> = std::result::Result<T, Error>;
pub type Offsets = (usize, usize);
/// Takes care of pre-processing strings.
pub trait Normalizer {
fn normalize(&self, normalized: &mut NormalizedString) -> Result<()>;
}
/// The `PreTokenizer` is in charge of doing the pre-segmentation step. It splits the given string
/// in multiple substrings, keeping track of the offsets of said substrings from the
/// `NormalizedString`. On some occasions, the `PreTokenizer` might need to modify the given
/// `NormalizedString` to ensure we can entirely keep track of the offsets and the mapping with
/// the original string.
pub trait PreTokenizer {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()>;
}
/// Represents a model used during Tokenization (like BPE or Word or Unigram).
pub trait Model {
type Trainer: Trainer + Sync;
/// Tokenize the given sequence into multiple underlying `Token`. The `offsets` on the `Token`
/// are expected to be relative to the given sequence.
fn tokenize(&self, sequence: &str) -> Result<Vec<Token>>;
/// Find the ID associated to a string token
fn token_to_id(&self, token: &str) -> Option<u32>;
/// Find the string token associated to an ID
fn id_to_token(&self, id: u32) -> Option<String>;
/// Retrieve the entire vocabulary mapping (token -> ID)
fn get_vocab(&self) -> HashMap<String, u32>;
/// Retrieve the size of the vocabulary
fn get_vocab_size(&self) -> usize;
/// Save the current `Model` in the given folder, using the given `prefix` for the various
/// files that need to be saved.
fn save(&self, folder: &Path, prefix: Option<&str>) -> Result<Vec<PathBuf>>;
/// Get an instance of a Trainer capable of training this Model
fn get_trainer(&self) -> <Self as Model>::Trainer;
}
/// A `PostProcessor` has the responsibility to post process an encoded output of the `Tokenizer`.
/// It adds any special tokens that a language model would require.
pub trait PostProcessor {
/// Returns the number of tokens that will be added during the processing step
fn added_tokens(&self, is_pair: bool) -> usize;
/// Process both encodings and returns a new merged one
fn process(
&self,
encoding: Encoding,
pair_encoding: Option<Encoding>,
add_special_tokens: bool,
) -> Result<Encoding> {
let mut encodings = if let Some(pair_encoding) = pair_encoding {
vec![encoding, pair_encoding]
} else {
vec![encoding]
};
encodings.iter_mut().enumerate().for_each(|(i, encoding)| {
encoding.set_sequence_id(i);
encoding
.get_overflowing_mut()
.iter_mut()
.for_each(|encoding| encoding.set_sequence_id(i));
encoding.set_type_ids(vec![i as u32; encoding.len()]);
});
let encodings = self.process_encodings(encodings, add_special_tokens)?;
Ok(Encoding::merge(encodings, false))
}
    /// Process any number of encodings and return a series of encodings (might merge them)
fn process_encodings(
&self,
encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> Result<Vec<Encoding>>;
}
impl dyn PostProcessor {
pub fn default_process(
encodings: Vec<Encoding>,
_add_special_tokens: bool,
) -> Result<Vec<Encoding>> {
match encodings.len() {
1 => Ok(encodings),
_ => {
let mut final_encoding = Encoding::default();
for (i, mut encoding) in encodings.into_iter().enumerate() {
encoding.set_sequence_id(i);
final_encoding.merge_with(encoding, false);
}
Ok(vec![final_encoding])
}
}
}
}
#[derive(thiserror::Error, Debug)]
pub enum ProcessorError {
#[error("encodings vector length must be either 1 or 2")]
InvalidEncodingsVecLength,
}
/// A `Decoder` changes the raw tokens into a more readable form.
pub trait Decoder {
fn decode(&self, tokens: Vec<String>) -> Result<String> {
let results = self.decode_chain(tokens)?;
Ok(results.join(""))
}
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>>;
}
/// A `Trainer` has the responsibility to train a model. We feed it lines/sentences,
/// and then it can train the given `Model`.
pub trait Trainer {
type Model: Model + Sized;
/// Whether we should show progress during the training.
fn should_show_progress(&self) -> bool;
/// The actual training method. This will return a new trained Model as well as a list
/// of `special_tokens` to be added directly to the tokenizer along with the model.
fn train(&self, model: &mut Self::Model) -> Result<Vec<AddedToken>>;
/// Process an iterator of sequences, calling `process` for each of them in order to
/// pre-process the said sequence as relevant.
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> Result<Vec<String>> + Sync;
}
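/// A single token produced by a `Model`: its id, its string value, and its offsets,
/// which are relative to the sequence it was tokenized from.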
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Token {
pub id: u32,
pub value: String,
pub offsets: (usize, usize),
}
impl Token {
pub fn new(id: u32, value: String, offsets: (usize, usize)) -> Self {
Self { id, value, offsets }
}
}
use std::borrow::Cow;
use std::collections::HashMap;
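/// A single input sequence: either raw text, or text that the caller has already
/// pre-tokenized (borrowed, owned, or copy-on-write).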
#[derive(Debug, Clone)]
pub enum InputSequence<'s> {
Raw(Cow<'s, str>),
PreTokenized(Cow<'s, [&'s str]>),
PreTokenizedOwned(Cow<'s, [String]>),
PreTokenizedCow(Cow<'s, [Cow<'s, str>]>),
}
impl<'s> From<Cow<'s, str>> for InputSequence<'s> {
fn from(input: Cow<'s, str>) -> Self {
Self::Raw(input)
}
}
impl<'s> From<&'s str> for InputSequence<'s> {
fn from(input: &'s str) -> Self {
Self::Raw(Cow::Borrowed(input))
}
}
impl From<String> for InputSequence<'_> {
fn from(input: String) -> Self {
Self::Raw(Cow::Owned(input))
}
}
impl<'s> From<&'s [&'s str]> for InputSequence<'s> {
fn from(input: &'s [&'s str]) -> Self {
Self::PreTokenized(Cow::Borrowed(input))
}
}
impl<'s> From<Vec<&'s str>> for InputSequence<'s> {
fn from(input: Vec<&'s str>) -> Self {
Self::PreTokenized(Cow::Owned(input))
}
}
impl<'s> From<&'s [String]> for InputSequence<'s> {
fn from(input: &'s [String]) -> Self {
Self::PreTokenizedOwned(Cow::Borrowed(input))
}
}
impl From<Vec<String>> for InputSequence<'_> {
fn from(input: Vec<String>) -> Self {
Self::PreTokenizedOwned(Cow::Owned(input))
}
}
impl<'s> From<Vec<Cow<'s, str>>> for InputSequence<'s> {
fn from(input: Vec<Cow<'s, str>>) -> Self {
Self::PreTokenizedCow(Cow::Owned(input))
}
}
impl<'s> From<&'s [Cow<'s, str>]> for InputSequence<'s> {
fn from(input: &'s [Cow<'s, str>]) -> Self {
Self::PreTokenizedCow(Cow::Borrowed(input))
}
}
#[derive(Debug, Clone)]
pub enum EncodeInput<'s> {
Single(InputSequence<'s>),
Dual(InputSequence<'s>, InputSequence<'s>),
}
impl<'s, I: Into<InputSequence<'s>>> From<I> for EncodeInput<'s> {
fn from(input: I) -> Self {
Self::Single(input.into())
}
}
impl<'s, I1, I2> From<(I1, I2)> for EncodeInput<'s>
where
I1: Into<InputSequence<'s>>,
I2: Into<InputSequence<'s>>,
{
fn from(input: (I1, I2)) -> Self {
Self::Dual(input.0.into(), input.1.into())
}
}
#[derive(thiserror::Error, Debug)]
#[error("{0}")]
pub struct BuilderError(String);
/// Builder for Tokenizer structs.
///
/// `build()` fails if the `model` is missing.
pub struct TokenizerBuilder<M, N, PT, PP, D> {
model: Option<M>,
normalizer: Option<N>,
pre_tokenizer: Option<PT>,
post_processor: Option<PP>,
decoder: Option<D>,
added_vocabulary: AddedVocabulary,
truncation: Option<TruncationParams>,
padding: Option<PaddingParams>,
}
impl<M, N, PT, PP, D> Default for TokenizerBuilder<M, N, PT, PP, D>
where
M: Model,
N: Normalizer,
PT: PreTokenizer,
PP: PostProcessor,
D: Decoder,
{
fn default() -> Self {
Self::new()
}
}
impl<M, N, PT, PP, D> TokenizerBuilder<M, N, PT, PP, D>
where
M: Model,
N: Normalizer,
PT: PreTokenizer,
PP: PostProcessor,
D: Decoder,
{
/// Get an empty TokenizerBuilder.
pub fn new() -> Self {
Self {
model: None,
normalizer: None,
pre_tokenizer: None,
post_processor: None,
decoder: None,
added_vocabulary: AddedVocabulary::new(),
truncation: None,
padding: None,
}
}
/// Convert the TokenizerBuilder to a Tokenizer.
///
/// Conversion fails if the `model` is missing.
pub fn build(self) -> Result<TokenizerImpl<M, N, PT, PP, D>> {
let model = self
.model
.ok_or_else(|| Box::new(BuilderError("Model missing.".into())))?;
Ok(TokenizerImpl {
normalizer: self.normalizer,
pre_tokenizer: self.pre_tokenizer,
model,
post_processor: self.post_processor,
decoder: self.decoder,
added_vocabulary: self.added_vocabulary,
truncation: self.truncation,
padding: self.padding,
})
}
/// Set the model.
#[must_use]
pub fn with_model(mut self, model: M) -> Self {
self.model = Some(model);
self
}
/// Set the normalizer.
#[must_use]
pub fn with_normalizer(mut self, normalizer: Option<N>) -> Self {
self.normalizer = normalizer;
self
}
/// Set the pre-tokenizer.
#[must_use]
pub fn with_pre_tokenizer(mut self, pretokenizer: Option<PT>) -> Self {
self.pre_tokenizer = pretokenizer;
self
}
/// Set the post-processor.
#[must_use]
pub fn with_post_processor(mut self, post_processor: Option<PP>) -> Self {
self.post_processor = post_processor;
self
}
/// Set the decoder.
#[must_use]
pub fn with_decoder(mut self, decoder: Option<D>) -> Self {
self.decoder = decoder;
self
}
/// Set the added vocabulary.
pub fn with_added_vocabulary(mut self, added_vocabulary: AddedVocabulary) -> Self {
self.added_vocabulary = added_vocabulary;
self
}
/// Set the truncation parameters.
#[must_use]
pub fn with_truncation(mut self, trunc: Option<TruncationParams>) -> Self {
self.truncation = trunc;
self
}
/// Set the padding parameters.
#[must_use]
pub fn with_padding(mut self, padding: Option<PaddingParams>) -> Self {
self.padding = padding;
self
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Tokenizer(
TokenizerImpl<
ModelWrapper,
NormalizerWrapper,
PreTokenizerWrapper,
PostProcessorWrapper,
DecoderWrapper,
>,
);
impl Tokenizer {
/// Construct a new Tokenizer based on the model.
pub fn new(model: impl Into<ModelWrapper>) -> Self {
Self(TokenizerImpl::new(model.into()))
}
/// Unwrap the TokenizerImpl.
pub fn into_inner(
self,
) -> TokenizerImpl<
ModelWrapper,
NormalizerWrapper,
PreTokenizerWrapper,
PostProcessorWrapper,
DecoderWrapper,
> {
self.0
}
pub fn from_file<P: AsRef<Path>>(file: P) -> Result<Self> {
let content = read_to_string(file)?;
let tokenizer = serde_json::from_str(&content)?;
Ok(tokenizer)
}
pub fn from_bytes<P: AsRef<[u8]>>(bytes: P) -> Result<Self> {
let tokenizer = serde_json::from_slice(bytes.as_ref())?;
Ok(tokenizer)
}
#[cfg(feature = "http")]
pub fn from_pretrained<S: AsRef<str>>(
identifier: S,
params: Option<crate::utils::from_pretrained::FromPretrainedParameters>,
) -> Result<Self> {
let tokenizer_file = crate::utils::from_pretrained::from_pretrained(identifier, params)?;
Tokenizer::from_file(tokenizer_file)
}
}
impl std::str::FromStr for Tokenizer {
type Err = Box<dyn std::error::Error + Send + Sync>;
fn from_str(s: &str) -> Result<Self> {
Ok(serde_json::from_str(s)?)
}
}
impl<M, N, PT, PP, D> From<TokenizerImpl<M, N, PT, PP, D>> for Tokenizer
where
M: Into<ModelWrapper>,
N: Into<NormalizerWrapper>,
PT: Into<PreTokenizerWrapper>,
PP: Into<PostProcessorWrapper>,
D: Into<DecoderWrapper>,
{
fn from(t: TokenizerImpl<M, N, PT, PP, D>) -> Self {
Self(TokenizerImpl {
model: t.model.into(),
normalizer: t.normalizer.map(Into::into),
pre_tokenizer: t.pre_tokenizer.map(Into::into),
post_processor: t.post_processor.map(Into::into),
decoder: t.decoder.map(Into::into),
added_vocabulary: t.added_vocabulary,
padding: t.padding,
truncation: t.truncation,
})
}
}
impl Deref for Tokenizer {
type Target = TokenizerImpl<
ModelWrapper,
NormalizerWrapper,
PreTokenizerWrapper,
PostProcessorWrapper,
DecoderWrapper,
>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Tokenizer {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(thiserror::Error, Debug)]
#[error("{0}")]
pub struct TruncationParamError(String);
/// A `Tokenizer` is capable of encoding/decoding any text.
#[derive(Clone, Debug)]
pub struct TokenizerImpl<M, N, PT, PP, D> {
// Tokenizer parts
normalizer: Option<N>,
pre_tokenizer: Option<PT>,
model: M,
post_processor: Option<PP>,
decoder: Option<D>,
// Added Vocabulary capabilities
added_vocabulary: AddedVocabulary,
// General processing parameters
truncation: Option<TruncationParams>,
padding: Option<PaddingParams>,
}
impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D>
where
M: Model,
N: Normalizer,
PT: PreTokenizer,
PP: PostProcessor,
D: Decoder,
{
/// Instantiate a new Tokenizer, with the given Model
pub fn new(model: M) -> Self {
Self {
normalizer: None,
pre_tokenizer: None,
model,
post_processor: None,
decoder: None,
added_vocabulary: AddedVocabulary::new(),
truncation: None,
padding: None,
}
}
/// Set the normalizer
pub fn with_normalizer(&mut self, normalizer: Option<impl Into<N>>) -> &mut Self {
self.normalizer = normalizer.map(|norm| norm.into());
self
}
/// Get the normalizer
pub fn get_normalizer(&self) -> Option<&N> {
self.normalizer.as_ref()
}
/// Set the pre tokenizer
pub fn with_pre_tokenizer(&mut self, pre_tokenizer: Option<impl Into<PT>>) -> &mut Self {
self.pre_tokenizer = pre_tokenizer.map(|tok| tok.into());
self
}
/// Get the pre tokenizer
pub fn get_pre_tokenizer(&self) -> Option<&PT> {
self.pre_tokenizer.as_ref()
}
/// Set the post processor
pub fn with_post_processor(&mut self, post_processor: Option<impl Into<PP>>) -> &mut Self {
self.post_processor = post_processor.map(|post_proc| post_proc.into());
self
}
/// Get the post processor
pub fn get_post_processor(&self) -> Option<&PP> {
self.post_processor.as_ref()
}
/// Set the decoder
pub fn with_decoder(&mut self, decoder: Option<impl Into<D>>) -> &mut Self {
self.decoder = decoder.map(|dec| dec.into());
self
}
/// Get the decoder
pub fn get_decoder(&self) -> Option<&D> {
self.decoder.as_ref()
}
/// Set the model
pub fn with_model(&mut self, model: impl Into<M>) -> &mut Self {
self.model = model.into();
self
}
/// Get the model
pub fn get_model(&self) -> &M {
&self.model
}
/// Set the added vocabulary.
pub fn with_added_vocabulary(&mut self, added_vocabulary: AddedVocabulary) -> &mut Self {
self.added_vocabulary = added_vocabulary;
self
}
/// Get the added vocabulary
pub fn get_added_vocabulary(&self) -> &AddedVocabulary {
&self.added_vocabulary
}
/// Set the truncation parameters
///
/// Fails if `stride` is too high relative to `max_length` and `post_processor.added_tokens()`
pub fn with_truncation(&mut self, trunc: Option<TruncationParams>) -> Result<&mut Self> {
if let Some(trunc_params) = &trunc {
let n_added_tokens = self.get_n_added_tokens(false);
let effective_max_length = trunc_params.max_length - n_added_tokens;
if effective_max_length < trunc_params.stride {
return Err(Box::new(TruncationParamError(format!(
"tokenizer stride set to {}, which is greater than or equal to its effective max length of {} (= {} original max length - {} added special tokens), ",
trunc_params.stride, effective_max_length, trunc_params.max_length, n_added_tokens
))));
}
}
self.truncation = trunc;
Ok(self)
}
/// Get the currently set truncation parameters
pub fn get_truncation(&self) -> Option<&TruncationParams> {
self.truncation.as_ref()
}
/// Get a mutable reference to the currently set truncation parameters
pub fn get_truncation_mut(&mut self) -> Option<&mut TruncationParams> {
self.truncation.as_mut()
}
/// Set the padding parameters
pub fn with_padding(&mut self, padding: Option<PaddingParams>) -> &mut Self {
self.padding = padding;
self
}
/// Get the currently set padding parameters
pub fn get_padding(&self) -> Option<&PaddingParams> {
self.padding.as_ref()
}
/// Get a mutable reference to the currently set padding parameters
pub fn get_padding_mut(&mut self) -> Option<&mut PaddingParams> {
self.padding.as_mut()
}
    /// Get the vocabulary as a plain `HashMap`, for bindings compatibility
pub fn get_vocab(&self, with_added_tokens: bool) -> HashMap<String, u32> {
let mut final_vocab = self.model.get_vocab();
if with_added_tokens {
let added_vocab = self.added_vocabulary.get_vocab();
if !added_vocab.is_empty() {
final_vocab.reserve(added_vocab.len());
for (token, id) in added_vocab {
final_vocab.insert(token.clone(), *id);
}
}
}
final_vocab
}
/// Get the added tokens decoder
pub fn get_added_tokens_decoder(&self) -> AHashMap<u32, AddedToken> {
self.added_vocabulary.get_added_tokens_decoder().clone()
}
/// Get the size of the vocabulary
pub fn get_vocab_size(&self, with_added_tokens: bool) -> usize {
// TODO ArthurZ THIS IS WRONG! We need to measure the length of the `set` because
// now some tokens can be both in the added_tokens_encoder and in the vocab
if with_added_tokens {
self.get_vocab(true).len()
} else {
self.model.get_vocab_size()
}
}
    /// Converts a token into the corresponding id.
pub fn token_to_id(&self, token: &str) -> Option<u32> {
self.added_vocabulary.token_to_id(token, &self.model)
}
/// Converts an id to the corresponding token.
pub fn id_to_token(&self, id: u32) -> Option<String> {
self.added_vocabulary
.simple_id_to_token(id)
.or_else(|| self.model.id_to_token(id))
}
    /// Set the added vocab's splitting scheme
pub fn set_encode_special_tokens(&mut self, value: bool) {
self.added_vocabulary.set_encode_special_tokens(value);
}
    /// Get the added vocab's splitting scheme
pub fn get_encode_special_tokens(&self) -> bool {
self.added_vocabulary.get_encode_special_tokens()
}
/// Encode a single sequence
fn encode_single_sequence(
&self,
sequence: InputSequence,
type_id: u32,
offsets_type: OffsetType,
) -> Result<Encoding> {
let encode = |is_pre_tokenized, subseq_idx, subseq| -> Result<Encoding> {
let normalized = self
.added_vocabulary
.extract_and_normalize(self.normalizer.as_ref(), subseq);
let pre_tokenized = self.do_pre_tokenize(normalized)?;
let subseq_encoding = self.do_tokenize(
pre_tokenized,
type_id,
if is_pre_tokenized {
Some(subseq_idx as u32)
} else {
None
},
offsets_type,
)?;
Ok(subseq_encoding)
};
match sequence {
InputSequence::PreTokenized(seq) => seq
.iter()
.enumerate()
.map(|(i, sequence)| encode(true, i, sequence))
.collect(),
InputSequence::PreTokenizedOwned(seq) => seq
.iter()
.enumerate()
.map(|(i, sequence)| encode(true, i, sequence))
.collect(),
InputSequence::PreTokenizedCow(seq) => seq
.iter()
.enumerate()
.map(|(i, sequence)| encode(true, i, sequence))
.collect(),
InputSequence::Raw(seq) => encode(false, 0, seq.as_ref()),
}
}
/// Encode the given input. This method accepts both single sequences, as well as pair
    /// sequences. Also, a sequence can be a string, or already pre-tokenized input directly.
    /// Unlike `encode`, it does not compute offsets:
/// ```
/// # use tokenizers::Tokenizer;
/// # use tokenizers::models::bpe::BPE;
/// # let mut tokenizer = Tokenizer::new(BPE::default());
/// #
/// // Sequences:
/// tokenizer.encode_fast("Single sequence", false);
/// tokenizer.encode_fast(("Sequence A", "Sequence B"), false);
///
/// // Pre-tokenized sequences:
/// tokenizer.encode_fast(&["Single", "sequence"][..], false);
/// tokenizer.encode_fast((
/// &["Sequence", "A"][..],
/// &["Sequence", "B"][..]
/// ), false);
///
/// // or even both types together:
/// tokenizer.encode_fast(("A complete sequence", &["And", "a", "tokenized"][..]), false);
/// ```
pub fn encode_fast<'s, E>(&self, input: E, add_special_tokens: bool) -> Result<Encoding>
where
E: Into<EncodeInput<'s>>,
{
// Extract sequences from the EncodeInput
let (sequence, pair) = match input.into() {
EncodeInput::Single(s1) => (s1, None),
EncodeInput::Dual(s1, s2) => (s1, Some(s2)),
};
// Encode each sequence
let encoding = self.encode_single_sequence(sequence, 0, OffsetType::None)?;
let pair_encoding = pair
.map(|sequence| self.encode_single_sequence(sequence, 1, OffsetType::None))
.transpose()?;
// And finally post process
self.post_process(encoding, pair_encoding, add_special_tokens)
}
/// Encode the given input. This method accepts both single sequences, as well as pair
/// sequences. Also, a sequence can be a string, or already pre-tokenized input directly:
///
/// ```
/// # use tokenizers::Tokenizer;
/// # use tokenizers::models::bpe::BPE;
/// # let mut tokenizer = Tokenizer::new(BPE::default());
/// #
/// // Sequences:
/// tokenizer.encode("Single sequence", false);
/// tokenizer.encode(("Sequence A", "Sequence B"), false);
///
/// // Pre-tokenized sequences:
/// tokenizer.encode(&["Single", "sequence"][..], false);
/// tokenizer.encode((
/// &["Sequence", "A"][..],
/// &["Sequence", "B"][..]
/// ), false);
///
/// // or even both types together:
/// tokenizer.encode(("A complete sequence", &["And", "a", "tokenized"][..]), false);
/// ```
pub fn encode<'s, E>(&self, input: E, add_special_tokens: bool) -> Result<Encoding>
where
E: Into<EncodeInput<'s>>,
{
// Extract sequences from the EncodeInput
let (sequence, pair) = match input.into() {
EncodeInput::Single(s1) => (s1, None),
EncodeInput::Dual(s1, s2) => (s1, Some(s2)),
};
// Encode each sequence
let encoding = self.encode_single_sequence(sequence, 0, OffsetType::Byte)?;
let pair_encoding = pair
.map(|sequence| self.encode_single_sequence(sequence, 1, OffsetType::Byte))
.transpose()?;
// And finally post process
self.post_process(encoding, pair_encoding, add_special_tokens)
}
/// Encode the given input, using offsets relative to chars instead of bytes.
/// This method accepts both single sequences, as well as pair sequences. Also,
/// a sequence can be a string, or already pre-tokenized input directly:
///
/// ```
/// # use tokenizers::Tokenizer;
/// # use tokenizers::models::bpe::BPE;
/// # let mut tokenizer = Tokenizer::new(BPE::default());
/// #
/// // Sequences:
/// tokenizer.encode("Single sequence", false);
/// tokenizer.encode(("Sequence A", "Sequence B"), false);
///
/// // Pre-tokenized sequences:
/// tokenizer.encode(&["Single", "sequence"][..], false);
/// tokenizer.encode((
/// &["Sequence", "A"][..],
/// &["Sequence", "B"][..]
/// ), false);
///
/// // or even both types together:
/// tokenizer.encode(("A complete sequence", &["And", "a", "tokenized"][..]), false);
/// ```
pub fn encode_char_offsets<'s, E>(&self, input: E, add_special_tokens: bool) -> Result<Encoding>
where
E: Into<EncodeInput<'s>>,
{
// Extract sequences from the EncodeInput
let (sequence, pair) = match input.into() {
EncodeInput::Single(s1) => (s1, None),
EncodeInput::Dual(s1, s2) => (s1, Some(s2)),
};
// Encode each sequence
let encoding = self.encode_single_sequence(sequence, 0, OffsetType::Char)?;
let pair_encoding = pair
.map(|sequence| self.encode_single_sequence(sequence, 1, OffsetType::Char))
.transpose()?;
// And finally post process
self.post_process(encoding, pair_encoding, add_special_tokens)
}
/// Decode the given ids, back to a String
pub fn decode(&self, ids: &[u32], skip_special_tokens: bool) -> Result<String> {
let tokens = ids
.iter()
.filter_map(|id| {
self.added_vocabulary
.simple_id_to_token(*id)
.or_else(|| self.model.id_to_token(*id))
.filter(|token| {
!skip_special_tokens || !self.added_vocabulary.is_special_token(token)
})
})
.collect::<Vec<_>>();
if let Some(decoder) = &self.decoder {
decoder.decode(tokens)
} else {
Ok(tokens.join(" "))
}
}
/// Decode the given ids, back to a String
/// See [`DecodeStream`]
pub fn decode_stream(&self, skip_special_tokens: bool) -> DecodeStream<'_, M, N, PT, PP, D> {
DecodeStream::new(self, skip_special_tokens)
}
}
/// DecodeStream will keep the state necessary to produce individual chunks of
/// strings given an input stream of token_ids.
///
/// This is necessary because decoding cannot, in general, be done one token at a time:
/// the string for a given id depends on the surrounding ids (typically when stripping extra spaces).
///
/// Example:
///
/// ```
/// # #[cfg(not(target_os = "windows"))]
/// # {
/// use tokenizers::Tokenizer;
/// let tokenizer = Tokenizer::from_file("data/roberta.json").unwrap();
///
/// let mut decode_stream = tokenizer.decode_stream(false);
/// assert_eq!(decode_stream.step(713).unwrap(), Some("This".to_string()));
/// assert_eq!(decode_stream.step(16).unwrap(), Some(" is".to_string()));
/// assert_eq!(decode_stream.step(41).unwrap(), Some(" an".to_string()));
/// assert_eq!(
/// decode_stream.step(1246).unwrap(),
/// Some(" example".to_string())
/// );
/// # }
/// ```
///
/// Returning `None` means the given id is not enough to produce a chunk.
/// This typically happens with `byte_fallback` options where some tokens do
/// not represent valid utf-8, and only follow-up token_ids will help produce
/// a valid chunk.
/// ```
/// use tokenizers::{Tokenizer, TokenizerBuilder, models::bpe::BPE, decoders::byte_fallback::ByteFallback, pre_tokenizers::byte_level::ByteLevel, normalizers::unicode::NFC};
/// use std::collections::HashMap;
/// use std::iter::FromIterator;
///
/// let vocab = HashMap::from_iter([
/// ("<0x20>".to_string(), 0),
/// ("<0xC3>".to_string(), 1),
/// ("<0xA9>".to_string(), 2),
/// (" This".to_string(), 3),
/// ]);
/// let merges = vec![];
/// let bpe = BPE::builder()
/// .vocab_and_merges(vocab, merges)
/// .byte_fallback(true)
/// .build()
/// .unwrap();
/// let tokenizer = TokenizerBuilder::default()
/// .with_model(bpe)
/// .with_decoder(Some(ByteFallback::default()))
/// .with_normalizer(Some(NFC))
/// .with_pre_tokenizer(Some(ByteLevel::default()))
/// .with_post_processor(Some(ByteLevel::default()))
/// .build().unwrap();
///
/// let mut decode_stream = tokenizer.decode_stream(false);
/// // Single byte_fallback is valid utf-8
/// assert_eq!(decode_stream.step(0).unwrap(), Some(" ".to_string()));
/// // Invalid utf-8
/// assert_eq!(decode_stream.step(1).unwrap(), None);
/// // Valid utf-8 again, this corresponds to both tokens: [1, 2]
/// assert_eq!(decode_stream.step(2).unwrap(), Some("é".to_string()));
/// ```
///
/// To see how [`DecodeStream`] is necessary, let's show how using raw [`TokenizerImpl::decode`] would
/// fail.
///
/// ```
/// use tokenizers::{Tokenizer, TokenizerBuilder, models::bpe::BPE, pre_tokenizers::{byte_level::ByteLevel, metaspace::Metaspace}, normalizers::unicode::NFC};
/// use std::collections::HashMap;
/// use std::iter::FromIterator;
///
/// let vocab = HashMap::from_iter([
/// ("▁This".to_string(), 0),
/// ]);
/// let merges = vec![];
/// let bpe = BPE::builder()
/// .vocab_and_merges(vocab, merges)
/// .byte_fallback(true)
/// .build()
/// .unwrap();
/// let tokenizer = TokenizerBuilder::new()
/// .with_model(bpe)
/// .with_decoder(Some(Metaspace::default()))
/// .with_normalizer(Some(NFC))
/// .with_pre_tokenizer(Some(ByteLevel::default()))
/// .with_post_processor(Some(ByteLevel::default()))
/// .build()
/// .unwrap();
///
/// // The Metaspace decoder removes the extra initial space
/// assert_eq!(tokenizer.decode(&[0, 0], false).unwrap(), "This This");
/// // Decoding one token at a time would produce "ThisThis"
/// assert_eq!(tokenizer.decode(&[0], false).unwrap(), "This");
///
/// // Using a stream fixes it by keeping the necessary state.
/// let mut decode_stream = tokenizer.decode_stream(false);
/// assert_eq!(decode_stream.step(0).unwrap(), Some("This".to_string()));
/// assert_eq!(decode_stream.step(0).unwrap(), Some(" This".to_string()));
/// ```
pub struct DecodeStream<'tok, M, N, PT, PP, D> {
/// A reference to the tokenizer
tokenizer: &'tok TokenizerImpl<M, N, PT, PP, D>,
/// Regular decode option that is kept throughout.
skip_special_tokens: bool,
/// A temporary buffer of the necessary token_ids needed
/// to produce valid string chunks.
/// This typically contains 3 parts:
/// - read
/// - prefix
/// - rest
///
/// Read is the bit necessary to surround the prefix
/// so decoding the whole ids produces a valid prefix.
    /// Prefix is the previously produced string, kept around so it can be trimmed
    /// off of the next valid chunk.
ids: Vec<u32>,
/// The previously returned chunk that needs to be discarded from the
/// decoding of the current ids to produce the next chunk
prefix: String,
/// The index within the ids corresponding to the prefix so we can drain
/// correctly
prefix_index: usize,
}
#[derive(thiserror::Error, Debug)]
pub enum DecodeStreamError {
#[error("Invalid prefix encountered")]
InvalidPrefix,
}
impl<'tok, M, N, PT, PP, D> DecodeStream<'tok, M, N, PT, PP, D>
where
M: Model,
N: Normalizer,
PT: PreTokenizer,
PP: PostProcessor,
D: Decoder,
{
fn new(tokenizer: &'tok TokenizerImpl<M, N, PT, PP, D>, skip_special_tokens: bool) -> Self {
Self {
tokenizer,
ids: vec![],
skip_special_tokens,
prefix: "".to_string(),
prefix_index: 0,
}
}
/// See [`DecodeStream`]
pub fn step(&mut self, id: u32) -> Result<Option<String>> {
step_decode_stream(
self.tokenizer,
id,
self.skip_special_tokens,
&mut self.ids,
&mut self.prefix,
&mut self.prefix_index,
)
}
}
/// Internal function exposed only to bypass Python limitations
pub fn step_decode_stream<M, N, PT, PP, D>(
tokenizer: &TokenizerImpl<M, N, PT, PP, D>,
id: u32,
skip_special_tokens: bool,
ids: &mut Vec<u32>,
prefix: &mut String,
prefix_index: &mut usize,
) -> Result<Option<String>>
where
M: Model,
N: Normalizer,
PT: PreTokenizer,
PP: PostProcessor,
D: Decoder,
{
ids.push(id);
let string = tokenizer.decode(ids.as_slice(), skip_special_tokens)?;
if string.len() > prefix.len() && !string.ends_with('�') {
if !(string.starts_with(&*prefix)) {
return Err(Box::new(DecodeStreamError::InvalidPrefix));
}
let new_text = &string[prefix.len()..].to_string();
let new_prefix_index = ids.len() - *prefix_index;
*ids = ids.drain(*prefix_index..).collect();
*prefix = tokenizer.decode(ids, skip_special_tokens)?;
*prefix_index = new_prefix_index;
Ok(Some(new_text.to_string()))
} else {
Ok(None)
}
}
impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D>
where
M: Model,
{
    /// Tokenization logic: makes the bridge between the pre-tokenization phase and the real
    /// tokenization phase, and converts offsets back to the original referential.
fn do_tokenize<P: Into<PreTokenizedString>>(
&self,
pretokenized: P,
type_id: u32,
word_idx: Option<u32>,
offsets_type: OffsetType,
) -> Result<Encoding> {
let mut pretokenized: PreTokenizedString = pretokenized.into();
pretokenized.tokenize(|normalized| self.model.tokenize(normalized.get()))?;
pretokenized.into_encoding(word_idx, type_id, offsets_type)
}
}
#[allow(dead_code)]
impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D>
where
N: Normalizer,
{
    /// Normalization logic, handling the case where there is no `Normalizer` set
fn do_normalize<V: Into<NormalizedString>>(&self, normalized: V) -> Result<NormalizedString> {
let mut normalized: NormalizedString = normalized.into();
if let Some(ref normalizer) = self.normalizer {
normalizer.normalize(&mut normalized)?;
}
Ok(normalized)
}
}
impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D>
where
N: Normalizer,
M: Model,
{
/// Register the given tokens as special tokens. This is especially useful for removing
/// these special tokens while decoding
pub fn add_special_tokens(&mut self, tokens: &[AddedToken]) -> usize {
self.added_vocabulary
.add_special_tokens(tokens, &self.model, self.normalizer.as_ref())
}
/// Add the given tokens to the added vocabulary
pub fn add_tokens(&mut self, tokens: &[AddedToken]) -> usize {
self.added_vocabulary
.add_tokens(tokens, &self.model, self.normalizer.as_ref())
}
}
impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D>
where
PT: PreTokenizer,
{
/// PreTokenization logic, handling the case where there is no PreTokenizer set
fn do_pre_tokenize<P: Into<PreTokenizedString>>(
&self,
pretokenized: P,
) -> Result<PreTokenizedString> {
let mut pretokenized: PreTokenizedString = pretokenized.into();
if let Some(ref pretok) = self.pre_tokenizer {
pretok.pre_tokenize(&mut pretokenized)?;
}
Ok(pretokenized)
}
}
impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D>
where
PP: PostProcessor,
{
/// Post processing logic, handling the case where there is no PostProcessor set
pub fn post_process(
&self,
encoding: Encoding,
pair_encoding: Option<Encoding>,
add_special_tokens: bool,
) -> Result<Encoding> {
// 1. First we truncate if needed
let (encoding, pair_encoding) = {
if let Some(trunc) = &self.truncation {
let n_added_tokens = self.get_n_added_tokens(pair_encoding.is_some());
if add_special_tokens && n_added_tokens > 0 {
let params = TruncationParams {
max_length: trunc.max_length - n_added_tokens,
..*trunc
};
truncate_encodings(encoding, pair_encoding, ¶ms)?
} else {
truncate_encodings(encoding, pair_encoding, trunc)?
}
} else {
(encoding, pair_encoding)
}
};
        // 2. Then we post-process
let final_encoding = if let Some(processor) = &self.post_processor {
processor.process(encoding, pair_encoding, add_special_tokens)?
} else {
let encodings = if let Some(pair_encoding) = pair_encoding {
vec![encoding, pair_encoding]
} else {
vec![encoding]
};
let mut encodings =
<dyn PostProcessor>::default_process(encodings, add_special_tokens)?;
if encodings.len() != 1 {
panic!("We haven't reduced the encodings like we should have");
}
encodings.pop().unwrap()
};
// 3. Then we pad if needed
let [final_encoding] = if let Some(params) = &self.padding {
let mut arr = [final_encoding];
pad_encodings(&mut arr, params)?;
arr
} else {
[final_encoding]
};
Ok(final_encoding)
}
fn get_n_added_tokens(&self, is_pair: bool) -> usize {
if let Some(processor) = &self.post_processor {
processor.added_tokens(is_pair)
} else {
0
}
}
}
impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D>
where
M: Model + Send + Sync,
N: Normalizer + Send + Sync,
PT: PreTokenizer + Send + Sync,
PP: PostProcessor + Send + Sync,
D: Decoder + Send + Sync,
{
/// Encode all the sentences in parallel, using multiple threads
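    ///
    /// A quick sketch (anything accepted by `encode` works for each item):
    ///
    /// ```
    /// # use tokenizers::Tokenizer;
    /// # use tokenizers::models::bpe::BPE;
    /// # let tokenizer = Tokenizer::new(BPE::default());
    /// let encodings = tokenizer.encode_batch(vec!["Hello there!", "How are you?"], false);
    /// ```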
pub fn encode_batch<'s, E>(
&self,
inputs: Vec<E>,
add_special_tokens: bool,
) -> Result<Vec<Encoding>>
where
E: Into<EncodeInput<'s>> + Send,
{
let mut encodings = inputs
.into_maybe_par_iter()
.map(|input| self.encode(input, add_special_tokens))
.collect::<Result<Vec<Encoding>>>()?;
if let Some(params) = &self.padding {
// We do the padding here to make sure we handle the batch padding
pad_encodings(&mut encodings, params)?;
}
Ok(encodings)
}
/// Encode all the sentences in parallel, using multiple threads.
/// The offsets on each `Encoding` will be relative to chars instead of bytes.
pub fn encode_batch_char_offsets<'s, E>(
&self,
inputs: Vec<E>,
add_special_tokens: bool,
) -> Result<Vec<Encoding>>
where
E: Into<EncodeInput<'s>> + Send,
{
let mut encodings = inputs
.into_maybe_par_iter()
.map(|input| self.encode_char_offsets(input, add_special_tokens))
.collect::<Result<Vec<Encoding>>>()?;
if let Some(params) = &self.padding {
// We do the padding here to make sure we handle the batch padding
pad_encodings(&mut encodings, params)?;
}
Ok(encodings)
}
/// Encode all the sentences in parallel, using multiple threads
pub fn encode_batch_fast<'s, E>(
&self,
inputs: Vec<E>,
add_special_tokens: bool,
) -> Result<Vec<Encoding>>
where
E: Into<EncodeInput<'s>> + Send,
{
let mut encodings = inputs
.into_maybe_par_iter()
.map(|input| self.encode_fast(input, add_special_tokens))
.collect::<Result<Vec<Encoding>>>()?;
if let Some(params) = &self.padding {
// We do the padding here to make sure we handle the batch padding
pad_encodings(&mut encodings, params)?;
}
Ok(encodings)
}
/// Decode all sentences in parallel
pub fn decode_batch(
&self,
sentences: &[&[u32]],
skip_special_tokens: bool,
) -> Result<Vec<String>>
where
M: Send + Sync,
{
sentences
.into_maybe_par_iter()
.map(|sentence| self.decode(sentence, skip_special_tokens))
.collect()
}
/// Train our Model from files
pub fn train_from_files<T>(&mut self, trainer: &mut T, files: Vec<String>) -> Result<&mut Self>
where
T: Trainer<Model = M> + Sync,
{
let mut len = 0;
for file in files.iter() {
len += File::open(file)
.and_then(|f| f.metadata())
.map(|m| m.len())?;
}
let max_read = 1_000_000;
ResultShunt::process(
files.into_iter().flat_map(|filename| {
match File::open(filename) {
Ok(file) => {
let file = BufReader::with_capacity(max_read, file);
// We read new lines using this API instead of the Lines Iterator
                        // on purpose. We want to keep the `\n` and potential `\r` between each line.
// We use an iterator to be able to chain with par_bridge.
itertools::Either::Left(file.lines_with_ending())
}
Err(e) => itertools::Either::Right(std::iter::once(Err(e))),
}
}),
|sequences| -> Result<()> {
let progress = if trainer.should_show_progress() {
let progress = ProgressBar::new(len);
progress.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {msg:<30!} {wide_bar} {percent:>18!}%")
.expect("Invalid progress template"),
);
progress
.set_message(format!("Pre-processing files ({:.2} Mo)", len / 1_000_000));
Some(progress)
} else {
None
};
trainer.feed(
sequences.inspect(|s| {
if let Some(progress) = &progress {
progress.inc(s.len() as u64)
}
}),
|seq| {
let normalized = self
.added_vocabulary
.extract_and_normalize(self.normalizer.as_ref(), seq.as_ref());
let pre_tokenized = self.do_pre_tokenize(normalized)?;
Ok(pre_tokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, _, _)| s.to_owned())
.collect())
},
)?;
if let Some(pbar) = progress {
pbar.finish();
}
let special_tokens = trainer.train(&mut self.model)?;
self.add_special_tokens(&special_tokens);
Ok(())
},
)??;
Ok(self)
}
/// Train our Model, using the given Trainer and iterator
pub fn train<T, I, S>(&mut self, trainer: &mut T, sequences: I) -> Result<&mut Self>
where
T: Trainer<Model = M> + Sync,
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
{
let (lower, upper) = sequences.size_hint();
let len = upper.unwrap_or(lower) as u64;
let progress = if trainer.should_show_progress() {
let progress = ProgressBar::new(len);
progress.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {msg:<30!} {wide_bar} {pos:<9!}/{len:>9!}")
.expect("Invalid progress template"),
);
progress.set_message("Pre-processing sequences");
Some(progress)
} else {
None
};
trainer.feed(
sequences.inspect(|_s| {
if let Some(progress) = &progress {
progress.inc(1)
}
}),
|seq| {
let normalized = self
.added_vocabulary
.extract_and_normalize(self.normalizer.as_ref(), seq.as_ref());
let pre_tokenized = self.do_pre_tokenize(normalized)?;
Ok(pre_tokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, _, _)| s.to_owned())
.collect())
},
)?;
if let Some(pbar) = progress {
pbar.finish();
}
let special_tokens = trainer.train(&mut self.model)?;
self.add_special_tokens(&special_tokens);
Ok(self)
}
}
impl<M, N, PT, PP, D> std::str::FromStr for TokenizerImpl<M, N, PT, PP, D>
where
M: for<'de> Deserialize<'de> + Model,
N: for<'de> Deserialize<'de> + Normalizer,
PT: for<'de> Deserialize<'de> + PreTokenizer,
PP: for<'de> Deserialize<'de> + PostProcessor,
D: for<'de> Deserialize<'de> + Decoder,
{
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
Ok(serde_json::from_str(s)?)
}
}
impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D>
where
M: DeserializeOwned + Model,
N: DeserializeOwned + Normalizer,
PT: DeserializeOwned + PreTokenizer,
PP: DeserializeOwned + PostProcessor,
D: DeserializeOwned + Decoder,
{
/// Instantiate a new Tokenizer from the given file
pub fn from_file<P: AsRef<Path>>(file: P) -> Result<Self> {
let content = read_to_string(file)?;
let tokenizer = serde_json::from_str(&content)?;
Ok(tokenizer)
}
}
impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D>
where
M: DeserializeOwned + Model,
N: DeserializeOwned + Normalizer,
PT: DeserializeOwned + PreTokenizer,
PP: DeserializeOwned + PostProcessor,
D: DeserializeOwned + Decoder,
{
/// Instantiate a new Tokenizer from bytes
pub fn from_bytes<P: AsRef<[u8]>>(bytes: P) -> Result<Self> {
let tokenizer = serde_json::from_slice(bytes.as_ref())?;
Ok(tokenizer)
}
}
impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D>
where
M: DeserializeOwned + Model,
N: DeserializeOwned + Normalizer,
PT: DeserializeOwned + PreTokenizer,
PP: DeserializeOwned + PostProcessor,
D: DeserializeOwned + Decoder,
{
#[deprecated(
since = "0.14.0",
note = "Users should download the file separately using https://github.com/huggingface/hf-hub instead, which splits concerns of accessing the web, and should use the new cache layout"
)]
#[cfg(feature = "http")]
/// Instantiate a new Tokenizer from a file hosted on the Hugging Face Hub.
/// It expects the `identifier` of a model that includes a `tokenizer.json` file.
pub fn from_pretrained<S: AsRef<str>>(
identifier: S,
params: Option<crate::utils::from_pretrained::FromPretrainedParameters>,
) -> Result<Self> {
let tokenizer_file = crate::utils::from_pretrained::from_pretrained(identifier, params)?;
TokenizerImpl::from_file(tokenizer_file)
}
}
impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D>
where
M: Serialize,
N: Serialize,
PT: Serialize,
PP: Serialize,
D: Serialize,
{
/// Serialize the current tokenizer as a String
pub fn to_string(&self, pretty: bool) -> Result<String> {
Ok(if pretty {
serde_json::to_string_pretty(self)?
} else {
serde_json::to_string(self)?
})
}
/// Save the current tokenizer at the given path
pub fn save<P: AsRef<Path>>(&self, path: P, pretty: bool) -> Result<()> {
let serialized = self.to_string(pretty)?;
let mut file = File::create(path)?;
file.write_all(serialized.as_bytes())?;
Ok(())
}
}
| tokenizers/tokenizers/src/tokenizer/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/tokenizer/mod.rs",
"repo_id": "tokenizers",
"token_count": 22796
} | 350 |
use tokenizers::decoders::wordpiece::WordPiece as WordPieceDecoder;
use tokenizers::models::bpe::BPE;
use tokenizers::models::wordpiece::WordPiece;
use tokenizers::normalizers::bert::BertNormalizer;
use tokenizers::pre_tokenizers::bert::BertPreTokenizer;
use tokenizers::pre_tokenizers::byte_level::ByteLevel;
use tokenizers::processors::bert::BertProcessing;
use tokenizers::tokenizer::{Model, Tokenizer};
#[allow(dead_code)]
pub fn get_empty() -> Tokenizer {
Tokenizer::new(BPE::default())
}
#[allow(dead_code)]
pub fn get_byte_level_bpe() -> BPE {
BPE::from_file("data/gpt2-vocab.json", "data/gpt2-merges.txt")
.build()
.expect("Files not found, run `make test` to download these files")
}
#[allow(dead_code)]
pub fn get_byte_level(add_prefix_space: bool, trim_offsets: bool) -> Tokenizer {
let mut tokenizer = Tokenizer::new(get_byte_level_bpe());
tokenizer
.with_pre_tokenizer(Some(
ByteLevel::default().add_prefix_space(add_prefix_space),
))
.with_decoder(Some(ByteLevel::default()))
.with_post_processor(Some(ByteLevel::default().trim_offsets(trim_offsets)));
tokenizer
}
#[allow(dead_code)]
pub fn get_bert_wordpiece() -> WordPiece {
WordPiece::from_file("data/bert-base-uncased-vocab.txt")
.build()
.expect("Files not found, run `make test` to download these files")
}
#[allow(dead_code)]
pub fn get_bert() -> Tokenizer {
let mut tokenizer = Tokenizer::new(get_bert_wordpiece());
let sep = tokenizer.get_model().token_to_id("[SEP]").unwrap();
let cls = tokenizer.get_model().token_to_id("[CLS]").unwrap();
tokenizer
.with_normalizer(Some(BertNormalizer::default()))
.with_pre_tokenizer(Some(BertPreTokenizer))
.with_decoder(Some(WordPieceDecoder::default()))
.with_post_processor(Some(BertProcessing::new(
(String::from("[SEP]"), sep),
(String::from("[CLS]"), cls),
)));
tokenizer
}
| tokenizers/tokenizers/tests/common/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/tests/common/mod.rs",
"repo_id": "tokenizers",
"token_count": 811
} | 351 |
# Building a Vanilla JavaScript Application
In this tutorial, you’ll build a simple web application that detects objects in images using Transformers.js! To follow along, all you need is a code editor, a browser, and a simple server (e.g., VS Code Live Server).
Here's how it works: the user clicks “Upload image” and selects an image using an input dialog. After analysing the image with an object detection model, the predicted bounding boxes are overlaid on top of the image, like this:

Useful links:
- [Demo site](https://huggingface.co/spaces/Scrimba/vanilla-js-object-detector)
- [Interactive code walk-through (scrim)](https://scrimba.com/scrim/cKm9bDAg)
- [Source code](https://github.com/huggingface/transformers.js/tree/main/examples/vanilla-js)
## Step 1: HTML and CSS setup
Before we start building with Transformers.js, we first need to lay the groundwork with some markup and styling. Create an `index.html` file with a basic HTML skeleton, and add the following `<main>` tag to the `<body>`:
```html
<main class="container">
<label class="custom-file-upload">
<input id="file-upload" type="file" accept="image/*" />
<img class="upload-icon" src="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/upload-icon.png" />
Upload image
</label>
<div id="image-container"></div>
<p id="status"></p>
</main>
```
<details>
<summary>Click here to see a breakdown of this markup.</summary>
We’re adding an `<input>` element with `type="file"` that accepts images. This allows the user to select an image from their local file system using a popup dialog. The default appearance of this element is quite plain, so let's add some styling. The easiest way to achieve this is to wrap the `<input>` element in a `<label>`, hide the input, and then style the label as a button.
We’re also adding an empty `<div>` container for displaying the image, plus an empty `<p>` tag that we'll use to give status updates to the user while we download and run the model, since both of these operations take some time.
</details>
Next, add the following CSS rules in a `style.css` file and link it to the HTML:
```css
html,
body {
font-family: Arial, Helvetica, sans-serif;
}
.container {
margin: 40px auto;
width: max(50vw, 400px);
display: flex;
flex-direction: column;
align-items: center;
}
.custom-file-upload {
display: flex;
align-items: center;
gap: 10px;
border: 2px solid black;
padding: 8px 16px;
cursor: pointer;
border-radius: 6px;
}
#file-upload {
display: none;
}
.upload-icon {
width: 30px;
}
#image-container {
width: 100%;
margin-top: 20px;
position: relative;
}
#image-container>img {
width: 100%;
}
```
Here's how the UI looks at this point:

## Step 2: JavaScript setup
With the *boring* part out of the way, let's start writing some JavaScript code! Create a file called `index.js` and link to it in `index.html` by adding the following to the end of the `<body>`:
```html
<script src="./index.js" type="module"></script>
```
<Tip>
The `type="module"` attribute is important, as it turns our file into a [JavaScript module](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Modules), meaning that we’ll be able to use imports and exports.
</Tip>
Moving into `index.js`, let's import Transformers.js by adding the following line to the top of the file:
```js
import { pipeline, env } from "https://cdn.jsdelivr.net/npm/@huggingface/transformers";
```
Since we will be downloading the model from the Hugging Face Hub, we can skip the local model check by setting:
```js
env.allowLocalModels = false;
```
Next, let's create references to the various DOM elements we will access later:
```js
const fileUpload = document.getElementById("file-upload");
const imageContainer = document.getElementById("image-container");
const status = document.getElementById("status");
```
## Step 3: Create an object detection pipeline
We’re finally ready to create our object detection pipeline! As a reminder, a [pipeline](../pipelines) is a high-level interface provided by the library for performing a specific task. In our case, we will instantiate an object detection pipeline with the `pipeline()` helper function.
Since this can take some time (especially the first time when we have to download the ~40MB model), we first update the `status` paragraph so that the user knows that we’re about to load the model.
```js
status.textContent = "Loading model...";
```
<Tip>
To keep this tutorial simple, we'll be loading and running the model in the main (UI) thread. This is not recommended for production applications, since the UI will freeze while these operations run: JavaScript is a single-threaded language, so any heavy work on the main thread blocks rendering. To overcome this, you can use a [web worker](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers) to download and run the model in the background. However, we won't cover that in detail in this tutorial; a minimal sketch follows this tip.
</Tip>
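For reference, here's a minimal sketch of what the worker-based approach could look like. Everything here (the `worker.js` file name, the message shapes) is an illustrative assumption rather than part of this tutorial's code, and module workers aren't available in every browser:
```js
// worker.js (illustrative): load and run the model off the main (UI) thread.
import { pipeline } from "https://cdn.jsdelivr.net/npm/@huggingface/transformers";

let detectorPromise = null;

self.addEventListener("message", async (event) => {
  // Lazily create the pipeline on first use; event.data is assumed to be an image URL.
  detectorPromise ??= pipeline("object-detection", "Xenova/detr-resnet-50");
  const detector = await detectorPromise;
  const output = await detector(event.data, { threshold: 0.5, percentage: true });
  self.postMessage(output);
});
```
```js
// index.js (illustrative): hand inference off to the worker instead of blocking the UI.
const worker = new Worker("./worker.js", { type: "module" });
worker.addEventListener("message", (event) => {
  event.data.forEach(renderBox); // same rendering code as in Step 6
});
// Later, once an image has been loaded:
// worker.postMessage(image.src);
```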
We can now call the `pipeline()` function that we imported at the top of our file, to create our object detection pipeline:
```js
const detector = await pipeline("object-detection", "Xenova/detr-resnet-50");
```
We’re passing two arguments into the `pipeline()` function: (1) task and (2) model.
1. The first tells Transformers.js what kind of task we want to perform. In our case, that is `object-detection`, but there are many other tasks that the library supports, including `text-generation`, `sentiment-analysis`, `summarization`, and `automatic-speech-recognition`. See [here](https://huggingface.co/docs/transformers.js/pipelines#tasks) for the full list, and the sketch just after this list for an example of another task.
2. The second argument specifies which model we would like to use to solve the given task. We will use [`Xenova/detr-resnet-50`](https://huggingface.co/Xenova/detr-resnet-50), as it is a relatively small (~40MB) but powerful model for detecting objects in an image.
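Creating a pipeline for a different task follows exactly the same pattern. The snippet below is just an illustration (when the model argument is omitted, the library falls back to a default model for the task):
```js
// Illustrative: a sentiment-analysis pipeline created with the same helper.
const classifier = await pipeline("sentiment-analysis");
const result = await classifier("I love Transformers.js!");
// result is an array like [{ label: "POSITIVE", score: 0.99 }] (values illustrative).
```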
Once the function returns, we’ll tell the user that the app is ready to be used.
```js
status.textContent = "Ready";
```
## Step 4: Create the image uploader
The next step is to support image uploads. To achieve this, we will listen for "change" events from the `fileUpload` element. In the callback function, we use a `FileReader()` to read the contents of the image if one is selected (and do nothing otherwise).
```js
fileUpload.addEventListener("change", function (e) {
const file = e.target.files[0];
if (!file) {
return;
}
const reader = new FileReader();
// Set up a callback when the file is loaded
reader.onload = function (e2) {
imageContainer.innerHTML = "";
const image = document.createElement("img");
image.src = e2.target.result;
imageContainer.appendChild(image);
// detect(image); // Uncomment this line to run the model
};
reader.readAsDataURL(file);
});
```
Once the image has been loaded into the browser, the `reader.onload` callback function will be invoked. In it, we append the new `<img>` element to the `imageContainer` to be displayed to the user.
Don’t worry about the `detect(image)` function call (which is commented out) - we will explain it later! For now, try to run the app and upload an image to the browser. You should see your image displayed under the button like this:

## Step 5: Run the model
We’re finally ready to start interacting with Transformers.js! Let’s uncomment the `detect(image)` function call from the snippet above. Then we’ll define the function itself:
```js
async function detect(img) {
status.textContent = "Analysing...";
const output = await detector(img.src, {
threshold: 0.5,
percentage: true,
});
status.textContent = "";
console.log("output", output);
// ...
}
```
<Tip>
NOTE: The `detect` function needs to be asynchronous, since we’ll `await` the result of the model.
</Tip>
Once we’ve updated the `status` to "Analysing", we’re ready to perform *inference*, which simply means to run the model with some data. This is done via the `detector()` function that was returned from `pipeline()`. The first argument we’re passing is the image data (`img.src`).
The second argument is an options object:
- We set the `threshold` property to `0.5`, meaning the model must be at least 50% confident before claiming it has detected an object in the image. The lower the threshold, the more objects the model detects (at the risk of misidentifying some); the higher the threshold, the fewer objects it detects (at the risk of missing some in the scene).
- We also specify `percentage: true`, which means that we want the bounding box for the objects to be returned as percentages (instead of pixels).
If you now try to run the app and upload an image, you should see the following output logged to the console:

In the example above, we uploaded an image of two elephants, so the `output` variable holds an array with two objects, each containing a `label` (the string “elephant”), a `score` (indicating the model's confidence in its prediction) and a `box` object (representing the bounding box of the detected entity).
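Concretely, since we passed `percentage: true`, the coordinates in each `box` are fractions of the image size (between 0 and 1). The logged output therefore has roughly the following shape (the numbers below are illustrative, not exact model output):
```js
[
  { label: "elephant", score: 0.996, box: { xmin: 0.10, ymin: 0.21, xmax: 0.55, ymax: 0.87 } },
  { label: "elephant", score: 0.991, box: { xmin: 0.52, ymin: 0.18, xmax: 0.95, ymax: 0.90 } },
]
```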
## Step 6: Render the boxes
The final step is to display the `box` coordinates as rectangles around each of the elephants.
At the end of our `detect()` function, we’ll run the `renderBox` function on each object in the `output` array, using `.forEach()`.
```js
output.forEach(renderBox);
```
Here’s the code for the `renderBox()` function with comments to help you understand what’s going on:
```js
// Render a bounding box and label on the image
function renderBox({ box, label }) {
const { xmax, xmin, ymax, ymin } = box;
// Generate a random color for the box
const color = "#" + Math.floor(Math.random() * 0xffffff).toString(16).padStart(6, "0");
// Draw the box
const boxElement = document.createElement("div");
boxElement.className = "bounding-box";
Object.assign(boxElement.style, {
borderColor: color,
left: 100 * xmin + "%",
top: 100 * ymin + "%",
width: 100 * (xmax - xmin) + "%",
height: 100 * (ymax - ymin) + "%",
});
// Draw the label
const labelElement = document.createElement("span");
labelElement.textContent = label;
labelElement.className = "bounding-box-label";
labelElement.style.backgroundColor = color;
boxElement.appendChild(labelElement);
imageContainer.appendChild(boxElement);
}
```
The bounding box and label span also need some styling, so add the following to the `style.css` file:
```css
.bounding-box {
position: absolute;
box-sizing: border-box;
border-width: 2px;
border-style: solid;
}
.bounding-box-label {
color: white;
position: absolute;
font-size: 12px;
margin-top: -16px;
margin-left: -2px;
padding: 1px;
}
```
**And that’s it!**
You've now built your own fully functional AI application that detects objects in images, which runs completely in your browser: no external server, APIs, or build tools required. Pretty cool! 🥳

The app is live at the following URL: [https://huggingface.co/spaces/Scrimba/vanilla-js-object-detector](https://huggingface.co/spaces/Scrimba/vanilla-js-object-detector)
| transformers.js/docs/source/tutorials/vanilla-js.md/0 | {
"file_path": "transformers.js/docs/source/tutorials/vanilla-js.md",
"repo_id": "transformers.js",
"token_count": 3621
} | 352 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Transformers.js - Code completion playground</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.jsx"></script>
</body>
</html>
| transformers.js/examples/code-completion/index.html/0 | {
"file_path": "transformers.js/examples/code-completion/index.html",
"repo_id": "transformers.js",
"token_count": 133
} | 353 |
{
"name": "cross-encoder",
"version": "0.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "cross-encoder",
"version": "0.0.0",
"dependencies": {
"@xenova/transformers": "^2.15.0",
"react": "^18.2.0",
"react-dom": "^18.2.0"
},
"devDependencies": {
"@types/react": "^18.2.43",
"@types/react-dom": "^18.2.17",
"@vitejs/plugin-react": "^4.2.1",
"autoprefixer": "^10.4.16",
"eslint": "^8.55.0",
"eslint-plugin-react": "^7.33.2",
"eslint-plugin-react-hooks": "^4.6.0",
"eslint-plugin-react-refresh": "^0.4.5",
"postcss": "^8.4.33",
"tailwindcss": "^3.4.1",
"vite": "^5.1.7"
}
},
"node_modules/@aashutoshrathi/word-wrap": {
"version": "1.2.6",
"resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz",
"integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/@alloc/quick-lru": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz",
"integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==",
"dev": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/@ampproject/remapping": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz",
"integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==",
"dev": true,
"dependencies": {
"@jridgewell/gen-mapping": "^0.3.0",
"@jridgewell/trace-mapping": "^0.3.9"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@babel/code-frame": {
"version": "7.23.5",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz",
"integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==",
"dev": true,
"dependencies": {
"@babel/highlight": "^7.23.4",
"chalk": "^2.4.2"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/compat-data": {
"version": "7.23.5",
"resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.5.tgz",
"integrity": "sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw==",
"dev": true,
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/core": {
"version": "7.23.9",
"resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.9.tgz",
"integrity": "sha512-5q0175NOjddqpvvzU+kDiSOAk4PfdO6FvwCWoQ6RO7rTzEe8vlo+4HVfcnAREhD4npMs0e9uZypjTwzZPCf/cw==",
"dev": true,
"dependencies": {
"@ampproject/remapping": "^2.2.0",
"@babel/code-frame": "^7.23.5",
"@babel/generator": "^7.23.6",
"@babel/helper-compilation-targets": "^7.23.6",
"@babel/helper-module-transforms": "^7.23.3",
"@babel/helpers": "^7.23.9",
"@babel/parser": "^7.23.9",
"@babel/template": "^7.23.9",
"@babel/traverse": "^7.23.9",
"@babel/types": "^7.23.9",
"convert-source-map": "^2.0.0",
"debug": "^4.1.0",
"gensync": "^1.0.0-beta.2",
"json5": "^2.2.3",
"semver": "^6.3.1"
},
"engines": {
"node": ">=6.9.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/babel"
}
},
"node_modules/@babel/generator": {
"version": "7.23.6",
"resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.6.tgz",
"integrity": "sha512-qrSfCYxYQB5owCmGLbl8XRpX1ytXlpueOb0N0UmQwA073KZxejgQTzAmJezxvpwQD9uGtK2shHdi55QT+MbjIw==",
"dev": true,
"dependencies": {
"@babel/types": "^7.23.6",
"@jridgewell/gen-mapping": "^0.3.2",
"@jridgewell/trace-mapping": "^0.3.17",
"jsesc": "^2.5.1"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-compilation-targets": {
"version": "7.23.6",
"resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz",
"integrity": "sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ==",
"dev": true,
"dependencies": {
"@babel/compat-data": "^7.23.5",
"@babel/helper-validator-option": "^7.23.5",
"browserslist": "^4.22.2",
"lru-cache": "^5.1.1",
"semver": "^6.3.1"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-environment-visitor": {
"version": "7.22.20",
"resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz",
"integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==",
"dev": true,
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-function-name": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz",
"integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==",
"dev": true,
"dependencies": {
"@babel/template": "^7.22.15",
"@babel/types": "^7.23.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-hoist-variables": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz",
"integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==",
"dev": true,
"dependencies": {
"@babel/types": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-module-imports": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz",
"integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==",
"dev": true,
"dependencies": {
"@babel/types": "^7.22.15"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-module-transforms": {
"version": "7.23.3",
"resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz",
"integrity": "sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ==",
"dev": true,
"dependencies": {
"@babel/helper-environment-visitor": "^7.22.20",
"@babel/helper-module-imports": "^7.22.15",
"@babel/helper-simple-access": "^7.22.5",
"@babel/helper-split-export-declaration": "^7.22.6",
"@babel/helper-validator-identifier": "^7.22.20"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/helper-plugin-utils": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz",
"integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==",
"dev": true,
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-simple-access": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz",
"integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==",
"dev": true,
"dependencies": {
"@babel/types": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-split-export-declaration": {
"version": "7.22.6",
"resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz",
"integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==",
"dev": true,
"dependencies": {
"@babel/types": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-string-parser": {
"version": "7.23.4",
"resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz",
"integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==",
"dev": true,
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-validator-identifier": {
"version": "7.22.20",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz",
"integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==",
"dev": true,
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-validator-option": {
"version": "7.23.5",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz",
"integrity": "sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw==",
"dev": true,
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helpers": {
"version": "7.23.9",
"resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.9.tgz",
"integrity": "sha512-87ICKgU5t5SzOT7sBMfCOZQ2rHjRU+Pcb9BoILMYz600W6DkVRLFBPwQ18gwUVvggqXivaUakpnxWQGbpywbBQ==",
"dev": true,
"dependencies": {
"@babel/template": "^7.23.9",
"@babel/traverse": "^7.23.9",
"@babel/types": "^7.23.9"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/highlight": {
"version": "7.23.4",
"resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz",
"integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==",
"dev": true,
"dependencies": {
"@babel/helper-validator-identifier": "^7.22.20",
"chalk": "^2.4.2",
"js-tokens": "^4.0.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/parser": {
"version": "7.23.9",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.9.tgz",
"integrity": "sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA==",
"dev": true,
"bin": {
"parser": "bin/babel-parser.js"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@babel/plugin-transform-react-jsx-self": {
"version": "7.23.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.23.3.tgz",
"integrity": "sha512-qXRvbeKDSfwnlJnanVRp0SfuWE5DQhwQr5xtLBzp56Wabyo+4CMosF6Kfp+eOD/4FYpql64XVJ2W0pVLlJZxOQ==",
"dev": true,
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-react-jsx-source": {
"version": "7.23.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.23.3.tgz",
"integrity": "sha512-91RS0MDnAWDNvGC6Wio5XYkyWI39FMFO+JK9+4AlgaTH+yWwVTsw7/sn6LK0lH7c5F+TFkpv/3LfCJ1Ydwof/g==",
"dev": true,
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/template": {
"version": "7.23.9",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.23.9.tgz",
"integrity": "sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA==",
"dev": true,
"dependencies": {
"@babel/code-frame": "^7.23.5",
"@babel/parser": "^7.23.9",
"@babel/types": "^7.23.9"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/traverse": {
"version": "7.23.9",
"resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.9.tgz",
"integrity": "sha512-I/4UJ9vs90OkBtY6iiiTORVMyIhJ4kAVmsKo9KFc8UOxMeUfi2hvtIBsET5u9GizXE6/GFSuKCTNfgCswuEjRg==",
"dev": true,
"dependencies": {
"@babel/code-frame": "^7.23.5",
"@babel/generator": "^7.23.6",
"@babel/helper-environment-visitor": "^7.22.20",
"@babel/helper-function-name": "^7.23.0",
"@babel/helper-hoist-variables": "^7.22.5",
"@babel/helper-split-export-declaration": "^7.22.6",
"@babel/parser": "^7.23.9",
"@babel/types": "^7.23.9",
"debug": "^4.3.1",
"globals": "^11.1.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/types": {
"version": "7.23.9",
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.9.tgz",
"integrity": "sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q==",
"dev": true,
"dependencies": {
"@babel/helper-string-parser": "^7.23.4",
"@babel/helper-validator-identifier": "^7.22.20",
"to-fast-properties": "^2.0.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@esbuild/aix-ppc64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz",
"integrity": "sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==",
"cpu": [
"ppc64"
],
"dev": true,
"optional": true,
"os": [
"aix"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/android-arm": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.19.12.tgz",
"integrity": "sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w==",
"cpu": [
"arm"
],
"dev": true,
"optional": true,
"os": [
"android"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/android-arm64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz",
"integrity": "sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"android"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/android-x64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.19.12.tgz",
"integrity": "sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"android"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/darwin-arm64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz",
"integrity": "sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/darwin-x64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz",
"integrity": "sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/freebsd-arm64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz",
"integrity": "sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"freebsd"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/freebsd-x64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz",
"integrity": "sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"freebsd"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-arm": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz",
"integrity": "sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w==",
"cpu": [
"arm"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-arm64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz",
"integrity": "sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-ia32": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz",
"integrity": "sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA==",
"cpu": [
"ia32"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-loong64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz",
"integrity": "sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA==",
"cpu": [
"loong64"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-mips64el": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz",
"integrity": "sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w==",
"cpu": [
"mips64el"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-ppc64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz",
"integrity": "sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg==",
"cpu": [
"ppc64"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-riscv64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz",
"integrity": "sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg==",
"cpu": [
"riscv64"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-s390x": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz",
"integrity": "sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg==",
"cpu": [
"s390x"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-x64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz",
"integrity": "sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/netbsd-x64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz",
"integrity": "sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"netbsd"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/openbsd-x64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz",
"integrity": "sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"openbsd"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/sunos-x64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz",
"integrity": "sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"sunos"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/win32-arm64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz",
"integrity": "sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/win32-ia32": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz",
"integrity": "sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ==",
"cpu": [
"ia32"
],
"dev": true,
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/win32-x64": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz",
"integrity": "sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@eslint-community/eslint-utils": {
"version": "4.4.0",
"resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz",
"integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==",
"dev": true,
"dependencies": {
"eslint-visitor-keys": "^3.3.0"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"peerDependencies": {
"eslint": "^6.0.0 || ^7.0.0 || >=8.0.0"
}
},
"node_modules/@eslint-community/regexpp": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.0.tgz",
"integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==",
"dev": true,
"engines": {
"node": "^12.0.0 || ^14.0.0 || >=16.0.0"
}
},
"node_modules/@eslint/eslintrc": {
"version": "2.1.4",
"resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz",
"integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==",
"dev": true,
"dependencies": {
"ajv": "^6.12.4",
"debug": "^4.3.2",
"espree": "^9.6.0",
"globals": "^13.19.0",
"ignore": "^5.2.0",
"import-fresh": "^3.2.1",
"js-yaml": "^4.1.0",
"minimatch": "^3.1.2",
"strip-json-comments": "^3.1.1"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/eslint"
}
},
"node_modules/@eslint/eslintrc/node_modules/globals": {
"version": "13.24.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz",
"integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==",
"dev": true,
"dependencies": {
"type-fest": "^0.20.2"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/@eslint/js": {
"version": "8.56.0",
"resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.56.0.tgz",
"integrity": "sha512-gMsVel9D7f2HLkBma9VbtzZRehRogVRfbr++f06nL2vnCGCNlzOD+/MUov/F4p8myyAHspEhVobgjpX64q5m6A==",
"dev": true,
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
}
},
"node_modules/@huggingface/jinja": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/@huggingface/jinja/-/jinja-0.1.2.tgz",
"integrity": "sha512-x5mpbfJt1nKmVep5WNP5VjNsjWApWNj8pPYI+uYMkBWH9bWUJmQmHt2lbf0VCoQd54Oq3XuFEh/UyoVh7rPxmg==",
"engines": {
"node": ">=18"
}
},
"node_modules/@humanwhocodes/config-array": {
"version": "0.11.14",
"resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz",
"integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==",
"dev": true,
"dependencies": {
"@humanwhocodes/object-schema": "^2.0.2",
"debug": "^4.3.1",
"minimatch": "^3.0.5"
},
"engines": {
"node": ">=10.10.0"
}
},
"node_modules/@humanwhocodes/module-importer": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz",
"integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==",
"dev": true,
"engines": {
"node": ">=12.22"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/nzakas"
}
},
"node_modules/@humanwhocodes/object-schema": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.2.tgz",
"integrity": "sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw==",
"dev": true
},
"node_modules/@isaacs/cliui": {
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
"integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
"dev": true,
"dependencies": {
"string-width": "^5.1.2",
"string-width-cjs": "npm:string-width@^4.2.0",
"strip-ansi": "^7.0.1",
"strip-ansi-cjs": "npm:strip-ansi@^6.0.1",
"wrap-ansi": "^8.1.0",
"wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/@isaacs/cliui/node_modules/ansi-regex": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
"integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
"node_modules/@isaacs/cliui/node_modules/strip-ansi": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
"integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
"dev": true,
"dependencies": {
"ansi-regex": "^6.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
"node_modules/@jridgewell/gen-mapping": {
"version": "0.3.3",
"resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz",
"integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==",
"dev": true,
"dependencies": {
"@jridgewell/set-array": "^1.0.1",
"@jridgewell/sourcemap-codec": "^1.4.10",
"@jridgewell/trace-mapping": "^0.3.9"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/resolve-uri": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz",
"integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==",
"dev": true,
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/set-array": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz",
"integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==",
"dev": true,
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/sourcemap-codec": {
"version": "1.4.15",
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
"integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==",
"dev": true
},
"node_modules/@jridgewell/trace-mapping": {
"version": "0.3.22",
"resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.22.tgz",
"integrity": "sha512-Wf963MzWtA2sjrNt+g18IAln9lKnlRp+K2eH4jjIoF1wYeq3aMREpG09xhlhdzS0EjwU7qmUJYangWa+151vZw==",
"dev": true,
"dependencies": {
"@jridgewell/resolve-uri": "^3.1.0",
"@jridgewell/sourcemap-codec": "^1.4.14"
}
},
"node_modules/@nodelib/fs.scandir": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
"integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
"dev": true,
"dependencies": {
"@nodelib/fs.stat": "2.0.5",
"run-parallel": "^1.1.9"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/@nodelib/fs.stat": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
"integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
"dev": true,
"engines": {
"node": ">= 8"
}
},
"node_modules/@nodelib/fs.walk": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
"integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
"dev": true,
"dependencies": {
"@nodelib/fs.scandir": "2.1.5",
"fastq": "^1.6.0"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/@pkgjs/parseargs": {
"version": "0.11.0",
"resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
"integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
"dev": true,
"optional": true,
"engines": {
"node": ">=14"
}
},
"node_modules/@protobufjs/aspromise": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
"integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ=="
},
"node_modules/@protobufjs/base64": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
"integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg=="
},
"node_modules/@protobufjs/codegen": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
"integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg=="
},
"node_modules/@protobufjs/eventemitter": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
"integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q=="
},
"node_modules/@protobufjs/fetch": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
"integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
"dependencies": {
"@protobufjs/aspromise": "^1.1.1",
"@protobufjs/inquire": "^1.1.0"
}
},
"node_modules/@protobufjs/float": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
"integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ=="
},
"node_modules/@protobufjs/inquire": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
"integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q=="
},
"node_modules/@protobufjs/path": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
"integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA=="
},
"node_modules/@protobufjs/pool": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
"integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw=="
},
"node_modules/@protobufjs/utf8": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
"integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw=="
},
"node_modules/@rollup/rollup-android-arm-eabi": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.10.0.tgz",
"integrity": "sha512-/MeDQmcD96nVoRumKUljsYOLqfv1YFJps+0pTrb2Z9Nl/w5qNUysMaWQsrd1mvAlNT4yza1iVyIu4Q4AgF6V3A==",
"cpu": [
"arm"
],
"dev": true,
"optional": true,
"os": [
"android"
]
},
"node_modules/@rollup/rollup-android-arm64": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.10.0.tgz",
"integrity": "sha512-lvu0jK97mZDJdpZKDnZI93I0Om8lSDaiPx3OiCk0RXn3E8CMPJNS/wxjAvSJJzhhZpfjXsjLWL8LnS6qET4VNQ==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"android"
]
},
"node_modules/@rollup/rollup-darwin-arm64": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.10.0.tgz",
"integrity": "sha512-uFpayx8I8tyOvDkD7X6n0PriDRWxcqEjqgtlxnUA/G9oS93ur9aZ8c8BEpzFmsed1TH5WZNG5IONB8IiW90TQg==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"darwin"
]
},
"node_modules/@rollup/rollup-darwin-x64": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.10.0.tgz",
"integrity": "sha512-nIdCX03qFKoR/MwQegQBK+qZoSpO3LESurVAC6s6jazLA1Mpmgzo3Nj3H1vydXp/JM29bkCiuF7tDuToj4+U9Q==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"darwin"
]
},
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.10.0.tgz",
"integrity": "sha512-Fz7a+y5sYhYZMQFRkOyCs4PLhICAnxRX/GnWYReaAoruUzuRtcf+Qnw+T0CoAWbHCuz2gBUwmWnUgQ67fb3FYw==",
"cpu": [
"arm"
],
"dev": true,
"optional": true,
"os": [
"linux"
]
},
"node_modules/@rollup/rollup-linux-arm64-gnu": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.10.0.tgz",
"integrity": "sha512-yPtF9jIix88orwfTi0lJiqINnlWo6p93MtZEoaehZnmCzEmLL0eqjA3eGVeyQhMtxdV+Mlsgfwhh0+M/k1/V7Q==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"linux"
]
},
"node_modules/@rollup/rollup-linux-arm64-musl": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.10.0.tgz",
"integrity": "sha512-9GW9yA30ib+vfFiwjX+N7PnjTnCMiUffhWj4vkG4ukYv1kJ4T9gHNg8zw+ChsOccM27G9yXrEtMScf1LaCuoWQ==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"linux"
]
},
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.10.0.tgz",
"integrity": "sha512-X1ES+V4bMq2ws5fF4zHornxebNxMXye0ZZjUrzOrf7UMx1d6wMQtfcchZ8SqUnQPPHdOyOLW6fTcUiFgHFadRA==",
"cpu": [
"riscv64"
],
"dev": true,
"optional": true,
"os": [
"linux"
]
},
"node_modules/@rollup/rollup-linux-x64-gnu": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.10.0.tgz",
"integrity": "sha512-w/5OpT2EnI/Xvypw4FIhV34jmNqU5PZjZue2l2Y3ty1Ootm3SqhI+AmfhlUYGBTd9JnpneZCDnt3uNOiOBkMyw==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"linux"
]
},
"node_modules/@rollup/rollup-linux-x64-musl": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.10.0.tgz",
"integrity": "sha512-q/meftEe3QlwQiGYxD9rWwB21DoKQ9Q8wA40of/of6yGHhZuGfZO0c3WYkN9dNlopHlNT3mf5BPsUSxoPuVQaw==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"linux"
]
},
"node_modules/@rollup/rollup-win32-arm64-msvc": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.10.0.tgz",
"integrity": "sha512-NrR6667wlUfP0BHaEIKgYM/2va+Oj+RjZSASbBMnszM9k+1AmliRjHc3lJIiOehtSSjqYiO7R6KLNrWOX+YNSQ==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"win32"
]
},
"node_modules/@rollup/rollup-win32-ia32-msvc": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.10.0.tgz",
"integrity": "sha512-FV0Tpt84LPYDduIDcXvEC7HKtyXxdvhdAOvOeWMWbQNulxViH2O07QXkT/FffX4FqEI02jEbCJbr+YcuKdyyMg==",
"cpu": [
"ia32"
],
"dev": true,
"optional": true,
"os": [
"win32"
]
},
"node_modules/@rollup/rollup-win32-x64-msvc": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.10.0.tgz",
"integrity": "sha512-OZoJd+o5TaTSQeFFQ6WjFCiltiYVjIdsXxwu/XZ8qRpsvMQr4UsVrE5UyT9RIvsnuF47DqkJKhhVZ2Q9YW9IpQ==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"win32"
]
},
"node_modules/@types/babel__core": {
"version": "7.20.5",
"resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
"integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
"dev": true,
"dependencies": {
"@babel/parser": "^7.20.7",
"@babel/types": "^7.20.7",
"@types/babel__generator": "*",
"@types/babel__template": "*",
"@types/babel__traverse": "*"
}
},
"node_modules/@types/babel__generator": {
"version": "7.6.8",
"resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.8.tgz",
"integrity": "sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==",
"dev": true,
"dependencies": {
"@babel/types": "^7.0.0"
}
},
"node_modules/@types/babel__template": {
"version": "7.4.4",
"resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
"integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
"dev": true,
"dependencies": {
"@babel/parser": "^7.1.0",
"@babel/types": "^7.0.0"
}
},
"node_modules/@types/babel__traverse": {
"version": "7.20.5",
"resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.5.tgz",
"integrity": "sha512-WXCyOcRtH37HAUkpXhUduaxdm82b4GSlyTqajXviN4EfiuPgNYR109xMCKvpl6zPIpua0DGlMEDCq+g8EdoheQ==",
"dev": true,
"dependencies": {
"@babel/types": "^7.20.7"
}
},
"node_modules/@types/estree": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz",
"integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==",
"dev": true
},
"node_modules/@types/long": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz",
"integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA=="
},
"node_modules/@types/node": {
"version": "20.11.17",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.17.tgz",
"integrity": "sha512-QmgQZGWu1Yw9TDyAP9ZzpFJKynYNeOvwMJmaxABfieQoVoiVOS6MN1WSpqpRcbeA5+RW82kraAVxCCJg+780Qw==",
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/@types/prop-types": {
"version": "15.7.11",
"resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.11.tgz",
"integrity": "sha512-ga8y9v9uyeiLdpKddhxYQkxNDrfvuPrlFb0N1qnZZByvcElJaXthF1UhvCh9TLWJBEHeNtdnbysW7Y6Uq8CVng==",
"dev": true
},
"node_modules/@types/react": {
"version": "18.2.55",
"resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.55.tgz",
"integrity": "sha512-Y2Tz5P4yz23brwm2d7jNon39qoAtMMmalOQv6+fEFt1mT+FcM3D841wDpoUvFXhaYenuROCy3FZYqdTjM7qVyA==",
"dev": true,
"dependencies": {
"@types/prop-types": "*",
"@types/scheduler": "*",
"csstype": "^3.0.2"
}
},
"node_modules/@types/react-dom": {
"version": "18.2.19",
"resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.19.tgz",
"integrity": "sha512-aZvQL6uUbIJpjZk4U8JZGbau9KDeAwMfmhyWorxgBkqDIEf6ROjRozcmPIicqsUwPUjbkDfHKgGee1Lq65APcA==",
"dev": true,
"dependencies": {
"@types/react": "*"
}
},
"node_modules/@types/scheduler": {
"version": "0.16.8",
"resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.8.tgz",
"integrity": "sha512-WZLiwShhwLRmeV6zH+GkbOFT6Z6VklCItrDioxUnv+u4Ll+8vKeFySoFyK/0ctcRpOmwAicELfmys1sDc/Rw+A==",
"dev": true
},
"node_modules/@ungap/structured-clone": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz",
"integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==",
"dev": true
},
"node_modules/@vitejs/plugin-react": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.2.1.tgz",
"integrity": "sha512-oojO9IDc4nCUUi8qIR11KoQm0XFFLIwsRBwHRR4d/88IWghn1y6ckz/bJ8GHDCsYEJee8mDzqtJxh15/cisJNQ==",
"dev": true,
"dependencies": {
"@babel/core": "^7.23.5",
"@babel/plugin-transform-react-jsx-self": "^7.23.3",
"@babel/plugin-transform-react-jsx-source": "^7.23.3",
"@types/babel__core": "^7.20.5",
"react-refresh": "^0.14.0"
},
"engines": {
"node": "^14.18.0 || >=16.0.0"
},
"peerDependencies": {
"vite": "^4.2.0 || ^5.0.0"
}
},
"node_modules/@xenova/transformers": {
"version": "2.15.0",
"resolved": "https://registry.npmjs.org/@xenova/transformers/-/transformers-2.15.0.tgz",
"integrity": "sha512-e8pt+yLGSmwZnQR5Q/fq1NJ6fPr3+WKqxh/jF2PzfXFZ2KZsDdFQeCVlk8AnOADP4Aimlqy+Wp/xuws96/pX9A==",
"dependencies": {
"@huggingface/jinja": "^0.1.0",
"onnxruntime-web": "1.14.0",
"sharp": "^0.32.0"
},
"optionalDependencies": {
"onnxruntime-node": "1.14.0"
}
},
"node_modules/acorn": {
"version": "8.11.3",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz",
"integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==",
"dev": true,
"bin": {
"acorn": "bin/acorn"
},
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/acorn-jsx": {
"version": "5.3.2",
"resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
"integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
"dev": true,
"peerDependencies": {
"acorn": "^6.0.0 || ^7.0.0 || ^8.0.0"
}
},
"node_modules/ajv": {
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
"integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
"dev": true,
"dependencies": {
"fast-deep-equal": "^3.1.1",
"fast-json-stable-stringify": "^2.0.0",
"json-schema-traverse": "^0.4.1",
"uri-js": "^4.2.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/ansi-styles": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
"integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
"dev": true,
"dependencies": {
"color-convert": "^1.9.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/any-promise": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz",
"integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==",
"dev": true
},
"node_modules/anymatch": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
"integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
"dev": true,
"dependencies": {
"normalize-path": "^3.0.0",
"picomatch": "^2.0.4"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/arg": {
"version": "5.0.2",
"resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz",
"integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==",
"dev": true
},
"node_modules/argparse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
"dev": true
},
"node_modules/array-buffer-byte-length": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz",
"integrity": "sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.5",
"is-array-buffer": "^3.0.4"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/array-includes": {
"version": "3.1.7",
"resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.7.tgz",
"integrity": "sha512-dlcsNBIiWhPkHdOEEKnehA+RNUWDc4UqFtnIXU4uuYDPtA4LDkr7qip2p0VvFAEXNDr0yWZ9PJyIRiGjRLQzwQ==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"es-abstract": "^1.22.1",
"get-intrinsic": "^1.2.1",
"is-string": "^1.0.7"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/array.prototype.flat": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz",
"integrity": "sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"es-abstract": "^1.22.1",
"es-shim-unscopables": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/array.prototype.flatmap": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.2.tgz",
"integrity": "sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"es-abstract": "^1.22.1",
"es-shim-unscopables": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/array.prototype.tosorted": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.3.tgz",
"integrity": "sha512-/DdH4TiTmOKzyQbp/eadcCVexiCb36xJg7HshYOYJnNZFDj33GEv0P7GxsynpShhq4OLYJzbGcBDkLsDt7MnNg==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.5",
"define-properties": "^1.2.1",
"es-abstract": "^1.22.3",
"es-errors": "^1.1.0",
"es-shim-unscopables": "^1.0.2"
}
},
"node_modules/arraybuffer.prototype.slice": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz",
"integrity": "sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==",
"dev": true,
"dependencies": {
"array-buffer-byte-length": "^1.0.1",
"call-bind": "^1.0.5",
"define-properties": "^1.2.1",
"es-abstract": "^1.22.3",
"es-errors": "^1.2.1",
"get-intrinsic": "^1.2.3",
"is-array-buffer": "^3.0.4",
"is-shared-array-buffer": "^1.0.2"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/asynciterator.prototype": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/asynciterator.prototype/-/asynciterator.prototype-1.0.0.tgz",
"integrity": "sha512-wwHYEIS0Q80f5mosx3L/dfG5t5rjEa9Ft51GTaNt862EnpyGHpgz2RkZvLPp1oF5TnAiTohkEKVEu8pQPJI7Vg==",
"dev": true,
"dependencies": {
"has-symbols": "^1.0.3"
}
},
"node_modules/autoprefixer": {
"version": "10.4.17",
"resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.17.tgz",
"integrity": "sha512-/cpVNRLSfhOtcGflT13P2794gVSgmPgTR+erw5ifnMLZb0UnSlkK4tquLmkd3BhA+nLo5tX8Cu0upUsGKvKbmg==",
"dev": true,
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/autoprefixer"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"browserslist": "^4.22.2",
"caniuse-lite": "^1.0.30001578",
"fraction.js": "^4.3.7",
"normalize-range": "^0.1.2",
"picocolors": "^1.0.0",
"postcss-value-parser": "^4.2.0"
},
"bin": {
"autoprefixer": "bin/autoprefixer"
},
"engines": {
"node": "^10 || ^12 || >=14"
},
"peerDependencies": {
"postcss": "^8.1.0"
}
},
"node_modules/available-typed-arrays": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.6.tgz",
"integrity": "sha512-j1QzY8iPNPG4o4xmO3ptzpRxTciqD3MgEHtifP/YnJpIo58Xu+ne4BejlbkuaLfXn/nz6HFiw29bLpj2PNMdGg==",
"dev": true,
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/b4a": {
"version": "1.6.6",
"resolved": "https://registry.npmjs.org/b4a/-/b4a-1.6.6.tgz",
"integrity": "sha512-5Tk1HLk6b6ctmjIkAcU/Ujv/1WqiDl0F0JdRCR80VsOcUlHcu7pWeWRlOqQLHfDEsVx9YH/aif5AG4ehoCtTmg=="
},
"node_modules/balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
"dev": true
},
"node_modules/bare-events": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/bare-events/-/bare-events-2.2.0.tgz",
"integrity": "sha512-Yyyqff4PIFfSuthCZqLlPISTWHmnQxoPuAvkmgzsJEmG3CesdIv6Xweayl0JkCZJSB2yYIdJyEz97tpxNhgjbg==",
"optional": true
},
"node_modules/bare-fs": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/bare-fs/-/bare-fs-2.1.5.tgz",
"integrity": "sha512-5t0nlecX+N2uJqdxe9d18A98cp2u9BETelbjKpiVgQqzzmVNFYWEAjQHqS+2Khgto1vcwhik9cXucaj5ve2WWA==",
"optional": true,
"dependencies": {
"bare-events": "^2.0.0",
"bare-os": "^2.0.0",
"bare-path": "^2.0.0",
"streamx": "^2.13.0"
}
},
"node_modules/bare-os": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/bare-os/-/bare-os-2.1.3.tgz",
"integrity": "sha512-YMKOQzoMouGCrYgJ0rassKvmbTz+TYDOKKmu4JkfNXS/qKn+If1ODCMEXnmknQXfnB1PS8bR+uHu6xBgwAQXew==",
"optional": true
},
"node_modules/bare-path": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/bare-path/-/bare-path-2.1.0.tgz",
"integrity": "sha512-DIIg7ts8bdRKwJRJrUMy/PICEaQZaPGZ26lsSx9MJSwIhSrcdHn7/C8W+XmnG/rKi6BaRcz+JO00CjZteybDtw==",
"optional": true,
"dependencies": {
"bare-os": "^2.1.0"
}
},
"node_modules/base64-js": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/binary-extensions": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
"integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/bl": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz",
"integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==",
"dependencies": {
"buffer": "^5.5.0",
"inherits": "^2.0.4",
"readable-stream": "^3.4.0"
}
},
"node_modules/brace-expansion": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"node_modules/braces": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
"dev": true,
"dependencies": {
"fill-range": "^7.1.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/browserslist": {
"version": "4.22.3",
"resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.3.tgz",
"integrity": "sha512-UAp55yfwNv0klWNapjs/ktHoguxuQNGnOzxYmfnXIS+8AsRDZkSDxg7R1AX3GKzn078SBI5dzwzj/Yx0Or0e3A==",
"dev": true,
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/browserslist"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"caniuse-lite": "^1.0.30001580",
"electron-to-chromium": "^1.4.648",
"node-releases": "^2.0.14",
"update-browserslist-db": "^1.0.13"
},
"bin": {
"browserslist": "cli.js"
},
"engines": {
"node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
}
},
"node_modules/buffer": {
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz",
"integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"dependencies": {
"base64-js": "^1.3.1",
"ieee754": "^1.1.13"
}
},
"node_modules/call-bind": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz",
"integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==",
"dev": true,
"dependencies": {
"es-define-property": "^1.0.0",
"es-errors": "^1.3.0",
"function-bind": "^1.1.2",
"get-intrinsic": "^1.2.4",
"set-function-length": "^1.2.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
"integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/camelcase-css": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
"integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==",
"dev": true,
"engines": {
"node": ">= 6"
}
},
"node_modules/caniuse-lite": {
"version": "1.0.30001587",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001587.tgz",
"integrity": "sha512-HMFNotUmLXn71BQxg8cijvqxnIAofforZOwGsxyXJ0qugTdspUF4sPSJ2vhgprHCB996tIDzEq1ubumPDV8ULA==",
"dev": true,
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/caniuse-lite"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
]
},
"node_modules/chalk": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
"integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
"dev": true,
"dependencies": {
"ansi-styles": "^3.2.1",
"escape-string-regexp": "^1.0.5",
"supports-color": "^5.3.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/chokidar": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz",
"integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==",
"dev": true,
"dependencies": {
"anymatch": "~3.1.2",
"braces": "~3.0.2",
"glob-parent": "~5.1.2",
"is-binary-path": "~2.1.0",
"is-glob": "~4.0.1",
"normalize-path": "~3.0.0",
"readdirp": "~3.6.0"
},
"engines": {
"node": ">= 8.10.0"
},
"funding": {
"url": "https://paulmillr.com/funding/"
},
"optionalDependencies": {
"fsevents": "~2.3.2"
}
},
"node_modules/chokidar/node_modules/glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dev": true,
"dependencies": {
"is-glob": "^4.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/chownr": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
"integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="
},
"node_modules/color": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz",
"integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==",
"dependencies": {
"color-convert": "^2.0.1",
"color-string": "^1.9.0"
},
"engines": {
"node": ">=12.5.0"
}
},
"node_modules/color-convert": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
"integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
"dev": true,
"dependencies": {
"color-name": "1.1.3"
}
},
"node_modules/color-name": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
"integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw=="
},
"node_modules/color-string": {
"version": "1.9.1",
"resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz",
"integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==",
"dependencies": {
"color-name": "^1.0.0",
"simple-swizzle": "^0.2.2"
}
},
"node_modules/color/node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/color/node_modules/color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
},
"node_modules/commander": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
"integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
"dev": true,
"engines": {
"node": ">= 6"
}
},
"node_modules/concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
"integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
"dev": true
},
"node_modules/convert-source-map": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
"integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
"dev": true
},
"node_modules/cross-spawn": {
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
"integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
"dev": true,
"dependencies": {
"path-key": "^3.1.0",
"shebang-command": "^2.0.0",
"which": "^2.0.1"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/cssesc": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
"integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==",
"dev": true,
"bin": {
"cssesc": "bin/cssesc"
},
"engines": {
"node": ">=4"
}
},
"node_modules/csstype": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz",
"integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==",
"dev": true
},
"node_modules/debug": {
"version": "4.3.4",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
"integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
"dev": true,
"dependencies": {
"ms": "2.1.2"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/decompress-response": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz",
"integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==",
"dependencies": {
"mimic-response": "^3.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/deep-extend": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
"integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
"engines": {
"node": ">=4.0.0"
}
},
"node_modules/deep-is": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
"integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==",
"dev": true
},
"node_modules/define-data-property": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
"integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==",
"dev": true,
"dependencies": {
"es-define-property": "^1.0.0",
"es-errors": "^1.3.0",
"gopd": "^1.0.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/define-properties": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz",
"integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==",
"dev": true,
"dependencies": {
"define-data-property": "^1.0.1",
"has-property-descriptors": "^1.0.0",
"object-keys": "^1.1.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/detect-libc": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz",
"integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==",
"engines": {
"node": ">=8"
}
},
"node_modules/didyoumean": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz",
"integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==",
"dev": true
},
"node_modules/dlv": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz",
"integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==",
"dev": true
},
"node_modules/doctrine": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
"integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
"dev": true,
"dependencies": {
"esutils": "^2.0.2"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/eastasianwidth": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
"integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==",
"dev": true
},
"node_modules/electron-to-chromium": {
"version": "1.4.668",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.668.tgz",
"integrity": "sha512-ZOBocMYCehr9W31+GpMclR+KBaDZOoAEabLdhpZ8oU1JFDwIaFY0UDbpXVEUFc0BIP2O2Qn3rkfCjQmMR4T/bQ==",
"dev": true
},
"node_modules/emoji-regex": {
"version": "9.2.2",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
"integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==",
"dev": true
},
"node_modules/end-of-stream": {
"version": "1.4.4",
"resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
"integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
"dependencies": {
"once": "^1.4.0"
}
},
"node_modules/es-abstract": {
"version": "1.22.4",
"resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.22.4.tgz",
"integrity": "sha512-vZYJlk2u6qHYxBOTjAeg7qUxHdNfih64Uu2J8QqWgXZ2cri0ZpJAkzDUK/q593+mvKwlxyaxr6F1Q+3LKoQRgg==",
"dev": true,
"dependencies": {
"array-buffer-byte-length": "^1.0.1",
"arraybuffer.prototype.slice": "^1.0.3",
"available-typed-arrays": "^1.0.6",
"call-bind": "^1.0.7",
"es-define-property": "^1.0.0",
"es-errors": "^1.3.0",
"es-set-tostringtag": "^2.0.2",
"es-to-primitive": "^1.2.1",
"function.prototype.name": "^1.1.6",
"get-intrinsic": "^1.2.4",
"get-symbol-description": "^1.0.2",
"globalthis": "^1.0.3",
"gopd": "^1.0.1",
"has-property-descriptors": "^1.0.2",
"has-proto": "^1.0.1",
"has-symbols": "^1.0.3",
"hasown": "^2.0.1",
"internal-slot": "^1.0.7",
"is-array-buffer": "^3.0.4",
"is-callable": "^1.2.7",
"is-negative-zero": "^2.0.2",
"is-regex": "^1.1.4",
"is-shared-array-buffer": "^1.0.2",
"is-string": "^1.0.7",
"is-typed-array": "^1.1.13",
"is-weakref": "^1.0.2",
"object-inspect": "^1.13.1",
"object-keys": "^1.1.1",
"object.assign": "^4.1.5",
"regexp.prototype.flags": "^1.5.2",
"safe-array-concat": "^1.1.0",
"safe-regex-test": "^1.0.3",
"string.prototype.trim": "^1.2.8",
"string.prototype.trimend": "^1.0.7",
"string.prototype.trimstart": "^1.0.7",
"typed-array-buffer": "^1.0.1",
"typed-array-byte-length": "^1.0.0",
"typed-array-byte-offset": "^1.0.0",
"typed-array-length": "^1.0.4",
"unbox-primitive": "^1.0.2",
"which-typed-array": "^1.1.14"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/es-define-property": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz",
"integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==",
"dev": true,
"dependencies": {
"get-intrinsic": "^1.2.4"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-errors": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
"dev": true,
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-iterator-helpers": {
"version": "1.0.17",
"resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.0.17.tgz",
"integrity": "sha512-lh7BsUqelv4KUbR5a/ZTaGGIMLCjPGPqJ6q+Oq24YP0RdyptX1uzm4vvaqzk7Zx3bpl/76YLTTDj9L7uYQ92oQ==",
"dev": true,
"dependencies": {
"asynciterator.prototype": "^1.0.0",
"call-bind": "^1.0.7",
"define-properties": "^1.2.1",
"es-abstract": "^1.22.4",
"es-errors": "^1.3.0",
"es-set-tostringtag": "^2.0.2",
"function-bind": "^1.1.2",
"get-intrinsic": "^1.2.4",
"globalthis": "^1.0.3",
"has-property-descriptors": "^1.0.2",
"has-proto": "^1.0.1",
"has-symbols": "^1.0.3",
"internal-slot": "^1.0.7",
"iterator.prototype": "^1.1.2",
"safe-array-concat": "^1.1.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-set-tostringtag": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.2.tgz",
"integrity": "sha512-BuDyupZt65P9D2D2vA/zqcI3G5xRsklm5N3xCwuiy+/vKy8i0ifdsQP1sLgO4tZDSCaQUSnmC48khknGMV3D2Q==",
"dev": true,
"dependencies": {
"get-intrinsic": "^1.2.2",
"has-tostringtag": "^1.0.0",
"hasown": "^2.0.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-shim-unscopables": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz",
"integrity": "sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==",
"dev": true,
"dependencies": {
"hasown": "^2.0.0"
}
},
"node_modules/es-to-primitive": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz",
"integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==",
"dev": true,
"dependencies": {
"is-callable": "^1.1.4",
"is-date-object": "^1.0.1",
"is-symbol": "^1.0.2"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/esbuild": {
"version": "0.19.12",
"resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.12.tgz",
"integrity": "sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==",
"dev": true,
"hasInstallScript": true,
"bin": {
"esbuild": "bin/esbuild"
},
"engines": {
"node": ">=12"
},
"optionalDependencies": {
"@esbuild/aix-ppc64": "0.19.12",
"@esbuild/android-arm": "0.19.12",
"@esbuild/android-arm64": "0.19.12",
"@esbuild/android-x64": "0.19.12",
"@esbuild/darwin-arm64": "0.19.12",
"@esbuild/darwin-x64": "0.19.12",
"@esbuild/freebsd-arm64": "0.19.12",
"@esbuild/freebsd-x64": "0.19.12",
"@esbuild/linux-arm": "0.19.12",
"@esbuild/linux-arm64": "0.19.12",
"@esbuild/linux-ia32": "0.19.12",
"@esbuild/linux-loong64": "0.19.12",
"@esbuild/linux-mips64el": "0.19.12",
"@esbuild/linux-ppc64": "0.19.12",
"@esbuild/linux-riscv64": "0.19.12",
"@esbuild/linux-s390x": "0.19.12",
"@esbuild/linux-x64": "0.19.12",
"@esbuild/netbsd-x64": "0.19.12",
"@esbuild/openbsd-x64": "0.19.12",
"@esbuild/sunos-x64": "0.19.12",
"@esbuild/win32-arm64": "0.19.12",
"@esbuild/win32-ia32": "0.19.12",
"@esbuild/win32-x64": "0.19.12"
}
},
"node_modules/escalade": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz",
"integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/escape-string-regexp": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
"integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
"dev": true,
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/eslint": {
"version": "8.56.0",
"resolved": "https://registry.npmjs.org/eslint/-/eslint-8.56.0.tgz",
"integrity": "sha512-Go19xM6T9puCOWntie1/P997aXxFsOi37JIHRWI514Hc6ZnaHGKY9xFhrU65RT6CcBEzZoGG1e6Nq+DT04ZtZQ==",
"dev": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.2.0",
"@eslint-community/regexpp": "^4.6.1",
"@eslint/eslintrc": "^2.1.4",
"@eslint/js": "8.56.0",
"@humanwhocodes/config-array": "^0.11.13",
"@humanwhocodes/module-importer": "^1.0.1",
"@nodelib/fs.walk": "^1.2.8",
"@ungap/structured-clone": "^1.2.0",
"ajv": "^6.12.4",
"chalk": "^4.0.0",
"cross-spawn": "^7.0.2",
"debug": "^4.3.2",
"doctrine": "^3.0.0",
"escape-string-regexp": "^4.0.0",
"eslint-scope": "^7.2.2",
"eslint-visitor-keys": "^3.4.3",
"espree": "^9.6.1",
"esquery": "^1.4.2",
"esutils": "^2.0.2",
"fast-deep-equal": "^3.1.3",
"file-entry-cache": "^6.0.1",
"find-up": "^5.0.0",
"glob-parent": "^6.0.2",
"globals": "^13.19.0",
"graphemer": "^1.4.0",
"ignore": "^5.2.0",
"imurmurhash": "^0.1.4",
"is-glob": "^4.0.0",
"is-path-inside": "^3.0.3",
"js-yaml": "^4.1.0",
"json-stable-stringify-without-jsonify": "^1.0.1",
"levn": "^0.4.1",
"lodash.merge": "^4.6.2",
"minimatch": "^3.1.2",
"natural-compare": "^1.4.0",
"optionator": "^0.9.3",
"strip-ansi": "^6.0.1",
"text-table": "^0.2.0"
},
"bin": {
"eslint": "bin/eslint.js"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/eslint"
}
},
"node_modules/eslint-plugin-react": {
"version": "7.33.2",
"resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.33.2.tgz",
"integrity": "sha512-73QQMKALArI8/7xGLNI/3LylrEYrlKZSb5C9+q3OtOewTnMQi5cT+aE9E41sLCmli3I9PGGmD1yiZydyo4FEPw==",
"dev": true,
"dependencies": {
"array-includes": "^3.1.6",
"array.prototype.flatmap": "^1.3.1",
"array.prototype.tosorted": "^1.1.1",
"doctrine": "^2.1.0",
"es-iterator-helpers": "^1.0.12",
"estraverse": "^5.3.0",
"jsx-ast-utils": "^2.4.1 || ^3.0.0",
"minimatch": "^3.1.2",
"object.entries": "^1.1.6",
"object.fromentries": "^2.0.6",
"object.hasown": "^1.1.2",
"object.values": "^1.1.6",
"prop-types": "^15.8.1",
"resolve": "^2.0.0-next.4",
"semver": "^6.3.1",
"string.prototype.matchall": "^4.0.8"
},
"engines": {
"node": ">=4"
},
"peerDependencies": {
"eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8"
}
},
"node_modules/eslint-plugin-react-hooks": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz",
"integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==",
"dev": true,
"engines": {
"node": ">=10"
},
"peerDependencies": {
"eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0"
}
},
"node_modules/eslint-plugin-react-refresh": {
"version": "0.4.5",
"resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.5.tgz",
"integrity": "sha512-D53FYKJa+fDmZMtriODxvhwrO+IOqrxoEo21gMA0sjHdU6dPVH4OhyFip9ypl8HOF5RV5KdTo+rBQLvnY2cO8w==",
"dev": true,
"peerDependencies": {
"eslint": ">=7"
}
},
"node_modules/eslint-plugin-react/node_modules/doctrine": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz",
"integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==",
"dev": true,
"dependencies": {
"esutils": "^2.0.2"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/eslint-scope": {
"version": "7.2.2",
"resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz",
"integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==",
"dev": true,
"dependencies": {
"esrecurse": "^4.3.0",
"estraverse": "^5.2.0"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/eslint"
}
},
"node_modules/eslint-visitor-keys": {
"version": "3.4.3",
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz",
"integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==",
"dev": true,
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/eslint"
}
},
"node_modules/eslint/node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"dependencies": {
"color-convert": "^2.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/eslint/node_modules/chalk": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
"integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
"dev": true,
"dependencies": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/eslint/node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/eslint/node_modules/color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true
},
"node_modules/eslint/node_modules/escape-string-regexp": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
"integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
"dev": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/eslint/node_modules/globals": {
"version": "13.24.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz",
"integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==",
"dev": true,
"dependencies": {
"type-fest": "^0.20.2"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/eslint/node_modules/has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/eslint/node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/espree": {
"version": "9.6.1",
"resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz",
"integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==",
"dev": true,
"dependencies": {
"acorn": "^8.9.0",
"acorn-jsx": "^5.3.2",
"eslint-visitor-keys": "^3.4.1"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/eslint"
}
},
"node_modules/esquery": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz",
"integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==",
"dev": true,
"dependencies": {
"estraverse": "^5.1.0"
},
"engines": {
"node": ">=0.10"
}
},
"node_modules/esrecurse": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
"integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
"dev": true,
"dependencies": {
"estraverse": "^5.2.0"
},
"engines": {
"node": ">=4.0"
}
},
"node_modules/estraverse": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
"integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
"dev": true,
"engines": {
"node": ">=4.0"
}
},
"node_modules/esutils": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
"integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/expand-template": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz",
"integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==",
"engines": {
"node": ">=6"
}
},
"node_modules/fast-deep-equal": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
"dev": true
},
"node_modules/fast-fifo": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.3.2.tgz",
"integrity": "sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ=="
},
"node_modules/fast-glob": {
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz",
"integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==",
"dev": true,
"dependencies": {
"@nodelib/fs.stat": "^2.0.2",
"@nodelib/fs.walk": "^1.2.3",
"glob-parent": "^5.1.2",
"merge2": "^1.3.0",
"micromatch": "^4.0.4"
},
"engines": {
"node": ">=8.6.0"
}
},
"node_modules/fast-glob/node_modules/glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dev": true,
"dependencies": {
"is-glob": "^4.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/fast-json-stable-stringify": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
"integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
"dev": true
},
"node_modules/fast-levenshtein": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
"integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==",
"dev": true
},
"node_modules/fastq": {
"version": "1.17.1",
"resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz",
"integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==",
"dev": true,
"dependencies": {
"reusify": "^1.0.4"
}
},
"node_modules/file-entry-cache": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz",
"integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==",
"dev": true,
"dependencies": {
"flat-cache": "^3.0.4"
},
"engines": {
"node": "^10.12.0 || >=12.0.0"
}
},
"node_modules/fill-range": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
"dev": true,
"dependencies": {
"to-regex-range": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/find-up": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
"integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==",
"dev": true,
"dependencies": {
"locate-path": "^6.0.0",
"path-exists": "^4.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/flat-cache": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz",
"integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==",
"dev": true,
"dependencies": {
"flatted": "^3.2.9",
"keyv": "^4.5.3",
"rimraf": "^3.0.2"
},
"engines": {
"node": "^10.12.0 || >=12.0.0"
}
},
"node_modules/flatbuffers": {
"version": "1.12.0",
"resolved": "https://registry.npmjs.org/flatbuffers/-/flatbuffers-1.12.0.tgz",
"integrity": "sha512-c7CZADjRcl6j0PlvFy0ZqXQ67qSEZfrVPynmnL+2zPc+NtMvrF8Y0QceMo7QqnSPc7+uWjUIAbvCQ5WIKlMVdQ=="
},
"node_modules/flatted": {
"version": "3.2.9",
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.9.tgz",
"integrity": "sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==",
"dev": true
},
"node_modules/for-each": {
"version": "0.3.3",
"resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz",
"integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==",
"dev": true,
"dependencies": {
"is-callable": "^1.1.3"
}
},
"node_modules/foreground-child": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz",
"integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==",
"dev": true,
"dependencies": {
"cross-spawn": "^7.0.0",
"signal-exit": "^4.0.1"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/fraction.js": {
"version": "4.3.7",
"resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz",
"integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==",
"dev": true,
"engines": {
"node": "*"
},
"funding": {
"type": "patreon",
"url": "https://github.com/sponsors/rawify"
}
},
"node_modules/fs-constants": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz",
"integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow=="
},
"node_modules/fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
"dev": true
},
"node_modules/fsevents": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
"integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
"dev": true,
"hasInstallScript": true,
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
}
},
"node_modules/function-bind": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
"dev": true,
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/function.prototype.name": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz",
"integrity": "sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"es-abstract": "^1.22.1",
"functions-have-names": "^1.2.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/functions-have-names": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz",
"integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==",
"dev": true,
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/gensync": {
"version": "1.0.0-beta.2",
"resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
"integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
"dev": true,
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/get-intrinsic": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
"integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
"dev": true,
"dependencies": {
"es-errors": "^1.3.0",
"function-bind": "^1.1.2",
"has-proto": "^1.0.1",
"has-symbols": "^1.0.3",
"hasown": "^2.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-symbol-description": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.2.tgz",
"integrity": "sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.5",
"es-errors": "^1.3.0",
"get-intrinsic": "^1.2.4"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/github-from-package": {
"version": "0.0.0",
"resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz",
"integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw=="
},
"node_modules/glob": {
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
"dev": true,
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
},
"engines": {
"node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/glob-parent": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
"integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
"dev": true,
"dependencies": {
"is-glob": "^4.0.3"
},
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/globals": {
"version": "11.12.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
"integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/globalthis": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz",
"integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==",
"dev": true,
"dependencies": {
"define-properties": "^1.1.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/gopd": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
"integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
"dev": true,
"dependencies": {
"get-intrinsic": "^1.1.3"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/graphemer": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz",
"integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==",
"dev": true
},
"node_modules/guid-typescript": {
"version": "1.0.9",
"resolved": "https://registry.npmjs.org/guid-typescript/-/guid-typescript-1.0.9.tgz",
"integrity": "sha512-Y8T4vYhEfwJOTbouREvG+3XDsjr8E3kIr7uf+JZ0BYloFsttiHU0WfvANVsR7TxNUJa/WpCnw/Ino/p+DeBhBQ=="
},
"node_modules/has-bigints": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz",
"integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==",
"dev": true,
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-flag": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
"integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/has-property-descriptors": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz",
"integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==",
"dev": true,
"dependencies": {
"es-define-property": "^1.0.0"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz",
"integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==",
"dev": true,
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-symbols": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
"integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==",
"dev": true,
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-tostringtag": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
"integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
"dev": true,
"dependencies": {
"has-symbols": "^1.0.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/hasown": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.1.tgz",
"integrity": "sha512-1/th4MHjnwncwXsIW6QMzlvYL9kG5e/CpVvLRZe4XPa8TOUNbCELqmvhDmnkNsAjwaG4+I8gJJL0JBvTTLO9qA==",
"dev": true,
"dependencies": {
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/ieee754": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
"integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/ignore": {
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz",
"integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==",
"dev": true,
"engines": {
"node": ">= 4"
}
},
"node_modules/import-fresh": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
"integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
"dev": true,
"dependencies": {
"parent-module": "^1.0.0",
"resolve-from": "^4.0.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/imurmurhash": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
"integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
"dev": true,
"engines": {
"node": ">=0.8.19"
}
},
"node_modules/inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
"dev": true,
"dependencies": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"node_modules/ini": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
"integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew=="
},
"node_modules/internal-slot": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.7.tgz",
"integrity": "sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==",
"dev": true,
"dependencies": {
"es-errors": "^1.3.0",
"hasown": "^2.0.0",
"side-channel": "^1.0.4"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/is-array-buffer": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.4.tgz",
"integrity": "sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"get-intrinsic": "^1.2.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-arrayish": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz",
"integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ=="
},
"node_modules/is-async-function": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.0.0.tgz",
"integrity": "sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==",
"dev": true,
"dependencies": {
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-bigint": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz",
"integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==",
"dev": true,
"dependencies": {
"has-bigints": "^1.0.1"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-binary-path": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
"integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
"dev": true,
"dependencies": {
"binary-extensions": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/is-boolean-object": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz",
"integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-callable": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz",
"integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==",
"dev": true,
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-core-module": {
"version": "2.13.1",
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz",
"integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==",
"dev": true,
"dependencies": {
"hasown": "^2.0.0"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-date-object": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz",
"integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==",
"dev": true,
"dependencies": {
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-extglob": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
"integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-finalizationregistry": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.0.2.tgz",
"integrity": "sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/is-generator-function": {
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz",
"integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==",
"dev": true,
"dependencies": {
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-glob": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
"integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
"dev": true,
"dependencies": {
"is-extglob": "^2.1.1"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-map": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.2.tgz",
"integrity": "sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg==",
"dev": true,
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-negative-zero": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz",
"integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==",
"dev": true,
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-number": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"dev": true,
"engines": {
"node": ">=0.12.0"
}
},
"node_modules/is-number-object": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz",
"integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==",
"dev": true,
"dependencies": {
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-path-inside": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz",
"integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/is-regex": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz",
"integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-set": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.2.tgz",
"integrity": "sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g==",
"dev": true,
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-shared-array-buffer": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz",
"integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-string": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz",
"integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==",
"dev": true,
"dependencies": {
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-symbol": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz",
"integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==",
"dev": true,
"dependencies": {
"has-symbols": "^1.0.2"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-typed-array": {
"version": "1.1.13",
"resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.13.tgz",
"integrity": "sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==",
"dev": true,
"dependencies": {
"which-typed-array": "^1.1.14"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-weakmap": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.1.tgz",
"integrity": "sha512-NSBR4kH5oVj1Uwvv970ruUkCV7O1mzgVFO4/rev2cLRda9Tm9HrL70ZPut4rOHgY0FNrUu9BCbXA2sdQ+x0chA==",
"dev": true,
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-weakref": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz",
"integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-weakset": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.2.tgz",
"integrity": "sha512-t2yVvttHkQktwnNNmBQ98AhENLdPUTDTE21uPqAQ0ARwQfGeQKRVS0NNurH7bTf7RrvcVn1OOge45CnBeHCSmg==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"get-intrinsic": "^1.1.1"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/isarray": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
"integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==",
"dev": true
},
"node_modules/isexe": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
"dev": true
},
"node_modules/iterator.prototype": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.2.tgz",
"integrity": "sha512-DR33HMMr8EzwuRL8Y9D3u2BMj8+RqSE850jfGu59kS7tbmPLzGkZmVSfyCFSDxuZiEY6Rzt3T2NA/qU+NwVj1w==",
"dev": true,
"dependencies": {
"define-properties": "^1.2.1",
"get-intrinsic": "^1.2.1",
"has-symbols": "^1.0.3",
"reflect.getprototypeof": "^1.0.4",
"set-function-name": "^2.0.1"
}
},
"node_modules/jackspeak": {
"version": "2.3.6",
"resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz",
"integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==",
"dev": true,
"dependencies": {
"@isaacs/cliui": "^8.0.2"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
},
"optionalDependencies": {
"@pkgjs/parseargs": "^0.11.0"
}
},
"node_modules/jiti": {
"version": "1.21.0",
"resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.0.tgz",
"integrity": "sha512-gFqAIbuKyyso/3G2qhiO2OM6shY6EPP/R0+mkDbyspxKazh8BXDC5FiFsUjlczgdNz/vfra0da2y+aHrusLG/Q==",
"dev": true,
"bin": {
"jiti": "bin/jiti.js"
}
},
"node_modules/js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
},
"node_modules/js-yaml": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
"integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
"dev": true,
"dependencies": {
"argparse": "^2.0.1"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/jsesc": {
"version": "2.5.2",
"resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
"integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
"dev": true,
"bin": {
"jsesc": "bin/jsesc"
},
"engines": {
"node": ">=4"
}
},
"node_modules/json-buffer": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz",
"integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==",
"dev": true
},
"node_modules/json-schema-traverse": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
"integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
"dev": true
},
"node_modules/json-stable-stringify-without-jsonify": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
"integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==",
"dev": true
},
"node_modules/json5": {
"version": "2.2.3",
"resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
"integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
"dev": true,
"bin": {
"json5": "lib/cli.js"
},
"engines": {
"node": ">=6"
}
},
"node_modules/jsx-ast-utils": {
"version": "3.3.5",
"resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz",
"integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==",
"dev": true,
"dependencies": {
"array-includes": "^3.1.6",
"array.prototype.flat": "^1.3.1",
"object.assign": "^4.1.4",
"object.values": "^1.1.6"
},
"engines": {
"node": ">=4.0"
}
},
"node_modules/keyv": {
"version": "4.5.4",
"resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz",
"integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==",
"dev": true,
"dependencies": {
"json-buffer": "3.0.1"
}
},
"node_modules/levn": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz",
"integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==",
"dev": true,
"dependencies": {
"prelude-ls": "^1.2.1",
"type-check": "~0.4.0"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/lilconfig": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz",
"integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==",
"dev": true,
"engines": {
"node": ">=10"
}
},
"node_modules/lines-and-columns": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
"integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==",
"dev": true
},
"node_modules/locate-path": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
"integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
"dev": true,
"dependencies": {
"p-locate": "^5.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/lodash.merge": {
"version": "4.6.2",
"resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
"integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==",
"dev": true
},
"node_modules/long": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz",
"integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA=="
},
"node_modules/loose-envify": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
"integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
"dependencies": {
"js-tokens": "^3.0.0 || ^4.0.0"
},
"bin": {
"loose-envify": "cli.js"
}
},
"node_modules/lru-cache": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
"integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
"dev": true,
"dependencies": {
"yallist": "^3.0.2"
}
},
"node_modules/merge2": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
"integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
"dev": true,
"engines": {
"node": ">= 8"
}
},
"node_modules/micromatch": {
"version": "4.0.5",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
"integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
"dev": true,
"dependencies": {
"braces": "^3.0.2",
"picomatch": "^2.3.1"
},
"engines": {
"node": ">=8.6"
}
},
"node_modules/mimic-response": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz",
"integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/minimatch": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
"integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
"dev": true,
"dependencies": {
"brace-expansion": "^1.1.7"
},
"engines": {
"node": "*"
}
},
"node_modules/minimist": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
"integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/minipass": {
"version": "7.0.4",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz",
"integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==",
"dev": true,
"engines": {
"node": ">=16 || 14 >=14.17"
}
},
"node_modules/mkdirp-classic": {
"version": "0.5.3",
"resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz",
"integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A=="
},
"node_modules/ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
"dev": true
},
"node_modules/mz": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz",
"integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==",
"dev": true,
"dependencies": {
"any-promise": "^1.0.0",
"object-assign": "^4.0.1",
"thenify-all": "^1.0.0"
}
},
"node_modules/nanoid": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz",
"integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"bin": {
"nanoid": "bin/nanoid.cjs"
},
"engines": {
"node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
}
},
"node_modules/napi-build-utils": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-1.0.2.tgz",
"integrity": "sha512-ONmRUqK7zj7DWX0D9ADe03wbwOBZxNAfF20PlGfCWQcD3+/MakShIHrMqx9YwPTfxDdF1zLeL+RGZiR9kGMLdg=="
},
"node_modules/natural-compare": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
"integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==",
"dev": true
},
"node_modules/node-abi": {
"version": "3.54.0",
"resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.54.0.tgz",
"integrity": "sha512-p7eGEiQil0YUV3ItH4/tBb781L5impVmmx2E9FRKF7d18XXzp4PGT2tdYMFY6wQqgxD0IwNZOiSJ0/K0fSi/OA==",
"dependencies": {
"semver": "^7.3.5"
},
"engines": {
"node": ">=10"
}
},
"node_modules/node-abi/node_modules/lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dependencies": {
"yallist": "^4.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/node-abi/node_modules/semver": {
"version": "7.6.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
"integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
"dependencies": {
"lru-cache": "^6.0.0"
},
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/node-abi/node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
},
"node_modules/node-addon-api": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-6.1.0.tgz",
"integrity": "sha512-+eawOlIgy680F0kBzPUNFhMZGtJ1YmqM6l4+Crf4IkImjYrO/mqPwRMh352g23uIaQKFItcQ64I7KMaJxHgAVA=="
},
"node_modules/node-releases": {
"version": "2.0.14",
"resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz",
"integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==",
"dev": true
},
"node_modules/normalize-path": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
"integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/normalize-range": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
"integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/object-assign": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
"integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/object-hash": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz",
"integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==",
"dev": true,
"engines": {
"node": ">= 6"
}
},
"node_modules/object-inspect": {
"version": "1.13.1",
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz",
"integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==",
"dev": true,
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/object-keys": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
"integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
"dev": true,
"engines": {
"node": ">= 0.4"
}
},
"node_modules/object.assign": {
"version": "4.1.5",
"resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz",
"integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.5",
"define-properties": "^1.2.1",
"has-symbols": "^1.0.3",
"object-keys": "^1.1.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/object.entries": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.7.tgz",
"integrity": "sha512-jCBs/0plmPsOnrKAfFQXRG2NFjlhZgjjcBLSmTnEhU8U6vVTsVe8ANeQJCHTl3gSsI4J+0emOoCgoKlmQPMgmA==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"es-abstract": "^1.22.1"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/object.fromentries": {
"version": "2.0.7",
"resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.7.tgz",
"integrity": "sha512-UPbPHML6sL8PI/mOqPwsH4G6iyXcCGzLin8KvEPenOZN5lpCNBZZQ+V62vdjB1mQHrmqGQt5/OJzemUA+KJmEA==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"es-abstract": "^1.22.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/object.hasown": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.3.tgz",
"integrity": "sha512-fFI4VcYpRHvSLXxP7yiZOMAd331cPfd2p7PFDVbgUsYOfCT3tICVqXWngbjr4m49OvsBwUBQ6O2uQoJvy3RexA==",
"dev": true,
"dependencies": {
"define-properties": "^1.2.0",
"es-abstract": "^1.22.1"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/object.values": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.7.tgz",
"integrity": "sha512-aU6xnDFYT3x17e/f0IiiwlGPTy2jzMySGfUB4fq6z7CV8l85CWHDk5ErhyhpfDHhrOMwGFhSQkhMGHaIotA6Ng==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"es-abstract": "^1.22.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dependencies": {
"wrappy": "1"
}
},
"node_modules/onnx-proto": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/onnx-proto/-/onnx-proto-4.0.4.tgz",
"integrity": "sha512-aldMOB3HRoo6q/phyB6QRQxSt895HNNw82BNyZ2CMh4bjeKv7g/c+VpAFtJuEMVfYLMbRx61hbuqnKceLeDcDA==",
"dependencies": {
"protobufjs": "^6.8.8"
}
},
"node_modules/onnxruntime-common": {
"version": "1.14.0",
"resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.14.0.tgz",
"integrity": "sha512-3LJpegM2iMNRX2wUmtYfeX/ytfOzNwAWKSq1HbRrKc9+uqG/FsEA0bbKZl1btQeZaXhC26l44NWpNUeXPII7Ew=="
},
"node_modules/onnxruntime-node": {
"version": "1.14.0",
"resolved": "https://registry.npmjs.org/onnxruntime-node/-/onnxruntime-node-1.14.0.tgz",
"integrity": "sha512-5ba7TWomIV/9b6NH/1x/8QEeowsb+jBEvFzU6z0T4mNsFwdPqXeFUM7uxC6QeSRkEbWu3qEB0VMjrvzN/0S9+w==",
"optional": true,
"os": [
"win32",
"darwin",
"linux"
],
"dependencies": {
"onnxruntime-common": "~1.14.0"
}
},
"node_modules/onnxruntime-web": {
"version": "1.14.0",
"resolved": "https://registry.npmjs.org/onnxruntime-web/-/onnxruntime-web-1.14.0.tgz",
"integrity": "sha512-Kcqf43UMfW8mCydVGcX9OMXI2VN17c0p6XvR7IPSZzBf/6lteBzXHvcEVWDPmCKuGombl997HgLqj91F11DzXw==",
"dependencies": {
"flatbuffers": "^1.12.0",
"guid-typescript": "^1.0.9",
"long": "^4.0.0",
"onnx-proto": "^4.0.4",
"onnxruntime-common": "~1.14.0",
"platform": "^1.3.6"
}
},
"node_modules/optionator": {
"version": "0.9.3",
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz",
"integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==",
"dev": true,
"dependencies": {
"@aashutoshrathi/word-wrap": "^1.2.3",
"deep-is": "^0.1.3",
"fast-levenshtein": "^2.0.6",
"levn": "^0.4.1",
"prelude-ls": "^1.2.1",
"type-check": "^0.4.0"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/p-limit": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
"integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
"dev": true,
"dependencies": {
"yocto-queue": "^0.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-locate": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz",
"integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==",
"dev": true,
"dependencies": {
"p-limit": "^3.0.2"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/parent-module": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
"integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
"dev": true,
"dependencies": {
"callsites": "^3.0.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/path-exists": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/path-is-absolute": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/path-key": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/path-parse": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
"dev": true
},
"node_modules/path-scurry": {
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.1.tgz",
"integrity": "sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==",
"dev": true,
"dependencies": {
"lru-cache": "^9.1.1 || ^10.0.0",
"minipass": "^5.0.0 || ^6.0.2 || ^7.0.0"
},
"engines": {
"node": ">=16 || 14 >=14.17"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/path-scurry/node_modules/lru-cache": {
"version": "10.2.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.0.tgz",
"integrity": "sha512-2bIM8x+VAf6JT4bKAljS1qUWgMsqZRPGJS6FSahIMPVvctcNhyVp7AJu7quxOW9jwkryBReKZY5tY5JYv2n/7Q==",
"dev": true,
"engines": {
"node": "14 || >=16.14"
}
},
"node_modules/picocolors": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
"integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==",
"dev": true
},
"node_modules/picomatch": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
"dev": true,
"engines": {
"node": ">=8.6"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/pify": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz",
"integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/pirates": {
"version": "4.0.6",
"resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz",
"integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==",
"dev": true,
"engines": {
"node": ">= 6"
}
},
"node_modules/platform": {
"version": "1.3.6",
"resolved": "https://registry.npmjs.org/platform/-/platform-1.3.6.tgz",
"integrity": "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg=="
},
"node_modules/postcss": {
"version": "8.4.35",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.35.tgz",
"integrity": "sha512-u5U8qYpBCpN13BsiEB0CbR1Hhh4Gc0zLFuedrHJKMctHCHAGrMdG0PRM/KErzAL3CU6/eckEtmHNB3x6e3c0vA==",
"dev": true,
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/postcss"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"nanoid": "^3.3.7",
"picocolors": "^1.0.0",
"source-map-js": "^1.0.2"
},
"engines": {
"node": "^10 || ^12 || >=14"
}
},
"node_modules/postcss-import": {
"version": "15.1.0",
"resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz",
"integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==",
"dev": true,
"dependencies": {
"postcss-value-parser": "^4.0.0",
"read-cache": "^1.0.0",
"resolve": "^1.1.7"
},
"engines": {
"node": ">=14.0.0"
},
"peerDependencies": {
"postcss": "^8.0.0"
}
},
"node_modules/postcss-import/node_modules/resolve": {
"version": "1.22.8",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
"integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==",
"dev": true,
"dependencies": {
"is-core-module": "^2.13.0",
"path-parse": "^1.0.7",
"supports-preserve-symlinks-flag": "^1.0.0"
},
"bin": {
"resolve": "bin/resolve"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/postcss-js": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz",
"integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==",
"dev": true,
"dependencies": {
"camelcase-css": "^2.0.1"
},
"engines": {
"node": "^12 || ^14 || >= 16"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
"peerDependencies": {
"postcss": "^8.4.21"
}
},
"node_modules/postcss-load-config": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.2.tgz",
"integrity": "sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==",
"dev": true,
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"lilconfig": "^3.0.0",
"yaml": "^2.3.4"
},
"engines": {
"node": ">= 14"
},
"peerDependencies": {
"postcss": ">=8.0.9",
"ts-node": ">=9.0.0"
},
"peerDependenciesMeta": {
"postcss": {
"optional": true
},
"ts-node": {
"optional": true
}
}
},
"node_modules/postcss-load-config/node_modules/lilconfig": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.0.0.tgz",
"integrity": "sha512-K2U4W2Ff5ibV7j7ydLr+zLAkIg5JJ4lPn1Ltsdt+Tz/IjQ8buJ55pZAxoP34lqIiwtF9iAvtLv3JGv7CAyAg+g==",
"dev": true,
"engines": {
"node": ">=14"
}
},
"node_modules/postcss-nested": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz",
"integrity": "sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==",
"dev": true,
"dependencies": {
"postcss-selector-parser": "^6.0.11"
},
"engines": {
"node": ">=12.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
"peerDependencies": {
"postcss": "^8.2.14"
}
},
"node_modules/postcss-selector-parser": {
"version": "6.0.15",
"resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.15.tgz",
"integrity": "sha512-rEYkQOMUCEMhsKbK66tbEU9QVIxbhN18YiniAwA7XQYTVBqrBy+P2p5JcdqsHgKM2zWylp8d7J6eszocfds5Sw==",
"dev": true,
"dependencies": {
"cssesc": "^3.0.0",
"util-deprecate": "^1.0.2"
},
"engines": {
"node": ">=4"
}
},
"node_modules/postcss-value-parser": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
"integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==",
"dev": true
},
"node_modules/prebuild-install": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.1.tgz",
"integrity": "sha512-jAXscXWMcCK8GgCoHOfIr0ODh5ai8mj63L2nWrjuAgXE6tDyYGnx4/8o/rCgU+B4JSyZBKbeZqzhtwtC3ovxjw==",
"dependencies": {
"detect-libc": "^2.0.0",
"expand-template": "^2.0.3",
"github-from-package": "0.0.0",
"minimist": "^1.2.3",
"mkdirp-classic": "^0.5.3",
"napi-build-utils": "^1.0.1",
"node-abi": "^3.3.0",
"pump": "^3.0.0",
"rc": "^1.2.7",
"simple-get": "^4.0.0",
"tar-fs": "^2.0.0",
"tunnel-agent": "^0.6.0"
},
"bin": {
"prebuild-install": "bin.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/prebuild-install/node_modules/tar-fs": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz",
"integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==",
"dependencies": {
"chownr": "^1.1.1",
"mkdirp-classic": "^0.5.2",
"pump": "^3.0.0",
"tar-stream": "^2.1.4"
}
},
"node_modules/prebuild-install/node_modules/tar-stream": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz",
"integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==",
"dependencies": {
"bl": "^4.0.3",
"end-of-stream": "^1.4.1",
"fs-constants": "^1.0.0",
"inherits": "^2.0.3",
"readable-stream": "^3.1.1"
},
"engines": {
"node": ">=6"
}
},
"node_modules/prelude-ls": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
"integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==",
"dev": true,
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/prop-types": {
"version": "15.8.1",
"resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
"integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
"dev": true,
"dependencies": {
"loose-envify": "^1.4.0",
"object-assign": "^4.1.1",
"react-is": "^16.13.1"
}
},
"node_modules/protobufjs": {
"version": "6.11.4",
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.4.tgz",
"integrity": "sha512-5kQWPaJHi1WoCpjTGszzQ32PG2F4+wRY6BmAT4Vfw56Q2FZ4YZzK20xUYQH4YkfehY1e6QSICrJquM6xXZNcrw==",
"hasInstallScript": true,
"dependencies": {
"@protobufjs/aspromise": "^1.1.2",
"@protobufjs/base64": "^1.1.2",
"@protobufjs/codegen": "^2.0.4",
"@protobufjs/eventemitter": "^1.1.0",
"@protobufjs/fetch": "^1.1.0",
"@protobufjs/float": "^1.0.2",
"@protobufjs/inquire": "^1.1.0",
"@protobufjs/path": "^1.1.2",
"@protobufjs/pool": "^1.1.0",
"@protobufjs/utf8": "^1.1.0",
"@types/long": "^4.0.1",
"@types/node": ">=13.7.0",
"long": "^4.0.0"
},
"bin": {
"pbjs": "bin/pbjs",
"pbts": "bin/pbts"
}
},
"node_modules/pump": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
"integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
"dependencies": {
"end-of-stream": "^1.1.0",
"once": "^1.3.1"
}
},
"node_modules/punycode": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
"integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/queue-microtask": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
"integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/queue-tick": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/queue-tick/-/queue-tick-1.0.1.tgz",
"integrity": "sha512-kJt5qhMxoszgU/62PLP1CJytzd2NKetjSRnyuj31fDd3Rlcz3fzlFdFLD1SItunPwyqEOkca6GbV612BWfaBag=="
},
"node_modules/rc": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
"integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
"dependencies": {
"deep-extend": "^0.6.0",
"ini": "~1.3.0",
"minimist": "^1.2.0",
"strip-json-comments": "~2.0.1"
},
"bin": {
"rc": "cli.js"
}
},
"node_modules/rc/node_modules/strip-json-comments": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
"integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/react": {
"version": "18.2.0",
"resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz",
"integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==",
"dependencies": {
"loose-envify": "^1.1.0"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/react-dom": {
"version": "18.2.0",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz",
"integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==",
"dependencies": {
"loose-envify": "^1.1.0",
"scheduler": "^0.23.0"
},
"peerDependencies": {
"react": "^18.2.0"
}
},
"node_modules/react-is": {
"version": "16.13.1",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
"integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==",
"dev": true
},
"node_modules/react-refresh": {
"version": "0.14.0",
"resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.14.0.tgz",
"integrity": "sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/read-cache": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz",
"integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==",
"dev": true,
"dependencies": {
"pify": "^2.3.0"
}
},
"node_modules/readable-stream": {
"version": "3.6.2",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz",
"integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==",
"dependencies": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/readdirp": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
"integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
"dev": true,
"dependencies": {
"picomatch": "^2.2.1"
},
"engines": {
"node": ">=8.10.0"
}
},
"node_modules/reflect.getprototypeof": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.5.tgz",
"integrity": "sha512-62wgfC8dJWrmxv44CA36pLDnP6KKl3Vhxb7PL+8+qrrFMMoJij4vgiMP8zV4O8+CBMXY1mHxI5fITGHXFHVmQQ==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.5",
"define-properties": "^1.2.1",
"es-abstract": "^1.22.3",
"es-errors": "^1.0.0",
"get-intrinsic": "^1.2.3",
"globalthis": "^1.0.3",
"which-builtin-type": "^1.1.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/regexp.prototype.flags": {
"version": "1.5.2",
"resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz",
"integrity": "sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.6",
"define-properties": "^1.2.1",
"es-errors": "^1.3.0",
"set-function-name": "^2.0.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/resolve": {
"version": "2.0.0-next.5",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz",
"integrity": "sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==",
"dev": true,
"dependencies": {
"is-core-module": "^2.13.0",
"path-parse": "^1.0.7",
"supports-preserve-symlinks-flag": "^1.0.0"
},
"bin": {
"resolve": "bin/resolve"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/resolve-from": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
"integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/reusify": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
"integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==",
"dev": true,
"engines": {
"iojs": ">=1.0.0",
"node": ">=0.10.0"
}
},
"node_modules/rimraf": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
"integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
"dev": true,
"dependencies": {
"glob": "^7.1.3"
},
"bin": {
"rimraf": "bin.js"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/rollup": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.10.0.tgz",
"integrity": "sha512-t2v9G2AKxcQ8yrG+WGxctBes1AomT0M4ND7jTFBCVPXQ/WFTvNSefIrNSmLKhIKBrvN8SG+CZslimJcT3W2u2g==",
"dev": true,
"dependencies": {
"@types/estree": "1.0.5"
},
"bin": {
"rollup": "dist/bin/rollup"
},
"engines": {
"node": ">=18.0.0",
"npm": ">=8.0.0"
},
"optionalDependencies": {
"@rollup/rollup-android-arm-eabi": "4.10.0",
"@rollup/rollup-android-arm64": "4.10.0",
"@rollup/rollup-darwin-arm64": "4.10.0",
"@rollup/rollup-darwin-x64": "4.10.0",
"@rollup/rollup-linux-arm-gnueabihf": "4.10.0",
"@rollup/rollup-linux-arm64-gnu": "4.10.0",
"@rollup/rollup-linux-arm64-musl": "4.10.0",
"@rollup/rollup-linux-riscv64-gnu": "4.10.0",
"@rollup/rollup-linux-x64-gnu": "4.10.0",
"@rollup/rollup-linux-x64-musl": "4.10.0",
"@rollup/rollup-win32-arm64-msvc": "4.10.0",
"@rollup/rollup-win32-ia32-msvc": "4.10.0",
"@rollup/rollup-win32-x64-msvc": "4.10.0",
"fsevents": "~2.3.2"
}
},
"node_modules/run-parallel": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
"integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"dependencies": {
"queue-microtask": "^1.2.2"
}
},
"node_modules/safe-array-concat": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.0.tgz",
"integrity": "sha512-ZdQ0Jeb9Ofti4hbt5lX3T2JcAamT9hfzYU1MNB+z/jaEbB6wfFfPIR/zEORmZqobkCCJhSjodobH6WHNmJ97dg==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.5",
"get-intrinsic": "^1.2.2",
"has-symbols": "^1.0.3",
"isarray": "^2.0.5"
},
"engines": {
"node": ">=0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/safe-regex-test": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.3.tgz",
"integrity": "sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.6",
"es-errors": "^1.3.0",
"is-regex": "^1.1.4"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/scheduler": {
"version": "0.23.0",
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz",
"integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==",
"dependencies": {
"loose-envify": "^1.1.0"
}
},
"node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"dev": true,
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/set-function-length": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.1.tgz",
"integrity": "sha512-j4t6ccc+VsKwYHso+kElc5neZpjtq9EnRICFZtWyBsLojhmeF/ZBd/elqm22WJh/BziDe/SBiOeAt0m2mfLD0g==",
"dev": true,
"dependencies": {
"define-data-property": "^1.1.2",
"es-errors": "^1.3.0",
"function-bind": "^1.1.2",
"get-intrinsic": "^1.2.3",
"gopd": "^1.0.1",
"has-property-descriptors": "^1.0.1"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/set-function-name": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.1.tgz",
"integrity": "sha512-tMNCiqYVkXIZgc2Hnoy2IvC/f8ezc5koaRFkCjrpWzGpCd3qbZXPzVy9MAZzK1ch/X0jvSkojys3oqJN0qCmdA==",
"dev": true,
"dependencies": {
"define-data-property": "^1.0.1",
"functions-have-names": "^1.2.3",
"has-property-descriptors": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/sharp": {
"version": "0.32.6",
"resolved": "https://registry.npmjs.org/sharp/-/sharp-0.32.6.tgz",
"integrity": "sha512-KyLTWwgcR9Oe4d9HwCwNM2l7+J0dUQwn/yf7S0EnTtb0eVS4RxO0eUSvxPtzT4F3SY+C4K6fqdv/DO27sJ/v/w==",
"hasInstallScript": true,
"dependencies": {
"color": "^4.2.3",
"detect-libc": "^2.0.2",
"node-addon-api": "^6.1.0",
"prebuild-install": "^7.1.1",
"semver": "^7.5.4",
"simple-get": "^4.0.1",
"tar-fs": "^3.0.4",
"tunnel-agent": "^0.6.0"
},
"engines": {
"node": ">=14.15.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/sharp/node_modules/lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dependencies": {
"yallist": "^4.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/sharp/node_modules/semver": {
"version": "7.6.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
"integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
"dependencies": {
"lru-cache": "^6.0.0"
},
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/sharp/node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
},
"node_modules/shebang-command": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
"dev": true,
"dependencies": {
"shebang-regex": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/shebang-regex": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/side-channel": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.5.tgz",
"integrity": "sha512-QcgiIWV4WV7qWExbN5llt6frQB/lBven9pqliLXfGPB+K9ZYXxDozp0wLkHS24kWCm+6YXH/f0HhnObZnZOBnQ==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.6",
"es-errors": "^1.3.0",
"get-intrinsic": "^1.2.4",
"object-inspect": "^1.13.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/signal-exit": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
"integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
"dev": true,
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/simple-concat": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz",
"integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/simple-get": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz",
"integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"dependencies": {
"decompress-response": "^6.0.0",
"once": "^1.3.1",
"simple-concat": "^1.0.0"
}
},
"node_modules/simple-swizzle": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz",
"integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==",
"dependencies": {
"is-arrayish": "^0.3.1"
}
},
"node_modules/source-map-js": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz",
"integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/streamx": {
"version": "2.15.8",
"resolved": "https://registry.npmjs.org/streamx/-/streamx-2.15.8.tgz",
"integrity": "sha512-6pwMeMY/SuISiRsuS8TeIrAzyFbG5gGPHFQsYjUr/pbBadaL1PCWmzKw+CHZSwainfvcF6Si6cVLq4XTEwswFQ==",
"dependencies": {
"fast-fifo": "^1.1.0",
"queue-tick": "^1.0.1"
},
"optionalDependencies": {
"bare-events": "^2.2.0"
}
},
"node_modules/string_decoder": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
"integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
"dependencies": {
"safe-buffer": "~5.2.0"
}
},
"node_modules/string-width": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
"integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
"dev": true,
"dependencies": {
"eastasianwidth": "^0.2.0",
"emoji-regex": "^9.2.2",
"strip-ansi": "^7.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/string-width-cjs": {
"name": "string-width",
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dev": true,
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/string-width-cjs/node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true
},
"node_modules/string-width/node_modules/ansi-regex": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
"integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
"node_modules/string-width/node_modules/strip-ansi": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
"integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
"dev": true,
"dependencies": {
"ansi-regex": "^6.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
"node_modules/string.prototype.matchall": {
"version": "4.0.10",
"resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.10.tgz",
"integrity": "sha512-rGXbGmOEosIQi6Qva94HUjgPs9vKW+dkG7Y8Q5O2OYkWL6wFaTRZO8zM4mhP94uX55wgyrXzfS2aGtGzUL7EJQ==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"es-abstract": "^1.22.1",
"get-intrinsic": "^1.2.1",
"has-symbols": "^1.0.3",
"internal-slot": "^1.0.5",
"regexp.prototype.flags": "^1.5.0",
"set-function-name": "^2.0.0",
"side-channel": "^1.0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/string.prototype.trim": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.8.tgz",
"integrity": "sha512-lfjY4HcixfQXOfaqCvcBuOIapyaroTXhbkfJN3gcB1OtyupngWK4sEET9Knd0cXd28kTUqu/kHoV4HKSJdnjiQ==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"es-abstract": "^1.22.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/string.prototype.trimend": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.7.tgz",
"integrity": "sha512-Ni79DqeB72ZFq1uH/L6zJ+DKZTkOtPIHovb3YZHQViE+HDouuU4mBrLOLDn5Dde3RF8qw5qVETEjhu9locMLvA==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"es-abstract": "^1.22.1"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/string.prototype.trimstart": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.7.tgz",
"integrity": "sha512-NGhtDFu3jCEm7B4Fy0DpLewdJQOZcQ0rGbwQ/+stjnrp2i+rlKeCvos9hOIeCmqwratM47OBxY7uFZzjxHXmrg==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"es-abstract": "^1.22.1"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dev": true,
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-ansi-cjs": {
"name": "strip-ansi",
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dev": true,
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-json-comments": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
"integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
"dev": true,
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/sucrase": {
"version": "3.35.0",
"resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz",
"integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==",
"dev": true,
"dependencies": {
"@jridgewell/gen-mapping": "^0.3.2",
"commander": "^4.0.0",
"glob": "^10.3.10",
"lines-and-columns": "^1.1.6",
"mz": "^2.7.0",
"pirates": "^4.0.1",
"ts-interface-checker": "^0.1.9"
},
"bin": {
"sucrase": "bin/sucrase",
"sucrase-node": "bin/sucrase-node"
},
"engines": {
"node": ">=16 || 14 >=14.17"
}
},
"node_modules/sucrase/node_modules/brace-expansion": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/sucrase/node_modules/glob": {
"version": "10.3.10",
"resolved": "https://registry.npmjs.org/glob/-/glob-10.3.10.tgz",
"integrity": "sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==",
"dev": true,
"dependencies": {
"foreground-child": "^3.1.0",
"jackspeak": "^2.3.5",
"minimatch": "^9.0.1",
"minipass": "^5.0.0 || ^6.0.2 || ^7.0.0",
"path-scurry": "^1.10.1"
},
"bin": {
"glob": "dist/esm/bin.mjs"
},
"engines": {
"node": ">=16 || 14 >=14.17"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/sucrase/node_modules/minimatch": {
"version": "9.0.3",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz",
"integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==",
"dev": true,
"dependencies": {
"brace-expansion": "^2.0.1"
},
"engines": {
"node": ">=16 || 14 >=14.17"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/supports-color": {
"version": "5.5.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
"integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
"dev": true,
"dependencies": {
"has-flag": "^3.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/supports-preserve-symlinks-flag": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
"integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
"dev": true,
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/tailwindcss": {
"version": "3.4.1",
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.1.tgz",
"integrity": "sha512-qAYmXRfk3ENzuPBakNK0SRrUDipP8NQnEY6772uDhflcQz5EhRdD7JNZxyrFHVQNCwULPBn6FNPp9brpO7ctcA==",
"dev": true,
"dependencies": {
"@alloc/quick-lru": "^5.2.0",
"arg": "^5.0.2",
"chokidar": "^3.5.3",
"didyoumean": "^1.2.2",
"dlv": "^1.1.3",
"fast-glob": "^3.3.0",
"glob-parent": "^6.0.2",
"is-glob": "^4.0.3",
"jiti": "^1.19.1",
"lilconfig": "^2.1.0",
"micromatch": "^4.0.5",
"normalize-path": "^3.0.0",
"object-hash": "^3.0.0",
"picocolors": "^1.0.0",
"postcss": "^8.4.23",
"postcss-import": "^15.1.0",
"postcss-js": "^4.0.1",
"postcss-load-config": "^4.0.1",
"postcss-nested": "^6.0.1",
"postcss-selector-parser": "^6.0.11",
"resolve": "^1.22.2",
"sucrase": "^3.32.0"
},
"bin": {
"tailwind": "lib/cli.js",
"tailwindcss": "lib/cli.js"
},
"engines": {
"node": ">=14.0.0"
}
},
"node_modules/tailwindcss/node_modules/resolve": {
"version": "1.22.8",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
"integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==",
"dev": true,
"dependencies": {
"is-core-module": "^2.13.0",
"path-parse": "^1.0.7",
"supports-preserve-symlinks-flag": "^1.0.0"
},
"bin": {
"resolve": "bin/resolve"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/tar-fs": {
"version": "3.0.5",
"resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-3.0.5.tgz",
"integrity": "sha512-JOgGAmZyMgbqpLwct7ZV8VzkEB6pxXFBVErLtb+XCOqzc6w1xiWKI9GVd6bwk68EX7eJ4DWmfXVmq8K2ziZTGg==",
"dependencies": {
"pump": "^3.0.0",
"tar-stream": "^3.1.5"
},
"optionalDependencies": {
"bare-fs": "^2.1.1",
"bare-path": "^2.1.0"
}
},
"node_modules/tar-stream": {
"version": "3.1.7",
"resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.7.tgz",
"integrity": "sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==",
"dependencies": {
"b4a": "^1.6.4",
"fast-fifo": "^1.2.0",
"streamx": "^2.15.0"
}
},
"node_modules/text-table": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
"integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==",
"dev": true
},
"node_modules/thenify": {
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz",
"integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==",
"dev": true,
"dependencies": {
"any-promise": "^1.0.0"
}
},
"node_modules/thenify-all": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz",
"integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==",
"dev": true,
"dependencies": {
"thenify": ">= 3.1.0 < 4"
},
"engines": {
"node": ">=0.8"
}
},
"node_modules/to-fast-properties": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
"integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dev": true,
"dependencies": {
"is-number": "^7.0.0"
},
"engines": {
"node": ">=8.0"
}
},
"node_modules/ts-interface-checker": {
"version": "0.1.13",
"resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz",
"integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==",
"dev": true
},
"node_modules/tunnel-agent": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
"integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==",
"dependencies": {
"safe-buffer": "^5.0.1"
},
"engines": {
"node": "*"
}
},
"node_modules/type-check": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
"integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==",
"dev": true,
"dependencies": {
"prelude-ls": "^1.2.1"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/type-fest": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
"integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
"dev": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/typed-array-buffer": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.1.tgz",
"integrity": "sha512-RSqu1UEuSlrBhHTWC8O9FnPjOduNs4M7rJ4pRKoEjtx1zUNOPN2sSXHLDX+Y2WPbHIxbvg4JFo2DNAEfPIKWoQ==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.6",
"es-errors": "^1.3.0",
"is-typed-array": "^1.1.13"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/typed-array-byte-length": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.0.tgz",
"integrity": "sha512-Or/+kvLxNpeQ9DtSydonMxCx+9ZXOswtwJn17SNLvhptaXYDJvkFFP5zbfU/uLmvnBJlI4yrnXRxpdWH/M5tNA==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"for-each": "^0.3.3",
"has-proto": "^1.0.1",
"is-typed-array": "^1.1.10"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/typed-array-byte-offset": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.0.tgz",
"integrity": "sha512-RD97prjEt9EL8YgAgpOkf3O4IF9lhJFr9g0htQkm0rchFp/Vx7LW5Q8fSXXub7BXAODyUQohRMyOc3faCPd0hg==",
"dev": true,
"dependencies": {
"available-typed-arrays": "^1.0.5",
"call-bind": "^1.0.2",
"for-each": "^0.3.3",
"has-proto": "^1.0.1",
"is-typed-array": "^1.1.10"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/typed-array-length": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.4.tgz",
"integrity": "sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"for-each": "^0.3.3",
"is-typed-array": "^1.1.9"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/unbox-primitive": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz",
"integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.2",
"has-bigints": "^1.0.2",
"has-symbols": "^1.0.3",
"which-boxed-primitive": "^1.0.2"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="
},
"node_modules/update-browserslist-db": {
"version": "1.0.13",
"resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz",
"integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==",
"dev": true,
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/browserslist"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"escalade": "^3.1.1",
"picocolors": "^1.0.0"
},
"bin": {
"update-browserslist-db": "cli.js"
},
"peerDependencies": {
"browserslist": ">= 4.21.0"
}
},
"node_modules/uri-js": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
"integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
"dev": true,
"dependencies": {
"punycode": "^2.1.0"
}
},
"node_modules/util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
},
"node_modules/vite": {
"version": "5.1.7",
"resolved": "https://registry.npmjs.org/vite/-/vite-5.1.7.tgz",
"integrity": "sha512-sgnEEFTZYMui/sTlH1/XEnVNHMujOahPLGMxn1+5sIT45Xjng1Ec1K78jRP15dSmVgg5WBin9yO81j3o9OxofA==",
"dev": true,
"dependencies": {
"esbuild": "^0.19.3",
"postcss": "^8.4.35",
"rollup": "^4.2.0"
},
"bin": {
"vite": "bin/vite.js"
},
"engines": {
"node": "^18.0.0 || >=20.0.0"
},
"funding": {
"url": "https://github.com/vitejs/vite?sponsor=1"
},
"optionalDependencies": {
"fsevents": "~2.3.3"
},
"peerDependencies": {
"@types/node": "^18.0.0 || >=20.0.0",
"less": "*",
"lightningcss": "^1.21.0",
"sass": "*",
"stylus": "*",
"sugarss": "*",
"terser": "^5.4.0"
},
"peerDependenciesMeta": {
"@types/node": {
"optional": true
},
"less": {
"optional": true
},
"lightningcss": {
"optional": true
},
"sass": {
"optional": true
},
"stylus": {
"optional": true
},
"sugarss": {
"optional": true
},
"terser": {
"optional": true
}
}
},
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"dev": true,
"dependencies": {
"isexe": "^2.0.0"
},
"bin": {
"node-which": "bin/node-which"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/which-boxed-primitive": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz",
"integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==",
"dev": true,
"dependencies": {
"is-bigint": "^1.0.1",
"is-boolean-object": "^1.1.0",
"is-number-object": "^1.0.4",
"is-string": "^1.0.5",
"is-symbol": "^1.0.3"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/which-builtin-type": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.1.3.tgz",
"integrity": "sha512-YmjsSMDBYsM1CaFiayOVT06+KJeXf0o5M/CAd4o1lTadFAtacTUM49zoYxr/oroopFDfhvN6iEcBxUyc3gvKmw==",
"dev": true,
"dependencies": {
"function.prototype.name": "^1.1.5",
"has-tostringtag": "^1.0.0",
"is-async-function": "^2.0.0",
"is-date-object": "^1.0.5",
"is-finalizationregistry": "^1.0.2",
"is-generator-function": "^1.0.10",
"is-regex": "^1.1.4",
"is-weakref": "^1.0.2",
"isarray": "^2.0.5",
"which-boxed-primitive": "^1.0.2",
"which-collection": "^1.0.1",
"which-typed-array": "^1.1.9"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/which-collection": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.1.tgz",
"integrity": "sha512-W8xeTUwaln8i3K/cY1nGXzdnVZlidBcagyNFtBdD5kxnb4TvGKR7FfSIS3mYpwWS1QUCutfKz8IY8RjftB0+1A==",
"dev": true,
"dependencies": {
"is-map": "^2.0.1",
"is-set": "^2.0.1",
"is-weakmap": "^2.0.1",
"is-weakset": "^2.0.1"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/which-typed-array": {
"version": "1.1.14",
"resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.14.tgz",
"integrity": "sha512-VnXFiIW8yNn9kIHN88xvZ4yOWchftKDsRJ8fEPacX/wl1lOvBrhsJ/OeJCXq7B0AaijRuqgzSKalJoPk+D8MPg==",
"dev": true,
"dependencies": {
"available-typed-arrays": "^1.0.6",
"call-bind": "^1.0.5",
"for-each": "^0.3.3",
"gopd": "^1.0.1",
"has-tostringtag": "^1.0.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/wrap-ansi": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
"integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
"dev": true,
"dependencies": {
"ansi-styles": "^6.1.0",
"string-width": "^5.0.1",
"strip-ansi": "^7.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/wrap-ansi-cjs": {
"name": "wrap-ansi",
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dev": true,
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/wrap-ansi-cjs/node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"dependencies": {
"color-convert": "^2.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/wrap-ansi-cjs/node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/wrap-ansi-cjs/node_modules/color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true
},
"node_modules/wrap-ansi-cjs/node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true
},
"node_modules/wrap-ansi-cjs/node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dev": true,
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/wrap-ansi/node_modules/ansi-regex": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
"integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
"node_modules/wrap-ansi/node_modules/ansi-styles": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
"integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/wrap-ansi/node_modules/strip-ansi": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
"integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
"dev": true,
"dependencies": {
"ansi-regex": "^6.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
},
"node_modules/yallist": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
"integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
"dev": true
},
"node_modules/yaml": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.4.tgz",
"integrity": "sha512-8aAvwVUSHpfEqTQ4w/KMlf3HcRdt50E5ODIQJBw1fQ5RL34xabzxtUlzTXVqc4rkZsPbvrXKWnABCD7kWSmocA==",
"dev": true,
"engines": {
"node": ">= 14"
}
},
"node_modules/yocto-queue": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
"integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
"dev": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
}
}
}
| transformers.js/examples/cross-encoder/package-lock.json/0 | {
"file_path": "transformers.js/examples/cross-encoder/package-lock.json",
"repo_id": "transformers.js",
"token_count": 126976
} | 354 |
import path from 'path';
// Needed for deploying to GitHub Pages
const BASE_PATH = process.env.BASE_PATH ?? '';
export default {
// config options
base: BASE_PATH,
root: path.join(__dirname, 'src'), // serve and build from ./src rather than the project root
build: {
outDir: path.join(__dirname, 'dist') // emit the production bundle to ./dist
},
publicDir: path.join(__dirname, 'public'), // static assets copied into the build verbatim
} | transformers.js/examples/demo-site/vite.config.js/0 | {
"file_path": "transformers.js/examples/demo-site/vite.config.js",
"repo_id": "transformers.js",
"token_count": 126
} | 355 |
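The Vite config above wires the GitHub Pages base path through an environment variable: for a project site served from https://<user>.github.io/<repo>/, BASE_PATH would be set to '/<repo>/' in the deployment environment and left unset locally, falling back to ''. A minimal sketch of that fallback in isolation (an illustration, not a file from the repo; assetUrl is a made-up helper):
// Hypothetical illustration of how the base prefix affects emitted asset URLs.
const BASE_PATH = process.env.BASE_PATH ?? '';
const assetUrl = (file) => `${BASE_PATH}${file}`;
console.log(assetUrl('assets/index.js'));
// BASE_PATH unset          -> 'assets/index.js'           (served from the site root)
// BASE_PATH='/demo-site/'  -> '/demo-site/assets/index.js'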
const { app, BrowserWindow, ipcMain } = require('electron');
const path = require('path');
const { session } = require('electron');
const { run } = require('./model.js');
// Handle creating/removing shortcuts on Windows when installing/uninstalling.
if (require('electron-squirrel-startup')) {
app.quit();
}
const createWindow = () => {
// Create the browser window.
const mainWindow = new BrowserWindow({
width: 800,
height: 600,
webPreferences: {
preload: path.join(__dirname, 'preload.js'),
},
});
// and load the index.html of the app.
mainWindow.loadFile(path.join(__dirname, 'index.html'));
// Open the DevTools.
mainWindow.webContents.openDevTools();
};
// This method will be called when Electron has finished
// initialization and is ready to create browser windows.
// Some APIs can only be used after this event occurs.
app.on('ready', () => {
// Add a handler for the `transformers:run` event. This enables 2-way communication
// between the renderer process (UI) and the main process (processing).
// https://www.electronjs.org/docs/latest/tutorial/ipc#pattern-2-renderer-to-main-two-way
ipcMain.handle('transformers:run', run)
createWindow();
// Define a custom Content Security Policy to only allow loading resources from the app's origin.
session.defaultSession.webRequest.onHeadersReceived((details, callback) => {
callback({
responseHeaders: {
...details.responseHeaders,
'Content-Security-Policy': ["default-src 'self'"]
}
})
})
});
// Quit when all windows are closed, except on macOS. There, it's common
// for applications and their menu bar to stay active until the user quits
// explicitly with Cmd + Q.
app.on('window-all-closed', () => {
if (process.platform !== 'darwin') {
app.quit();
}
});
app.on('activate', () => {
// On OS X it's common to re-create a window in the app when the
// dock icon is clicked and there are no other windows open.
if (BrowserWindow.getAllWindows().length === 0) {
createWindow();
}
});
// In this file you can include the rest of your app's specific main process
// code. You can also split it across separate files and import them here.
| transformers.js/examples/electron/src/index.js/0 | {
"file_path": "transformers.js/examples/electron/src/index.js",
"repo_id": "transformers.js",
"token_count": 796
} | 356 |
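A note on the preload script: the main process above registers the 'transformers:run' channel with ipcMain.handle and loads preload.js, which this dump does not include. What follows is a minimal sketch of what that preload could look like under the standard contextBridge pattern — the channel name comes from index.js, while the exposed namespace ('transformers') and the pass-through argument shape are assumptions for illustration, not the repo's actual file.
// Hypothetical preload.js — a sketch, not the repo's file.
const { contextBridge, ipcRenderer } = require('electron');

contextBridge.exposeInMainWorld('transformers', {
  // Invokes the 'transformers:run' handler registered in index.js and resolves
  // with whatever run() in model.js returns, completing the two-way IPC pattern
  // that the comment in index.js links to.
  run: (...args) => ipcRenderer.invoke('transformers:run', ...args),
});
From the renderer, window.transformers.run(...) then returns a promise that settles with the main process's result, while the renderer itself never touches Node APIs directly.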
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>MusicGen Web | In-browser text-to-music w/ 🤗 Transformers.js!</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.jsx"></script>
</body>
</html>
| transformers.js/examples/musicgen-web/index.html/0 | {
"file_path": "transformers.js/examples/musicgen-web/index.html",
"repo_id": "transformers.js",
"token_count": 145
} | 357 |
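The index.html above mounts the app at the #root div via the module script /src/main.jsx, which is not part of this dump. A plausible sketch of that entry point, assuming the stock Vite + React scaffold (the App import and the StrictMode wrapper are assumptions, not the repo's actual file):
// Hypothetical src/main.jsx matching the entry point referenced above.
import React from 'react';
import ReactDOM from 'react-dom/client';
import App from './App.jsx';

// Attach the React tree to the #root div declared in index.html.
ReactDOM.createRoot(document.getElementById('root')).render(
  <React.StrictMode>
    <App />
  </React.StrictMode>,
);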
{
"name": "next",
"version": "0.1.0",
"private": true,
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint"
},
"dependencies": {
"@huggingface/transformers": "^3.0.0-alpha.5",
"autoprefixer": "10.4.14",
"eslint": "8.45.0",
"eslint-config-next": "13.4.12",
"next": "^14.2.3",
"postcss": "8.4.31",
"react": "18.2.0",
"react-dom": "18.2.0",
"tailwindcss": "3.3.3"
},
"overrides": {
"protobufjs": "^7.2.4"
}
}
| transformers.js/examples/next-client/package.json/0 | {
"file_path": "transformers.js/examples/next-client/package.json",
"repo_id": "transformers.js",
"token_count": 280
} | 358 |
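The overrides field in the package.json above forces every transitive copy of protobufjs to resolve to ^7.2.4, regardless of what intermediate dependencies request (presumably a security pin, though the source does not say). For illustration, a sketch of the two forms npm's overrides accepts — the some-package name and its pinned version are hypothetical:
{
  "overrides": {
    "protobufjs": "^7.2.4",
    "some-package": { "protobufjs": "7.2.4" }
  }
}
A bare string pins the package everywhere in the tree, while nesting it under another name scopes the pin to that dependency's subtree; running npm ls protobufjs after install confirms which version actually resolved.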
{
"name": "next",
"version": "0.1.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "next",
"version": "0.1.0",
"dependencies": {
"@xenova/transformers": "^2.4.2",
"autoprefixer": "10.4.14",
"eslint": "8.45.0",
"eslint-config-next": "13.4.12",
"next": "^14.2.3",
"postcss": "^8.4.31",
"react": "18.2.0",
"react-dom": "18.2.0",
"tailwindcss": "3.3.3"
}
},
"node_modules/@aashutoshrathi/word-wrap": {
"version": "1.2.6",
"resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz",
"integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/@alloc/quick-lru": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz",
"integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/@babel/runtime": {
"version": "7.22.6",
"resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.6.tgz",
"integrity": "sha512-wDb5pWm4WDdF6LFUde3Jl8WzPA+3ZbxYqkC6xAXuD3irdEHN1k0NfTRrJD8ZD378SJ61miMLCqIOXYhd8x+AJQ==",
"dependencies": {
"regenerator-runtime": "^0.13.11"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@eslint-community/eslint-utils": {
"version": "4.4.0",
"resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz",
"integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==",
"dependencies": {
"eslint-visitor-keys": "^3.3.0"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"peerDependencies": {
"eslint": "^6.0.0 || ^7.0.0 || >=8.0.0"
}
},
"node_modules/@eslint-community/regexpp": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.6.0.tgz",
"integrity": "sha512-uiPeRISaglZnaZk8vwrjQZ1CxogZeY/4IYft6gBOTqu1WhVXWmCmZMWxUv2Q/pxSvPdp1JPaO62kLOcOkMqWrw==",
"engines": {
"node": "^12.0.0 || ^14.0.0 || >=16.0.0"
}
},
"node_modules/@eslint/eslintrc": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.0.tgz",
"integrity": "sha512-Lj7DECXqIVCqnqjjHMPna4vn6GJcMgul/wuS0je9OZ9gsL0zzDpKPVtcG1HaDVc+9y+qgXneTeUMbCqXJNpH1A==",
"dependencies": {
"ajv": "^6.12.4",
"debug": "^4.3.2",
"espree": "^9.6.0",
"globals": "^13.19.0",
"ignore": "^5.2.0",
"import-fresh": "^3.2.1",
"js-yaml": "^4.1.0",
"minimatch": "^3.1.2",
"strip-json-comments": "^3.1.1"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/eslint"
}
},
"node_modules/@eslint/js": {
"version": "8.44.0",
"resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.44.0.tgz",
"integrity": "sha512-Ag+9YM4ocKQx9AarydN0KY2j0ErMHNIocPDrVo8zAE44xLTjEtz81OdR68/cydGtk6m6jDb5Za3r2useMzYmSw==",
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
}
},
"node_modules/@humanwhocodes/config-array": {
"version": "0.11.10",
"resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.10.tgz",
"integrity": "sha512-KVVjQmNUepDVGXNuoRRdmmEjruj0KfiGSbS8LVc12LMsWDQzRXJ0qdhN8L8uUigKpfEHRhlaQFY0ib1tnUbNeQ==",
"dependencies": {
"@humanwhocodes/object-schema": "^1.2.1",
"debug": "^4.1.1",
"minimatch": "^3.0.5"
},
"engines": {
"node": ">=10.10.0"
}
},
"node_modules/@humanwhocodes/module-importer": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz",
"integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==",
"engines": {
"node": ">=12.22"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/nzakas"
}
},
"node_modules/@humanwhocodes/object-schema": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz",
"integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA=="
},
"node_modules/@jridgewell/gen-mapping": {
"version": "0.3.3",
"resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz",
"integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==",
"dependencies": {
"@jridgewell/set-array": "^1.0.1",
"@jridgewell/sourcemap-codec": "^1.4.10",
"@jridgewell/trace-mapping": "^0.3.9"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/resolve-uri": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz",
"integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==",
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/set-array": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz",
"integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==",
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/sourcemap-codec": {
"version": "1.4.15",
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
"integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg=="
},
"node_modules/@jridgewell/trace-mapping": {
"version": "0.3.18",
"resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz",
"integrity": "sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==",
"dependencies": {
"@jridgewell/resolve-uri": "3.1.0",
"@jridgewell/sourcemap-codec": "1.4.14"
}
},
"node_modules/@jridgewell/trace-mapping/node_modules/@jridgewell/sourcemap-codec": {
"version": "1.4.14",
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz",
"integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw=="
},
"node_modules/@next/env": {
"version": "14.2.3",
"resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.3.tgz",
"integrity": "sha512-W7fd7IbkfmeeY2gXrzJYDx8D2lWKbVoTIj1o1ScPHNzvp30s1AuoEFSdr39bC5sjxJaxTtq3OTCZboNp0lNWHA=="
},
"node_modules/@next/eslint-plugin-next": {
"version": "13.4.12",
"resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-13.4.12.tgz",
"integrity": "sha512-6rhK9CdxEgj/j1qvXIyLTWEaeFv7zOK8yJMulz3Owel0uek0U9MJCGzmKgYxM3aAUBo3gKeywCZKyQnJKto60A==",
"dependencies": {
"glob": "7.1.7"
}
},
"node_modules/@next/swc-darwin-arm64": {
"version": "14.2.3",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.3.tgz",
"integrity": "sha512-3pEYo/RaGqPP0YzwnlmPN2puaF2WMLM3apt5jLW2fFdXD9+pqcoTzRk+iZsf8ta7+quAe4Q6Ms0nR0SFGFdS1A==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": ">= 10"
}
},
"node_modules/@next/swc-darwin-x64": {
"version": "14.2.3",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.3.tgz",
"integrity": "sha512-6adp7waE6P1TYFSXpY366xwsOnEXM+y1kgRpjSRVI2CBDOcbRjsJ67Z6EgKIqWIue52d2q/Mx8g9MszARj8IEA==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": ">= 10"
}
},
"node_modules/@next/swc-linux-arm64-gnu": {
"version": "14.2.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.3.tgz",
"integrity": "sha512-cuzCE/1G0ZSnTAHJPUT1rPgQx1w5tzSX7POXSLaS7w2nIUJUD+e25QoXD/hMfxbsT9rslEXugWypJMILBj/QsA==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">= 10"
}
},
"node_modules/@next/swc-linux-arm64-musl": {
"version": "14.2.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.3.tgz",
"integrity": "sha512-0D4/oMM2Y9Ta3nGuCcQN8jjJjmDPYpHX9OJzqk42NZGJocU2MqhBq5tWkJrUQOQY9N+In9xOdymzapM09GeiZw==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">= 10"
}
},
"node_modules/@next/swc-linux-x64-gnu": {
"version": "14.2.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.3.tgz",
"integrity": "sha512-ENPiNnBNDInBLyUU5ii8PMQh+4XLr4pG51tOp6aJ9xqFQ2iRI6IH0Ds2yJkAzNV1CfyagcyzPfROMViS2wOZ9w==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">= 10"
}
},
"node_modules/@next/swc-linux-x64-musl": {
"version": "14.2.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.3.tgz",
"integrity": "sha512-BTAbq0LnCbF5MtoM7I/9UeUu/8ZBY0i8SFjUMCbPDOLv+un67e2JgyN4pmgfXBwy/I+RHu8q+k+MCkDN6P9ViQ==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">= 10"
}
},
"node_modules/@next/swc-win32-arm64-msvc": {
"version": "14.2.3",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.3.tgz",
"integrity": "sha512-AEHIw/dhAMLNFJFJIJIyOFDzrzI5bAjI9J26gbO5xhAKHYTZ9Or04BesFPXiAYXDNdrwTP2dQceYA4dL1geu8A==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">= 10"
}
},
"node_modules/@next/swc-win32-ia32-msvc": {
"version": "14.2.3",
"resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.3.tgz",
"integrity": "sha512-vga40n1q6aYb0CLrM+eEmisfKCR45ixQYXuBXxOOmmoV8sYST9k7E3US32FsY+CkkF7NtzdcebiFT4CHuMSyZw==",
"cpu": [
"ia32"
],
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">= 10"
}
},
"node_modules/@next/swc-win32-x64-msvc": {
"version": "14.2.3",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.3.tgz",
"integrity": "sha512-Q1/zm43RWynxrO7lW4ehciQVj+5ePBhOK+/K2P7pLFX3JaJ/IZVC69SHidrmZSOkqz7ECIOhhy7XhAFG4JYyHA==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">= 10"
}
},
"node_modules/@nodelib/fs.scandir": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
"integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
"dependencies": {
"@nodelib/fs.stat": "2.0.5",
"run-parallel": "^1.1.9"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/@nodelib/fs.stat": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
"integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
"engines": {
"node": ">= 8"
}
},
"node_modules/@nodelib/fs.walk": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
"integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
"dependencies": {
"@nodelib/fs.scandir": "2.1.5",
"fastq": "^1.6.0"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/@pkgr/utils": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/@pkgr/utils/-/utils-2.4.2.tgz",
"integrity": "sha512-POgTXhjrTfbTV63DiFXav4lBHiICLKKwDeaKn9Nphwj7WH6m0hMMCaJkMyRWjgtPFyRKRVoMXXjczsTQRDEhYw==",
"dependencies": {
"cross-spawn": "^7.0.3",
"fast-glob": "^3.3.0",
"is-glob": "^4.0.3",
"open": "^9.1.0",
"picocolors": "^1.0.0",
"tslib": "^2.6.0"
},
"engines": {
"node": "^12.20.0 || ^14.18.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/unts"
}
},
"node_modules/@protobufjs/aspromise": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
"integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ=="
},
"node_modules/@protobufjs/base64": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
"integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg=="
},
"node_modules/@protobufjs/codegen": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
"integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg=="
},
"node_modules/@protobufjs/eventemitter": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
"integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q=="
},
"node_modules/@protobufjs/fetch": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
"integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
"dependencies": {
"@protobufjs/aspromise": "^1.1.1",
"@protobufjs/inquire": "^1.1.0"
}
},
"node_modules/@protobufjs/float": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
"integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ=="
},
"node_modules/@protobufjs/inquire": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
"integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q=="
},
"node_modules/@protobufjs/path": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
"integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA=="
},
"node_modules/@protobufjs/pool": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
"integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw=="
},
"node_modules/@protobufjs/utf8": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
"integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw=="
},
"node_modules/@rushstack/eslint-patch": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.3.2.tgz",
"integrity": "sha512-V+MvGwaHH03hYhY+k6Ef/xKd6RYlc4q8WBx+2ANmipHJcKuktNcI/NgEsJgdSUF6Lw32njT6OnrRsKYCdgHjYw=="
},
"node_modules/@swc/counter": {
"version": "0.1.3",
"resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz",
"integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ=="
},
"node_modules/@swc/helpers": {
"version": "0.5.5",
"resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz",
"integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==",
"dependencies": {
"@swc/counter": "^0.1.3",
"tslib": "^2.4.0"
}
},
"node_modules/@types/json5": {
"version": "0.0.29",
"resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz",
"integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ=="
},
"node_modules/@types/node": {
"version": "20.4.4",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.4.4.tgz",
"integrity": "sha512-CukZhumInROvLq3+b5gLev+vgpsIqC2D0deQr/yS1WnxvmYLlJXZpaQrQiseMY+6xusl79E04UjWoqyr+t1/Ew=="
},
"node_modules/@typescript-eslint/parser": {
"version": "5.62.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.62.0.tgz",
"integrity": "sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==",
"dependencies": {
"@typescript-eslint/scope-manager": "5.62.0",
"@typescript-eslint/types": "5.62.0",
"@typescript-eslint/typescript-estree": "5.62.0",
"debug": "^4.3.4"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/typescript-eslint"
},
"peerDependencies": {
"eslint": "^6.0.0 || ^7.0.0 || ^8.0.0"
},
"peerDependenciesMeta": {
"typescript": {
"optional": true
}
}
},
"node_modules/@typescript-eslint/scope-manager": {
"version": "5.62.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz",
"integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==",
"dependencies": {
"@typescript-eslint/types": "5.62.0",
"@typescript-eslint/visitor-keys": "5.62.0"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/typescript-eslint"
}
},
"node_modules/@typescript-eslint/types": {
"version": "5.62.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz",
"integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==",
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/typescript-eslint"
}
},
"node_modules/@typescript-eslint/typescript-estree": {
"version": "5.62.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz",
"integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==",
"dependencies": {
"@typescript-eslint/types": "5.62.0",
"@typescript-eslint/visitor-keys": "5.62.0",
"debug": "^4.3.4",
"globby": "^11.1.0",
"is-glob": "^4.0.3",
"semver": "^7.3.7",
"tsutils": "^3.21.0"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/typescript-eslint"
},
"peerDependenciesMeta": {
"typescript": {
"optional": true
}
}
},
"node_modules/@typescript-eslint/visitor-keys": {
"version": "5.62.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz",
"integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==",
"dependencies": {
"@typescript-eslint/types": "5.62.0",
"eslint-visitor-keys": "^3.3.0"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/typescript-eslint"
}
},
"node_modules/@xenova/transformers": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/@xenova/transformers/-/transformers-2.4.2.tgz",
"integrity": "sha512-m1QlvNsic/kQJ1F1N02TpYkIBPwB68hZGljO32EM4mHEw4nKlPoQ/9gZ+oUKkavKC/LqgCnmiNQ8jWfa4Zl5AQ==",
"dependencies": {
"onnxruntime-web": "1.14.0",
"sharp": "^0.32.0"
},
"optionalDependencies": {
"onnxruntime-node": "1.14.0"
}
},
"node_modules/acorn": {
"version": "8.10.0",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz",
"integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==",
"bin": {
"acorn": "bin/acorn"
},
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/acorn-jsx": {
"version": "5.3.2",
"resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
"integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
"peerDependencies": {
"acorn": "^6.0.0 || ^7.0.0 || ^8.0.0"
}
},
"node_modules/ajv": {
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
"integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
"dependencies": {
"fast-deep-equal": "^3.1.1",
"fast-json-stable-stringify": "^2.0.0",
"json-schema-traverse": "^0.4.1",
"uri-js": "^4.2.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"engines": {
"node": ">=8"
}
},
"node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dependencies": {
"color-convert": "^2.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/any-promise": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz",
"integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A=="
},
"node_modules/anymatch": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
"integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
"dependencies": {
"normalize-path": "^3.0.0",
"picomatch": "^2.0.4"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/arg": {
"version": "5.0.2",
"resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz",
"integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg=="
},
"node_modules/argparse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="
},
"node_modules/aria-query": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz",
"integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==",
"dependencies": {
"dequal": "^2.0.3"
}
},
"node_modules/array-buffer-byte-length": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.0.tgz",
"integrity": "sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A==",
"dependencies": {
"call-bind": "^1.0.2",
"is-array-buffer": "^3.0.1"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/array-includes": {
"version": "3.1.6",
"resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.6.tgz",
"integrity": "sha512-sgTbLvL6cNnw24FnbaDyjmvddQ2ML8arZsgaJhoABMoplz/4QRhtrYS+alr1BUM1Bwp6dhx8vVCBSLG+StwOFw==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"es-abstract": "^1.20.4",
"get-intrinsic": "^1.1.3",
"is-string": "^1.0.7"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/array-union": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
"integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
"engines": {
"node": ">=8"
}
},
"node_modules/array.prototype.flat": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.1.tgz",
"integrity": "sha512-roTU0KWIOmJ4DRLmwKd19Otg0/mT3qPNt0Qb3GWW8iObuZXxrjB/pzn0R3hqpRSWg4HCwqx+0vwOnWnvlOyeIA==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"es-abstract": "^1.20.4",
"es-shim-unscopables": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/array.prototype.flatmap": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.1.tgz",
"integrity": "sha512-8UGn9O1FDVvMNB0UlLv4voxRMze7+FpHyF5mSMRjWHUMlpoDViniy05870VlxhfgTnLbpuwTzvD76MTtWxB/mQ==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"es-abstract": "^1.20.4",
"es-shim-unscopables": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/array.prototype.tosorted": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.1.tgz",
"integrity": "sha512-pZYPXPRl2PqWcsUs6LOMn+1f1532nEoPTYowBtqLwAW+W8vSVhkIGnmOX1t/UQjD6YGI0vcD2B1U7ZFGQH9jnQ==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"es-abstract": "^1.20.4",
"es-shim-unscopables": "^1.0.0",
"get-intrinsic": "^1.1.3"
}
},
"node_modules/arraybuffer.prototype.slice": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.1.tgz",
"integrity": "sha512-09x0ZWFEjj4WD8PDbykUwo3t9arLn8NIzmmYEJFpYekOAQjpkGSyrQhNoRTcwwcFRu+ycWF78QZ63oWTqSjBcw==",
"dependencies": {
"array-buffer-byte-length": "^1.0.0",
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"get-intrinsic": "^1.2.1",
"is-array-buffer": "^3.0.2",
"is-shared-array-buffer": "^1.0.2"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/ast-types-flow": {
"version": "0.0.7",
"resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.7.tgz",
"integrity": "sha512-eBvWn1lvIApYMhzQMsu9ciLfkBY499mFZlNqG+/9WR7PVlroQw0vG30cOQQbaKz3sCEc44TAOu2ykzqXSNnwag=="
},
"node_modules/autoprefixer": {
"version": "10.4.14",
"resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.14.tgz",
"integrity": "sha512-FQzyfOsTlwVzjHxKEqRIAdJx9niO6VCBCoEwax/VLSoQF29ggECcPuBqUMZ+u8jCZOPSy8b8/8KnuFbp0SaFZQ==",
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/autoprefixer"
}
],
"dependencies": {
"browserslist": "^4.21.5",
"caniuse-lite": "^1.0.30001464",
"fraction.js": "^4.2.0",
"normalize-range": "^0.1.2",
"picocolors": "^1.0.0",
"postcss-value-parser": "^4.2.0"
},
"bin": {
"autoprefixer": "bin/autoprefixer"
},
"engines": {
"node": "^10 || ^12 || >=14"
},
"peerDependencies": {
"postcss": "^8.1.0"
}
},
"node_modules/available-typed-arrays": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz",
"integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/axe-core": {
"version": "4.7.2",
"resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.7.2.tgz",
"integrity": "sha512-zIURGIS1E1Q4pcrMjp+nnEh+16G56eG/MUllJH8yEvw7asDo7Ac9uhC9KIH5jzpITueEZolfYglnCGIuSBz39g==",
"engines": {
"node": ">=4"
}
},
"node_modules/axobject-query": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-3.2.1.tgz",
"integrity": "sha512-jsyHu61e6N4Vbz/v18DHwWYKK0bSWLqn47eeDSKPB7m8tqMHF9YJ+mhIk2lVteyZrY8tnSj/jHOv4YiTCuCJgg==",
"dependencies": {
"dequal": "^2.0.3"
}
},
"node_modules/b4a": {
"version": "1.6.4",
"resolved": "https://registry.npmjs.org/b4a/-/b4a-1.6.4.tgz",
"integrity": "sha512-fpWrvyVHEKyeEvbKZTVOeZF3VSKKWtJxFIxX/jaVPf+cLbGUSitjb49pHLqPV2BUNNZ0LcoeEGfE/YCpyDYHIw=="
},
"node_modules/balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
},
"node_modules/base64-js": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/big-integer": {
"version": "1.6.51",
"resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.51.tgz",
"integrity": "sha512-GPEid2Y9QU1Exl1rpO9B2IPJGHPSupF5GnVIP0blYvNOMer2bTvSWs1jGOUg04hTmu67nmLsQ9TBo1puaotBHg==",
"engines": {
"node": ">=0.6"
}
},
"node_modules/binary-extensions": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
"integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==",
"engines": {
"node": ">=8"
}
},
"node_modules/bl": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz",
"integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==",
"dependencies": {
"buffer": "^5.5.0",
"inherits": "^2.0.4",
"readable-stream": "^3.4.0"
}
},
"node_modules/bplist-parser": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/bplist-parser/-/bplist-parser-0.2.0.tgz",
"integrity": "sha512-z0M+byMThzQmD9NILRniCUXYsYpjwnlO8N5uCFaCqIOpqRsJCrQL9NK3JsD67CN5a08nF5oIL2bD6loTdHOuKw==",
"dependencies": {
"big-integer": "^1.6.44"
},
"engines": {
"node": ">= 5.10.0"
}
},
"node_modules/brace-expansion": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
"dependencies": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"node_modules/braces": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
"dependencies": {
"fill-range": "^7.1.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/browserslist": {
"version": "4.21.9",
"resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.9.tgz",
"integrity": "sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg==",
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/browserslist"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"caniuse-lite": "^1.0.30001503",
"electron-to-chromium": "^1.4.431",
"node-releases": "^2.0.12",
"update-browserslist-db": "^1.0.11"
},
"bin": {
"browserslist": "cli.js"
},
"engines": {
"node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
}
},
"node_modules/buffer": {
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz",
"integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"dependencies": {
"base64-js": "^1.3.1",
"ieee754": "^1.1.13"
}
},
"node_modules/bundle-name": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-3.0.0.tgz",
"integrity": "sha512-PKA4BeSvBpQKQ8iPOGCSiell+N8P+Tf1DlwqmYhpe2gAhKPHn8EYOxVT+ShuGmhg8lN8XiSlS80yiExKXrURlw==",
"dependencies": {
"run-applescript": "^5.0.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/busboy": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz",
"integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==",
"dependencies": {
"streamsearch": "^1.1.0"
},
"engines": {
"node": ">=10.16.0"
}
},
"node_modules/call-bind": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz",
"integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==",
"dependencies": {
"function-bind": "^1.1.1",
"get-intrinsic": "^1.0.2"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
"integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
"engines": {
"node": ">=6"
}
},
"node_modules/camelcase-css": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
"integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==",
"engines": {
"node": ">= 6"
}
},
"node_modules/caniuse-lite": {
"version": "1.0.30001621",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001621.tgz",
"integrity": "sha512-+NLXZiviFFKX0fk8Piwv3PfLPGtRqJeq2TiNoUff/qB5KJgwecJTvCXDpmlyP/eCI/GUEmp/h/y5j0yckiiZrA==",
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/caniuse-lite"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
]
},
"node_modules/chalk": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
"integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
"dependencies": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/chokidar": {
"version": "3.5.3",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz",
"integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==",
"funding": [
{
"type": "individual",
"url": "https://paulmillr.com/funding/"
}
],
"dependencies": {
"anymatch": "~3.1.2",
"braces": "~3.0.2",
"glob-parent": "~5.1.2",
"is-binary-path": "~2.1.0",
"is-glob": "~4.0.1",
"normalize-path": "~3.0.0",
"readdirp": "~3.6.0"
},
"engines": {
"node": ">= 8.10.0"
},
"optionalDependencies": {
"fsevents": "~2.3.2"
}
},
"node_modules/chokidar/node_modules/glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dependencies": {
"is-glob": "^4.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/chownr": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
"integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="
},
"node_modules/client-only": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz",
"integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA=="
},
"node_modules/color": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz",
"integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==",
"dependencies": {
"color-convert": "^2.0.1",
"color-string": "^1.9.0"
},
"engines": {
"node": ">=12.5.0"
}
},
"node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
},
"node_modules/color-string": {
"version": "1.9.1",
"resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz",
"integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==",
"dependencies": {
"color-name": "^1.0.0",
"simple-swizzle": "^0.2.2"
}
},
"node_modules/commander": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
"integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
"engines": {
"node": ">= 6"
}
},
"node_modules/concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
"integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="
},
"node_modules/cross-spawn": {
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
"integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
"dependencies": {
"path-key": "^3.1.0",
"shebang-command": "^2.0.0",
"which": "^2.0.1"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/cssesc": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
"integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==",
"bin": {
"cssesc": "bin/cssesc"
},
"engines": {
"node": ">=4"
}
},
"node_modules/damerau-levenshtein": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz",
"integrity": "sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA=="
},
"node_modules/debug": {
"version": "4.3.4",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
"integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
"dependencies": {
"ms": "2.1.2"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/decompress-response": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz",
"integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==",
"dependencies": {
"mimic-response": "^3.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/deep-extend": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
"integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
"engines": {
"node": ">=4.0.0"
}
},
"node_modules/deep-is": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
"integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ=="
},
"node_modules/default-browser": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/default-browser/-/default-browser-4.0.0.tgz",
"integrity": "sha512-wX5pXO1+BrhMkSbROFsyxUm0i/cJEScyNhA4PPxc41ICuv05ZZB/MX28s8aZx6xjmatvebIapF6hLEKEcpneUA==",
"dependencies": {
"bundle-name": "^3.0.0",
"default-browser-id": "^3.0.0",
"execa": "^7.1.1",
"titleize": "^3.0.0"
},
"engines": {
"node": ">=14.16"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/default-browser-id": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-3.0.0.tgz",
"integrity": "sha512-OZ1y3y0SqSICtE8DE4S8YOE9UZOJ8wO16fKWVP5J1Qz42kV9jcnMVFrEE/noXb/ss3Q4pZIH79kxofzyNNtUNA==",
"dependencies": {
"bplist-parser": "^0.2.0",
"untildify": "^4.0.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/define-lazy-prop": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz",
"integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/define-properties": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.0.tgz",
"integrity": "sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA==",
"dependencies": {
"has-property-descriptors": "^1.0.0",
"object-keys": "^1.1.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/dequal": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
"integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==",
"engines": {
"node": ">=6"
}
},
"node_modules/detect-libc": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz",
"integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==",
"engines": {
"node": ">=8"
}
},
"node_modules/didyoumean": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz",
"integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw=="
},
"node_modules/dir-glob": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
"integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
"dependencies": {
"path-type": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/dlv": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz",
"integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA=="
},
"node_modules/doctrine": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
"integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
"dependencies": {
"esutils": "^2.0.2"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/electron-to-chromium": {
"version": "1.4.468",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.468.tgz",
"integrity": "sha512-6M1qyhaJOt7rQtNti1lBA0GwclPH+oKCmsra/hkcWs5INLxfXXD/dtdnaKUYQu/pjOBP/8Osoe4mAcNvvzoFag=="
},
"node_modules/emoji-regex": {
"version": "9.2.2",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
"integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
},
"node_modules/end-of-stream": {
"version": "1.4.4",
"resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
"integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
"dependencies": {
"once": "^1.4.0"
}
},
"node_modules/enhanced-resolve": {
"version": "5.15.0",
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.15.0.tgz",
"integrity": "sha512-LXYT42KJ7lpIKECr2mAXIaMldcNCh/7E0KBKOu4KSfkHmP+mZmSs+8V5gBAqisWBy0OO4W5Oyys0GO1Y8KtdKg==",
"dependencies": {
"graceful-fs": "^4.2.4",
"tapable": "^2.2.0"
},
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/es-abstract": {
"version": "1.22.1",
"resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.22.1.tgz",
"integrity": "sha512-ioRRcXMO6OFyRpyzV3kE1IIBd4WG5/kltnzdxSCqoP8CMGs/Li+M1uF5o7lOkZVFjDs+NLesthnF66Pg/0q0Lw==",
"dependencies": {
"array-buffer-byte-length": "^1.0.0",
"arraybuffer.prototype.slice": "^1.0.1",
"available-typed-arrays": "^1.0.5",
"call-bind": "^1.0.2",
"es-set-tostringtag": "^2.0.1",
"es-to-primitive": "^1.2.1",
"function.prototype.name": "^1.1.5",
"get-intrinsic": "^1.2.1",
"get-symbol-description": "^1.0.0",
"globalthis": "^1.0.3",
"gopd": "^1.0.1",
"has": "^1.0.3",
"has-property-descriptors": "^1.0.0",
"has-proto": "^1.0.1",
"has-symbols": "^1.0.3",
"internal-slot": "^1.0.5",
"is-array-buffer": "^3.0.2",
"is-callable": "^1.2.7",
"is-negative-zero": "^2.0.2",
"is-regex": "^1.1.4",
"is-shared-array-buffer": "^1.0.2",
"is-string": "^1.0.7",
"is-typed-array": "^1.1.10",
"is-weakref": "^1.0.2",
"object-inspect": "^1.12.3",
"object-keys": "^1.1.1",
"object.assign": "^4.1.4",
"regexp.prototype.flags": "^1.5.0",
"safe-array-concat": "^1.0.0",
"safe-regex-test": "^1.0.0",
"string.prototype.trim": "^1.2.7",
"string.prototype.trimend": "^1.0.6",
"string.prototype.trimstart": "^1.0.6",
"typed-array-buffer": "^1.0.0",
"typed-array-byte-length": "^1.0.0",
"typed-array-byte-offset": "^1.0.0",
"typed-array-length": "^1.0.4",
"unbox-primitive": "^1.0.2",
"which-typed-array": "^1.1.10"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/es-set-tostringtag": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz",
"integrity": "sha512-g3OMbtlwY3QewlqAiMLI47KywjWZoEytKr8pf6iTC8uJq5bIAH52Z9pnQ8pVL6whrCto53JZDuUIsifGeLorTg==",
"dependencies": {
"get-intrinsic": "^1.1.3",
"has": "^1.0.3",
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-shim-unscopables": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.0.tgz",
"integrity": "sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==",
"dependencies": {
"has": "^1.0.3"
}
},
"node_modules/es-to-primitive": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz",
"integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==",
"dependencies": {
"is-callable": "^1.1.4",
"is-date-object": "^1.0.1",
"is-symbol": "^1.0.2"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/escalade": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
"integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
"engines": {
"node": ">=6"
}
},
"node_modules/escape-string-regexp": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
"integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/eslint": {
"version": "8.45.0",
"resolved": "https://registry.npmjs.org/eslint/-/eslint-8.45.0.tgz",
"integrity": "sha512-pd8KSxiQpdYRfYa9Wufvdoct3ZPQQuVuU5O6scNgMuOMYuxvH0IGaYK0wUFjo4UYYQQCUndlXiMbnxopwvvTiw==",
"dependencies": {
"@eslint-community/eslint-utils": "^4.2.0",
"@eslint-community/regexpp": "^4.4.0",
"@eslint/eslintrc": "^2.1.0",
"@eslint/js": "8.44.0",
"@humanwhocodes/config-array": "^0.11.10",
"@humanwhocodes/module-importer": "^1.0.1",
"@nodelib/fs.walk": "^1.2.8",
"ajv": "^6.10.0",
"chalk": "^4.0.0",
"cross-spawn": "^7.0.2",
"debug": "^4.3.2",
"doctrine": "^3.0.0",
"escape-string-regexp": "^4.0.0",
"eslint-scope": "^7.2.0",
"eslint-visitor-keys": "^3.4.1",
"espree": "^9.6.0",
"esquery": "^1.4.2",
"esutils": "^2.0.2",
"fast-deep-equal": "^3.1.3",
"file-entry-cache": "^6.0.1",
"find-up": "^5.0.0",
"glob-parent": "^6.0.2",
"globals": "^13.19.0",
"graphemer": "^1.4.0",
"ignore": "^5.2.0",
"imurmurhash": "^0.1.4",
"is-glob": "^4.0.0",
"is-path-inside": "^3.0.3",
"js-yaml": "^4.1.0",
"json-stable-stringify-without-jsonify": "^1.0.1",
"levn": "^0.4.1",
"lodash.merge": "^4.6.2",
"minimatch": "^3.1.2",
"natural-compare": "^1.4.0",
"optionator": "^0.9.3",
"strip-ansi": "^6.0.1",
"text-table": "^0.2.0"
},
"bin": {
"eslint": "bin/eslint.js"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/eslint"
}
},
"node_modules/eslint-config-next": {
"version": "13.4.12",
"resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-13.4.12.tgz",
"integrity": "sha512-ZF0r5vxKaVazyZH/37Au/XItiG7qUOBw+HaH3PeyXltIMwXorsn6bdrl0Nn9N5v5v9spc+6GM2ryjugbjF6X2g==",
"dependencies": {
"@next/eslint-plugin-next": "13.4.12",
"@rushstack/eslint-patch": "^1.1.3",
"@typescript-eslint/parser": "^5.42.0",
"eslint-import-resolver-node": "^0.3.6",
"eslint-import-resolver-typescript": "^3.5.2",
"eslint-plugin-import": "^2.26.0",
"eslint-plugin-jsx-a11y": "^6.5.1",
"eslint-plugin-react": "^7.31.7",
"eslint-plugin-react-hooks": "5.0.0-canary-7118f5dd7-20230705"
},
"peerDependencies": {
"eslint": "^7.23.0 || ^8.0.0",
"typescript": ">=3.3.1"
},
"peerDependenciesMeta": {
"typescript": {
"optional": true
}
}
},
"node_modules/eslint-import-resolver-node": {
"version": "0.3.7",
"resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.7.tgz",
"integrity": "sha512-gozW2blMLJCeFpBwugLTGyvVjNoeo1knonXAcatC6bjPBZitotxdWf7Gimr25N4c0AAOo4eOUfaG82IJPDpqCA==",
"dependencies": {
"debug": "^3.2.7",
"is-core-module": "^2.11.0",
"resolve": "^1.22.1"
}
},
"node_modules/eslint-import-resolver-node/node_modules/debug": {
"version": "3.2.7",
"resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
"integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
"dependencies": {
"ms": "^2.1.1"
}
},
"node_modules/eslint-import-resolver-typescript": {
"version": "3.5.5",
"resolved": "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.5.5.tgz",
"integrity": "sha512-TdJqPHs2lW5J9Zpe17DZNQuDnox4xo2o+0tE7Pggain9Rbc19ik8kFtXdxZ250FVx2kF4vlt2RSf4qlUpG7bhw==",
"dependencies": {
"debug": "^4.3.4",
"enhanced-resolve": "^5.12.0",
"eslint-module-utils": "^2.7.4",
"get-tsconfig": "^4.5.0",
"globby": "^13.1.3",
"is-core-module": "^2.11.0",
"is-glob": "^4.0.3",
"synckit": "^0.8.5"
},
"engines": {
"node": "^14.18.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/unts/projects/eslint-import-resolver-ts"
},
"peerDependencies": {
"eslint": "*",
"eslint-plugin-import": "*"
}
},
"node_modules/eslint-import-resolver-typescript/node_modules/globby": {
"version": "13.2.2",
"resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz",
"integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==",
"dependencies": {
"dir-glob": "^3.0.1",
"fast-glob": "^3.3.0",
"ignore": "^5.2.4",
"merge2": "^1.4.1",
"slash": "^4.0.0"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/eslint-import-resolver-typescript/node_modules/slash": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz",
"integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/eslint-module-utils": {
"version": "2.8.0",
"resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.8.0.tgz",
"integrity": "sha512-aWajIYfsqCKRDgUfjEXNN/JlrzauMuSEy5sbd7WXbtW3EH6A6MpwEh42c7qD+MqQo9QMJ6fWLAeIJynx0g6OAw==",
"dependencies": {
"debug": "^3.2.7"
},
"engines": {
"node": ">=4"
},
"peerDependenciesMeta": {
"eslint": {
"optional": true
}
}
},
"node_modules/eslint-module-utils/node_modules/debug": {
"version": "3.2.7",
"resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
"integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
"dependencies": {
"ms": "^2.1.1"
}
},
"node_modules/eslint-plugin-import": {
"version": "2.27.5",
"resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.27.5.tgz",
"integrity": "sha512-LmEt3GVofgiGuiE+ORpnvP+kAm3h6MLZJ4Q5HCyHADofsb4VzXFsRiWj3c0OFiV+3DWFh0qg3v9gcPlfc3zRow==",
"dependencies": {
"array-includes": "^3.1.6",
"array.prototype.flat": "^1.3.1",
"array.prototype.flatmap": "^1.3.1",
"debug": "^3.2.7",
"doctrine": "^2.1.0",
"eslint-import-resolver-node": "^0.3.7",
"eslint-module-utils": "^2.7.4",
"has": "^1.0.3",
"is-core-module": "^2.11.0",
"is-glob": "^4.0.3",
"minimatch": "^3.1.2",
"object.values": "^1.1.6",
"resolve": "^1.22.1",
"semver": "^6.3.0",
"tsconfig-paths": "^3.14.1"
},
"engines": {
"node": ">=4"
},
"peerDependencies": {
"eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8"
}
},
"node_modules/eslint-plugin-import/node_modules/debug": {
"version": "3.2.7",
"resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
"integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
"dependencies": {
"ms": "^2.1.1"
}
},
"node_modules/eslint-plugin-import/node_modules/doctrine": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz",
"integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==",
"dependencies": {
"esutils": "^2.0.2"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/eslint-plugin-import/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/eslint-plugin-jsx-a11y": {
"version": "6.7.1",
"resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.7.1.tgz",
"integrity": "sha512-63Bog4iIethyo8smBklORknVjB0T2dwB8Mr/hIC+fBS0uyHdYYpzM/Ed+YC8VxTjlXHEWFOdmgwcDn1U2L9VCA==",
"dependencies": {
"@babel/runtime": "^7.20.7",
"aria-query": "^5.1.3",
"array-includes": "^3.1.6",
"array.prototype.flatmap": "^1.3.1",
"ast-types-flow": "^0.0.7",
"axe-core": "^4.6.2",
"axobject-query": "^3.1.1",
"damerau-levenshtein": "^1.0.8",
"emoji-regex": "^9.2.2",
"has": "^1.0.3",
"jsx-ast-utils": "^3.3.3",
"language-tags": "=1.0.5",
"minimatch": "^3.1.2",
"object.entries": "^1.1.6",
"object.fromentries": "^2.0.6",
"semver": "^6.3.0"
},
"engines": {
"node": ">=4.0"
},
"peerDependencies": {
"eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8"
}
},
"node_modules/eslint-plugin-jsx-a11y/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/eslint-plugin-react": {
"version": "7.33.0",
"resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.33.0.tgz",
"integrity": "sha512-qewL/8P34WkY8jAqdQxsiL82pDUeT7nhs8IsuXgfgnsEloKCT4miAV9N9kGtx7/KM9NH/NCGUE7Edt9iGxLXFw==",
"dependencies": {
"array-includes": "^3.1.6",
"array.prototype.flatmap": "^1.3.1",
"array.prototype.tosorted": "^1.1.1",
"doctrine": "^2.1.0",
"estraverse": "^5.3.0",
"jsx-ast-utils": "^2.4.1 || ^3.0.0",
"minimatch": "^3.1.2",
"object.entries": "^1.1.6",
"object.fromentries": "^2.0.6",
"object.hasown": "^1.1.2",
"object.values": "^1.1.6",
"prop-types": "^15.8.1",
"resolve": "^2.0.0-next.4",
"semver": "^6.3.1",
"string.prototype.matchall": "^4.0.8"
},
"engines": {
"node": ">=4"
},
"peerDependencies": {
"eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8"
}
},
"node_modules/eslint-plugin-react-hooks": {
"version": "5.0.0-canary-7118f5dd7-20230705",
"resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.0.0-canary-7118f5dd7-20230705.tgz",
"integrity": "sha512-AZYbMo/NW9chdL7vk6HQzQhT+PvTAEVqWk9ziruUoW2kAOcN5qNyelv70e0F1VNQAbvutOC9oc+xfWycI9FxDw==",
"engines": {
"node": ">=10"
},
"peerDependencies": {
"eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0"
}
},
"node_modules/eslint-plugin-react/node_modules/doctrine": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz",
"integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==",
"dependencies": {
"esutils": "^2.0.2"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/eslint-plugin-react/node_modules/resolve": {
"version": "2.0.0-next.4",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.4.tgz",
"integrity": "sha512-iMDbmAWtfU+MHpxt/I5iWI7cY6YVEZUQ3MBgPQ++XD1PELuJHIl82xBmObyP2KyQmkNB2dsqF7seoQQiAn5yDQ==",
"dependencies": {
"is-core-module": "^2.9.0",
"path-parse": "^1.0.7",
"supports-preserve-symlinks-flag": "^1.0.0"
},
"bin": {
"resolve": "bin/resolve"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/eslint-plugin-react/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/eslint-scope": {
"version": "7.2.1",
"resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.1.tgz",
"integrity": "sha512-CvefSOsDdaYYvxChovdrPo/ZGt8d5lrJWleAc1diXRKhHGiTYEI26cvo8Kle/wGnsizoCJjK73FMg1/IkIwiNA==",
"dependencies": {
"esrecurse": "^4.3.0",
"estraverse": "^5.2.0"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/eslint"
}
},
"node_modules/eslint-visitor-keys": {
"version": "3.4.1",
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.1.tgz",
"integrity": "sha512-pZnmmLwYzf+kWaM/Qgrvpen51upAktaaiI01nsJD/Yr3lMOdNtq0cxkrrg16w64VtisN6okbs7Q8AfGqj4c9fA==",
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/eslint"
}
},
"node_modules/espree": {
"version": "9.6.1",
"resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz",
"integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==",
"dependencies": {
"acorn": "^8.9.0",
"acorn-jsx": "^5.3.2",
"eslint-visitor-keys": "^3.4.1"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/eslint"
}
},
"node_modules/esquery": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz",
"integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==",
"dependencies": {
"estraverse": "^5.1.0"
},
"engines": {
"node": ">=0.10"
}
},
"node_modules/esrecurse": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
"integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
"dependencies": {
"estraverse": "^5.2.0"
},
"engines": {
"node": ">=4.0"
}
},
"node_modules/estraverse": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
"integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
"engines": {
"node": ">=4.0"
}
},
"node_modules/esutils": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
"integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/execa": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/execa/-/execa-7.1.1.tgz",
"integrity": "sha512-wH0eMf/UXckdUYnO21+HDztteVv05rq2GXksxT4fCGeHkBhw1DROXh40wcjMcRqDOWE7iPJ4n3M7e2+YFP+76Q==",
"dependencies": {
"cross-spawn": "^7.0.3",
"get-stream": "^6.0.1",
"human-signals": "^4.3.0",
"is-stream": "^3.0.0",
"merge-stream": "^2.0.0",
"npm-run-path": "^5.1.0",
"onetime": "^6.0.0",
"signal-exit": "^3.0.7",
"strip-final-newline": "^3.0.0"
},
"engines": {
"node": "^14.18.0 || ^16.14.0 || >=18.0.0"
},
"funding": {
"url": "https://github.com/sindresorhus/execa?sponsor=1"
}
},
"node_modules/expand-template": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz",
"integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==",
"engines": {
"node": ">=6"
}
},
"node_modules/fast-deep-equal": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
},
"node_modules/fast-fifo": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.3.0.tgz",
"integrity": "sha512-IgfweLvEpwyA4WgiQe9Nx6VV2QkML2NkvZnk1oKnIzXgXdWxuhF7zw4DvLTPZJn6PIUneiAXPF24QmoEqHTjyw=="
},
"node_modules/fast-glob": {
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz",
"integrity": "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==",
"dependencies": {
"@nodelib/fs.stat": "^2.0.2",
"@nodelib/fs.walk": "^1.2.3",
"glob-parent": "^5.1.2",
"merge2": "^1.3.0",
"micromatch": "^4.0.4"
},
"engines": {
"node": ">=8.6.0"
}
},
"node_modules/fast-glob/node_modules/glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dependencies": {
"is-glob": "^4.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/fast-json-stable-stringify": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
"integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="
},
"node_modules/fast-levenshtein": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
"integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw=="
},
"node_modules/fastq": {
"version": "1.15.0",
"resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz",
"integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==",
"dependencies": {
"reusify": "^1.0.4"
}
},
"node_modules/file-entry-cache": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz",
"integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==",
"dependencies": {
"flat-cache": "^3.0.4"
},
"engines": {
"node": "^10.12.0 || >=12.0.0"
}
},
"node_modules/fill-range": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
"dependencies": {
"to-regex-range": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/find-up": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
"integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==",
"dependencies": {
"locate-path": "^6.0.0",
"path-exists": "^4.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/flat-cache": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz",
"integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==",
"dependencies": {
"flatted": "^3.1.0",
"rimraf": "^3.0.2"
},
"engines": {
"node": "^10.12.0 || >=12.0.0"
}
},
"node_modules/flatbuffers": {
"version": "1.12.0",
"resolved": "https://registry.npmjs.org/flatbuffers/-/flatbuffers-1.12.0.tgz",
"integrity": "sha512-c7CZADjRcl6j0PlvFy0ZqXQ67qSEZfrVPynmnL+2zPc+NtMvrF8Y0QceMo7QqnSPc7+uWjUIAbvCQ5WIKlMVdQ=="
},
"node_modules/flatted": {
"version": "3.2.7",
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz",
"integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ=="
},
"node_modules/for-each": {
"version": "0.3.3",
"resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz",
"integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==",
"dependencies": {
"is-callable": "^1.1.3"
}
},
"node_modules/fraction.js": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz",
"integrity": "sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA==",
"engines": {
"node": "*"
},
"funding": {
"type": "patreon",
"url": "https://www.patreon.com/infusion"
}
},
"node_modules/fs-constants": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz",
"integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow=="
},
"node_modules/fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw=="
},
"node_modules/fsevents": {
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
"integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
"hasInstallScript": true,
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
}
},
"node_modules/function-bind": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
"integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
},
"node_modules/function.prototype.name": {
"version": "1.1.5",
"resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz",
"integrity": "sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.3",
"es-abstract": "^1.19.0",
"functions-have-names": "^1.2.2"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/functions-have-names": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz",
"integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-intrinsic": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz",
"integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==",
"dependencies": {
"function-bind": "^1.1.1",
"has": "^1.0.3",
"has-proto": "^1.0.1",
"has-symbols": "^1.0.3"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-stream": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
"integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/get-symbol-description": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz",
"integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==",
"dependencies": {
"call-bind": "^1.0.2",
"get-intrinsic": "^1.1.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-tsconfig": {
"version": "4.6.2",
"resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.6.2.tgz",
"integrity": "sha512-E5XrT4CbbXcXWy+1jChlZmrmCwd5KGx502kDCXJJ7y898TtWW9FwoG5HfOLVRKmlmDGkWN2HM9Ho+/Y8F0sJDg==",
"dependencies": {
"resolve-pkg-maps": "^1.0.0"
},
"funding": {
"url": "https://github.com/privatenumber/get-tsconfig?sponsor=1"
}
},
"node_modules/github-from-package": {
"version": "0.0.0",
"resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz",
"integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw=="
},
"node_modules/glob": {
"version": "7.1.7",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz",
"integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==",
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.0.4",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
},
"engines": {
"node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/glob-parent": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
"integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
"dependencies": {
"is-glob": "^4.0.3"
},
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/globals": {
"version": "13.20.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz",
"integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==",
"dependencies": {
"type-fest": "^0.20.2"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/globalthis": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz",
"integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==",
"dependencies": {
"define-properties": "^1.1.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/globby": {
"version": "11.1.0",
"resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
"integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
"dependencies": {
"array-union": "^2.1.0",
"dir-glob": "^3.0.1",
"fast-glob": "^3.2.9",
"ignore": "^5.2.0",
"merge2": "^1.4.1",
"slash": "^3.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/gopd": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
"integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
"dependencies": {
"get-intrinsic": "^1.1.3"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/graceful-fs": {
"version": "4.2.11",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
"integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="
},
"node_modules/graphemer": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz",
"integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag=="
},
"node_modules/guid-typescript": {
"version": "1.0.9",
"resolved": "https://registry.npmjs.org/guid-typescript/-/guid-typescript-1.0.9.tgz",
"integrity": "sha512-Y8T4vYhEfwJOTbouREvG+3XDsjr8E3kIr7uf+JZ0BYloFsttiHU0WfvANVsR7TxNUJa/WpCnw/Ino/p+DeBhBQ=="
},
"node_modules/has": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
"integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
"dependencies": {
"function-bind": "^1.1.1"
},
"engines": {
"node": ">= 0.4.0"
}
},
"node_modules/has-bigints": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz",
"integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"engines": {
"node": ">=8"
}
},
"node_modules/has-property-descriptors": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz",
"integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==",
"dependencies": {
"get-intrinsic": "^1.1.1"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz",
"integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-symbols": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
"integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-tostringtag": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz",
"integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==",
"dependencies": {
"has-symbols": "^1.0.2"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/human-signals": {
"version": "4.3.1",
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-4.3.1.tgz",
"integrity": "sha512-nZXjEF2nbo7lIw3mgYjItAfgQXog3OjJogSbKa2CQIIvSGWcKgeJnQlNXip6NglNzYH45nSRiEVimMvYL8DDqQ==",
"engines": {
"node": ">=14.18.0"
}
},
"node_modules/ieee754": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
"integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/ignore": {
"version": "5.2.4",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz",
"integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==",
"engines": {
"node": ">= 4"
}
},
"node_modules/import-fresh": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
"integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
"dependencies": {
"parent-module": "^1.0.0",
"resolve-from": "^4.0.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/imurmurhash": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
"integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
"engines": {
"node": ">=0.8.19"
}
},
"node_modules/inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
"dependencies": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"node_modules/ini": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
"integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew=="
},
"node_modules/internal-slot": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.5.tgz",
"integrity": "sha512-Y+R5hJrzs52QCG2laLn4udYVnxsfny9CpOhNhUvk/SSSVyF6T27FzRbF0sroPidSu3X8oEAkOn2K804mjpt6UQ==",
"dependencies": {
"get-intrinsic": "^1.2.0",
"has": "^1.0.3",
"side-channel": "^1.0.4"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/is-array-buffer": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.2.tgz",
"integrity": "sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==",
"dependencies": {
"call-bind": "^1.0.2",
"get-intrinsic": "^1.2.0",
"is-typed-array": "^1.1.10"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-arrayish": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz",
"integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ=="
},
"node_modules/is-bigint": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz",
"integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==",
"dependencies": {
"has-bigints": "^1.0.1"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-binary-path": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
"integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
"dependencies": {
"binary-extensions": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/is-boolean-object": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz",
"integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==",
"dependencies": {
"call-bind": "^1.0.2",
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-callable": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz",
"integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-core-module": {
"version": "2.12.1",
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.1.tgz",
"integrity": "sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==",
"dependencies": {
"has": "^1.0.3"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-date-object": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz",
"integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==",
"dependencies": {
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-docker": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz",
"integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==",
"bin": {
"is-docker": "cli.js"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-extglob": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
"integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-glob": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
"integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
"dependencies": {
"is-extglob": "^2.1.1"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-inside-container": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz",
"integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==",
"dependencies": {
"is-docker": "^3.0.0"
},
"bin": {
"is-inside-container": "cli.js"
},
"engines": {
"node": ">=14.16"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-negative-zero": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz",
"integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-number": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"engines": {
"node": ">=0.12.0"
}
},
"node_modules/is-number-object": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz",
"integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==",
"dependencies": {
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-path-inside": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz",
"integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==",
"engines": {
"node": ">=8"
}
},
"node_modules/is-regex": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz",
"integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==",
"dependencies": {
"call-bind": "^1.0.2",
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-shared-array-buffer": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz",
"integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==",
"dependencies": {
"call-bind": "^1.0.2"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-stream": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz",
"integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==",
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-string": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz",
"integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==",
"dependencies": {
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-symbol": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz",
"integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==",
"dependencies": {
"has-symbols": "^1.0.2"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-typed-array": {
"version": "1.1.12",
"resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.12.tgz",
"integrity": "sha512-Z14TF2JNG8Lss5/HMqt0//T9JeHXttXy5pH/DBU4vi98ozO2btxzq9MwYDZYnKwU8nRsz/+GVFVRDq3DkVuSPg==",
"dependencies": {
"which-typed-array": "^1.1.11"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-weakref": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz",
"integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==",
"dependencies": {
"call-bind": "^1.0.2"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-wsl": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz",
"integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==",
"dependencies": {
"is-docker": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/is-wsl/node_modules/is-docker": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz",
"integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==",
"bin": {
"is-docker": "cli.js"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/isarray": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
"integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw=="
},
"node_modules/isexe": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="
},
"node_modules/jiti": {
"version": "1.19.1",
"resolved": "https://registry.npmjs.org/jiti/-/jiti-1.19.1.tgz",
"integrity": "sha512-oVhqoRDaBXf7sjkll95LHVS6Myyyb1zaunVwk4Z0+WPSW4gjS0pl01zYKHScTuyEhQsFxV5L4DR5r+YqSyqyyg==",
"bin": {
"jiti": "bin/jiti.js"
}
},
"node_modules/js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
},
"node_modules/js-yaml": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
"integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
"dependencies": {
"argparse": "^2.0.1"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/json-schema-traverse": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
"integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="
},
"node_modules/json-stable-stringify-without-jsonify": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
"integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw=="
},
"node_modules/json5": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
"integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
"dependencies": {
"minimist": "^1.2.0"
},
"bin": {
"json5": "lib/cli.js"
}
},
"node_modules/jsx-ast-utils": {
"version": "3.3.4",
"resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.4.tgz",
"integrity": "sha512-fX2TVdCViod6HwKEtSWGHs57oFhVfCMwieb9PuRDgjDPh5XeqJiHFFFJCHxU5cnTc3Bu/GRL+kPiFmw8XWOfKw==",
"dependencies": {
"array-includes": "^3.1.6",
"array.prototype.flat": "^1.3.1",
"object.assign": "^4.1.4",
"object.values": "^1.1.6"
},
"engines": {
"node": ">=4.0"
}
},
"node_modules/language-subtag-registry": {
"version": "0.3.22",
"resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.22.tgz",
"integrity": "sha512-tN0MCzyWnoz/4nHS6uxdlFWoUZT7ABptwKPQ52Ea7URk6vll88bWBVhodtnlfEuCcKWNGoc+uGbw1cwa9IKh/w=="
},
"node_modules/language-tags": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.5.tgz",
"integrity": "sha512-qJhlO9cGXi6hBGKoxEG/sKZDAHD5Hnu9Hs4WbOY3pCWXDhw0N8x1NenNzm2EnNLkLkk7J2SdxAkDSbb6ftT+UQ==",
"dependencies": {
"language-subtag-registry": "~0.3.2"
}
},
"node_modules/levn": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz",
"integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==",
"dependencies": {
"prelude-ls": "^1.2.1",
"type-check": "~0.4.0"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/lilconfig": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz",
"integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==",
"engines": {
"node": ">=10"
}
},
"node_modules/lines-and-columns": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
"integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg=="
},
"node_modules/locate-path": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
"integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
"dependencies": {
"p-locate": "^5.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/lodash.merge": {
"version": "4.6.2",
"resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
"integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ=="
},
"node_modules/long": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz",
"integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA=="
},
"node_modules/loose-envify": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
"integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
"dependencies": {
"js-tokens": "^3.0.0 || ^4.0.0"
},
"bin": {
"loose-envify": "cli.js"
}
},
"node_modules/lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dependencies": {
"yallist": "^4.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/merge-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
"integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w=="
},
"node_modules/merge2": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
"integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
"engines": {
"node": ">= 8"
}
},
"node_modules/micromatch": {
"version": "4.0.5",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
"integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
"dependencies": {
"braces": "^3.0.2",
"picomatch": "^2.3.1"
},
"engines": {
"node": ">=8.6"
}
},
"node_modules/mimic-fn": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz",
"integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/mimic-response": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz",
"integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/minimatch": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
"integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
"dependencies": {
"brace-expansion": "^1.1.7"
},
"engines": {
"node": "*"
}
},
"node_modules/minimist": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
"integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/mkdirp-classic": {
"version": "0.5.3",
"resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz",
"integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A=="
},
"node_modules/ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
},
"node_modules/mz": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz",
"integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==",
"dependencies": {
"any-promise": "^1.0.0",
"object-assign": "^4.0.1",
"thenify-all": "^1.0.0"
}
},
"node_modules/nanoid": {
"version": "3.3.6",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz",
"integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"bin": {
"nanoid": "bin/nanoid.cjs"
},
"engines": {
"node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
}
},
"node_modules/napi-build-utils": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-1.0.2.tgz",
"integrity": "sha512-ONmRUqK7zj7DWX0D9ADe03wbwOBZxNAfF20PlGfCWQcD3+/MakShIHrMqx9YwPTfxDdF1zLeL+RGZiR9kGMLdg=="
},
"node_modules/natural-compare": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
"integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw=="
},
"node_modules/next": {
"version": "14.2.3",
"resolved": "https://registry.npmjs.org/next/-/next-14.2.3.tgz",
"integrity": "sha512-dowFkFTR8v79NPJO4QsBUtxv0g9BrS/phluVpMAt2ku7H+cbcBJlopXjkWlwxrk/xGqMemr7JkGPGemPrLLX7A==",
"dependencies": {
"@next/env": "14.2.3",
"@swc/helpers": "0.5.5",
"busboy": "1.6.0",
"caniuse-lite": "^1.0.30001579",
"graceful-fs": "^4.2.11",
"postcss": "8.4.31",
"styled-jsx": "5.1.1"
},
"bin": {
"next": "dist/bin/next"
},
"engines": {
"node": ">=18.17.0"
},
"optionalDependencies": {
"@next/swc-darwin-arm64": "14.2.3",
"@next/swc-darwin-x64": "14.2.3",
"@next/swc-linux-arm64-gnu": "14.2.3",
"@next/swc-linux-arm64-musl": "14.2.3",
"@next/swc-linux-x64-gnu": "14.2.3",
"@next/swc-linux-x64-musl": "14.2.3",
"@next/swc-win32-arm64-msvc": "14.2.3",
"@next/swc-win32-ia32-msvc": "14.2.3",
"@next/swc-win32-x64-msvc": "14.2.3"
},
"peerDependencies": {
"@opentelemetry/api": "^1.1.0",
"@playwright/test": "^1.41.2",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"sass": "^1.3.0"
},
"peerDependenciesMeta": {
"@opentelemetry/api": {
"optional": true
},
"@playwright/test": {
"optional": true
},
"sass": {
"optional": true
}
}
},
"node_modules/node-abi": {
"version": "3.45.0",
"resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.45.0.tgz",
"integrity": "sha512-iwXuFrMAcFVi/ZoZiqq8BzAdsLw9kxDfTC0HMyjXfSL/6CSDAGD5UmR7azrAgWV1zKYq7dUUMj4owusBWKLsiQ==",
"dependencies": {
"semver": "^7.3.5"
},
"engines": {
"node": ">=10"
}
},
"node_modules/node-addon-api": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-6.1.0.tgz",
"integrity": "sha512-+eawOlIgy680F0kBzPUNFhMZGtJ1YmqM6l4+Crf4IkImjYrO/mqPwRMh352g23uIaQKFItcQ64I7KMaJxHgAVA=="
},
"node_modules/node-releases": {
"version": "2.0.13",
"resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz",
"integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ=="
},
"node_modules/normalize-path": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
"integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/normalize-range": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
"integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/npm-run-path": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.1.0.tgz",
"integrity": "sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q==",
"dependencies": {
"path-key": "^4.0.0"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/npm-run-path/node_modules/path-key": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz",
"integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/object-assign": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
"integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/object-hash": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz",
"integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==",
"engines": {
"node": ">= 6"
}
},
"node_modules/object-inspect": {
"version": "1.12.3",
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz",
"integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/object-keys": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
"integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/object.assign": {
"version": "4.1.4",
"resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz",
"integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"has-symbols": "^1.0.3",
"object-keys": "^1.1.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/object.entries": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.6.tgz",
"integrity": "sha512-leTPzo4Zvg3pmbQ3rDK69Rl8GQvIqMWubrkxONG9/ojtFE2rD9fjMKfSI5BxW3osRH1m6VdzmqK8oAY9aT4x5w==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"es-abstract": "^1.20.4"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/object.fromentries": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.6.tgz",
"integrity": "sha512-VciD13dswC4j1Xt5394WR4MzmAQmlgN72phd/riNp9vtD7tp4QQWJ0R4wvclXcafgcYK8veHRed2W6XeGBvcfg==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"es-abstract": "^1.20.4"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/object.hasown": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.2.tgz",
"integrity": "sha512-B5UIT3J1W+WuWIU55h0mjlwaqxiE5vYENJXIXZ4VFe05pNYrkKuK0U/6aFcb0pKywYJh7IhfoqUfKVmrJJHZHw==",
"dependencies": {
"define-properties": "^1.1.4",
"es-abstract": "^1.20.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/object.values": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.6.tgz",
"integrity": "sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"es-abstract": "^1.20.4"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dependencies": {
"wrappy": "1"
}
},
"node_modules/onetime": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz",
"integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==",
"dependencies": {
"mimic-fn": "^4.0.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/onnx-proto": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/onnx-proto/-/onnx-proto-4.0.4.tgz",
"integrity": "sha512-aldMOB3HRoo6q/phyB6QRQxSt895HNNw82BNyZ2CMh4bjeKv7g/c+VpAFtJuEMVfYLMbRx61hbuqnKceLeDcDA==",
"dependencies": {
"protobufjs": "^6.8.8"
}
},
"node_modules/onnxruntime-common": {
"version": "1.14.0",
"resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.14.0.tgz",
"integrity": "sha512-3LJpegM2iMNRX2wUmtYfeX/ytfOzNwAWKSq1HbRrKc9+uqG/FsEA0bbKZl1btQeZaXhC26l44NWpNUeXPII7Ew=="
},
"node_modules/onnxruntime-node": {
"version": "1.14.0",
"resolved": "https://registry.npmjs.org/onnxruntime-node/-/onnxruntime-node-1.14.0.tgz",
"integrity": "sha512-5ba7TWomIV/9b6NH/1x/8QEeowsb+jBEvFzU6z0T4mNsFwdPqXeFUM7uxC6QeSRkEbWu3qEB0VMjrvzN/0S9+w==",
"optional": true,
"os": [
"win32",
"darwin",
"linux"
],
"dependencies": {
"onnxruntime-common": "~1.14.0"
}
},
"node_modules/onnxruntime-web": {
"version": "1.14.0",
"resolved": "https://registry.npmjs.org/onnxruntime-web/-/onnxruntime-web-1.14.0.tgz",
"integrity": "sha512-Kcqf43UMfW8mCydVGcX9OMXI2VN17c0p6XvR7IPSZzBf/6lteBzXHvcEVWDPmCKuGombl997HgLqj91F11DzXw==",
"dependencies": {
"flatbuffers": "^1.12.0",
"guid-typescript": "^1.0.9",
"long": "^4.0.0",
"onnx-proto": "^4.0.4",
"onnxruntime-common": "~1.14.0",
"platform": "^1.3.6"
}
},
"node_modules/open": {
"version": "9.1.0",
"resolved": "https://registry.npmjs.org/open/-/open-9.1.0.tgz",
"integrity": "sha512-OS+QTnw1/4vrf+9hh1jc1jnYjzSG4ttTBB8UxOwAnInG3Uo4ssetzC1ihqaIHjLJnA5GGlRl6QlZXOTQhRBUvg==",
"dependencies": {
"default-browser": "^4.0.0",
"define-lazy-prop": "^3.0.0",
"is-inside-container": "^1.0.0",
"is-wsl": "^2.2.0"
},
"engines": {
"node": ">=14.16"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/optionator": {
"version": "0.9.3",
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz",
"integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==",
"dependencies": {
"@aashutoshrathi/word-wrap": "^1.2.3",
"deep-is": "^0.1.3",
"fast-levenshtein": "^2.0.6",
"levn": "^0.4.1",
"prelude-ls": "^1.2.1",
"type-check": "^0.4.0"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/p-limit": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
"integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
"dependencies": {
"yocto-queue": "^0.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-locate": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz",
"integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==",
"dependencies": {
"p-limit": "^3.0.2"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/parent-module": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
"integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
"dependencies": {
"callsites": "^3.0.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/path-exists": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
"engines": {
"node": ">=8"
}
},
"node_modules/path-is-absolute": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/path-key": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
"engines": {
"node": ">=8"
}
},
"node_modules/path-parse": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
},
"node_modules/path-type": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
"integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==",
"engines": {
"node": ">=8"
}
},
"node_modules/picocolors": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
"integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ=="
},
"node_modules/picomatch": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
"engines": {
"node": ">=8.6"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/pify": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz",
"integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/pirates": {
"version": "4.0.6",
"resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz",
"integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==",
"engines": {
"node": ">= 6"
}
},
"node_modules/platform": {
"version": "1.3.6",
"resolved": "https://registry.npmjs.org/platform/-/platform-1.3.6.tgz",
"integrity": "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg=="
},
"node_modules/postcss": {
"version": "8.4.31",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
"integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==",
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/postcss"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"nanoid": "^3.3.6",
"picocolors": "^1.0.0",
"source-map-js": "^1.0.2"
},
"engines": {
"node": "^10 || ^12 || >=14"
}
},
"node_modules/postcss-import": {
"version": "15.1.0",
"resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz",
"integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==",
"dependencies": {
"postcss-value-parser": "^4.0.0",
"read-cache": "^1.0.0",
"resolve": "^1.1.7"
},
"engines": {
"node": ">=14.0.0"
},
"peerDependencies": {
"postcss": "^8.0.0"
}
},
"node_modules/postcss-js": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz",
"integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==",
"dependencies": {
"camelcase-css": "^2.0.1"
},
"engines": {
"node": "^12 || ^14 || >= 16"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
"peerDependencies": {
"postcss": "^8.4.21"
}
},
"node_modules/postcss-load-config": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.1.tgz",
"integrity": "sha512-vEJIc8RdiBRu3oRAI0ymerOn+7rPuMvRXslTvZUKZonDHFIczxztIyJ1urxM1x9JXEikvpWWTUUqal5j/8QgvA==",
"dependencies": {
"lilconfig": "^2.0.5",
"yaml": "^2.1.1"
},
"engines": {
"node": ">= 14"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
"peerDependencies": {
"postcss": ">=8.0.9",
"ts-node": ">=9.0.0"
},
"peerDependenciesMeta": {
"postcss": {
"optional": true
},
"ts-node": {
"optional": true
}
}
},
"node_modules/postcss-nested": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz",
"integrity": "sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==",
"dependencies": {
"postcss-selector-parser": "^6.0.11"
},
"engines": {
"node": ">=12.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
"peerDependencies": {
"postcss": "^8.2.14"
}
},
"node_modules/postcss-selector-parser": {
"version": "6.0.13",
"resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.13.tgz",
"integrity": "sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==",
"dependencies": {
"cssesc": "^3.0.0",
"util-deprecate": "^1.0.2"
},
"engines": {
"node": ">=4"
}
},
"node_modules/postcss-value-parser": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
"integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="
},
"node_modules/prebuild-install": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.1.tgz",
"integrity": "sha512-jAXscXWMcCK8GgCoHOfIr0ODh5ai8mj63L2nWrjuAgXE6tDyYGnx4/8o/rCgU+B4JSyZBKbeZqzhtwtC3ovxjw==",
"dependencies": {
"detect-libc": "^2.0.0",
"expand-template": "^2.0.3",
"github-from-package": "0.0.0",
"minimist": "^1.2.3",
"mkdirp-classic": "^0.5.3",
"napi-build-utils": "^1.0.1",
"node-abi": "^3.3.0",
"pump": "^3.0.0",
"rc": "^1.2.7",
"simple-get": "^4.0.0",
"tar-fs": "^2.0.0",
"tunnel-agent": "^0.6.0"
},
"bin": {
"prebuild-install": "bin.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/prebuild-install/node_modules/tar-fs": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz",
"integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==",
"dependencies": {
"chownr": "^1.1.1",
"mkdirp-classic": "^0.5.2",
"pump": "^3.0.0",
"tar-stream": "^2.1.4"
}
},
"node_modules/prebuild-install/node_modules/tar-stream": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz",
"integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==",
"dependencies": {
"bl": "^4.0.3",
"end-of-stream": "^1.4.1",
"fs-constants": "^1.0.0",
"inherits": "^2.0.3",
"readable-stream": "^3.1.1"
},
"engines": {
"node": ">=6"
}
},
"node_modules/prelude-ls": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
"integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==",
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/prop-types": {
"version": "15.8.1",
"resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
"integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
"dependencies": {
"loose-envify": "^1.4.0",
"object-assign": "^4.1.1",
"react-is": "^16.13.1"
}
},
"node_modules/protobufjs": {
"version": "7.2.6",
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.6.tgz",
"integrity": "sha512-dgJaEDDL6x8ASUZ1YqWciTRrdOuYNzoOf27oHNfdyvKqHr5i0FV7FSLU+aIeFjyFgVxrpTOtQUi0BLLBymZaBw==",
"hasInstallScript": true,
"dependencies": {
"@protobufjs/aspromise": "^1.1.2",
"@protobufjs/base64": "^1.1.2",
"@protobufjs/codegen": "^2.0.4",
"@protobufjs/eventemitter": "^1.1.0",
"@protobufjs/fetch": "^1.1.0",
"@protobufjs/float": "^1.0.2",
"@protobufjs/inquire": "^1.1.0",
"@protobufjs/path": "^1.1.2",
"@protobufjs/pool": "^1.1.0",
"@protobufjs/utf8": "^1.1.0",
"@types/node": ">=13.7.0",
"long": "^5.0.0"
},
"engines": {
"node": ">=12.0.0"
}
},
"node_modules/protobufjs/node_modules/long": {
"version": "5.2.3",
"resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz",
"integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q=="
},
"node_modules/pump": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
"integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
"dependencies": {
"end-of-stream": "^1.1.0",
"once": "^1.3.1"
}
},
"node_modules/punycode": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz",
"integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==",
"engines": {
"node": ">=6"
}
},
"node_modules/queue-microtask": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
"integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/queue-tick": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/queue-tick/-/queue-tick-1.0.1.tgz",
"integrity": "sha512-kJt5qhMxoszgU/62PLP1CJytzd2NKetjSRnyuj31fDd3Rlcz3fzlFdFLD1SItunPwyqEOkca6GbV612BWfaBag=="
},
"node_modules/rc": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
"integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
"dependencies": {
"deep-extend": "^0.6.0",
"ini": "~1.3.0",
"minimist": "^1.2.0",
"strip-json-comments": "~2.0.1"
},
"bin": {
"rc": "cli.js"
}
},
"node_modules/rc/node_modules/strip-json-comments": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
"integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/react": {
"version": "18.2.0",
"resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz",
"integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==",
"dependencies": {
"loose-envify": "^1.1.0"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/react-dom": {
"version": "18.2.0",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz",
"integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==",
"dependencies": {
"loose-envify": "^1.1.0",
"scheduler": "^0.23.0"
},
"peerDependencies": {
"react": "^18.2.0"
}
},
"node_modules/react-is": {
"version": "16.13.1",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
"integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
},
"node_modules/read-cache": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz",
"integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==",
"dependencies": {
"pify": "^2.3.0"
}
},
"node_modules/readable-stream": {
"version": "3.6.2",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz",
"integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==",
"dependencies": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/readdirp": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
"integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
"dependencies": {
"picomatch": "^2.2.1"
},
"engines": {
"node": ">=8.10.0"
}
},
"node_modules/regenerator-runtime": {
"version": "0.13.11",
"resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz",
"integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg=="
},
"node_modules/regexp.prototype.flags": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.0.tgz",
"integrity": "sha512-0SutC3pNudRKgquxGoRGIz946MZVHqbNfPjBdxeOhBrdgDKlRoXmYLQN9xRbrR09ZXWeGAdPuif7egofn6v5LA==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.2.0",
"functions-have-names": "^1.2.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/resolve": {
"version": "1.22.2",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz",
"integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==",
"dependencies": {
"is-core-module": "^2.11.0",
"path-parse": "^1.0.7",
"supports-preserve-symlinks-flag": "^1.0.0"
},
"bin": {
"resolve": "bin/resolve"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/resolve-from": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
"integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
"engines": {
"node": ">=4"
}
},
"node_modules/resolve-pkg-maps": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz",
"integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==",
"funding": {
"url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1"
}
},
"node_modules/reusify": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
"integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==",
"engines": {
"iojs": ">=1.0.0",
"node": ">=0.10.0"
}
},
"node_modules/rimraf": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
"integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
"dependencies": {
"glob": "^7.1.3"
},
"bin": {
"rimraf": "bin.js"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/run-applescript": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-5.0.0.tgz",
"integrity": "sha512-XcT5rBksx1QdIhlFOCtgZkB99ZEouFZ1E2Kc2LHqNW13U3/74YGdkQRmThTwxy4QIyookibDKYZOPqX//6BlAg==",
"dependencies": {
"execa": "^5.0.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/run-applescript/node_modules/execa": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
"integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==",
"dependencies": {
"cross-spawn": "^7.0.3",
"get-stream": "^6.0.0",
"human-signals": "^2.1.0",
"is-stream": "^2.0.0",
"merge-stream": "^2.0.0",
"npm-run-path": "^4.0.1",
"onetime": "^5.1.2",
"signal-exit": "^3.0.3",
"strip-final-newline": "^2.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sindresorhus/execa?sponsor=1"
}
},
"node_modules/run-applescript/node_modules/human-signals": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
"integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==",
"engines": {
"node": ">=10.17.0"
}
},
"node_modules/run-applescript/node_modules/is-stream": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
"integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/run-applescript/node_modules/mimic-fn": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
"integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
"engines": {
"node": ">=6"
}
},
"node_modules/run-applescript/node_modules/npm-run-path": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
"integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
"dependencies": {
"path-key": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/run-applescript/node_modules/onetime": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
"integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
"dependencies": {
"mimic-fn": "^2.1.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/run-applescript/node_modules/strip-final-newline": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
"integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
"engines": {
"node": ">=6"
}
},
"node_modules/run-parallel": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
"integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"dependencies": {
"queue-microtask": "^1.2.2"
}
},
"node_modules/safe-array-concat": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.0.0.tgz",
"integrity": "sha512-9dVEFruWIsnie89yym+xWTAYASdpw3CJV7Li/6zBewGf9z2i1j31rP6jnY0pHEO4QZh6N0K11bFjWmdR8UGdPQ==",
"dependencies": {
"call-bind": "^1.0.2",
"get-intrinsic": "^1.2.0",
"has-symbols": "^1.0.3",
"isarray": "^2.0.5"
},
"engines": {
"node": ">=0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/safe-regex-test": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz",
"integrity": "sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==",
"dependencies": {
"call-bind": "^1.0.2",
"get-intrinsic": "^1.1.3",
"is-regex": "^1.1.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/scheduler": {
"version": "0.23.0",
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz",
"integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==",
"dependencies": {
"loose-envify": "^1.1.0"
}
},
"node_modules/semver": {
"version": "7.5.4",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
"integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
"dependencies": {
"lru-cache": "^6.0.0"
},
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/sharp": {
"version": "0.32.6",
"resolved": "https://registry.npmjs.org/sharp/-/sharp-0.32.6.tgz",
"integrity": "sha512-KyLTWwgcR9Oe4d9HwCwNM2l7+J0dUQwn/yf7S0EnTtb0eVS4RxO0eUSvxPtzT4F3SY+C4K6fqdv/DO27sJ/v/w==",
"hasInstallScript": true,
"dependencies": {
"color": "^4.2.3",
"detect-libc": "^2.0.2",
"node-addon-api": "^6.1.0",
"prebuild-install": "^7.1.1",
"semver": "^7.5.4",
"simple-get": "^4.0.1",
"tar-fs": "^3.0.4",
"tunnel-agent": "^0.6.0"
},
"engines": {
"node": ">=14.15.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/shebang-command": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
"dependencies": {
"shebang-regex": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/shebang-regex": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
"engines": {
"node": ">=8"
}
},
"node_modules/side-channel": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz",
"integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==",
"dependencies": {
"call-bind": "^1.0.0",
"get-intrinsic": "^1.0.2",
"object-inspect": "^1.9.0"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/signal-exit": {
"version": "3.0.7",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
"integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="
},
"node_modules/simple-concat": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz",
"integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/simple-get": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz",
"integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"dependencies": {
"decompress-response": "^6.0.0",
"once": "^1.3.1",
"simple-concat": "^1.0.0"
}
},
"node_modules/simple-swizzle": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz",
"integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==",
"dependencies": {
"is-arrayish": "^0.3.1"
}
},
"node_modules/slash": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
"integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
"engines": {
"node": ">=8"
}
},
"node_modules/source-map-js": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz",
"integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/streamsearch": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz",
"integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==",
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/streamx": {
"version": "2.15.0",
"resolved": "https://registry.npmjs.org/streamx/-/streamx-2.15.0.tgz",
"integrity": "sha512-HcxY6ncGjjklGs1xsP1aR71INYcsXFJet5CU1CHqihQ2J5nOsbd4OjgjHO42w/4QNv9gZb3BueV+Vxok5pLEXg==",
"dependencies": {
"fast-fifo": "^1.1.0",
"queue-tick": "^1.0.1"
}
},
"node_modules/string_decoder": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
"integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
"dependencies": {
"safe-buffer": "~5.2.0"
}
},
"node_modules/string.prototype.matchall": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.8.tgz",
"integrity": "sha512-6zOCOcJ+RJAQshcTvXPHoxoQGONa3e/Lqx90wUA+wEzX78sg5Bo+1tQo4N0pohS0erG9qtCqJDjNCQBjeWVxyg==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"es-abstract": "^1.20.4",
"get-intrinsic": "^1.1.3",
"has-symbols": "^1.0.3",
"internal-slot": "^1.0.3",
"regexp.prototype.flags": "^1.4.3",
"side-channel": "^1.0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/string.prototype.trim": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.7.tgz",
"integrity": "sha512-p6TmeT1T3411M8Cgg9wBTMRtY2q9+PNy9EV1i2lIXUN/btt763oIfxwN3RR8VU6wHX8j/1CFy0L+YuThm6bgOg==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"es-abstract": "^1.20.4"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/string.prototype.trimend": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz",
"integrity": "sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"es-abstract": "^1.20.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/string.prototype.trimstart": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.6.tgz",
"integrity": "sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"es-abstract": "^1.20.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-bom": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
"integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==",
"engines": {
"node": ">=4"
}
},
"node_modules/strip-final-newline": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz",
"integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/strip-json-comments": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
"integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/styled-jsx": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz",
"integrity": "sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==",
"dependencies": {
"client-only": "0.0.1"
},
"engines": {
"node": ">= 12.0.0"
},
"peerDependencies": {
"react": ">= 16.8.0 || 17.x.x || ^18.0.0-0"
},
"peerDependenciesMeta": {
"@babel/core": {
"optional": true
},
"babel-plugin-macros": {
"optional": true
}
}
},
"node_modules/sucrase": {
"version": "3.34.0",
"resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.34.0.tgz",
"integrity": "sha512-70/LQEZ07TEcxiU2dz51FKaE6hCTWC6vr7FOk3Gr0U60C3shtAN+H+BFr9XlYe5xqf3RA8nrc+VIwzCfnxuXJw==",
"dependencies": {
"@jridgewell/gen-mapping": "^0.3.2",
"commander": "^4.0.0",
"glob": "7.1.6",
"lines-and-columns": "^1.1.6",
"mz": "^2.7.0",
"pirates": "^4.0.1",
"ts-interface-checker": "^0.1.9"
},
"bin": {
"sucrase": "bin/sucrase",
"sucrase-node": "bin/sucrase-node"
},
"engines": {
"node": ">=8"
}
},
"node_modules/sucrase/node_modules/glob": {
"version": "7.1.6",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
"integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.0.4",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
},
"engines": {
"node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/supports-preserve-symlinks-flag": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
"integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/synckit": {
"version": "0.8.5",
"resolved": "https://registry.npmjs.org/synckit/-/synckit-0.8.5.tgz",
"integrity": "sha512-L1dapNV6vu2s/4Sputv8xGsCdAVlb5nRDMFU/E27D44l5U6cw1g0dGd45uLc+OXjNMmF4ntiMdCimzcjFKQI8Q==",
"dependencies": {
"@pkgr/utils": "^2.3.1",
"tslib": "^2.5.0"
},
"engines": {
"node": "^14.18.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/unts"
}
},
"node_modules/tailwindcss": {
"version": "3.3.3",
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.3.tgz",
"integrity": "sha512-A0KgSkef7eE4Mf+nKJ83i75TMyq8HqY3qmFIJSWy8bNt0v1lG7jUcpGpoTFxAwYcWOphcTBLPPJg+bDfhDf52w==",
"dependencies": {
"@alloc/quick-lru": "^5.2.0",
"arg": "^5.0.2",
"chokidar": "^3.5.3",
"didyoumean": "^1.2.2",
"dlv": "^1.1.3",
"fast-glob": "^3.2.12",
"glob-parent": "^6.0.2",
"is-glob": "^4.0.3",
"jiti": "^1.18.2",
"lilconfig": "^2.1.0",
"micromatch": "^4.0.5",
"normalize-path": "^3.0.0",
"object-hash": "^3.0.0",
"picocolors": "^1.0.0",
"postcss": "^8.4.23",
"postcss-import": "^15.1.0",
"postcss-js": "^4.0.1",
"postcss-load-config": "^4.0.1",
"postcss-nested": "^6.0.1",
"postcss-selector-parser": "^6.0.11",
"resolve": "^1.22.2",
"sucrase": "^3.32.0"
},
"bin": {
"tailwind": "lib/cli.js",
"tailwindcss": "lib/cli.js"
},
"engines": {
"node": ">=14.0.0"
}
},
"node_modules/tapable": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz",
"integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==",
"engines": {
"node": ">=6"
}
},
"node_modules/tar-fs": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-3.0.4.tgz",
"integrity": "sha512-5AFQU8b9qLfZCX9zp2duONhPmZv0hGYiBPJsyUdqMjzq/mqVpy/rEUSeHk1+YitmxugaptgBh5oDGU3VsAJq4w==",
"dependencies": {
"mkdirp-classic": "^0.5.2",
"pump": "^3.0.0",
"tar-stream": "^3.1.5"
}
},
"node_modules/tar-stream": {
"version": "3.1.6",
"resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.6.tgz",
"integrity": "sha512-B/UyjYwPpMBv+PaFSWAmtYjwdrlEaZQEhMIBFNC5oEG8lpiW8XjcSdmEaClj28ArfKScKHs2nshz3k2le6crsg==",
"dependencies": {
"b4a": "^1.6.4",
"fast-fifo": "^1.2.0",
"streamx": "^2.15.0"
}
},
"node_modules/text-table": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
"integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw=="
},
"node_modules/thenify": {
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz",
"integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==",
"dependencies": {
"any-promise": "^1.0.0"
}
},
"node_modules/thenify-all": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz",
"integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==",
"dependencies": {
"thenify": ">= 3.1.0 < 4"
},
"engines": {
"node": ">=0.8"
}
},
"node_modules/titleize": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/titleize/-/titleize-3.0.0.tgz",
"integrity": "sha512-KxVu8EYHDPBdUYdKZdKtU2aj2XfEx9AfjXxE/Aj0vT06w2icA09Vus1rh6eSu1y01akYg6BjIK/hxyLJINoMLQ==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dependencies": {
"is-number": "^7.0.0"
},
"engines": {
"node": ">=8.0"
}
},
"node_modules/ts-interface-checker": {
"version": "0.1.13",
"resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz",
"integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA=="
},
"node_modules/tsconfig-paths": {
"version": "3.14.2",
"resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.2.tgz",
"integrity": "sha512-o/9iXgCYc5L/JxCHPe3Hvh8Q/2xm5Z+p18PESBU6Ff33695QnCHBEjcytY2q19ua7Mbl/DavtBOLq+oG0RCL+g==",
"dependencies": {
"@types/json5": "^0.0.29",
"json5": "^1.0.2",
"minimist": "^1.2.6",
"strip-bom": "^3.0.0"
}
},
"node_modules/tslib": {
"version": "2.6.0",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.0.tgz",
"integrity": "sha512-7At1WUettjcSRHXCyYtTselblcHl9PJFFVKiCAy/bY97+BPZXSQ2wbq0P9s8tK2G7dFQfNnlJnPAiArVBVBsfA=="
},
"node_modules/tsutils": {
"version": "3.21.0",
"resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz",
"integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==",
"dependencies": {
"tslib": "^1.8.1"
},
"engines": {
"node": ">= 6"
},
"peerDependencies": {
"typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta"
}
},
"node_modules/tsutils/node_modules/tslib": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
"integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="
},
"node_modules/tunnel-agent": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
"integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==",
"dependencies": {
"safe-buffer": "^5.0.1"
},
"engines": {
"node": "*"
}
},
"node_modules/type-check": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
"integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==",
"dependencies": {
"prelude-ls": "^1.2.1"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/type-fest": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
"integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/typed-array-buffer": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.0.tgz",
"integrity": "sha512-Y8KTSIglk9OZEr8zywiIHG/kmQ7KWyjseXs1CbSo8vC42w7hg2HgYTxSWwP0+is7bWDc1H+Fo026CpHFwm8tkw==",
"dependencies": {
"call-bind": "^1.0.2",
"get-intrinsic": "^1.2.1",
"is-typed-array": "^1.1.10"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/typed-array-byte-length": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.0.tgz",
"integrity": "sha512-Or/+kvLxNpeQ9DtSydonMxCx+9ZXOswtwJn17SNLvhptaXYDJvkFFP5zbfU/uLmvnBJlI4yrnXRxpdWH/M5tNA==",
"dependencies": {
"call-bind": "^1.0.2",
"for-each": "^0.3.3",
"has-proto": "^1.0.1",
"is-typed-array": "^1.1.10"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/typed-array-byte-offset": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.0.tgz",
"integrity": "sha512-RD97prjEt9EL8YgAgpOkf3O4IF9lhJFr9g0htQkm0rchFp/Vx7LW5Q8fSXXub7BXAODyUQohRMyOc3faCPd0hg==",
"dependencies": {
"available-typed-arrays": "^1.0.5",
"call-bind": "^1.0.2",
"for-each": "^0.3.3",
"has-proto": "^1.0.1",
"is-typed-array": "^1.1.10"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/typed-array-length": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.4.tgz",
"integrity": "sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==",
"dependencies": {
"call-bind": "^1.0.2",
"for-each": "^0.3.3",
"is-typed-array": "^1.1.9"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/typescript": {
"version": "5.1.6",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz",
"integrity": "sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==",
"peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
},
"engines": {
"node": ">=14.17"
}
},
"node_modules/unbox-primitive": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz",
"integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==",
"dependencies": {
"call-bind": "^1.0.2",
"has-bigints": "^1.0.2",
"has-symbols": "^1.0.3",
"which-boxed-primitive": "^1.0.2"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/untildify": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/untildify/-/untildify-4.0.0.tgz",
"integrity": "sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw==",
"engines": {
"node": ">=8"
}
},
"node_modules/update-browserslist-db": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz",
"integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==",
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/browserslist"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"escalade": "^3.1.1",
"picocolors": "^1.0.0"
},
"bin": {
"update-browserslist-db": "cli.js"
},
"peerDependencies": {
"browserslist": ">= 4.21.0"
}
},
"node_modules/uri-js": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
"integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
"dependencies": {
"punycode": "^2.1.0"
}
},
"node_modules/util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
},
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"dependencies": {
"isexe": "^2.0.0"
},
"bin": {
"node-which": "bin/node-which"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/which-boxed-primitive": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz",
"integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==",
"dependencies": {
"is-bigint": "^1.0.1",
"is-boolean-object": "^1.1.0",
"is-number-object": "^1.0.4",
"is-string": "^1.0.5",
"is-symbol": "^1.0.3"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/which-typed-array": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.11.tgz",
"integrity": "sha512-qe9UWWpkeG5yzZ0tNYxDmd7vo58HDBc39mZ0xWWpolAGADdFOzkfamWLDxkOWcvHQKVmdTyQdLD4NOfjLWTKew==",
"dependencies": {
"available-typed-arrays": "^1.0.5",
"call-bind": "^1.0.2",
"for-each": "^0.3.3",
"gopd": "^1.0.1",
"has-tostringtag": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
},
"node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
},
"node_modules/yaml": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.1.tgz",
"integrity": "sha512-2eHWfjaoXgTBC2jNM1LRef62VQa0umtvRiDSk6HSzW7RvS5YtkabJrwYLLEKWBc8a5U2PTSCs+dJjUTJdlHsWQ==",
"engines": {
"node": ">= 14"
}
},
"node_modules/yocto-queue": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
"integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
}
}
}
| transformers.js/examples/next-server/package-lock.json/0 | {
"file_path": "transformers.js/examples/next-server/package-lock.json",
"repo_id": "transformers.js",
"token_count": 107116
} | 359 |
import http from 'http';
import querystring from 'querystring';
import url from 'url';
import { pipeline, env } from '@xenova/transformers';
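// Singleton wrapper: the pipeline promise is created on first use and cached,
// so the model is loaded only once and reused across all requests.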
class MyClassificationPipeline {
static task = 'text-classification';
static model = 'Xenova/distilbert-base-uncased-finetuned-sst-2-english';
static instance = null;
static async getInstance(progress_callback = null) {
if (this.instance === null) {
// NOTE: Uncomment this to change the cache directory
// env.cacheDir = './.cache';
this.instance = pipeline(this.task, this.model, { progress_callback });
}
return this.instance;
}
}
// Comment out this line if you don't want to start loading the model as soon as the server starts.
// If commented out, the model will be loaded when the first request is received (i.e., lazily).
MyClassificationPipeline.getInstance();
// Define the HTTP server
const server = http.createServer();
const hostname = '127.0.0.1';
const port = 3000;
// Listen for requests made to the server
server.on('request', async (req, res) => {
// Parse the request URL
const parsedUrl = url.parse(req.url);
// Extract the query parameters
const { text } = querystring.parse(parsedUrl.query);
// Set the response headers
res.setHeader('Content-Type', 'application/json');
let response;
if (parsedUrl.pathname === '/classify' && text) {
const classifier = await MyClassificationPipeline.getInstance();
response = await classifier(text);
res.statusCode = 200;
} else {
response = { 'error': 'Bad request' };
res.statusCode = 400;
}
// Send the JSON response
res.end(JSON.stringify(response));
});
server.listen(port, hostname, () => {
console.log(`Server running at http://${hostname}:${port}/`);
});
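// Example usage once the server is running (the query text is illustrative):
//   curl 'http://127.0.0.1:3000/classify?text=I+love+Transformers.js'
// The response is the classifier output: an array of { label, score } objects.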
| transformers.js/examples/node/esm/app.js/0 | {
"file_path": "transformers.js/examples/node/esm/app.js",
"repo_id": "transformers.js",
"token_count": 559
} | 360 |
import './style.css';
import { AutoModel, AutoProcessor, env, RawImage } from '@xenova/transformers';
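// In-browser background removal: run the RMBG-1.4 model with Transformers.js to
// predict an alpha matte for an image and composite the result onto a canvas.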
// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;
// Proxy the WASM backend to prevent the UI from freezing
env.backends.onnx.wasm.proxy = true;
// Constants
const EXAMPLE_URL = 'https://images.pexels.com/photos/5965592/pexels-photo-5965592.jpeg?auto=compress&cs=tinysrgb&w=1024';
// Reference the elements that we will need
const status = document.getElementById('status');
const fileUpload = document.getElementById('upload');
const imageContainer = document.getElementById('container');
const example = document.getElementById('example');
// Load model and processor
status.textContent = 'Loading model...';
const model = await AutoModel.from_pretrained('briaai/RMBG-1.4', {
// Do not require config.json to be present in the repository
config: { model_type: 'custom' },
});
const processor = await AutoProcessor.from_pretrained('briaai/RMBG-1.4', {
// Do not require config.json to be present in the repository
config: {
do_normalize: true,
do_pad: false,
do_rescale: true,
do_resize: true,
image_mean: [0.5, 0.5, 0.5],
feature_extractor_type: "ImageFeatureExtractor",
image_std: [1, 1, 1],
resample: 2,
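// 0.00392156862745098 is 1/255, i.e. 8-bit pixel values are rescaled into [0, 1]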
rescale_factor: 0.00392156862745098,
size: { width: 1024, height: 1024 },
}
});
status.textContent = 'Ready';
example.addEventListener('click', (e) => {
e.preventDefault();
predict(EXAMPLE_URL);
});
fileUpload.addEventListener('change', function (e) {
const file = e.target.files[0];
if (!file) {
return;
}
const reader = new FileReader();
// Set up a callback when the file is loaded
reader.onload = e2 => predict(e2.target.result);
reader.readAsDataURL(file);
});
// Predict foreground of the given image
async function predict(url) {
// Read image
const image = await RawImage.fromURL(url);
// Update UI
imageContainer.innerHTML = '';
imageContainer.style.backgroundImage = `url(${url})`;
// Set container width and height depending on the image aspect ratio
const ar = image.width / image.height;
const [cw, ch] = (ar > 720 / 480) ? [720, 720 / ar] : [480 * ar, 480];
imageContainer.style.width = `${cw}px`;
imageContainer.style.height = `${ch}px`;
status.textContent = 'Analysing...';
// Preprocess image
const { pixel_values } = await processor(image);
// Predict alpha matte
const { output } = await model({ input: pixel_values });
// Resize mask back to original size
const mask = await RawImage.fromTensor(output[0].mul(255).to('uint8')).resize(image.width, image.height);
// Create new canvas
const canvas = document.createElement('canvas');
canvas.width = image.width;
canvas.height = image.height;
const ctx = canvas.getContext('2d');
// Draw original image output to canvas
ctx.drawImage(image.toCanvas(), 0, 0);
// Update alpha channel
const pixelData = ctx.getImageData(0, 0, image.width, image.height);
for (let i = 0; i < mask.data.length; ++i) {
pixelData.data[4 * i + 3] = mask.data[i];
}
ctx.putImageData(pixelData, 0, 0);
// Update UI
imageContainer.append(canvas);
imageContainer.style.removeProperty('background-image');
imageContainer.style.background = `url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQBAMAAADt3eJSAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAGUExURb+/v////5nD/3QAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAAUSURBVBjTYwABQSCglEENMxgYGAAynwRB8BEAgQAAAABJRU5ErkJggg==")`;
status.textContent = 'Done!';
}
| transformers.js/examples/remove-background-client/main.js/0 | {
"file_path": "transformers.js/examples/remove-background-client/main.js",
"repo_id": "transformers.js",
"token_count": 1399
} | 361 |
{
"name": "semantic-audio-search",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview"
},
"devDependencies": {
"vite": "^5.0.13"
},
"dependencies": {
"@xenova/transformers": "^2.10.1",
"deepscatter": "github:nomic-ai/deepscatter"
}
}
| transformers.js/examples/semantic-audio-search/package.json/0 | {
"file_path": "transformers.js/examples/semantic-audio-search/package.json",
"repo_id": "transformers.js",
"token_count": 168
} | 362 |
'use client'
export function SearchBar({ search }) {
return (<form
onSubmit={e => {
e.preventDefault();
const formData = new FormData(e.target);
const text = formData.get('text');
search(text);
}}
className='relative mb-2'
>
<div className="absolute inset-y-0 left-0 flex items-center pl-3 pointer-events-none">
<svg className="w-4 h-4 text-gray-500 dark:text-gray-400" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 20 20">
<path stroke="currentColor" strokeLinecap="round" strokeLinejoin="round" strokeWidth="2" d="m19 19-4-4m0-7A7 7 0 1 1 1 8a7 7 0 0 1 14 0Z" />
</svg>
</div>
<input
type="search"
name="text"
id="default-search"
className="block w-full p-4 pl-10 text-sm text-gray-900 border border-gray-300 rounded-lg bg-gray-50 focus:ring-blue-500 focus:border-blue-500 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500"
placeholder="Search for images..."
required
/>
<button
type="submit"
className="text-white absolute right-2.5 bottom-2.5 bg-blue-700 hover:bg-blue-800 focus:ring-4 focus:outline-none focus:ring-blue-300 font-medium rounded-lg text-sm px-4 py-2 dark:bg-blue-600 dark:hover:bg-blue-700 dark:focus:ring-blue-800"
>
Search
</button>
</form>)
} | transformers.js/examples/semantic-image-search-client/src/app/components/SearchBar.jsx/0 | {
"file_path": "transformers.js/examples/semantic-image-search-client/src/app/components/SearchBar.jsx",
"repo_id": "transformers.js",
"token_count": 734
} | 363 |
{
"name": "semantic-image-search",
"version": "0.1.0",
"private": true,
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint"
},
"dependencies": {
"@supabase/supabase-js": "^2.31.0",
"@xenova/transformers": "^2.5.0",
"autoprefixer": "10.4.14",
"blurhash": "^2.0.5",
"eslint": "8.45.0",
"eslint-config-next": "13.4.12",
"next": "^14.2.3",
"postcss": "^8.4.31",
"react": "18.2.0",
"react-dom": "18.2.0",
"tailwindcss": "3.3.3"
},
"overrides": {
"protobufjs": "^7.2.4"
}
}
| transformers.js/examples/semantic-image-search/package.json/0 | {
"file_path": "transformers.js/examples/semantic-image-search/package.json",
"repo_id": "transformers.js",
"token_count": 319
} | 364 |
module.exports = {
root: true,
env: { browser: true, es2020: true },
extends: [
'eslint:recommended',
'plugin:react/recommended',
'plugin:react/jsx-runtime',
'plugin:react-hooks/recommended',
],
ignorePatterns: ['dist', '.eslintrc.cjs'],
parserOptions: { ecmaVersion: 'latest', sourceType: 'module' },
settings: { react: { version: '18.2' } },
plugins: ['react-refresh'],
rules: {
'react-refresh/only-export-components': [
'warn',
{ allowConstantExport: true },
],
},
}
| transformers.js/examples/text-to-speech-client/.eslintrc.cjs/0 | {
"file_path": "transformers.js/examples/text-to-speech-client/.eslintrc.cjs",
"repo_id": "transformers.js",
"token_count": 211
} | 365 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="stylesheet" href="style.css" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Transformers.js - Object Detection demo</title>
</head>
<body>
<main class="container">
<label class="custom-file-upload">
<input id="file-upload" type="file" accept="image/*" />
<img class="upload-icon" src="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/upload-icon.png" />
Upload image
</label>
<div id="image-container"></div>
<p id="status"></p>
</main>
<script src="index.js" type="module"></script>
</body>
</html> | transformers.js/examples/vanilla-js/index.html/0 | {
"file_path": "transformers.js/examples/vanilla-js/index.html",
"repo_id": "transformers.js",
"token_count": 316
} | 366 |
import {
AutoTokenizer,
CLIPTextModelWithProjection,
AutoProcessor,
CLIPVisionModelWithProjection,
RawImage,
dot,
softmax,
} from '@xenova/transformers';
import './style.css';
// Reference the elements that we will need
const status = document.getElementById('status');
const container = document.getElementById('container');
const video = document.getElementById('video');
const labelsInput = document.getElementById('labels');
const templateInput = document.getElementById('template');
const overlay = document.getElementById('overlay');
status.textContent = 'Loading model (300MB)...';
// Use fp16 if available, otherwise use fp32
async function hasFp16() {
try {
const adapter = await navigator.gpu.requestAdapter();
return adapter.features.has('shader-f16');
} catch (e) {
return false;
}
}
const dtype = (await hasFp16()) ? 'fp16' : 'fp32';
// Load CLIP tokenizer, text model, processor, and vision model for zero-shot image classification
const model_id = 'Xenova/clip-vit-base-patch16';
let tokenizer, text_model, processor, vision_model;
try {
// Load tokenizer and text model
tokenizer = await AutoTokenizer.from_pretrained(model_id);
text_model = await CLIPTextModelWithProjection.from_pretrained(model_id, {
device: 'webgpu',
dtype,
});
// Load processor and vision model
processor = await AutoProcessor.from_pretrained(model_id);
vision_model = await CLIPVisionModelWithProjection.from_pretrained(model_id, {
device: 'webgpu',
dtype,
});
} catch (err) {
status.textContent = err.message;
alert(err.message)
throw err;
}
labelsInput.disabled = false;
templateInput.disabled = false;
status.textContent = 'Ready';
// See `model.logit_scale` parameter of original model
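// exp(4.6052) ≈ 100, the scale applied to the cosine similarities before the softmax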
const exp_logit_scale = Math.exp(4.6052);
const IMAGE_SIZE = 224;
const canvas = document.createElement('canvas');
canvas.width = canvas.height = IMAGE_SIZE;
const context = canvas.getContext('2d', { willReadFrequently: true });
let isProcessing = false;
let previousTime;
let textEmbeddings;
let prevTextInputs;
let prevTemplate;
let labels;
function onFrameUpdate() {
if (!isProcessing) {
isProcessing = true;
(async function () {
// If text inputs have changed, update the embeddings
if (prevTextInputs !== labelsInput.value || prevTemplate !== templateInput.value) {
textEmbeddings = null;
prevTextInputs = labelsInput.value;
prevTemplate = templateInput.value;
labels = prevTextInputs.split(/\s*,\s*/).filter(x => x);
if (labels.length > 0) {
const texts = labels.map(x => templateInput.value.replaceAll('{}', x));
const text_inputs = tokenizer(texts, { padding: true, truncation: true });
// Compute embeddings
const { text_embeds } = await text_model(text_inputs);
textEmbeddings = text_embeds.normalize().tolist();
} else {
overlay.innerHTML = '';
}
}
if (textEmbeddings) {
// Read the current frame from the video
context.drawImage(video, 0, 0, IMAGE_SIZE, IMAGE_SIZE);
const pixelData = context.getImageData(0, 0, IMAGE_SIZE, IMAGE_SIZE).data;
const image = new RawImage(pixelData, IMAGE_SIZE, IMAGE_SIZE, 4);
const image_inputs = await processor(image);
// Compute embeddings
const { image_embeds } = await vision_model(image_inputs);
const imageEmbedding = image_embeds.normalize().tolist()[0];
// Compute similarity
const similarities = textEmbeddings.map(
x => dot(x, imageEmbedding) * exp_logit_scale
);
const sortedIndices = softmax(similarities)
.map((x, i) => [x, i])
.sort((a, b) => b[0] - a[0]);
// Update UI
overlay.innerHTML = '';
for (const [score, index] of sortedIndices) {
overlay.appendChild(document.createTextNode(`${labels[index]}: ${score.toFixed(2)}`));
overlay.appendChild(document.createElement('br'));
}
}
if (previousTime !== undefined) {
const fps = 1000 / (performance.now() - previousTime);
status.textContent = `FPS: ${fps.toFixed(2)}`;
}
previousTime = performance.now();
isProcessing = false;
})();
}
window.requestAnimationFrame(onFrameUpdate);
}
// Start the video stream
navigator.mediaDevices.getUserMedia(
{ video: true }, // Ask for video
).then((stream) => {
// Set up the video and canvas elements.
video.srcObject = stream;
video.play();
const videoTrack = stream.getVideoTracks()[0];
const { width, height } = videoTrack.getSettings();
video.width = width;
video.height = height;
// Set container width and height depending on the image aspect ratio
const ar = width / height;
const [cw, ch] = (ar > 720 / 405) ? [720, 720 / ar] : [405 * ar, 405];
container.style.width = `${cw}px`;
container.style.height = `${ch}px`;
// Start the animation loop
window.requestAnimationFrame(onFrameUpdate);
}).catch((error) => {
alert(error);
});
| transformers.js/examples/webgpu-clip/main.js/0 | {
"file_path": "transformers.js/examples/webgpu-clip/main.js",
"repo_id": "transformers.js",
"token_count": 2330
} | 367 |
import { useState } from "react";
import CrossIcon from "./icons/CrossIcon"
export default function ImagePreview({ src, onRemove, ...props }) {
const [hover, setHover] = useState(false);
return (
<div
{...props}
onMouseEnter={() => setHover(true)}
onMouseLeave={() => setHover(false)}
>
<CrossIcon onClick={onRemove} className={`absolute top-0 right-0 cursor-pointer dark:fill-gray-400 dark:text-gray-100 fill-gray-200 text-gray-800 ${hover ? '' : 'hidden'}`} />
<img src={src} alt="Upload preview" className="w-full h-full object-cover rounded-md" />
</div>)
}
| transformers.js/examples/webgpu-vlm/src/components/ImagePreview.jsx/0 | {
"file_path": "transformers.js/examples/webgpu-vlm/src/components/ImagePreview.jsx",
"repo_id": "transformers.js",
"token_count": 272
} | 368 |
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/png" href="/logo.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Whisper WebGPU</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.jsx"></script>
</body>
</html>
| transformers.js/examples/webgpu-whisper/index.html/0 | {
"file_path": "transformers.js/examples/webgpu-whisper/index.html",
"repo_id": "transformers.js",
"token_count": 153
} | 369 |
# React + Vite
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
Currently, two official plugins are available:
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
| transformers.js/examples/whisper-word-timestamps/README.md/0 | {
"file_path": "transformers.js/examples/whisper-word-timestamps/README.md",
"repo_id": "transformers.js",
"token_count": 152
} | 370 |
# Support exporting vision and text models separately:
# Adapted from https://github.com/huggingface/optimum/issues/1186#issuecomment-1637641760
from optimum.exporters.onnx.model_configs import CLIPTextOnnxConfig, ViTOnnxConfig
from typing import Dict
class CLIPVisionOnnxConfig(ViTOnnxConfig):
pass
class CLIPTextModelWithProjectionOnnxConfig(CLIPTextOnnxConfig):
@property
def outputs(self) -> Dict[str, Dict[int, str]]:
return {
"text_embeds": {0: "batch_size"},
}
def generate_dummy_inputs(self, framework: str = "pt", **kwargs):
dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)
if framework == "pt":
import torch
dummy_inputs["input_ids"] = dummy_inputs["input_ids"].to(dtype=torch.int64)
return dummy_inputs
class CLIPVisionModelWithProjectionOnnxConfig(CLIPVisionOnnxConfig):
@property
def outputs(self) -> Dict[str, Dict[int, str]]:
return {
"image_embeds": {0: "batch_size"},
}
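# These configs are meant for optimum's ONNX exporter. A hedged sketch of one way
# to use them (model id is illustrative; check your optimum version's API):
#
#   from optimum.exporters.onnx import main_export
#   main_export(
#       "openai/clip-vit-base-patch16",
#       output="clip_onnx",
#       custom_onnx_configs={...},  # map sub-model names to the configs above
#   )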
| transformers.js/scripts/extra/clip.py/0 | {
"file_path": "transformers.js/scripts/extra/clip.py",
"repo_id": "transformers.js",
"token_count": 439
} | 371 |
/**
* @file Processors are used to prepare inputs (e.g., text, image or audio) for a model.
*
* **Example:** Using a `WhisperProcessor` to prepare an audio input for a model.
* ```javascript
* import { AutoProcessor, read_audio } from '@huggingface/transformers';
*
* const processor = await AutoProcessor.from_pretrained('openai/whisper-tiny.en');
* const audio = await read_audio('https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac', 16000);
* const { input_features } = await processor(audio);
* // Tensor {
* // data: Float32Array(240000) [0.4752984642982483, 0.5597258806228638, 0.56434166431427, ...],
* // dims: [1, 80, 3000],
* // type: 'float32',
* // size: 240000,
* // }
* ```
*
* @module processors
*/
import { PROCESSOR_NAME, CHAT_TEMPLATE_NAME } from '../utils/constants.js';
import {
Callable,
} from '../utils/generic.js';
import { getModelJSON, getModelText } from '../utils/hub.js';
/**
* @typedef {Object} ProcessorProperties Additional processor-specific properties.
* @typedef {import('../utils/hub.js').PretrainedOptions & ProcessorProperties} PretrainedProcessorOptions
* @typedef {import('../tokenizers.js').PreTrainedTokenizer} PreTrainedTokenizer
*/
/**
* Represents a Processor that extracts features from an input.
*/
export class Processor extends Callable {
static classes = [
'image_processor_class',
'tokenizer_class',
'feature_extractor_class',
]
static uses_processor_config = false;
static uses_chat_template_file = false;
/**
* Creates a new Processor with the given components
* @param {Object} config
* @param {Record<string, Object>} components
* @param {string} chat_template
*/
constructor(config, components, chat_template) {
super();
this.config = config;
this.components = components;
this.chat_template = chat_template;
}
/**
* @returns {import('./image_processors_utils.js').ImageProcessor|undefined} The image processor of the processor, if it exists.
*/
get image_processor() {
return this.components.image_processor;
}
/**
* @returns {PreTrainedTokenizer|undefined} The tokenizer of the processor, if it exists.
*/
get tokenizer() {
return this.components.tokenizer;
}
/**
* @returns {import('./feature_extraction_utils.js').FeatureExtractor|undefined} The feature extractor of the processor, if it exists.
*/
get feature_extractor() {
return this.components.feature_extractor;
}
/**
* @param {Parameters<PreTrainedTokenizer['apply_chat_template']>[0]} messages
* @param {Parameters<PreTrainedTokenizer['apply_chat_template']>[1]} options
* @returns {ReturnType<PreTrainedTokenizer['apply_chat_template']>}
*/
apply_chat_template(messages, options = {}) {
if (!this.tokenizer) {
throw new Error('Unable to apply chat template without a tokenizer.');
}
return this.tokenizer.apply_chat_template(messages, {
tokenize: false, // default to false
chat_template: this.chat_template ?? undefined,
...options,
});
}
/**
* @param {Parameters<PreTrainedTokenizer['batch_decode']>} args
* @returns {ReturnType<PreTrainedTokenizer['batch_decode']>}
*/
batch_decode(...args) {
if (!this.tokenizer) {
throw new Error('Unable to decode without a tokenizer.');
}
return this.tokenizer.batch_decode(...args);
}
/**
* @param {Parameters<PreTrainedTokenizer['decode']>} args
* @returns {ReturnType<PreTrainedTokenizer['decode']>}
*/
decode(...args) {
if (!this.tokenizer) {
throw new Error('Unable to decode without a tokenizer.');
}
return this.tokenizer.decode(...args);
}
/**
* Calls the feature_extractor function with the given input.
* @param {any} input The input to extract features from.
* @param {...any} args Additional arguments.
* @returns {Promise<any>} A Promise that resolves with the extracted features.
*/
async _call(input, ...args) {
for (const item of [this.image_processor, this.feature_extractor, this.tokenizer]) {
if (item) {
return item(input, ...args);
}
}
throw new Error('No image processor, feature extractor, or tokenizer found.');
}
/**
* Instantiate one of the processor classes of the library from a pretrained model.
*
* The processor class to instantiate is selected based on the `image_processor_type` (or `feature_extractor_type`; legacy)
* property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible)
*
* @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either:
* - A string, the *model id* of a pretrained processor hosted inside a model repo on huggingface.co.
* Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
* user or organization name, like `dbmdz/bert-base-german-cased`.
* - A path to a *directory* containing processor files, e.g., `./my_model_directory/`.
* @param {PretrainedProcessorOptions} options Additional options for loading the processor.
*
* @returns {Promise<Processor>} A new instance of the Processor class.
*/
static async from_pretrained(pretrained_model_name_or_path, options={}) {
const [config, components, chat_template] = await Promise.all([
// TODO:
this.uses_processor_config
? getModelJSON(pretrained_model_name_or_path, PROCESSOR_NAME, true, options)
: {},
Promise.all(
this.classes
.filter((cls) => cls in this)
.map(async (cls) => {
const component = await this[cls].from_pretrained(pretrained_model_name_or_path, options);
return [cls.replace(/_class$/, ''), component];
})
).then(Object.fromEntries),
this.uses_chat_template_file
? getModelText(pretrained_model_name_or_path, CHAT_TEMPLATE_NAME, true, options)
: null,
]);
return new this(config, components, chat_template);
}
}
| transformers.js/src/base/processing_utils.js/0 | {
"file_path": "transformers.js/src/base/processing_utils.js",
"repo_id": "transformers.js",
"token_count": 2559
} | 372 |
import {
ImageProcessor,
} from "../../base/image_processors_utils.js";
export class ChineseCLIPFeatureExtractor extends ImageProcessor { }
| transformers.js/src/models/chinese_clip/image_processing_chinese_clip.js/0 | {
"file_path": "transformers.js/src/models/chinese_clip/image_processing_chinese_clip.js",
"repo_id": "transformers.js",
"token_count": 45
} | 373 |
import {
ImageProcessor,
} from "../../base/image_processors_utils.js";
export class GLPNFeatureExtractor extends ImageProcessor { }
| transformers.js/src/models/glpn/image_processing_glpn.js/0 | {
"file_path": "transformers.js/src/models/glpn/image_processing_glpn.js",
"repo_id": "transformers.js",
"token_count": 44
} | 374 |
import {
ImageProcessor,
} from "../../base/image_processors_utils.js";
export class MobileNetV2ImageProcessor extends ImageProcessor { }
export class MobileNetV2FeatureExtractor extends MobileNetV2ImageProcessor { }
| transformers.js/src/models/mobilenet_v2/image_processing_mobilenet_v2.js/0 | {
"file_path": "transformers.js/src/models/mobilenet_v2/image_processing_mobilenet_v2.js",
"repo_id": "transformers.js",
"token_count": 66
} | 375 |
import { Processor } from '../../base/processing_utils.js';
import { PyAnnoteFeatureExtractor } from './feature_extraction_pyannote.js';
export class PyAnnoteProcessor extends Processor {
static feature_extractor_class = PyAnnoteFeatureExtractor
/**
* Calls the feature_extractor function with the given audio input.
* @param {any} audio The audio input to extract features from.
* @returns {Promise<any>} A Promise that resolves with the extracted features.
*/
async _call(audio) {
return await this.feature_extractor(audio)
}
/** @type {PyAnnoteFeatureExtractor['post_process_speaker_diarization']} */
post_process_speaker_diarization(...args) {
return /** @type {PyAnnoteFeatureExtractor} */(this.feature_extractor).post_process_speaker_diarization(...args);
}
get sampling_rate() {
return this.feature_extractor.config.sampling_rate;
}
}
| transformers.js/src/models/pyannote/processing_pyannote.js/0 | {
"file_path": "transformers.js/src/models/pyannote/processing_pyannote.js",
"repo_id": "transformers.js",
"token_count": 316
} | 376 |
import { AutoFeatureExtractor } from "../auto/feature_extraction_auto.js"
import { AutoTokenizer } from "../../tokenizers.js"
import { Processor } from "../../base/processing_utils.js"
/**
* Represents a UltravoxProcessor that extracts features from an audio input.
*/
export class UltravoxProcessor extends Processor {
static tokenizer_class = AutoTokenizer
static feature_extractor_class = AutoFeatureExtractor
static uses_processor_config = true;
/**
* @param {string} text The text input to process.
* @param {Float32Array} audio The audio input to process.
*/
async _call(text, audio = null, kwargs = {}) {
// TODO: Support batched inputs
if (Array.isArray(text)) {
throw new Error("Batched inputs are not supported yet.");
}
let audio_inputs = {};
if (audio) {
const audio_len = audio.length;
const { input_features } = await this.feature_extractor(audio, {
...kwargs,
max_length: audio_len,
});
const nb_encoder_frames = Math.round(audio_len / this.config.encoder_ds_factor + 1e-4);
// NOTE: The python version appears to have an off-by-one error.
const audio_embed_frames = 1 + Math.ceil(nb_encoder_frames / this.config.stack_factor);
audio_inputs["audio_token_len"] = [audio_embed_frames];
audio_inputs["audio_values"] = input_features;
const audio_token = this.config.audio_placeholder;
if (!text.includes(audio_token)) {
throw new Error(`The input text does not contain the audio placeholder token ${audio_token}.`);
}
text = text.replaceAll(audio_token, audio_token.repeat(audio_embed_frames));
}
const text_inputs = this.tokenizer(text, {
add_special_tokens: false,
...kwargs,
});
return {
...text_inputs,
...audio_inputs,
}
}
}
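// Example usage (hedged sketch; the model id is hypothetical and the placeholder
// token comes from the loaded processor config):
//   const processor = await UltravoxProcessor.from_pretrained('hypothetical/ultravox-onnx');
//   const prompt = `${processor.config.audio_placeholder} Transcribe the audio.`;
//   const inputs = await processor(prompt, audioFloat32Array);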
| transformers.js/src/models/ultravox/processing_ultravox.js/0 | {
"file_path": "transformers.js/src/models/ultravox/processing_ultravox.js",
"repo_id": "transformers.js",
"token_count": 852
} | 377 |
/**
* @file Tokenizers are used to prepare textual inputs for a model.
*
* **Example:** Create an `AutoTokenizer` and use it to tokenize a sentence.
* This will automatically detect the tokenizer type based on the tokenizer class defined in `tokenizer.json`.
* ```javascript
* import { AutoTokenizer } from '@huggingface/transformers';
*
* const tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased');
* const { input_ids } = await tokenizer('I love transformers!');
* // Tensor {
* // data: BigInt64Array(6) [101n, 1045n, 2293n, 19081n, 999n, 102n],
* // dims: [1, 6],
* // type: 'int64',
* // size: 6,
* // }
* ```
*
* @module tokenizers
*/
import {
Callable,
} from './utils/generic.js';
import {
reverseDictionary,
escapeRegExp,
isIntegralNumber,
mergeArrays,
len,
} from './utils/core.js';
import {
getModelJSON,
} from './utils/hub.js';
import { max, min, round } from './utils/maths.js';
import { Tensor } from './utils/tensor.js';
import {
PriorityQueue,
TokenLattice,
CharTrie,
DictionarySplitter,
LRUCache,
} from './utils/data-structures.js';
import { Template } from '@huggingface/jinja';
import {
WHISPER_LANGUAGE_MAPPING
} from './models/whisper/common_whisper.js';
/**
* @typedef {Object} TokenizerProperties Additional tokenizer-specific properties.
* @property {boolean} [legacy=false] Whether or not the `legacy` behavior of the tokenizer should be used.
* @typedef {import('./utils/hub.js').PretrainedOptions & TokenizerProperties} PretrainedTokenizerOptions
*/
/**
* Loads a tokenizer from the specified path.
* @param {string} pretrained_model_name_or_path The path to the tokenizer directory.
* @param {PretrainedTokenizerOptions} options Additional options for loading the tokenizer.
* @returns {Promise<any[]>} A promise that resolves with information about the loaded tokenizer.
*/
async function loadTokenizer(pretrained_model_name_or_path, options) {
const info = await Promise.all([
getModelJSON(pretrained_model_name_or_path, 'tokenizer.json', true, options),
getModelJSON(pretrained_model_name_or_path, 'tokenizer_config.json', true, options),
])
// Override legacy option if `options.legacy` is not null
if (options.legacy !== null) {
info[1].legacy = options.legacy;
}
return info;
}
/**
* Helper function to split a string on a regex, but keep the delimiters.
* This is required, because the JavaScript `.split()` method does not keep the delimiters,
* and wrapping in a capturing group causes issues with existing capturing groups (due to nesting).
* @param {string} text The text to split.
* @param {RegExp} regex The regex to split on.
* @returns {string[]} The split string.
*/
function regexSplit(text, regex) {
const result = [];
let prev = 0;
for (const match of text.matchAll(regex)) {
const fullMatch = match[0];
if (prev < match.index) {
result.push(text.slice(prev, match.index));
}
if (fullMatch.length > 0) {
result.push(fullMatch);
}
prev = match.index + fullMatch.length;
}
if (prev < text.length) {
result.push(text.slice(prev));
}
return result;
}
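// e.g., regexSplit('a1b2', /\d/g) -> ['a', '1', 'b', '2'] (delimiters are kept)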
/**
* Helper method to construct a pattern from a config object.
* @param {Object} pattern The pattern object.
* @param {boolean} invert Whether to invert the pattern.
* @returns {RegExp|null} The compiled pattern.
*/
function createPattern(pattern, invert = true) {
if (pattern.Regex !== undefined) {
// In certain cases, the pattern may contain unnecessary escape sequences (e.g., \# or \& or \~).
// i.e., valid in Python (where the patterns are exported from) but invalid in JavaScript (where the patterns are parsed).
// This isn't an issue when creating the regex w/o the 'u' flag, but it is when the 'u' flag is used.
// For this reason, it is necessary to remove these backslashes before creating the regex.
// See https://stackoverflow.com/a/63007777/13989043 for more information
let regex = pattern.Regex.replace(/\\([#&~])/g, '$1'); // TODO: add more characters to this list if necessary
// We also handle special cases where the regex contains invalid (non-JS compatible) syntax.
for (const [key, value] of PROBLEMATIC_REGEX_MAP) {
regex = regex.replaceAll(key, value);
}
return new RegExp(regex, 'gu');
} else if (pattern.String !== undefined) {
const escaped = escapeRegExp(pattern.String);
// NOTE: if invert is true, we wrap the pattern in a group so that it is kept when performing .split()
return new RegExp(invert ? escaped : `(${escaped})`, 'gu');
} else {
console.warn('Unknown pattern type:', pattern)
return null;
}
}
/**
* Helper function to convert an Object to a Map
* @param {Object} obj The object to convert.
* @returns {Map<string, any>} The map.
*/
function objectToMap(obj) {
return new Map(Object.entries(obj));
}
/**
* Helper function to convert a tensor to a list before decoding.
* @param {Tensor} tensor The tensor to convert.
* @returns {number[]} The tensor as a list.
*/
function prepareTensorForDecode(tensor) {
const dims = tensor.dims;
switch (dims.length) {
case 1:
return tensor.tolist();
case 2:
if (dims[0] !== 1) {
throw new Error('Unable to decode tensor with `batch size !== 1`. Use `tokenizer.batch_decode(...)` for batched inputs.');
}
return tensor.tolist()[0];
default:
throw new Error(`Expected tensor to have 1-2 dimensions, got ${dims.length}.`)
}
}
/**
* Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms
* @param {string} text The text to clean up.
* @returns {string} The cleaned up text.
*/
function clean_up_tokenization(text) {
// Clean up a list of simple English tokenization artifacts
// like spaces before punctuations and abbreviated forms
return text.replace(/ \./g, '.')
.replace(/ \?/g, '?')
.replace(/ \!/g, '!')
.replace(/ ,/g, ',')
.replace(/ \' /g, "'")
.replace(/ n\'t/g, "n't")
.replace(/ \'m/g, "'m")
.replace(/ \'s/g, "'s")
.replace(/ \'ve/g, "'ve")
.replace(/ \'re/g, "'re");
}
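// e.g., clean_up_tokenization("Do n't stop !") -> "Don't stop!"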
/**
* Helper function to remove accents from a string.
* @param {string} text The text to remove accents from.
* @returns {string} The text with accents removed.
*/
function remove_accents(text) {
return text.replace(/\p{M}/gu, '');
}
/**
* Helper function to lowercase a string and remove accents.
* @param {string} text The text to lowercase and remove accents from.
* @returns {string} The lowercased text with accents removed.
*/
function lowercase_and_remove_accent(text) {
return remove_accents(text.toLowerCase());
}
/**
* Checks whether the given Unicode codepoint represents a CJK (Chinese, Japanese, or Korean) character.
*
* A "chinese character" is defined as anything in the CJK Unicode block:
* https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
*
* Note that the CJK Unicode block is NOT all Japanese and Korean characters, despite its name.
* The modern Korean Hangul alphabet is a different block, as is Japanese Hiragana and Katakana.
* Those alphabets are used to write space-separated words, so they are not treated specially
* and are handled like all other languages.
*
* @param {number|bigint} cp The Unicode codepoint to check.
* @returns {boolean} True if the codepoint represents a CJK character, false otherwise.
*/
export function is_chinese_char(cp) {
return (
(cp >= 0x4E00 && cp <= 0x9FFF)
|| (cp >= 0x3400 && cp <= 0x4DBF)
|| (cp >= 0x20000 && cp <= 0x2A6DF)
|| (cp >= 0x2A700 && cp <= 0x2B73F)
|| (cp >= 0x2B740 && cp <= 0x2B81F)
|| (cp >= 0x2B820 && cp <= 0x2CEAF)
|| (cp >= 0xF900 && cp <= 0xFAFF)
|| (cp >= 0x2F800 && cp <= 0x2FA1F)
)
}
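// e.g., is_chinese_char(0x4F60) === true ('你'); is_chinese_char(0x0041) === false ('A')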
/**
* Helper function to fuse consecutive unknown tokens.
* @param {string[]} arr The list of input tokens
* @param {Map<string, any>} tokens_to_ids The mapping from tokens to token ids.
* @param {number} unk_token_id The value to fuse on.
* @private
*/
function fuse_unk(arr, tokens_to_ids, unk_token_id) {
const fused = [];
let i = 0;
while (i < arr.length) {
fused.push(arr[i])
if ((tokens_to_ids.get(arr[i]) ?? unk_token_id) !== unk_token_id) {
++i;
continue;
}
while (++i < arr.length && (tokens_to_ids.get(arr[i]) ?? unk_token_id) === unk_token_id) {
if (tokens_to_ids.get(fused.at(-1)) !== unk_token_id) {
fused[fused.length - 1] += arr[i];
}
}
}
return fused;
}
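// e.g., if 'x' and 'y' are both out-of-vocabulary:
//   fuse_unk(['a', 'x', 'y', 'b'], tokens_to_ids, unk_id) -> ['a', 'xy', 'b']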
/**
* Split a string on whitespace.
* @param {string} text The text to split.
* @returns {string[]} The split string.
*/
function whitespace_split(text) {
return text.match(/\S+/g) || [];
}
const PUNCTUATION_REGEX = '\\p{P}\\u0021-\\u002F\\u003A-\\u0040\\u005B-\\u0060\\u007B-\\u007E';
const PUNCTUATION_ONLY_REGEX = new RegExp(`^[${PUNCTUATION_REGEX}]+$`, 'gu');
const BLOOM_SPLIT_CHARS = '.,!?\u2026\u3002\uff0c\u3001\u0964\u06d4\u060c';
// A mapping of regex patterns to their equivalent (but possibly longer) JS-compatible versions.
const PROBLEMATIC_REGEX_MAP = new Map([
// This uses the case insensitive group modifier, which is not supported in JavaScript.
// When parsing the regex, an "Invalid group" error is thrown.
["(?i:'s|'t|'re|'ve|'m|'ll|'d)", "(?:'([sS]|[tT]|[rR][eE]|[vV][eE]|[mM]|[lL][lL]|[dD]))"],
// Used to override the default (invalid) regex of the bloom pretokenizer.
// For more information, see https://github.com/huggingface/transformers.js/issues/94
[` ?[^(\\s|[${BLOOM_SPLIT_CHARS}])]+`, ` ?[^\\s${BLOOM_SPLIT_CHARS}]+`],
])
/**
* Represent a token added by the user on top of the existing Model vocabulary.
* AddedToken can be configured to specify the behavior they should have in various situations like:
* - Whether they should only match single words
* - Whether to include any whitespace on its left or right
*/
class AddedToken {
/**
* Creates a new instance of AddedToken.
* @param {Object} config Added token configuration object.
* @param {string} config.content The content of the added token.
* @param {number} config.id The id of the added token.
* @param {boolean} [config.single_word=false] Whether this token must be a single word or can break words.
* @param {boolean} [config.lstrip=false] Whether this token should strip whitespaces on its left.
* @param {boolean} [config.rstrip=false] Whether this token should strip whitespaces on its right.
* @param {boolean} [config.normalized=false] Whether this token should be normalized.
* @param {boolean} [config.special=false] Whether this token is special.
*/
constructor(config) {
this.content = config.content;
this.id = config.id;
this.single_word = config.single_word ?? false;
this.lstrip = config.lstrip ?? false;
this.rstrip = config.rstrip ?? false;
this.special = config.special ?? false;
this.normalized = config.normalized ?? null;
}
}
/**
* Abstract base class for tokenizer models.
*
* @extends Callable
*/
export class TokenizerModel extends Callable {
/**
* Creates a new instance of TokenizerModel.
* @param {Object} config The configuration object for the TokenizerModel.
*/
constructor(config) {
super();
this.config = config;
/** @type {string[]} */
this.vocab = [];
/**
* A mapping of tokens to ids.
* @type {Map<string, number>}
*/
this.tokens_to_ids = new Map();
this.unk_token_id = undefined;
this.unk_token = undefined;
this.end_of_word_suffix = undefined;
/** @type {boolean} Whether to fuse unknown tokens when encoding. Defaults to false. */
this.fuse_unk = this.config.fuse_unk ?? false;
}
/**
* Instantiates a new TokenizerModel instance based on the configuration object provided.
* @param {Object} config The configuration object for the TokenizerModel.
* @param {...*} args Optional arguments to pass to the specific TokenizerModel constructor.
* @returns {TokenizerModel} A new instance of a TokenizerModel.
* @throws Will throw an error if the TokenizerModel type in the config is not recognized.
*/
static fromConfig(config, ...args) {
switch (config.type) {
case 'WordPiece':
return new WordPieceTokenizer(config);
case 'Unigram':
// @ts-ignore
return new Unigram(config, ...args);
case 'BPE':
return new BPE(config);
default:
// Some older tokenizers, like `google-t5/t5-small`, `openai-community/gpt2`, and `distilbert/distilbert-base-uncased`, do not have a `type` field.
// In this case, we can infer the tokenizer type based on the structure of the `vocab` field and other properties.
if (config.vocab) {
if (Array.isArray(config.vocab)) {
// config.vocab is of type `[string, number][]`
// @ts-ignore
return new Unigram(config, ...args);
} else if (Object.hasOwn(config, 'continuing_subword_prefix') && Object.hasOwn(config, 'unk_token')) {
if (Object.hasOwn(config, 'merges')) {
return new BPE(config);
} else {
return new WordPieceTokenizer(config);
}
} else {
// @ts-ignore
return new LegacyTokenizerModel(config, ...args);
}
}
throw new Error(`Unknown TokenizerModel type: ${config.type}`);
}
}
/**
* Internal function to call the TokenizerModel instance.
* @param {string[]} tokens The tokens to encode.
* @returns {string[]} The encoded tokens.
*/
_call(tokens) {
tokens = this.encode(tokens);
if (this.fuse_unk) {
// Fuse unknown tokens
tokens = fuse_unk(tokens, this.tokens_to_ids, this.unk_token_id);
}
return tokens;
}
/**
* Encodes a list of tokens into a list of (sub)tokens.
* @param {string[]} tokens The tokens to encode.
* @returns {string[]} The encoded tokens.
* @throws Will throw an error if not implemented in a subclass.
*/
encode(tokens) {
throw Error("encode should be implemented in subclass.")
}
/**
* Converts a list of tokens into a list of token IDs.
* @param {string[]} tokens The tokens to convert.
* @returns {number[]} The converted token IDs.
*/
convert_tokens_to_ids(tokens) {
return tokens.map(t => this.tokens_to_ids.get(t) ?? this.unk_token_id);
}
/**
* Converts a list of token IDs into a list of tokens.
* @param {number[]|bigint[]} ids The token IDs to convert.
* @returns {string[]} The converted tokens.
*/
convert_ids_to_tokens(ids) {
return ids.map(i => this.vocab[i] ?? this.unk_token);
}
}
/**
* A subclass of TokenizerModel that uses WordPiece encoding to encode tokens.
* @extends TokenizerModel
*/
class WordPieceTokenizer extends TokenizerModel {
/**
* @param {Object} config The configuration object.
* @param {Object} config.vocab A mapping of tokens to ids.
* @param {string} config.unk_token The unknown token string.
* @param {string} config.continuing_subword_prefix The prefix to use for continuing subwords.
* @param {number} [config.max_input_chars_per_word=100] The maximum number of characters per word.
*/
constructor(config) {
super(config);
/**
* A mapping of tokens to ids.
* @type {Map<string, number>}
*/
this.tokens_to_ids = objectToMap(config.vocab);
/**
* The id of the unknown token.
* @type {number}
*/
this.unk_token_id = this.tokens_to_ids.get(config.unk_token);
/**
* The unknown token string.
* @type {string}
*/
this.unk_token = config.unk_token;
/**
* The maximum number of characters allowed per word.
* @type {number}
*/
this.max_input_chars_per_word = config.max_input_chars_per_word ?? 100;
/**
* An array of tokens.
* @type {string[]}
*/
this.vocab = new Array(this.tokens_to_ids.size);
for (const [key, value] of this.tokens_to_ids) {
this.vocab[value] = key;
}
}
/**
* Encodes an array of tokens using WordPiece encoding.
* @param {string[]} tokens The tokens to encode.
* @returns {string[]} An array of encoded tokens.
*/
encode(tokens) {
const outputTokens = [];
for (const token of tokens) {
const chars = [...token];
if (chars.length > this.max_input_chars_per_word) {
outputTokens.push(this.unk_token);
continue;
}
let isUnknown = false;
let start = 0;
const subTokens = [];
while (start < chars.length) {
let end = chars.length;
let currentSubstring = null;
while (start < end) {
let substr = chars.slice(start, end).join('');
if (start > 0) {
substr = this.config.continuing_subword_prefix + substr;
}
if (this.tokens_to_ids.has(substr)) {
currentSubstring = substr;
break;
}
--end;
}
if (currentSubstring === null) {
isUnknown = true;
break;
}
subTokens.push(currentSubstring);
start = end;
}
if (isUnknown) {
outputTokens.push(this.unk_token);
} else {
outputTokens.push(...subTokens);
}
}
return outputTokens;
}
}
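// Example (assuming a BERT-style vocab containing 'un', '##aff', '##able'):
//   encode(['unaffable']) -> ['un', '##aff', '##able']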
/**
* Class representing a Unigram tokenizer model.
* @extends TokenizerModel
*/
class Unigram extends TokenizerModel {
/**
* Create a new Unigram tokenizer model.
* @param {Object} config The configuration object for the Unigram model.
* @param {number} config.unk_id The ID of the unknown token
* @param {[string, number][]} config.vocab A 2D array representing a mapping of tokens to scores.
* @param {Object} moreConfig Additional configuration object for the Unigram model.
*/
constructor(config, moreConfig) {
super(config);
const vocabSize = config.vocab.length;
this.vocab = new Array(vocabSize);
/** @type {number[]} */
this.scores = new Array(vocabSize);
for (let i = 0; i < vocabSize; ++i) {
[this.vocab[i], this.scores[i]] = config.vocab[i];
}
this.unk_token_id = config.unk_id;
this.unk_token = this.vocab[config.unk_id];
this.tokens_to_ids = new Map(this.vocab.map((x, i) => [x, i]));
this.bos_token = ' '; // beginning of a sentence token
this.bos_token_id = this.tokens_to_ids.get(this.bos_token); // NOTE: may be undefined
this.eos_token = moreConfig.eos_token;
this.eos_token_id = this.tokens_to_ids.get(this.eos_token);
this.minScore = min(this.scores)[0];
this.unk_score = this.minScore - 10.0;
this.scores[this.unk_token_id] = this.unk_score;
this.trie = new CharTrie();
this.trie.extend(this.vocab);
// NOTE: `fuse_unk` is hardcoded to true for Unigram models
// See: https://github.com/huggingface/tokenizers/blob/b58227c7f1ccf8b73ee2268354336da56d91e492/tokenizers/src/models/unigram/model.rs#L119
this.fuse_unk = true;
}
/**
* Populates lattice nodes.
* @param {TokenLattice} lattice The token lattice to populate with nodes.
*/
populateNodes(lattice) {
const chars = lattice.chars;
const mblen = 1;
let beginPos = 0;
while (beginPos < chars.length) {
let hasSingleNode = false;
const tokens = [];
const sliced = chars.slice(beginPos).join('');
const prefixedTokens = this.trie.commonPrefixSearch(sliced);
for (const token of prefixedTokens) {
tokens.push(token);
const tokenId = this.tokens_to_ids.get(token);
const tokenScore = this.scores[tokenId];
const n = len(token);
lattice.insert(beginPos, n, tokenScore, tokenId);
if (!hasSingleNode && n === mblen) {
hasSingleNode = true;
}
}
if (!hasSingleNode) {
lattice.insert(beginPos, mblen, this.unk_score, this.unk_token_id);
}
beginPos += mblen;
}
}
/**
* Encodes a normalized string into an array of subtokens using the unigram model.
*
* @param {string} normalized The normalized string.
* @returns {string[]} An array of subtokens obtained by encoding the input tokens using the unigram model.
*/
tokenize(normalized) {
const lattice = new TokenLattice(normalized, this.bos_token_id, this.eos_token_id);
this.populateNodes(lattice);
return lattice.tokens();
}
/**
* Encodes an array of tokens using Unigram encoding.
* @param {string[]} tokens The tokens to encode.
* @returns {string[]} An array of encoded tokens.
*/
encode(tokens) {
const toReturn = [];
for (const token of tokens) {
const tokenized = this.tokenize(token);
toReturn.push(...tokenized);
}
return toReturn;
}
}
/**
* Returns a list of utf-8 bytes and a mapping to unicode strings.
* Specifically avoids mapping to whitespace/control characters the BPE code barfs on.
* @returns {Object} Object with utf-8 byte keys and unicode string values.
*/
const BYTES_TO_UNICODE = (() => {
// Returns a list of utf-8 bytes and a mapping to unicode strings.
// We specifically avoid mapping to whitespace/control characters
// the bpe code barfs on.
const bs = [
...Array.from({ length: "~".charCodeAt(0) - "!".charCodeAt(0) + 1 }, (_, i) => i + "!".charCodeAt(0)),
...Array.from({ length: "¬".charCodeAt(0) - "¡".charCodeAt(0) + 1 }, (_, i) => i + "¡".charCodeAt(0)),
...Array.from({ length: "ÿ".charCodeAt(0) - "®".charCodeAt(0) + 1 }, (_, i) => i + "®".charCodeAt(0)),
];
const cs = bs.slice();
let n = 0;
for (let b = 0; b < 256; ++b) {
if (!bs.includes(b)) {
bs.push(b);
cs.push(256 + n);
n += 1;
}
}
const ccs = cs.map(n => String.fromCharCode(n));
return Object.fromEntries(bs.map((b, i) => [b, ccs[i]]));
})();
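// e.g., the space byte (0x20) is remapped to 'Ġ' (U+0120), which is why GPT-2-style
// vocabularies display 'Ġ' at the start of word-initial tokens.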
const UNICODE_TO_BYTES = reverseDictionary(BYTES_TO_UNICODE);
/**
* @typedef {Object} BPENode
* @property {string} token The token associated with the node
* @property {number} bias A positional bias for the node.
* @property {number} [score] The score of the node.
* @property {BPENode} [prev] The previous node in the linked list.
* @property {BPENode} [next] The next node in the linked list.
*/
/**
* BPE class for encoding text into Byte-Pair-Encoding (BPE) tokens.
* @extends TokenizerModel
*/
class BPE extends TokenizerModel {
/**
* Create a BPE instance.
* @param {Object} config The configuration object for BPE.
* @param {Object} config.vocab A mapping of tokens to ids.
* @param {string[]|[string, string][]} config.merges An array of BPE merges as strings.
* @param {string} config.unk_token The unknown token used for out of vocabulary words.
* @param {string} config.end_of_word_suffix The suffix to place at the end of each word.
* @param {string} [config.continuing_subword_suffix] The suffix to insert between words.
* @param {boolean} [config.byte_fallback=false] Whether to use spm byte-fallback trick (defaults to False)
* @param {boolean} [config.ignore_merges=false] Whether or not to match tokens with the vocab before using merges.
*/
constructor(config) {
super(config);
/** @type {Map<string, number>} */
this.tokens_to_ids = objectToMap(config.vocab);
this.unk_token_id = this.tokens_to_ids.get(config.unk_token);
this.unk_token = config.unk_token;
this.vocab = new Array(this.tokens_to_ids.size);
for (const [key, value] of this.tokens_to_ids) {
this.vocab[value] = key;
}
// Tokenizers >= 0.20.0 serializes BPE merges as a [string, string][] instead of a string[],
// which resolves the ambiguity for merges containing spaces.
const use_new_merge_format = Array.isArray(config.merges[0]);
/** @type {[string, string][]} */
this.merges = use_new_merge_format
? /** @type {[string, string][]} */(config.merges)
: (/** @type {string[]} */(config.merges)).map(x => /** @type {[string, string]} */(x.split(' ', 2)));
this.bpe_ranks = new Map(this.merges.map((x, i) => [JSON.stringify(x), i]));
this.end_of_word_suffix = config.end_of_word_suffix;
// NOTE: `continuing_subword_suffix` is custom (to support `BlenderbotSmallTokenizer`)
this.continuing_subword_suffix = config.continuing_subword_suffix ?? null;
this.byte_fallback = this.config.byte_fallback ?? false;
if (this.byte_fallback) {
this.text_encoder = new TextEncoder();
}
this.ignore_merges = this.config.ignore_merges ?? false;
/**
* The maximum length we should cache in a model.
* Strings that are too long have minimal chances to cache hit anyway
*/
this.max_length_to_cache = 256;
/**
* The default capacity for a `BPE`'s internal cache.
*/
this.cache_capacity = 10000;
this.cache = new LRUCache(this.cache_capacity);
}
/**
* Clears the cache.
*/
clear_cache() {
this.cache.clear();
}
/**
* Apply Byte-Pair-Encoding (BPE) to a given token. Efficient heap-based priority
* queue implementation adapted from https://github.com/belladoreai/llama-tokenizer-js.
* @param {string} token The token to encode.
* @returns {string[]} The BPE encoded tokens.
*/
bpe(token) {
if (token.length === 0) {
return [];
}
const cached = this.cache.get(token);
if (cached !== undefined) {
return cached;
}
const word = Array.from(token);
if (this.end_of_word_suffix) {
word[word.length - 1] += this.end_of_word_suffix;
}
let result = [];
if (word.length > 1) {
// Create a priority queue to store the nodes that will be merged.
// The comparator function compares the scores of the nodes.
const queue = new PriorityQueue((a, b) => a.score < b.score);
// Construct a doubly-linked list of nodes that will be inserted into the priority queue,
// starting with the individual characters. We also populate each node with a positional
// bias to break ties in the priority queue.
let startingNode = {
token: word[0],
bias: 0,
prev: null,
next: null,
}
let previousNode = startingNode
for (let i = 1; i < word.length; ++i) {
const currentNode = {
bias: i / word.length, // Add fractional component to break ties
token: word[i],
prev: previousNode,
next: null,
}
previousNode.next = currentNode
this._add_node(queue, previousNode)
previousNode = currentNode
}
while (!queue.isEmpty()) {
// Get the next node with the highest priority
const node = queue.pop();
// Check that this merge is still possible
if (node.deleted || !node.next || node.next.deleted) continue;
// Here, we mark the current node (left side of the merge) and the next node (right side of the merge) as deleted.
// This is because they will both be replaced by a new node representing the merge result.
node.deleted = true;
node.next.deleted = true;
// Next, we fix the node that comes before the current node (i.e., left side of the merge).
if (node.prev) {
// Make a shallow copy of the previous node
const newPreviousNode = { ...node.prev };
// Mark the old previous node as deleted. This avoids erroneous merges later,
// because there may still be references to this node in the priority queue.
node.prev.deleted = true;
node.prev = newPreviousNode;
// Update the reference of the previous node, by pointing its previous node to this new previous node.
if (newPreviousNode.prev) {
newPreviousNode.prev.next = newPreviousNode;
} else {
// If the previous of the previous node does not exist, it means that
// `newPreviousNode` must be the new `startingNode`.
startingNode = newPreviousNode;
}
}
// Create a new node which represents the result of the merge.
const merged = {
token: node.token + node.next.token,
bias: node.bias,
prev: node.prev,
next: node.next.next,
}
// We now consider where we can add the new merged node to the priority queue:
// 1. prev <-> merged
if (merged.prev) {
merged.prev.next = merged;
this._add_node(queue, merged.prev);
} else {
// If `merged.prev` does not exist, then `merged` must be the new `startingNode`.
startingNode = merged;
}
// 2. merged <-> next
if (merged.next) {
merged.next.prev = merged;
this._add_node(queue, merged);
}
}
// Traverse the linked list, starting from the `startingNode`, and collect the tokens.
for (let currentNode = startingNode; currentNode !== null; currentNode = currentNode.next) {
result.push(currentNode.token);
}
} else {
result = word;
}
// Possibly append suffix
if (this.continuing_subword_suffix) {
// Do not append suffix to the last token
for (let i = 0; i < result.length - 1; ++i) {
result[i] += this.continuing_subword_suffix;
}
}
if (token.length < this.max_length_to_cache) {
// Save the result to the cache
this.cache.put(token, result);
}
return result;
}
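// Minimal sketch of the merge order: with merges [["l","o"], ["lo","w"]],
// bpe('low') first merges 'l'+'o' (rank 0), then 'lo'+'w' (rank 1) -> ['low'].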
/**
* Helper function to add a node to the priority queue.
* @param {PriorityQueue} queue
* @param {BPENode} node
* @private
*/
_add_node(queue, node) {
// `score` is a measure of the merge priority: lower means higher priority
// We use the BPE rank as a measure of priority (i.e., the location of the merge in the merges list)
// We also add a fractional component to the score to break ties (with the earlier character having higher priority)
const rank = this.bpe_ranks.get(JSON.stringify([node.token, node.next.token]));
if (rank !== undefined) {
node.score = rank + node.bias;
queue.push(node);
}
}
/**
* Encodes the input sequence of tokens using the BPE algorithm and returns the resulting subword tokens.
* @param {string[]} tokens The input sequence of tokens to encode.
* @returns {string[]} The resulting subword tokens after applying the BPE algorithm to the input sequence of tokens.
*/
encode(tokens) {
const outputTokens = [];
for (const token of tokens) {
if (this.ignore_merges && this.tokens_to_ids.has(token)) {
outputTokens.push(token);
continue;
}
const bpe_token_list = this.bpe(token);
for (const t of bpe_token_list) {
if (this.tokens_to_ids.has(t)) {
outputTokens.push(t);
} else if (this.byte_fallback) {
const byteTokens = Array.from(this.text_encoder.encode(t))
.map(x => `<0x${x.toString(16).toUpperCase().padStart(2, '0')}>`);
if (byteTokens.every(x => this.tokens_to_ids.has(x))) {
// Ensure the byte tokens are actually in the vocabulary, otherwise
// we fall back to the unknown token. For more information, see
// https://github.com/huggingface/transformers/issues/28096.
outputTokens.push(...byteTokens);
} else {
outputTokens.push(this.unk_token);
}
} else {
outputTokens.push(this.unk_token);
}
}
}
return outputTokens;
}
}
/**
* Legacy tokenizer class for tokenizers with only a vocabulary.
*/
class LegacyTokenizerModel extends TokenizerModel {
/**
* Create a LegacyTokenizerModel instance.
* @param {Object} config The configuration object for LegacyTokenizerModel.
* @param {Object} config.vocab A (possibly nested) mapping of tokens to ids.
* @param {Object} moreConfig Additional configuration object for the LegacyTokenizerModel model.
*/
constructor(config, moreConfig) {
super(config);
/**@type {Map<string, number>} */
this.tokens_to_ids = objectToMap(
moreConfig.target_lang
? config.vocab[moreConfig.target_lang]
: config.vocab
);
this.bos_token = moreConfig.bos_token;
this.bos_token_id = this.tokens_to_ids.get(this.bos_token);
this.eos_token = moreConfig.eos_token;
this.eos_token_id = this.tokens_to_ids.get(this.eos_token);
this.pad_token = moreConfig.pad_token;
this.pad_token_id = this.tokens_to_ids.get(this.pad_token);
this.unk_token = moreConfig.unk_token;
this.unk_token_id = this.tokens_to_ids.get(this.unk_token);
this.vocab = new Array(this.tokens_to_ids.size);
for (const [key, value] of this.tokens_to_ids) {
this.vocab[value] = key;
}
}
encode(tokens) {
return tokens;
}
}
/**
* A base class for text normalization.
* @abstract
*/
class Normalizer extends Callable {
/**
* @param {Object} config The configuration object for the normalizer.
*/
constructor(config) {
super();
this.config = config;
}
/**
* Factory method for creating normalizers from config objects.
* @static
* @param {Object} config The configuration object for the normalizer.
* @returns {Normalizer} A Normalizer object.
* @throws {Error} If an unknown Normalizer type is specified in the config.
*/
static fromConfig(config) {
if (config === null) return null;
switch (config.type) {
case 'BertNormalizer':
return new BertNormalizer(config);
case 'Precompiled':
return new Precompiled(config);
case 'Sequence':
return new NormalizerSequence(config);
case 'Replace':
return new Replace(config);
case 'NFC':
return new NFC(config);
case 'NFD':
return new NFD(config);
case 'NFKC':
return new NFKC(config);
case 'NFKD':
return new NFKD(config);
case 'Strip':
return new StripNormalizer(config);
case 'StripAccents':
return new StripAccents(config);
case 'Lowercase':
return new Lowercase(config);
case 'Prepend':
return new Prepend(config);
default:
throw new Error(`Unknown Normalizer type: ${config.type}`);
}
}
/**
* Normalize the input text.
* @abstract
* @param {string} text The text to normalize.
* @returns {string} The normalized text.
* @throws {Error} If this method is not implemented in a subclass.
*/
normalize(text) {
throw Error("normalize should be implemented in subclass.")
}
/**
* Alias for {@link Normalizer#normalize}.
* @param {string} text The text to normalize.
* @returns {string} The normalized text.
*/
_call(text) {
return this.normalize(text);
}
}
/**
* Replace normalizer that replaces occurrences of a pattern with a given string or regular expression.
* @extends Normalizer
*/
class Replace extends Normalizer {
/**
* Normalize the input text by replacing the pattern with the content.
* @param {string} text The input text to be normalized.
* @returns {string} The normalized text after replacing the pattern with the content.
*/
normalize(text) {
const pattern = createPattern(this.config.pattern);
return pattern === null
? text
: text.replaceAll(pattern, this.config.content);
}
}
/**
* A normalizer that applies Unicode normalization to the input text.
* @extends Normalizer
* @abstract
*/
class UnicodeNormalizer extends Normalizer {
/**
* @type {string} The Unicode normalization form to apply.
* Should be one of: 'NFC', 'NFD', 'NFKC', or 'NFKD'.
*/
form = undefined;
/**
* Normalize the input text by applying Unicode normalization.
* @param {string} text The input text to be normalized.
* @returns {string} The normalized text.
*/
normalize(text) {
text = text.normalize(this.form)
return text;
}
}
/**
* A normalizer that applies Unicode normalization form C (NFC) to the input text.
* Canonical Decomposition, followed by Canonical Composition.
* @extends UnicodeNormalizer
*/
class NFC extends UnicodeNormalizer {
form = 'NFC';
}
/**
* A normalizer that applies Unicode normalization form D (NFD) to the input text.
* Canonical Decomposition.
* @extends UnicodeNormalizer
*/
class NFD extends UnicodeNormalizer {
form = 'NFD';
}
/**
* A normalizer that applies Unicode normalization form KC (NFKC) to the input text.
* Compatibility Decomposition, followed by Canonical Composition.
* @extends UnicodeNormalizer
*/
class NFKC extends UnicodeNormalizer {
form = 'NFKC';
}
/**
* A normalizer that applies Unicode normalization form KD (NFKD) to the input text.
* Compatibility Decomposition.
* @extends UnicodeNormalizer
*/
class NFKD extends UnicodeNormalizer {
form = 'NFKD';
}
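// Worked example of the four forms (these classes delegate to the standard
// `String.prototype.normalize`):
//
//   '\uFB01'.normalize('NFC');  // 'ﬁ' (the ligature has no canonical decomposition)
//   '\uFB01'.normalize('NFKC'); // 'fi' (compatibility mapping splits the ligature)
//   'é'.normalize('NFD');       // 'e' + '\u0301' (combining acute accent)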
/**
* A normalizer that strips leading and/or trailing whitespace from the input text.
*/
class StripNormalizer extends Normalizer {
/**
* Strip leading and/or trailing whitespace from the input text.
* @param {string} text The input text.
* @returns {string} The normalized text.
*/
normalize(text) {
if (this.config.strip_left && this.config.strip_right) {
// Fast path to avoid an extra trim call
text = text.trim();
} else {
if (this.config.strip_left) {
text = text.trimStart();
}
if (this.config.strip_right) {
text = text.trimEnd();
}
}
return text;
}
}
/**
* StripAccents normalizer removes all accents from the text.
* @extends Normalizer
*/
class StripAccents extends Normalizer {
/**
* Remove all accents from the text.
* @param {string} text The input text.
* @returns {string} The normalized text without accents.
*/
normalize(text) {
text = remove_accents(text);
return text;
}
}
/**
* A Normalizer that lowercases the input string.
* @extends Normalizer
*/
class Lowercase extends Normalizer {
/**
* Lowercases the input string.
* @param {string} text The text to normalize.
* @returns {string} The normalized text.
*/
normalize(text) {
text = text.toLowerCase();
return text;
}
}
/**
* A Normalizer that prepends a string to the input string.
* @extends Normalizer
*/
class Prepend extends Normalizer {
/**
* Prepends the configured string to the input string.
* @param {string} text The text to normalize.
* @returns {string} The normalized text.
*/
normalize(text) {
text = this.config.prepend + text;
return text;
}
}
/**
* A Normalizer that applies a sequence of Normalizers.
* @extends Normalizer
*/
class NormalizerSequence extends Normalizer {
/**
* Create a new instance of NormalizerSequence.
* @param {Object} config The configuration object.
* @param {Object[]} config.normalizers An array of Normalizer configuration objects.
*/
constructor(config) {
super(config);
this.normalizers = config.normalizers.map(x => Normalizer.fromConfig(x));
}
/**
* Apply a sequence of Normalizers to the input text.
* @param {string} text The text to normalize.
* @returns {string} The normalized text.
*/
normalize(text) {
return this.normalizers.reduce((t, normalizer) => {
return normalizer.normalize(t);
}, text);
}
}
/**
* A class representing a normalizer used in BERT tokenization.
* @extends Normalizer
*/
class BertNormalizer extends Normalizer {
/**
* Adds whitespace around any CJK (Chinese, Japanese, or Korean) character in the input text.
*
* @param {string} text The input text to tokenize.
* @returns {string} The tokenized text with whitespace added around CJK characters.
*/
_tokenize_chinese_chars(text) {
/* Adds whitespace around any CJK character. */
const output = [];
// Iterate by code point (not UTF-16 code unit) so that supplementary-plane
// CJK characters (e.g., CJK Unified Ideographs Extension B) are not split
// into surrogate halves before the range check.
for (const char of text) {
const cp = char.codePointAt(0);
if (is_chinese_char(cp)) {
output.push(" ");
output.push(char);
output.push(" ");
} else {
output.push(char);
}
}
return output.join("");
}
/**
* Strips accents from the given text.
* @param {string} text The text to strip accents from.
* @returns {string} The text with accents removed.
*/
stripAccents(text) {
// "Mark, Nonspacing" (Mn)
return text.normalize('NFD').replace(/\p{Mn}/gu, '');
}
/**
* Checks whether `char` is a control character.
* @param {string} char The character to check.
* @returns {boolean} Whether `char` is a control character.
* @private
*/
_is_control(char) {
switch (char) {
case '\t':
case '\n':
case '\r':
// These are technically control characters but we count them as whitespace characters.
return false;
default:
// Check if unicode category starts with C:
// Cc - Control
// Cf - Format
// Co - Private Use
// Cs - Surrogate
return /^[\p{Cc}\p{Cf}\p{Co}\p{Cs}]$/u.test(char);
}
}
/**
* Performs invalid character removal and whitespace cleanup on text.
* @param {string} text The text to clean.
* @returns {string} The cleaned text.
* @private
*/
_clean_text(text) {
const output = [];
for (const char of text) {
const cp = char.codePointAt(0);
if (cp === 0 || cp === 0xFFFD || this._is_control(char)) {
continue;
}
if (/^\s$/.test(char)) { // is whitespace
output.push(" ");
} else {
output.push(char);
}
}
return output.join("");
}
/**
* Normalizes the given text based on the configuration.
* @param {string} text The text to normalize.
* @returns {string} The normalized text.
*/
normalize(text) {
if (this.config.clean_text) {
text = this._clean_text(text);
}
if (this.config.handle_chinese_chars) {
text = this._tokenize_chinese_chars(text);
}
if (this.config.lowercase) {
text = text.toLowerCase();
if (this.config.strip_accents !== false) {
text = this.stripAccents(text);
}
} else if (this.config.strip_accents) {
text = this.stripAccents(text);
}
return text;
}
}
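// Usage sketch (illustrative; the config keys follow the BertNormalizer schema in
// `tokenizer.json`). Note that when `lowercase` is true, accents are stripped
// unless `strip_accents` is explicitly `false`:
//
//   const normalizer = new BertNormalizer({
//       clean_text: true,
//       handle_chinese_chars: true,
//       lowercase: true,
//       strip_accents: null, // null (not false) => accents are still stripped
//   });
//   normalizer.normalize('Héllo'); // 'hello'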
/**
* A callable class representing a pre-tokenizer used in tokenization. Subclasses
* should implement the `pre_tokenize_text` method to define the specific pre-tokenization logic.
* @extends Callable
*/
class PreTokenizer extends Callable {
/**
* Factory method that returns an instance of a subclass of `PreTokenizer` based on the provided configuration.
*
* @static
* @param {Object} config A configuration object for the pre-tokenizer.
* @returns {PreTokenizer} An instance of a subclass of `PreTokenizer`.
* @throws {Error} If the provided configuration object does not correspond to any known pre-tokenizer.
*/
static fromConfig(config) {
if (config === null) return null;
switch (config.type) {
case 'BertPreTokenizer':
return new BertPreTokenizer(config);
case 'Sequence':
return new PreTokenizerSequence(config);
case 'Whitespace':
return new WhitespacePreTokenizer(config);
case 'WhitespaceSplit':
return new WhitespaceSplit(config);
case 'Metaspace':
return new MetaspacePreTokenizer(config);
case 'ByteLevel':
return new ByteLevelPreTokenizer(config);
case 'Split':
return new SplitPreTokenizer(config);
case 'Punctuation':
return new PunctuationPreTokenizer(config);
case 'Digits':
return new DigitsPreTokenizer(config);
case 'Replace':
return new ReplacePreTokenizer(config);
default:
throw new Error(`Unknown PreTokenizer type: ${config.type}`);
}
}
/**
* Method that should be implemented by subclasses to define the specific pre-tokenization logic.
*
* @abstract
* @param {string} text The text to pre-tokenize.
* @param {Object} [options] Additional options for the pre-tokenization logic.
* @returns {string[]} The pre-tokenized text.
* @throws {Error} If the method is not implemented in the subclass.
*/
pre_tokenize_text(text, options) {
throw Error("pre_tokenize_text should be implemented in subclass.")
}
/**
* Tokenizes the given text into pre-tokens.
* @param {string|string[]} text The text or array of texts to pre-tokenize.
* @param {Object} [options] Additional options for the pre-tokenization logic.
* @returns {string[]} An array of pre-tokens.
*/
pre_tokenize(text, options) {
return (Array.isArray(text)
? text.map(x => this.pre_tokenize_text(x, options))
: this.pre_tokenize_text(text, options)
).flat();
}
/**
* Alias for {@link PreTokenizer#pre_tokenize}.
* @param {string|string[]} text The text or array of texts to pre-tokenize.
* @param {Object} [options] Additional options for the pre-tokenization logic.
* @returns {string[]} An array of pre-tokens.
*/
_call(text, options) {
return this.pre_tokenize(text, options);
}
}
/**
* @extends PreTokenizer
*/
class BertPreTokenizer extends PreTokenizer {
/**
* A PreTokenizer that splits text into wordpieces using a basic tokenization scheme
* similar to that used in the original implementation of BERT.
*
* @param {Object} config The configuration object.
*/
constructor(config) {
super();
// Construct a pattern which matches the rust implementation:
// https://github.com/huggingface/tokenizers/blob/b4fcc9ce6e4ad5806e82826f816acfdfdc4fcc67/tokenizers/src/pre_tokenizers/bert.rs#L11
// Equivalent to removing whitespace and splitting on punctuation (both \p{P} and other ASCII punctuation characters)
this.pattern = new RegExp(`[^\\s${PUNCTUATION_REGEX}]+|[${PUNCTUATION_REGEX}]`, 'gu');
}
/**
* Tokenizes a single text using the BERT pre-tokenization scheme.
*
* @param {string} text The text to tokenize.
* @param {Object} [options] Additional options for the pre-tokenization logic.
* @returns {string[]} An array of tokens.
*/
pre_tokenize_text(text, options) {
return text.trim().match(this.pattern) || [];
}
}
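// Example (illustrative): words are kept whole and each punctuation character
// becomes its own token:
//
//   new BertPreTokenizer({}).pre_tokenize_text('Hello, world!');
//   // => ['Hello', ',', 'world', '!']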
/**
* A pre-tokenizer that splits text into Byte-Pair-Encoding (BPE) subwords.
* @extends PreTokenizer
*/
class ByteLevelPreTokenizer extends PreTokenizer {
/**
* Creates a new instance of the `ByteLevelPreTokenizer` class.
* @param {Object} config The configuration object.
*/
constructor(config) {
super();
this.config = config;
/**
* @type {boolean} Whether to add a leading space to the first word.
* This allows to treat the leading word just as any other word.
*/
this.add_prefix_space = this.config.add_prefix_space;
/**
* @type {boolean} Whether the post processing step should trim offsets
* to avoid including whitespaces.
* @todo Use this in the pretokenization step.
*/
this.trim_offsets = this.config.trim_offsets;
/**
* @type {boolean} Whether to use the standard GPT2 regex for whitespace splitting.
* Set it to `false` if you want to use your own splitting. Defaults to `true`.
*/
this.use_regex = this.config.use_regex ?? true;
this.pattern = /'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+/gu;
this.byte_encoder = BYTES_TO_UNICODE;
this.text_encoder = new TextEncoder();
}
/**
* Tokenizes a single piece of text using byte-level tokenization.
* @param {string} text The text to tokenize.
* @param {Object} [options] Additional options for the pre-tokenization logic.
* @returns {string[]} An array of tokens.
*/
pre_tokenize_text(text, options) {
// Add a leading space if the option is enabled
if (this.add_prefix_space && !text.startsWith(' ')) {
text = ' ' + text;
}
// Split on whitespace and punctuation
const tokens = this.use_regex ? (text.match(this.pattern) || []) : [text];
// Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
return tokens.map(
token => Array.from(this.text_encoder.encode(token), byte => this.byte_encoder[byte]).join('')
);
}
}
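// Example (illustrative): each pre-token's UTF-8 bytes are remapped to printable
// characters via BYTES_TO_UNICODE (the GPT-2 byte encoder); e.g., the space byte
// 0x20 becomes 'Ġ' (U+0120):
//
//   const pt = new ByteLevelPreTokenizer({ add_prefix_space: true, trim_offsets: true });
//   pt.pre_tokenize_text('hello world');
//   // => ['Ġhello', 'Ġworld']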
/**
* @typedef {'removed'|'isolated'|'mergedWithPrevious'|'mergedWithNext'|'contiguous'} SplitDelimiterBehavior
*/
/**
* Splits text using a given pattern.
* @extends PreTokenizer
*/
class SplitPreTokenizer extends PreTokenizer {
/**
* @param {Object} config The configuration options for the pre-tokenizer.
* @param {Object} config.pattern The pattern used to split the text. Can be a string or a regex object.
* @param {string|undefined} config.pattern.String The string to use for splitting. Only defined if the pattern is a string.
* @param {string|undefined} config.pattern.Regex The regex to use for splitting. Only defined if the pattern is a regex.
* @param {SplitDelimiterBehavior} config.behavior The behavior to use when splitting.
* @param {boolean} config.invert Whether to split (invert=false) or match (invert=true) the pattern.
*/
constructor(config) {
super();
this.config = config;
// TODO support all behaviours (config.behavior)
this.pattern = createPattern(this.config.pattern, this.config.invert);
}
/**
* Tokenizes text by splitting it using the given pattern.
* @param {string} text The text to tokenize.
* @param {Object} [options] Additional options for the pre-tokenization logic.
* @returns {string[]} An array of tokens.
*/
pre_tokenize_text(text, options) {
if (this.pattern === null) {
return [];
}
if (this.config.invert) {
return text.match(this.pattern) || [];
} else if (this.config.behavior?.toLowerCase() === 'removed') {
return text.split(this.pattern).filter(x => x);
} else {
return regexSplit(text, this.pattern);
}
}
}
/**
* Splits text based on punctuation.
* @extends PreTokenizer
*/
class PunctuationPreTokenizer extends PreTokenizer {
/**
* @param {Object} config The configuration options for the pre-tokenizer.
* @param {SplitDelimiterBehavior} config.behavior The behavior to use when splitting.
*/
constructor(config) {
super();
this.config = config;
this.pattern = new RegExp(`[^${PUNCTUATION_REGEX}]+|[${PUNCTUATION_REGEX}]+`, 'gu');
}
/**
* Tokenizes text by splitting it using the given pattern.
* @param {string} text The text to tokenize.
* @param {Object} [options] Additional options for the pre-tokenization logic.
* @returns {string[]} An array of tokens.
*/
pre_tokenize_text(text, options) {
return text.match(this.pattern) || [];
}
}
/**
* Splits text based on digits.
* @extends PreTokenizer
*/
class DigitsPreTokenizer extends PreTokenizer {
/**
* @param {Object} config The configuration options for the pre-tokenizer.
* @param {boolean} config.individual_digits Whether to split on individual digits.
*/
constructor(config) {
super();
this.config = config;
// Construct a pattern which matches the rust implementation:
const digit_pattern = `[^\\d]+|\\d${this.config.individual_digits ? '' : '+'}`;
this.pattern = new RegExp(digit_pattern, 'gu');
}
/**
* Tokenizes text by splitting it using the given pattern.
* @param {string} text The text to tokenize.
* @param {Object} [options] Additional options for the pre-tokenization logic.
* @returns {string[]} An array of tokens.
*/
pre_tokenize_text(text, options) {
return text.match(this.pattern) || [];
}
}
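// Worked example (illustrative):
//
//   new DigitsPreTokenizer({ individual_digits: false }).pre_tokenize_text('year 2024!');
//   // => ['year ', '2024', '!']
//   new DigitsPreTokenizer({ individual_digits: true }).pre_tokenize_text('year 2024!');
//   // => ['year ', '2', '0', '2', '4', '!']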
/**
* @typedef {Object} PostProcessedOutput
* @property {string[]} tokens List of tokens produced by the post-processor.
* @property {number[]} [token_type_ids] List of token type ids produced by the post-processor.
*/
/**
* @typedef {Object} EncodingSingle
* @property {number[]} input_ids List of token ids to be fed to a model.
* @property {number[]} attention_mask List of indices specifying which tokens should be attended to by the model
* @property {number[]} [token_type_ids] List of token type ids to be fed to a model
*/
/**
* @extends Callable
*/
class PostProcessor extends Callable {
/**
* @param {Object} config The configuration for the post-processor.
*/
constructor(config) {
super();
this.config = config;
}
/**
* Factory method to create a PostProcessor object from a configuration object.
*
* @param {Object} config Configuration object representing a PostProcessor.
* @returns {PostProcessor} A PostProcessor object created from the given configuration.
* @throws {Error} If an unknown PostProcessor type is encountered.
*/
static fromConfig(config) {
if (config === null) return null;
switch (config.type) {
case 'TemplateProcessing':
return new TemplateProcessing(config);
case 'ByteLevel':
return new ByteLevelPostProcessor(config);
case 'RobertaProcessing':
return new RobertaProcessing(config);
case 'BertProcessing':
return new BertProcessing(config);
case 'Sequence':
return new PostProcessorSequence(config);
default:
throw new Error(`Unknown PostProcessor type: ${config.type}`);
}
}
/**
* Method to be implemented in subclass to apply post-processing on the given tokens.
*
* @param {Array} tokens The input tokens to be post-processed.
* @param {...*} args Additional arguments required by the post-processing logic.
* @returns {PostProcessedOutput} The post-processed tokens.
* @throws {Error} If the method is not implemented in subclass.
*/
post_process(tokens, ...args) {
throw Error("post_process should be implemented in subclass.")
}
/**
* Alias for {@link PostProcessor#post_process}.
* @param {Array} tokens The text or array of texts to post-process.
* @param {...*} args Additional arguments required by the post-processing logic.
* @returns {PostProcessedOutput} The post-processed tokens.
*/
_call(tokens, ...args) {
return this.post_process(tokens, ...args);
}
}
/**
* A post-processor that adds special tokens to the beginning and end of the input.
*/
class BertProcessing extends PostProcessor {
/**
* @param {Object} config The configuration for the post-processor.
* @param {string[]} config.cls The special tokens to add to the beginning of the input.
* @param {string[]} config.sep The special tokens to add to the end of the input.
*/
constructor(config) {
super(config);
// TODO use all of config: add_prefix_space, trim_offsets
this.cls = config.cls[0];
this.sep = config.sep[0];
}
/**
* Adds the special tokens to the beginning and end of the input.
* @param {string[]} tokens The input tokens.
* @param {string[]} [tokens_pair=null] An optional second set of input tokens.
* @returns {PostProcessedOutput} The post-processed tokens with the special tokens added to the beginning and end.
*/
post_process(tokens, tokens_pair = null, {
add_special_tokens = true,
} = {}) {
if (add_special_tokens) {
tokens = mergeArrays([this.cls], tokens, [this.sep]);
}
let token_type_ids = new Array(tokens.length).fill(0);
if (tokens_pair !== null) {
// NOTE: It is intended to add 2 EOS tokens after the first set of tokens
// https://github.com/huggingface/tokenizers/issues/983
const middle = (add_special_tokens && this instanceof RobertaProcessing)
? [this.sep]
: [];
const after = add_special_tokens ? [this.sep] : [];
tokens = mergeArrays(tokens, middle, tokens_pair, after);
token_type_ids = mergeArrays(token_type_ids, new Array(tokens_pair.length + middle.length + after.length).fill(1));
}
return { tokens, token_type_ids };
}
}
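// Example (illustrative; '[CLS]' and '[SEP]' are the values typically found in
// BERT configs):
//
//   const processor = new BertProcessing({ cls: ['[CLS]'], sep: ['[SEP]'] });
//   processor.post_process(['hello'], ['world']);
//   // => { tokens: ['[CLS]', 'hello', '[SEP]', 'world', '[SEP]'],
//   //      token_type_ids: [0, 0, 0, 1, 1] }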
class RobertaProcessing extends BertProcessing { } // NOTE: extends BertProcessing
/**
* Post processor that replaces special tokens in a template with actual tokens.
* @extends PostProcessor
*/
class TemplateProcessing extends PostProcessor {
/**
* Creates a new instance of `TemplateProcessing`.
* @param {Object} config The configuration options for the post processor.
* @param {Array} config.single The template for a single sequence of tokens.
* @param {Array} config.pair The template for a pair of sequences of tokens.
*/
constructor(config) {
super(config);
this.single = config.single;
this.pair = config.pair;
}
/**
* Replaces special tokens in the template with actual tokens.
* @param {string[]} tokens The list of tokens for the first sequence.
* @param {string[]} [tokens_pair=null] The list of tokens for the second sequence (optional).
* @returns {PostProcessedOutput} An object containing the list of tokens with the special tokens replaced with actual tokens.
*/
post_process(tokens, tokens_pair = null, {
add_special_tokens = true,
} = {}) {
const type = tokens_pair === null ? this.single : this.pair
let processedTokens = [];
let types = [];
for (const item of type) {
if ('SpecialToken' in item) {
if (add_special_tokens) {
processedTokens.push(item.SpecialToken.id);
types.push(item.SpecialToken.type_id);
}
} else if ('Sequence' in item) {
if (item.Sequence.id === 'A') {
processedTokens = mergeArrays(processedTokens, tokens);
types = mergeArrays(types, new Array(tokens.length).fill(item.Sequence.type_id));
} else if (item.Sequence.id === 'B') {
processedTokens = mergeArrays(processedTokens, tokens_pair);
types = mergeArrays(types, new Array(tokens_pair.length).fill(item.Sequence.type_id));
}
}
}
return { tokens: processedTokens, token_type_ids: types };
}
}
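// Example (illustrative; this is the template format found in `tokenizer.json`,
// e.g. a Llama-style '<s> $A' template):
//
//   const processor = new TemplateProcessing({
//       single: [
//           { SpecialToken: { id: '<s>', type_id: 0 } },
//           { Sequence: { id: 'A', type_id: 0 } },
//       ],
//       pair: [], // unused in this example
//   });
//   processor.post_process(['hi']);
//   // => { tokens: ['<s>', 'hi'], token_type_ids: [0, 0] }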
/**
* A PostProcessor that returns the given tokens as is.
* @extends PostProcessor
*/
class ByteLevelPostProcessor extends PostProcessor {
/**
* Post process the given tokens.
* @param {string[]} tokens The list of tokens for the first sequence.
* @param {string[]} [tokens_pair=null] The list of tokens for the second sequence (optional).
* @returns {PostProcessedOutput} An object containing the post-processed tokens.
*/
post_process(tokens, tokens_pair = null) {
if (tokens_pair) {
tokens = mergeArrays(tokens, tokens_pair);
}
return { tokens };
}
}
/**
* A post-processor that applies multiple post-processors in sequence.
*/
class PostProcessorSequence extends PostProcessor {
/**
* Creates a new instance of PostProcessorSequence.
* @param {Object} config The configuration object.
* @param {Object[]} config.processors The list of post-processors to apply.
*/
constructor(config) {
super(config);
this.processors = config.processors.map(x => PostProcessor.fromConfig(x));
}
/**
* Post process the given tokens.
* @param {string[]} tokens The list of tokens for the first sequence.
* @param {string[]} [tokens_pair=null] The list of tokens for the second sequence (optional).
* @returns {PostProcessedOutput} An object containing the post-processed tokens.
*/
post_process(tokens, tokens_pair = null, options = {}) {
let token_type_ids;
for (const processor of this.processors) {
if (processor instanceof ByteLevelPostProcessor) {
// Special case: the ByteLevel post-processor is applied to each sequence
// separately, rather than receiving the pair as a second argument
const output = processor.post_process(tokens);
tokens = output.tokens;
if (tokens_pair) {
const pair_output = processor.post_process(tokens_pair);
tokens_pair = pair_output.tokens;
}
} else {
const output = processor.post_process(tokens, tokens_pair, options);
tokens = output.tokens;
token_type_ids = output.token_type_ids;
}
}
return { tokens, token_type_ids };
}
}
/**
* The base class for token decoders.
* @extends Callable
*/
class Decoder extends Callable {
/**
* Creates an instance of `Decoder`.
*
* @param {Object} config The configuration object.
*/
constructor(config) {
super();
this.config = config;
/** @type {AddedToken[]} */
this.added_tokens = [];
this.end_of_word_suffix = null;
this.trim_offsets = config.trim_offsets;
}
/**
* Creates a decoder instance based on the provided configuration.
*
* @param {Object} config The configuration object.
* @returns {Decoder} A decoder instance.
* @throws {Error} If an unknown decoder type is provided.
*/
static fromConfig(config) {
if (config === null) return null;
switch (config.type) {
case 'WordPiece':
return new WordPieceDecoder(config);
case 'Metaspace':
return new MetaspaceDecoder(config);
case 'ByteLevel':
return new ByteLevelDecoder(config);
case 'Replace':
return new ReplaceDecoder(config);
case 'ByteFallback':
return new ByteFallback(config);
case 'Fuse':
return new FuseDecoder(config);
case 'Strip':
return new StripDecoder(config);
case 'Sequence':
return new DecoderSequence(config);
case 'CTC':
return new CTCDecoder(config);
case 'BPEDecoder':
return new BPEDecoder(config);
default:
throw new Error(`Unknown Decoder type: ${config.type}`);
}
}
/**
* Calls the `decode` method.
*
* @param {string[]} tokens The list of tokens.
* @returns {string} The decoded string.
*/
_call(tokens) {
return this.decode(tokens);
}
/**
* Decodes a list of tokens.
* @param {string[]} tokens The list of tokens.
* @returns {string} The decoded string.
*/
decode(tokens) {
return this.decode_chain(tokens).join('');
}
/**
* Apply the decoder to a list of tokens.
*
* @param {string[]} tokens The list of tokens.
* @returns {string[]} The decoded list of tokens.
* @throws {Error} If the `decode_chain` method is not implemented in the subclass.
*/
decode_chain(tokens) {
throw Error("`decode_chain` should be implemented in subclass.")
}
}
class ReplaceDecoder extends Decoder {
/** @type {Decoder['decode_chain']} */
decode_chain(tokens) {
const pattern = createPattern(this.config.pattern);
return pattern === null
? tokens
: tokens.map(token => token.replaceAll(pattern, this.config.content))
}
}
class ByteFallback extends Decoder {
constructor(config) {
super(config);
this.text_decoder = new TextDecoder();
}
/** @type {Decoder['decode_chain']} */
decode_chain(tokens) {
const new_tokens = [];
let previous_byte_tokens = [];
for (const token of tokens) {
let bytes = null;
if (token.length === 6 && token.startsWith('<0x') && token.endsWith('>')) {
const byte = parseInt(token.slice(3, 5), 16);
if (!isNaN(byte)) {
bytes = byte;
}
}
if (bytes !== null) {
previous_byte_tokens.push(bytes);
} else {
if (previous_byte_tokens.length > 0) {
const string = this.text_decoder.decode(Uint8Array.from(previous_byte_tokens));
new_tokens.push(string);
previous_byte_tokens = [];
}
new_tokens.push(token);
}
}
if (previous_byte_tokens.length > 0) {
const string = this.text_decoder.decode(Uint8Array.from(previous_byte_tokens));
new_tokens.push(string);
previous_byte_tokens = [];
}
return new_tokens;
}
}
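// Worked example (illustrative): SentencePiece byte-fallback tokens of the form
// '<0xNN>' are accumulated and decoded as UTF-8; the bytes E6 97 A5 encode U+65E5:
//
//   new ByteFallback({}).decode_chain(['<0xE6>', '<0x97>', '<0xA5>', 'x']);
//   // => ['日', 'x']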
/**
* Fuse simply fuses all tokens into one big string.
* It's usually the last decoding step anyway, but this decoder
* exists in case some decoders need to run after that step.
*/
class FuseDecoder extends Decoder {
/** @type {Decoder['decode_chain']} */
decode_chain(tokens) {
return [tokens.join('')];
}
}
class StripDecoder extends Decoder {
constructor(config) {
super(config);
this.content = this.config.content;
this.start = this.config.start;
this.stop = this.config.stop;
}
/** @type {Decoder['decode_chain']} */
decode_chain(tokens) {
return tokens.map(token => {
let start_cut = 0;
for (let i = 0; i < this.start; ++i) {
if (token[i] === this.content) {
start_cut = i + 1;
continue;
} else {
break;
}
}
let stop_cut = token.length;
for (let i = 0; i < this.stop; ++i) {
const index = token.length - i - 1;
if (token[index] === this.content) {
stop_cut = index;
continue;
} else {
break;
}
}
return token.slice(start_cut, stop_cut)
});
}
}
/**
* A decoder that decodes a list of WordPiece tokens into a single string.
* @extends Decoder
*/
class WordPieceDecoder extends Decoder {
/**
* Creates a new instance of WordPieceDecoder.
* @param {Object} config The configuration object.
* @param {string} config.prefix The prefix used for WordPiece encoding.
* @param {boolean} config.cleanup Whether to cleanup the decoded string.
*/
constructor(config) {
super(config);
this.cleanup = config.cleanup;
}
/** @type {Decoder['decode_chain']} */
decode_chain(tokens) {
return tokens.map((token, i) => {
if (i !== 0) {
if (token.startsWith(this.config.prefix)) {
// NOTE: .replace() is intended; only replace first occurrence
token = token.replace(this.config.prefix, '');
} else {
token = ' ' + token;
}
}
if (this.cleanup) {
token = clean_up_tokenization(token)
}
return token;
});
}
}
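// Example (illustrative; '##' is the conventional WordPiece continuation prefix):
//
//   const decoder = new WordPieceDecoder({ prefix: '##', cleanup: true });
//   decoder.decode(['play', '##ing', 'games']); // 'playing games'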
/**
* Byte-level decoder for tokenization output. Inherits from the `Decoder` class.
* @extends Decoder
*/
class ByteLevelDecoder extends Decoder {
/**
* Create a `ByteLevelDecoder` object.
* @param {Object} config Configuration object.
*/
constructor(config) {
super(config);
this.byte_decoder = UNICODE_TO_BYTES;
this.text_decoder = new TextDecoder("utf-8", {
fatal: false,
ignoreBOM: true,
});
this.end_of_word_suffix = null;
}
/**
* Convert an array of tokens to string by decoding each byte.
* @param {string[]} tokens Array of tokens to be decoded.
* @returns {string} The decoded string.
*/
convert_tokens_to_string(tokens) {
const text = tokens.join('');
const byteArray = new Uint8Array([...text].map(c => this.byte_decoder[c]));
const decoded_text = this.text_decoder.decode(byteArray);
return decoded_text;
}
/** @type {Decoder['decode_chain']} */
decode_chain(tokens) {
// TODO move to base class (like HF)
// tokens === filtered_tokens
// To avoid mixing byte-level and unicode for byte-level BPE,
// we need to build string separately for added tokens and byte-level tokens
// cf. https://github.com/huggingface/transformers/issues/1133
const sub_texts = [];
let current_sub_text = [];
for (const token of tokens) {
// tokens sent here are already filtered, so we don't need to do this
// if (skip_special_tokens && this.all_special_ids.includes(token)) {
// continue;
// }
if (this.added_tokens.find(x => x.content === token) !== undefined) {
if (current_sub_text.length > 0) {
sub_texts.push(this.convert_tokens_to_string(current_sub_text));
current_sub_text = [];
}
sub_texts.push(token);
} else {
current_sub_text.push(token);
}
}
if (current_sub_text.length > 0) {
sub_texts.push(this.convert_tokens_to_string(current_sub_text));
}
// TODO add spaces_between_special_tokens and clean_up_tokenization_spaces options
return sub_texts;
}
}
/**
* The CTC (Connectionist Temporal Classification) decoder.
* See https://github.com/huggingface/tokenizers/blob/bb38f390a61883fc2f29d659af696f428d1cda6b/tokenizers/src/decoders/ctc.rs
*/
class CTCDecoder extends Decoder {
constructor(config) {
super(config);
this.pad_token = this.config.pad_token;
this.word_delimiter_token = this.config.word_delimiter_token;
this.cleanup = this.config.cleanup;
}
/**
* Converts a connectionist-temporal-classification (CTC) output tokens into a single string.
* @param {string[]} tokens Array of tokens to be decoded.
* @returns {string} The decoded string.
*/
convert_tokens_to_string(tokens) {
if (tokens.length === 0) return '';
// group same tokens into non-repeating tokens in CTC style decoding
const grouped_tokens = [tokens[0]];
for (let i = 1; i < tokens.length; ++i) {
if (tokens[i] !== grouped_tokens.at(-1)) {
grouped_tokens.push(tokens[i]);
}
}
// filter self.pad_token which is used as CTC-blank token
const filtered_tokens = grouped_tokens.filter(token => token !== this.pad_token);
let text = filtered_tokens.join('');
if (this.cleanup) {
// cleanup and replace delimiter token
text = clean_up_tokenization(text)
.replaceAll(this.word_delimiter_token, ' ')
.trim();
}
return text;
}
/** @type {Decoder['decode_chain']} */
decode_chain(tokens) {
return [this.convert_tokens_to_string(tokens)];
}
}
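// Worked example (illustrative; '<pad>' as the CTC blank and '|' as the word
// delimiter, as in Wav2Vec2-style configs):
//
//   const decoder = new CTCDecoder({ pad_token: '<pad>', word_delimiter_token: '|', cleanup: true });
//   decoder.decode(['h', 'h', 'e', '<pad>', 'l', 'l', '<pad>', 'l', 'o', '|', 'x']);
//   // collapse repeats => h,e,<pad>,l,<pad>,l,o,|,x; drop blanks => 'hello|x'; => 'hello x'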
/**
* Apply a sequence of decoders.
* @extends Decoder
*/
class DecoderSequence extends Decoder {
/**
* Creates a new instance of DecoderSequence.
* @param {Object} config The configuration object.
* @param {Object[]} config.decoders The list of decoders to apply.
*/
constructor(config) {
super(config);
this.decoders = config.decoders.map(x => Decoder.fromConfig(x));
}
/** @type {Decoder['decode_chain']} */
decode_chain(tokens) {
// Use reduce to apply each decoder to the tokens
return this.decoders.reduce((toks, decoder) => {
return decoder.decode_chain(toks);
}, tokens);
}
}
class BPEDecoder extends Decoder {
constructor(config) {
super(config);
this.suffix = this.config.suffix;
}
/** @type {Decoder['decode_chain']} */
decode_chain(tokens) {
return tokens.map((token, i) => {
return token.replaceAll(this.suffix, (i === tokens.length - 1) ? '' : ' ')
});
}
}
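// Example (illustrative; '</w>' is a common end-of-word suffix in BPE vocabularies):
//
//   new BPEDecoder({ suffix: '</w>' }).decode(['hello</w>', 'world</w>']);
//   // => 'hello world'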
// Custom decoder for VITS
class VitsDecoder extends Decoder {
/** @type {Decoder['decode_chain']} */
decode_chain(tokens) {
let decoded = '';
for (let i = 1; i < tokens.length; i += 2) {
decoded += tokens[i];
}
return [decoded];
}
}
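// Example (illustrative): VITS-style tokenizers interleave characters with a
// blank/pad token, so only the odd-indexed tokens are kept:
//
//   new VitsDecoder({}).decode_chain(['_', 'h', '_', 'i', '_']);
//   // => ['hi']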
/**
* This PreTokenizer replaces spaces with the given replacement character, adds a prefix space if requested,
* and returns a list of tokens.
* @extends PreTokenizer
*/
class MetaspacePreTokenizer extends PreTokenizer {
/**
* @param {Object} config The configuration object for the MetaspacePreTokenizer.
* @param {boolean} config.add_prefix_space Whether to add a prefix space to the first token.
* @param {string} config.replacement The character to replace spaces with.
* @param {string} [config.str_rep=config.replacement] An optional string representation of the replacement character.
* @param {'first'|'never'|'always'} [config.prepend_scheme='always'] The metaspace prepending scheme.
*/
constructor(config) {
super();
this.addPrefixSpace = config.add_prefix_space;
this.replacement = config.replacement;
this.strRep = config.str_rep || this.replacement;
this.prepend_scheme = config.prepend_scheme ?? 'always';
}
/**
* This method takes a string, replaces spaces with the replacement character,
* adds a prefix space if requested, and returns a new list of tokens.
* @param {string} text The text to pre-tokenize.
* @param {Object} [options] The options for the pre-tokenization.
* @param {number} [options.section_index] The index of the section to pre-tokenize.
* @returns {string[]} A new list of pre-tokenized tokens.
*/
pre_tokenize_text(text, {
section_index = undefined,
} = {}) {
let normalized = text.replaceAll(' ', this.strRep);
if (
// We add a prefix space if:
// (1) The addPrefixSpace option is enabled and the normalized
// token does not already start with the replacement character.
(this.addPrefixSpace && !normalized.startsWith(this.replacement))
// and (2) either:
// (a) prepend_scheme is 'always'
// (b) prepend_scheme is 'first' and this is the first section
&& (
this.prepend_scheme === 'always' ||
(this.prepend_scheme === 'first' && section_index === 0)
)
) {
normalized = this.strRep + normalized;
}
return [normalized];
}
}
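// Example (illustrative; '\u2581' (▁) is the usual SentencePiece replacement
// character). Note that the result is a single pre-token; splitting it into
// pieces is left to the model:
//
//   const pt = new MetaspacePreTokenizer({ add_prefix_space: true, replacement: '\u2581' });
//   pt.pre_tokenize_text('Hello world');
//   // => ['▁Hello▁world']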
/**
* MetaspaceDecoder class extends the Decoder class and decodes Metaspace tokenization.
* @extends Decoder
*/
class MetaspaceDecoder extends Decoder {
/**
* Constructs a new MetaspaceDecoder object.
* @param {Object} config The configuration object for the MetaspaceDecoder.
* @param {boolean} config.add_prefix_space Whether to add a prefix space to the decoded string.
* @param {string} config.replacement The string to replace spaces with.
*/
constructor(config) {
super(config);
this.addPrefixSpace = config.add_prefix_space;
this.replacement = config.replacement;
}
/** @type {Decoder['decode_chain']} */
decode_chain(tokens) {
const result = [];
for (let i = 0; i < tokens.length; ++i) {
let normalized = tokens[i].replaceAll(this.replacement, ' ');
if (this.addPrefixSpace && i === 0 && normalized.startsWith(' ')) {
normalized = normalized.substring(1);
}
result.push(normalized);
}
return result;
}
}
/**
* A normalizer that applies a precompiled charsmap.
* This is useful for applying complex normalizations in C++ and exposing them to JavaScript.
* @extends Normalizer
* @param {Object} config The configuration object for the Precompiled normalizer.
* @param {Object} config.precompiled_charsmap The precompiled charsmap object.
*/
class Precompiled extends Normalizer {
/**
* Create a new instance of Precompiled normalizer.
* @param {Object} config The configuration object.
* @param {any} config.precompiled_charsmap Precompiled chars mapping.
*/
constructor(config) {
super(config);
this.charsmap = config.precompiled_charsmap;
}
/**
* Normalizes the given text by applying the precompiled charsmap.
* @param {string} text The text to normalize.
* @returns {string} The normalized text.
*/
normalize(text) {
// As stated in the sentencepiece normalization docs (https://github.com/google/sentencepiece/blob/master/doc/normalization.md#use-pre-defined-normalization-rule),
// there are 5 pre-defined normalization rules:
// 1. nmt_nfkc: NFKC normalization with some additional normalization around spaces. (default)
// 2. nfkc: original NFKC normalization.
// 3. nmt_nfkc_cf: nmt_nfkc + Unicode case folding (mostly lower casing)
// 4. nfkc_cf: nfkc + Unicode case folding.
// 5. identity: no normalization
//
// For now, we only implement the default (nmt_nfkc).
// See https://raw.githubusercontent.com/google/sentencepiece/master/data/nmt_nfkc.tsv for the full list of rules.
// TODO: detect when a different `this.charsmap` is used.
text = text.replace(/[\u0001-\u0008\u000B\u000E-\u001F\u007F\u008F\u009F]/gm, ''); // Remove control characters
text = text.replace(/[\u0009\u000A\u000C\u000D\u00A0\u1680\u2000-\u200F\u2028\u2029\u202F\u205F\u2581\u3000\uFEFF\uFFFD]/gm, '\u0020'); // Replace certain characters with a space
if (text.includes('\uFF5E')) {
// To match the sentencepiece implementation 100%, we must handle a very strange edge-case.
// For some reason, the "Fullwidth Tilde" character (\uFF5E) should not be converted to the standard Tilde character (\u007E).
// However, NFKC normalization does do this conversion. As a result, we split the string on the Fullwidth Tilde character,
// perform NFKC normalization on each substring, and then join them back together with the Fullwidth Tilde character.
const parts = text.split('\uFF5E');
text = parts.map(part => part.normalize('NFKC')).join('\uFF5E');
} else {
text = text.normalize('NFKC');
}
return text;
}
}
/**
* A pre-tokenizer that applies a sequence of pre-tokenizers to the input text.
* @extends PreTokenizer
*/
class PreTokenizerSequence extends PreTokenizer {
/**
* Creates an instance of PreTokenizerSequence.
* @param {Object} config The configuration object for the pre-tokenizer sequence.
* @param {Object[]} config.pretokenizers An array of pre-tokenizer configurations.
*/
constructor(config) {
super();
this.tokenizers = config.pretokenizers.map(x => PreTokenizer.fromConfig(x));
}
/**
* Applies each pre-tokenizer in the sequence to the input text in turn.
* @param {string} text The text to pre-tokenize.
* @param {Object} [options] Additional options for the pre-tokenization logic.
* @returns {string[]} The pre-tokenized text.
*/
pre_tokenize_text(text, options) {
// Use reduce to apply each tokenizer to the text
return this.tokenizers.reduce((preTokenizedText, tokenizer) => {
return tokenizer.pre_tokenize(preTokenizedText, options);
}, [text]);
}
}
/**
* Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+`).
*/
class WhitespacePreTokenizer extends PreTokenizer {
/**
* Creates an instance of WhitespacePreTokenizer.
* @param {Object} config The configuration object for the pre-tokenizer.
*/
constructor(config) {
super();
}
/**
* Pre-tokenizes the input text by splitting it on word boundaries.
* @param {string} text The text to be pre-tokenized.
* @param {Object} [options] Additional options for the pre-tokenization logic.
* @returns {string[]} An array of tokens produced by splitting the input text on word boundaries.
*/
pre_tokenize_text(text, options) {
return text.match(/\w+|[^\w\s]+/g) || [];
}
}
/**
* Splits a string of text by whitespace characters into individual tokens.
* @extends PreTokenizer
*/
class WhitespaceSplit extends PreTokenizer {
/**
* Creates an instance of WhitespaceSplit.
* @param {Object} config The configuration object for the pre-tokenizer.
*/
constructor(config) {
super();
}
/**
* Pre-tokenizes the input text by splitting it on whitespace characters.
* @param {string} text The text to be pre-tokenized.
* @param {Object} [options] Additional options for the pre-tokenization logic.
* @returns {string[]} An array of tokens produced by splitting the input text on whitespace.
*/
pre_tokenize_text(text, options) {
return whitespace_split(text);
}
}
// NOTE: `ReplacePreTokenizer` is custom (to support `BlenderbotSmallTokenizer`)
class ReplacePreTokenizer extends PreTokenizer {
/**
* @param {Object} config The configuration options for the pre-tokenizer.
* @param {Object} config.pattern The pattern to match for replacement. Can be a string or a regex object.
* @param {string} config.content What to replace the pattern with.
*/
constructor(config) {
super();
this.config = config;
this.pattern = createPattern(this.config.pattern);
this.content = this.config.content;
}
/**
* Pre-tokenizes the input text by replacing certain characters.
* @param {string} text The text to be pre-tokenized.
* @param {Object} [options] Additional options for the pre-tokenization logic.
* @returns {string[]} An array of tokens produced by replacing certain characters.
*/
pre_tokenize_text(text, options) {
if (this.pattern === null) {
return [text];
}
return [text.replaceAll(this.pattern, this.config.content)];
}
}
const SPECIAL_TOKEN_ATTRIBUTES = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
// additional_special_tokens (TODO)
]
/**
*
* Helper function for padding values of an object, which are each arrays.
* NOTE: No additional checks are made here for validity of arguments.
* @param {Record<string, any[]>} item The input object.
* @param {number} length The length to pad to.
* @param {(key: string) => any} value_fn Determine the value to fill the array, based on its key.
* @param {string} side Which side to pad the array.
* @private
*/
function padHelper(item, length, value_fn, side) {
for (const key of Object.keys(item)) {
const diff = length - item[key].length;
const value = value_fn(key);
const padData = new Array(diff).fill(value);
item[key] = side === 'right'
? mergeArrays(item[key], padData)
: mergeArrays(padData, item[key]);
}
}
/**
* Helper function for truncating values of an object, which are each arrays.
* NOTE: No additional checks are made here for validity of arguments.
* @param {Record<string, any[]>} item The input object.
* @param {number} length The length to truncate to.
* @private
*/
function truncateHelper(item, length) {
// Setting .length to a lower value truncates the array in-place:
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/length
for (const key of Object.keys(item)) {
item[key].length = length;
}
}
/**
* @typedef {Object} Message
* @property {string} role The role of the message (e.g., "user" or "assistant" or "system").
* @property {string} content The content of the message.
*/
export class PreTrainedTokenizer extends Callable {
return_token_type_ids = false;
padding_side = 'right';
/**
* Create a new PreTrainedTokenizer instance.
* @param {Object} tokenizerJSON The JSON of the tokenizer.
* @param {Object} tokenizerConfig The config of the tokenizer.
*/
constructor(tokenizerJSON, tokenizerConfig) {
super();
this.config = tokenizerConfig;
// Construct parts of the tokenizer from the JSON
this.normalizer = Normalizer.fromConfig(tokenizerJSON.normalizer);
this.pre_tokenizer = PreTokenizer.fromConfig(tokenizerJSON.pre_tokenizer);
this.model = TokenizerModel.fromConfig(tokenizerJSON.model, tokenizerConfig);
this.post_processor = PostProcessor.fromConfig(tokenizerJSON.post_processor);
this.decoder = Decoder.fromConfig(tokenizerJSON.decoder);
// Add added_tokens to model
this.special_tokens = [];
this.all_special_ids = [];
/** @type {AddedToken[]} */
this.added_tokens = [];
for (const addedToken of tokenizerJSON.added_tokens) {
const token = new AddedToken(addedToken);
this.added_tokens.push(token);
this.model.tokens_to_ids.set(token.content, token.id);
this.model.vocab[token.id] = token.content;
if (token.special) {
this.special_tokens.push(token.content);
this.all_special_ids.push(token.id);
}
}
// Update additional_special_tokens
this.additional_special_tokens = tokenizerConfig.additional_special_tokens ?? [];
this.special_tokens.push(...this.additional_special_tokens);
this.special_tokens = [...new Set(this.special_tokens)]; // Remove duplicates
if (this.decoder) {
// Slight hack, but it prevents code duplication:
this.decoder.added_tokens = this.added_tokens;
// Another slight hack to add `end_of_word_suffix` (if present) to the decoder
// This is needed for cases where BPE model and ByteLevel decoder are used
// For more information, see https://github.com/huggingface/transformers.js/issues/74
// TODO: save this to the decoder when exporting?
this.decoder.end_of_word_suffix = this.model.end_of_word_suffix;
}
this.added_tokens_splitter = new DictionarySplitter(
this.added_tokens.map(x => x.content),
);
/** @type {Map<string, AddedToken>} */
this.added_tokens_map = new Map(this.added_tokens.map(x => [x.content, x]))
// Set mask token if present (otherwise will be undefined, which is fine)
this.mask_token = this.getToken('mask_token');
this.mask_token_id = this.model.tokens_to_ids.get(this.mask_token);
this.pad_token = this.getToken('pad_token', 'eos_token');
this.pad_token_id = this.model.tokens_to_ids.get(this.pad_token);
this.sep_token = this.getToken('sep_token');
this.sep_token_id = this.model.tokens_to_ids.get(this.sep_token);
this.unk_token = this.getToken('unk_token');
this.unk_token_id = this.model.tokens_to_ids.get(this.unk_token);
this.bos_token = this.getToken('bos_token');
this.bos_token_id = this.model.tokens_to_ids.get(this.bos_token);
this.eos_token = this.getToken('eos_token');
this.eos_token_id = this.model.tokens_to_ids.get(this.eos_token);
this.model_max_length = tokenizerConfig.model_max_length;
/** @type {boolean} Whether or not to strip the text when tokenizing (removing excess spaces before and after the string). */
this.remove_space = tokenizerConfig.remove_space;
this.clean_up_tokenization_spaces = tokenizerConfig.clean_up_tokenization_spaces ?? true;
this.do_lowercase_and_remove_accent = tokenizerConfig.do_lowercase_and_remove_accent ?? false;
if (tokenizerConfig.padding_side) {
this.padding_side = tokenizerConfig.padding_side;
}
this.add_bos_token = tokenizerConfig.add_bos_token;
this.add_eos_token = tokenizerConfig.add_eos_token;
this.legacy = false;
this.chat_template = tokenizerConfig.chat_template ?? null;
if (Array.isArray(this.chat_template)) {
// Chat templates are stored as lists of dicts with fixed key names,
// we reconstruct that into a single dict while loading them.
const chat_template = Object.create(null);
for (const { name, template } of this.chat_template) {
if (typeof name !== 'string' || typeof template !== 'string') {
throw new Error('Chat template must be a list of objects with "name" and "template" properties');
}
chat_template[name] = template;
}
this.chat_template = chat_template;
}
this._compiled_template_cache = new Map();
}
/**
* Returns the value of the first matching key in the tokenizer config object.
* @param {...string} keys One or more keys to search for in the tokenizer config object.
* @returns {string|null} The value associated with the first matching key, or null if no match is found.
* @throws {Error} If an object is found for a matching key and its __type property is not "AddedToken".
* @private
*/
getToken(...keys) {
for (const key of keys) {
const item = this.config[key];
if (!item) continue;
if (typeof item === 'object') {
if (item.__type === 'AddedToken') {
return item.content;
} else {
throw Error(`Unknown token: ${item}`);
}
} else {
return item;
}
}
return null;
}
/**
* Loads a pre-trained tokenizer from the given `pretrained_model_name_or_path`.
*
* @param {string} pretrained_model_name_or_path The path to the pre-trained tokenizer.
* @param {PretrainedTokenizerOptions} options Additional options for loading the tokenizer.
*
* @throws {Error} Throws an error if the tokenizer.json or tokenizer_config.json files are not found in the `pretrained_model_name_or_path`.
* @returns {Promise<PreTrainedTokenizer>} A new instance of the `PreTrainedTokenizer` class.
*/
static async from_pretrained(pretrained_model_name_or_path, {
progress_callback = null,
config = null,
cache_dir = null,
local_files_only = false,
revision = 'main',
legacy = null,
} = {}) {
const info = await loadTokenizer(pretrained_model_name_or_path, {
progress_callback,
config,
cache_dir,
local_files_only,
revision,
legacy,
})
// @ts-ignore
return new this(...info);
}
/**
* @typedef {number[]|number[][]|Tensor} BatchEncodingItem
*
* @typedef {Object} BatchEncoding Holds the output of the tokenizer's call function.
* @property {BatchEncodingItem} input_ids List of token ids to be fed to a model.
* @property {BatchEncodingItem} attention_mask List of indices specifying which tokens should be attended to by the model.
* @property {BatchEncodingItem} [token_type_ids] List of token type ids to be fed to a model.
*/
/**
* Encode/tokenize the given text(s).
* @param {string|string[]} text The text to tokenize.
* @param {Object} options An optional object containing the following properties:
* @param {string|string[]} [options.text_pair=null] Optional second sequence to be encoded. If set, must be the same type as text.
* @param {boolean|'max_length'} [options.padding=false] Whether to pad the input sequences.
* @param {boolean} [options.add_special_tokens=true] Whether or not to add the special tokens associated with the corresponding model.
* @param {boolean} [options.truncation=null] Whether to truncate the input sequences.
* @param {number} [options.max_length=null] Maximum length of the returned list and optionally padding length.
* @param {boolean} [options.return_tensor=true] Whether to return the results as Tensors or arrays.
* @param {boolean} [options.return_token_type_ids=null] Whether to return the token type ids.
* @returns {BatchEncoding} Object to be passed to the model.
*/
_call(
// Required positional arguments
text,
// Optional keyword arguments
{
text_pair = null,
add_special_tokens = true,
padding = false,
truncation = null,
max_length = null,
return_tensor = true, // Different to HF
return_token_type_ids = null,
} = {},
) {
const isBatched = Array.isArray(text);
/** @type {EncodingSingle[]} */
let encodedTokens;
if (isBatched) {
if (text.length === 0) {
throw Error('text array must be non-empty')
}
if (text_pair !== null) {
if (!Array.isArray(text_pair)) {
throw Error('text_pair must also be an array')
} else if (text.length !== text_pair.length) {
throw Error('text and text_pair must have the same length')
}
encodedTokens = text.map(
(t, i) => this._encode_plus(t, { text_pair: text_pair[i], add_special_tokens, return_token_type_ids })
)
} else {
encodedTokens = text.map(x => this._encode_plus(x, { add_special_tokens, return_token_type_ids }));
}
} else {
if (text === null || text === undefined) {
throw Error('text may not be null or undefined')
}
if (Array.isArray(text_pair)) {
throw Error('When specifying `text_pair`, since `text` is a string, `text_pair` must also be a string (i.e., not an array).')
}
// For single input, we just wrap in an array, and then unwrap later.
encodedTokens = [this._encode_plus(text, { text_pair, add_special_tokens, return_token_type_ids })];
}
// At this point, `encodedTokens` is batched, of shape [batch_size, tokens].
// However, the array may be jagged, so we may need to pad to max_length.
if (max_length === null) {
max_length = this.model_max_length;
} else if (truncation === null) {
if (padding === true) {
console.warn(
"`max_length` is ignored when `padding: true` and there is no truncation strategy. " +
"To pad to max length, use `padding: 'max_length'`."
)
max_length = this.model_max_length;
} else if (padding === false) {
console.warn("Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation: true` to explicitly truncate examples to max length.");
truncation = true;
}
}
// padding: 'max_length' doesn't require any additional calculation
// but padding: true has to calculate max_length from the sequences
if (padding === true) {
max_length = Math.min(max(encodedTokens.map(x => x.input_ids.length))[0], max_length ?? Infinity);
}
// Ensure it is less than model max length
max_length = Math.min(max_length, this.model_max_length ?? Infinity);
if (padding || truncation) {
// Perform padding and/or truncation
for (let i = 0; i < encodedTokens.length; ++i) {
if (encodedTokens[i].input_ids.length === max_length) {
continue;
} else if (encodedTokens[i].input_ids.length > max_length) {
// possibly truncate
if (truncation) {
truncateHelper(encodedTokens[i], max_length);
}
} else { // t.length < max_length
// possibly pad
if (padding) {
padHelper(
encodedTokens[i],
max_length,
key => key === 'input_ids' ? this.pad_token_id : 0,
this.padding_side
);
}
}
}
}
const result = {};
if (return_tensor) {
if (!(padding && truncation)) {
// It is not guaranteed that all items have the same length,
// so we perform an additional check
if (
encodedTokens.some(x => {
for (const key of Object.keys(x)) {
if (x[key].length !== encodedTokens[0][key]?.length) {
return true;
}
}
return false;
})
) {
throw Error(
"Unable to create tensor, you should probably activate truncation and/or padding " +
"with 'padding=true' and 'truncation=true' to have batched tensors with the same length."
)
}
}
// Now we actually convert to tensor
// NOTE: In the same way as the python library, we return a batched tensor, regardless of
// whether we have a single input or multiple inputs.
const dims = [encodedTokens.length, encodedTokens[0].input_ids.length];
for (const key of Object.keys(encodedTokens[0])) {
result[key] = new Tensor('int64',
BigInt64Array.from(encodedTokens.flatMap(x => x[key]).map(BigInt)),
dims
);
}
} else {
for (const key of Object.keys(encodedTokens[0])) {
result[key] = encodedTokens.map(x => x[key]);
}
// If not returning a tensor, we match the input type
if (!isBatched) {
// Input was not batched, so we unwrap
for (const key of Object.keys(result)) {
result[key] = result[key][0];
}
}
}
return /** @type {BatchEncoding} */(result);
}
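// Usage sketch (illustrative; the model id is a stand-in for any repository that
// provides tokenizer.json and tokenizer_config.json):
//
//   const tokenizer = await PreTrainedTokenizer.from_pretrained('Xenova/bert-base-uncased');
//   const { input_ids, attention_mask } = tokenizer(
//       ['a short sentence', 'a slightly longer sentence'],
//       { padding: true, truncation: true },
//   );
//   // input_ids and attention_mask are int64 Tensors of shape [2, seq_len].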
/**
* Encodes a single text using the preprocessor pipeline of the tokenizer.
*
* @param {string|null} text The text to encode.
* @returns {string[]|null} The encoded tokens.
*/
_encode_text(text) {
if (text === null) return null;
// Actual function which does encoding, for a single text
// First, we take care of special tokens. Needed to avoid issues arising from
// normalization and/or pretokenization (which may not preserve special tokens)
const sections = this.added_tokens_splitter.split(text);
// Process left/right stripping of added tokens
for (let i = 0; i < sections.length; ++i) {
const addedToken = this.added_tokens_map.get(sections[i]);
if (addedToken) {
if (addedToken.lstrip && i > 0) {
sections[i - 1] = sections[i - 1].trimEnd();
}
if (addedToken.rstrip && i < sections.length - 1) {
sections[i + 1] = sections[i + 1].trimStart();
}
}
}
const tokens = sections.flatMap((x, section_index) => {
if (x.length === 0) return [];
if (this.added_tokens_map.has(x)) return [x]; // Return added tokens unchanged
if (this.remove_space === true) {
x = x.trim().split(/\s+/).join(' ');
}
if (this.do_lowercase_and_remove_accent) {
x = lowercase_and_remove_accent(x);
}
if (this.normalizer !== null) {
x = this.normalizer(x);
}
// If, after normalization, this section is empty (e.g., it contained only whitespace),
// we return an empty array
if (x.length === 0) {
return [];
}
const sectionTokens = (this.pre_tokenizer !== null) ? this.pre_tokenizer(x, {
section_index,
}) : [x];
const tokens = this.model(sectionTokens);
return tokens;
});
return tokens;
}
/**
* Encodes a single text or a pair of texts using the model's tokenizer.
*
* @param {string} text The text to encode.
* @param {Object} options An optional object containing the following properties:
* @param {string} [options.text_pair=null] The optional second text to encode.
* @param {boolean} [options.add_special_tokens=true] Whether or not to add the special tokens associated with the corresponding model.
* @param {boolean} [options.return_token_type_ids=null] Whether to return token_type_ids.
* @returns {EncodingSingle} An object containing the encoded text.
* @private
*/
_encode_plus(text, {
text_pair = null,
add_special_tokens = true,
return_token_type_ids = null,
} = {}) {
const { tokens, token_type_ids } = this._tokenize_helper(text, { pair: text_pair, add_special_tokens });
const input_ids = this.model.convert_tokens_to_ids(tokens);
const result = {
input_ids,
attention_mask: new Array(input_ids.length).fill(1),
}
if ((return_token_type_ids ?? this.return_token_type_ids) && token_type_ids) {
result.token_type_ids = token_type_ids;
}
return result;
}
/**
* Internal helper function to tokenize a text, and optionally a pair of texts.
* @param {string} text The text to tokenize.
* @param {Object} options An optional object containing the following properties:
* @param {string} [options.pair=null] The optional second text to tokenize.
* @param {boolean} [options.add_special_tokens=false] Whether or not to add the special tokens associated with the corresponding model.
* @returns {{tokens: string[], token_type_ids?: number[]}} An object containing the tokens and optionally the token type IDs.
*/
_tokenize_helper(text, {
pair = null,
add_special_tokens = false,
} = {}) {
const tokens = this._encode_text(text);
const tokens2 = this._encode_text(pair);
return this.post_processor
? this.post_processor(tokens, tokens2, { add_special_tokens })
: { tokens: mergeArrays(tokens ?? [], tokens2 ?? []) };
}
/**
* Converts a string into a sequence of tokens.
* @param {string} text The sequence to be encoded.
* @param {Object} options An optional object containing the following properties:
* @param {string} [options.pair] A second sequence to be encoded with the first.
* @param {boolean} [options.add_special_tokens=false] Whether or not to add the special tokens associated with the corresponding model.
* @returns {string[]} The list of tokens.
*/
tokenize(text, {
pair = null,
add_special_tokens = false,
} = {}) {
return this._tokenize_helper(text, { pair, add_special_tokens }).tokens;
}
/**
* Encodes a single text or a pair of texts using the model's tokenizer.
*
* @param {string} text The text to encode.
* @param {Object} options An optional object containing the following properties:
* @param {string} [options.text_pair=null] The optional second text to encode.
* @param {boolean} [options.add_special_tokens=true] Whether or not to add the special tokens associated with the corresponding model.
* @param {boolean} [options.return_token_type_ids=null] Whether to return token_type_ids.
* @returns {number[]} An array of token IDs representing the encoded text(s).
*/
encode(text, {
text_pair = null,
add_special_tokens = true,
return_token_type_ids = null,
} = {}) {
return this._encode_plus(text, {
text_pair,
add_special_tokens,
return_token_type_ids,
}).input_ids;
}
/**
* Decode a batch of tokenized sequences.
* @param {number[][]|Tensor} batch List/Tensor of tokenized input sequences.
* @param {Object} decode_args (Optional) Object with decoding arguments.
* @returns {string[]} List of decoded sequences.
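*
* **Example:** Decoding a batch of sequences (illustrative token IDs; the output depends on the model's vocabulary).
* ```javascript
* const texts = tokenizer.batch_decode([[101, 8667, 102], [101, 1362, 102]], { skip_special_tokens: true });
* // e.g. ['Hello', 'world']
* ```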
*/
batch_decode(batch, decode_args = {}) {
if (batch instanceof Tensor) {
batch = batch.tolist();
}
return batch.map(x => this.decode(x, decode_args));
}
/**
* Decodes a sequence of token IDs back to a string.
*
* @param {number[]|bigint[]|Tensor} token_ids List/Tensor of token IDs to decode.
* @param {Object} [decode_args={}]
* @param {boolean} [decode_args.skip_special_tokens=false] If true, special tokens are removed from the output string.
* @param {boolean} [decode_args.clean_up_tokenization_spaces=true] If true, spaces before punctuations and abbreviated forms are removed.
*
* @returns {string} The decoded string.
* @throws {Error} If `token_ids` is not a non-empty array of integers.
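*
* **Example:** Decoding token IDs back to text (illustrative IDs; the output depends on the model's vocabulary).
* ```javascript
* const text = tokenizer.decode([101, 8667, 1362, 102], { skip_special_tokens: true });
* // e.g. 'Hello world'
* ```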
*/
decode(
token_ids,
decode_args = {},
) {
if (token_ids instanceof Tensor) {
token_ids = prepareTensorForDecode(token_ids);
}
if (!Array.isArray(token_ids) || token_ids.length === 0 || !isIntegralNumber(token_ids[0])) {
throw Error("token_ids must be a non-empty array of integers.");
}
return this.decode_single(token_ids, decode_args)
}
/**
* Decode a single list of token ids to a string.
* @param {number[]|bigint[]} token_ids List of token ids to decode
* @param {Object} decode_args Optional arguments for decoding
* @param {boolean} [decode_args.skip_special_tokens=false] Whether to skip special tokens during decoding
* @param {boolean} [decode_args.clean_up_tokenization_spaces=null] Whether to clean up tokenization spaces during decoding.
* If null, the value is set to `this.decoder.cleanup` if it exists, falling back to `this.clean_up_tokenization_spaces` if it exists, falling back to `true`.
* @returns {string} The decoded string
*/
decode_single(
token_ids,
{
skip_special_tokens = false,
clean_up_tokenization_spaces = null,
}
) {
let tokens = this.model.convert_ids_to_tokens(token_ids);
if (skip_special_tokens) {
tokens = tokens.filter(x => !this.special_tokens.includes(x));
}
// If `this.decoder` is null, we just join tokens with a space:
// https://github.com/huggingface/tokenizers/blob/8edec536a737cb04494b454805be16c020abb14f/tokenizers/src/tokenizer/mod.rs#L835
/** @type {string} */
let decoded = this.decoder ? this.decoder(tokens) : tokens.join(' ');
// Slight hack, but prevents having to pass `skip_special_tokens` to
// each call to `decode`, which would lead to code duplication.
if (this.decoder && this.decoder.end_of_word_suffix) {
decoded = decoded.replaceAll(this.decoder.end_of_word_suffix, ' ');
if (skip_special_tokens) {
decoded = decoded.trim();
}
}
if (clean_up_tokenization_spaces ?? this.clean_up_tokenization_spaces) {
decoded = clean_up_tokenization(decoded);
}
return decoded;
}
/**
* Retrieve the chat template string used for tokenizing chat messages. This template is used
* internally by the `apply_chat_template` method and can also be used externally to retrieve the model's chat
* template for better generation tracking.
*
* @param {Object} options An optional object containing the following properties:
* @param {string} [options.chat_template=null]
* A Jinja template or the name of a template to use for this conversion.
* It is usually not necessary to pass anything to this argument,
* as the model's template will be used by default.
* @param {Object[]} [options.tools=null]
* A list of tools (callable functions) that will be accessible to the model. If the template does not
* support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
* giving the name, description and argument types for the tool. See our
* [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
* for more information.
* @returns {string} The chat template string.
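*
* **Example:** Retrieving the chat template (illustrative; assumes the tokenizer defines a chat template).
* ```javascript
* const template = tokenizer.get_chat_template();
* // e.g. "{% for message in messages %}...{% endfor %}" (the model's Jinja template)
* ```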
*/
get_chat_template({
chat_template = null,
tools = null,
} = {}) {
// First, handle the cases when the model has a dict of multiple templates
if (this.chat_template && typeof this.chat_template === 'object') {
const template_dict = this.chat_template;
if (chat_template !== null && Object.hasOwn(template_dict, chat_template)) {
// The user can pass the name of a template to the chat template argument instead of an entire template
chat_template = template_dict[chat_template];
} else if (chat_template === null) {
if (tools !== null && 'tool_use' in template_dict) {
chat_template = template_dict['tool_use'];
} else if ('default' in template_dict) {
chat_template = template_dict['default'];
} else {
throw Error(
`This model has multiple chat templates with no default specified! Please either pass a chat ` +
`template or the name of the template you wish to use to the 'chat_template' argument. Available ` +
`template names are ${Object.keys(template_dict).sort()}.`
)
}
}
} else if (chat_template === null) {
// These are the cases when the model has a single template
// priority: `chat_template` argument > `tokenizer.chat_template`
if (this.chat_template) {
chat_template = this.chat_template;
} else {
throw Error(
"Cannot use apply_chat_template() because tokenizer.chat_template is not set and no template " +
"argument was passed! For information about writing templates and setting the " +
"tokenizer.chat_template attribute, please see the documentation at " +
"https://huggingface.co/docs/transformers/main/en/chat_templating"
)
}
}
return chat_template;
}
/**
* Converts a list of message objects with `"role"` and `"content"` keys to a list of token
* ids. This method is intended for use with chat models, and will read the tokenizer's chat_template attribute to
* determine the format and control tokens to use when converting.
*
* See [here](https://huggingface.co/docs/transformers/chat_templating) for more information.
*
* **Example:** Applying a chat template to a conversation.
*
* ```javascript
* import { AutoTokenizer } from "@huggingface/transformers";
*
* const tokenizer = await AutoTokenizer.from_pretrained("Xenova/mistral-tokenizer-v1");
*
* const chat = [
* { "role": "user", "content": "Hello, how are you?" },
* { "role": "assistant", "content": "I'm doing great. How can I help you today?" },
* { "role": "user", "content": "I'd like to show off how chat templating works!" },
* ]
*
* const text = tokenizer.apply_chat_template(chat, { tokenize: false });
* // "<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]"
*
* const input_ids = tokenizer.apply_chat_template(chat, { tokenize: true, return_tensor: false });
* // [1, 733, 16289, 28793, 22557, 28725, 910, 460, 368, 28804, 733, 28748, 16289, 28793, 28737, 28742, 28719, 2548, 1598, 28723, 1602, 541, 315, 1316, 368, 3154, 28804, 2, 28705, 733, 16289, 28793, 315, 28742, 28715, 737, 298, 1347, 805, 910, 10706, 5752, 1077, 3791, 28808, 733, 28748, 16289, 28793]
* ```
*
* @param {Message[]} conversation A list of message objects with `"role"` and `"content"` keys,
* representing the chat history so far.
* @param {Object} options An optional object containing the following properties:
* @param {string} [options.chat_template=null] A Jinja template to use for this conversion. If
* this is not passed, the model's chat template will be used instead.
* @param {Object[]} [options.tools=null]
* A list of tools (callable functions) that will be accessible to the model. If the template does not
* support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
* giving the name, description and argument types for the tool. See our
* [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
* for more information.
* @param {Record<string, string>[]} [options.documents=null]
* A list of dicts representing documents that will be accessible to the model if it is performing RAG
* (retrieval-augmented generation). If the template does not support RAG, this argument will have no
* effect. We recommend that each document should be a dict containing "title" and "text" keys. Please
* see the RAG section of the [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#arguments-for-RAG)
* for examples of passing documents with chat templates.
* @param {boolean} [options.add_generation_prompt=false] Whether to end the prompt with the token(s) that indicate
* the start of an assistant message. This is useful when you want to generate a response from the model.
* Note that this argument will be passed to the chat template, and so it must be supported in the
* template for this argument to have any effect.
* @param {boolean} [options.tokenize=true] Whether to tokenize the output. If false, the output will be a string.
* @param {boolean} [options.padding=false] Whether to pad sequences to the maximum length. Has no effect if tokenize is false.
* @param {boolean} [options.truncation=false] Whether to truncate sequences to the maximum length. Has no effect if tokenize is false.
* @param {number} [options.max_length=null] Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is false.
* If not specified, the tokenizer's `max_length` attribute will be used as a default.
* @param {boolean} [options.return_tensor=true] Whether to return the output as a Tensor or an Array. Has no effect if tokenize is false.
* @param {boolean} [options.return_dict=true] Whether to return a dictionary with named outputs. Has no effect if tokenize is false.
* @param {Object} [options.tokenizer_kwargs={}] Additional options to pass to the tokenizer.
* @returns {string | Tensor | number[]| number[][]|BatchEncoding} The tokenized output.
*/
apply_chat_template(conversation, {
tools = null,
documents = null,
chat_template = null,
add_generation_prompt = false,
tokenize = true,
padding = false,
truncation = false,
max_length = null,
return_tensor = true,
return_dict = false,
tokenizer_kwargs = {},
...kwargs
} = {}) {
chat_template = this.get_chat_template({ chat_template, tools });
if (typeof chat_template !== 'string') {
throw Error(`chat_template must be a string, but got ${typeof chat_template}`);
}
// Compilation function uses a cache to avoid recompiling the same template
let compiledTemplate = this._compiled_template_cache.get(chat_template);
if (compiledTemplate === undefined) {
compiledTemplate = new Template(chat_template);
this._compiled_template_cache.set(chat_template, compiledTemplate);
}
const special_tokens_map = Object.create(null);
for (const key of SPECIAL_TOKEN_ATTRIBUTES) {
const value = this.getToken(key);
if (value) {
special_tokens_map[key] = value;
}
}
const rendered = compiledTemplate.render({
messages: conversation,
add_generation_prompt,
tools,
documents,
...special_tokens_map,
...kwargs,
});
if (tokenize) {
const out = this._call(rendered, {
add_special_tokens: false,
padding,
truncation,
max_length,
return_tensor,
...tokenizer_kwargs,
});
return return_dict ? out : out.input_ids;
}
return rendered;
}
}
/**
* BertTokenizer is a class used to tokenize text for BERT models.
* @extends PreTrainedTokenizer
*/
export class BertTokenizer extends PreTrainedTokenizer {
return_token_type_ids = true;
}
/**
* Albert tokenizer
* @extends PreTrainedTokenizer
*/
export class AlbertTokenizer extends PreTrainedTokenizer {
return_token_type_ids = true;
}
export class MobileBertTokenizer extends PreTrainedTokenizer {
return_token_type_ids = true;
}
export class SqueezeBertTokenizer extends PreTrainedTokenizer {
return_token_type_ids = true;
}
export class DebertaTokenizer extends PreTrainedTokenizer {
return_token_type_ids = true;
}
export class DebertaV2Tokenizer extends PreTrainedTokenizer {
return_token_type_ids = true;
}
export class HerbertTokenizer extends PreTrainedTokenizer {
return_token_type_ids = true;
}
export class ConvBertTokenizer extends PreTrainedTokenizer {
return_token_type_ids = true;
}
export class RoFormerTokenizer extends PreTrainedTokenizer {
return_token_type_ids = true;
}
export class DistilBertTokenizer extends PreTrainedTokenizer { }
export class CamembertTokenizer extends PreTrainedTokenizer { }
export class XLMTokenizer extends PreTrainedTokenizer {
return_token_type_ids = true;
constructor(tokenizerJSON, tokenizerConfig) {
super(tokenizerJSON, tokenizerConfig);
console.warn('WARNING: `XLMTokenizer` is not yet supported by Hugging Face\'s "fast" tokenizers library. Therefore, you may experience slightly inaccurate results.')
}
}
export class ElectraTokenizer extends PreTrainedTokenizer {
return_token_type_ids = true;
}
export class T5Tokenizer extends PreTrainedTokenizer { }
export class GPT2Tokenizer extends PreTrainedTokenizer { }
export class BartTokenizer extends PreTrainedTokenizer { }
export class MBartTokenizer extends PreTrainedTokenizer {
constructor(tokenizerJSON, tokenizerConfig) {
super(tokenizerJSON, tokenizerConfig);
this.languageRegex = /^[a-z]{2}_[A-Z]{2}$/;
this.language_codes = this.special_tokens.filter(x => this.languageRegex.test(x));
this.lang_to_token = x => x; // Identity function
}
/**
* Helper function to build translation inputs for an `MBartTokenizer`.
* @param {string|string[]} raw_inputs The text to tokenize.
* @param {Object} tokenizer_options Options to be sent to the tokenizer
* @param {Object} generate_kwargs Generation options.
* @returns {Object} Object to be passed to the model.
*/
_build_translation_inputs(raw_inputs, tokenizer_options, generate_kwargs) {
return _build_translation_inputs(this, raw_inputs, tokenizer_options, generate_kwargs);
}
}
export class MBart50Tokenizer extends MBartTokenizer { } // NOTE: extends MBartTokenizer
export class RobertaTokenizer extends PreTrainedTokenizer { }
export class BloomTokenizer extends PreTrainedTokenizer { }
const SPIECE_UNDERLINE = "▁";
export class LlamaTokenizer extends PreTrainedTokenizer {
padding_side = 'left';
constructor(tokenizerJSON, tokenizerConfig) {
super(tokenizerJSON, tokenizerConfig);
this.legacy = tokenizerConfig.legacy ?? true;
if (!this.legacy) {
// See https://github.com/huggingface/transformers/pull/24565 for more information
this.normalizer = null;
this.pre_tokenizer = new MetaspacePreTokenizer({
replacement: SPIECE_UNDERLINE,
add_prefix_space: true,
prepend_scheme: "first",
});
}
}
/**
* Helper function to handle legacy encoding of SPM tokenizers.
* Adapted from https://github.com/huggingface/transformers/blob/e6dcf8abd6f65bb4b6dfc1831b20d9ba49ce00e2/src/transformers/models/t5/tokenization_t5.py#L374-L387
* @param {string} text The text to encode.
* @returns {string[]} The encoded tokens.
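*
* Illustrative example: in non-legacy mode, `"Hello"` is first converted to `"▁Hello"`
* (a SPIECE_UNDERLINE prefix is prepended) before encoding, and a leading `"▁"` token
* is stripped again when it is immediately followed by a special token.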
*/
_encode_text(text) {
if (text === null) return null;
if (this.legacy || text.length === 0) {
return super._encode_text(text);
}
let tokens = super._encode_text(SPIECE_UNDERLINE + text.replaceAll(SPIECE_UNDERLINE, " "));
if (tokens.length > 1 && tokens[0] === SPIECE_UNDERLINE && this.special_tokens.includes(tokens[1])) {
tokens = tokens.slice(1);
}
return tokens;
}
}
export class CodeLlamaTokenizer extends PreTrainedTokenizer { }
export class XLMRobertaTokenizer extends PreTrainedTokenizer { }
export class MPNetTokenizer extends PreTrainedTokenizer { }
export class FalconTokenizer extends PreTrainedTokenizer { }
export class GPTNeoXTokenizer extends PreTrainedTokenizer { }
export class EsmTokenizer extends PreTrainedTokenizer { }
export class Qwen2Tokenizer extends PreTrainedTokenizer { }
export class GemmaTokenizer extends PreTrainedTokenizer { }
export class Grok1Tokenizer extends PreTrainedTokenizer { }
/**
* Helper function to build translation inputs for an `NllbTokenizer` or `M2M100Tokenizer`.
* @param {PreTrainedTokenizer} self The tokenizer instance.
* @param {string|string[]} raw_inputs The text to tokenize.
* @param {Object} tokenizer_options Options to be sent to the tokenizer
* @param {Object} generate_kwargs Generation options.
* @returns {Object} Object to be passed to the model.
* @private
*/
function _build_translation_inputs(self, raw_inputs, tokenizer_options, generate_kwargs) {
if (!('language_codes' in self) || !Array.isArray(self.language_codes)) {
throw new Error('Tokenizer must have `language_codes` attribute set and it should be an array of language ids.')
}
if (!('languageRegex' in self) || !(self.languageRegex instanceof RegExp)) {
throw new Error('Tokenizer must have `languageRegex` attribute set and it should be a regular expression.')
}
if (!('lang_to_token' in self) || typeof self.lang_to_token !== 'function') {
throw new Error('Tokenizer must have `lang_to_token` attribute set and it should be a function.')
}
const src_lang_token = generate_kwargs.src_lang;
const tgt_lang_token = generate_kwargs.tgt_lang;
// Check that the target language is valid:
if (!self.language_codes.includes(tgt_lang_token)) {
throw new Error(`Target language code "${tgt_lang_token}" is not valid. Must be one of: {${self.language_codes.join(', ')}}`);
}
// Allow `src_lang` to be optional. If not set, we'll use the tokenizer's default.
if (src_lang_token !== undefined) {
// Check that the source language is valid:
if (!self.language_codes.includes(src_lang_token)) {
throw new Error(`Source language code "${src_lang_token}" is not valid. Must be one of: {${self.language_codes.join(', ')}}`);
}
// In the same way as the Python library, we override the post-processor
// to force the source language to be first:
for (const item of self.post_processor.config.single) {
if ('SpecialToken' in item && self.languageRegex.test(item.SpecialToken.id)) {
item.SpecialToken.id = self.lang_to_token(src_lang_token);
break;
}
}
// TODO: Do the same for pair?
}
// Override the `forced_bos_token_id` to force the correct language
generate_kwargs.forced_bos_token_id = self.model.convert_tokens_to_ids([self.lang_to_token(tgt_lang_token)])[0];
return self._call(raw_inputs, tokenizer_options);
}
/**
* The NllbTokenizer class is used to tokenize text for NLLB ("No Language Left Behind") models.
*
* No Language Left Behind (NLLB) is a first-of-its-kind, AI breakthrough project
* that open-sources models capable of delivering high-quality translations directly
* between any pair of 200+ languages — including low-resource languages like Asturian,
* Luganda, Urdu and more. It aims to help people communicate with anyone, anywhere,
* regardless of their language preferences. For more information, check out their
* [paper](https://huggingface.co/papers/2207.04672).
*
* For a list of supported languages (along with their language codes),
* @see {@link https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200}
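*
* **Example:** Building translation inputs (illustrative model id; this helper is normally
* invoked by the translation pipeline, and language codes follow the FLORES-200 format).
* ```javascript
* const tokenizer = await AutoTokenizer.from_pretrained('Xenova/nllb-200-distilled-600M');
* const generate_kwargs = { src_lang: 'eng_Latn', tgt_lang: 'fra_Latn' };
* const inputs = tokenizer._build_translation_inputs('Hello world', { padding: true }, generate_kwargs);
* // `generate_kwargs.forced_bos_token_id` is now set to the target language token id.
* ```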
*/
export class NllbTokenizer extends PreTrainedTokenizer {
constructor(tokenizerJSON, tokenizerConfig) {
super(tokenizerJSON, tokenizerConfig);
this.languageRegex = /^[a-z]{3}_[A-Z][a-z]{3}$/;
this.language_codes = this.special_tokens.filter(x => this.languageRegex.test(x));
this.lang_to_token = x => x; // Identity function
}
/**
* Helper function to build translation inputs for an `NllbTokenizer`.
* @param {string|string[]} raw_inputs The text to tokenize.
* @param {Object} tokenizer_options Options to be sent to the tokenizer
* @param {Object} generate_kwargs Generation options.
* @returns {Object} Object to be passed to the model.
*/
_build_translation_inputs(raw_inputs, tokenizer_options, generate_kwargs) {
return _build_translation_inputs(this, raw_inputs, tokenizer_options, generate_kwargs);
}
}
/**
* The M2M100Tokenizer class is used to tokenize text for M2M100 ("Many-to-Many") models.
*
* M2M100 is a multilingual encoder-decoder (seq-to-seq) model trained for Many-to-Many
* multilingual translation. It was introduced in this [paper](https://huggingface.co/papers/2010.11125)
* and first released in [this](https://github.com/pytorch/fairseq/tree/master/examples/m2m_100) repository.
*
* For a list of supported languages (along with their language codes),
* @see {@link https://huggingface.co/facebook/m2m100_418M#languages-covered}
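*
* Note: language codes are stored internally as `__xx__` special tokens, so passing e.g.
* `src_lang: 'en'` and `tgt_lang: 'fr'` maps to the `__en__` and `__fr__` tokens respectively.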
*/
export class M2M100Tokenizer extends PreTrainedTokenizer {
constructor(tokenizerJSON, tokenizerConfig) {
super(tokenizerJSON, tokenizerConfig);
this.languageRegex = /^__[a-z]{2,3}__$/;
this.language_codes = this.special_tokens
.filter(x => this.languageRegex.test(x))
.map(x => x.slice(2, -2));
this.lang_to_token = x => `__${x}__`;
}
/**
* Helper function to build translation inputs for an `M2M100Tokenizer`.
* @param {string|string[]} raw_inputs The text to tokenize.
* @param {Object} tokenizer_options Options to be sent to the tokenizer
* @param {Object} generate_kwargs Generation options.
* @returns {Object} Object to be passed to the model.
*/
_build_translation_inputs(raw_inputs, tokenizer_options, generate_kwargs) {
return _build_translation_inputs(this, raw_inputs, tokenizer_options, generate_kwargs);
}
}
/**
* WhisperTokenizer tokenizer
* @extends PreTrainedTokenizer
*/
export class WhisperTokenizer extends PreTrainedTokenizer {
get timestamp_begin() {
return this.model.convert_tokens_to_ids(["<|notimestamps|>"])[0] + 1;
}
/**
* Decodes automatic speech recognition (ASR) sequences.
* @param {Array<{tokens: bigint[], token_timestamps?: number[], stride: number[]}>} sequences The sequences to decode.
* @param {Object} options The options to use for decoding.
* @returns {Array<string|{chunks?: undefined|Array<{language: string|null, timestamp: Array<number|null>, text: string}>}>} The decoded sequences.
*/
_decode_asr(sequences, {
return_timestamps = false,
return_language = false,
time_precision = null,
force_full_sequences = true
} = {}) {
// Set force_full_sequences=false if you want streaming
// TODO add support for `return_language`
// Internal method meant to be used only by the ASR pipeline.
// Handles all the little quirks specific to whisper, supporting
// the various options not allowed in other seq2seq models.
// =========== Overview ============
// - iterate over all outputs
// - all tokens within output
// - Each token can be
// - language token
// - special token
// - timestamp token
// - text token
// - We accumulate the text tokens.
// - We split on end timestamps
// - Lots of complexity comes from stride and timestamps
if (time_precision === null) {
throw Error("Must specify time_precision")
}
let last_language = null;
const returnWordTimestamps = return_timestamps === "word";
function new_chunk() {
return { "language": last_language, "timestamp": [null, null], "text": "" };
}
// Welcome to the state machine!
const chunks = [];
let chunk = new_chunk();
let time_offset = 0.0;
const timestamp_begin = this.timestamp_begin;
// Whisper timestamp tokens start from 0.00 and go up to 30.00 in 0.02 increments.
// We can calculate the last timestamp token as timestamp_begin plus the number
// of tokens from 0.00 to 30.00, which is 1500.
const total_timestamp_tokens = 1500; // (30.00 - 0.00) / 0.02
const timestamp_end = timestamp_begin + total_timestamp_tokens;
let previous_tokens = [];
let previous_token_timestamps = [];
let skip = false;
let right_stride_start = null;
const all_special_ids = new Set(this.all_special_ids);
for (const output of sequences) {
// NOTE: python version has batches, so it uses [0]
const token_ids = output.tokens;
const token_timestamps = returnWordTimestamps ? output.token_timestamps : null;
// These keep track of timestamps within strides, which need
// to be skipped so that all tokens within a stride are resolved in a single chunk.
let last_timestamp = null;
let first_timestamp = timestamp_begin;
if ("stride" in output) {
const [chunk_len, stride_left, stride_right] = output.stride;
// Offset the timings to account for the other `model_outputs`.
time_offset -= stride_left;
right_stride_start = chunk_len - stride_right;
// Keeping track of timestamps within strides.
// We're NOT going to split on those; instead, we delay until we're
// out of BOTH strides. Otherwise, lots of issues and corner cases occur.
if (stride_left) {
first_timestamp = stride_left / time_precision + timestamp_begin;
}
if (stride_right) {
for (let i = token_ids.length - 1; i >= 0; --i) {
const token = Number(token_ids[i]);
if (token >= timestamp_begin) {
// There can be several tokens in the right stride
// But the last one is ALWAYS going to be skipped
if (last_timestamp !== null && (token - timestamp_begin) * time_precision < right_stride_start) {
break;
}
last_timestamp = token;
}
}
}
}
let current_tokens = [];
let current_token_timestamps = [];
// - all tokens within output
for (let i = 0; i < token_ids.length; ++i) {
const token = Number(token_ids[i]);
// 4 possible states for each token
// - 1/ Language code
// - 2/ all other special tokens (which we ignore)
// - 3/ Timestamp
// - 4/ Regular text
if (all_special_ids.has(token)) {
const text = this.decode([token]);
const language = WHISPER_LANGUAGE_MAPPING.get(text.slice(2, -2));
if (language !== undefined) {
// 1/ Indeed some language
// TODO Handle when language is different from the previous
// one, and we cannot use timestamped tokens to create chunks
if (last_language !== null && language !== last_language && !return_timestamps) {
previous_tokens.push(current_tokens);
const resolved_tokens = this.findLongestCommonSequence(previous_tokens)[0];
const resolved_text = this.decode(resolved_tokens);
chunk.text = resolved_text;
chunks.push(chunk);
// Flush all our temporary context
previous_tokens = [];
current_tokens = [];
chunk = new_chunk();
}
last_language = chunk.language = language;
} else {
// 2/ This is a regular special token, ignoring it
}
} else if (token >= timestamp_begin && token <= timestamp_end) {
// 3/ Timestamp token
const time = (token - timestamp_begin) * time_precision + time_offset;
const rounded_time = round(time, 2);
if (last_timestamp !== null && token >= last_timestamp) {
// Whisper outputted a timestamp token, but it falls within
// our stride, so we're going to skip it for the time being
// and resolve this later
// Skipping is necessary because timestamp tokens always come
// in pairs, so we need to skip the next one too (which would mark the start of another chunk).
skip = true;
} else if (skip || (previous_tokens.length > 0 && token < first_timestamp)) {
skip = false;
} else if (chunk.timestamp[0] === null) {
chunk.timestamp[0] = rounded_time;
} else {
// This is the end of the timestamp chunk
if (rounded_time === chunk.timestamp[0]) {
// This is a bug in timestamp token output
// where we're taking the duplicate token
// as a stop when it should be a start.
// This is an issue in the underlying model output.
// Let's just skip it so it becomes de facto a start again.
} else {
chunk.timestamp[1] = rounded_time;
// Handling merges
previous_tokens.push(current_tokens)
if (returnWordTimestamps) {
previous_token_timestamps.push(current_token_timestamps);
}
const [resolved_tokens, resolved_token_timestamps] = this.findLongestCommonSequence(
previous_tokens, previous_token_timestamps
)
const resolved_text = this.decode(resolved_tokens)
chunk.text = resolved_text
if (returnWordTimestamps) {
chunk.words = this.collateWordTimestamps(
resolved_tokens, resolved_token_timestamps, last_language,
)
}
chunks.push(chunk)
// Flush all our temporary context
previous_tokens = []
current_tokens = []
previous_token_timestamps = []
current_token_timestamps = []
chunk = new_chunk()
}
}
} else {
// 4/ Regular token
// We just append to the list of all tokens so we can handle
// merges later and decode into text.
current_tokens.push(token)
if (returnWordTimestamps) {
let start_time = round(token_timestamps[i] + time_offset, 2);
let end_time;
if (i + 1 < token_timestamps.length) {
end_time = round(token_timestamps[i + 1] + time_offset, 2);
// Do not allow punctuation-only tokens to have a duration.
// This prevents long pauses from messing up the timestamps.
const decoded_text = this.decode([token]);
if (PUNCTUATION_ONLY_REGEX.test(decoded_text)) {
// Add `time_precision` to avoid overlapping timestamps
end_time = round(Math.min(start_time + time_precision, end_time), 2);
}
} else {
// should never happen
end_time = null;
}
current_token_timestamps.push([start_time, end_time]);
}
}
}
if ('stride' in output) {
const [chunk_len, stride_left, stride_right] = output.stride;
time_offset += chunk_len - stride_right
}
// Leftover tokens
if (current_tokens.length > 0) {
previous_tokens.push(current_tokens)
if (returnWordTimestamps) {
previous_token_timestamps.push(current_token_timestamps);
}
} else if (previous_tokens.every(p => p.length === 0)) {
// Flushing previous tokens (END)
chunk = new_chunk()
previous_tokens = []
current_tokens = []
previous_token_timestamps = [];
current_token_timestamps = [];
}
}
if (previous_tokens.length > 0) {
if (force_full_sequences && return_timestamps) {
// Last token should always be timestamps, so there shouldn't be
// leftover
throw new Error(
"Whisper did not predict an ending timestamp, which can happen if audio is cut off in the middle of a word. " +
"Also make sure WhisperTimeStampLogitsProcessor was used during generation."
);
}
// Happens when we don't use timestamps
const [resolved_tokens, resolved_token_timestamps] = this.findLongestCommonSequence(previous_tokens, previous_token_timestamps);
// Flushing previous tokens (FINAL)
const resolved_text = this.decode(resolved_tokens);
chunk.text = resolved_text;
if (returnWordTimestamps) {
chunk.words = this.collateWordTimestamps(
resolved_tokens, resolved_token_timestamps, last_language,
)
}
chunks.push(chunk);
}
let optional = Object.create(null);
// Preparing and cleaning up the pipeline output
const full_text = chunks.map(chunk => chunk.text).join('');
if (return_timestamps || return_language) {
for (let i = 0; i < chunks.length; ++i) {
const chunk = chunks[i];
if (!return_timestamps) {
delete chunk["timestamp"];
}
if (!return_language) {
delete chunk["language"];
}
}
if (returnWordTimestamps) {
const new_chunks = [];
for (const chunk of chunks) {
for (const word of chunk.words) {
new_chunks.push(word);
}
}
optional = { "chunks": new_chunks };
} else {
optional = { "chunks": chunks };
}
}
return [full_text, optional];
}
/**
* Finds the longest common sequence among the provided sequences.
* @param {number[][]} sequences An array of sequences of token ids to compare.
* @param {number[][]} [token_timestamp_sequences] Optional token timestamp sequences, which are split in exactly the same way.
* @returns {number[][]} A pair containing the merged token sequence and (if provided) the corresponding token timestamps.
* @throws {Error} If there is a bug within the function.
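*
* Illustrative example (hypothetical token IDs): merging the overlapping chunks
* `[1, 2, 3, 4]` and `[3, 4, 5]` matches the overlap `[3, 4]`, splits it at its
* midpoint, and yields the merged sequence `[1, 2, 3, 4, 5]`.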
* @private
*/
findLongestCommonSequence(sequences, token_timestamp_sequences = null) {
// It would be much harder to do this in O(n) because of fault tolerance.
// We actually have a really useful property: the total sequence
// MUST contain those subsequences, in order.
// If token_timestamp_sequences is provided, those sequences will be split in
// exactly the same way.
let leftSequence = sequences[0];
let leftLength = leftSequence.length;
let totalSequence = [];
const use_token_timestamp_sequences = Array.isArray(token_timestamp_sequences) && token_timestamp_sequences.length > 0;
let total_token_timestamp_sequence = use_token_timestamp_sequences ? [] : null;
let left_token_timestamp_sequence = use_token_timestamp_sequences ? token_timestamp_sequences[0] : null;
for (let i = 1; i < sequences.length; ++i) {
const rightSequence = sequences[i];
let max = 0.0;
let maxIndices = [leftLength, leftLength, 0, 0];
// Here we're sliding matches
// [a, b, c, d]
// [c, d, f]
// = [c] == [d]
// [a, b, c, d]
// [c, d, f]
// = [c, d] == [c, d]
// [a, b, c, d]
// [c, d, f]
// = [b, c, d] == [c, d, f]
// [a, b, c, d]
// [c, d, f]
// [a, b, c] == [c, d, f]
// [a, b, c, d]
// [d, f]
// [a, b] == [d, f]
// [a, b, c, d]
// [f]
// [a] == [f]
const rightLength = rightSequence.length;
for (let j = 1; j < leftLength + rightLength; ++j) {
// Slightly convoluted because we don't want out of bound indices
// This will be necessary for a small conflict resolution optimization
// later
const leftStart = Math.max(0, leftLength - j);
const leftStop = Math.min(leftLength, leftLength + rightLength - j);
const left = leftSequence.slice(leftStart, leftStop);
const rightStart = Math.max(0, j - leftLength);
const rightStop = Math.min(rightLength, j);
const right = rightSequence.slice(rightStart, rightStop);
if (left.length !== right.length) {
throw new Error("There is a bug within whisper `decode_asr` function, please report it. Dropping to prevent bad inference.");
}
let matches;
if (use_token_timestamp_sequences) {
// Get length of longest subsequence of tokens that match
// and have timestamps that are in order
matches = left.filter((elem, idx) => (
elem === right[idx]
&& left_token_timestamp_sequence[leftStart + idx] <= token_timestamp_sequences[i][rightStart + idx]
)).length;
} else {
matches = left.filter((elem, idx) => elem === right[idx]).length;
}
// epsilon to favor long perfect matches
const eps = j / 10000.0;
const matching = matches / j + eps;
if (matches > 1 && matching > max) {
max = matching;
maxIndices = [leftStart, leftStop, rightStart, rightStop];
}
}
const [leftStart, leftStop, rightStart, rightStop] = maxIndices;
const leftMid = Math.floor((leftStop + leftStart) / 2);
const rightMid = Math.floor((rightStop + rightStart) / 2);
totalSequence.push(...leftSequence.slice(0, leftMid));
leftSequence = rightSequence.slice(rightMid);
leftLength = leftSequence.length;
if (use_token_timestamp_sequences) {
total_token_timestamp_sequence.push(...left_token_timestamp_sequence.slice(0, leftMid));
left_token_timestamp_sequence = token_timestamp_sequences[i].slice(rightMid);
}
}
totalSequence.push(...leftSequence);
if (use_token_timestamp_sequences) {
total_token_timestamp_sequence.push(...left_token_timestamp_sequence);
return [totalSequence, total_token_timestamp_sequence];
} else {
return [totalSequence, []];
}
}
/** @private */
collateWordTimestamps(tokens, token_timestamps, language) {
const [words, _, token_indices] = this.combineTokensIntoWords(tokens, language);
const timings = [];
for (let i = 0; i < words.length; ++i) {
const indices = token_indices[i];
timings.push({
text: words[i],
timestamp: [
token_timestamps[indices.at(0)][0],
token_timestamps[indices.at(-1)][1],
],
});
}
return timings;
}
/**
* Groups tokens by word. Returns a tuple containing a list of strings with the words,
* and a list of `token_id` sequences with the tokens making up each word.
* @param {number[]} tokens
* @param {string} [language]
* @param {string} prepend_punctuations
* @param {string} append_punctuations
*
* @private
*/
combineTokensIntoWords(tokens, language, prepend_punctuations = "\"'“¡¿([{-", append_punctuations = "\"'.。,,!!??::”)]}、") {
language = language ?? 'english';
let words, word_tokens, token_indices;
if (["chinese", "japanese", "thai", "lao", "myanmar"].includes(language)) {
// These languages don't typically use spaces.
[words, word_tokens, token_indices] = this.splitTokensOnUnicode(tokens)
} else {
[words, word_tokens, token_indices] = this.splitTokensOnSpaces(tokens)
}
return this.mergePunctuations(words, word_tokens, token_indices, prepend_punctuations, append_punctuations);
}
/** @type {PreTrainedTokenizer['decode']} */
decode(
token_ids,
decode_args,
) {
let text;
// @ts-ignore
if (decode_args?.decode_with_timestamps) {
if (token_ids instanceof Tensor) {
token_ids = prepareTensorForDecode(token_ids);
}
text = this.decodeWithTimestamps(token_ids, decode_args);
} else {
text = super.decode(token_ids, decode_args);
}
// TODO: implement offsets
// if (decode_args.output_offsets) {
// let offsets = this.computeOffsets
// }
return text;
}
/**
* @param {number[]|bigint[]} token_ids List of token IDs to decode.
* @param {Object} decode_args Optional arguments for decoding
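* @returns {string} The decoded text with `<|x.xx|>` timestamp markers interleaved
* (illustrative output: `"<|0.00|> Hello world<|1.20|>"`).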
* @private
*/
decodeWithTimestamps(token_ids, decode_args) {
const time_precision = decode_args?.time_precision ?? 0.02;
const timestamp_begin = Array.from(this.all_special_ids).at(-1) + 1;
/**@type {Array} */
let outputs = [[]];
for (let token of token_ids) {
token = Number(token);
if (token >= timestamp_begin) {
const timestamp = ((token - timestamp_begin) * time_precision).toFixed(2);
outputs.push(`<|${timestamp}|>`);
outputs.push([]);
} else {
outputs[outputs.length - 1].push(token);
}
}
outputs = outputs.map(
s => typeof s === 'string' ? s : super.decode(s, decode_args)
)
return outputs.join('');
}
/**
* Combine tokens into words by splitting at any position where the tokens are decoded as valid unicode points.
* @param {number[]} tokens
* @returns {*}
* @private
*/
splitTokensOnUnicode(tokens) {
const decoded_full = this.decode(tokens, {
// @ts-ignore
decode_with_timestamps: true,
});
const replacement_char = '\uFFFD';
const words = []
const word_tokens = []
const token_indices = []
let current_tokens = []
let current_indices = []
let unicode_offset = 0
for (let token_idx = 0; token_idx < tokens.length; ++token_idx) {
const token = tokens[token_idx];
current_tokens.push(token);
current_indices.push(token_idx);
const decoded = this.decode(current_tokens, {
// @ts-ignore
decode_with_timestamps: true,
});
if (!decoded.includes(replacement_char) || decoded_full[unicode_offset + decoded.indexOf(replacement_char)] === replacement_char) {
words.push(decoded)
word_tokens.push(current_tokens)
token_indices.push(current_indices)
current_tokens = []
current_indices = []
unicode_offset += decoded.length;
}
}
return [words, word_tokens, token_indices]
}
/**
* Combine tokens into words by splitting at whitespace and punctuation tokens.
* @param {number[]} tokens
* @private
*/
splitTokensOnSpaces(tokens) {
const [subwords, subword_tokens_list, subword_indices_list] = this.splitTokensOnUnicode(tokens);
const words = []
const word_tokens = []
const token_indices = []
const punctuationRegex = new RegExp(`^[${PUNCTUATION_REGEX}]$`, 'gu');
for (let i = 0; i < subwords.length; ++i) {
const subword = subwords[i];
const subword_tokens = subword_tokens_list[i];
const subword_indices = subword_indices_list[i];
// @ts-ignore
const special = subword_tokens[0] >= this.model.tokens_to_ids.get('<|endoftext|>');
const with_space = subword.startsWith(' ');
const trimmed = subword.trim();
const punctuation = punctuationRegex.test(trimmed);
if (special || with_space || punctuation || words.length === 0) {
words.push(subword);
word_tokens.push(subword_tokens);
token_indices.push(subword_indices);
} else {
const ix = words.length - 1;
words[ix] += subword;
word_tokens[ix].push(...subword_tokens);
token_indices[ix].push(...subword_indices);
}
}
return [words, word_tokens, token_indices];
}
/**
* Merges punctuation tokens with neighboring words.
* @param {string[]} words
* @param {number[][]} tokens
* @param {number[][]} indices
* @param {string} prepended
* @param {string} appended
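*
* Illustrative example: given `words = [' ¿', 'Cómo', ' estás', '?']`, the opening
* `' ¿'` is merged into the following word and the trailing `'?'` into the preceding
* one, yielding `[' ¿Cómo', ' estás?']`; the token and index lists are merged the same way.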
* @private
*/
mergePunctuations(words, tokens, indices, prepended, appended) {
const newWords = structuredClone(words);
const newTokens = structuredClone(tokens);
const newIndices = structuredClone(indices);
// prepend punctuations
let i = newWords.length - 2;
let j = newWords.length - 1;
while (i >= 0) {
if (newWords[i].startsWith(' ') && prepended.includes(newWords[i].trim())) {
newWords[j] = newWords[i] + newWords[j];
newTokens[j] = mergeArrays(newTokens[i], newTokens[j]);
newIndices[j] = mergeArrays(newIndices[i], newIndices[j]);
newWords[i] = '';
newTokens[i] = [];
newIndices[i] = [];
} else {
j = i;
}
--i;
}
// append punctuations
i = 0;
j = 1;
while (j < newWords.length) {
if (!newWords[i].endsWith(' ') && appended.includes(newWords[j])) {
newWords[i] += newWords[j];
newTokens[i] = mergeArrays(newTokens[i], newTokens[j]);
newIndices[i] = mergeArrays(newIndices[i], newIndices[j]);
newWords[j] = '';
newTokens[j] = [];
newIndices[j] = [];
} else {
i = j;
}
++j;
}
return [
newWords.filter(x => x),
newTokens.filter(x => x.length > 0),
newIndices.filter(x => x.length > 0),
]
}
}
export class CodeGenTokenizer extends PreTrainedTokenizer { }
export class CLIPTokenizer extends PreTrainedTokenizer { }
export class SiglipTokenizer extends PreTrainedTokenizer { }
/**
* @todo This model is not yet supported by Hugging Face's "fast" tokenizers library (https://github.com/huggingface/tokenizers).
* Therefore, this implementation (which is based on fast tokenizers) may produce slightly inaccurate results.
*/
export class MarianTokenizer extends PreTrainedTokenizer {
/**
* Create a new MarianTokenizer instance.
* @param {Object} tokenizerJSON The JSON of the tokenizer.
* @param {Object} tokenizerConfig The config of the tokenizer.
*/
constructor(tokenizerJSON, tokenizerConfig) {
super(tokenizerJSON, tokenizerConfig);
this.languageRegex = /^(>>\w+<<)\s*/g;
this.supported_language_codes = this.model.vocab.filter(
x => this.languageRegex.test(x)
);
console.warn('WARNING: `MarianTokenizer` is not yet supported by Hugging Face\'s "fast" tokenizers library. Therefore, you may experience slightly inaccurate results.')
}
/**
* Encodes a single text. Overriding this method is necessary since the language codes
* must be removed before encoding with the sentencepiece model.
* @see https://github.com/huggingface/transformers/blob/12d51db243a00726a548a43cc333390ebae731e3/src/transformers/models/marian/tokenization_marian.py#L204-L213
*
* @param {string|null} text The text to encode.
* @returns {Array} The encoded tokens.
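*
* Illustrative example: `">>fra<< Hello"` is split so that the language token
* `">>fra<<"` is kept verbatim and only `"Hello"` is encoded with the sentencepiece model.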
*/
_encode_text(text) {
if (text === null) return null;
// Check if text starts with language code:
const [matchInfo, ...remainder] = text.trim().split(this.languageRegex);
if (remainder.length === 0) {
// No language code, encode normally
return super._encode_text(matchInfo);
} else if (remainder.length === 2) {
// Text starts with language code, so we do not encode it with sentencepiece.
const [language, text] = remainder;
if (!this.supported_language_codes.includes(language)) {
console.warn(`Unsupported language code "${language}" detected, which may lead to unexpected behavior. Should be one of: ${JSON.stringify(this.supported_language_codes)}`)
}
return mergeArrays([language], super._encode_text(text));
}
}
}
export class Wav2Vec2CTCTokenizer extends PreTrainedTokenizer { }
export class BlenderbotTokenizer extends PreTrainedTokenizer { }
export class BlenderbotSmallTokenizer extends PreTrainedTokenizer { }
export class SpeechT5Tokenizer extends PreTrainedTokenizer { }
export class NougatTokenizer extends PreTrainedTokenizer { }
export class VitsTokenizer extends PreTrainedTokenizer {
constructor(tokenizerJSON, tokenizerConfig) {
super(tokenizerJSON, tokenizerConfig);
// Custom decoder function
this.decoder = new VitsDecoder({});
}
}
export class CohereTokenizer extends PreTrainedTokenizer { }
export class MgpstrTokenizer extends PreTrainedTokenizer { }
export class Ernie4_5_Tokenizer extends PreTrainedTokenizer { }
/**
* Helper class which is used to instantiate pretrained tokenizers with the `from_pretrained` function.
* The chosen tokenizer class is determined by the type specified in the tokenizer config.
*
* @example
* const tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased');
*/
export class AutoTokenizer {
static TOKENIZER_CLASS_MAPPING = {
T5Tokenizer,
DistilBertTokenizer,
CamembertTokenizer,
DebertaTokenizer,
DebertaV2Tokenizer,
BertTokenizer,
HerbertTokenizer,
ConvBertTokenizer,
RoFormerTokenizer,
XLMTokenizer,
ElectraTokenizer,
MobileBertTokenizer,
SqueezeBertTokenizer,
AlbertTokenizer,
GPT2Tokenizer,
BartTokenizer,
MBartTokenizer,
MBart50Tokenizer,
RobertaTokenizer,
WhisperTokenizer,
CodeGenTokenizer,
CLIPTokenizer,
SiglipTokenizer,
MarianTokenizer,
BloomTokenizer,
NllbTokenizer,
M2M100Tokenizer,
LlamaTokenizer,
CodeLlamaTokenizer,
XLMRobertaTokenizer,
MPNetTokenizer,
FalconTokenizer,
GPTNeoXTokenizer,
EsmTokenizer,
Wav2Vec2CTCTokenizer,
BlenderbotTokenizer,
BlenderbotSmallTokenizer,
SpeechT5Tokenizer,
NougatTokenizer,
VitsTokenizer,
Qwen2Tokenizer,
GemmaTokenizer,
Grok1Tokenizer,
CohereTokenizer,
MgpstrTokenizer,
Ernie4_5_Tokenizer,
// Base case:
PreTrainedTokenizer,
}
/**
* Instantiate one of the tokenizer classes of the library from a pretrained model.
*
* The tokenizer class to instantiate is selected based on the `tokenizer_class` property of the config object
* (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible)
*
* @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either:
* - A string, the *model id* of a pretrained tokenizer hosted inside a model repo on huggingface.co.
* Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
* user or organization name, like `dbmdz/bert-base-german-cased`.
* - A path to a *directory* containing tokenizer files, e.g., `./my_model_directory/`.
* @param {PretrainedTokenizerOptions} options Additional options for loading the tokenizer.
*
* @returns {Promise<PreTrainedTokenizer>} A new instance of the PreTrainedTokenizer class.
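*
* @example
* // Illustrative usage (same model id as the class-level example):
* const tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased');
* const { input_ids, attention_mask } = tokenizer('Hello world!');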
*/
static async from_pretrained(pretrained_model_name_or_path, {
progress_callback = null,
config = null,
cache_dir = null,
local_files_only = false,
revision = 'main',
legacy = null,
} = {}) {
const [tokenizerJSON, tokenizerConfig] = await loadTokenizer(pretrained_model_name_or_path, {
progress_callback,
config,
cache_dir,
local_files_only,
revision,
legacy,
})
// Some tokenizers are saved with the "Fast" suffix, so we remove that if present.
const tokenizerName = tokenizerConfig.tokenizer_class?.replace(/Fast$/, '') ?? 'PreTrainedTokenizer';
let cls = this.TOKENIZER_CLASS_MAPPING[tokenizerName];
if (!cls) {
console.warn(`Unknown tokenizer class "${tokenizerName}", attempting to construct from base class.`);
cls = PreTrainedTokenizer;
}
return new cls(tokenizerJSON, tokenizerConfig);
}
}
| transformers.js/src/tokenizers.js/0 | {
"file_path": "transformers.js/src/tokenizers.js",
"repo_id": "transformers.js",
"token_count": 71479
} | 378 |
import { AutoConfig, env } from "../src/transformers.js";
import { getFile } from "../src/utils/hub.js";
// Initialise the testing environment
env.allowLocalModels = false;
env.useFSCache = false;
const TEST_DATA = {
"Xenova/bert-base-uncased": {
model_type: "bert",
},
};
describe("Configs", () => {
for (const [model_id, minimal_config] of Object.entries(TEST_DATA)) {
it(model_id, async () => {
const config = await AutoConfig.from_pretrained(model_id);
for (const [key, value] of Object.entries(minimal_config)) {
expect(config[key]).toEqual(value);
}
});
}
});
| transformers.js/tests/configs.test.js/0 | {
"file_path": "transformers.js/tests/configs.test.js",
"repo_id": "transformers.js",
"token_count": 238
} | 379 |
import { AutoImageProcessor, CLIPFeatureExtractor } from "../../../src/transformers.js";
import { load_cached_image } from "../../asset_cache.js";
import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js";
export default () => {
// CLIPFeatureExtractor
// - tests center crop (do_center_crop=true, crop_size=224)
describe("CLIPFeatureExtractor", () => {
const model_id = "Xenova/clip-vit-base-patch16";
/** @type {CLIPFeatureExtractor} */
let processor;
beforeAll(async () => {
processor = await AutoImageProcessor.from_pretrained(model_id);
}, MAX_PROCESSOR_LOAD_TIME);
it(
"default",
async () => {
const image = await load_cached_image("tiger");
const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image);
expect(pixel_values.dims).toEqual([1, 3, 224, 224]);
expect(pixel_values.mean().item()).toBeCloseTo(-0.06678297738282096, 6);
expect(original_sizes).toEqual([[408, 612]]);
expect(reshaped_input_sizes).toEqual([[224, 224]]);
},
MAX_TEST_EXECUTION_TIME,
);
});
};
| transformers.js/tests/models/clip/test_image_processing_clip.js/0 | {
"file_path": "transformers.js/tests/models/clip/test_image_processing_clip.js",
"repo_id": "transformers.js",
"token_count": 464
} | 380 |
import { Florence2Processor, Florence2ForConditionalGeneration, RawImage, full } from "../../../src/transformers.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";
export default () => {
const texts = ["Describe with a paragraph what is shown in the image.", "Locate the objects with category name in the image."];
// Empty white image
const dims = [224, 224, 3];
const image = new RawImage(new Uint8ClampedArray(dims[0] * dims[1] * dims[2]).fill(255), ...dims);
describe("Florence2ForConditionalGeneration", () => {
const model_id = "Xenova/tiny-random-Florence2ForConditionalGeneration";
/** @type {Florence2ForConditionalGeneration} */
let model;
/** @type {Florence2Processor} */
let processor;
beforeAll(async () => {
model = await Florence2ForConditionalGeneration.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
processor = await Florence2Processor.from_pretrained(model_id);
}, MAX_MODEL_LOAD_TIME);
it(
"forward",
async () => {
const inputs = await processor(image, texts[0]);
const { logits } = await model({
...inputs,
decoder_input_ids: full([1, 1], 2n),
});
expect(logits.dims).toEqual([1, 1, 51289]);
},
MAX_TEST_EXECUTION_TIME,
);
it.skip(
"batch_size=1",
async () => {
{
const text_inputs = processor.tokenizer(texts[0]);
const generate_ids = await model.generate({ ...text_inputs, max_new_tokens: 10 });
expect(generate_ids.tolist()).toEqual([[2n, 0n, 0n, 0n, 1n, 0n, 0n, 2n]]);
}
{
const inputs = await processor(image, texts[0]);
const generate_ids = await model.generate({ ...inputs, max_new_tokens: 10 });
expect(generate_ids.tolist()).toEqual([[2n, 0n, 0n, 0n, 1n, 0n, 0n, 2n]]);
}
},
MAX_TEST_EXECUTION_TIME,
);
it.skip(
"batch_size>1",
async () => {
{
const text_inputs = processor.tokenizer(texts, { padding: true });
const generate_ids = await model.generate({ ...text_inputs, max_new_tokens: 10 });
expect(generate_ids.tolist()).toEqual([
[2n, 0n, 0n, 0n, 1n, 0n, 0n, 2n],
[2n, 0n, 0n, 0n, 1n, 0n, 0n, 2n],
]);
}
{
const inputs = await processor([image, image], texts, { padding: true });
const generate_ids = await model.generate({ ...inputs, max_new_tokens: 10 });
expect(generate_ids.tolist()).toEqual([
[2n, 0n, 0n, 0n, 1n, 0n, 0n, 2n],
[2n, 0n, 0n, 0n, 1n, 0n, 0n, 2n],
]);
}
},
MAX_TEST_EXECUTION_TIME,
);
afterAll(async () => {
await model?.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
};
| transformers.js/tests/models/florence2/test_modeling_florence2.js/0 | {
"file_path": "transformers.js/tests/models/florence2/test_modeling_florence2.js",
"repo_id": "transformers.js",
"token_count": 1343
} | 381 |
import { PreTrainedTokenizer, HeliumForCausalLM } from "../../../src/transformers.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";
export default () => {
describe("HeliumForCausalLM", () => {
const model_id = "hf-internal-testing/tiny-random-HeliumForCausalLM";
/** @type {HeliumForCausalLM} */
let model;
/** @type {PreTrainedTokenizer} */
let tokenizer;
beforeAll(async () => {
model = await HeliumForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
tokenizer = await PreTrainedTokenizer.from_pretrained(model_id);
tokenizer.padding_side = "left";
}, MAX_MODEL_LOAD_TIME);
it(
"batch_size=1",
async () => {
const inputs = tokenizer("hello");
const outputs = await model.generate({
...inputs,
max_length: 10,
});
expect(outputs.tolist()).toEqual([[1n, 456n, 5660n, 1700n, 1486n, 37744n, 35669n, 39396n, 12024n, 32253n]]);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"batch_size>1",
async () => {
const inputs = tokenizer(["hello", "hello world"], { padding: true });
const outputs = await model.generate({
...inputs,
max_length: 10,
});
expect(outputs.tolist()).toEqual([
[3n, 1n, 456n, 5660n, 1700n, 1486n, 37744n, 35669n, 39396n, 12024n],
[1n, 456n, 5660n, 998n, 6136n, 2080n, 172n, 8843n, 40579n, 23953n],
]);
},
MAX_TEST_EXECUTION_TIME,
);
afterAll(async () => {
await model?.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
};
| transformers.js/tests/models/helium/test_modeling_helium.js/0 | {
"file_path": "transformers.js/tests/models/helium/test_modeling_helium.js",
"repo_id": "transformers.js",
"token_count": 783
} | 382 |
import { AutoImageProcessor, MobileViTFeatureExtractor, MobileViTImageProcessor } from "../../../src/transformers.js";
import { load_cached_image } from "../../asset_cache.js";
import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js";
export default () => {
// MobileViTFeatureExtractor
describe("MobileViTFeatureExtractor (default)", () => {
const model_id = "Xenova/mobilevit-small";
/** @type {MobileViTFeatureExtractor} */
let processor;
beforeAll(async () => {
processor = await AutoImageProcessor.from_pretrained(model_id);
}, MAX_PROCESSOR_LOAD_TIME);
it(
"default",
async () => {
const image = await load_cached_image("tiger");
const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image);
expect(pixel_values.dims).toEqual([1, 3, 256, 256]);
expect(pixel_values.mean().item()).toBeCloseTo(0.4599160496887033, 6);
expect(original_sizes).toEqual([[408, 612]]);
expect(reshaped_input_sizes).toEqual([[256, 256]]);
},
MAX_TEST_EXECUTION_TIME,
);
});
// MobileViTFeatureExtractor
// - tests not converting to rgb (do_convert_rgb=false)
describe("MobileViTFeatureExtractor (do_convert_rgb=false)", () => {
const model_id = "Xenova/quickdraw-mobilevit-small";
/** @type {MobileViTFeatureExtractor} */
let processor;
beforeAll(async () => {
processor = await AutoImageProcessor.from_pretrained(model_id);
}, MAX_PROCESSOR_LOAD_TIME);
it(
"grayscale image",
async () => {
const image = await load_cached_image("skateboard");
const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image);
expect(pixel_values.dims).toEqual([1, 1, 28, 28]);
expect(pixel_values.mean().item()).toBeCloseTo(0.08558923671585128, 6);
expect(original_sizes).toEqual([[28, 28]]);
expect(reshaped_input_sizes).toEqual([[28, 28]]);
},
MAX_TEST_EXECUTION_TIME,
);
});
// MobileViTImageProcessor
// - tests converting RGB to BGR (do_flip_channel_order=true)
describe("MobileViTImageProcessor (do_flip_channel_order=true)", () => {
const model_id = "Xenova/mobilevitv2-1.0-imagenet1k-256";
/** @type {MobileViTImageProcessor} */
let processor;
beforeAll(async () => {
processor = await AutoImageProcessor.from_pretrained(model_id);
}, MAX_PROCESSOR_LOAD_TIME);
it(
"RGB to BGR",
async () => {
const image = await load_cached_image("cats");
const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image);
const { data, dims } = pixel_values;
expect(dims).toEqual([1, 3, 256, 256]);
expect(pixel_values.mean().item()).toBeCloseTo(0.5215385556221008, 2);
expect(original_sizes).toEqual([[480, 640]]);
expect(reshaped_input_sizes).toEqual([[256, 256]]);
// Ensure RGB to BGR conversion
expect(data.slice(0, 3)).toBeCloseToNested([0.24313725531101227, 0.250980406999588, 0.364705890417099], 4);
},
MAX_TEST_EXECUTION_TIME,
);
});
};
| transformers.js/tests/models/mobilevit/test_image_processing_mobilevit.js/0 | {
"file_path": "transformers.js/tests/models/mobilevit/test_image_processing_mobilevit.js",
"repo_id": "transformers.js",
"token_count": 1322
} | 383 |
import { PatchTSTModel, PatchTSTForPrediction, Tensor } from "../../../src/transformers.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";
export default () => {
const dims = [64, 512, 7];
const prod = dims.reduce((a, b) => a * b, 1);
const past_values = new Tensor(
"float32",
Float32Array.from({ length: prod }, (_, i) => i / prod),
dims,
);
describe("PatchTSTModel", () => {
const model_id = "hf-internal-testing/tiny-random-PatchTSTModel";
/** @type {PatchTSTModel} */
let model;
beforeAll(async () => {
model = await PatchTSTModel.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
}, MAX_MODEL_LOAD_TIME);
it(
"default",
async () => {
const { last_hidden_state } = await model({ past_values });
const { num_input_channels, d_model } = model.config;
expect(last_hidden_state.dims).toEqual([dims[0], num_input_channels, 43, d_model]);
expect(last_hidden_state.mean().item()).toBeCloseTo(0.016672514379024506, 5);
},
MAX_TEST_EXECUTION_TIME,
);
afterAll(async () => {
await model?.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
describe("PatchTSTForPrediction", () => {
const model_id = "onnx-community/granite-timeseries-patchtst";
/** @type {PatchTSTForPrediction} */
let model;
beforeAll(async () => {
model = await PatchTSTForPrediction.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
}, MAX_MODEL_LOAD_TIME);
it(
"default",
async () => {
const { prediction_outputs } = await model({ past_values });
const { prediction_length, num_input_channels } = model.config;
expect(prediction_outputs.dims).toEqual([dims[0], prediction_length, num_input_channels]);
expect(prediction_outputs.mean().item()).toBeCloseTo(0.506528377532959, 5);
},
MAX_TEST_EXECUTION_TIME,
);
afterAll(async () => {
await model?.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
};
| transformers.js/tests/models/patchtst/test_modeling_patchtst.js/0 | {
"file_path": "transformers.js/tests/models/patchtst/test_modeling_patchtst.js",
"repo_id": "transformers.js",
"token_count": 870
} | 384 |
import { VisionEncoderDecoderModel, full } from "../../../src/transformers.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";
export default () => {
describe("VisionEncoderDecoderModel", () => {
const model_id = "hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2";
/** @type {VisionEncoderDecoderModel} */
let model;
beforeAll(async () => {
model = await VisionEncoderDecoderModel.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
}, MAX_MODEL_LOAD_TIME);
it(
"batch_size=1",
async () => {
const outputs = await model.generate({
pixel_values: full([1, 3, 30, 30], -1.0),
max_length: 5,
});
expect(outputs.tolist()).toEqual([[0n, 400n, 400n, 400n, 400n]]);
},
MAX_TEST_EXECUTION_TIME,
);
// TODO: Add back
// it('batch_size>1', async () => {
// const outputs = await model.generate({
// pixel_values: cat([
// full([1, 3, 30, 30], -1.0),
// full([1, 3, 30, 30], 0.0),
// ]),
// max_length: 5,
// });
// expect(outputs.tolist()).toEqual([
// // Generation continues
// [0n, 400n, 400n, 400n, 400n],
// // Finishes early. 1023 is the padding token
// [0n, 0n, 1023n, 1023n, 1023n],
// ]);
// }, MAX_TEST_EXECUTION_TIME);
afterAll(async () => {
await model?.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
};
| transformers.js/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.js/0 | {
"file_path": "transformers.js/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.js",
"repo_id": "transformers.js",
"token_count": 752
} | 385 |
import { pipeline, DepthEstimationPipeline } from "../../src/transformers.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js";
import { load_cached_image } from "../asset_cache.js";
const PIPELINE_ID = "depth-estimation";
export default () => {
describe("Depth Estimation", () => {
const model_id = "hf-internal-testing/tiny-random-DPTForDepthEstimation";
/** @type {DepthEstimationPipeline} */
let pipe;
let images;
beforeAll(async () => {
pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
images = await Promise.all([load_cached_image("white_image"), load_cached_image("blue_image")]);
}, MAX_MODEL_LOAD_TIME);
it("should be an instance of DepthEstimationPipeline", () => {
expect(pipe).toBeInstanceOf(DepthEstimationPipeline);
});
describe("batch_size=1", () => {
it(
"default",
async () => {
const output = await pipe(images[0]);
expect(output.predicted_depth.dims).toEqual([224, 224]);
expect(output.predicted_depth.mean().item()).toBeCloseTo(0.000006106501587055391, 6);
expect(output.depth.size).toEqual(images[0].size);
},
MAX_TEST_EXECUTION_TIME,
);
});
describe("batch_size>1", () => {
it(
"default",
async () => {
const output = await pipe(images);
expect(output).toHaveLength(images.length);
expect(output[0].predicted_depth.dims).toEqual([224, 224]);
expect(output[0].predicted_depth.mean().item()).toBeCloseTo(0.000006106501587055391, 6);
expect(output[0].depth.size).toEqual(images[0].size);
expect(output[1].predicted_depth.dims).toEqual([224, 224]);
expect(output[1].predicted_depth.mean().item()).toBeCloseTo(0.0000014548650142387487, 6);
expect(output[1].depth.size).toEqual(images[1].size);
},
MAX_TEST_EXECUTION_TIME,
);
});
afterAll(async () => {
await pipe.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
};
| transformers.js/tests/pipelines/test_pipelines_depth_estimation.js/0 | {
"file_path": "transformers.js/tests/pipelines/test_pipelines_depth_estimation.js",
"repo_id": "transformers.js",
"token_count": 917
} | 386 |
import { pipeline, TokenClassificationPipeline } from "../../src/transformers.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js";
const PIPELINE_ID = "token-classification";
export default () => {
describe("Token Classification", () => {
const model_id = "hf-internal-testing/tiny-random-BertForTokenClassification";
/** @type {TokenClassificationPipeline} */
let pipe;
beforeAll(async () => {
pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
}, MAX_MODEL_LOAD_TIME);
it("should be an instance of TokenClassificationPipeline", () => {
expect(pipe).toBeInstanceOf(TokenClassificationPipeline);
});
describe("batch_size=1", () => {
it(
"default",
async () => {
const output = await pipe("1 2 3");
// TODO: Add start/end to target
const target = [
{
entity: "LABEL_0",
score: 0.5292708,
index: 1,
word: "1",
// 'start': 0, 'end': 1
},
{
entity: "LABEL_0",
score: 0.5353687,
index: 2,
word: "2",
// 'start': 2, 'end': 3
},
{
entity: "LABEL_1",
score: 0.51381934,
index: 3,
word: "3",
// 'start': 4, 'end': 5
},
];
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"custom (ignore_labels set)",
async () => {
const output = await pipe("1 2 3", { ignore_labels: ["LABEL_0"] });
const target = [
{
entity: "LABEL_1",
score: 0.51381934,
index: 3,
word: "3",
// 'start': 4, 'end': 5
},
];
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
});
describe("batch_size>1", () => {
it(
"default",
async () => {
const output = await pipe(["1 2 3", "4 5"]);
const target = [
[
{
entity: "LABEL_0",
score: 0.5292708,
index: 1,
word: "1",
// 'start': 0, 'end': 1
},
{
entity: "LABEL_0",
score: 0.5353687,
index: 2,
word: "2",
// 'start': 2, 'end': 3
},
{
entity: "LABEL_1",
score: 0.51381934,
index: 3,
word: "3",
// 'start': 4, 'end': 5
},
],
[
{
entity: "LABEL_0",
score: 0.5432807,
index: 1,
word: "4",
// 'start': 0, 'end': 1
},
{
entity: "LABEL_1",
score: 0.5007693,
index: 2,
word: "5",
// 'start': 2, 'end': 3
},
],
];
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"custom (ignore_labels set)",
async () => {
const output = await pipe(["1 2 3", "4 5"], { ignore_labels: ["LABEL_0"] });
const target = [
[
{
entity: "LABEL_1",
score: 0.51381934,
index: 3,
word: "3",
// 'start': 4, 'end': 5
},
],
[
{
entity: "LABEL_1",
score: 0.5007693,
index: 2,
word: "5",
// 'start': 2, 'end': 3
},
],
];
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
});
afterAll(async () => {
await pipe.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
};
| transformers.js/tests/pipelines/test_pipelines_token_classification.js/0 | {
"file_path": "transformers.js/tests/pipelines/test_pipelines_token_classification.js",
"repo_id": "transformers.js",
"token_count": 2608
} | 387 |
import { Tensor, interpolate_4d, matmul, rfft, slice } from "../../src/transformers.js";
import { init } from "../init.js";
// Initialise the testing environment
init();
function expectToBeCloseToArray(actual, expected) {
expect(actual.length).toEqual(expected.length);
actual.forEach((x, i) => expect(x).toBeCloseTo(expected[i], 6));
}
function range(start, stop = undefined, step = 1) {
if (stop === undefined) {
stop = start;
start = 0;
}
const result = [];
for (let i = start; i < stop; i += step) {
result.push(i);
}
return result;
}
describe("Tensor operations", () => {
describe("interpolate", () => {
describe("downscale", () => {
const input = new Tensor(
"float32",
new Float32Array(2 * 3 * 4 * 5).map((_, i) => i),
[2, 3, 4, 5],
);
const size = [2, 3, 3, 2];
it("bilinear", async () => {
const resized = await interpolate_4d(input, { mode: "bilinear", size });
const target = new Float32Array(
[
[
[
[1.5833335, 4.0833335],
[8.25, 10.75],
[14.916668, 17.416668],
],
[
[21.583332, 24.083334],
[28.25, 30.75],
[34.916668, 37.416668],
],
[
[41.583332, 44.083332],
[48.25, 50.75],
[54.916668, 57.416668],
],
],
[
[
[61.583332, 64.083336],
[68.25, 70.75],
[74.916664, 77.41667],
],
[
[81.58333, 84.083336],
[88.25, 90.75],
[94.91667, 97.41667],
],
[
[101.583336, 104.08333],
[108.25, 110.75],
[114.916664, 117.416664],
],
],
].flat(Infinity),
);
expectToBeCloseToArray(target, resized.data);
});
it("bicubic", async () => {
const resized = await interpolate_4d(input, { mode: "bicubic", size });
const target = new Float32Array(
[
[
[
[1.2987545, 3.9628172],
[8.167969, 10.832031],
[15.037184, 17.701244],
],
[
[21.298756, 23.962818],
[28.167969, 30.832031],
[35.037186, 37.701252],
],
[
[41.298756, 43.96282],
[48.16797, 50.83203],
[55.037193, 57.701256],
],
],
[
[
[61.29875, 63.96282],
[68.16797, 70.83203],
[75.03719, 77.701256],
],
[
[81.29875, 83.96282],
[88.16797, 90.83203],
[95.03721, 97.70126],
],
[
[101.29875, 103.962814],
[108.16797, 110.83203],
[115.03721, 117.70127],
],
],
].flat(Infinity),
);
expectToBeCloseToArray(target, resized.data);
});
});
describe("upscale", () => {
const input = new Tensor(
"float32",
new Float32Array(2 * 3 * 3 * 2).map((_, i) => i),
[2, 3, 3, 2],
);
const size = [2, 3, 4, 5];
it("bilinear", async () => {
const resized = await interpolate_4d(input, { mode: "bilinear", size });
const target = new Float32Array(
[
[
[
[0.0, 0.1, 0.5, 0.9, 1.0],
[1.25, 1.35, 1.75, 2.15, 2.25],
[2.75, 2.85, 3.25, 3.65, 3.75],
[4.0, 4.1, 4.5, 4.9, 5.0],
],
[
[6.0, 6.1, 6.5, 6.9, 7.0],
[7.25, 7.35, 7.75, 8.15, 8.25],
[8.75, 8.85, 9.25, 9.65, 9.75],
[10.0, 10.1, 10.5, 10.9, 11.0],
],
[
[12.0, 12.1, 12.5, 12.9, 13.0],
[13.25, 13.35, 13.75, 14.15, 14.25],
[14.75, 14.85, 15.25, 15.65, 15.75],
[16.0, 16.1, 16.5, 16.9, 17.0],
],
],
[
[
[18.0, 18.1, 18.5, 18.9, 19.0],
[19.25, 19.35, 19.75, 20.15, 20.25],
[20.75, 20.85, 21.25, 21.65, 21.75],
[22.0, 22.1, 22.5, 22.9, 23.0],
],
[
[24.0, 24.1, 24.5, 24.9, 25.0],
[25.25, 25.35, 25.75, 26.15, 26.25],
[26.75, 26.85, 27.25, 27.65, 27.75],
[28.0, 28.1, 28.5, 28.9, 29.0],
],
[
[30.0, 30.1, 30.5, 30.9, 31.0],
[31.25, 31.35, 31.75, 32.15, 32.25],
[32.75, 32.85, 33.25, 33.65, 33.75],
[34.0, 34.1, 34.5, 34.9, 35.0],
],
],
].flat(Infinity),
);
expectToBeCloseToArray(target, resized.data);
});
it("bicubic", async () => {
const resized = await interpolate_4d(input, { mode: "bicubic", size });
const target = new Float32Array(
[
[
[
[-0.253804475069046, -0.06155451014637947, 0.3564453125, 0.7744455337524414, 0.9666945934295654],
[0.9493208527565002, 1.1415706872940063, 1.5595703125, 1.977570652961731, 2.1698191165924072],
[2.8301806449890137, 3.022430181503296, 3.4404296875, 3.8584301471710205, 4.050677299499512],
[4.033306121826172, 4.225555419921875, 4.6435546875, 5.061554908752441, 5.253802299499512],
],
[
[5.746196269989014, 5.938446998596191, 6.3564453125, 6.774445533752441, 6.966691493988037],
[6.94932222366333, 7.14157247543335, 7.5595703125, 7.977570056915283, 8.169816970825195],
[8.830181121826172, 9.022432327270508, 9.4404296875, 9.858429908752441, 10.050675392150879],
[10.033307075500488, 10.225557327270508, 10.6435546875, 11.061556816101074, 11.253799438476562],
],
[
[11.746198654174805, 11.938446998596191, 12.3564453125, 12.774446487426758, 12.966689109802246],
[12.949322700500488, 13.141572952270508, 13.5595703125, 13.977571487426758, 14.16981315612793],
[14.830183029174805, 15.022432327270508, 15.4404296875, 15.858430862426758, 16.05067253112793],
[16.033309936523438, 16.225557327270508, 16.6435546875, 17.061555862426758, 17.25379753112793],
],
],
[
[
[17.746200561523438, 17.938447952270508, 18.3564453125, 18.774446487426758, 18.966686248779297],
[18.949325561523438, 19.14157485961914, 19.5595703125, 19.977571487426758, 20.169809341430664],
[20.830184936523438, 21.02243423461914, 21.4404296875, 21.858430862426758, 22.050668716430664],
[22.03331184387207, 22.225557327270508, 22.6435546875, 23.061555862426758, 23.25379180908203],
],
[
[23.746200561523438, 23.93844985961914, 24.3564453125, 24.77444839477539, 24.96668243408203],
[24.949325561523438, 25.141576766967773, 25.5595703125, 25.977571487426758, 26.1698055267334],
[26.830184936523438, 27.022436141967773, 27.4404296875, 27.858430862426758, 28.05066680908203],
[28.033313751220703, 28.225557327270508, 28.6435546875, 29.061555862426758, 29.25379180908203],
],
[
[29.74620246887207, 29.93844985961914, 30.3564453125, 30.77444839477539, 30.96668243408203],
[30.949325561523438, 31.141578674316406, 31.5595703125, 31.977571487426758, 32.16980743408203],
[32.8301887512207, 33.022438049316406, 33.4404296875, 33.858428955078125, 34.050662994384766],
[34.03330993652344, 34.22556686401367, 34.6435546875, 35.06155014038086, 35.253787994384766],
],
],
].flat(Infinity),
);
expectToBeCloseToArray(target, resized.data);
});
});
});
describe("matmul", () => {
it("(2, 5) @ (5, 4) -> (2, 4)", async () => {
const a = new Tensor("float32", range(10), [2, 5]);
const b = new Tensor("float32", range(20), [5, 4]);
const result = await matmul(a, b);
const target = new Float32Array(
[
[120.0, 130.0, 140.0, 150.0],
[320.0, 355.0, 390.0, 425.0],
].flat(),
);
expectToBeCloseToArray(target, result.data);
});
});
describe("rfft", () => {
it("non-power of 2", async () => {
const rows = 2;
const cols = 3;
const input = new Tensor("float32", range(rows * cols), [rows, cols]);
const dim = new Tensor("int64", [-1n], []);
const result = await rfft(input, dim);
const target = new Float32Array(
[
[
[3, 0],
[-1.5, 0.8660262823104858],
],
[
[12, 0],
[-1.5, 0.866027295589447],
],
].flat(Infinity),
);
expectToBeCloseToArray(target, result.data);
});
it("power of 2", async () => {
const rows = 2;
const cols = 4;
const input = new Tensor("float32", range(rows * cols), [rows, cols]);
const dim = new Tensor("int64", [-1n], []);
const result = await rfft(input, dim);
const target = new Float32Array(
[
[
[6, 0],
[-2, 2],
[-2, 0],
],
[
[22, 0],
[-2, 2],
[-2, 0],
],
].flat(Infinity),
);
expectToBeCloseToArray(target, result.data);
});
});
describe("slice", () => {
it("should slice", async () => {
const input = new Tensor("float32", [1, 2, 3, 4, 5, 6, 7, 8, 9], [3, 3]);
const target = new Float32Array(
[
[1, 2],
[4, 5],
].flat(Infinity),
);
const starts = [0, 0];
const ends = [2, 2];
const axes = [0, 1];
const steps = [1, 1];
const result = await slice(input, starts, ends, axes, steps);
expectToBeCloseToArray(target, result.data);
});
});
});
| transformers.js/tests/utils/tensor_ops.test.js/0 | {
"file_path": "transformers.js/tests/utils/tensor_ops.test.js",
"repo_id": "transformers.js",
"token_count": 6379
} | 388 |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib.util
import logging
import os
import sys
import json
import uuid
from datetime import datetime
import pandas as pd
try:
from psycopg2.extensions import register_adapter
from psycopg2.extras import Json
register_adapter(dict, Json)
PSYCOPG2_AVAILABLE = True
except ImportError:
PSYCOPG2_AVAILABLE = False
class ImportModuleException(Exception):
pass
class MetricsRecorder:
def __init__(
self, connection, logger: logging.Logger, repository: str, branch: str, commit_id: str, commit_msg: str,
collect_csv_data: bool = True
):
self.conn = connection
self.use_database = connection is not None
if self.use_database:
self.conn.autocommit = True
self.logger = logger
self.repository = repository
self.branch = branch
self.commit_id = commit_id
self.commit_msg = commit_msg
self.collect_csv_data = collect_csv_data
# For CSV export - store all data in pandas DataFrames (only if CSV collection is enabled)
if self.collect_csv_data:
# Initialize empty DataFrames with proper schemas
self.benchmarks_df = pd.DataFrame(columns=[
'benchmark_id', 'repository', 'branch', 'commit_id', 'commit_message',
'metadata', 'created_at'
])
self.device_measurements_df = pd.DataFrame(columns=[
'benchmark_id', 'cpu_util', 'mem_megabytes', 'gpu_util',
'gpu_mem_megabytes', 'time'
])
self.model_measurements_df = pd.DataFrame(columns=[
'benchmark_id', 'time', 'model_load_time', 'first_eager_forward_pass_time_secs',
'second_eager_forward_pass_time_secs', 'first_eager_generate_time_secs',
'second_eager_generate_time_secs', 'time_to_first_token_secs',
'time_to_second_token_secs', 'time_to_third_token_secs',
'time_to_next_token_mean_secs', 'first_compile_generate_time_secs',
'second_compile_generate_time_secs', 'third_compile_generate_time_secs',
'fourth_compile_generate_time_secs'
])
else:
self.benchmarks_df = None
self.device_measurements_df = None
self.model_measurements_df = None
def initialise_benchmark(self, metadata: dict[str, str]) -> str:
"""
Creates a new benchmark, returns the benchmark id (UUID)
"""
# Generate a unique UUID for this benchmark
benchmark_id = str(uuid.uuid4())
if self.use_database:
with self.conn.cursor() as cur:
cur.execute(
"INSERT INTO benchmarks (benchmark_id, repository, branch, commit_id, commit_message, metadata) VALUES (%s, %s, %s, %s, %s, %s)",
(benchmark_id, self.repository, self.branch, self.commit_id, self.commit_msg, metadata),
)
self.logger.debug(f"initialised benchmark #{benchmark_id}")
# Store benchmark data for CSV export (if enabled)
if self.collect_csv_data:
# Add row to pandas DataFrame
new_row = pd.DataFrame([{
'benchmark_id': benchmark_id,
'repository': self.repository,
'branch': self.branch,
'commit_id': self.commit_id,
'commit_message': self.commit_msg,
'metadata': json.dumps(metadata),
'created_at': datetime.utcnow().isoformat()
}])
self.benchmarks_df = pd.concat([self.benchmarks_df, new_row], ignore_index=True)
mode_info = []
if self.use_database:
mode_info.append("database")
if self.collect_csv_data:
mode_info.append("CSV")
mode_str = " + ".join(mode_info) if mode_info else "no storage"
self.logger.debug(f"initialised benchmark #{benchmark_id} ({mode_str} mode)")
return benchmark_id
def collect_device_measurements(self, benchmark_id: str, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes):
"""
Collect device metrics, such as CPU & GPU usage. These are "static", as in you cannot pass arbitrary arguments to the function.
"""
# Store device measurements for CSV export (if enabled)
if self.collect_csv_data:
# Add row to pandas DataFrame
new_row = pd.DataFrame([{
'benchmark_id': benchmark_id,
'cpu_util': cpu_util,
'mem_megabytes': mem_megabytes,
'gpu_util': gpu_util,
'gpu_mem_megabytes': gpu_mem_megabytes,
'time': datetime.utcnow().isoformat()
}])
self.device_measurements_df = pd.concat([self.device_measurements_df, new_row], ignore_index=True)
# Store in database if available
if self.use_database:
with self.conn.cursor() as cur:
cur.execute(
"INSERT INTO device_measurements (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes) VALUES (%s, %s, %s, %s, %s)",
(benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes),
)
self.logger.debug(
f"collected device measurements for benchmark #{benchmark_id} [CPU util: {cpu_util}, mem MBs: {mem_megabytes}, GPU util: {gpu_util}, GPU mem MBs: {gpu_mem_megabytes}]"
)
def collect_model_measurements(self, benchmark_id: str, measurements: dict[str, float]):
# Store model measurements for CSV export (if enabled)
if self.collect_csv_data:
# Add row to pandas DataFrame with flattened measurements
row_data = {
'benchmark_id': benchmark_id,
'time': datetime.utcnow().isoformat()
}
# Flatten the measurements dict into the row
row_data.update(measurements)
new_row = pd.DataFrame([row_data])
self.model_measurements_df = pd.concat([self.model_measurements_df, new_row], ignore_index=True)
# Store in database if available
if self.use_database:
with self.conn.cursor() as cur:
cur.execute(
"""
INSERT INTO model_measurements (
benchmark_id,
measurements
) VALUES (%s, %s)
""",
(
benchmark_id,
measurements,
),
)
self.logger.debug(f"collected model measurements for benchmark #{benchmark_id}: {measurements}")
def export_to_csv(self, output_dir: str = "benchmark_results"):
"""
Export all collected data to CSV files using pandas DataFrames
"""
if not self.collect_csv_data:
self.logger.warning("CSV data collection is disabled - no CSV files will be generated")
return
if not os.path.exists(output_dir):
os.makedirs(output_dir)
self.logger.info(f"Created output directory: {output_dir}")
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
files_created = []
# Export using pandas DataFrames
self._export_pandas_data(output_dir, timestamp, files_created)
self.logger.info(f"CSV export complete! Created {len(files_created)} files in {output_dir}")
def _export_pandas_data(self, output_dir: str, timestamp: str, files_created: list):
"""
Export CSV files using pandas DataFrames
"""
# Export benchmarks
benchmarks_file = os.path.join(output_dir, f"benchmarks_{timestamp}.csv")
self.benchmarks_df.to_csv(benchmarks_file, index=False)
files_created.append(benchmarks_file)
self.logger.info(f"Exported {len(self.benchmarks_df)} benchmark records to {benchmarks_file}")
# Export device measurements
device_file = os.path.join(output_dir, f"device_measurements_{timestamp}.csv")
self.device_measurements_df.to_csv(device_file, index=False)
files_created.append(device_file)
self.logger.info(f"Exported {len(self.device_measurements_df)} device measurement records to {device_file}")
# Export model measurements (already flattened)
model_file = os.path.join(output_dir, f"model_measurements_{timestamp}.csv")
self.model_measurements_df.to_csv(model_file, index=False)
files_created.append(model_file)
self.logger.info(f"Exported {len(self.model_measurements_df)} model measurement records to {model_file}")
# Create comprehensive summary using pandas operations
summary_file = os.path.join(output_dir, f"benchmark_summary_{timestamp}.csv")
self._create_summary(summary_file)
files_created.append(summary_file)
def _create_summary(self, summary_file: str):
"""
Create a comprehensive summary CSV using pandas operations
"""
if len(self.benchmarks_df) == 0:
# Create empty summary file
summary_df = pd.DataFrame()
summary_df.to_csv(summary_file, index=False)
self.logger.info(f"Created empty benchmark summary at {summary_file}")
return
# Start with benchmarks as the base
summary_df = self.benchmarks_df.copy()
# Add model measurements (join on benchmark_id)
if len(self.model_measurements_df) > 0:
# Drop 'time' column from model measurements to avoid conflicts
model_df = self.model_measurements_df.drop(columns=['time'], errors='ignore')
summary_df = summary_df.merge(model_df, on='benchmark_id', how='left')
# Calculate device measurement aggregates using pandas groupby
if len(self.device_measurements_df) > 0:
device_agg = self.device_measurements_df.groupby('benchmark_id').agg({
'cpu_util': ['mean', 'max', 'std', 'count'],
'mem_megabytes': ['mean', 'max', 'std'],
'gpu_util': ['mean', 'max', 'std'],
'gpu_mem_megabytes': ['mean', 'max', 'std']
}).round(3)
# Flatten column names
device_agg.columns = [f"{col[0]}_{col[1]}" for col in device_agg.columns]
device_agg = device_agg.reset_index()
# Rename count column to be more descriptive
if 'cpu_util_count' in device_agg.columns:
device_agg = device_agg.rename(columns={'cpu_util_count': 'device_measurement_count'})
# Merge with summary
summary_df = summary_df.merge(device_agg, on='benchmark_id', how='left')
# Export the comprehensive summary
summary_df.to_csv(summary_file, index=False)
self.logger.info(f"Created comprehensive benchmark summary with {len(summary_df)} records at {summary_file}")
def close(self):
if self.use_database and self.conn:
self.conn.close()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter("[%(levelname)s - %(asctime)s] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def parse_arguments() -> tuple[str, str, str, str, bool, str]:
"""
Parse command line arguments for the benchmarking CLI.
"""
parser = argparse.ArgumentParser(description="CLI for benchmarking the huggingface/transformers.")
parser.add_argument(
"repository",
type=str,
help="The repository name on which the benchmarking is performed.",
)
parser.add_argument(
"branch",
type=str,
help="The branch name on which the benchmarking is performed.",
)
parser.add_argument(
"commit_id",
type=str,
help="The commit hash on which the benchmarking is performed.",
)
parser.add_argument(
"commit_msg",
type=str,
help="The commit message associated with the commit, truncated to 70 characters.",
)
parser.add_argument(
"--csv",
action="store_true",
default=False,
help="Enable CSV output files generation."
)
parser.add_argument(
"--csv-output-dir",
type=str,
default="benchmark_results",
help="Directory for CSV output files (default: benchmark_results)."
)
args = parser.parse_args()
# CSV is disabled by default, only enabled when --csv is used
generate_csv = args.csv
return args.repository, args.branch, args.commit_id, args.commit_msg, generate_csv, args.csv_output_dir
def import_from_path(module_name, file_path):
try:
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
return module
except Exception as e:
raise ImportModuleException(f"failed to load python module: {e}")
def create_database_connection():
"""
Try to create a database connection. Returns None if connection fails.
"""
if not PSYCOPG2_AVAILABLE:
logger.warning("psycopg2 not available - running in CSV-only mode")
return None
try:
import psycopg2
conn = psycopg2.connect("dbname=metrics")
logger.info("Successfully connected to database")
return conn
except Exception as e:
logger.warning(f"Failed to connect to database: {e}. Running in CSV-only mode")
return None
def create_global_metrics_recorder(repository: str, branch: str, commit_id: str, commit_msg: str,
generate_csv: bool = False) -> MetricsRecorder:
"""
Create a global metrics recorder that will be used across all benchmarks.
"""
connection = create_database_connection()
recorder = MetricsRecorder(connection, logger, repository, branch, commit_id, commit_msg, generate_csv)
# Log the storage mode
storage_modes = []
if connection is not None:
storage_modes.append("database")
if generate_csv:
storage_modes.append("CSV")
if not storage_modes:
logger.warning("Running benchmarks with NO data storage (no database connection, CSV disabled)")
logger.warning("Use --csv flag to enable CSV output when database is unavailable")
else:
logger.info(f"Running benchmarks with: {' + '.join(storage_modes)} storage")
return recorder
if __name__ == "__main__":
benchmarks_folder_path = os.path.dirname(os.path.realpath(__file__))
benches_folder_path = os.path.join(benchmarks_folder_path, "benches")
repository, branch, commit_id, commit_msg, generate_csv, csv_output_dir = parse_arguments()
# Create a global metrics recorder
global_metrics_recorder = create_global_metrics_recorder(repository, branch, commit_id, commit_msg, generate_csv)
successful_benchmarks = 0
failed_benchmarks = 0
# Automatically discover all benchmark modules in benches/ folder
benchmark_modules = []
if os.path.exists(benches_folder_path):
logger.debug(f"Scanning for benchmarks in: {benches_folder_path}")
for entry in os.scandir(benches_folder_path):
if not entry.name.endswith(".py"):
continue
if entry.name.startswith("__"): # Skip __init__.py, __pycache__, etc.
continue
# Check if the file has a run_benchmark function
try:
logger.debug(f"checking if benches/{entry.name} has run_benchmark function")
module = import_from_path(entry.name.split(".")[0], entry.path)
if hasattr(module, 'run_benchmark'):
benchmark_modules.append(entry.name)
logger.debug(f"discovered benchmark: {entry.name}")
else:
logger.debug(f"skipping {entry.name} - no run_benchmark function found")
except Exception as e:
logger.debug(f"failed to check benches/{entry.name}: {e}")
else:
logger.warning(f"Benches directory not found: {benches_folder_path}")
if benchmark_modules:
logger.info(f"Discovered {len(benchmark_modules)} benchmark(s): {benchmark_modules}")
else:
logger.warning("No benchmark modules found in benches/ directory")
for module_name in benchmark_modules:
module_path = os.path.join(benches_folder_path, module_name)
try:
logger.debug(f"loading: {module_name}")
module = import_from_path(module_name.split(".")[0], module_path)
logger.info(f"running benchmarks in: {module_name}")
# Check if the module has an updated run_benchmark function that accepts metrics_recorder
try:
# Try the new signature first
module.run_benchmark(logger, repository, branch, commit_id, commit_msg, global_metrics_recorder)
except TypeError:
# Fall back to the old signature for backward compatibility
logger.warning(f"Module {module_name} using old run_benchmark signature - database connection will be created per module")
module.run_benchmark(logger, repository, branch, commit_id, commit_msg)
successful_benchmarks += 1
except ImportModuleException as e:
logger.error(e)
failed_benchmarks += 1
except Exception as e:
logger.error(f"error running benchmarks for {module_name}: {e}")
failed_benchmarks += 1
# Export CSV results at the end (if enabled)
try:
if generate_csv:
global_metrics_recorder.export_to_csv(csv_output_dir)
logger.info(f"CSV reports have been generated and saved to the {csv_output_dir} directory")
else:
logger.info("CSV generation disabled - no CSV files created (use --csv to enable)")
logger.info(f"Benchmark run completed. Successful: {successful_benchmarks}, Failed: {failed_benchmarks}")
except Exception as e:
logger.error(f"Failed to export CSV results: {e}")
finally:
global_metrics_recorder.close()
| transformers/benchmark/benchmarks_entrypoint.py/0 | {
"file_path": "transformers/benchmark/benchmarks_entrypoint.py",
"repo_id": "transformers",
"token_count": 8546
} | 389 |
FROM python:3.9-slim
ENV PYTHONDONTWRITEBYTECODE=1
ARG REF=main
USER root
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake g++
ENV UV_PYTHON=/usr/local/bin/python
RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]"
RUN uv pip install --no-cache-dir "protobuf==3.20.3" tensorflow_probability
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
| transformers/docker/pipeline-tf.dockerfile/0 | {
"file_path": "transformers/docker/pipeline-tf.dockerfile",
"repo_id": "transformers",
"token_count": 227
} | 390 |
FROM google/cloud-sdk:slim
# Build args.
ARG GITHUB_REF=refs/heads/main
# TODO: This Dockerfile installs pytorch/xla 3.6 wheels. There are also 3.7
# wheels available; see below.
ENV PYTHON_VERSION=3.6
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
cmake \
git \
curl \
ca-certificates
# Install conda and python.
# NOTE new Conda does not forward the exit status... https://github.com/conda/conda/issues/8385
RUN curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-4.7.12-Linux-x86_64.sh && \
chmod +x ~/miniconda.sh && \
~/miniconda.sh -b && \
rm ~/miniconda.sh
ENV PATH=/root/miniconda3/bin:$PATH
RUN conda create -y --name container python=$PYTHON_VERSION
# Run the rest of commands within the new conda env.
# Use absolute path to appease Codefactor.
SHELL ["/root/miniconda3/bin/conda", "run", "-n", "container", "/bin/bash", "-c"]
RUN conda install -y python=$PYTHON_VERSION mkl
RUN pip uninstall -y torch && \
# Python 3.7 wheels are available. Replace cp36-cp36m with cp37-cp37m
gsutil cp "gs://tpu-pytorch/wheels/torch-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" . && \
gsutil cp "gs://tpu-pytorch/wheels/torch_xla-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" . && \
gsutil cp "gs://tpu-pytorch/wheels/torchvision-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" . && \
pip install "torch-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" && \
pip install "torch_xla-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" && \
pip install "torchvision-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" && \
rm "torch-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" && \
rm "torch_xla-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" && \
rm "torchvision-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" && \
apt-get install -y libomp5
ENV LD_LIBRARY_PATH=/root/miniconda3/envs/container/lib
# Install huggingface/transformers at the current PR, plus dependencies.
RUN git clone https://github.com/huggingface/transformers.git && \
cd transformers && \
git fetch origin $GITHUB_REF:CI && \
git checkout CI && \
cd .. && \
pip install ./transformers && \
pip install -r ./transformers/examples/pytorch/_test_requirements.txt && \
pip install pytest
RUN python -c "import torch_xla; print(torch_xla.__version__)"
RUN python -c "import transformers as trf; print(trf.__version__)"
RUN conda init bash
COPY docker-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
CMD ["bash"]
| transformers/docker/transformers-pytorch-tpu/Dockerfile/0 | {
"file_path": "transformers/docker/transformers-pytorch-tpu/Dockerfile",
"repo_id": "transformers",
"token_count": 1235
} | 391 |
# Templates for Chat Models
## Introduction
An increasingly common use case for LLMs is **chat**. In a chat context, rather than continuing a single string of text (as is the case with a standard language model), the model instead continues a conversation that consists of one or more messages, each of which includes a role, such as "user" or "assistant", as well as the message text.
Much like tokenization, different models expect very different input formats for chat. This is the reason we added **chat templates** as a feature. Chat templates are part of the tokenizer. They specify how to convert conversations, represented as lists of messages, into a single tokenizable string in the format that the model expects.
Let's make this concrete with a quick example using the `BlenderBot` model. BlenderBot has an extremely simple default template, which mostly just adds whitespace between rounds of dialogue:
```python
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
>>> chat = [
... {"role": "user", "content": "Hello, how are you?"},
... {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
... {"role": "user", "content": "I'd like to show off how chat templating works!"},
... ]
>>> tokenizer.apply_chat_template(chat, tokenize=False)
" Hello, how are you? I'm doing great. How can I help you today? I'd like to show off how chat templating works!</s>"
```
Notice how the entire chat is condensed into a single string. If we use `tokenize=True`, which is the default setting, that string will also be tokenized for us. To see a more complex template in action, though, let's use the `mistralai/Mistral-7B-Instruct-v0.1` model.
```python
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
>>> chat = [
... {"role": "user", "content": "Hello, how are you?"},
... {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
... {"role": "user", "content": "I'd like to show off how chat templating works!"},
... ]
>>> tokenizer.apply_chat_template(chat, tokenize=False)
"<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]</s>"
```
Note that this time, the tokenizer has added the control tokens `[INST]` and `[/INST]` to indicate the start and end of user messages (but not assistant messages!), and the entire conversation has been condensed into a single string. If we use `tokenize=True`, which is the default, that string will also be tokenized.
Now, try the same code, but swap in the `HuggingFaceH4/zephyr-7b-beta` model instead, and you'll get:
```text
<|user|>
Hello, how are you?</s>
<|assistant|>
I'm doing great. How can I help you today?</s>
<|user|>
I'd like to show off how chat templating works!</s>
```
Both Zephyr and Mistral-Instruct were fine-tuned from the same base model, Mistral-7B-v0.1. However, they were trained with totally different chat formats. Without chat templates, you would have to write manual formatting code for each model, and it's very easy to make minor errors that hurt performance! Chat templates handle the details of formatting for you, allowing you to write universal code that works for any model.
## How do I use chat templates?
As you can see in the example above, chat templates are easy to use. Simply build a list of messages, with `role` and `content` keys, and then pass it to the [`~PreTrainedTokenizer.apply_chat_template`] method. Once you do that, you'll get output that's ready to go! When using chat templates as input for model generation, it's also a good idea to use `add_generation_prompt=True` to add a [generation prompt](#what-are-generation-prompts).
Here's an example of preparing input for `model.generate()`, using Zephyr again:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
checkpoint = "HuggingFaceH4/zephyr-7b-beta"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint) # You may want to use bfloat16 and/or move to GPU here
messages = [
{
"role": "system",
"content": "You are a friendly chatbot who always responds in the style of a pirate",
},
{"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
print(tokenizer.decode(tokenized_chat[0]))
```
This will yield a string in the input format that Zephyr expects.
```text
<|system|>
You are a friendly chatbot who always responds in the style of a pirate</s>
<|user|>
How many helicopters can a human eat in one sitting?</s>
<|assistant|>
```
Now that our input is formatted correctly for Zephyr, we can use the model to generate a response to the user's question:
```python
outputs = model.generate(tokenized_chat, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```
This will yield:
```text
<|system|>
You are a friendly chatbot who always responds in the style of a pirate</s>
<|user|>
How many helicopters can a human eat in one sitting?</s>
<|assistant|>
Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all.
```
That was easy after all!
## Is there an automated pipeline for chat?
Yes, there is! Text generation pipelines support chat inputs, which makes it easy to use chat models. In the past, we used to use a dedicated "ConversationalPipeline" class, but this has now been deprecated and its functionality has been merged into the [`TextGenerationPipeline`]. Let's try the Zephyr example again, but this time using a pipeline:
```python
from transformers import pipeline
pipe = pipeline("text-generation", "HuggingFaceH4/zephyr-7b-beta")
messages = [
{
"role": "system",
"content": "You are a friendly chatbot who always responds in the style of a pirate",
},
{"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
print(pipe(messages, max_new_tokens=128)[0]['generated_text'][-1]) # Print the assistant's response
```
```text
{'role': 'assistant', 'content': "Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all."}
```
The pipeline will take care of all the details of tokenization and calling `apply_chat_template` for you - once the model has a chat template, all you need to do is initialize the pipeline and pass it the list of messages!
## What are "generation prompts"?
You may have noticed that the `apply_chat_template` method has an `add_generation_prompt` argument. This argument tells the template to add tokens that indicate the start of a bot response. For example, consider the following chat:
```python
messages = [
{"role": "user", "content": "Hi there!"},
{"role": "assistant", "content": "Nice to meet you!"},
{"role": "user", "content": "Can I ask a question?"}
]
```
Here's what this will look like without a generation prompt, for a model that uses standard "ChatML" formatting:
```python
tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
"""<|im_start|>user
Hi there!<|im_end|>
<|im_start|>assistant
Nice to meet you!<|im_end|>
<|im_start|>user
Can I ask a question?<|im_end|>
"""
```
And here's what it looks like **with** a generation prompt:
```python
tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
"""<|im_start|>user
Hi there!<|im_end|>
<|im_start|>assistant
Nice to meet you!<|im_end|>
<|im_start|>user
Can I ask a question?<|im_end|>
<|im_start|>assistant
"""
```
Note that this time, we've added the tokens that indicate the start of a bot response. This ensures that when the model generates text it will write a bot response instead of doing something unexpected, like continuing the user's message. Remember, chat models are still just language models - they're trained to continue text, and chat is just a special kind of text to them! You need to guide them with appropriate control tokens, so they know what they're supposed to be doing.
Not all models require generation prompts. Some models, like LLaMA, don't have any special tokens before bot responses. In these cases, the `add_generation_prompt` argument will have no effect. The exact effect that `add_generation_prompt` has will depend on the template being used.
## What does "continue_final_message" do?
When passing a list of messages to `apply_chat_template` or `TextGenerationPipeline`, you can choose to format the chat so the model will continue the final message in the chat instead of starting a new one. This is done by removing any end-of-sequence tokens that indicate the end of the final message, so that the model will simply extend the final message when it begins to generate text. This is useful for "prefilling" the model's response.
Here's an example:
```python
chat = [
{"role": "user", "content": "Can you format the answer in JSON?"},
{"role": "assistant", "content": '{"name": "'},
]
formatted_chat = tokenizer.apply_chat_template(chat, tokenize=True, return_dict=True, continue_final_message=True)
model.generate(**formatted_chat)
```
The model will generate text that continues the JSON string, rather than starting a new message. This approach can be very useful for improving the accuracy of the model's instruction-following when you know how you want it to start its replies.
Because `add_generation_prompt` adds the tokens that start a new message, and `continue_final_message` removes any end-of-sequence tokens from the final message, it does not make sense to use them together - and you'll get an error if you try!
The default behaviour of `TextGenerationPipeline` is to set `add_generation_prompt=True` so that it starts a new message. However, if the final message in the input chat has the "assistant" role, it will assume that this message is a prefill and switch to `continue_final_message=True` instead, because most models do not support multiple consecutive assistant messages. You can override this behaviour by explicitly passing the `continue_final_message` argument when calling the pipeline.
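A minimal sketch of that override (assuming the installed transformers version accepts `continue_final_message` as a pipeline argument, which recent versions do; the model choice is illustrative):
```python
from transformers import pipeline

pipe = pipeline("text-generation", "HuggingFaceH4/zephyr-7b-beta")
chat = [
    {"role": "user", "content": "Can you format the answer in JSON?"},
    {"role": "assistant", "content": '{"name": "'},  # pre-filled start of the reply
]
# Force prefill continuation instead of relying on the role-based heuristic
out = pipe(chat, max_new_tokens=32, continue_final_message=True)
print(out[0]["generated_text"][-1])
```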
## Can I use chat templates in training?
Yes! This is a good way to ensure that the chat template matches the tokens the model sees during training. We recommend that you apply the chat template as a preprocessing step for your dataset. After this, you can simply continue like any other language model training task. When training, you should usually set `add_generation_prompt=False`, because the added tokens that prompt an assistant response will not be helpful during training. Let's see an example:
```python
from transformers import AutoTokenizer
from datasets import Dataset
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
chat1 = [
{"role": "user", "content": "Which is bigger, the moon or the sun?"},
{"role": "assistant", "content": "The sun."}
]
chat2 = [
{"role": "user", "content": "Which is bigger, a virus or a bacterium?"},
{"role": "assistant", "content": "A bacterium."}
]
dataset = Dataset.from_dict({"chat": [chat1, chat2]})
dataset = dataset.map(lambda x: {"formatted_chat": tokenizer.apply_chat_template(x["chat"], tokenize=False, add_generation_prompt=False)})
print(dataset['formatted_chat'][0])
```
And we get:
```text
<|user|>
Which is bigger, the moon or the sun?</s>
<|assistant|>
The sun.</s>
```
From here, just continue training like you would with a standard language modelling task, using the `formatted_chat` column.
<Tip>
By default, some tokenizers add special tokens like `<bos>` and `<eos>` to text they tokenize. Chat templates should already include all the special tokens they need, so additional special tokens will often be incorrect or duplicated, which will hurt model performance.
Therefore, if you format text with `apply_chat_template(tokenize=False)`, you should set the argument `add_special_tokens=False` when you tokenize that text later. If you use `apply_chat_template(tokenize=True)`, you don't need to worry about this!
</Tip>
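To make the tip above concrete, here is a minimal sketch of the two-step flow, reusing the `tokenizer` and a `chat` message list like the ones above:
```python
# Step 1: format only; the chat template inserts every special token itself
text = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
# Step 2: tokenize separately, without adding <bos>/<eos> a second time
model_inputs = tokenizer(text, add_special_tokens=False, return_tensors="pt")
```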
## Advanced: Extra inputs to chat templates
The only argument that `apply_chat_template` requires is `messages`. However, you can pass any keyword argument to `apply_chat_template` and it will be accessible inside the template. This gives you a lot of freedom to use chat templates for many things. There are no restrictions on the names or the formats of these arguments - you can pass strings, lists, dicts or whatever else you want.
That said, there are some common use-cases for these extra arguments, such as passing tools for function calling, or documents for retrieval-augmented generation. For these common cases, we have some opinionated recommendations about what the names and formats of these arguments should be, which are described in the sections below. We encourage model authors to make their chat templates compatible with this format, to make it easy to transfer tool-calling code between models.
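As a hedged illustration of an arbitrary extra argument: `formality` below is a made-up name, and it only has an effect if the model's Jinja template actually references it:
```python
formatted = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    formality="polite",  # hypothetical variable, visible in the template as {{ formality }}
)
```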
## Advanced: Tool use / function calling
"Tool use" LLMs can choose to call functions as external tools before generating an answer. When passing tools to a tool-use model, you can simply pass a list of functions to the `tools` argument:
```python
from datetime import datetime
def current_time():
"""Get the current local time as a string."""
return str(datetime.now())
def multiply(a: float, b: float):
"""
A function that multiplies two numbers
Args:
a: The first number to multiply
b: The second number to multiply
"""
return a * b
tools = [current_time, multiply]
model_input = tokenizer.apply_chat_template(
messages,
tools=tools
)
```
For this to work correctly, you should write your functions in the format above, so that they can be parsed correctly as tools. Specifically, you should follow these rules:
- The function should have a descriptive name.
- Every argument must have a type hint.
- The function must have a docstring in the standard Google style (in other words, an initial function description followed by an `Args:` block that describes the arguments, unless the function does not have any arguments).
- Do not include types in the `Args:` block. In other words, write `a: The first number to multiply`, not `a (int): The first number to multiply`. Type hints should go in the function header instead.
- The function can have a return type and a `Returns:` block in the docstring. However, these are optional because most tool-use models ignore them.
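Putting these rules together, a compliant tool function might look like the following sketch (the body is a dummy stand-in for a real weather lookup):
```python
def get_current_temperature(location: str, unit: str) -> float:
    """
    Get the current temperature at a location.
    Args:
        location: The location to get the temperature for, in the format "City, Country"
        unit: The unit to return the temperature in (choices: ["celsius", "fahrenheit"])
    Returns:
        The current temperature at the specified location, as a float.
    """
    return 22.0  # Dummy value; a real implementation would call a weather API
```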
### Passing tool results to the model
The sample code above is enough to list the available tools for your model, but what happens if it actually wants to use one? If that happens, you should:
1. Parse the model's output to get the tool name(s) and arguments.
2. Add the model's tool call(s) to the conversation.
3. Call the corresponding function(s) with those arguments.
4. Add the result(s) to the conversation.
### A complete tool use example
Let's walk through a tool use example, step by step. For this example, we will use an 8B `Hermes-2-Pro` model, as it is one of the highest-performing tool-use models in its size class at the time of writing. If you have the memory, you can consider using a larger model instead, like `Command-R` or `Mixtral-8x22B`, both of which also support tool use and offer even stronger performance.
First, let's load our model and tokenizer:
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
checkpoint = "NousResearch/Hermes-2-Pro-Llama-3-8B"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint, dtype=torch.bfloat16, device_map="auto")
```
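The tool definitions appear to have been dropped from this page; since the cells below pass `tools=tools` and the model responds with a `get_current_temperature` call, we assume the compliant function sketched earlier and collect it into a list:
```python
tools = [get_current_temperature]
```
Next, set up a conversation for our bot:
```python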
messages = [
{"role": "system", "content": "You are a bot that responds to weather queries. You should reply with the unit used in the queried location."},
{"role": "user", "content": "Hey, what's the temperature in Paris right now?"}
]
```
Now, let's apply the chat template and generate a response:
```python
inputs = tokenizer.apply_chat_template(messages, chat_template="tool_use", tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
inputs = {k: v.to(model.device) for k, v in inputs.items()}
out = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):]))
```
And we get:
```text
<tool_call>
{"arguments": {"location": "Paris, France", "unit": "celsius"}, "name": "get_current_temperature"}
</tool_call><|im_end|>
```
The model has called the function with valid arguments, in the format requested by the function docstring. It has inferred that we're most likely referring to the Paris in France, and it remembered that, as the home of SI units, the temperature in France should be displayed in Celsius.
Let's append the model's tool call to the conversation. Note that we generate a random tool call ID here. These are not used by all models, but they allow models to issue multiple tool calls at once and keep track of which response corresponds to which call. You can generate them any way you like, but they should be unique within each chat.
```python
tool_call_id = "vAHdf3" # Random ID, should be unique for each tool call
tool_call = {"name": "get_current_temperature", "arguments": {"location": "Paris, France", "unit": "celsius"}}
messages.append({"role": "assistant", "tool_calls": [{"id": tool_call_id, "type": "function", "function": tool_call}]})
```
Now that we've added the tool call to the conversation, we can call the function and append the result to the conversation. Since we're just using a dummy function for this example that always returns 22.0, we can simply append that result directly. Note the tool call ID - it should match the ID used in the tool call above.
```python
messages.append({"role": "tool", "tool_call_id": tool_call_id, "name": "get_current_temperature", "content": "22.0"})
```
Finally, let's let the assistant read the function outputs and continue chatting with the user:
```python
inputs = tokenizer.apply_chat_template(messages, chat_template="tool_use", tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
inputs = {k: v.to(model.device) for k, v in inputs.items()}
out = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):]))
```
And we get:
```text
The current temperature in Paris, France is 22.0 ° Celsius.<|im_end|>
```
<Tip>
Not all of the tool-calling features shown above are used by all tool-use models. Some use tool call IDs, others simply use the function name and match tool calls to results using the ordering, and there are several models that use neither and only issue one tool call at a time to avoid confusion. If you want your code to be compatible with as many models as possible, we recommend structuring your tool calls like we've shown here, and returning tool results in the order that they were issued by the model. The chat templates on each model should handle the rest.
</Tip>
### Understanding tool schemas
Each function you pass to the `tools` argument of `apply_chat_template` is converted into a [JSON schema](https://json-schema.org/learn/getting-started-step-by-step). These schemas are then passed to the model chat template. In other words, tool-use models do not see your functions directly, and they never see the actual code inside them. What they care about is the function **definitions** and the **arguments** they need to pass to them - they care about what the tools do and how to use them, not how they work! It is up to you to read their outputs, detect if they have requested to use a tool, pass their arguments to the tool function, and return the response in the chat.
Generating JSON schemas to pass to the template should be automatic and invisible as long as your functions follow the specification above, but if you encounter problems, or you simply want more control over the conversion, you can handle the conversion manually. Here is an example of a manual schema conversion:
```python
from transformers.utils import get_json_schema
def multiply(a: float, b: float):
"""
A function that multiplies two numbers
Args:
a: The first number to multiply
b: The second number to multiply
"""
return a * b
schema = get_json_schema(multiply)
print(schema)
```
This will yield:
```json
{
"type": "function",
"function": {
"name": "multiply",
"description": "A function that multiplies two numbers",
"parameters": {
"type": "object",
"properties": {
"a": {
"type": "number",
"description": "The first number to multiply"
},
"b": {
"type": "number",
"description": "The second number to multiply"
}
},
"required": ["a", "b"]
}
}
}
```
If you wish, you can edit these schemas, or even write them from scratch yourself without using `get_json_schema` at all. JSON schemas can be passed directly to the `tools` argument of `apply_chat_template` - this gives you a lot of power to define precise schemas for more complex functions. Be careful, though - the more complex your schemas are, the more likely the model is to get confused when dealing with them! We recommend simple function signatures where possible, keeping arguments (and especially complex, nested arguments) to a minimum.
Here is an example of defining schemas by hand, and passing them directly to `apply_chat_template`:
```python
# A simple function that takes no arguments
current_time = {
"type": "function",
"function": {
"name": "current_time",
"description": "Get the current local time as a string.",
"parameters": {
'type': 'object',
'properties': {}
}
}
}
# A more complete function that takes two numerical arguments
multiply = {
'type': 'function',
'function': {
'name': 'multiply',
'description': 'A function that multiplies two numbers',
'parameters': {
'type': 'object',
'properties': {
'a': {
'type': 'number',
'description': 'The first number to multiply'
},
'b': {
'type': 'number', 'description': 'The second number to multiply'
}
},
'required': ['a', 'b']
}
}
}
model_input = tokenizer.apply_chat_template(
messages,
tools = [current_time, multiply]
)
```
## Advanced: Retrieval-augmented generation
"Retrieval-augmented generation" or "RAG" LLMs can search a corpus of documents for information before responding to a query. This allows models to vastly expand their knowledge base beyond their limited context size. Our recommendation for RAG models is that their template should accept a `documents` argument. This should be a list of documents, where each "document" is a single dict with `title` and `contents` keys, both of which are strings. Because this format is much simpler than the JSON schemas used for tools, no helper functions are necessary.
Here's an example of a RAG template in action:
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load the model and tokenizer
model_id = "CohereForAI/c4ai-command-r-v01-4bit"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
device = model.device # Get the device the model is loaded on
# Define conversation input
conversation = [
{"role": "user", "content": "What has Man always dreamed of?"}
]
# Define documents for retrieval-augmented generation
documents = [
{
"title": "The Moon: Our Age-Old Foe",
"text": "Man has always dreamed of destroying the moon. In this essay, I shall..."
},
{
"title": "The Sun: Our Age-Old Friend",
"text": "Although often underappreciated, the sun provides several notable benefits..."
}
]
# Tokenize the conversation and documents using the RAG chat template, returning PyTorch tensors.
input_ids = tokenizer.apply_chat_template(
conversation=conversation,
documents=documents,
chat_template="rag",
tokenize=True,
add_generation_prompt=True,
return_tensors="pt").to(device)
# Generate a response
gen_tokens = model.generate(
input_ids,
max_new_tokens=100,
do_sample=True,
temperature=0.3,
)
# Decode and print the generated text
gen_text = tokenizer.decode(gen_tokens[0])
print(gen_text)
```
The `documents` input for retrieval-augmented generation is not widely supported, and many models have chat templates which simply ignore this input.
To verify if a model supports the `documents` input, you can read its model card, or `print(tokenizer.chat_template)` to see if the `documents` key is used anywhere.
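As a rough sketch of that check (hedged: some tokenizers store several named templates as a dict rather than a single string):
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01-4bit")
template = tokenizer.chat_template
if isinstance(template, dict):  # some models ship multiple named templates
    template = " ".join(template.values())
print("references `documents`:", "documents" in (template or ""))
```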
<Tip>
One model class that does support it, though, is Cohere's [Command-R](https://huggingface.co/CohereForAI/c4ai-command-r-08-2024) and [Command-R+](https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024), through their `rag` chat template. You can see additional examples of grounded generation using this feature in their model cards.
</Tip>
## متقدم: كيف تعمل قوالب الدردشة؟
يتم تخزين قالب الدردشة للنموذج في الخاصية `tokenizer.chat_template`. إذا لم يتم تعيين قالب دردشة، فسيتم استخدام القالب الافتراضي لفئة النموذج هذه بدلاً من ذلك. دعونا نلقي نظرة على قالب دردشة `Zephyr`، ولكن لاحظ أن هذا القالب مُبسّط قليلاً عن القالب الفعلي!
```
{%- for message in messages %}
    {{- '<|' + message['role'] + '|>\n' }}
    {{- message['content'] + eos_token }}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|assistant|>\n' }}
{%- endif %}
```
إذا لم تكن قد رأيت أحد هذه القوالب من قبل، فهذا [قالب Jinja](https://jinja.palletsprojects.com/en/3.1.x/templates/). Jinja هي لغة قوالب تسمح لك بكتابة تعليمات برمجية بسيطة تُوَلّد نصًا. من نواحٍ عديدة، يُشبه تركيبها تركيب لغة Python. في لغة Python، سيبدو القالب المقابل كما يلي:
```python
for message in messages:
    print(f'<|{message["role"]}|>')
    print(message['content'] + eos_token)
if add_generation_prompt:
    print('<|assistant|>')
```
يقوم القالب بثلاثة أشياء بشكل فعال:
- لكل رسالة، يطبع الدور مُحاطًا بـ `<|` و `|>`، مثل `<|user|>` أو `<|assistant|>`.
- بعد ذلك، يطبع محتوى الرسالة، متبوعًا برمز نهاية التسلسل `eos_token` .
- أخيرًا، إذا تم تعيين `add_generation_prompt` ، يطبع الرمز المساعد، حتى يعرف النموذج أنه يجب أن يبدأ في توليد استجابة المساعد.
هذا قالب بسيط جدًا، لكن Jinja تمنحك الكثير من المرونة للقيام بأشياء أكثر تعقيدًا! دعونا نرى قالب Jinja يُمكنه تنسيق المُدخلات بطريقة تُشبه الطريقة التي تُنسّق بها LLaMA مُدخلاتها (لاحظ أن قالب LLaMA الحقيقي يتضمن معالجة لرسائل النظام الافتراضية ومعالجة رسائل النظام بشكل مختلف قليلاً بشكل عام - لا تستخدم هذا القالب في التعليمات البرمجية الفعلية الخاصة بك!)
```
{%- for message in messages %}
    {%- if message['role'] == 'user' %}
        {{- bos_token + '[INST] ' + message['content'] + ' [/INST]' }}
    {%- elif message['role'] == 'system' %}
        {{- '<<SYS>>\\n' + message['content'] + '\\n<</SYS>>\\n\\n' }}
    {%- elif message['role'] == 'assistant' %}
        {{- ' ' + message['content'] + ' ' + eos_token }}
    {%- endif %}
{%- endfor %}
```
نأمل أنه إذا حدقت في هذا لفترة قصيرة، يمكنك أن ترى ما يفعله هذا القالب - فهو يُضيف رموزًا مُحددة مثل `[INST]` و `[/INST]` بناءً على دور كل رسالة. يمكن تمييز رسائل المستخدم والمساعد والنظام بوضوح للنموذج بسبب الرموز التي تُحيط بها.
## متقدم: إضافة وتعديل قوالب الدردشة
### كيف أنشئ قالب دردشة؟
ببساطة، اكتب قالب Jinja واضبط `tokenizer.chat_template`. قد تجد أنه من الأسهل البدء بقالب موجود من نموذج آخر وتحريره ببساطة ليناسب احتياجاتك! على سبيل المثال، يمكننا أن نأخذ قالب LLaMA أعلاه ونضيف `[ASST]` و `[/ASST]` إلى رسائل المساعد:
```
{%- for message in messages %}
    {%- if message['role'] == 'user' %}
        {{- bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }}
    {%- elif message['role'] == 'system' %}
        {{- '<<SYS>>\\n' + message['content'].strip() + '\\n<</SYS>>\\n\\n' }}
    {%- elif message['role'] == 'assistant' %}
        {{- '[ASST] ' + message['content'] + ' [/ASST]' + eos_token }}
    {%- endif %}
{%- endfor %}
```
الآن، اضبط ببساطة الخاصية `tokenizer.chat_template`. في المرة القادمة التي تستخدم فيها [`~PreTrainedTokenizer.apply_chat_template`] ، سيستخدم القالب الجديد الخاص بك! سيتم حفظ هذه الخاصية في ملف `tokenizer_config.json`، حتى تتمكن من استخدام [`~utils.PushToHubMixin.push_to_hub`] لتحميل قالبك الجديد إلى Hub والتأكد من أن الجميع يستخدم القالب الصحيح لنموذجك!
```python
template = tokenizer.chat_template
template = template.replace("SYS", "SYSTEM") # تغيير رمز النظام
tokenizer.chat_template = template # تعيين القالب الجديد
tokenizer.push_to_hub("model_name") # تحميل القالب الجديد إلى Hub!
```
تُستدعى الدالة [`~PreTrainedTokenizer.apply_chat_template`]، التي تستخدم قالب الدردشة الخاص بك، بواسطة فئة [`TextGenerationPipeline`]، لذلك بمجرد تعيين قالب الدردشة الصحيح، سيصبح نموذجك متوافقًا تلقائيًا مع [`TextGenerationPipeline`].
<Tip>
إذا كنت تُجري ضبطًا دقيقًا لنموذج للدردشة، بالإضافة إلى تعيين قالب دردشة، فربما يجب عليك إضافة أي رموز تحكم دردشة جديدة كرموز خاصة في المجزىء اللغوي. لا يتم تقسيم الرموز الخاصة أبدًا، مما يضمن معالجة رموز التحكم الخاصة بك دائمًا كرموز فردية بدلاً من تجزئتها إلى أجزاء. يجب عليك أيضًا تعيين خاصية `eos_token` للمجزىء اللغوي إلى الرمز الذي يُشير إلى نهاية توليدات المساعد في قالبك. سيضمن هذا أن أدوات توليد النصوص يمكنها تحديد وقت إيقاف توليد النص بشكل صحيح.
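على سبيل المثال، هذا رسم تخطيطي لإعداد مجزىء لغوي لقالب بنمط ChatML؛ الرموز المستخدمة هنا مجرد افتراض توضيحي ويجب أن تُطابق الرموز التي يستخدمها قالبك فعليًا:
```python
# إضافة رموز التحكم كرموز خاصة حتى لا يتم تجزئتها أبدًا
tokenizer.add_special_tokens({"additional_special_tokens": ["<|im_start|>", "<|im_end|>"]})

# تعيين الرمز الذي يُشير إلى نهاية توليدات المساعد في القالب
tokenizer.eos_token = "<|im_end|>"

# إذا أضفت رموزًا جديدة تمامًا، فلا تنسَ توسيع مصفوفة التضمينات في النموذج
model.resize_token_embeddings(len(tokenizer))
```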
</Tip>
### لماذا تحتوي بعض النماذج على قوالب متعددة؟
تستخدم بعض النماذج قوالب مختلفة لحالات استخدام مختلفة. على سبيل المثال، قد تستخدم قالبًا واحدًا للدردشة العادية وآخر لاستخدام الأدوات، أو التوليد القائم على الاسترجاع. في هذه الحالات، تكون `tokenizer.chat_template` قاموسًا. يمكن أن يتسبب هذا في بعض الارتباك، وحيثما أمكن، نوصي باستخدام قالب واحد لجميع حالات الاستخدام. يمكنك استخدام عبارات Jinja مثل `if tools is defined` وتعريفات `{% macro %}` لتضمين مسارات تعليمات برمجية متعددة بسهولة في قالب واحد.
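للتوضيح، هذا رسم تخطيطي لقالب واحد يتفرع داخليًا بناءً على وجود الأدوات بدلاً من استخدام قوالب متعددة؛ التنسيق المعروض افتراضي لأغراض الشرح فقط وليس تنسيق نموذج حقيقي:
```python
tokenizer.chat_template = (
    "{%- if tools is defined and tools %}"
    "{{- 'Available tools:\n' }}"
    "{%- for tool in tools %}"
    "{{- tool['function']['name'] + '\n' }}"
    "{%- endfor %}"
    "{%- endif %}"
    "{%- for message in messages %}"
    "{{- message['role'] + ': ' + message['content'] + '\n' }}"
    "{%- endfor %}"
)
```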
عندما يحتوي المجزىء اللغوي على قوالب متعددة، تكون `tokenizer.chat_template` عبارة عن قاموس (`dict`)، حيث يكون كل مفتاح هو اسم قالب. يحتوي أسلوب `apply_chat_template` على معالجة خاصة لأسماء قوالب مُعينة: على وجه التحديد، سيبحث عن قالب باسم `default` في معظم الحالات، وسيُثير خطأً إذا لم يتمكن من العثور على واحد. ومع ذلك، إذا كان هناك قالب باسم `tool_use` وقام المستخدم بتمرير وسيطة `tools`، فسيستخدم هذا القالب بدلاً من ذلك. للوصول إلى قوالب بأسماء أخرى، مرر اسم القالب الذي تُريده إلى وسيطة `chat_template` لـ `apply_chat_template()` (انظر المثال أدناه).
نجد أن هذا قد يكون مُربكًا بعض الشيء للمستخدمين - لذلك إذا كنت تكتب قالبًا بنفسك، فننصحك بمحاولة وضعه كله في قالب واحد حيثما أمكن!
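للتوضيح، عندما تكون `tokenizer.chat_template` قاموسًا، يمكنك فحص أسماء القوالب المتاحة واختيار واحد منها بالاسم؛ هذا رسم تخطيطي يفترض وجود قالب باسم `tool_use` لدى المجزىء اللغوي:
```python
# فحص أسماء القوالب المتاحة
if isinstance(tokenizer.chat_template, dict):
    print(list(tokenizer.chat_template.keys()))  # مثل: ["default", "tool_use", "rag"]

# اختيار قالب مُسمى صراحةً
prompt = tokenizer.apply_chat_template(
    messages,
    tools=tools,
    chat_template="tool_use",
    tokenize=False,
    add_generation_prompt=True,
)
```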
## ما القالب الذي يجب أن أستخدمه؟
عند تعيين قالب لنموذج تم تدريبه بالفعل على الدردشة، يجب التأكد من أن القالب يتطابق تمامًا مع تنسيق الرسالة الذي شاهده النموذج أثناء التدريب، وإلا فمن المحتمل أن تواجه تدهورًا في الأداء. هذا صحيح حتى إذا كنت تدرب النموذج بشكل إضافي - فمن المحتمل أن تحصل على أفضل أداء إذا قمت بإبقاء رموز الدردشة ثابتة. يُشبه هذا إلى حد كبير عملية التجزئة - فأنت تحصل بشكل عام على أفضل أداء للاستدلال أو الضبط الدقيق عندما تتطابق بدقة مع التجزئة المستخدمة أثناء التدريب.
من ناحية أخرى، إذا كنت تُدرّب نموذجًا من البداية، أو تقوم بضبط دقيق لنموذج لغة أساسي للدردشة، لديك حرية اختيار قالب مناسب! تتمتع LLMs بالذكاء الكافي للتعامل مع العديد من تنسيقات الإدخال المختلفة. أحد الخيارات الشائعة هو تنسيق "ChatML"، وهو خيار جيد ومرن للعديد من حالات الاستخدام. يبدو كالتالي:
```
{%- for message in messages %}
    {{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}
{%- endfor %}
```
إذا أعجبك هذا، فإليك نسخة من سطر واحد جاهزة لوضعها في كودك. تتضمن هذه النسخة أيضًا دعمًا مفيدًا [لمُطالبات التوليد](#what-are-generation-prompts)، ولكن لاحظ أنها لا تضيف رموز BOS أو EOS! إذا كان نموذجك يتوقع هذه الرموز، فلن تتم إضافتها تلقائيًا بواسطة `apply_chat_template` - بمعنى آخر، سيتم تجزئة النص باستخدام `add_special_tokens=False`. هذا لتجنب التعارضات المحتملة بين القالب ومنطق `add_special_tokens`. إذا كان نموذجك يتوقع رموزًا خاصة، فتأكد من إضافتها إلى القالب!
```python
tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
```
يُحيط هذا القالب كل رسالة بين الرمزين `<|im_start|>` و `<|im_end|>`، ويكتب ببساطة الدور كسلسلة نصية، مما يسمح بالمرونة في الأدوار التي تتدرب عليها. يبدو الناتج كما يلي:
```text
<|im_start|>system
You are a helpful chatbot that will do its best not to say anything so stupid that people tweet about it.<|im_end|>
<|im_start|>user
How are you?<|im_end|>
<|im_start|>assistant
I'm doing great!<|im_end|>
```
تعد أدوار "user" و "system" و "assistant" هي الأدوار القياسية للدردشة، ونوصي باستخدامها عندما يكون ذلك منطقيًا، خاصة إذا كنت تريد أن يعمل نموذجك بشكل جيد مع [`TextGenerationPipeline`]. ومع ذلك، فأنت لست مقيدًا بهذه الأدوار - فإن القوالب مرنة للغاية، ويمكن أن تكون أي سلسلة نصية دورًا.
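للتوضيح، مع قالب بنمط ChatML مثل القالب أعلاه، لا شيء يمنعك من استخدام دور غير قياسي إذا كانت بيانات تدريبك تتضمنه؛ الدور `narrator` هنا مجرد افتراض توضيحي:
```python
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "narrator", "content": "Meanwhile, in another part of town..."},  # دور مخصص
    {"role": "user", "content": "What happens next?"},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
```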
## أريد إضافة بعض قوالب الدردشة! كيف أبدأ؟
إذا كان لديك أي نماذج دردشة، فيجب عليك تعيين الخاصية `tokenizer.chat_template` الخاصة بها واختبارها باستخدام [`~PreTrainedTokenizer.apply_chat_template`]، ثم رفع المجزىء اللغوي المُحدّث إلى Hub. ينطبق هذا حتى إذا لم تكن مالك النموذج - إذا كنت تستخدم نموذجًا بقالب دردشة فارغ، أو لا يزال يستخدم قالب الفئة الافتراضية، فيرجى فتح [طلب سحب](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) إلى مستودع النموذج حتى يمكن تعيين الخاصية بشكل صحيح!
بمجرد تعيين الخاصية، هذا كل شيء، لقد انتهيت! ستعمل `tokenizer.apply_chat_template` الآن بشكل صحيح لهذا النموذج، مما يعني أنها مدعومة أيضًا بشكل تلقائي في أماكن مثل [`TextGenerationPipeline`]!
من خلال ضمان امتلاك النماذج لهذه الخاصية، يُمكننا التأكد من أن المجتمع بأكمله يستخدم القوة الكاملة للنماذج مفتوحة المصدر. لقد ظلّت حالات عدم تطابق التنسيق تطارد المجال وتضر بالأداء بصمت لفترة طويلة جدًا - وقد حان الوقت لوضع حد لها!
## متقدم: نصائح لكتابة القوالب
<Tip>
أسهل طريقة للبدء في كتابة قوالب Jinja هي إلقاء نظرة على بعض القوالب الموجودة. يمكنك استخدام `print(tokenizer.chat_template)` لأي نموذج دردشة لمعرفة القالب الذي يستخدمه. بشكل عام، تحتوي النماذج التي تدعم استخدام الأدوات على قوالب أكثر تعقيدًا بكثير من النماذج الأخرى - لذلك عندما تبدأ للتو، فمن المحتمل أنها مثال سيئ للتعلم منه! يمكنك أيضًا إلقاء نظرة على [وثائق Jinja](https://jinja.palletsprojects.com/en/3.1.x/templates/#synopsis) للحصول على تفاصيل حول تنسيق Jinja العام وتركيبه.
</Tip>
تُطابق قوالب Jinja في `transformers` قوالب Jinja في أي مكان آخر. الشيء الرئيسي الذي يجب معرفته هو أن سجل الدردشة سيكون متاحًا داخل قالبك كمتغير يسمى `messages`. ستتمكن من الوصول إلى `messages` في قالبك تمامًا كما يمكنك في Python، مما يعني أنه يمكنك التكرار خلاله باستخدام `{% for message in messages %}` أو الوصول إلى رسائل فردية باستخدام `{{ messages[0] }}`، على سبيل المثال.
يمكنك أيضًا استخدام النصائح التالية لكتابة قوالب Jinja نظيفة وفعالة:
### اقتطاع المسافات الفارغة
بشكل افتراضي، ستطبع Jinja أي مسافات فارغة تأتي قبل أو بعد كتلة. يمكن أن يكون هذا مشكلة لقوالب الدردشة، والتي تريد عادةً أن تكون دقيقة جدًا مع المسافات! لتجنب ذلك، نوصي بشدة بكتابة قوالبك على النحو التالي:
```
{%- for message in messages %}
    {{- message['role'] + message['content'] }}
{%- endfor %}
```
بدلاً من ذلك:
```
{% for message in messages %}
    {{ message['role'] + message['content'] }}
{% endfor %}
```
ستؤدي إضافة `-` إلى إزالة أي مسافات تأتي قبل الكتلة. يبدو المثال الثاني بسيطًا وغير ضار، ولكن قد يتم تضمين السطر الجديد والمسافة البادئة في المخرجات، وهو على الأرجح ليس ما تُريده!
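يمكنك التحقق من هذا السلوك بنفسك باستخدام مكتبة `jinja2` مباشرةً؛ هذا مثال توضيحي مستقل:
```python
from jinja2 import Template

messages = [{"role": "user", "content": "Hi"}]

trimmed = Template("{%- for m in messages %}{{- m['role'] + m['content'] }}{%- endfor %}")
untrimmed = Template("""{% for m in messages %}
    {{ m['role'] + m['content'] }}
{% endfor %}""")

print(repr(trimmed.render(messages=messages)))    # 'userHi'
print(repr(untrimmed.render(messages=messages)))  # '\n    userHi\n' - مسافات غير مرغوبة!
```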
### المتغيرات الخاصة
داخل قالبك، سيكون لديك حق الوصول إلى العديد من المتغيرات الخاصة. أهمها هو `messages`، والذي يحتوي على سجل الدردشة كقائمة من قواميس الرسائل. ومع ذلك، هناك العديد من المتغيرات الأخرى. لن يتم استخدام كل متغير في كل قالب. المتغيرات الأكثر شيوعًا هي:
- `tools` تحتوي على قائمة بالأدوات بتنسيق مخطط JSON. ستكون `None` أو غير مُعرّفة إذا لم يتم تمرير أي أدوات.
- `documents` تحتوي على قائمة من المستندات بالتنسيق `{"title": "العنوان", "contents": "المحتويات"}`، تُستخدم للتوليد المُعزز بالاسترجاع. ستكون `None` أو غير مُعرّفة إذا لم يتم تمرير أي مستندات.
- `add_generation_prompt` هي قيمة منطقية تكون `True` إذا طلب المستخدم مُطالبة توليد، و `False` بخلاف ذلك. إذا تم تعيين هذا، فيجب أن يُضيف قالبك رأس رسالة مساعد إلى نهاية المحادثة. إذا لم يكن لدى نموذجك رأس مُحدد لرسائل المساعد، فيمكنك تجاهل هذا العلم.
- **الرموز الخاصة** مثل `bos_token` و `eos_token`. يتم استخراجها من `tokenizer.special_tokens_map`. ستختلف الرموز الدقيقة المتاحة داخل كل قالب اعتمادًا على المجزىء اللغوي الأصلي.
<Tip>
يمكنك في الواقع تمرير أي `kwarg` إلى `apply_chat_template`، وسيكون متاحًا داخل القالب كمتغير. بشكل عام، نوصي بمحاولة الالتزام بالمتغيرات الأساسية المذكورة أعلاه، لأن نموذجك سيصبح أكثر صعوبة في الاستخدام إذا كان على المستخدمين كتابة تعليمات برمجية مخصصة لتمرير `kwargs` خاصة بالنموذج. ومع ذلك، فنحن نُدرك أن هذا المجال يتحرك بسرعة، لذلك إذا كانت لديك حالة استخدام جديدة لا تتناسب مع واجهة برمجة التطبيقات الأساسية، فلا تتردد في استخدام `kwarg` جديد لها! إذا أصبح هذا الـ `kwarg` الجديد شائعًا، فقد نرقّيه إلى واجهة برمجة التطبيقات الأساسية وننشئ له توثيقًا خاصًا به.
</Tip>
### دوال قابلة للاستدعاء
هناك أيضًا قائمة قصيرة من الدوال القابلة للاستدعاء المتاحة لك داخل قوالبك. هذه هي:
- `raise_exception(msg)`: تُثير `TemplateException`. هذا مفيد لتصحيح الأخطاء، ولإخبار المستخدمين عندما يفعلون شيئًا لا يدعمه قالبك.
- `strftime_now(format_str)`: تُكافئ `datetime.now().strftime(format_str)` في Python. يُستخدم هذا للحصول على التاريخ/الوقت الحالي بتنسيق مُحدد، والذي يتم تضمينه أحيانًا في رسائل النظام (انظر المثال أدناه).
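للتوضيح، هذا رسم تخطيطي لقالب يستخدم كلتا الدالتين؛ التنسيق المعروض افتراضي لأغراض الشرح فقط:
```python
tokenizer.chat_template = (
    "{%- if messages and messages[0]['role'] == 'tool' %}"
    "{{- raise_exception('This template does not support starting with a tool message!') }}"
    "{%- endif %}"
    "{{- 'Today is ' + strftime_now('%d %B %Y') + '.\n' }}"
    "{%- for message in messages %}"
    "{{- message['role'] + ': ' + message['content'] + '\n' }}"
    "{%- endfor %}"
)
```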
### التوافق مع تطبيقات Jinja غير المكتوبة بـ Python
هناك تطبيقات متعددة لـ Jinja بلغات مختلفة. لها عادةً نفس التركيب، ولكن الاختلاف الرئيسي هو أنه عند كتابة قالب في Python، يمكنك استخدام أساليب Python، مثل `.lower()` على السلاسل أو `.items()` على القواميس. سيؤدي هذا إلى حدوث أعطال إذا حاول شخص ما استخدام قالبك في تطبيق غير مكتوب بـ Python لـ Jinja. تعد التطبيقات غير المكتوبة بـ Python شائعة بشكل خاص في بيئات النشر، حيث تنتشر JS و Rust بشدة.
لا تقلق، على الرغم من ذلك! هناك بعض التغييرات البسيطة التي يمكنك إجراؤها على قوالبك لضمان توافقها عبر جميع تطبيقات Jinja:
- استبدل أساليب Python بمرشحات Jinja. عادة ما يكون لها نفس الاسم، على سبيل المثال، يصبح `string.lower()` عبارة عن `string|lower`، ويصبح `dict.items()` عبارة عن `dict|items`. أحد التغييرات الملحوظة هو أن `string.strip()` يصبح `string|trim`. راجع [قائمة المرشحات المدمجة](https://jinja.palletsprojects.com/en/3.1.x/templates/#builtin-filters) في وثائق Jinja لمزيد من المعلومات.
- استبدل `True` و `False` و `None`، وهي خاصة بـ Python، بـ `true` و `false` و `none`.
- قد يؤدي عرض قاموس أو قائمة مباشرة إلى نتائج مختلفة في التطبيقات الأخرى (على سبيل المثال، قد تتغير مدخلات السلسلة النصية من علامات اقتباس مفردة ' إلى علامات اقتباس مزدوجة "). يمكن أن تساعد إضافة مرشح `tojson` في ضمان الاتساق هنا. (انظر المثال التوضيحي أدناه.)
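فيما يلي مقارنة توضيحية بين أسلوب خاص بـ Python وأسلوب متوافق عبر التطبيقات؛ كلا القالبين يُنتج الناتج نفسه في تطبيق Jinja المكتوب بـ Python:
```python
# أسلوب خاص بـ Python فقط: يستخدم أساليب Python على السلاسل
python_only = "{% if message['content'].strip() != '' %}{{ message['content'].lower() }}{% endif %}"

# أسلوب متوافق عبر التطبيقات: يستخدم مرشحات Jinja بدلاً من الأساليب
portable = "{% if message['content'] | trim != '' %}{{ message['content'] | lower }}{% endif %}"
```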
### كتابة مُطالبات التوليد
لقد ذكرنا أعلاه أن `add_generation_prompt` متغير خاص يمكن الوصول إليه داخل قالبك، ويتحكم فيه المستخدم من خلال تعيين معامل `add_generation_prompt`. إذا كان نموذجك يتوقع رأسًا لرسائل المساعد، فيجب أن يدعم قالبك إضافة هذا الرأس عند تعيين `add_generation_prompt`.
فيما يلي مثال على قالب يُنسّق الرسائل بأسلوب ChatML، مع دعم مُطالبة التوليد:
```text
{{- bos_token }}
{%- for message in messages %}
    {{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
{%- endif %}
```
سيعتمد المحتوى الدقيق لرأس المساعد على نموذجك المُحدد، ولكن يجب أن يكون دائمًا السلسلة النصية التي تُمثل بداية رسالة المساعد، بحيث إذا قام المستخدم بتطبيق قالبك باستخدام `add_generation_prompt=True` ثم قام بتوليد نص، سيكتب النموذج استجابة المساعد. لاحظ أيضًا أن بعض النماذج لا تحتاج إلى مُطالبة توليد، لأن رسائل المساعد تبدأ دائمًا فورًا بعد رسائل المستخدم. هذا شائع بشكل خاص لنماذج LLaMA و Mistral، حيث تبدأ رسائل المساعد فورًا بعد رمز `[/INST]` الذي ينهي رسائل المستخدم. في هذه الحالات، يمكن للقالب تجاهل معامل `add_generation_prompt`.
مُطالبات التوليد مُهمة! إذا كان نموذجك يتطلب مُطالبة توليد ولكن قالبك لا يُضيفها، فمن المُحتمل أن تتدهور مخرجات النموذج بشدة، أو قد يُظهر النموذج سلوكًا غير عادي مثل متابعة رسالة المستخدم الأخيرة!
### كتابة قوالب أكبر وتصحيحها
عندما تم تقديم هذه الميزة، كانت معظم القوالب صغيرة جدًا، أي ما يُعادل نص برمجي "من سطر واحد" في Jinja. ومع ذلك، مع النماذج والميزات الجديدة مثل استخدام الأدوات و RAG، يمكن أن يصل طول بعض القوالب إلى 100 سطر أو أكثر. عند كتابة قوالب كهذه، من الجيد كتابتها في ملف مُنفصل، باستخدام مُحرر نصوص. يمكنك بسهولة استخراج قالب دردشة إلى ملف:
```python
open("template.jinja", "w").write(tokenizer.chat_template)
```
أو تحميل القالب المُحرر مرة أخرى إلى المعالج اللغوي:
```python
tokenizer.chat_template = open("template.jinja").read()
```
كميزة إضافية، عندما تكتب قالبًا طويلاً متعدد الأسطر في ملف مُنفصل، ستتوافق أرقام الأسطر في هذا الملف تمامًا مع أرقام الأسطر في أخطاء تحليل القالب أو تنفيذه. سيُسهّل هذا كثيرًا تحديد مكان المشكلات.
### كتابة قوالب للأدوات
على الرغم من أن قوالب الدردشة لا تفرض واجهة برمجة تطبيقات مُحددة للأدوات (أو لأي شيء حقًا)، فإننا نوصي مؤلفي القوالب بمحاولة الالتزام بواجهة برمجة تطبيقات قياسية حيثما أمكن. الهدف النهائي لقوالب الدردشة هو السماح بنقل التعليمات البرمجية عبر النماذج، لذا فإن الانحراف عن واجهة برمجة تطبيقات الأدوات القياسية يعني أن المستخدمين سيضطرون إلى كتابة تعليمات برمجية مخصصة لاستخدام الأدوات مع نموذجك. في بعض الأحيان يكون ذلك أمرًا لا مفر منه، ولكن غالبًا ما يكون من الممكن استخدام واجهة برمجة التطبيقات القياسية من خلال استخدام قوالب ذكية!
أدناه، سنُدرج عناصر واجهة برمجة التطبيقات القياسية، ونقدم نصائح حول كتابة قوالب ستعمل بشكل جيد معها.
#### تعريفات الأدوات
يجب أن يتوقع قالبك أن يكون المتغير `tools` إما فارغًا (إذا لم يتم تمرير أي أدوات)، أو قائمة من قواميس مخطط JSON. تسمح أساليب قوالب الدردشة لدينا للمستخدمين بتمرير الأدوات إما كمخطط JSON أو كدوال Python، ولكن عندما يتم تمرير الدوال، فإننا نقوم تلقائيًا بإنشاء مخطط JSON وتمريره إلى قالبك. نتيجة لذلك، سيكون متغير `tools` الذي يستقبله قالبك دائمًا قائمة من مخططات JSON. فيما يلي مخطط JSON نموذجي لأداة:
```json
{
    "type": "function",
    "function": {
        "name": "multiply",
        "description": "دالة تضرب عددين",
        "parameters": {
            "type": "object",
            "properties": {
                "a": {
                    "type": "number",
                    "description": "الرقم الأول للضرب"
                },
                "b": {
                    "type": "number",
                    "description": "الرقم الثاني للضرب"
                }
            },
            "required": ["a", "b"]
        }
    }
}
```
وهنا بعض الأمثلة البرمجية للتعامل مع الأدوات في قالب الدردشة الخاص بك. تذكر أن هذا مجرد مثال لتنسيق مُحدد - من المحتمل أن يحتاج نموذجك إلى تنسيق مختلف!
```text
{%- if tools %}
    {%- for tool in tools %}
        {{- '<tool>' + tool['function']['name'] + '\n' }}
        {%- for argument in tool['function']['parameters']['properties'] %}
            {{- argument + ': ' + tool['function']['parameters']['properties'][argument]['description'] + '\n' }}
        {%- endfor %}
        {{- '\n</tool>' }}
    {%- endfor %}
{%- endif %}
```
يجب بالطبع اختيار الرموز المحددة ووصف الأدوات التي يُصدرها قالبك لتتناسب مع تلك التي تم تدريب نموذجك عليها. لا يوجد شرط أن يفهم نموذجك مُدخلات مخطط JSON، فقط أن يتمكن قالبك من ترجمة مخطط JSON إلى تنسيق نموذجك. على سبيل المثال، تم تدريب Command-R باستخدام أدوات مُعرّفة برؤوس دوال Python، ولكن قالب أداة Command-R يقبل مخطط JSON، ويُحوّل الأنواع داخليًا ويعرض أدوات الإدخال كرؤوس دوال Python. يمكنك فعل الكثير باستخدام القوالب!
#### استدعاءات الأدوات
استدعاءات الأدوات، إذا كانت موجودة، ستكون قائمة مُرفقة برسالة بدور `assistant`. لاحظ أن `tool_calls` هي دائمًا قائمة، على الرغم من أن معظم نماذج استدعاء الأدوات تدعم فقط استدعاء أداة واحدة في كل مرة، مما يعني أن القائمة ستحتوي عادةً على عنصر واحد فقط. فيما يلي قاموس رسالة نموذجي يحتوي على استدعاء أداة:
```json
{
    "role": "assistant",
    "tool_calls": [
        {
            "type": "function",
            "function": {
                "name": "multiply",
                "arguments": {
                    "a": 5,
                    "b": 6
                }
            }
        }
    ]
}
```
والنمط الشائع للتعامل معها سيكون كهذا:
```text
{%- if message['role'] == 'assistant' and 'tool_calls' in message %}
    {%- for tool_call in message['tool_calls'] %}
        {{- '<tool_call>' + tool_call['function']['name'] + '\n' + tool_call['function']['arguments']|tojson + '\n</tool_call>' }}
    {%- endfor %}
{%- endif %}
```
مرة أخرى، يجب عليك عرض استدعاء الأداة بالتنسيق والرموز الخاصة التي يتوقعها نموذجك.
#### استجابات الأدوات
استجابات الأدوات لها تنسيق بسيط: إنها قاموس رسالة بدور `tool`، ومفتاح `name` يُعطي اسم الدالة المُستدعاة، ومفتاح `content` يحتوي على نتيجة استدعاء الأداة. فيما يلي استجابة أداة نموذجية:
```json
{
    "role": "tool",
    "name": "multiply",
    "content": "30"
}
```
لست بحاجة إلى استخدام جميع المفاتيح في استجابة الأداة. على سبيل المثال، إذا كان نموذجك لا يتوقع تضمين اسم الدالة في استجابة الأداة، فيمكن أن يكون عرضها بسيطًا مثل:
```text
{%- if message['role'] == 'tool' %}
{{- "<tool_result>" + message['content'] + "</tool_result>" }}
{%- endif %}
```
مرة أخرى، تذكر أن التنسيق الفعلي والرموز الخاصة خاصة بالنموذج - يجب أن تُولي عناية كبيرة لضمان أن الرموز والمسافات الفارغة وكل شيء آخر يتطابق تمامًا مع التنسيق الذي تم تدريب نموذجك عليه!
| transformers/docs/source/ar/chat_templating.md/0 | {
"file_path": "transformers/docs/source/ar/chat_templating.md",
"repo_id": "transformers",
"token_count": 33667
} | 392 |
# المحولات النمطية
مكتبة `transformers` هي إطار عمل ذو فلسفة محددة؛ يتم تعريف فلسفتنا في [الدليل المفاهيمي](./philosophy).
جوهر هذه الفلسفة يتمثل في مبدأ [نموذج واحد، ملف واحد](https://huggingface.co/blog/transformers-design-philosophy)
في المكتبة. الجانب السلبي لهذا المبدأ هو تقييده لوراثة مكونات الملفات واستيرادها.
نتيجة لذلك، تتكرر مكونات النموذج عبر العديد من الملفات. يحتوي `transformers` على عدد كبير من طبقات الانتباه، يقارب عدد النماذج، والكثير منها متطابق. يتسبب هذا في تباعد التنفيذات المستقلة عن بعضها عندما تُطبَّق الإصلاحات والتغييرات على أجزاء محددة من التعليمات البرمجية.
ولمعالجة ذلك، اعتمدنا مفهوم "النسخ" في المكتبة. فبإضافة تعليق يُشير إلى أن التعليمات البرمجية هي نسخة من أخرى، نضمن من خلال أنظمة CI والأوامر المحلية عدم تباعد النسخ. لكن هذه العملية، رغم بساطتها، تُسبب إرهاقاً. كما أنها تزيد العبء على المساهمين، وهو ما نهدف إلى تجاوزه.
غالباً ما تتطلب مساهمات النماذج إضافة تعليمات برمجية (حوالي 1000 سطر)، ومعالج (حوالي 500 سطر)، واختبارات، ووثائق، إلخ. ونادراً ما تقل مساهمات النماذج عن 3000-5000 سطر من التعليمات البرمجية، معظمها أكواد نمطية. هذا يرفع مستوى المساهمات،
ونهدف مع المحولات النمطية إلى خفض هذا المستوى إلى حدّ مقبول.
## ما هو؟
تقدم المحولات النمطية مفهوم ملف "نمطي" لمجلد نموذج. يقبل هذا الملف النمطي تعليمات برمجية
غير مقبولة عادة في ملفات النمذجة/المعالجة، حيث يسمح بالاستيراد من نماذج مجاورة وكذلك
الوراثة من الفئات إلى فئات أخرى.
يعرّف هذا الملف النمطي النماذج والمعالجات وفئة التكوين التي سيتم تعريفها في وحداتها المقابلة.
وأخيرًا، تقدم هذه الميزة أداة `linter` جديدة ستعمل على "تفكيك" الملف النمطي إلى هيكل دليل "نموذج واحد، ملف واحد". سيتم إنشاء هذه الملفات تلقائيًا في كل مرة يتم فيها تشغيل البرنامج النصي؛ مما يقلّص المساهمات المطلوبة إلى الملف النمطي، وبالتالي إلى التغييرات بين النموذج المُساهَم به والنماذج الأخرى فقط.
سيقوم مستخدمو النموذج في النهاية باستيراد واستخدام واجهة الملف الواحد، لذا لا يتوقع حدوث أي تغيير هنا. من خلال القيام بذلك،
نأمل في الجمع بين أفضل ما في العالمين: تمكين المساهمات البسيطة مع الالتزام بفلسفتنا.
لذلك، هذا بديل لعلامات `# Copied from`، ويمكن توقع انتقال النماذج المساهمة سابقًا إلى
تنسيق المحولات النمطية الجديد في الأشهر المقبلة.
### التفاصيل
تُبسّط أداة "linter" الوراثة، مُنشئةً جميع الملفات المفردة من الملف النمطي، مع الحفاظ على شفافيتها أمام مستخدمي Python. حاليًا، تُبسّط الأداة مستوىً واحدًا من الوراثة.
على سبيل المثال:
- إذا ورثت فئة التكوين من فئة أخرى وأضافت/حذفت معاملًا، فسيشير الملف المولّد إليه مباشرةً (في حالة الإضافة) أو يزيله تمامًا (في حالة الحذف).
- إذا ورثت فئة من فئة أخرى، على سبيل المثال: `class GemmaModel(LlamaModel):`، فستُستنتج التبعيات تلقائيًا؛ إذ سيتم استنتاج جميع الوحدات الفرعية تلقائيًا من الفئة الأصلية.
- إذا قمت بتعريف دوال جديدة في الملف `modular` واستخدمتها داخل الفئات، فستستنتج أداة linter ذلك تلقائيًا.
يجب أن تكون قادرًا على كتابة كل شيء (المجزىء اللغوي، ومُعالِج الصور، والنموذج، والتكوين) في الملف `modular`، وسيتم إنشاء الملفات المُقابلة تلقائيًا.
### التطبيق
[TODO] نقدم اختبارًا جديدًا، للتأكد من أن المحتوى المولد يتطابق مع ما هو موجود في `modular_xxxx.py`
### الأمثلة
هنا مثال سريع باستخدام BERT و RoBERTa. النموذجان مرتبطان ارتباطًا وثيقًا: يختلف تنفيذهما النمذجي فقط في طبقة التضمينات.
بدلاً من إعادة تعريف النموذج بالكامل، إليك كيف يبدو ملف `modular_roberta.py` لفئات النمذجة والتكوين (لأغراض المثال، يتم تجاهل المجزىء اللغوي في هذا الوقت حيث أنه مختلف جدًا).
```python
from torch import nn
from ..bert.configuration_bert import BertConfig
from ..bert.modeling_bert import (
    BertModel,
    BertEmbeddings,
    BertForMaskedLM
)

# تكوين RoBERTa مطابق لتكوين BERT
class RobertaConfig(BertConfig):
    model_type = 'roberta'

# نعيد تعريف الإضافات هنا لتسليط الضوء على اختلاف معرف الحشو، ونعيد تعريف الإضافات الموضعية
class RobertaEmbeddings(BertEmbeddings):
    def __init__(self, config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

# نموذج RoBERTa مطابق لنموذج BERT، باستثناء طبقة الإضافات.
# نعيد تعريف الإضافات أعلاه، لذا هنا لا توجد حاجة لعمل إضافي
class RobertaModel(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)

# الرؤوس الآن تحتاج فقط إلى إعادة تعريف النموذج داخل `RobertaModel` الصحيح
class RobertaForMaskedLM(BertForMaskedLM):
    def __init__(self, config):
        super().__init__(config)
        self.model = RobertaModel(config)
```
لاحظ أنه إذا لم تستخدم التبعية التي عرّفتها، فستحصل على الخطأ التالي:
```bash
ValueError: You defined `RobertaEmbeddings` in the modular_roberta.py, it should be used
when you define `BertModel`, as it is one of it's direct dependencies. Make sure
you use it in the `__init__` function.
```
بالإضافة إلى ذلك، قد تجد قائمة بالأمثلة هنا:
## ما هو ليس كذلك
هذه الميزة ليست بديلاً عن تعليمات النمذجة البرمجية (بعد؟)، وإذا لم يكن نموذجك مبنيًا على أي شيء آخر موجود من قبل، فيمكنك إضافة ملف `modeling` كالعادة.
## الاستخدام المتقدم
### إزالة السمات والوظائف
لإزالة السمات التي لا تستخدم في نموذجك النمطي، والتي لا تريد رؤيتها في النمذجة المفككة:
```python
class GemmaModel(LlamaModel):             |    class GemmaModel(PreTrainedModel):
    def __init__(self, config):           |        def __init__(self, config):
        super().__init__(config)          |            super().__init__(config)
        del self.embed_tokens             |            self.padding_idx = config.pad_token_id
                                          |            self.vocab_size = config.vocab_size
                                          |
                                          |            self.layers = nn.ModuleList(
                                          |                [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
                                          |            )
                                          |            self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
                                          |            self.rotary_emb = LlamaRotaryEmbedding(config=config)
                                          |            self.gradient_checkpointing = False
                                          |
                                          |            # Initialize weights and apply final processing
                                          |            self.post_init()
```
إذا قمت بالتحقق من `LlamaModel` الأصلي، فستجد `embed_tokens` الذي تمت إزالته هنا (كما هو متوقع!)
لإزالة دالة بطريقة مشابهة، كل ما تحتاجه هو كتابتها مع `raise AttributeError("")` لمحاكاة السلوك الذي تريده فعليًا عند إزالة دالة أصلية في بايثون.
```python
class GemmaTokenizer(LlamaTokenizer):
    ...

    def get_spm_processor(self):
        raise AttributeError("Not needed for Gemma")

    def unk_token_length(self):
        raise AttributeError("Not needed for Gemma")
```
### تعريف وظائف جديدة
إذا قمت بتعريف وظيفة جديدة في الملف `modular` لاستخدامها داخل فئة، على سبيل المثال
```python
def my_new_function(*args, **kwargs):
    # Do something here
    pass

class GemmaModel(LlamaModel):
    def forward(self, *args, **kwargs):
        # Call the function
        example = my_new_function(*args, **kwargs)
        # continue here
```
سيتم نسخ وظيفة `my_new_function` (وبشكل متكرر، أي وظائف أخرى جديدة يتم استدعاؤها في جسمها) تلقائيًا
في الملف الذي يتم استخدامه.
### استدعاء `super()`
قمنا مؤخرًا بشحن بعض الميزات التي تسمح لك بالانتقال من:
```python
class GemmaTokenizer(LlamaTokenizer, PretrainedTokenizerFast):    |    class GemmaModel(nn.Module):
    def __init__(self, eos_token="</s>"):                         |        def __init__(self):
        eos_token = AddedToken(eos_token)                         |            eos_token = AddedToken(eos_token)
        PretrainedTokenizerFast.__init__(self, eos_token)         |            super().__init__(eos_token)
```
هذا مفيد عندما لا تريد تفكيك استدعاء `super()`، وتريد التمييز بين أي استدعاء super init تقوم به!
### التسمية الخاصة
ندعم الآن أيضًا حالات خاصة مثل
```python
class GemmaVisionModel(CLIPModel):
    pass
```
حيث اسم فئة `GemmaVision` الخاصة بك ليس هو نفسه `Gemma` النمطي. هذا مفيد للغاية للنماذج المركبة.
| transformers/docs/source/ar/modular_transformers.md/0 | {
"file_path": "transformers/docs/source/ar/modular_transformers.md",
"repo_id": "transformers",
"token_count": 6710
} | 393 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# نمذجة اللغة المقنعة (Masked language modeling)
[[open-in-colab]]
<Youtube id="mqElG5QJWUg"/>
تتنبأ نمذجة اللغة المقنعة برمز مقنع في تسلسل، ويمكن للنموذج الانتباه إلى الرموز بشكل ثنائي الاتجاه. هذا
يعني أن النموذج لديه إمكانية الوصول الكاملة إلى الرموز الموجودة على اليسار واليمين. تعد نمذجة اللغة المقنعة ممتازة للمهام التي
تتطلب فهمًا سياقيًا جيدًا لتسلسل كامل. BERT هو مثال على نموذج لغة مقنع.
سيوضح لك هذا الدليل كيفية:
1. تكييف [DistilRoBERTa](https://huggingface.co/distilbert/distilroberta-base) على مجموعة فرعية [r/askscience](https://www.reddit.com/r/askscience/) من مجموعة بيانات [ELI5](https://huggingface.co/datasets/eli5).
2. استخدام نموذجك المُدرّب للاستدلال.
<Tip>
لمعرفة جميع البنى والنسخ المتوافقة مع هذه المهمة، نوصي بالتحقق من [صفحة المهمة](https://huggingface.co/tasks/fill-mask)
</Tip>
قبل أن تبدأ، تأكد من تثبيت جميع المكتبات الضرورية:
```bash
pip install transformers datasets evaluate
```
نحن نشجعك على تسجيل الدخول إلى حساب Hugging Face الخاص بك حتى تتمكن من تحميل ومشاركة نموذجك مع المجتمع. عندما تتم مطالبتك، أدخل رمزك لتسجيل الدخول:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
## تحميل مجموعة بيانات ELI5
ابدأ بتحميل أول 5000 مثال من مجموعة بيانات [ELI5-Category](https://huggingface.co/datasets/eli5_category) باستخدام مكتبة 🤗 Datasets. سيعطيك هذا فرصة للتجربة والتأكد من أن كل شيء يعمل قبل قضاء المزيد من الوقت في التدريب على مجموعة البيانات الكاملة.
```py
>>> from datasets import load_dataset
>>> eli5 = load_dataset("eli5_category", split="train[:5000]")
```
قم بتقسيم مجموعة البيانات `train` إلى مجموعتي تدريب واختبار باستخدام الدالة [`~datasets.Dataset.train_test_split`]:
```py
>>> eli5 = eli5.train_test_split(test_size=0.2)
```
ثم ألق نظرة على مثال:
```py
>>> eli5["train"][0]
{'q_id': '7h191n',
'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?',
'selftext': '',
'category': 'Economics',
'subreddit': 'explainlikeimfive',
'answers': {'a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'],
'text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.",
'None yet. It has to be reconciled with a vastly different house bill and then passed again.',
'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?',
'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'],
'score': [21, 19, 5, 3],
'text_urls': [[],
[],
[],
['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']]},
'title_urls': ['url'],
'selftext_urls': ['url']}
```
على الرغم من أن هذا قد يبدو كثيرًا، إلا أنك مهتم حقًا بحقل `text`. ما هو رائع حول مهام نمذجة اللغة هو أنك لا تحتاج إلى تسميات (تُعرف أيضًا باسم المهمة غير الخاضعة للإشراف) لأن الكلمة التالية *هي* التسمية.
## معالجة مسبقة (Preprocess)
<Youtube id="8PmhEIXhBvI"/>
بالنسبة لنمذجة اللغة المقنعة، فإن الخطوة التالية هي تحميل معالج DistilRoBERTa لمعالجة حقل `text` الفرعي:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilroberta-base")
```
ستلاحظ من المثال أعلاه، أن حقل `text` موجود بالفعل داخل `answers`. هذا يعني أنك ستحتاج إلى استخراج حقل `text` الفرعي من بنيته المضمنة باستخدام الدالة [`flatten`](https://huggingface.co/docs/datasets/process#flatten):
```py
>>> eli5 = eli5.flatten()
>>> eli5["train"][0]
{'q_id': '7h191n',
'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?',
'selftext': '',
'category': 'Economics',
'subreddit': 'explainlikeimfive',
'answers.a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'],
'answers.text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.",
'None yet. It has to be reconciled with a vastly different house bill and then passed again.',
'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?',
'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'],
'answers.score': [21, 19, 5, 3],
'answers.text_urls': [[],
[],
[],
['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']],
'title_urls': ['url'],
'selftext_urls': ['url']}
```
كل حقل فرعي هو الآن عمود منفصل كما هو موضح بواسطة بادئة `answers`، وحقل `text` هو قائمة الآن. بدلاً من
معالجة كل جملة بشكل منفصل، قم بتحويل القائمة إلى سلسلة حتى تتمكن من معالجتها بشكل مشترك.
هنا أول دالة معالجة مسبقة لربط قائمة السلاسل لكل مثال ومعالجة النتيجة:
```py
>>> def preprocess_function(examples):
...     return tokenizer([" ".join(x) for x in examples["answers.text"]])
```
لتطبيق دالة المعالجة المسبقة على مجموعة البيانات بأكملها، استخدم الدالة 🤗 Datasets [`~datasets.Dataset.map`]. يمكنك تسريع دالة `map` عن طريق تعيين `batched=True` لمعالجة عدة عناصر في وقت واحد، وزيادة عدد العمليات باستخدام `num_proc`. احذف أي أعمدة غير ضرورية:
```py
>>> tokenized_eli5 = eli5.map(
...     preprocess_function,
...     batched=True,
...     num_proc=4,
...     remove_columns=eli5["train"].column_names,
... )
```
تحتوي مجموعة البيانات هذه على تسلسلات رمزية، ولكن بعضها أطول من الطول الأقصى للمدخلات للنموذج.
يمكنك الآن استخدام دالة معالجة مسبقة ثانية لـ:
- تجميع جميع التسلسلات
- تقسيم التسلسلات المجمّعة إلى أجزاء أقصر محددة بـ `block_size`، والتي يجب أن تكون أقصر من الحد الأقصى لطول المدخلات ومناسبة لذاكرة GPU.
```py
>>> block_size = 128
>>> def group_texts(examples):
...     # تجميع جميع النصوص.
...     concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
...     total_length = len(concatenated_examples[list(examples.keys())[0]])
...     # نتجاهل الجزء المتبقي الصغير، يمكننا إضافة الحشو إذا كان النموذج يدعمه بدلاً من هذا الإسقاط، يمكنك
...     # تخصيص هذا الجزء حسب احتياجاتك.
...     if total_length >= block_size:
...         total_length = (total_length // block_size) * block_size
...     # تقسيمها إلى أجزاء بحجم block_size.
...     result = {
...         k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
...         for k, t in concatenated_examples.items()
...     }
...     return result
```
طبق دالة `group_texts` على مجموعة البيانات بأكملها:
```py
>>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4)
```
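للتحقق سريعًا من أن التقسيم يعمل كما هو متوقع، يمكنك فحص أطوال الأمثلة الناتجة؛ هذا فحص توضيحي اختياري فقط:
```py
>>> # يجب أن يحتوي كل جزء على block_size رمزًا بالضبط لأن الجزء المتبقي يتم إسقاطه
>>> print([len(x) for x in lm_dataset["train"]["input_ids"][:5]])
[128, 128, 128, 128, 128]
```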
الآن، قم بإنشاء دفعة من الأمثلة باستخدام [`DataCollatorForLanguageModeling`]. من الأكثر كفاءة أن تقوم بـ *الحشو الديناميكي* ليصل طولها إلى أطول جملة في الدفعة أثناء التجميع، بدلاً من حشو مجموعة البيانات بأكملها إلى الطول الأقصى.
<frameworkcontent>
<pt>
استخدم رمز نهاية التسلسل كرمز الحشو وحدد `mlm_probability` لحجب الرموز عشوائياً كل مرة تكرر فيها البيانات:
```py
>>> from transformers import DataCollatorForLanguageModeling
>>> tokenizer.pad_token = tokenizer.eos_token
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)
```
</pt>
<tf>
استخدم رمز نهاية التسلسل كرمز الحشو وحدد `mlm_probability` لحجب الرموز عشوائياً كل مرة تكرر فيها البيانات:
```py
>>> from transformers import DataCollatorForLanguageModeling
>>> tokenizer.pad_token = tokenizer.eos_token
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15, return_tensors="tf")
```
</tf>
</frameworkcontent>
## التدريب (Train)
<frameworkcontent>
<pt>
<Tip>
إذا لم تكن على دراية بتعديل نموذج باستخدام [`Trainer`], ألق نظرة على الدليل الأساسي [هنا](../training#train-with-pytorch-trainer)!
</Tip>
أنت مستعد الآن لبدء تدريب نموذجك! قم بتحميل DistilRoBERTa باستخدام [`AutoModelForMaskedLM`]:
```py
>>> from transformers import AutoModelForMaskedLM
>>> model = AutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base")
```
في هذه المرحلة، تبقى ثلاث خطوات فقط:
1. حدد معلمات التدريب الخاصة بك في [`TrainingArguments`]. المعلمة الوحيدة المطلوبة هي `output_dir` والتي تحدد مكان حفظ نموذجك. ستقوم بدفع هذا النموذج إلى Hub عن طريق تعيين `push_to_hub=True` (يجب أن تكون مسجلاً الدخول إلى Hugging Face لتحميل نموذجك).
2. قم بتمرير معلمات التدريب إلى [`Trainer`] مع النموذج، ومجموعات البيانات، ومجمّع البيانات.
3. قم باستدعاء [`~Trainer.train`] لتعديل نموذجك.
```py
>>> from transformers import TrainingArguments, Trainer

>>> training_args = TrainingArguments(
...     output_dir="my_awesome_eli5_mlm_model",
...     eval_strategy="epoch",
...     learning_rate=2e-5,
...     num_train_epochs=3,
...     weight_decay=0.01,
...     push_to_hub=True,
... )

>>> trainer = Trainer(
...     model=model,
...     args=training_args,
...     train_dataset=lm_dataset["train"],
...     eval_dataset=lm_dataset["test"],
...     data_collator=data_collator,
...     tokenizer=tokenizer,
... )
>>> trainer.train()
```
بمجرد اكتمال التدريب، استخدم طريقة [`~transformers.Trainer.evaluate`] لتقييم النموذج والحصول على مقياس
الحيرة:
```py
>>> import math
>>> eval_results = trainer.evaluate()
>>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
Perplexity: 8.76
```
ثم شارك نموذجك على Hub باستخدام طريقة [`~transformers.Trainer.push_to_hub`] حتى يتمكن الجميع من استخدام نموذجك:
```py
>>> trainer.push_to_hub()
```
</pt>
<tf>
<Tip>
إذا لم تكن على دراية بتعديل نموذج باستخدام Keras، ألق نظرة على الدليل الأساسي [هنا](../training#train-a-tensorflow-model-with-keras)!
</Tip>
لتعديل نموذج في TensorFlow، ابدأ بإعداد دالة محسن، وجدول معدل التعلم، وبعض معلمات التدريب:
```py
>>> from transformers import create_optimizer, AdamWeightDecay
>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)
```
ثم يمكنك تحميل DistilRoBERTa باستخدام [`TFAutoModelForMaskedLM`]:
```py
>>> from transformers import TFAutoModelForMaskedLM
>>> model = TFAutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base")
```
قم بتحويل مجموعات بياناتك إلى تنسيق `tf.data.Dataset` باستخدام [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
```py
>>> tf_train_set = model.prepare_tf_dataset(
... lm_dataset["train"],
... shuffle=True,
... batch_size=16,
... collate_fn=data_collator,
... )
>>> tf_test_set = model.prepare_tf_dataset(
... lm_dataset["test"],
... shuffle=False,
... batch_size=16,
... collate_fn=data_collator,
... )
```
قم بتهيئة النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). لاحظ أن نماذج Transformers لديها جميعها دالة خسارة افتراضية ذات صلة بالمهمة، لذلك لا تحتاج إلى تحديد واحدة ما لم تكن تريد ذلك:
```py
>>> import tensorflow as tf
>>> model.compile(optimizer=optimizer)  # لا حاجة لتمرير دالة خسارة!
```
يمكنك دفع نموذجك والمجزىء اللغوي إلى Hub أثناء التدريب عن طريق تحديد وجهة الدفع في [`~transformers.PushToHubCallback`]:
```py
>>> from transformers.keras_callbacks import PushToHubCallback
>>> callback = PushToHubCallback(
...     output_dir="my_awesome_eli5_mlm_model",
...     tokenizer=tokenizer,
... )
```
أخيراً، أنت مستعد لبدء تدريب نموذجك! قم باستدعاء [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع مجموعات بيانات التدريب والتحقق، وعدد الحقب (epochs)، ودوال الاستدعاء (callbacks) الخاصة بك لتعديل النموذج:
```py
>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback])
```
بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائياً إلى Hub حتى يتمكن الجميع من استخدامه!
</tf>
</frameworkcontent>
<Tip>
لمثال أكثر تفصيلاً حول كيفية تعديل نموذج للنمذجة اللغوية المقنعة، ألق نظرة على الدفتر المقابل
[دفتر PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)
أو [دفتر TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
</Tip>
## الاستدلال
رائع، الآن بعد أن قمت بتعديل نموذج، يمكنك استخدامه للاستدلال!
جهّز بعض النصوص التي تريد أن يملأ النموذج الفراغات فيها، واستخدم الرمز الخاص `<mask>` للإشارة إلى الفراغ:
```py
>>> text = "The Milky Way is a <mask> galaxy."
```
أبسط طريقة لتجربة نموذجك المعدل للاستدلال هي استخدامه في [`pipeline`]. قم بإنشاء كائن `pipeline` لملء الفراغ مع نموذجك، ومرر نصك إليه. إذا أردت، يمكنك استخدام معلمة `top_k` لتحديد عدد التنبؤات التي تريد إرجاعها:
```py
>>> from transformers import pipeline
>>> mask_filler = pipeline("fill-mask", "username/my_awesome_eli5_mlm_model")
>>> mask_filler(text, top_k=3)
[{'score': 0.5150994658470154,
'token': 21300,
'token_str': ' spiral',
'sequence': 'The Milky Way is a spiral galaxy.'},
{'score': 0.07087188959121704,
'token': 2232,
'token_str': ' massive',
'sequence': 'The Milky Way is a massive galaxy.'},
{'score': 0.06434620916843414,
'token': 650,
'token_str': ' small',
'sequence': 'The Milky Way is a small galaxy.'}]
```
<frameworkcontent>
<pt>
قم بتجزئة النص وإرجاع `input_ids` كمتجهات PyTorch. ستحتاج أيضًا إلى تحديد موضع رمز `<mask>`:
```py
>>> import torch
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_mlm_model")
>>> inputs = tokenizer(text, return_tensors="pt")
>>> mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1]
```
قم بتمرير المدخلات إلى النموذج وإرجاع `logits` للرمز المقنع:
```py
>>> from transformers import AutoModelForMaskedLM
>>> model = AutoModelForMaskedLM.from_pretrained("username/my_awesome_eli5_mlm_model")
>>> logits = model(**inputs).logits
>>> mask_token_logits = logits[0, mask_token_index, :]
```
ثم قم بإرجاع الرموز الثلاثة المقنعة ذات الاحتمالية الأعلى وطباعتها:
```py
>>> top_3_tokens = torch.topk(mask_token_logits, 3, dim=1).indices[0].tolist()
>>> for token in top_3_tokens:
...     print(text.replace(tokenizer.mask_token, tokenizer.decode([token])))
The Milky Way is a spiral galaxy.
The Milky Way is a massive galaxy.
The Milky Way is a small galaxy.
```
</pt>
<tf>
قم بتقسيم النص إلى رموز وإرجاع `input_ids` كـ TensorFlow tensors. ستحتاج أيضًا إلى تحديد موضع رمز `<mask>`:
```py
>>> import tensorflow as tf
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_mlm_model")
>>> inputs = tokenizer(text, return_tensors="tf")
>>> mask_token_index = tf.where(inputs["input_ids"] == tokenizer.mask_token_id)[0, 1]
```
قم بتمرير المدخلات إلى النموذج وإرجاع `logits` للرمز المقنع:
```py
>>> from transformers import TFAutoModelForMaskedLM
>>> model = TFAutoModelForMaskedLM.from_pretrained("username/my_awesome_eli5_mlm_model")
>>> logits = model(**inputs).logits
>>> mask_token_logits = logits[0, mask_token_index, :]
```
ثم قم بإرجاع الرموز الثلاثة المقنعة ذات الاحتمالية الأعلى وطباعتها:
```py
>>> top_3_tokens = tf.math.top_k(mask_token_logits, 3).indices.numpy()
>>> for token in top_3_tokens:
...     print(text.replace(tokenizer.mask_token, tokenizer.decode([token])))
The Milky Way is a spiral galaxy.
The Milky Way is a massive galaxy.
The Milky Way is a small galaxy.
```
</tf>
</frameworkcontent> | transformers/docs/source/ar/tasks/masked_language_modeling.md/0 | {
"file_path": "transformers/docs/source/ar/tasks/masked_language_modeling.md",
"repo_id": "transformers",
"token_count": 9368
} | 394 |
- sections:
- local: index
title: 🤗 Transformers
- local: quicktour
title: Schnellstart
- local: installation
title: Installation
title: Erste Schritte
- sections:
- local: pipeline_tutorial
title: Pipelines für Inferenzen
- local: autoclass_tutorial
title: Laden von vortrainierten Instanzen mit einer AutoClass
- local: preprocessing
title: Vorverarbeiten
- local: training
title: Optimierung eines vortrainierten Modells
- local: run_scripts
title: Trainieren mit einem Skript
- local: accelerate
title: Verteiltes Training mit 🤗 Accelerate
- local: peft
title: Laden und Trainieren von Adaptern mit 🤗 PEFT
- local: model_sharing
title: Ein Modell teilen
- local: llm_tutorial
title: Generation with LLMs
title: Tutorials
- sections:
- local: contributing
title: Wie kann man zu 🤗 Transformers beitragen?
- local: add_new_model
title: Wie fügt man ein Modell zu 🤗 Transformers hinzu?
- local: add_new_pipeline
title: Wie fügt man eine Pipeline zu 🤗 Transformers hinzu?
- local: testing
title: Testen
- local: pr_checks
title: Überprüfung einer Pull Request
title: Contribute
| transformers/docs/source/de/_toctree.yml/0 | {
"file_path": "transformers/docs/source/de/_toctree.yml",
"repo_id": "transformers",
"token_count": 430
} | 395 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Testen
Werfen wir einen Blick darauf, wie 🤗 Transformers-Modelle getestet werden und wie Sie neue Tests schreiben und die vorhandenen verbessern können.
Es gibt 2 Testsuiten im Repository:
1. `tests` -- Tests für die allgemeine API
2. `examples` -- Tests hauptsächlich für verschiedene Anwendungen, die nicht Teil der API sind
## Wie Transformatoren getestet werden
1. Sobald ein PR eingereicht wurde, wird er mit 9 CircleCi Jobs getestet. Jeder neue Commit zu diesem PR wird erneut getestet. Diese Aufträge
sind in dieser [Konfigurationsdatei](https://github.com/huggingface/transformers/tree/main/.circleci/config.yml) definiert, so dass Sie bei Bedarf die gleiche
Umgebung auf Ihrem Rechner reproduzieren können.
Diese CI-Jobs führen keine `@slow`-Tests durch.
2. Es gibt 3 Jobs, die von [github actions](https://github.com/huggingface/transformers/actions) ausgeführt werden:
- [torch hub integration](https://github.com/huggingface/transformers/tree/main/.github/workflows/github-torch-hub.yml): prüft, ob die torch hub
Integration funktioniert.
- [self-hosted (push)](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-push.yml): führt schnelle Tests auf der GPU nur bei Commits auf
`main`. Es wird nur ausgeführt, wenn ein Commit auf `main` den Code in einem der folgenden Ordner aktualisiert hat: `src`,
`tests`, `.github` (um zu verhindern, dass er auf hinzugefügten Modellkarten, Notebooks usw. läuft)
- [self-hosted runner](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-scheduled.yml): führt normale und langsame Tests auf GPU in
`tests` und `examples`:
```bash
RUN_SLOW=1 pytest tests/
RUN_SLOW=1 pytest examples/
```
Die Ergebnisse können Sie [hier](https://github.com/huggingface/transformers/actions) sehen.
## Tests ausführen
### Auswahl der auszuführenden Tests
In diesem Dokument wird ausführlich erläutert, wie Tests ausgeführt werden können. Wenn Sie nach der Lektüre noch mehr Details benötigen,
finden Sie diese [hier](https://docs.pytest.org/en/latest/usage.html).
Hier sind einige der nützlichsten Möglichkeiten, Tests auszuführen.
Alle ausführen:
```console
pytest
```
oder:
```bash
make test
```
Beachten Sie, dass Letzteres wie folgt definiert ist:
```bash
python -m pytest -n auto --dist=loadfile -s -v ./tests/
```
was pytest anweist:
- so viele Testprozesse laufen zu lassen, wie es CPU-Kerne gibt (was zu viele sein könnten, wenn Sie nicht über eine Menge RAM verfügen!)
- sicherzustellen, dass alle Tests aus derselben Datei von demselben Testprozess ausgeführt werden
- Erfassen Sie keine Ausgaben
- im ausführlichen Modus laufen lassen
### Abrufen der Liste aller Tests
Alle Tests der Testsuite:
```bash
pytest --collect-only -q
```
Alle Tests einer bestimmten Testdatei:
```bash
pytest tests/test_optimization.py --collect-only -q
```
### Führen Sie ein bestimmtes Testmodul aus
Um ein einzelnes Testmodul auszuführen:
```bash
pytest tests/utils/test_logging.py
```
### Spezifische Tests ausführen
Da in den meisten Tests `unittest` verwendet wird, müssen Sie, um bestimmte Untertests auszuführen, den Namen der unittest-Klasse kennen,
die diese Tests enthält. Der Aufruf könnte zum Beispiel lauten:
```bash
pytest tests/test_optimization.py::OptimizationTest::test_adam_w
```
Hier:
- `tests/test_optimization.py` - die Datei mit den Tests
- `OptimizationTest` - der Name der Klasse
- `test_adam_w` - der Name der spezifischen Testfunktion
Wenn die Datei mehrere Klassen enthält, können Sie auswählen, dass nur die Tests einer bestimmten Klasse ausgeführt werden sollen. Zum Beispiel:
```bash
pytest tests/test_optimization.py::OptimizationTest
```
führt alle Tests innerhalb dieser Klasse aus.
Wie bereits erwähnt, können Sie sehen, welche Tests in der Klasse `OptimizationTest` enthalten sind, indem Sie sie ausführen:
```bash
pytest tests/test_optimization.py::OptimizationTest --collect-only -q
```
Sie können Tests mit Hilfe von Schlüsselwortausdrücken ausführen.
Um nur Tests auszuführen, deren Name `adam` enthält:
```bash
pytest -k adam tests/test_optimization.py
```
Die logischen Operatoren `and` und `or` können verwendet werden, um anzugeben, ob alle Schlüsselwörter übereinstimmen sollen oder nur eines. `not` kann verwendet werden, um
eine Bedingung zu negieren.
Um alle Tests auszuführen, außer denen, deren Name `adam` enthält:
```bash
pytest -k "not adam" tests/test_optimization.py
```
Und Sie können die beiden Muster in einem kombinieren:
```bash
pytest -k "ada and not adam" tests/test_optimization.py
```
Um zum Beispiel sowohl `test_adafactor` als auch `test_adam_w` auszuführen, können Sie verwenden:
```bash
pytest -k "test_adam_w or test_adam_w" tests/test_optimization.py
```
Beachten Sie, dass wir hier `oder` verwenden, da wir wollen, dass eines der Schlüsselwörter übereinstimmt, um beide einzuschließen.
Wenn Sie nur Tests einschließen möchten, die beide Muster enthalten, müssen Sie `und` verwenden:
```bash
pytest -k "test and ada" tests/test_optimization.py
```
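Zur Veranschaulichung: Bei einer (hypothetischen) Testdatei wie der folgenden würde `pytest -k "ada and not adam"` nur `test_adafactor` auswählen, während `-k "test_adafactor or test_adam_w"` beide Tests ausführt:
```python
# Hypothetisches Beispiel, angelehnt an tests/test_optimization.py
import unittest

class OptimizationTest(unittest.TestCase):
    def test_adam_w(self):
        # wird von "not adam" ausgeschlossen, da der Name "adam" enthält
        self.assertTrue(True)

    def test_adafactor(self):
        # enthält "ada", aber nicht "adam" -> wird von "ada and not adam" ausgewählt
        self.assertTrue(True)
```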
### Führen Sie `accelerate` Tests durch
Manchmal müssen Sie `accelerate`-Tests für Ihre Modelle ausführen. Um diese Tests zum Beispiel für `OPT` auszuführen, fügen Sie einfach `-m accelerate_tests` zu Ihrem Befehl hinzu:
```bash
RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
```
### Dokumentationstests ausführen
Um zu testen, ob die Dokumentationsbeispiele korrekt sind, sollten Sie überprüfen, ob die `doctests` erfolgreich sind.
Lassen Sie uns als Beispiel den docstring von [WhisperModel.forward](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035) verwenden:
```python
r"""
Returns:
Example:
```python
>>> import torch
>>> from transformers import WhisperModel, WhisperFeatureExtractor
>>> from datasets import load_dataset
>>> model = WhisperModel.from_pretrained("openai/whisper-base")
>>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_features = inputs.input_features
>>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
>>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
>>> list(last_hidden_state.shape)
[1, 2, 512]
```"""
```
Führen Sie einfach die folgende Zeile aus, um automatisch jedes docstring-Beispiel in der gewünschten Datei zu testen:
```bash
pytest --doctest-modules <path_to_file_or_dir>
```
Wenn die Datei eine Markdown-Erweiterung hat, sollten Sie das Argument `--doctest-glob="*.md"` hinzufügen.
### Nur geänderte Tests ausführen
Mit [pytest-picked](https://github.com/anapaulagomes/pytest-picked) können Sie die Tests ausführen, die sich auf die unstaged Dateien oder den aktuellen Zweig (gemäß Git) beziehen. Auf diese Weise können Sie schnell testen, ob Ihre Änderungen
nichts kaputt gemacht haben, da die Tests für Dateien, die Sie nicht verändert haben, nicht ausgeführt werden.
```bash
pip install pytest-picked
```
```bash
pytest --picked
```
Alle Tests werden von Dateien und Ordnern ausgeführt, die geändert, aber noch nicht übergeben wurden.
### Fehlgeschlagene Tests bei Änderung der Quelle automatisch wiederholen
[pytest-xdist](https://github.com/pytest-dev/pytest-xdist) bietet eine sehr nützliche Funktion, um alle fehlgeschlagenen
Tests zu erkennen und dann auf Dateiänderungen zu warten, um die fehlgeschlagenen Tests so lange zu wiederholen, bis sie erfolgreich sind, während Sie
sie reparieren. So müssen Sie pytest nicht erneut starten, nachdem Sie die Korrektur vorgenommen haben. Dies wird so lange wiederholt, bis alle Tests bestanden sind.
Danach wird erneut ein vollständiger Durchlauf durchgeführt.
```bash
pip install pytest-xdist
```
So rufen Sie den Modus auf: `pytest -f` oder `pytest --looponfail`
Dateiänderungen werden erkannt, indem die Wurzelverzeichnisse von `looponfailroots` und alle ihre Inhalte (rekursiv) untersucht werden.
Wenn die Vorgabe für diesen Wert für Sie nicht funktioniert, können Sie ihn in Ihrem Projekt ändern, indem Sie eine Konfigurationsoption
in der Datei `setup.cfg` setzen:
```ini
[tool:pytest]
looponfailroots = transformers tests
```
oder in den Dateien `pytest.ini`/`tox.ini`:
```ini
[pytest]
looponfailroots = transformers tests
```
Dies würde dazu führen, dass nur nach Dateiänderungen in den jeweiligen Verzeichnissen gesucht wird, die relativ zum Verzeichnis der ini-Datei angegeben sind.
[pytest-watch](https://github.com/joeyespo/pytest-watch) ist eine alternative Implementierung dieser Funktionalität.
### Überspringen eines Testmoduls
Wenn Sie alle Testmodule ausführen möchten, mit Ausnahme einiger weniger, können Sie diese ausschließen, indem Sie eine explizite Liste der auszuführenden Tests angeben. Zum Beispiel, um alle Tests außer `test_modeling_*.py` auszuführen:
```bash
pytest $(ls -1 tests/*py | grep -v test_modeling)
```
### Status leeren
Bei CI-Builds und wenn Isolation wichtig ist (auf Kosten der Geschwindigkeit), sollte der Cache geleert werden:
```bash
pytest --cache-clear tests
```
### Tests parallel ausführen
Wie bereits erwähnt, führt `make test` über das Plugin `pytest-xdist` Tests parallel aus (Argument `-n X`, z.B. `-n 2`
um 2 Jobs parallel laufen zu lassen).
Mit der Option `--dist=` von `pytest-xdist` können Sie steuern, wie die Tests gruppiert werden. Mit `--dist=loadfile` werden die
Tests, die sich in einer Datei befinden, demselben Prozess zugewiesen.
Da die Reihenfolge der ausgeführten Tests unterschiedlich und nicht vorhersehbar ist, kann die Ausführung der Testsuite mit `pytest-xdist`
zu Fehlern führen (was bedeutet, dass wir einige unentdeckte gekoppelte Tests haben). Verwenden Sie in diesem Fall [pytest-replay](https://github.com/ESSS/pytest-replay), um die Tests in der gleichen Reihenfolge abzuspielen, was dabei helfen sollte,
diese fehlgeschlagene Sequenz auf ein Minimum zu reduzieren.
### Testreihenfolge und Wiederholung
Es ist gut, die Tests mehrmals zu wiederholen, nacheinander, zufällig oder in Gruppen, um mögliche
Abhängigkeiten und zustandsbezogene Fehler (Teardown) zu erkennen. Und die einfache, mehrfache Wiederholung ist gut geeignet, um
einige Probleme aufzudecken, die durch die Zufälligkeit von Deep Learning zutage treten.
#### Wiederholungstests
- [pytest-flakefinder](https://github.com/dropbox/pytest-flakefinder):
```bash
pip install pytest-flakefinder
```
Und führen Sie dann jeden Test mehrmals durch (standardmäßig 50):
```bash
pytest --flake-finder --flake-runs=5 tests/test_failing_test.py
```
<Tip>
Dieses Plugin funktioniert nicht mit dem `-n` Flag von `pytest-xdist`.
</Tip>
<Tip>
Es gibt noch ein anderes Plugin `pytest-repeat`, aber es funktioniert nicht mit `unittest`.
</Tip>
#### Tests in zufälliger Reihenfolge ausführen
```bash
pip install pytest-random-order
```
Wichtig: Das Vorhandensein von `pytest-random-order` sorgt für eine automatische Zufallsanordnung der Tests; es sind keine
Konfigurationsänderungen oder Befehlszeilenoptionen erforderlich.
Wie bereits erläutert, ermöglicht dies die Erkennung von gekoppelten Tests - bei denen der Zustand eines Tests den Zustand eines anderen beeinflusst. Wenn
`pytest-random-order` installiert ist, gibt es den Zufallswert aus, der für diese Sitzung verwendet wurde, z.B:
```bash
pytest tests
[...]
Using --random-order-bucket=module
Using --random-order-seed=573663
```
If a particular sequence fails, you can reproduce it by adding that exact seed, e.g.:
```bash
pytest --random-order-seed=573663
[...]
Using --random-order-bucket=module
Using --random-order-seed=573663
```
It will only reproduce the exact order if you use the exact same list of tests (or no list at all). Once you start
manually narrowing down the list, you can no longer rely on the seed; instead you have to list the tests manually in the
exact order they failed and tell pytest not to randomize them, using `--random-order-bucket=none`, e.g.:
```bash
pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.py
```
To disable the shuffling for all tests:
```bash
pytest --random-order-bucket=none
```
By default `--random-order-bucket=module` is implied, which shuffles the files at the module level. It can also
shuffle at the `class`, `package`, `global` and `none` levels. For the complete details please see its
[documentation](https://github.com/jbasko/pytest-random-order).
Another randomization alternative is [`pytest-randomly`](https://github.com/pytest-dev/pytest-randomly). This
module has very similar functionality/interface, but it doesn't have the bucket modes available in
`pytest-random-order`. It has the same problem of imposing itself once installed.
### Look and feel variations
#### pytest-sugar
[pytest-sugar](https://github.com/Frozenball/pytest-sugar) is a plugin that improves the look and feel, adds a
progress bar, and shows failing tests and the assertion instantly. It is activated automatically upon installation.
```bash
pip install pytest-sugar
```
To run tests without it, run:
```bash
pytest -p no:sugar
```
or uninstall it.
#### Report each sub-test name and its progress
For a single or a group of tests via `pytest` (after `pip install pytest-pspec`):
```bash
pytest --pspec tests/test_optimization.py
```
#### Instantly show failed tests
[pytest-instafail](https://github.com/pytest-dev/pytest-instafail) shows failures and errors instantly instead of
waiting until the end of the test session.
```bash
pip install pytest-instafail
```
```bash
pytest --instafail
```
### To GPU or not to GPU
On a GPU-enabled setup, to test in CPU-only mode add `CUDA_VISIBLE_DEVICES=""`:
```bash
CUDA_VISIBLE_DEVICES="" pytest tests/utils/test_logging.py
```
or if you have multiple GPUs, you can specify which one `pytest` should use. For example, to use only the
second GPU if you have GPUs `0` and `1`, you can run:
```bash
CUDA_VISIBLE_DEVICES="1" pytest tests/utils/test_logging.py
```
This is handy when you want to run different tasks on different GPUs, for example:
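A minimal sketch, with the two test paths chosen purely for illustration:
```bash
# run two independent test sub-sets concurrently, one per GPU
CUDA_VISIBLE_DEVICES=0 pytest tests/models/bert &
CUDA_VISIBLE_DEVICES=1 pytest tests/models/gpt2 &
wait  # block until both background jobs finish
```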
Some tests must be run on CPU only, others on either CPU or GPU or TPU, yet others on multiple GPUs. The following skip
decorators are used to set the requirements of tests CPU/GPU/TPU-wise:
- `require_torch` - this test will run only under torch
- `require_torch_gpu` - as `require_torch` plus requires at least 1 GPU
- `require_torch_multi_gpu` - as `require_torch` plus requires at least 2 GPUs
- `require_torch_non_multi_gpu` - as `require_torch` plus requires 0 or 1 GPUs
- `require_torch_up_to_2_gpus` - as `require_torch` plus requires 0 or 1 or 2 GPUs
- `require_torch_xla` - as `require_torch` plus requires at least 1 TPU
Let's depict the GPU requirements in the following table:
| n gpus | decorator |
|--------|--------------------------------|
| `>= 0` | `@require_torch` |
| `>= 1` | `@require_torch_gpu` |
| `>= 2` | `@require_torch_multi_gpu` |
| `< 2` | `@require_torch_non_multi_gpu` |
| `< 3` | `@require_torch_up_to_2_gpus` |
For example, here is a test that must be run only when there are 2 or more GPUs available and pytorch is installed:
```python no-style
@require_torch_multi_gpu
def test_example_with_multi_gpu():
```
These decorators can be stacked. For example, if a test is slow and requires at least one GPU under pytorch, here is
how to set it up:
```python no-style
@require_torch_gpu
@slow
def test_example_slow_on_gpu():
```
Some decorators like `@parameterized` rewrite test names, therefore `@require_*` skip decorators have to be listed
last for them to work correctly. Here is an example of the correct usage:
```python no-style
@parameterized.expand(...)
@require_torch_multi_gpu
def test_integration_foo():
```
This ordering problem doesn't exist with `@pytest.mark.parametrize`; you can put it first or last and it will still
work. But it only works with non-unittest tests.
Inside tests:
- How many GPUs are available:
```python
from transformers.testing_utils import get_gpu_count
n_gpu = get_gpu_count() # works with torch and tf
```
### Testing with a specific PyTorch backend or device
To run the test suite on a specific torch device, add `TRANSFORMERS_TEST_DEVICE="$device"` where `$device` is the target backend. For example, to test on CPU only:
```bash
TRANSFORMERS_TEST_DEVICE="cpu" pytest tests/utils/test_logging.py
```
This variable is useful for testing custom or less common PyTorch backends such as `mps`. It can also be used to achieve the same effect as `CUDA_VISIBLE_DEVICES` by targeting specific GPUs or testing in CPU-only mode.
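For example, assuming you are on a machine where the `mps` backend is available (e.g., Apple Silicon):
```bash
TRANSFORMERS_TEST_DEVICE="mps" pytest tests/utils/test_logging.py
```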
Certain devices require an additional import after importing `torch` for the first time. This can be specified using the environment variable `TRANSFORMERS_TEST_BACKEND`:
```bash
TRANSFORMERS_TEST_BACKEND="torch_npu" pytest tests/utils/test_logging.py
```
### Distributed training
`pytest` can't deal with distributed training directly. If this is attempted - the sub-processes don't do the right
thing and end up thinking they are `pytest` and start running the test suite in loops. It works, however, if one
spawns a normal process that then spawns off multiple workers and manages the IO pipes.
Here are some tests that use it:
- [test_trainer_distributed.py](https://github.com/huggingface/transformers/tree/main/tests/trainer/test_trainer_distributed.py)
- [test_deepspeed.py](https://github.com/huggingface/transformers/tree/main/tests/deepspeed/test_deepspeed.py)
To jump right into the execution point, search for the `execute_subprocess_async` call in those tests.
You will need at least 2 GPUs to see these tests in action:
```bash
CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py
```
### Output capture
During test execution, any output sent to `stdout` and `stderr` is captured. If a test or a setup method fails, its
corresponding captured output will usually be shown along with the failure traceback.
To disable output capturing and to get `stdout` and `stderr` normally, use `-s` or `--capture=no`:
```bash
pytest -s tests/utils/test_logging.py
```
To send test results to JUnit format output:
```bash
py.test tests --junitxml=result.xml
```
### Color control
To have no color (e.g., yellow on a white background is not readable):
```bash
pytest --color=no tests/utils/test_logging.py
```
### Sending test report to an online pastebin service
Creating a URL for each test failure:
```bash
pytest --pastebin=failed tests/utils/test_logging.py
```
This will submit test run information to a remote paste service and provide a URL for each failure. You may select
tests as usual, or add for example `-x` if you only want to send one particular failure.
Creating a URL for a whole test session log:
```bash
pytest --pastebin=all tests/utils/test_logging.py
```
## Writing tests
🤗 Transformers tests are based on `unittest` but are run by `pytest`, so most of the time features from both systems
can be used.
You can read [here](https://docs.pytest.org/en/stable/unittest.html) which features are supported, but the important
thing to remember is that most `pytest` fixtures don't work. Neither does parametrization, but we use the module
`parameterized`, which works in a similar way.
### Parametrization
Often there is a need to run the same test multiple times, but with different arguments. That could be done from within
the test, but then there would be no way to run that test for just one set of arguments.
```python
# test_this1.py
import math
import unittest

from parameterized import parameterized


class TestMathUnitTest(unittest.TestCase):
    @parameterized.expand(
        [
            ("negative", -1.5, -2.0),
            ("integer", 1, 1.0),
            ("large fraction", 1.6, 1),
        ]
    )
    def test_floor(self, name, input, expected):
        self.assertEqual(math.floor(input), expected)
```
Now, by default, this test will be run 3 times, each time with the last 3 arguments of `test_floor` being assigned
the corresponding arguments from the parameter list.
You could also run just the `negative` and `integer` sets of parameters with:
```bash
pytest -k "negative and integer" tests/test_mytest.py
```
or all but the `negative` sub-tests, with:
```bash
pytest -k "not negative" tests/test_mytest.py
```
Besides using the `-k` filter just mentioned, you can also find out the exact name of each sub-test and run any
or all of them using their exact names.
```bash
pytest test_this1.py --collect-only -q
```
and it will list:
```bash
test_this1.py::TestMathUnitTest::test_floor_0_negative
test_this1.py::TestMathUnitTest::test_floor_1_integer
test_this1.py::TestMathUnitTest::test_floor_2_large_fraction
```
So now you can run just 2 specific sub-tests:
```bash
pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer
```
The module [parameterized](https://pypi.org/project/parameterized/), which is already in the developer dependencies
of `transformers`, works for both `unittest` and `pytest` tests.
If, however, the test is not a `unittest`, you may use `pytest.mark.parametrize` (or you can see it being used in
some existing tests, mostly under `examples`).
Here is the same example, this time using `pytest`'s `parametrize` marker:
```python
# test_this2.py
import math

import pytest


@pytest.mark.parametrize(
    "name, input, expected",
    [
        ("negative", -1.5, -2.0),
        ("integer", 1, 1.0),
        ("large fraction", 1.6, 1),
    ],
)
def test_floor(name, input, expected):
    assert math.floor(input) == expected
```
Just as with `parameterized`, `pytest.mark.parametrize` gives you fine control over which sub-tests are run, if the
`-k` filter doesn't do the job. Except this parametrization function creates a slightly different set of names for
the sub-tests. Here is what they look like:
```bash
pytest test_this2.py --collect-only -q
```
and it will list:
```bash
test_this2.py::test_floor[integer-1-1.0]
test_this2.py::test_floor[negative--1.5--2.0]
test_this2.py::test_floor[large fraction-1.6-1]
```
So now you can run just the specific tests:
```bash
pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0]
```
as in the previous example.
### Files and directories
In tests, we often need to know where things are relative to the current test file, and that's not trivial, since the
test could be invoked from more than one directory or could reside in sub-directories of different depths. A helper class
`transformers.test_utils.TestCasePlus` solves this problem by sorting out all the basic paths and providing easy
accessors to them:
- `pathlib` objects (all fully resolved):
  - `test_file_path` - the current test file path, i.e. `__file__`
  - `test_file_dir` - the directory containing the current test file
  - `tests_dir` - the directory of the `tests` test suite
  - `examples_dir` - the directory of the `examples` test suite
  - `repo_root_dir` - the directory of the repository
  - `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths - same as above but these return paths as strings, rather than `pathlib` objects:
  - `test_file_path_str`
  - `test_file_dir_str`
  - `tests_dir_str`
  - `examples_dir_str`
  - `repo_root_dir_str`
  - `src_dir_str`
To start using these, all you need is to make sure that the test resides in a subclass of
`transformers.test_utils.TestCasePlus`. For example:
```python
from transformers.testing_utils import TestCasePlus
class PathExampleTest(TestCasePlus):
    def test_something_involving_local_locations(self):
        data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro"
```
If you don't need to manipulate paths via `pathlib`, or you just need a path as a string, you can always call
`str()` on the `pathlib` object or use the accessors ending with `_str`. For example:
```python
from transformers.testing_utils import TestCasePlus
class PathExampleTest(TestCasePlus):
    def test_something_involving_stringified_locations(self):
        examples_dir = self.examples_dir_str
```
### Temporary files and directories
Using unique temporary files and directories is essential for parallel test running, so that the tests don't overwrite
each other's data. We also want the temporary files and directories removed at the end of each test that created
them. Therefore, using packages like `tempfile`, which address these needs, is essential.
However, when debugging tests, you need to be able to see what goes into the temporary file or directory, and you want
to know its exact path and not have it randomized on every test re-run.
A helper class `transformers.test_utils.TestCasePlus` is best used for such purposes. It's a sub-class of
`unittest.TestCase`, so we can easily inherit from it in the test modules.
Here is an example of its usage:
```python
from transformers.testing_utils import TestCasePlus
class ExamplesTests(TestCasePlus):
    def test_whatever(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
```
This code creates a unique temporary directory and sets `tmp_dir` to its location.
- Create a unique temporary dir:
```python
def test_whatever(self):
    tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
- Create a temporary dir of my choice, ensure it's empty before the test starts, and don't empty it after the test.
```python
def test_whatever(self):
    tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debugging when you want to monitor a specific directory and want to make sure previous tests didn't
leave any data in there.
- You can override the default behavior by directly overriding the `before` and `after` arguments, leading to one of
the following behaviors (a combined sketch follows this list):
  - `before=True`: the temporary dir will always be cleared at the beginning of the test.
  - `before=False`: if the temporary dir already existed, any existing files will remain there.
  - `after=True`: the temporary dir will always be deleted at the end of the test.
  - `after=False`: the temporary dir will always be left intact at the end of the test.
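As a sketch combining these arguments (the directory name `./xxx` is just a placeholder):
```python
def test_whatever(self):
    # wipe "./xxx" before the test, but keep its contents afterwards for inspection
    tmp_dir = self.get_auto_remove_tmp_dir("./xxx", before=True, after=False)
```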
<Tip>
In order to run the equivalent of `rm -r` safely, only subdirectories of the project repository checkout are allowed if
an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem gets nuked.
I.e. please always pass paths that start with `./`.
</Tip>
<Tip>
Each test can register multiple temporary directories, and they will all be auto-removed, unless requested
otherwise.
</Tip>
### Temporary sys.path override
If you need to temporarily override `sys.path` to import from another test, for example, you can use the
`ExtendSysPath` context manager. Example:
```python
import os
from transformers.testing_utils import ExtendSysPath
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/.."):
    from test_trainer import TrainerIntegrationCommon  # noqa
```
### Skipping tests
This is useful when a bug is found and a new test is written, yet the bug is not fixed yet. In order to be able to
commit it to the main repository, we need to make sure it's skipped during `make test`.
Methods:
- A **skip** means that you expect your test to pass only if some conditions are met, otherwise pytest should skip
running the test altogether. Common examples are skipping windows-only tests on non-windows platforms, or skipping
tests that depend on an external resource which is not available at the moment (for example a database).
- An **xfail** means that you expect a test to fail for some reason. A common example is a test for a feature not yet
implemented, or a bug not yet fixed. When a test passes despite being expected to fail (marked with
pytest.mark.xfail), it's an xpass and will be reported in the test summary.
One of the important differences between the two is that `skip` doesn't run the test, while `xfail` does. So if the
buggy code causes some bad state that will affect other tests, do not use `xfail`.
#### Implementation
- Here is how to skip a whole test unconditionally:
```python no-style
@unittest.skip(reason="this bug needs to be fixed")
def test_feature_x():
```
or via pytest:
```python no-style
@pytest.mark.skip(reason="this bug needs to be fixed")
```
or the `xfail` way:
```python no-style
@pytest.mark.xfail
def test_feature_x():
```
- Here is how to skip a test based on an internal check inside the test:
```python
def test_feature_x():
    if not has_something():
        pytest.skip("unsupported configuration")
```
or the whole module:
```python
import pytest
if not pytest.config.getoption("--custom-flag"):
    pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True)
```
or the `xfail` way:
```python
def test_feature_x():
    pytest.xfail("expected to fail until bug XYZ is fixed")
```
- Here is how to skip all tests in a module if an import is missing:
```python
docutils = pytest.importorskip("docutils", minversion="0.3")
```
- Skip a test based on a condition:
```python no-style
@pytest.mark.skipif(sys.version_info < (3,6), reason="requires python3.6 or higher")
def test_feature_x():
```
or:
```python no-style
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_feature_x():
```
or skip the whole module:
```python no-style
@pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows")
class TestClass():
    def test_feature_x(self):
```
More details, examples and ways are [here](https://docs.pytest.org/en/latest/skipping.html).
### Slow tests
The library of tests is ever-growing, and some of the tests take minutes to run, so we can't afford waiting an hour
for the test suite to complete on CI. Therefore, with some exceptions for essential tests, slow tests should be marked
as in the example below:
```python no-style
from transformers.testing_utils import slow
@slow
def test_integration_foo():
```
Once a test is marked as `@slow`, to run such tests set the `RUN_SLOW=1` environment variable, e.g.:
```bash
RUN_SLOW=1 pytest tests
```
Some decorators like `@parameterized` rewrite test names, therefore `@slow` and the rest of the skip decorators
`@require_*` have to be listed last for them to work correctly. Here is an example of the correct usage:
```python no-style
@parameterized.expand(...)
@slow
def test_integration_foo():
```
As explained at the beginning of this document, slow tests run on a scheduled basis, rather than in PR CI
checks. So it's possible that some problems will be missed during a PR submission and get merged. Such problems will
get caught during the next scheduled CI job. But it also means that it's important to run the slow tests on your
machine before submitting the PR.
Here is a rough decision-making mechanism for choosing which tests should be marked as slow:
If the test is focused on one of the library's internal components (e.g., modeling files, tokenization files,
pipelines), then we should run that test in the non-slow test suite. If it's focused on another aspect of the library,
such as the documentation or the examples, then we should run these tests in the slow test suite. And then, to refine
this approach, we should have exceptions:
- All tests that need to download a heavy set of weights or a dataset larger than ~50MB (e.g., model or
tokenizer integration tests, pipeline integration tests) should be set to slow. If you're adding a new model, you
should create and upload to the hub a tiny version of it (with random weights) for integration tests. This is
discussed in the following paragraphs.
- All tests that need to do a training not specifically optimized to be fast should be set to slow.
- We can introduce exceptions if some of these should-be-non-slow tests are excruciatingly slow, and set them to
`@slow`. Auto-modeling tests, which save and load large files to disk, are a good example of tests that are marked
as `@slow`.
- If a test completes in under 1 second on CI (including downloads, if any), then it should be a normal test regardless.
Collectively, all the non-slow tests need to cover the different internals entirely, while remaining fast. For example,
significant coverage can be achieved by testing with specially created tiny models with random weights. Such models
have the very minimal number of layers (e.g., 2), vocab size (e.g., 1000), etc. Then the `@slow` tests can use large
and slow models to do qualitative testing. To see the use of these, simply look for *tiny* models with:
```bash
grep tiny tests examples
```
Here is an example of a [script](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py) that created the tiny model
[stas/tiny-wmt19-en-de](https://huggingface.co/stas/tiny-wmt19-en-de). You can easily adjust it to your specific
model's architecture.
It's easy to measure the run-time incorrectly if, for example, there is an overhead of downloading a huge model, but if
you test it locally the downloaded files would be cached and thus the download time not measured. Hence check the
execution speed report in CI logs instead (the output of `pytest --durations=0 tests`).
That report is also useful for finding slow outliers that aren't marked as such, or which need to be re-written to be fast.
If you notice that the test suite starts getting slow on CI, the top listing of this report will show the slowest
tests.
### Testing the stdout/stderr output
In order to test functions that write to `stdout` and/or `stderr`, the test can access those streams using pytest's
[capsys system](https://docs.pytest.org/en/latest/capture.html). Here is how this is accomplished:
```python
import sys
def print_to_stdout(s):
    print(s)


def print_to_stderr(s):
    sys.stderr.write(s)


def test_result_and_stdout(capsys):
    msg = "Hello"
    print_to_stdout(msg)
    print_to_stderr(msg)
    out, err = capsys.readouterr()  # consume the captured output streams
    # optional: if you want to replay the consumed streams:
    sys.stdout.write(out)
    sys.stderr.write(err)
    # test:
    assert msg in out
    assert msg in err
```
And, of course, most of the time `stderr` will come as part of an exception, so try/except has to be used in such a
case:
```python
def raise_exception(msg):
    raise ValueError(msg)


def test_something_exception():
    msg = "Not a good value"
    error = ""
    try:
        raise_exception(msg)
    except Exception as e:
        error = str(e)
        assert msg in error, f"{msg} is in the exception:\n{error}"
```
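For non-`unittest` tests, the same check can be written more idiomatically with `pytest.raises`; a minimal sketch:
```python
import pytest


def test_something_exception_with_pytest():
    # `match` runs a regex search against the string representation of the exception
    with pytest.raises(ValueError, match="Not a good value"):
        raise_exception("Not a good value")
```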
Another approach to capturing stdout is via `contextlib.redirect_stdout`:
```python
import sys
from contextlib import redirect_stdout
from io import StringIO


def print_to_stdout(s):
    print(s)


def test_result_and_stdout():
    msg = "Hello"
    buffer = StringIO()
    with redirect_stdout(buffer):
        print_to_stdout(msg)
    out = buffer.getvalue()
    # optional: if you want to replay the consumed streams:
    sys.stdout.write(out)
    # test:
    assert msg in out
```
An important potential issue with capturing stdout is that it may contain `\r` characters that in normal `print`
reset everything that has been printed so far. There is no problem with `pytest`, but with `pytest -s` these
characters get included in the buffer, so to be able to run the test with and without `-s`, you have to do extra
cleanup of the captured output, using `re.sub(r'~.*\r', '', buf, 0, re.M)`.
But then we have a helper context manager wrapper that automatically takes care of it all, regardless of whether the
output has some `\r`'s in it or not, so it's simply:
```python
from transformers.testing_utils import CaptureStdout
with CaptureStdout() as cs:
    function_that_writes_to_stdout()
print(cs.out)
```
Here is a full test example:
```python
from transformers.testing_utils import CaptureStdout
msg = "Secret message\r"
final = "Hello World"
with CaptureStdout() as cs:
    print(msg + final)
assert cs.out == final + "\n", f"captured: {cs.out}, expecting {final}"
```
If you'd like to capture `stderr`, use the `CaptureStderr` class instead:
```python
from transformers.testing_utils import CaptureStderr
with CaptureStderr() as cs:
    function_that_writes_to_stderr()
print(cs.err)
```
If you need to capture both streams at once, use the parent `CaptureStd` class:
```python
from transformers.testing_utils import CaptureStd
with CaptureStd() as cs:
    function_that_writes_to_stdout_and_stderr()
print(cs.err, cs.out)
```
Also, to aid debugging test issues, by default these context managers automatically replay the captured streams on
exit from the context.
### Capturing logger streams
If you need to validate the output of a logger, you can use `CaptureLogger`:
```python
from transformers import logging
from transformers.testing_utils import CaptureLogger
msg = "Testing 1, 2, 3"
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
with CaptureLogger(logger) as cl:
    logger.info(msg)
assert cl.out == msg + "\n"
```
### Testing with environment variables
If you want to test the impact of environment variables for a specific test, you can use the helper decorator
`transformers.testing_utils.mockenv`:
```python
import os
import unittest

from transformers.testing_utils import mockenv


class HfArgumentParserTest(unittest.TestCase):
    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
```
Sometimes an external program needs to be called, which requires setting `PYTHONPATH` in `os.environ` to include
multiple local paths. A helper class `transformers.test_utils.TestCasePlus` comes to the rescue:
```python
from transformers.testing_utils import TestCasePlus
class EnvExampleTest(TestCasePlus):
    def test_external_prog(self):
        env = self.get_env()
        # now call the external program, passing `env` to it
```
Depending on whether the test file is under the `tests` test suite or `examples`, it will correctly set up
`env[PYTHONPATH]` to include one of these two directories, and also the `src` directory to ensure the testing is done
against the current repo, and finally with whatever `env[PYTHONPATH]` was already set to before the test was called,
if anything.
This helper method creates a copy of the `os.environ` object, so the original remains intact.
### Getting reproducible results
In some situations you may want to remove randomness from your tests. To get identical, reproducible results, you
need to fix the seed:
```python
seed = 42
# python RNG
import random
random.seed(seed)
# pytorch RNGs
import torch
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)
# numpy RNG
import numpy as np
np.random.seed(seed)
```
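Alternatively, `transformers` ships a `set_seed` helper that seeds the Python, NumPy and PyTorch (including CUDA) generators in one call; note that it does not toggle `torch.backends.cudnn.deterministic`, so set that separately if you need it:
```python
from transformers import set_seed

set_seed(42)  # seeds python, numpy and torch (CPU and all CUDA devices) RNGs
```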
### Debugging tests
To start a debugger at the point of the warning, do this:
```bash
pytest tests/utils/test_logging.py -W error::UserWarning --pdb
```
## Working with GitHub Actions workflows
To trigger a self-push workflow CI job, you must:
1. Create a new branch on the `transformers` origin (not a fork!).
2. The branch name has to start with either `ci_` or `ci-` (`main` triggers it too, but we can't do PRs on
`main`). It also gets triggered only for specific paths - you can find the up-to-date definition, in case it
changed since this document was written, [here](https://github.com/huggingface/transformers/blob/main/.github/workflows/self-push.yml) under *push:*
3. Create a PR from this branch.
4. Then you can see the job appear [here](https://github.com/huggingface/transformers/actions/workflows/self-push.yml). It may not run right away if there
is a backlog.
## Testing experimental CI features
Testing CI features can be potentially problematic, as it can interfere with normal CI functioning. Therefore, if a
new CI feature is to be added, it should be done as follows.
1. Create a new dedicated job that tests what needs to be tested.
2. The new job must always succeed so that it gives us a green ✓ (details below).
3. Let it run for some days to see that a variety of different PR types get to run on it (user fork branches,
non-forked branches, branches originating from the github.com UI direct file edit, various forced pushes, etc. - there
are so many), while monitoring the experimental job's logs (not the overall job, which is green on purpose).
4. When it's clear that everything is solid, merge the new changes into the existing jobs.
That way, experiments on the CI functionality itself won't interfere with the normal workflow.
Now, how can we make the job always succeed while the new CI feature is being developed?
Some CIs, like TravisCI, support ignore-step-failure and will report the overall job as successful, but CircleCI and
GitHub Actions don't support that as of this writing.
So the following workaround can be used:
1. `set +euo pipefail` at the beginning of the run command to suppress most potential failures in the bash script.
2. The last command must be a success: `echo "done"` or just `true` will do.
Here is an example:
```yaml
- run:
    name: run CI experiment
    command: |
      set +euo pipefail
      echo "setting run-all-despite-any-errors-mode"
      this_command_will_fail
      echo "but bash continues to run"
      # emulate another failure
      false
      # but the last command must be a success
      echo "during experiment do not remove: reporting success to CI, even if there were failures"
```
For simple commands you could also do:
```bash
cmd_that_may_fail || true
```
Of course, once satisfied with the results, integrate the experimental step or job with the rest of the normal jobs,
while removing `set +euo pipefail` or anything else you may have added to ensure that the experimental job doesn't
interfere with normal CI functioning.
This whole process would have been much easier if we could just set something like `allow-failure` for the
experimental step and let it fail without impacting the overall status of PRs. But as mentioned earlier, CircleCI and
GitHub Actions don't support it at the moment.
You can vote for this feature and see where it stands in these CI-specific threads:
- [GitHub Actions:](https://github.com/actions/toolkit/issues/399)
- [CircleCI:](https://ideas.circleci.com/ideas/CCI-I-344)
| transformers/docs/source/de/testing.md/0 | {
"file_path": "transformers/docs/source/de/testing.md",
"repo_id": "transformers",
"token_count": 19213
} | 396 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Multimodal chat templates
Multimodal chat models accept inputs like images, audio or video, in addition to text. The `content` key in a multimodal chat history is a list containing multiple items of different types. This is unlike text-only chat models whose `content` key is a single string.
In the same way the [Tokenizer](./fast_tokenizer) class handles chat templates and tokenization for text-only models,
the [Processor](./processors) class handles preprocessing, tokenization and chat templates for multimodal models. Their [`~ProcessorMixin.apply_chat_template`] methods are almost identical.
This guide will show you how to chat with multimodal models with the high-level [`ImageTextToTextPipeline`] and at a lower level using the [`~ProcessorMixin.apply_chat_template`] and [`~GenerationMixin.generate`] methods.
## ImageTextToTextPipeline
[`ImageTextToTextPipeline`] is a high-level image and text generation class with a “chat mode”. Chat mode is enabled when a conversational model is detected and the chat prompt is [properly formatted](./llm_tutorial#wrong-prompt-format).
Add image and text blocks to the `content` key in the chat history.
```py
messages = [
{
"role": "system",
"content": [{"type": "text", "text": "You are a friendly chatbot who always responds in the style of a pirate"}],
},
{
"role": "user",
"content": [
{"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
{"type": "text", "text": "What are these?"},
],
},
]
```
Create an [`ImageTextToTextPipeline`] and pass the chat to it. For large models, setting [device_map="auto"](./models#big-model-inference) helps load the model more quickly and automatically places it on the fastest available device. Setting the data type to [auto](./models#model-data-type) also helps save memory and improve speed.
```python
import torch
from transformers import pipeline
pipe = pipeline("image-text-to-text", model="Qwen/Qwen2.5-VL-3B-Instruct", device_map="auto", dtype="auto")
out = pipe(text=messages, max_new_tokens=128)
print(out[0]['generated_text'][-1]['content'])
```
```
Ahoy, me hearty! These be two feline friends, likely some tabby cats, taking a siesta on a cozy pink blanket. They're resting near remote controls, perhaps after watching some TV or just enjoying some quiet time together. Cats sure know how to find comfort and relaxation, don't they?
```
Aside from the gradual descent from pirate-speak into modern American English (it **is** only a 3B model, after all), this is correct!
## Using `apply_chat_template`
Like [text-only models](./chat_templating), use the [`~ProcessorMixin.apply_chat_template`] method to prepare the chat messages for multimodal models.
This method handles the tokenization and formatting of the chat messages, including images and other media types. The resulting inputs are passed to the model for generation.
```python
from transformers import AutoProcessor, AutoModelForImageTextToText
model = AutoModelForImageTextToText.from_pretrained("Qwen/Qwen2.5-VL-3B-Instruct", device_map="auto", torch_dtype="auto")
processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-3B-Instruct")
messages = [
{
"role": "system",
"content": [{"type": "text", "text": "You are a friendly chatbot who always responds in the style of a pirate"}],
},
{
"role": "user",
"content": [
{"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
{"type": "text", "text": "What are these?"},
],
},
]
```
Pass `messages` to [`~ProcessorMixin.apply_chat_template`] to tokenize the input content. Unlike text models, the output of `apply_chat_template`
contains a `pixel_values` key with the preprocessed image data, in addition to the tokenized text.
```py
processed_chat = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt")
print(list(processed_chat.keys()))
```
```
['input_ids', 'attention_mask', 'pixel_values', 'image_grid_thw']
```
Pass these inputs to [`~GenerationMixin.generate`].
```python
out = model.generate(**processed_chat.to(model.device), max_new_tokens=128)
print(processor.decode(out[0]))
```
The decoded output contains the full conversation so far, including the user message and the placeholder tokens that contain the image information. You may need to trim the previous conversation from the output before displaying it to the user.
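One way to do that is to decode only the newly generated tokens, using the prompt length from the processed inputs (a sketch reusing the variables from the snippets above):
```python
# slice off the prompt so only the model's new reply is decoded
prompt_len = processed_chat["input_ids"].shape[1]
reply = processor.decode(out[0][prompt_len:], skip_special_tokens=True)
print(reply)
```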
## Video inputs
Some vision models also support video inputs. The message format is very similar to the format for [image inputs](#image-inputs).
- The content `"type"` should be `"video"` to indicate the content is a video.
- For videos, it can be a link to the video (`"url"`) or it could be a file path (`"path"`). Videos loaded from a URL can only be decoded with [PyAV](https://pyav.basswood-io.com/docs/stable/) or [Decord](https://github.com/dmlc/decord).
- In addition to loading videos from a URL or file path, you can also pass decoded video data directly. This is useful if you've already preprocessed or decoded video frames elsewhere in memory (e.g., using OpenCV, decord, or torchvision). You don't need to save them to files or host them at a URL.
> [!WARNING]
> Loading a video from `"url"` is only supported by the PyAV or Decord backends.
```python
from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration
model_id = "llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
model = LlavaOnevisionForConditionalGeneration.from_pretrained(model_id)
processor = AutoProcessor.from_pretrained(model_id)
messages = [
{
"role": "system",
"content": [{"type": "text", "text": "You are a friendly chatbot who always responds in the style of a pirate"}],
},
{
"role": "user",
"content": [
{"type": "video", "url": "https://test-videos.co.uk/vids/bigbuckbunny/mp4/h264/720/Big_Buck_Bunny_720_10s_10MB.mp4"},
{"type": "text", "text": "What do you see in this video?"},
],
},
]
```
### Example: Passing decoded video objects
```python
import numpy as np
video_object1 = np.random.randint(0, 255, size=(16, 224, 224, 3), dtype=np.uint8)
messages = [
{
"role": "system",
"content": [{"type": "text", "text": "You are a friendly chatbot who always responds in the style of a pirate"}],
},
{
"role": "user",
"content": [
{"type": "video", "video": video_object1},
{"type": "text", "text": "What do you see in this video?"}
],
},
]
```
You can also use the existing `load_video()` function to load a video, edit it in memory, and pass it in the messages.
```python
# Make sure a video backend library (pyav, decord, or torchvision) is available.
from transformers.video_utils import load_video
# load a video file in memory for testing
video_object2, _ = load_video(
"https://test-videos.co.uk/vids/bigbuckbunny/mp4/h264/720/Big_Buck_Bunny_720_10s_10MB.mp4"
)
messages = [
{
"role": "system",
"content": [{"type": "text", "text": "You are a friendly chatbot who always responds in the style of a pirate"}],
},
{
"role": "user",
"content": [
{"type": "video", "video": video_object2},
{"type": "text", "text": "What do you see in this video?"}
],
},
]
```
Pass `messages` to [`~ProcessorMixin.apply_chat_template`] to tokenize the input content. There are a few extra parameters to include in [`~ProcessorMixin.apply_chat_template`] that controls the sampling process.
The `video_load_backend` parameter refers to a specific framework to load a video. It supports [PyAV](https://pyav.basswood-io.com/docs/stable/), [Decord](https://github.com/dmlc/decord), [OpenCV](https://github.com/opencv/opencv), and [torchvision](https://pytorch.org/vision/stable/index.html).
The examples below use Decord as the backend because it is a bit faster than PyAV.
<hfoptions id="sampling">
<hfoption id="fixed number of frames">
The `num_frames` parameter controls how many frames to uniformly sample from the video. Each checkpoint has a maximum frame count it was pretrained with and exceeding this count can significantly lower generation quality. It's important to choose a frame count that fits both the model capacity and your hardware resources. If `num_frames` isn't specified, the entire video is loaded without any frame sampling.
```python
processed_chat = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
num_frames=32,
video_load_backend="decord",
)
print(processed_chat.keys())
```
These inputs are now ready to be used in [`~GenerationMixin.generate`].
</hfoption>
<hfoption id="fps">
For longer videos, it may be better to sample more frames for better representation with the `video_fps` parameter. This determines how many frames per second to extract. As an example, if a video is 10 seconds long and `video_fps=2`, then the model samples 20 frames; in other words, 2 frames are uniformly sampled every second.
```py
processed_chat = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
video_fps=16,
video_load_backend="decord",
)
print(processed_chat.keys())
```
</hfoption>
<hfoption id="list of image frames">
Videos may also exist as a set of sampled frames stored as images rather than the full video file.
In this case, pass a list of image file paths and the processor automatically concatenates them into a video. Make sure all images are the same size since they are assumed to be from the same video.
```py
frames_paths = ["/path/to/frame0.png", "/path/to/frame5.png", "/path/to/frame10.png"]
messages = [
{
"role": "system",
"content": [{"type": "text", "text": "You are a friendly chatbot who always responds in the style of a pirate"}],
},
{
"role": "user",
"content": [
{"type": "video", "path": frames_paths},
{"type": "text", "text": "What do you see in this video?"},
],
},
]
processed_chat = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
)
print(processed_chat.keys())
```
</hfoption>
</hfoptions>
| transformers/docs/source/en/chat_templating_multimodal.md/0 | {
"file_path": "transformers/docs/source/en/chat_templating_multimodal.md",
"repo_id": "transformers",
"token_count": 3715
} | 397 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Glossary
This glossary defines general machine learning and 🤗 Transformers terms to help you better understand the
documentation.
## A
### attention mask
The attention mask is an optional argument used when batching sequences together.
<Youtube id="M6adb1j2jPI"/>
This argument indicates to the model which tokens should be attended to, and which should not.
For example, consider these two sequences:
```python
>>> from transformers import BertTokenizer
>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
>>> sequence_a = "This is a short sequence."
>>> sequence_b = "This is a rather long sequence. It is at least longer than the sequence A."
>>> encoded_sequence_a = tokenizer(sequence_a)["input_ids"]
>>> encoded_sequence_b = tokenizer(sequence_b)["input_ids"]
```
The encoded versions have different lengths:
```python
>>> len(encoded_sequence_a), len(encoded_sequence_b)
(8, 19)
```
Therefore, we can't put them together in the same tensor as-is. The first sequence needs to be padded up to the length
of the second one, or the second one needs to be truncated down to the length of the first one.
In the first case, the list of IDs will be extended by the padding indices. We can pass a list to the tokenizer and ask
it to pad like this:
```python
>>> padded_sequences = tokenizer([sequence_a, sequence_b], padding=True)
```
We can see that 0s have been added on the right of the first sentence to make it the same length as the second one:
```python
>>> padded_sequences["input_ids"]
[[101, 1188, 1110, 170, 1603, 4954, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 1188, 1110, 170, 1897, 1263, 4954, 119, 1135, 1110, 1120, 1655, 2039, 1190, 1103, 4954, 138, 119, 102]]
```
This can then be converted into a tensor in PyTorch or TensorFlow. The attention mask is a binary tensor indicating the
position of the padded indices so that the model does not attend to them. For the [`BertTokenizer`], `1` indicates a
value that should be attended to, while `0` indicates a padded value. This attention mask is in the dictionary returned
by the tokenizer under the key "attention_mask":
```python
>>> padded_sequences["attention_mask"]
[[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
```
### autoencoding models
See [encoder models](#encoder-models) and [masked language modeling](#masked-language-modeling-mlm)
### autoregressive models
See [causal language modeling](#causal-language-modeling) and [decoder models](#decoder-models)
## B
### backbone
The backbone is the network (embeddings and layers) that outputs the raw hidden states or features. It is usually connected to a [head](#head) which accepts the features as its input to make a prediction. For example, [`ViTModel`] is a backbone without a specific head on top. Other models can also use [`ViTModel`] as a backbone such as [DPT](model_doc/dpt).
## C
### causal language modeling
A pretraining task where the model reads the texts in order and has to predict the next word. It's usually done by
reading the whole sentence but using a mask inside the model to hide the future tokens at a certain timestep.
### channel
Color images are made up of some combination of values in three channels: red, green, and blue (RGB) and grayscale images only have one channel. In 🤗 Transformers, the channel can be the first or last dimension of an image's tensor: [`n_channels`, `height`, `width`] or [`height`, `width`, `n_channels`].
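For example, here is the same image tensor in both layouts (a sketch using random pixel data):
```python
import torch

image_channels_first = torch.rand(3, 224, 224)               # [n_channels, height, width]
image_channels_last = image_channels_first.permute(1, 2, 0)  # [height, width, n_channels]
print(image_channels_first.shape, image_channels_last.shape)
```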
### connectionist temporal classification (CTC)
An algorithm which allows a model to learn without knowing exactly how the input and output are aligned; CTC calculates the distribution of all possible outputs for a given input and chooses the most likely output from it. CTC is commonly used in speech recognition tasks because speech doesn't always cleanly align with the transcript for a variety of reasons such as a speaker's different speech rates.
### convolution
A type of layer in a neural network where the input matrix is multiplied element-wise by a smaller matrix (kernel or filter) and the values are summed up in a new matrix. This is known as a convolutional operation which is repeated over the entire input matrix. Each operation is applied to a different segment of the input matrix. Convolutional neural networks (CNNs) are commonly used in computer vision.
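As a minimal sketch of a convolutional layer in PyTorch:
```python
import torch
from torch import nn

# 3 input channels (RGB), 8 learned 3x3 filters
conv = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3)
features = conv(torch.rand(1, 3, 32, 32))  # no padding: output size is 32 - 3 + 1 = 30
print(features.shape)  # torch.Size([1, 8, 30, 30])
```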
## D
### DataParallel (DP)
Parallelism technique for training on multiple GPUs where the same setup is replicated multiple times, with each instance
receiving a distinct data slice. The processing is done in parallel and all setups are synchronized at the end of each training step.
Learn more about how DataParallel works [here](perf_train_gpu_many#dataparallel-vs-distributeddataparallel).
### decoder input IDs
This input is specific to encoder-decoder models, and contains the input IDs that will be fed to the decoder. These
inputs should be used for sequence to sequence tasks, such as translation or summarization, and are usually built in a
way specific to each model.
Most encoder-decoder models (BART, T5) create their `decoder_input_ids` on their own from the `labels`. In such models,
passing the `labels` is the preferred way to handle training.
Please check each model's docs to see how they handle these input IDs for sequence to sequence training.
### decoder models
Also referred to as autoregressive models, decoder models involve a pretraining task (called causal language modeling) where the model reads the texts in order and has to predict the next word. It's usually done by
reading the whole sentence with a mask to hide future tokens at a certain timestep.
<Youtube id="d_ixlCubqQw"/>
### deep learning (DL)
Machine learning algorithms which use neural networks with several layers.
## E
### encoder models
Also known as autoencoding models, encoder models take an input (such as text or images) and transform them into a condensed numerical representation called an embedding. Oftentimes, encoder models are pretrained using techniques like [masked language modeling](#masked-language-modeling-mlm), which masks parts of the input sequence and forces the model to create more meaningful representations.
<Youtube id="H39Z_720T5s"/>
## F
### feature extraction
The process of selecting and transforming raw data into a set of features that are more informative and useful for machine learning algorithms. Some examples of feature extraction include transforming raw text into word embeddings and extracting important features such as edges or shapes from image/video data.
### feed forward chunking
In each residual attention block in transformers the self-attention layer is usually followed by 2 feed forward layers.
The intermediate embedding size of the feed forward layers is often bigger than the hidden size of the model (e.g., for
`google-bert/bert-base-uncased`).
For an input of size `[batch_size, sequence_length]`, the memory required to store the intermediate feed forward
embeddings `[batch_size, sequence_length, config.intermediate_size]` can account for a large fraction of the memory
use. The authors of [Reformer: The Efficient Transformer](https://huggingface.co/papers/2001.04451) noticed that since the
computation is independent of the `sequence_length` dimension, it is mathematically equivalent to compute the output
embeddings of both feed forward layers `[batch_size, config.hidden_size]_0, ..., [batch_size, config.hidden_size]_n`
individually and concat them afterward to `[batch_size, sequence_length, config.hidden_size]` with `n = sequence_length`, which trades increased computation time against reduced memory use, but yields a mathematically
**equivalent** result.
For models employing the function [`apply_chunking_to_forward`], the `chunk_size` defines the number of output
embeddings that are computed in parallel and thus defines the trade-off between memory and time complexity. If
`chunk_size` is set to 0, no feed forward chunking is done.
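As a runnable sketch of the chunked computation (the layer sizes here are illustrative; `apply_chunking_to_forward` is importable from `transformers.pytorch_utils` in recent versions):
```python
import torch
from torch import nn
from transformers.pytorch_utils import apply_chunking_to_forward

hidden_size, intermediate_size = 16, 64
dense_in = nn.Linear(hidden_size, intermediate_size)
dense_out = nn.Linear(intermediate_size, hidden_size)

def feed_forward_chunk(attention_output):
    # the two feed forward layers, applied to one slice of the sequence at a time
    return dense_out(dense_in(attention_output))

x = torch.rand(2, 10, hidden_size)  # [batch_size, sequence_length, hidden_size]
out = apply_chunking_to_forward(
    feed_forward_chunk,  # function applied per chunk
    2,                   # chunk_size: sequence positions per chunk (0 disables chunking)
    1,                   # chunk_dim: the sequence_length dimension
    x,
)
print(out.shape)  # identical result and shape to the unchunked computation
```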
### finetuned models
Finetuning is a form of transfer learning which involves taking a pretrained model, freezing its weights, and replacing the output layer with a newly added [model head](#head). The model head is trained on your target dataset.
See the [Fine-tune a pretrained model](https://huggingface.co/docs/transformers/training) tutorial for more details, and learn how to fine-tune models with 🤗 Transformers.
## H
### head
The model head refers to the last layer of a neural network that accepts the raw hidden states and projects them onto a different dimension. There is a different model head for each task. For example:
* [`GPT2ForSequenceClassification`] is a sequence classification head - a linear layer - on top of the base [`GPT2Model`].
* [`ViTForImageClassification`] is an image classification head - a linear layer on top of the final hidden state of the `CLS` token - on top of the base [`ViTModel`].
* [`Wav2Vec2ForCTC`] is a language modeling head with [CTC](#connectionist-temporal-classification-ctc) on top of the base [`Wav2Vec2Model`].
## I
### image patch
Vision-based Transformers models split an image into smaller patches which are linearly embedded, and then passed as a sequence to the model. You can find the `patch_size` - or resolution - of the model in its configuration.
### inference
Inference is the process of evaluating a model on new data after training is complete. See the [Pipeline for inference](https://huggingface.co/docs/transformers/pipeline_tutorial) tutorial to learn how to perform inference with 🤗 Transformers.
### input IDs
The input ids are often the only required parameters to be passed to the model as input. They are token indices,
numerical representations of tokens building the sequences that will be used as input by the model.
<Youtube id="VFp38yj8h3A"/>
Each tokenizer works differently but the underlying mechanism remains the same. Here's an example using the BERT
tokenizer, which is a [WordPiece](https://huggingface.co/papers/1609.08144) tokenizer:
```python
>>> from transformers import BertTokenizer
>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
>>> sequence = "A Titan RTX has 24GB of VRAM"
```
The tokenizer takes care of splitting the sequence into tokens available in the tokenizer vocabulary.
```python
>>> tokenized_sequence = tokenizer.tokenize(sequence)
```
The tokens are either words or subwords. Here for instance, "VRAM" wasn't in the model vocabulary, so it's been split
into "V", "RA" and "M". To indicate those tokens are not separate words but parts of the same word, a double-hash prefix
is added for "RA" and "M":
```python
>>> print(tokenized_sequence)
['A', 'Titan', 'R', '##T', '##X', 'has', '24', '##GB', 'of', 'V', '##RA', '##M']
```
These tokens can then be converted into IDs which are understandable by the model. This can be done by directly feeding the sentence to the tokenizer, which leverages the Rust implementation of [🤗 Tokenizers](https://github.com/huggingface/tokenizers) for peak performance.
```python
>>> inputs = tokenizer(sequence)
```
The tokenizer returns a dictionary with all the arguments necessary for its corresponding model to work properly. The
token indices are under the key `input_ids`:
```python
>>> encoded_sequence = inputs["input_ids"]
>>> print(encoded_sequence)
[101, 138, 18696, 155, 1942, 3190, 1144, 1572, 13745, 1104, 159, 9664, 2107, 102]
```
Note that the tokenizer automatically adds "special tokens" (if the associated model relies on them) which are special
IDs the model sometimes uses.
If we decode the previous sequence of ids,
```python
>>> decoded_sequence = tokenizer.decode(encoded_sequence)
```
we will see
```python
>>> print(decoded_sequence)
[CLS] A Titan RTX has 24GB of VRAM [SEP]
```
because this is the way a [`BertModel`] is going to expect its inputs.
## L
### labels
The labels are an optional argument which can be passed in order for the model to compute the loss itself. These labels
should be the expected prediction of the model: it will use the standard loss in order to compute the loss between its
predictions and the expected value (the label).
These labels are different according to the model head, for example:
- For sequence classification models, ([`BertForSequenceClassification`]), the model expects a tensor of dimension
`(batch_size)` with each value of the batch corresponding to the expected label of the entire sequence.
- For token classification models, ([`BertForTokenClassification`]), the model expects a tensor of dimension
`(batch_size, seq_length)` with each value corresponding to the expected label of each individual token.
- For masked language modeling, ([`BertForMaskedLM`]), the model expects a tensor of dimension `(batch_size,
seq_length)` with each value corresponding to the expected label of each individual token: the labels being the token
ID for the masked token, and values to be ignored for the rest (usually -100).
- For sequence to sequence tasks, ([`BartForConditionalGeneration`], [`MBartForConditionalGeneration`]), the model
expects a tensor of dimension `(batch_size, tgt_seq_length)` with each value corresponding to the target sequences
associated with each input sequence. During training, both BART and T5 will make the appropriate
`decoder_input_ids` and decoder attention masks internally. They usually do not need to be supplied. This does not
apply to models leveraging the Encoder-Decoder framework.
- For image classification models, ([`ViTForImageClassification`]), the model expects a tensor of dimension
`(batch_size)` with each value of the batch corresponding to the expected label of each individual image.
- For semantic segmentation models, ([`SegformerForSemanticSegmentation`]), the model expects a tensor of dimension
`(batch_size, height, width)` with each value of the batch corresponding to the expected label of each individual pixel.
- For object detection models, ([`DetrForObjectDetection`]), the model expects a list of dictionaries with a
`class_labels` and `boxes` key where each value of the batch corresponds to the expected class labels and bounding boxes of each individual image.
- For automatic speech recognition models, ([`Wav2Vec2ForCTC`]), the model expects a tensor of dimension `(batch_size,
target_length)` with each value corresponding to the expected label of each individual token.
<Tip>
Each model's labels may be different, so be sure to always check the documentation of each model for more information
about their specific labels!
</Tip>
The base models ([`BertModel`]) do not accept labels, as these are the base transformer models, simply outputting
features.
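As a minimal sketch, the snippet below passes labels to a sequence classification head so the model computes the loss itself; the checkpoint, input text, and label value are illustrative.
```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> model = AutoModelForSequenceClassification.from_pretrained("google-bert/bert-base-uncased", num_labels=2)

>>> inputs = tokenizer("This film was great!", return_tensors="pt")
>>> labels = torch.tensor([1])  # one expected label per sequence in the batch
>>> outputs = model(**inputs, labels=labels)
>>> outputs.loss is not None
True
```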
### large language models (LLM)
A generic term that refers to transformer language models (GPT-3, BLOOM, OPT) that were trained on a large quantity of data. These models also tend to have a large number of learnable parameters (e.g. 175 billion for GPT-3).
## M
### masked language modeling (MLM)
A pretraining task where the model sees a corrupted version of the texts, usually done by
masking some tokens randomly, and has to predict the original text.
### multimodal
A task that combines texts with another kind of input (for instance images).
## N
### Natural language generation (NLG)
All tasks related to generating text (for instance, [Write With Transformers](https://transformer.huggingface.co/), translation).
### Natural language processing (NLP)
A generic way to say "deal with texts".
### Natural language understanding (NLU)
All tasks related to understanding what is in a text (for instance classifying the
whole text, individual words).
## P
### pipeline
A pipeline in 🤗 Transformers is an abstraction referring to a series of steps that are executed in a specific order to preprocess and transform data and return a prediction from a model. Some example stages found in a pipeline might be data preprocessing, feature extraction, and normalization.
For more details, see [Pipelines for inference](https://huggingface.co/docs/transformers/pipeline_tutorial).
### PipelineParallel (PP)
Parallelism technique in which the model is split up vertically (layer-level) across multiple GPUs, so that only one or
several layers of the model are placed on a single GPU. Each GPU processes a different stage of the pipeline in parallel,
working on a small chunk of the batch. Learn more about how PipelineParallel works [here](perf_train_gpu_many#from-naive-model-parallelism-to-pipeline-parallelism).
### pixel values
A tensor of the numerical representations of an image that is passed to a model. The pixel values have a shape of [`batch_size`, `num_channels`, `height`, `width`], and are generated from an image processor.
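For illustration, the snippet below runs an image through an image processor and inspects the resulting pixel values; the checkpoint and image URL are examples only.
```python
>>> from transformers import AutoImageProcessor
>>> from transformers.image_utils import load_image

>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
>>> image = load_image("http://images.cocodataset.org/val2017/000000039769.jpg")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> inputs["pixel_values"].shape
torch.Size([1, 3, 224, 224])
```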
### pooling
An operation that reduces a matrix into a smaller matrix, either by taking the maximum or average of the pooled dimension(s). Pooling layers are commonly found between convolutional layers to downsample the feature representation.
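As a small sketch, the snippet below applies `2x2` max pooling, which halves each spatial dimension; the tensor shape is arbitrary.
```python
>>> import torch

>>> x = torch.randn(1, 1, 4, 4)
>>> torch.nn.functional.max_pool2d(x, kernel_size=2).shape
torch.Size([1, 1, 2, 2])
```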
### position IDs
Contrary to RNNs that have the position of each token embedded within them, transformers are unaware of the position of
each token. Therefore, the position IDs (`position_ids`) are used by the model to identify each token's position in the
list of tokens.
They are an optional parameter. If no `position_ids` are passed to the model, the IDs are automatically created as
absolute positional embeddings.
Absolute positional embeddings are selected in the range `[0, config.max_position_embeddings - 1]`. Some models use
other types of positional embeddings, such as sinusoidal position embeddings or relative position embeddings.
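The snippet below sketches the absolute position IDs a model creates automatically when none are passed; the input IDs are illustrative.
```python
>>> import torch

>>> input_ids = torch.tensor([[101, 138, 18696, 155, 1942, 102]])
>>> position_ids = torch.arange(input_ids.shape[1]).unsqueeze(0)
>>> position_ids
tensor([[0, 1, 2, 3, 4, 5]])
```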
### preprocessing
The task of preparing raw data into a format that can be easily consumed by machine learning models. For example, text is typically preprocessed by tokenization. To gain a better idea of what preprocessing looks like for other input types, check out the [Preprocess](https://huggingface.co/docs/transformers/preprocessing) tutorial.
### pretrained model
A model that has been pretrained on some data (for instance all of Wikipedia). Pretraining methods involve a
self-supervised objective, which can be reading the text and trying to predict the next word (see [causal language
modeling](#causal-language-modeling)) or masking some words and trying to predict them (see [masked language
modeling](#masked-language-modeling-mlm)).
Speech and vision models have their own pretraining objectives. For example, Wav2Vec2 is a speech model pretrained on a contrastive task which requires the model to identify the "true" speech representation from a set of "false" speech representations. On the other hand, BEiT is a vision model pretrained on a masked image modeling task which masks some of the image patches and requires the model to predict the masked patches (similar to the masked language modeling objective).
## R
### recurrent neural network (RNN)
A type of model that uses a loop over a layer to process texts.
### representation learning
A subfield of machine learning which focuses on learning meaningful representations of raw data. Some examples of representation learning techniques include word embeddings, autoencoders, and Generative Adversarial Networks (GANs).
## S
### sampling rate
A measurement in hertz of the number of samples (the audio signal) taken per second. The sampling rate is a result of discretizing a continuous signal such as speech.
### self-attention
Each element of the input finds out which other elements of the input it should attend to.
### self-supervised learning
A category of machine learning techniques in which a model creates its own learning objective from unlabeled data. It differs from [unsupervised learning](#unsupervised-learning) and [supervised learning](#supervised-learning) in that the learning process is supervised, but not explicitly from the user.
One example of self-supervised learning is [masked language modeling](#masked-language-modeling-mlm), where a model is passed sentences with a proportion of its tokens removed and learns to predict the missing tokens.
### semi-supervised learning
A broad category of machine learning training techniques that leverages a small amount of labeled data with a larger quantity of unlabeled data to improve the accuracy of a model, unlike [supervised learning](#supervised-learning) and [unsupervised learning](#unsupervised-learning).
An example of a semi-supervised learning approach is "self-training", in which a model is trained on labeled data, and then used to make predictions on the unlabeled data. The portion of the unlabeled data that the model predicts with the most confidence gets added to the labeled dataset and used to retrain the model.
### sequence-to-sequence (seq2seq)
Models that generate a new sequence from an input, like translation models, or summarization models (such as
[Bart](model_doc/bart) or [T5](model_doc/t5)).
### Sharded DDP
Another name for the foundational [ZeRO](#zero-redundancy-optimizer-zero) concept as used by various other implementations of ZeRO.
### stride
In [convolution](#convolution) or [pooling](#pooling), the stride refers to the distance the kernel is moved over a matrix. A stride of 1 means the kernel is moved one pixel over at a time, and a stride of 2 means the kernel is moved two pixels over at a time.
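For illustration, the snippet below applies the same `3x3` kernel with a stride of 1 and a stride of 2; the input size is arbitrary.
```python
>>> import torch

>>> x = torch.randn(1, 1, 8, 8)
>>> torch.nn.Conv2d(1, 1, kernel_size=3, stride=1)(x).shape
torch.Size([1, 1, 6, 6])
>>> torch.nn.Conv2d(1, 1, kernel_size=3, stride=2)(x).shape
torch.Size([1, 1, 3, 3])
```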
### supervised learning
A form of model training that directly uses labeled data to correct and instruct model performance. Data is fed into the model being trained, and its predictions are compared to the known labels. The model updates its weights based on how incorrect its predictions were, and the process is repeated to optimize model performance.
## T
### Tensor Parallelism (TP)
Parallelism technique for training on multiple GPUs in which each tensor is split up into multiple chunks, so instead of
having the whole tensor reside on a single GPU, each shard of the tensor resides on its designated GPU. Each shard is
processed separately and in parallel on different GPUs, and the results are synced at the end of the processing step.
This is sometimes called horizontal parallelism, as the splitting happens at the horizontal level.
Learn more about Tensor Parallelism [here](perf_train_gpu_many#tensor-parallelism).
### token
A part of a sentence, usually a word, but can also be a subword (non-common words are often split in subwords) or a
punctuation symbol.
### token Type IDs
Some models' purpose is to do classification on pairs of sentences or question answering.
<Youtube id="0u3ioSwev3s"/>
These require two different sequences to be joined in a single "input_ids" entry, which usually is performed with the
help of special tokens, such as the classifier (`[CLS]`) and separator (`[SEP]`) tokens. For example, the BERT model
builds its two sequence input as such:
```python
>>> # [CLS] SEQUENCE_A [SEP] SEQUENCE_B [SEP]
```
We can use our tokenizer to automatically generate such a sentence by passing the two sequences to `tokenizer` as two
arguments (and not a list, like before) like this:
```python
>>> from transformers import BertTokenizer
>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
>>> sequence_a = "HuggingFace is based in NYC"
>>> sequence_b = "Where is HuggingFace based?"
>>> encoded_dict = tokenizer(sequence_a, sequence_b)
>>> decoded = tokenizer.decode(encoded_dict["input_ids"])
```
which will return:
```python
>>> print(decoded)
[CLS] HuggingFace is based in NYC [SEP] Where is HuggingFace based? [SEP]
```
This is enough for some models to understand where one sequence ends and where another begins. However, other models,
such as BERT, also deploy token type IDs (also called segment IDs). They are represented as a binary mask identifying
the two types of sequence in the model.
The tokenizer returns this mask as the "token_type_ids" entry:
```python
>>> encoded_dict["token_type_ids"]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```
The first sequence, the "context" used for the question, has all its tokens represented by a `0`, whereas the second
sequence, corresponding to the "question", has all its tokens represented by a `1`.
Some models, like [`XLNetModel`], use an additional token represented by a `2`.
### transfer learning
A technique that involves taking a pretrained model and adapting it to a dataset specific to your task. Instead of training a model from scratch, you can leverage knowledge obtained from an existing model as a starting point. This speeds up the learning process and reduces the amount of training data needed.
### transformer
Self-attention based deep learning model architecture.
## U
### unsupervised learning
A form of model training in which data provided to the model is not labeled. Unsupervised learning techniques leverage statistical information of the data distribution to find patterns useful for the task at hand.
## Z
### Zero Redundancy Optimizer (ZeRO)
Parallelism technique which performs sharding of the tensors somewhat similar to [TensorParallel](#tensor-parallelism-tp),
except the whole tensor gets reconstructed in time for a forward or backward computation, therefore the model doesn't need
to be modified. This method also supports various offloading techniques to compensate for limited GPU memory.
Learn more about ZeRO [here](perf_train_gpu_many#zero-data-parallelism).
| transformers/docs/source/en/glossary.md/0 | {
"file_path": "transformers/docs/source/en/glossary.md",
"repo_id": "transformers",
"token_count": 6761
} | 398 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Models
The base class [`PreTrainedModel`] implements the common methods for loading/saving a model either from a local
file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace's Hub).
[`PreTrainedModel`] also implements a few methods which are common among all the models to:
- resize the input token embeddings when new tokens are added to the vocabulary (see the sketch after this list)
- prune the attention heads of the model.
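As a small sketch, resizing the token embeddings after extending the vocabulary might look like this; the checkpoint and added token are examples only.
```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

tokenizer.add_tokens(["<new_token>"])  # extend the tokenizer vocabulary
model.resize_token_embeddings(len(tokenizer))  # resize the input embeddings to match
```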
The other methods that are common to each model are defined in [`~modeling_utils.ModuleUtilsMixin`] and [`~generation.GenerationMixin`].
## PreTrainedModel
[[autodoc]] PreTrainedModel
- push_to_hub
- all
Custom models should also include a `_supports_assign_param_buffer` attribute, which determines whether superfast init can be applied
to the particular model. A sign that your model needs this is if `test_save_and_load_from_pretrained` fails. If so,
set this attribute to `False`.
## ModuleUtilsMixin
[[autodoc]] modeling_utils.ModuleUtilsMixin
## Pushing to the Hub
[[autodoc]] utils.PushToHubMixin
## Sharded checkpoints
[[autodoc]] modeling_utils.load_sharded_checkpoint
| transformers/docs/source/en/main_classes/model.md/0 | {
"file_path": "transformers/docs/source/en/main_classes/model.md",
"repo_id": "transformers",
"token_count": 508
} | 399 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2025-06-18 and added to Hugging Face Transformers on 2025-06-24.*
<div style="float: right;">
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
</div>
# Arcee
[Arcee](https://www.arcee.ai/blog/deep-dive-afm-4-5b-the-first-arcee-foundational-model) is a decoder-only transformer model based on the Llama architecture with a key modification: it uses ReLU² (ReLU-squared) activation in the MLP blocks instead of SiLU, following recent research showing improved training efficiency with squared activations. This architecture is designed for efficient training and inference while maintaining the proven stability of the Llama design.
The Arcee model is architecturally similar to Llama but uses `x * relu(x)` in MLP layers for improved gradient flow and is optimized for efficiency in both training and inference scenarios.
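The snippet below is a minimal sketch of the ReLU² activation described above, not Arcee's actual MLP module:

```py
import torch

def relu_squared(x: torch.Tensor) -> torch.Tensor:
    # x * relu(x) is 0 for x <= 0 and x**2 otherwise, i.e. relu(x) ** 2
    return x * torch.relu(x)

print(relu_squared(torch.tensor([-2.0, 0.5, 2.0])))  # tensor([0.0000, 0.2500, 4.0000])
```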
> [!TIP]
> The Arcee model supports extended context with RoPE scaling and all standard transformers features including Flash Attention 2, SDPA, gradient checkpointing, and quantization support.
The example below demonstrates how to generate text with Arcee using [`Pipeline`] or the [`AutoModel`].
<hfoptions id="usage">
<hfoption id="Pipeline">
```py
import torch
from transformers import pipeline
pipeline = pipeline(
task="text-generation",
model="arcee-ai/AFM-4.5B",
dtype=torch.float16,
device=0
)
output = pipeline("The key innovation in Arcee is")
print(output[0]["generated_text"])
```
</hfoption>
<hfoption id="AutoModel">
```py
import torch
from transformers import AutoTokenizer, ArceeForCausalLM
tokenizer = AutoTokenizer.from_pretrained("arcee-ai/AFM-4.5B")
model = ArceeForCausalLM.from_pretrained(
"arcee-ai/AFM-4.5B",
dtype=torch.float16,
device_map="auto"
)
inputs = tokenizer("The key innovation in Arcee is", return_tensors="pt")
with torch.no_grad():
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
</hfoption>
</hfoptions>
## ArceeConfig
[[autodoc]] ArceeConfig
## ArceeModel
[[autodoc]] ArceeModel
- forward
## ArceeForCausalLM
[[autodoc]] ArceeForCausalLM
- forward
## ArceeForSequenceClassification
[[autodoc]] ArceeForSequenceClassification
- forward
## ArceeForQuestionAnswering
[[autodoc]] ArceeForQuestionAnswering
- forward
## ArceeForTokenClassification
[[autodoc]] ArceeForTokenClassification
- forward | transformers/docs/source/en/model_doc/arcee.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/arcee.md",
"repo_id": "transformers",
"token_count": 1184
} | 400 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2020-07-28 and added to Hugging Face Transformers on 2021-03-30.*
<div style="float: right;">
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white" >
</div>
</div>
# BigBird
[BigBird](https://huggingface.co/papers/2007.14062) is a transformer model built to handle sequence lengths up to 4096 compared to 512 for [BERT](./bert). Traditional transformers struggle with long inputs because the cost of full attention grows quadratically with sequence length. BigBird fixes this with a sparse attention mechanism, which means it doesn't try to look at everything at once. Instead, it mixes local attention, random attention, and a few global tokens to process the whole input. This combination gives it the best of both worlds: it keeps the computation efficient while still capturing enough of the sequence to understand it well. Because of this, BigBird is great at tasks involving long documents, like question answering, summarization, and genomic applications.
You can find all the original BigBird checkpoints under the [Google](https://huggingface.co/google?search_models=bigbird) organization.
> [!TIP]
> Click on the BigBird models in the right sidebar for more examples of how to apply BigBird to different language tasks.
The example below demonstrates how to predict the `[MASK]` token with [`Pipeline`], [`AutoModel`], and from the command line.
<hfoptions id="usage">
<hfoption id="Pipeline">
```py
import torch
from transformers import pipeline
pipeline = pipeline(
task="fill-mask",
model="google/bigbird-roberta-base",
dtype=torch.float16,
device=0
)
pipeline("Plants create [MASK] through a process known as photosynthesis.")
```
</hfoption>
<hfoption id="AutoModel">
```py
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
"google/bigbird-roberta-base",
)
model = AutoModelForMaskedLM.from_pretrained(
"google/bigbird-roberta-base",
dtype=torch.float16,
device_map="auto",
)
inputs = tokenizer("Plants create [MASK] through a process known as photosynthesis.", return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
predictions = outputs.logits
masked_index = torch.where(inputs['input_ids'] == tokenizer.mask_token_id)[1]
predicted_token_id = predictions[0, masked_index].argmax(dim=-1)
predicted_token = tokenizer.decode(predicted_token_id)
print(f"The predicted token is: {predicted_token}")
```
</hfoption>
<hfoption id="transformers CLI">
```bash
!echo -e "Plants create [MASK] through a process known as photosynthesis." | transformers-cli run --task fill-mask --model google/bigbird-roberta-base --device 0
```
</hfoption>
</hfoptions>
## Notes
- Inputs should be padded on the right because BigBird uses absolute position embeddings.
- BigBird supports `original_full` and `block_sparse` attention. If the input sequence length is less than 1024, it is recommended to use `original_full` since sparse patterns don't offer much benefit for smaller inputs (see the sketch after this list for how to select the attention type).
- The current implementation uses window size of 3 blocks and 2 global blocks, only supports the ITC-implementation, and doesn't support `num_random_blocks=0`.
- The sequence length must be divisible by the block size.
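A minimal sketch of selecting the attention type through config overrides; the block size and number of random blocks shown are just example values.

```py
from transformers import BigBirdModel

# use full attention for shorter inputs (sequence length < 1024)
model = BigBirdModel.from_pretrained("google/bigbird-roberta-base", attention_type="original_full")

# or keep block sparse attention and tune its sparsity pattern
model = BigBirdModel.from_pretrained(
    "google/bigbird-roberta-base", attention_type="block_sparse", block_size=64, num_random_blocks=3
)
```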
## Resources
- Read the [BigBird](https://huggingface.co/blog/big-bird) blog post for more details about how its attention works.
## BigBirdConfig
[[autodoc]] BigBirdConfig
## BigBirdTokenizer
[[autodoc]] BigBirdTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## BigBirdTokenizerFast
[[autodoc]] BigBirdTokenizerFast
## BigBird specific outputs
[[autodoc]] models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput
## BigBirdModel
[[autodoc]] BigBirdModel
- forward
## BigBirdForPreTraining
[[autodoc]] BigBirdForPreTraining
- forward
## BigBirdForCausalLM
[[autodoc]] BigBirdForCausalLM
- forward
## BigBirdForMaskedLM
[[autodoc]] BigBirdForMaskedLM
- forward
## BigBirdForSequenceClassification
[[autodoc]] BigBirdForSequenceClassification
- forward
## BigBirdForMultipleChoice
[[autodoc]] BigBirdForMultipleChoice
- forward
## BigBirdForTokenClassification
[[autodoc]] BigBirdForTokenClassification
- forward
## BigBirdForQuestionAnswering
[[autodoc]] BigBirdForQuestionAnswering
- forward
| transformers/docs/source/en/model_doc/big_bird.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/big_bird.md",
"repo_id": "transformers",
"token_count": 1597
} | 401 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2024-05-16 and added to Hugging Face Transformers on 2024-07-17.*
# Chameleon
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
## Overview
The Chameleon model was proposed in [Chameleon: Mixed-Modal Early-Fusion Foundation Models
](https://huggingface.co/papers/2405.09818) by the Meta AI Chameleon Team. Chameleon is a Vision-Language Model that uses vector quantization to tokenize images, which enables the model to generate multimodal output. The model takes images and text as input, including in an interleaved format, and generates textual responses. The image generation module has not been released yet.
The abstract from the paper is the following:
*We present Chameleon, a family of early-fusion token-based mixed-modal models capable of understanding and generating images and text in any arbitrary sequence. We outline a stable training
approach from inception, an alignment recipe, and an architectural parameterization tailored for the
early-fusion, token-based, mixed-modal setting. The models are evaluated on a comprehensive range
of tasks, including visual question answering, image captioning, text generation, image generation, and
long-form mixed modal generation. Chameleon demonstrates broad and general capabilities, including
state-of-the-art performance in image captioning tasks, outperforms Llama-2 in text-only tasks while
being competitive with models such as Mixtral 8x7B and Gemini-Pro, and performs non-trivial image
generation, all in a single model. It also matches or exceeds the performance of much larger models,
including Gemini Pro and GPT-4V, according to human judgments on a new long-form mixed-modal
generation evaluation, where either the prompt or outputs contain mixed sequences of both images and
text. Chameleon marks a significant step forward in unified modeling of full multimodal documents*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/chameleon_arch.png"
alt="drawing" width="600"/>
<small> Chameleon incorporates a vector quantizer module to transform images into discrete tokens. That also enables image generation using an auto-regressive transformer. Taken from the <a href="https://huggingface.co/papers/2405.09818">original paper.</a> </small>
This model was contributed by [joaogante](https://huggingface.co/joaogante) and [RaushanTurganbay](https://huggingface.co/RaushanTurganbay).
The original code can be found [here](https://github.com/facebookresearch/chameleon).
## Usage tips
- We advise users to use `padding_side="left"` when computing batched generation as it leads to more accurate results. Simply make sure to set `processor.tokenizer.padding_side = "left"` before generating.
- Note that Chameleon was tuned for safety alignment. If the model is refusing to answer, consider asking a more concrete question, instead of an open question.
- Chameleon generates in chat format which means that the generated text will always be the "assistant's turn". You can enable a text completion generation by passing `return_for_text_completion=True` when calling the processor.
> [!NOTE]
> Chameleon implementation in Transformers uses a special image token to indicate where to merge image embeddings. For special image token we didn't add a new one but used one of the reserved tokens: `<reserved08707>`. You have to add `<image>` to your prompt in the place where the image should be embedded for correct generation.
## Usage example
### Single image inference
Chameleon is a gated model, so make sure to request access and log in to the Hugging Face Hub with a token.
Here's how to load the model and perform inference in half-precision (`torch.bfloat16`):
```python
from transformers import ChameleonProcessor, ChameleonForConditionalGeneration
import torch
from PIL import Image
import requests
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", dtype=torch.bfloat16, device_map="auto")
# prepare image and text prompt
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)
prompt = "What do you see in this image?<image>"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device, dtype=torch.bfloat16)
# autoregressively complete prompt
output = model.generate(**inputs, max_new_tokens=50)
print(processor.decode(output[0], skip_special_tokens=True))
```
### Multi image inference
Chameleon can perform inference with multiple images as input, where images either belong to the same prompt or different prompts (in batched inference). Here is how you can do it:
```python
from transformers import ChameleonProcessor, ChameleonForConditionalGeneration
import torch
from PIL import Image
import requests
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", dtype=torch.bfloat16, device_map="auto")
# Get three different images
url = "https://www.ilankelman.org/stopsigns/australia.jpg"
image_stop = Image.open(requests.get(url, stream=True).raw)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image_cats = Image.open(requests.get(url, stream=True).raw)
url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"
image_snowman = Image.open(requests.get(url, stream=True).raw)
# Prepare a batched prompt, where the first one is a multi-image prompt and the second is not
prompts = [
"What do these images have in common?<image><image>",
"<image>What is shown in this image?"
]
# We can simply feed images in the order they have to be used in the text prompt
# Each "<image>" token uses one image leaving the next for the subsequent "<image>" tokens
inputs = processor(images=[image_stop, image_cats, image_snowman], text=prompts, padding=True, return_tensors="pt").to(device=model.device, dtype=torch.bfloat16)
# Generate
generate_ids = model.generate(**inputs, max_new_tokens=50)
processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
```
## Model optimization
### Quantization using Bitsandbytes
The model can be loaded in 8-bit or 4-bit precision, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes (`pip install bitsandbytes`) and to have access to a GPU/accelerator that is supported by the library.
<Tip>
bitsandbytes is being refactored to support multiple backends beyond CUDA. Currently, ROCm (AMD GPU) and Intel CPU implementations are mature, with Intel XPU in progress and Apple Silicon support expected by Q4/Q1. For installation instructions and the latest backend updates, visit [this link](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend).
We value your feedback to help identify bugs before the full release! Check out [these docs](https://huggingface.co/docs/bitsandbytes/main/en/non_cuda_backends) for more details and feedback links.
</Tip>
Simply change the snippet above to:
```python
import torch
from transformers import ChameleonForConditionalGeneration, BitsAndBytesConfig
# specify how to quantize the model
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16,
)
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", quantization_config=quantization_config, device_map="auto")
```
### Use Flash-Attention 2 and SDPA to further speed-up generation
The model supports both Flash Attention 2 and PyTorch's [`torch.nn.functional.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html), which can be enabled for optimization. SDPA is the default option when you load the model. If you want to switch to Flash Attention 2, first make sure to install flash-attn; refer to the [original repository](https://github.com/Dao-AILab/flash-attention) for package installation. Then simply change the snippet above to:
```python
import torch
from transformers import ChameleonForConditionalGeneration
model_id = "facebook/chameleon-7b"
model = ChameleonForConditionalGeneration.from_pretrained(
model_id,
dtype=torch.bfloat16,
attn_implementation="flash_attention_2"
).to(0)
```
## ChameleonConfig
[[autodoc]] ChameleonConfig
## ChameleonVQVAEConfig
[[autodoc]] ChameleonVQVAEConfig
## ChameleonProcessor
[[autodoc]] ChameleonProcessor
## ChameleonImageProcessor
[[autodoc]] ChameleonImageProcessor
- preprocess
## ChameleonImageProcessorFast
[[autodoc]] ChameleonImageProcessorFast
- preprocess
## ChameleonVQVAE
[[autodoc]] ChameleonVQVAE
- forward
## ChameleonModel
[[autodoc]] ChameleonModel
- forward
## ChameleonForConditionalGeneration
[[autodoc]] ChameleonForConditionalGeneration
- forward
| transformers/docs/source/en/model_doc/chameleon.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/chameleon.md",
"repo_id": "transformers",
"token_count": 2941
} | 402 |
<!--Copyright 2025 Deepseek AI and The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2024-03-08 and added to Hugging Face Transformers on 2025-07-25.*
<div style="float: right;">
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
</div>
# DeepseekVL
[Deepseek-VL](https://huggingface.co/papers/2403.05525) was introduced by the DeepSeek AI team. It is a vision-language model (VLM) designed to process both text and images for generating contextually relevant responses. The model leverages [LLaMA](./llama) as its text encoder, while [SigLip](./siglip) is used for encoding images.
You can find all the original Deepseek-VL checkpoints under the [DeepSeek-community](https://huggingface.co/deepseek-community) organization.
> [!TIP]
> Click on the Deepseek-VL models in the right sidebar for more examples of how to apply Deepseek-VL to different vision and language tasks.
The example below demonstrates how to generate text based on an image with [`Pipeline`] or the [`AutoModel`] class.
<hfoptions id="usage">
<hfoption id="Pipeline">
```py
import torch
from transformers import pipeline
pipe = pipeline(
task="image-text-to-text",
model="deepseek-community/deepseek-vl-1.3b-chat",
device=0,
dtype=torch.float16
)
messages = [
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
},
{ "type": "text", "text": "Describe this image."},
]
}
]
pipe(text=messages, max_new_tokens=20, return_full_text=False)
```
</hfoption>
<hfoption id="AutoModel">
```py
import torch
from transformers import DeepseekVLForConditionalGeneration, AutoProcessor
model = DeepseekVLForConditionalGeneration.from_pretrained(
"deepseek-community/deepseek-vl-1.3b-chat",
dtype=torch.float16,
device_map="auto",
attn_implementation="sdpa"
)
processor = AutoProcessor.from_pretrained("deepseek-community/deepseek-vl-1.3b-chat")
messages = [
{
"role":"user",
"content":[
{
"type":"image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
},
{
"type":"text",
"text":"Describe this image."
}
]
}
]
inputs = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt"
).to(model.device, dtype=model.dtype)
generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
```
</hfoption>
</hfoptions>
Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.
The example below uses [torchao](../quantization/torchao) to only quantize the weights to int4.
```python
import torch
from transformers import TorchAoConfig, DeepseekVLForConditionalGeneration, AutoProcessor
quantization_config = TorchAoConfig(
"int4_weight_only",
group_size=128
)
model = DeepseekVLForConditionalGeneration.from_pretrained(
"deepseek-community/deepseek-vl-1.3b-chat",
dtype=torch.bfloat16,
device_map="auto",
quantization_config=quantization_config
)
```
### Notes
- Do inference with multiple images in a single conversation.
```py
import torch
from transformers import DeepseekVLForConditionalGeneration, AutoProcessor
model = DeepseekVLForConditionalGeneration.from_pretrained(
"deepseek-community/deepseek-vl-1.3b-chat",
dtype=torch.float16,
device_map="auto",
attn_implementation="sdpa"
)
processor = AutoProcessor.from_pretrained("deepseek-community/deepseek-vl-1.3b-chat")
messages = [
[
{
"role": "user",
"content": [
{"type": "text", "text": "What’s the difference between"},
{"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
{"type": "text", "text": " and "},
{"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"}
]
}
],
[
{
"role": "user",
"content": [
{"type": "image", "url": "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"},
{"type": "text", "text": "What do you see in this image?"}
]
}
]
]
inputs = processor.apply_chat_template(
messages,
add_generation_prompt=True,
padding=True,
truncation=True,
tokenize=True,
return_dict=True,
return_tensors="pt"
).to(model.device, dtype=model.dtype)
generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
```
## DeepseekVLConfig
[[autodoc]] DeepseekVLConfig
## DeepseekVLProcessor
[[autodoc]] DeepseekVLProcessor
## DeepseekVLImageProcessor
[[autodoc]] DeepseekVLImageProcessor
## DeepseekVLImageProcessorFast
[[autodoc]] DeepseekVLImageProcessorFast
## DeepseekVLModel
[[autodoc]] DeepseekVLModel
- forward
## DeepseekVLForConditionalGeneration
[[autodoc]] DeepseekVLForConditionalGeneration
- forward
| transformers/docs/source/en/model_doc/deepseek_vl.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/deepseek_vl.md",
"repo_id": "transformers",
"token_count": 2885
} | 403 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
*This model was released on 2025-08-13 and added to Hugging Face Transformers on 2025-08-14.*
<div style="float: right;">
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
</div>
# DINOv3
[DINOv3](https://huggingface.co/papers/2508.10104) is a family of versatile vision foundation models that outperforms the specialized state of the art across a broad range of settings, without fine-tuning. DINOv3 produces high-quality dense features that achieve outstanding performance on various vision tasks, significantly surpassing previous self- and weakly-supervised foundation models.
You can find all the original DINOv3 checkpoints under the [DINOv3](https://huggingface.co/collections/facebook/dinov3-68924841bd6b561778e31009) collection.
> [!TIP]
> Click on the DINOv3 models in the right sidebar for more examples of how to apply DINOv3 to different vision tasks.
The example below demonstrates how to obtain an image embedding with [`Pipeline`] or the [`AutoModel`] class.
<hfoptions id="usage">
<hfoption id="Pipeline">
```py
import torch
from transformers import pipeline
pipe = pipeline(
task="image-feature-extraction",
model="facebook/dinov3-vits16-pretrain-lvd1689m",
dtype=torch.bfloat16,
)
pipe("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg")
```
</hfoption>
<hfoption id="AutoModel">
```py
import torch
from transformers import AutoImageProcessor, AutoModel
from transformers.image_utils import load_image
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = load_image(url)
processor = AutoImageProcessor.from_pretrained("facebook/dinov3-vits16-pretrain-lvd1689m")
model = AutoModel.from_pretrained(
"facebook/dinov3-vits16-pretrain-lvd1689m",
dtype=torch.float16,
device_map="auto",
attn_implementation="sdpa"
)
inputs = processor(images=image, return_tensors="pt").to(model.device)
with torch.inference_mode():
outputs = model(**inputs)
pooled_output = outputs.pooler_output
print("Pooled output shape:", pooled_output.shape)
```
</hfoption>
</hfoptions>
Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.
The example below uses [torchao](../quantization/torchao) to only quantize the weights to int4.
```py
# pip install torchao
import torch
from transformers import TorchAoConfig, AutoImageProcessor, AutoModel
from torchao.quantization import Int4WeightOnlyConfig
from transformers.image_utils import load_image
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = load_image(url)
processor = AutoImageProcessor.from_pretrained("facebook/dinov3-vitsplus-pretrain-lvd1689m")
quant_type = Int4WeightOnlyConfig(group_size=128)
quantization_config = TorchAoConfig(quant_type=quant_type)
model = AutoModel.from_pretrained(
"facebook/dinov3-vit7b16-pretrain-lvd1689m",
dtype=torch.bfloat16,
device_map="auto",
quantization_config=quantization_config
)
inputs = processor(images=image, return_tensors="pt").to(model.device)
with torch.inference_mode():
outputs = model(**inputs)
pooled_output = outputs.pooler_output
print("Pooled output shape:", pooled_output.shape)
```
## Notes
- The example below shows how to split the output tensor into:
- one embedding for the whole image, commonly referred to as a `CLS` token,
useful for classification and retrieval
- register tokens - learnable embeddings that act as dedicated “memory slots” for global information,
they reduce high-norm artifacts in patch tokens, yielding cleaner attention maps and better
performance on dense prediction tasks.
- a set of local embeddings, one for each `16x16` patch of the input image,
useful for dense tasks, such as semantic segmentation
```py
import torch
from transformers import AutoImageProcessor, AutoModel
from transformers.image_utils import load_image
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = load_image(url)
print("Image size:", image.height, image.width) # [480, 640]
processor = AutoImageProcessor.from_pretrained("facebook/dinov3-vits16-pretrain-lvd1689m")
model = AutoModel.from_pretrained("facebook/dinov3-vits16-pretrain-lvd1689m")
patch_size = model.config.patch_size
print("Patch size:", patch_size) # 16
print("Num register tokens:", model.config.num_register_tokens) # 4
inputs = processor(images=image, return_tensors="pt")
print("Preprocessed image size:", inputs.pixel_values.shape) # [1, 3, 224, 224]
batch_size, _, img_height, img_width = inputs.pixel_values.shape
num_patches_height, num_patches_width = img_height // patch_size, img_width // patch_size
num_patches_flat = num_patches_height * num_patches_width
with torch.inference_mode():
outputs = model(**inputs)
last_hidden_states = outputs.last_hidden_state
print(last_hidden_states.shape) # [1, 1 + 4 + 256, 384]
assert last_hidden_states.shape == (batch_size, 1 + model.config.num_register_tokens + num_patches_flat, model.config.hidden_size)
cls_token = last_hidden_states[:, 0, :]
patch_features_flat = last_hidden_states[:, 1 + model.config.num_register_tokens:, :]
patch_features = patch_features_flat.unflatten(1, (num_patches_height, num_patches_width))
```
## DINOv3ViTConfig
[[autodoc]] DINOv3ViTConfig
## DINOv3ConvNextConfig
[[autodoc]] DINOv3ConvNextConfig
## DINOv3ViTModel
[[autodoc]] DINOv3ViTModel
- forward
## DINOv3ConvNextModel
[[autodoc]] DINOv3ConvNextModel
- forward
## DINOv3ViTImageProcessorFast
[[autodoc]] DINOv3ViTImageProcessorFast
- preprocess
| transformers/docs/source/en/model_doc/dinov3.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/dinov3.md",
"repo_id": "transformers",
"token_count": 2230
} | 404 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2019-04-19 and added to Hugging Face Transformers on 2022-09-09.*
<div style="float: right;">
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white" >
</div>
</div>
# ERNIE
[ERNIE1.0](https://huggingface.co/papers/1904.09223), [ERNIE2.0](https://ojs.aaai.org/index.php/AAAI/article/view/6428),
[ERNIE3.0](https://huggingface.co/papers/2107.02137), [ERNIE-Gram](https://huggingface.co/papers/2010.12148), [ERNIE-health](https://huggingface.co/papers/2110.07244) are a series of powerful models proposed by Baidu, especially strong on Chinese tasks.
ERNIE (Enhanced Representation through kNowledge IntEgration) is designed to learn language representations enhanced by knowledge masking strategies, which include entity-level masking and phrase-level masking.
Other ERNIE models released by Baidu can be found at [Ernie 4.5](./ernie4_5) and [Ernie 4.5 MoE](./ernie4_5_moe).
> [!TIP]
> This model was contributed by [nghuyong](https://huggingface.co/nghuyong), and the official code can be found in [PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP) (in PaddlePaddle).
>
> Click on the ERNIE models in the right sidebar for more examples of how to apply ERNIE to different language tasks.
The example below demonstrates how to predict the `[MASK]` token with [`Pipeline`], [`AutoModel`], and from the command line.
<hfoptions id="usage">
<hfoption id="Pipeline">
```py
from transformers import pipeline
pipeline = pipeline(
task="fill-mask",
model="nghuyong/ernie-3.0-xbase-zh"
)
pipeline("巴黎是[MASK]国的首都。")
```
</hfoption>
<hfoption id="AutoModel">
```py
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
"nghuyong/ernie-3.0-xbase-zh",
)
model = AutoModelForMaskedLM.from_pretrained(
"nghuyong/ernie-3.0-xbase-zh",
dtype=torch.float16,
device_map="auto"
)
inputs = tokenizer("巴黎是[MASK]国的首都。", return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
predictions = outputs.logits
masked_index = torch.where(inputs['input_ids'] == tokenizer.mask_token_id)[1]
predicted_token_id = predictions[0, masked_index].argmax(dim=-1)
predicted_token = tokenizer.decode(predicted_token_id)
print(f"The predicted token is: {predicted_token}")
```
</hfoption>
<hfoption id="transformers CLI">
```bash
echo -e "巴黎是[MASK]国的首都。" | transformers run --task fill-mask --model nghuyong/ernie-3.0-xbase-zh --device 0
```
</hfoption>
</hfoptions>
## Notes
Model variants are available in different sizes and languages.
| Model Name | Language | Description |
|:-------------------:|:--------:|:-------------------------------:|
| ernie-1.0-base-zh | Chinese | Layer:12, Heads:12, Hidden:768 |
| ernie-2.0-base-en | English | Layer:12, Heads:12, Hidden:768 |
| ernie-2.0-large-en | English | Layer:24, Heads:16, Hidden:1024 |
| ernie-3.0-base-zh | Chinese | Layer:12, Heads:12, Hidden:768 |
| ernie-3.0-medium-zh | Chinese | Layer:6, Heads:12, Hidden:768 |
| ernie-3.0-mini-zh | Chinese | Layer:6, Heads:12, Hidden:384 |
| ernie-3.0-micro-zh | Chinese | Layer:4, Heads:12, Hidden:384 |
| ernie-3.0-nano-zh | Chinese | Layer:4, Heads:12, Hidden:312 |
| ernie-health-zh | Chinese | Layer:12, Heads:12, Hidden:768 |
| ernie-gram-zh | Chinese | Layer:12, Heads:12, Hidden:768 |
## Resources
You can find all the supported models from huggingface's model hub: [huggingface.co/nghuyong](https://huggingface.co/nghuyong), and model details from paddle's official
repo: [PaddleNLP](https://paddlenlp.readthedocs.io/zh/latest/model_zoo/transformers/ERNIE/contents.html)
and [ERNIE's legacy branch](https://github.com/PaddlePaddle/ERNIE/tree/legacy/develop).
## ErnieConfig
[[autodoc]] ErnieConfig
- all
## Ernie specific outputs
[[autodoc]] models.ernie.modeling_ernie.ErnieForPreTrainingOutput
## ErnieModel
[[autodoc]] ErnieModel
- forward
## ErnieForPreTraining
[[autodoc]] ErnieForPreTraining
- forward
## ErnieForCausalLM
[[autodoc]] ErnieForCausalLM
- forward
## ErnieForMaskedLM
[[autodoc]] ErnieForMaskedLM
- forward
## ErnieForNextSentencePrediction
[[autodoc]] ErnieForNextSentencePrediction
- forward
## ErnieForSequenceClassification
[[autodoc]] ErnieForSequenceClassification
- forward
## ErnieForMultipleChoice
[[autodoc]] ErnieForMultipleChoice
- forward
## ErnieForTokenClassification
[[autodoc]] ErnieForTokenClassification
- forward
## ErnieForQuestionAnswering
[[autodoc]] ErnieForQuestionAnswering
- forward
| transformers/docs/source/en/model_doc/ernie.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/ernie.md",
"repo_id": "transformers",
"token_count": 2014
} | 405 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
<div style="float: right;">
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
</div>
# Florence-2
[Florence-2](https://arxiv.org/abs/2311.06242) is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks. Florence-2 can interpret simple text prompts to perform tasks like captioning, object detection, and segmentation. It leverages the FLD-5B dataset, containing 5.4 billion annotations across 126 million images, to master multi-task learning. The model's sequence-to-sequence architecture enables it to excel in both zero-shot and fine-tuned settings, proving to be a competitive vision foundation model.
You can find all the original Florence-2 checkpoints under the [Florence-2](https://huggingface.co/models?other=florence-2) collection.
> [!TIP]
> This model was contributed by [ducviet00](https://huggingface.co/ducviet00).
> Click on the Florence-2 models in the right sidebar for more examples of how to apply Florence-2 to different vision and language tasks.
The example below demonstrates how to perform object detection with [`Pipeline`] or the [`AutoModel`] class.
<hfoptions id="usage">
<hfoption id="Pipeline">
```py
import torch
import requests
from PIL import Image
from transformers import pipeline
pipeline = pipeline(
"image-text-to-text",
model="ducviet00/Florence-2-base-hf",
device=0,
dtype=torch.bfloat16
)
pipeline(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true",
text="<OD>"
)
```
</hfoption>
<hfoption id="AutoModel">
```py
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, Florence2ForConditionalGeneration
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
model = Florence2ForConditionalGeneration.from_pretrained("microsoft/Florence-2-base", dtype=torch.bfloat16, device_map="auto")
processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base")
task_prompt = "<OD>"
inputs = processor(text=task_prompt, images=image, return_tensors="pt").to(model.device)
generated_ids = model.generate(
**inputs,
max_new_tokens=1024,
num_beams=3,
)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
image_size = image.size
parsed_answer = processor.post_process_generation(generated_text, task=task_prompt, image_size=image_size)
print(parsed_answer)
```
</hfoption>
</hfoptions>
Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.
The example below uses [bitsandbytes](../quantization/bitsandbytes) to quantize the model to 4-bit.
```py
# pip install bitsandbytes
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, Florence2ForConditionalGeneration, BitsAndBytesConfig
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
model = Florence2ForConditionalGeneration.from_pretrained(
"microsoft/Florence-2-large",
dtype=torch.bfloat16,
device_map="auto",
quantization_config=quantization_config
)
processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large")
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
task_prompt = "<OD>"
inputs = processor(text=task_prompt, images=image, return_tensors="pt").to(model.device, torch.bfloat16)
generated_ids = model.generate(
**inputs,
max_new_tokens=1024,
num_beams=3,
)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
image_size = image.size
parsed_answer = processor.post_process_generation(generated_text, task=task_prompt, image_size=image_size)
print(parsed_answer)
```
<div class="flex justify-center">
<img src=""/>
</div>
## Notes
- Florence-2 is a prompt-based model. You need to provide a task prompt to tell the model what to do. Supported tasks are:
- `<OCR>`
- `<OCR_WITH_REGION>`
- `<CAPTION>`
- `<DETAILED_CAPTION>`
- `<MORE_DETAILED_CAPTION>`
- `<OD>`
- `<DENSE_REGION_CAPTION>`
- `<CAPTION_TO_PHRASE_GROUNDING>`
- `<REFERRING_EXPRESSION_SEGMENTATION>`
- `<REGION_TO_SEGMENTATION>`
- `<OPEN_VOCABULARY_DETECTION>`
- `<REGION_TO_CATEGORY>`
- `<REGION_TO_DESCRIPTION>`
- `<REGION_TO_OCR>`
- `<REGION_PROPOSAL>`
- The raw output of the model is a string that needs to be parsed. The [`Florence2Processor`] has a [`~Florence2Processor.post_process_generation`] method that can parse the string into a more usable format, like bounding boxes and labels for object detection.
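As a hedged illustration of that parsing step, reusing `generated_text`, `processor`, and `image` from the examples above (the commented output structure is an assumption based on the `<OD>` task):
```py
parsed = processor.post_process_generation(
    generated_text,
    task="<OD>",
    image_size=image.size,  # (width, height), used to rescale box coordinates
)
# For object detection, the result is a dict keyed by the task prompt, e.g.
# {"<OD>": {"bboxes": [[x1, y1, x2, y2], ...], "labels": ["car", ...]}}
print(parsed)
```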
## Resources
- [Florence-2 technical report](https://arxiv.org/abs/2311.06242)
- [Jupyter Notebook for inference and visualization of Florence-2-large model](https://huggingface.co/microsoft/Florence-2-large/blob/main/sample_inference.ipynb)
## Florence2VisionConfig
[[autodoc]] Florence2VisionConfig
## Florence2Config
[[autodoc]] Florence2Config
## Florence2Processor
[[autodoc]] Florence2Processor
## Florence2Model
[[autodoc]] Florence2Model
- forward
## Florence2ForConditionalGeneration
[[autodoc]] Florence2ForConditionalGeneration
- forward
## Florence2VisionBackbone
[[autodoc]] Florence2VisionBackbone
- forward
| transformers/docs/source/en/model_doc/florence2.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/florence2.md",
"repo_id": "transformers",
"token_count": 2229
} | 406 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2024-12-18 and added to Hugging Face Transformers on 2025-01-23.*
# Granite Vision
## Overview
The [Granite Vision](https://www.ibm.com/new/announcements/ibm-granite-3-1-powerful-performance-long-context-and-more) model is a variant of [LLaVA-NeXT](llava_next), leveraging a [Granite](granite) language model alongside a [SigLIP](siglip) visual encoder. It utilizes multiple concatenated vision hidden states as its image features, similar to [VipLlava](vipllava). It also uses a larger set of image grid pinpoints than the original LLaVA-NeXT models to support additional aspect ratios.
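As a small, hedged sketch of that last point (checkpoint name taken from the inference example below), the configured grid pinpoints can be inspected directly:
```python
from transformers import LlavaNextConfig

# Each (height, width) pair is a resolution the model can tile an image into
config = LlavaNextConfig.from_pretrained("ibm-granite/granite-vision-3.1-2b-preview")
print(len(config.image_grid_pinpoints), config.image_grid_pinpoints[:3])
```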
Tips:
- This model is loaded into Transformers as an instance of LLaVA-NeXT. The usage and tips from [LLaVA-NeXT](llava_next) apply to this model as well.
- You can apply the chat template to the tokenizer / processor in the usual way. Example chat format:
```bash
"<|user|>\nWhat’s shown in this image?\n<|assistant|>\nThis image shows a red stop sign.<|end_of_text|><|user|>\nDescribe the image in more details.\n<|assistant|>\n"
```
Sample inference:
```python
from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration, infer_device
device = infer_device()
model_path = "ibm-granite/granite-vision-3.1-2b-preview"
processor = LlavaNextProcessor.from_pretrained(model_path)
model = LlavaNextForConditionalGeneration.from_pretrained(model_path).to(device)
# prepare image and text prompt, using the appropriate prompt template
url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true"
conversation = [
{
"role": "user",
"content": [
{"type": "image", "url": url},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
inputs = processor.apply_chat_template(
conversation,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt"
).to(model.device)
# autoregressively complete prompt
output = model.generate(**inputs, max_new_tokens=100)
print(processor.decode(output[0], skip_special_tokens=True))
```
This model was contributed by [Alexander Brooks](https://huggingface.co/abrooks9944).
## LlavaNextConfig
[[autodoc]] LlavaNextConfig
## LlavaNextImageProcessor
[[autodoc]] LlavaNextImageProcessor
- preprocess
## LlavaNextProcessor
[[autodoc]] LlavaNextProcessor
## LlavaNextForConditionalGeneration
[[autodoc]] LlavaNextForConditionalGeneration
- forward
| transformers/docs/source/en/model_doc/granitevision.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/granitevision.md",
"repo_id": "transformers",
"token_count": 1071
} | 407 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
specific language governing permissions and limitations under the License. -->
*This model was released on 2020-06-17 and added to Hugging Face Transformers on 2021-11-18.*
# ImageGPT
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
## Overview
The ImageGPT model was proposed in [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt) by Mark
Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever. ImageGPT (iGPT) is a GPT-2-like
model trained to predict the next pixel value, allowing for both unconditional and conditional image generation.
The abstract from the [paper](https://cdn.openai.com/papers/Generative_Pretraining_from_Pixels_V1_ICML.pdf) is the following:
*Inspired by progress in unsupervised representation learning for natural language, we examine whether similar models
can learn useful representations for images. We train a sequence Transformer to auto-regressively predict pixels,
without incorporating knowledge of the 2D input structure. Despite training on low-resolution ImageNet without labels,
we find that a GPT-2 scale model learns strong image representations as measured by linear probing, fine-tuning, and
low-data classification. On CIFAR-10, we achieve 96.3% accuracy with a linear probe, outperforming a supervised Wide
ResNet, and 99.0% accuracy with full fine-tuning, matching the top supervised pre-trained models. We are also
competitive with self-supervised benchmarks on ImageNet when substituting pixels for a VQVAE encoding, achieving 69.0%
top-1 accuracy on a linear probe of our features.*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/imagegpt_architecture.png"
alt="drawing" width="600"/>
<small> Summary of the approach. Taken from the [original paper](https://cdn.openai.com/papers/Generative_Pretraining_from_Pixels_V2.pdf). </small>
This model was contributed by [nielsr](https://huggingface.co/nielsr), based on [this issue](https://github.com/openai/image-gpt/issues/7). The original code can be found
[here](https://github.com/openai/image-gpt).
## Usage tips
- ImageGPT is almost exactly the same as [GPT-2](gpt2), with the exception that a different activation
function is used (namely "quick gelu"), and the layer normalization layers don't mean-center the inputs. ImageGPT
also doesn't have tied input and output embeddings.
- As the time and memory requirements of the attention mechanism of Transformers scale quadratically in the sequence
length, the authors pre-trained ImageGPT on smaller input resolutions, such as 32x32 and 64x64. However, feeding a
sequence of 32x32x3=3072 tokens from 0..255 into a Transformer is still prohibitively large. Therefore, the authors
applied k-means clustering to the (R,G,B) pixel values with k=512. This way, we only have a 32*32 = 1024-long
sequence, but now of integers in the range 0..511. So we shrink the sequence length at the cost of a bigger
embedding matrix. In other words, the vocabulary size of ImageGPT is 512, plus 1 for a special "start of sentence" (SOS)
token, used at the beginning of every sequence. One can use [`ImageGPTImageProcessor`] to prepare
images for the model.
- Despite being pre-trained entirely unsupervised (i.e. without the use of any labels), ImageGPT produces fairly
performant image features useful for downstream tasks, such as image classification. The authors showed that the
features in the middle of the network are the most performant, and can be used as-is to train a linear model (such as
a sklearn logistic regression model, for example). This is also referred to as "linear probing". Features can be
obtained by forwarding the image through the model with `output_hidden_states=True`, and then
average-pooling the hidden states at whatever layer you like (see the sketch after the table below).
- Alternatively, one can further fine-tune the entire model on a downstream dataset, similar to BERT. For this, you can
use [`ImageGPTForImageClassification`].
- ImageGPT comes in different sizes: there's ImageGPT-small, ImageGPT-medium and ImageGPT-large. The authors also
trained an XL variant, which they didn't release. The differences in size are summarized in the following table:
| **Model variant** | **Layers** | **Hidden size** | **Params (M)** |
|---|---|---|---|
| iGPT-small | 24 | 512 | 76 |
| iGPT-medium | 36 | 1024 | 455 |
| iGPT-large | 48 | 1536 | 1362 |
| iGPT-XL (not released) | 60 | 3072 | 6801 |
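The following is a minimal, hedged sketch of the linear-probing tip above; the checkpoint and the choice of middle layer are illustrative, not the authors' exact probing setup:
```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ImageGPTModel

processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
model = ImageGPTModel.from_pretrained("openai/imagegpt-small")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# The processor color-quantizes pixels into 512 clusters: a 32x32 image
# becomes a sequence of 1024 input ids in the range 0..511
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

# Average-pool a middle layer to get one feature vector per image; these
# features can then be fed to e.g. sklearn's LogisticRegression
middle = len(outputs.hidden_states) // 2
features = outputs.hidden_states[middle].mean(dim=1)  # (batch, hidden_size)
```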
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ImageGPT.
<PipelineTag pipeline="image-classification"/>
- Demo notebooks for ImageGPT can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/ImageGPT).
- [`ImageGPTForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
- See also: [Image classification task guide](../tasks/image_classification)
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
## ImageGPTConfig
[[autodoc]] ImageGPTConfig
## ImageGPTFeatureExtractor
[[autodoc]] ImageGPTFeatureExtractor
- __call__
## ImageGPTImageProcessor
[[autodoc]] ImageGPTImageProcessor
- preprocess
## ImageGPTModel
[[autodoc]] ImageGPTModel
- forward
## ImageGPTForCausalImageModeling
[[autodoc]] ImageGPTForCausalImageModeling
- forward
## ImageGPTForImageClassification
[[autodoc]] ImageGPTForImageClassification
- forward
| transformers/docs/source/en/model_doc/imagegpt.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/imagegpt.md",
"repo_id": "transformers",
"token_count": 2042
} | 408 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2020-04-10 and added to Hugging Face Transformers on 2021-01-05.*
<div style="float: right;">
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
</div>
# LED
[Longformer-Encoder-Decoder (LED)](https://huggingface.co/papers/2004.05150) is an encoder-decoder transformer model for sequence-to-sequence tasks like summarization. It extends [Longformer](longformer), an encoder-only model designed to handle long inputs, by adding a decoder layer. The decoder uses full self-attention on the encoded tokens and previously decoded locations. Because of Longformer's linear self-attention mechanism, LED is more efficient than standard encoder-decoder models when processing long sequences.
You can find all the original LED checkpoints under the [Ai2](https://huggingface.co/allenai/models?search=led) organization.
> [!TIP]
> This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten).
>
> Click on the LED models in the right sidebar for more examples of how to apply LED to different language tasks.
The example below demonstrates how to summarize text with [`Pipeline`], [`AutoModel`], and from the command line.
<hfoptions id="usage">
<hfoption id="Pipeline">
```python
import torch
from transformers import pipeline
pipeline = pipeline(
task="summarization",
model="allenai/led-base-16384",
dtype=torch.float16,
device=0
)
pipeline("""Plants are among the most remarkable and essential life forms on Earth, possessing a unique ability to produce their own food through a process known as photosynthesis. This complex biochemical process is fundamental not only to plant life but to virtually all life on the planet.
Through photosynthesis, plants capture energy from sunlight using a green pigment called chlorophyll, which is located in specialized cell structures called chloroplasts. In the presence of light, plants absorb carbon dioxide from the atmosphere through small pores in their leaves called stomata, and take in water from the soil through their root systems.
These ingredients are then transformed into glucose, a type of sugar that serves as a source of chemical energy, and oxygen, which is released as a byproduct into the atmosphere. The glucose produced during photosynthesis is not just used immediately; plants also store it as starch or convert it into other organic compounds like cellulose, which is essential for building their cellular structure.
This energy reserve allows them to grow, develop leaves, produce flowers, bear fruit, and carry out various physiological processes throughout their lifecycle.""")
```
</hfoption>
<hfoption id="AutoModel">
```python
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained(
"allenai/led-base-16384"
)
model = AutoModelForSeq2SeqLM.from_pretrained(
"allenai/led-base-16384",
dtype=torch.float16,
device_map="auto"
)
input_text = """Plants are among the most remarkable and essential life forms on Earth, possessing a unique ability to produce their own food through a process known as photosynthesis. This complex biochemical process is fundamental not only to plant life but to virtually all life on the planet.
Through photosynthesis, plants capture energy from sunlight using a green pigment called chlorophyll, which is located in specialized cell structures called chloroplasts. In the presence of light, plants absorb carbon dioxide from the atmosphere through small pores in their leaves called stomata, and take in water from the soil through their root systems.
These ingredients are then transformed into glucose, a type of sugar that serves as a source of chemical energy, and oxygen, which is released as a byproduct into the atmosphere. The glucose produced during photosynthesis is not just used immediately; plants also store it as starch or convert it into other organic compounds like cellulose, which is essential for building their cellular structure.
This energy reserve allows them to grow, develop leaves, produce flowers, bear fruit, and carry out various physiological processes throughout their lifecycle."""
input_ids = tokenizer(input_text, return_tensors="pt").to(model.device)
# Place global attention on the first token
global_attention_mask = torch.zeros_like(input_ids.input_ids).to(model.device)
global_attention_mask[:, 0] = 1
output = model.generate(**input_ids, global_attention_mask=global_attention_mask, cache_implementation="static")
print(tokenizer.decode(output[0], skip_special_tokens=True))
```
</hfoption>
<hfoption id="transformers-cli">
```bash
echo -e "Plants are among the most remarkable and essential life forms on Earth, possessing a unique ability to produce their own food through a process known as photosynthesis. This complex biochemical process is fundamental not only to plant life but to virtually all life on the planet. Through photosynthesis, plants capture energy from sunlight using a green pigment called chlorophyll, which is located in specialized cell structures called chloroplasts." | transformers-cli run --task summarization --model allenai/led-base-16384 --device 0
```
</hfoption>
</hfoptions>
Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.
The example below uses [bitsandbytes](../quantization/bitsandbytes) to only quantize the weights to int4.
```python
import torch
from transformers import BitsAndBytesConfig, AutoModelForSeq2SeqLM, AutoTokenizer
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.bfloat16,
bnb_4bit_quant_type="nf4"
)
model = AutoModelForSeq2SeqLM.from_pretrained(
"allenai/led-large-16384",
dtype=torch.bfloat16,
device_map="auto",
quantization_config=quantization_config
)
tokenizer = AutoTokenizer.from_pretrained(
"allenai/led-large-16384"
)
input_text = """Plants are among the most remarkable and essential life forms on Earth, possessing a unique ability to produce their own food through a process known as photosynthesis. This complex biochemical process is fundamental not only to plant life but to virtually all life on the planet.
Through photosynthesis, plants capture energy from sunlight using a green pigment called chlorophyll, which is located in specialized cell structures called chloroplasts. In the presence of light, plants absorb carbon dioxide from the atmosphere through small pores in their leaves called stomata, and take in water from the soil through their root systems.
These ingredients are then transformed into glucose, a type of sugar that serves as a source of chemical energy, and oxygen, which is released as a byproduct into the atmosphere. The glucose produced during photosynthesis is not just used immediately; plants also store it as starch or convert it into other organic compounds like cellulose, which is essential for building their cellular structure.
This energy reserve allows them to grow, develop leaves, produce flowers, bear fruit, and carry out various physiological processes throughout their lifecycle."""
input_ids = tokenizer(input_text, return_tensors="pt").to(model.device)
# Place global attention on the first token
global_attention_mask = torch.zeros_like(input_ids.input_ids).to(model.device)
global_attention_mask[:, 0] = 1
output = model.generate(**input_ids, global_attention_mask=global_attention_mask, cache_implementation="static")
print(tokenizer.decode(output[0], skip_special_tokens=True))
```
## Notes
- [`LEDForConditionalGeneration`] is an extension of [`BartForConditionalGeneration`] exchanging the traditional self-attention layer with Longformer's chunked self-attention layer. [`LEDTokenizer`] is an alias of [`BartTokenizer`].
- LED pads the `input_ids` to be a multiple of `config.attention_window` if required. A small speedup is gained when [`LEDTokenizer`] is used with the `pad_to_multiple_of` argument.
- LED works best on long-range sequence-to-sequence tasks where the `input_ids` are significantly longer than 1024 tokens.
- LED uses global attention by means of the `global_attention_mask` (see [`LongformerModel`]). For summarization, it is advised to put global attention only on the first `<s>` token. For question answering, it is advised to put global attention on all tokens of the question.
- To fine-tune LED on inputs of up to 16384 tokens, gradient checkpointing can be enabled in case training leads to out-of-memory (OOM) errors. Enable gradient checkpointing by adding `model.gradient_checkpointing_enable()` and setting `use_cache=False` to disable the caching mechanism and save memory (a minimal sketch follows these notes).
- Inputs should be padded on the right because LED uses absolute position embeddings.
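A minimal sketch of the gradient checkpointing tip above (training loop omitted):
```python
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("allenai/led-base-16384")

# Trade compute for memory when fine-tuning on long sequences
model.gradient_checkpointing_enable()
model.config.use_cache = False  # caching is incompatible with gradient checkpointing during training
```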
## Resources
- Read the [LED on Arxiv notebook](https://colab.research.google.com/drive/12INTTR6n64TzS4RrXZxMSXfrOd9Xzamo?usp=sharing) to see how LED can achieve state-of-the-art performance on Arxiv article summarization.
- Read the [Fine-tune LED notebook](https://colab.research.google.com/drive/12LjJazBl7Gam0XBPy_y0CTOJZeZ34c2v?usp=sharing) to learn how to fine-tune LED on PubMed articles.
## LEDConfig
[[autodoc]] LEDConfig
## LEDTokenizer
[[autodoc]] LEDTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## LEDTokenizerFast
[[autodoc]] LEDTokenizerFast
## LED specific outputs
[[autodoc]] models.led.modeling_led.LEDEncoderBaseModelOutput
[[autodoc]] models.led.modeling_led.LEDSeq2SeqModelOutput
[[autodoc]] models.led.modeling_led.LEDSeq2SeqLMOutput
[[autodoc]] models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput
[[autodoc]] models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput
## LEDModel
[[autodoc]] LEDModel
- forward
## LEDForConditionalGeneration
[[autodoc]] LEDForConditionalGeneration
- forward
## LEDForSequenceClassification
[[autodoc]] LEDForSequenceClassification
- forward
## LEDForQuestionAnswering
[[autodoc]] LEDForQuestionAnswering
- forward
| transformers/docs/source/en/model_doc/led.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/led.md",
"repo_id": "transformers",
"token_count": 2993
} | 409 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2019-08-20 and added to Hugging Face Transformers on 2020-11-16.*
# LXMERT
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
## Overview
The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from Transformers](https://huggingface.co/papers/1908.07490) by Hao Tan & Mohit Bansal. It is a series of bidirectional transformer encoders
(one for the vision modality, one for the language modality, and then one to fuse both modalities) pretrained using a
combination of masked language modeling, visual-language text alignment, ROI-feature regression, masked
visual-attribute modeling, masked visual-object modeling, and visual-question answering objectives. The pretraining
consists of multiple multi-modal datasets: MSCOCO, Visual-Genome + Visual-Genome Question Answering, VQA 2.0, and GQA.
The abstract from the paper is the following:
*Vision-and-language reasoning requires an understanding of visual concepts, language semantics, and, most importantly,
the alignment and relationships between these two modalities. We thus propose the LXMERT (Learning Cross-Modality
Encoder Representations from Transformers) framework to learn these vision-and-language connections. In LXMERT, we
build a large-scale Transformer model that consists of three encoders: an object relationship encoder, a language
encoder, and a cross-modality encoder. Next, to endow our model with the capability of connecting vision and language
semantics, we pre-train the model with large amounts of image-and-sentence pairs, via five diverse representative
pretraining tasks: masked language modeling, masked object prediction (feature regression and label classification),
cross-modality matching, and image question answering. These tasks help in learning both intra-modality and
cross-modality relationships. After fine-tuning from our pretrained parameters, our model achieves the state-of-the-art
results on two visual question answering datasets (i.e., VQA and GQA). We also show the generalizability of our
pretrained cross-modality model by adapting it to a challenging visual-reasoning task, NLVR, and improve the previous
best result by 22% absolute (54% to 76%). Lastly, we demonstrate detailed ablation studies to prove that both our novel
model components and pretraining strategies significantly contribute to our strong results; and also present several
attention visualizations for the different encoders*
This model was contributed by [eltoto1219](https://huggingface.co/eltoto1219). The original code can be found [here](https://github.com/airsplay/lxmert).
## Usage tips
- Bounding boxes are not required for the visual feature embeddings; any kind of visual-spatial features
will work.
- Both the language hidden states and the visual hidden states that LXMERT outputs are passed through the
cross-modality layer, so they contain information from both modalities. To access a modality that only attends to
itself, select the vision/language hidden states from the first input in the tuple (see the sketch after these tips).
- The bidirectional cross-modality encoder attention only returns attention values when the language modality is used
as the input and the vision modality is used as the context vector. Further, while the cross-modality encoder
contains self-attention for each respective modality and cross-attention, only the cross attention is returned and
both self attention outputs are disregarded.
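A minimal, hedged sketch of these outputs; the random tensors stand in for real region features from a detector such as Faster R-CNN, and the shapes follow the default config (`visual_feat_dim=2048`, `visual_pos_dim=4`):
```python
import torch
from transformers import LxmertModel, LxmertTokenizer

tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
model = LxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")

inputs = tokenizer("Who is eating the apple?", return_tensors="pt")
visual_feats = torch.randn(1, 36, 2048)  # (batch, regions, feature_dim) - placeholder ROI features
visual_pos = torch.rand(1, 36, 4)        # normalized bounding box coordinates - placeholder

outputs = model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos)
language_hidden = outputs.language_output  # language states after the cross-modality encoder
vision_hidden = outputs.vision_output      # vision states after the cross-modality encoder
```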
## Resources
- [Question answering task guide](../tasks/question_answering)
## LxmertConfig
[[autodoc]] LxmertConfig
## LxmertTokenizer
[[autodoc]] LxmertTokenizer
## LxmertTokenizerFast
[[autodoc]] LxmertTokenizerFast
## Lxmert specific outputs
[[autodoc]] models.lxmert.modeling_lxmert.LxmertModelOutput
[[autodoc]] models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput
[[autodoc]] models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput
## LxmertModel
[[autodoc]] LxmertModel
- forward
## LxmertForPreTraining
[[autodoc]] LxmertForPreTraining
- forward
## LxmertForQuestionAnswering
[[autodoc]] LxmertForQuestionAnswering
- forward
| transformers/docs/source/en/model_doc/lxmert.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/lxmert.md",
"repo_id": "transformers",
"token_count": 1334
} | 410 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2024-12-18 and added to Hugging Face Transformers on 2025-07-15.*
<div style="float: right;">
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
</div>
# ModernBERT Decoder
ModernBERT Decoder has the same architecture as [ModernBERT](https://huggingface.co/papers/2412.13663) but it is trained from scratch with a causal language modeling objective from the [Ettin paper](https://huggingface.co/papers/2507.11412). This allows for using the same architecture to compare encoders and decoders. This model is the decoder architecture implementation of ModernBERT, designed for autoregressive text generation tasks.
ModernBERT Decoder uses sliding window attention and rotary positional embeddings for efficiency and to handle longer sequences.
You can find all the original ModernBERT Decoder checkpoints under the [jhu-clsp](https://huggingface.co/collections/jhu-clsp/encoders-vs-decoders-the-ettin-suite-686303e16142257eed8e6aeb) collection.
> [!TIP]
> This model was contributed by [orionw](https://huggingface.co/orionweller).
>
> Click on the ModernBERT Decoder models in the right sidebar for more examples of how to apply ModernBERT Decoder to different text generation tasks.
The example below demonstrates how to use ModernBERT Decoder for text generation with [`Pipeline`], [`AutoModel`] (with and without quantization), and from the command line.
<hfoptions id="usage">
<hfoption id="Pipeline">
```py
import torch
from transformers import pipeline
generator = pipeline(
task="text-generation",
model="jhu-clsp/ettin-decoder-17m",
dtype=torch.float16,
device=0
)
generator("The future of artificial intelligence is", max_length=50, num_return_sequences=1)
# For sequence classification
classifier = pipeline(
task="text-classification",
model="jhu-clsp/ettin-decoder-17m",
dtype=torch.float16,
device=0
)
classifier("This movie is really great!")
```
</hfoption>
<hfoption id="AutoModel">
```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/ettin-decoder-17m")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/ettin-decoder-17m",
dtype=torch.float16,
device_map="auto",
)
prompt = "The future of artificial intelligence is"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_length=50,
num_return_sequences=1,
temperature=0.7,
do_sample=True,
pad_token_id=tokenizer.eos_token_id
)
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(f"Generated text: {generated_text}")
# For sequence classification
from transformers import AutoModelForSequenceClassification
classifier_model = AutoModelForSequenceClassification.from_pretrained(
"jhu-clsp/ettin-decoder-17m",
dtype=torch.float16,
device_map="auto",
num_labels=2
)
text = "This movie is really great!"
inputs = tokenizer(text, return_tensors="pt").to(classifier_model.device)
with torch.no_grad():
outputs = classifier_model(**inputs)
predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
predicted_class = torch.argmax(predictions, dim=-1)
print(f"Predicted class: {predicted_class.item()}")
print(f"Prediction probabilities: {predictions}")
```
</hfoption>
<hfoption id="AutoModel (w/quantization)">
```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
quantization_config = BitsAndBytesConfig(
load_in_8bit=True,
)
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/ettin-decoder-1b")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/ettin-decoder-1b",
dtype=torch.float16,
device_map="auto",
quantization_config=quantization_config
)
prompt = "The future of artificial intelligence is"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_length=50,
num_return_sequences=1,
temperature=0.7,
do_sample=True,
pad_token_id=tokenizer.eos_token_id
)
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(f"Generated text: {generated_text}")
```
</hfoption>
<hfoption id="transformers CLI">
```bash
echo "The future of artificial intelligence is" | transformers run --task text-generation --model jhu-clsp/ettin-decoder-17m --device 0
```
</hfoption>
</hfoptions>
## ModernBertDecoderConfig
[[autodoc]] ModernBertDecoderConfig
<frameworkcontent>
<pt>
## ModernBertDecoderModel
[[autodoc]] ModernBertDecoderModel
- forward
## ModernBertDecoderForCausalLM
[[autodoc]] ModernBertDecoderForCausalLM
- forward
## ModernBertDecoderForSequenceClassification
[[autodoc]] ModernBertDecoderForSequenceClassification
- forward
</pt>
</frameworkcontent>
| transformers/docs/source/en/model_doc/modernbert-decoder.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/modernbert-decoder.md",
"repo_id": "transformers",
"token_count": 2093
} | 411 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
<div style="float: right;">
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
</div>
*This model was released on 2022-07-11 and added to Hugging Face Transformers on 2022-07-18.*
# NLLB
[NLLB: No Language Left Behind](https://huggingface.co/papers/2207.04672) is a multilingual translation model. It's trained with data-mining techniques tailored for low-resource languages and supports over 200 languages. NLLB features a conditional compute architecture using a Sparsely Gated Mixture of Experts.
You can find all the original NLLB checkpoints under the [AI at Meta](https://huggingface.co/facebook/models?search=nllb) organization.
> [!TIP]
> This model was contributed by [Lysandre](https://huggingface.co/lysandre).
> Click on the NLLB models in the right sidebar for more examples of how to apply NLLB to different translation tasks.
The example below demonstrates how to translate text with [`Pipeline`] or the [`AutoModel`] class.
<hfoptions id="usage">
<hfoption id="Pipeline">
```python
import torch
from transformers import pipeline
pipeline = pipeline(task="translation", model="facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn", dtype=torch.float16, device=0)
pipeline("UN Chief says there is no military solution in Syria")
```
</hfoption>
<hfoption id="AutoModel">
```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M", dtype="auto", attn_implementation="sdpa")
article = "UN Chief says there is no military solution in Syria"
inputs = tokenizer(article, return_tensors="pt")
translated_tokens = model.generate(
**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"), max_length=30
)
print(tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0])
```
</hfoption>
<hfoption id="transformers CLI">
```bash
echo -e "UN Chief says there is no military solution in Syria" | transformers run --task "translation_en_to_fr" --model facebook/nllb-200-distilled-600M --device 0
```
</hfoption>
</hfoptions>
Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.
The example below uses [bitsandbytes](../quantization/bitsandbytes) to quantize the weights to 8-bits.
```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, BitsAndBytesConfig
bnb_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-1.3B", quantization_config=bnb_config)
tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-1.3B")
article = "UN Chief says there is no military solution in Syria"
inputs = tokenizer(article, return_tensors="pt").to(model.device)
translated_tokens = model.generate(
**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"), max_length=30,
)
print(tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0])
```
Use the [AttentionMaskVisualizer](https://github.com/huggingface/transformers/blob/main/src/transformers/utils/attention_visualizer.py#L139) to better understand what tokens the model can and cannot attend to.
```python
from transformers.utils.attention_visualizer import AttentionMaskVisualizer
visualizer = AttentionMaskVisualizer("facebook/nllb-200-distilled-600M")
visualizer("UN Chief says there is no military solution in Syria")
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/NLLB-Attn-Mask.png"/>
</div>
## Notes
- The tokenizer was updated in April 2023 to prefix the source sequence with the source language rather than the target language. This prioritizes zero-shot performance at a minor cost to supervised performance.
```python
>>> from transformers import NllbTokenizer
>>> tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
>>> tokenizer("How was your day?").input_ids
[256047, 13374, 1398, 4260, 4039, 248130, 2]
```
To revert to the legacy behavior, use the code example below.
```python
>>> from transformers import NllbTokenizer
>>> tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", legacy_behaviour=True)
```
- For non-English source languages, specify the language's [BCP-47](https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200) code with the `src_lang` keyword as shown below.
- See the example below for a translation from Romanian to German.
```python
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="ron_Latn")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
>>> article = "Şeful ONU spune că nu există o soluţie militară în Siria"
>>> inputs = tokenizer(article, return_tensors="pt")
>>> translated_tokens = model.generate(
...     **inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("deu_Latn"), max_length=30
... )
>>> tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
UN-Chef sagt, es gibt keine militärische Lösung in Syrien
```
## NllbTokenizer
[[autodoc]] NllbTokenizer
- build_inputs_with_special_tokens
## NllbTokenizerFast
[[autodoc]] NllbTokenizerFast
| transformers/docs/source/en/model_doc/nllb.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/nllb.md",
"repo_id": "transformers",
"token_count": 2238
} | 412 |