Dataset columns: repo_id (string, 15-89 chars), file_path (string, 27-180 chars), content (string, 1-2.23M chars), __index_level_0__ (int64, always 0)
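The rows below follow that column layout: a repository identifier, a file path inside it, the full file contents, and a constant index value. As a minimal sketch (not part of the dump itself), rows shaped like this could be written back out as files on disk; the sample row and the restored/ output directory here are hypothetical, and only the column names come from the schema above.

from pathlib import Path

# One hypothetical row with the same columns as the dump below.
rows = [
    {
        "repo_id": "hf_public_repos/text-generation-inference/integration-tests",
        "file_path": "hf_public_repos/text-generation-inference/integration-tests/models/test_example.py",
        "content": "import pytest\n",
        "__index_level_0__": 0,
    },
]

for row in rows:
    target = Path("restored") / row["file_path"]
    target.parent.mkdir(parents=True, exist_ok=True)  # recreate the nested repo layout
    target.write_text(row["content"], encoding="utf-8")  # restore the original file contents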
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_t5_sharded.py
import pytest


@pytest.fixture(scope="module")
def t5_sharded_handle(launcher):
    with launcher("google/flan-t5-xxl", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def t5_sharded(t5_sharded_handle):
    await t5_sharded_handle.health(300)
    return t5_sharded_handle.client


@pytest.mark.asyncio
async def test_t5_sharded(t5_sharded, response_snapshot):
    response = await t5_sharded.generate(
        "Please answer the following question. What is the boiling point of Nitrogen?",
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert response == response_snapshot


@pytest.mark.asyncio
async def test_t5_sharded_load(t5_sharded, generate_load, response_snapshot):
    responses = await generate_load(
        t5_sharded,
        "Please answer the following question. What is the boiling point of Nitrogen?",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_llama.py
import pytest


@pytest.fixture(scope="module")
def flash_llama_handle(launcher):
    with launcher("huggingface/llama-7b", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_llama(flash_llama_handle):
    await flash_llama_handle.health(300)
    return flash_llama_handle.client


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama(flash_llama, response_snapshot):
    response = await flash_llama.generate(
        "Test request", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_all_params(flash_llama, response_snapshot):
    response = await flash_llama.generate(
        "Test request",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 5
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_load(flash_llama, generate_load, response_snapshot):
    responses = await generate_load(flash_llama, "Test request", max_new_tokens=10, n=4)

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_starcoder_gptq.py
import pytest


@pytest.fixture(scope="module")
def flash_starcoder_gptq_handle(launcher):
    with launcher("Narsil/starcoder-gptq", num_shard=2, quantize="gptq") as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_starcoder_gptq(flash_starcoder_gptq_handle):
    await flash_starcoder_gptq_handle.health(300)
    return flash_starcoder_gptq_handle.client


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_gptq(flash_starcoder_gptq, generous_response_snapshot):
    response = await flash_starcoder_gptq.generate(
        "def geometric_mean(L: List[float]):",
        max_new_tokens=20,
        decoder_input_details=True,
    )
    assert response.details.generated_tokens == 20
    assert response == generous_response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_gptq_default_params(
    flash_starcoder_gptq, generous_response_snapshot
):
    response = await flash_starcoder_gptq.generate(
        "def geometric_mean(L: List[float]):",
        max_new_tokens=20,
        temperature=0.2,
        top_p=0.95,
        decoder_input_details=True,
        seed=0,
    )
    assert response.details.generated_tokens == 20
    assert response == generous_response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_gptq_load(
    flash_starcoder_gptq, generate_load, generous_response_snapshot
):
    responses = await generate_load(
        flash_starcoder_gptq,
        "def geometric_mean(L: List[float]):",
        max_new_tokens=10,
        n=4,
    )
    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])
    assert responses == generous_response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py
import pytest


@pytest.fixture(scope="module")
def bloom_560m_sharded_handle(launcher):
    with launcher("bigscience/bloom-560m", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def bloom_560m_sharded(bloom_560m_sharded_handle):
    await bloom_560m_sharded_handle.health(240)
    return bloom_560m_sharded_handle.client


@pytest.mark.asyncio
async def test_bloom_560m_sharded(bloom_560m_sharded, response_snapshot):
    response = await bloom_560m_sharded.generate(
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        top_p=0.9,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_bloom_560m_sharded_load(
    bloom_560m_sharded, generate_load, response_snapshot
):
    responses = await generate_load(
        bloom_560m_sharded,
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_neox.py
import pytest


@pytest.fixture(scope="module")
def neox_handle(launcher):
    with launcher(
        "stabilityai/stablelm-tuned-alpha-3b", num_shard=1, use_flash_attention=False
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def neox(neox_handle):
    await neox_handle.health(300)
    return neox_handle.client


@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox(neox, response_snapshot):
    response = await neox.generate(
        "<|USER|>What's your mood today?<|ASSISTANT|>",
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox_load(neox, generate_load, response_snapshot):
    responses = await generate_load(
        neox,
        "<|USER|>What's your mood today?<|ASSISTANT|>",
        max_new_tokens=10,
        n=4,
    )

    generated_texts = [r.generated_text for r in responses]

    assert len(generated_texts) == 4
    assert all([text == generated_texts[0] for text in generated_texts])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_awq.py
import pytest


@pytest.fixture(scope="module")
def flash_llama_awq_handle(launcher):
    with launcher(
        "abhinavkulkarni/codellama-CodeLlama-7b-Python-hf-w4-g128-awq",
        num_shard=1,
        quantize="awq",
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_llama_awq(flash_llama_awq_handle):
    await flash_llama_awq_handle.health(300)
    return flash_llama_awq_handle.client


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_awq(flash_llama_awq, response_snapshot):
    response = await flash_llama_awq.generate(
        "What is Deep Learning?", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert (
        response.generated_text
        == "\nWhat is the difference between Deep Learning and Machine"
    )
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_awq_all_params(flash_llama_awq, response_snapshot):
    response = await flash_llama_awq.generate(
        "What is Deep Learning?",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_awq_load(flash_llama_awq, generate_load, response_snapshot):
    responses = await generate_load(
        flash_llama_awq, "What is Deep Learning?", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert all(
        [
            r.generated_text
            == "\nWhat is the difference between Deep Learning and Machine"
            for r in responses
        ]
    )

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4911, "logprob": -5.7851562, "text": "User" }, { "id": 29901, "logprob": -0.006996155, "text": ":" }, { "id": 32000, "logprob": -0.81347656, "text": "<fake_token_around_image>" }, { "id": 32001, "logprob": -6.687641e-05, "text": "<image>" }, { "id": 32000, "logprob": -3.5762787e-07, "text": "<fake_token_around_image>" }, { "id": 1815, "logprob": -4.2148438, "text": "Can" }, { "id": 366, "logprob": -0.014137268, "text": "you" }, { "id": 2649, "logprob": -4.4335938, "text": "tell" }, { "id": 592, "logprob": -0.2919922, "text": "me" }, { "id": 263, "logprob": -4.2070312, "text": "a" }, { "id": 1407, "logprob": -9.421875, "text": "very" }, { "id": 3273, "logprob": -1.8720703, "text": "short" }, { "id": 5828, "logprob": -0.26489258, "text": "story" }, { "id": 2729, "logprob": -3.7441406, "text": "based" }, { "id": 373, "logprob": -0.0005393028, "text": "on" }, { "id": 278, "logprob": -0.140625, "text": "the" }, { "id": 1967, "logprob": -0.06756592, "text": "image" }, { "id": 29973, "logprob": -0.15454102, "text": "?" } ], "seed": null, "tokens": [ { "id": 32002, "logprob": -0.0019140244, "special": true, "text": "<end_of_utterance>" }, { "id": 29871, "logprob": -8.404255e-05, "special": false, "text": " " }, { "id": 13, "logprob": -1.7642975e-05, "special": false, "text": "\n" }, { "id": 7900, "logprob": -2.9802322e-06, "special": false, "text": "Ass" }, { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" }, { "id": 29901, "logprob": -3.2186508e-06, "special": false, "text": ":" }, { "id": 319, "logprob": -0.91064453, "special": false, "text": " A" }, { "id": 696, "logprob": -1.2412109, "special": false, "text": " ro" }, { "id": 15664, "logprob": -0.0002439022, "special": false, "text": "oster" }, { "id": 15028, "logprob": -1.1630859, "special": false, "text": " stands" } ], "top_tokens": null }, "generated_text": " \nAssistant: A rooster stands" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4911, "logprob": -5.7851562, "text": "User" }, { "id": 29901, "logprob": -0.006996155, "text": ":" }, { "id": 32000, "logprob": -0.81347656, "text": "<fake_token_around_image>" }, { "id": 32001, "logprob": -6.687641e-05, "text": "<image>" }, { "id": 32000, "logprob": -3.5762787e-07, "text": "<fake_token_around_image>" }, { "id": 1815, "logprob": -4.2148438, "text": "Can" }, { "id": 366, "logprob": -0.014137268, "text": "you" }, { "id": 2649, "logprob": -4.4335938, "text": "tell" }, { "id": 592, "logprob": -0.2919922, "text": "me" }, { "id": 263, "logprob": -4.2070312, "text": "a" }, { "id": 1407, "logprob": -9.421875, "text": "very" }, { "id": 3273, "logprob": -1.8720703, "text": "short" }, { "id": 5828, "logprob": -0.26489258, "text": "story" }, { "id": 2729, "logprob": -3.7441406, "text": "based" }, { "id": 373, "logprob": -0.0005393028, "text": "on" }, { "id": 278, "logprob": -0.140625, "text": "the" }, { "id": 1967, "logprob": -0.06756592, "text": "image" }, { "id": 29973, "logprob": -0.15454102, "text": "?" } ], "seed": null, "tokens": [ { "id": 32002, "logprob": -0.0019140244, "special": true, "text": "<end_of_utterance>" }, { "id": 29871, "logprob": -8.392334e-05, "special": false, "text": " " }, { "id": 13, "logprob": -1.7881393e-05, "special": false, "text": "\n" }, { "id": 7900, "logprob": -2.9802322e-06, "special": false, "text": "Ass" }, { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" }, { "id": 29901, "logprob": -3.0994415e-06, "special": false, "text": ":" }, { "id": 319, "logprob": -0.9057617, "special": false, "text": " A" }, { "id": 696, "logprob": -1.2294922, "special": false, "text": " ro" }, { "id": 15664, "logprob": -0.00024533272, "special": false, "text": "oster" }, { "id": 15028, "logprob": -1.1640625, "special": false, "text": " stands" } ], "top_tokens": null }, "generated_text": " \nAssistant: A rooster stands" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4911, "logprob": -5.7773438, "text": "User" }, { "id": 29901, "logprob": -0.0070114136, "text": ":" }, { "id": 32000, "logprob": -0.8208008, "text": "<fake_token_around_image>" }, { "id": 32001, "logprob": -6.699562e-05, "text": "<image>" }, { "id": 32000, "logprob": -3.5762787e-07, "text": "<fake_token_around_image>" }, { "id": 1815, "logprob": -4.2265625, "text": "Can" }, { "id": 366, "logprob": -0.014175415, "text": "you" }, { "id": 2649, "logprob": -4.4296875, "text": "tell" }, { "id": 592, "logprob": -0.29516602, "text": "me" }, { "id": 263, "logprob": -4.2109375, "text": "a" }, { "id": 1407, "logprob": -9.4296875, "text": "very" }, { "id": 3273, "logprob": -1.8720703, "text": "short" }, { "id": 5828, "logprob": -0.26879883, "text": "story" }, { "id": 2729, "logprob": -3.7675781, "text": "based" }, { "id": 373, "logprob": -0.0005354881, "text": "on" }, { "id": 278, "logprob": -0.13671875, "text": "the" }, { "id": 1967, "logprob": -0.06719971, "text": "image" }, { "id": 29973, "logprob": -0.15551758, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 32002, "logprob": -0.0019130707, "special": true, "text": "<end_of_utterance>" }, { "id": 29871, "logprob": -8.392334e-05, "special": false, "text": " " }, { "id": 13, "logprob": -1.7881393e-05, "special": false, "text": "\n" }, { "id": 7900, "logprob": -3.0994415e-06, "special": false, "text": "Ass" }, { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" }, { "id": 29901, "logprob": -3.0994415e-06, "special": false, "text": ":" }, { "id": 319, "logprob": -0.9013672, "special": false, "text": " A" }, { "id": 696, "logprob": -1.2324219, "special": false, "text": " ro" }, { "id": 15664, "logprob": -0.0002477169, "special": false, "text": "oster" }, { "id": 15028, "logprob": -1.1660156, "special": false, "text": " stands" } ], "top_tokens": null }, "generated_text": " \nAssistant: A rooster stands" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4911, "logprob": -5.7773438, "text": "User" }, { "id": 29901, "logprob": -0.0070114136, "text": ":" }, { "id": 32000, "logprob": -0.8208008, "text": "<fake_token_around_image>" }, { "id": 32001, "logprob": -6.699562e-05, "text": "<image>" }, { "id": 32000, "logprob": -3.5762787e-07, "text": "<fake_token_around_image>" }, { "id": 1815, "logprob": -4.2265625, "text": "Can" }, { "id": 366, "logprob": -0.014175415, "text": "you" }, { "id": 2649, "logprob": -4.4296875, "text": "tell" }, { "id": 592, "logprob": -0.29516602, "text": "me" }, { "id": 263, "logprob": -4.2109375, "text": "a" }, { "id": 1407, "logprob": -9.4296875, "text": "very" }, { "id": 3273, "logprob": -1.8720703, "text": "short" }, { "id": 5828, "logprob": -0.26879883, "text": "story" }, { "id": 2729, "logprob": -3.7675781, "text": "based" }, { "id": 373, "logprob": -0.0005354881, "text": "on" }, { "id": 278, "logprob": -0.13671875, "text": "the" }, { "id": 1967, "logprob": -0.06719971, "text": "image" }, { "id": 29973, "logprob": -0.15551758, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 32002, "logprob": -0.001912117, "special": true, "text": "<end_of_utterance>" }, { "id": 29871, "logprob": -8.392334e-05, "special": false, "text": " " }, { "id": 13, "logprob": -1.7762184e-05, "special": false, "text": "\n" }, { "id": 7900, "logprob": -3.0994415e-06, "special": false, "text": "Ass" }, { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" }, { "id": 29901, "logprob": -3.0994415e-06, "special": false, "text": ":" }, { "id": 319, "logprob": -0.9013672, "special": false, "text": " A" }, { "id": 696, "logprob": -1.2324219, "special": false, "text": " ro" }, { "id": 15664, "logprob": -0.0002477169, "special": false, "text": "oster" }, { "id": 15028, "logprob": -1.1660156, "special": false, "text": " stands" } ], "top_tokens": null }, "generated_text": " \nAssistant: A rooster stands" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4911, "logprob": -5.7773438, "text": "User" }, { "id": 29901, "logprob": -0.0070114136, "text": ":" }, { "id": 32000, "logprob": -0.8208008, "text": "<fake_token_around_image>" }, { "id": 32001, "logprob": -6.699562e-05, "text": "<image>" }, { "id": 32000, "logprob": -3.5762787e-07, "text": "<fake_token_around_image>" }, { "id": 1815, "logprob": -4.2265625, "text": "Can" }, { "id": 366, "logprob": -0.014175415, "text": "you" }, { "id": 2649, "logprob": -4.4296875, "text": "tell" }, { "id": 592, "logprob": -0.29516602, "text": "me" }, { "id": 263, "logprob": -4.2109375, "text": "a" }, { "id": 1407, "logprob": -9.4296875, "text": "very" }, { "id": 3273, "logprob": -1.8720703, "text": "short" }, { "id": 5828, "logprob": -0.26879883, "text": "story" }, { "id": 2729, "logprob": -3.7675781, "text": "based" }, { "id": 373, "logprob": -0.0005354881, "text": "on" }, { "id": 278, "logprob": -0.13671875, "text": "the" }, { "id": 1967, "logprob": -0.06719971, "text": "image" }, { "id": 29973, "logprob": -0.15551758, "text": "?" } ], "seed": null, "tokens": [ { "id": 32002, "logprob": -0.001912117, "special": true, "text": "<end_of_utterance>" }, { "id": 29871, "logprob": -8.392334e-05, "special": false, "text": " " }, { "id": 13, "logprob": -1.7762184e-05, "special": false, "text": "\n" }, { "id": 7900, "logprob": -3.0994415e-06, "special": false, "text": "Ass" }, { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" }, { "id": 29901, "logprob": -3.0994415e-06, "special": false, "text": ":" }, { "id": 319, "logprob": -0.9013672, "special": false, "text": " A" }, { "id": 696, "logprob": -1.2324219, "special": false, "text": " ro" }, { "id": 15664, "logprob": -0.0002477169, "special": false, "text": "oster" }, { "id": 15028, "logprob": -1.1660156, "special": false, "text": " stands" } ], "top_tokens": null }, "generated_text": " \nAssistant: A rooster stands" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 9, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": 0, "tokens": [ { "id": 16017, "logprob": -0.30908203, "special": false, "text": " blue" }, { "id": 20495, "logprob": 0.0, "special": false, "text": " sky" }, { "id": 259, "logprob": -0.28271484, "special": false, "text": " " }, { "id": 15484, "logprob": -1.7929688, "special": false, "text": "appear" }, { "id": 345, "logprob": -0.8935547, "special": false, "text": "ed" }, { "id": 281, "logprob": 0.0, "special": false, "text": " in" }, { "id": 287, "logprob": 0.0, "special": false, "text": " the" }, { "id": 20495, "logprob": -0.32299805, "special": false, "text": " sky" }, { "id": 1, "logprob": 0.0, "special": true, "text": "</s>" } ] }, "generated_text": "Why is the sky blue?blue sky appeared in the sky" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base.json
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 5, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": 0, "tokens": [ { "id": 926, "logprob": -4.3554688, "special": false, "text": " To" }, { "id": 18295, "logprob": -7.7734375, "special": false, "text": " sell" }, { "id": 7868, "logprob": -3.9257812, "special": false, "text": " things" }, { "id": 260, "logprob": -2.4179688, "special": false, "text": "." }, { "id": 1, "logprob": 0.0, "special": true, "text": "</s>" } ] }, "generated_text": "To sell things." }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3798828, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36328125, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0947266, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8286133, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6826172, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.7290039, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3789062, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36279297, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0966797, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8276367, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6845703, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.72753906, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3789062, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36279297, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0966797, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8276367, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6845703, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.72753906, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3789062, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36279297, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0966797, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8276367, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6845703, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.72753906, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": 
null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21447754, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.43701172, "special": false, "text": " print" }, { "id": 372, "logprob": -0.5361328, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2412109, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.7583008, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.20837402, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2470703, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5390625, "text": " dég" }, { "id": 21543, "logprob": -0.14758301, "text": "uster" }, { "id": 447, "logprob": -1.9296875, "text": " un" }, { "id": 46341, "logprob": -15.4453125, "text": " ort" }, { "id": 35567, "logprob": -7.59375, "text": "olan" }, { "id": 15, "logprob": -1.3994141, "text": "," }, { "id": 1669, "logprob": -1.578125, "text": " il" }, { "id": 11580, "logprob": -0.9453125, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": 0, "tokens": [ { "id": 578, "logprob": -1.6474609, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5097656, "special": false, "text": " faire" }, { "id": 159570, "logprob": -6.65625, "special": false, "text": " réch" }, { "id": 810, "logprob": 0.0, "special": false, "text": "au" }, { "id": 12736, "logprob": 0.0, "special": false, "text": "ffer" }, { "id": 1742, "logprob": -2.5859375, "special": false, "text": " au" }, { "id": 6105, "logprob": -2.03125, "special": false, "text": " bain" }, { "id": 88254, "logprob": -0.12695312, "special": false, "text": "-mar" }, { "id": 641, "logprob": 0.0, "special": false, "text": "ie" }, { "id": 2940, "logprob": -3.5175781, "special": false, "text": " avec" } ] }, "generated_text": " le faire réchauffer au bain-marie avec" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5390625, "text": " dég" }, { "id": 21543, "logprob": -0.14758301, "text": "uster" }, { "id": 447, "logprob": -1.9296875, "text": " un" }, { "id": 46341, "logprob": -15.4453125, "text": " ort" }, { "id": 35567, "logprob": -7.59375, "text": "olan" }, { "id": 15, "logprob": -1.3994141, "text": "," }, { "id": 1669, "logprob": -1.578125, "text": " il" }, { "id": 11580, "logprob": -0.9453125, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7529297, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.6054688, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5283203, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4716797, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11853027, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.41210938, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.0037765503, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0166016, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.515625, "text": " dég" }, { "id": 21543, "logprob": -0.1484375, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.34375, "text": " ort" }, { "id": 35567, "logprob": -7.515625, "text": "olan" }, { "id": 15, "logprob": -1.4199219, "text": "," }, { "id": 1669, "logprob": -1.5664062, "text": " il" }, { "id": 11580, "logprob": -0.94091797, "text": " faut" }, { "id": 3913, "logprob": -3.6660156, "text": " tout" }, { "id": 39261, "logprob": -1.7753906, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" }, { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.515625, "text": " dég" }, { "id": 21543, "logprob": -0.1484375, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.34375, "text": " ort" }, { "id": 35567, "logprob": -7.515625, "text": "olan" }, { "id": 15, 
"logprob": -1.4199219, "text": "," }, { "id": 1669, "logprob": -1.5664062, "text": " il" }, { "id": 11580, "logprob": -0.94091797, "text": " faut" }, { "id": 3913, "logprob": -3.6660156, "text": " tout" }, { "id": 39261, "logprob": -1.7753906, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" }, { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.515625, "text": " dég" }, { "id": 21543, "logprob": -0.1484375, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.34375, "text": " ort" }, { "id": 35567, "logprob": -7.515625, "text": "olan" }, { "id": 15, "logprob": -1.4199219, "text": "," }, { "id": 1669, "logprob": -1.5664062, "text": " il" }, { "id": 11580, "logprob": -0.94091797, "text": " faut" }, { "id": 3913, "logprob": -3.6660156, "text": " tout" }, { "id": 39261, "logprob": -1.7753906, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" }, { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_default_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 60, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6328125, "text": " print" }, { "id": 81, "logprob": -1.6035156, "text": "_" }, { "id": 7656, "logprob": -5.9882812, "text": "hello" } ], "seed": 0, "tokens": [ { "id": 2262, "logprob": -0.042999268, "special": false, "text": "():" }, { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 440, "logprob": 0.0, "special": false, "text": "(\"" }, { "id": 8279, "logprob": 0.0, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -0.38549805, "special": false, "text": " World" }, { "id": 657, "logprob": -0.5229492, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.10632324, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": -0.20141602, "special": false, "text": "def" }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 7656, "logprob": 0.0, "special": false, "text": "hello" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 711, "logprob": 0.0, "special": false, "text": "):" }, { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 440, "logprob": -0.16027832, "special": false, "text": "(\"" }, { "id": 8279, "logprob": 0.0, "special": false, "text": "Hello" }, { "id": 313, "logprob": 0.0, "special": false, "text": " \"" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 636, "logprob": 0.0, "special": false, "text": " name" }, { "id": 27, "logprob": 0.0, "special": false, "text": ")" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": 0.0, "special": false, "text": "def" }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 7656, "logprob": 0.0, "special": false, "text": "hello" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 381, "logprob": 0.0, "special": false, "text": "age" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 30, "logprob": 0.0, "special": false, "text": "," }, { "id": 11442, "logprob": 0.0, "special": false, "text": " age" }, { "id": 711, "logprob": 0.0, "special": false, "text": "):" }, { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 440, "logprob": 0.0, "special": false, "text": "(\"" }, { "id": 8279, "logprob": 0.0, "special": false, "text": "Hello" }, { "id": 313, "logprob": 0.0, "special": false, "text": " \"" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 636, "logprob": 0.0, "special": false, "text": " name" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 313, 
"logprob": -0.6328125, "special": false, "text": " \"" }, { "id": 313, "logprob": -1.7011719, "special": false, "text": " \"" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 596, "logprob": 0.0, "special": false, "text": " str" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 381, "logprob": 0.0, "special": false, "text": "age" }, { "id": 490, "logprob": 0.0, "special": false, "text": "))" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": 0.0, "special": false, "text": "def" }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef print_hello_name(name):\n print(\"Hello \" + name)\n\ndef print_hello_name_age(name, age):\n print(\"Hello \" + name + \" \" + str(age))\n\ndef print" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 
1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2590332, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39379883, "special": false, "text": " print" }, { "id": 440, "logprob": -0.61376953, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.47338867, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.80810547, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7397461, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0371094, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.4179688, "text": " is" }, { "id": 247, "logprob": -2.1542969, "text": " a" }, { "id": 1167, "logprob": -5.359375, "text": " mem" }, { "id": 70, "logprob": -0.006038666, "text": "e" }, { "id": 13, "logprob": -7.328125, "text": "," }, { "id": 285, "logprob": -0.3173828, "text": " and" }, { "id": 752, "logprob": -2.0625, "text": " what" }, { "id": 434, "logprob": -5.7734375, "text": "'s" }, { "id": 253, "logprob": -0.74072266, "text": " the" }, { "id": 2892, "logprob": -6.5898438, "text": " history" }, { "id": 3212, "logprob": -2.2949219, "text": " behind" }, { "id": 436, "logprob": -11.40625, "text": " this" }, { "id": 3159, "logprob": -2.1113281, "text": " word" }, { "id": 32, "logprob": -0.008056641, "text": "?" }, { "id": 0, "logprob": -2.3300781, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.28125, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.5878906, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5498047, "special": false, "text": " word" }, { "id": 346, "logprob": -0.04815674, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.002313614, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.2636185e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0010147095, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0859375, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12609863, "special": false, "text": " first" }, { "id": 908, "logprob": -0.016601562, "special": false, "text": " used" }, { "id": 275, "logprob": -0.38256836, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1640625, "text": " a" }, { "id": 1167, "logprob": -5.40625, "text": " mem" }, { "id": 70, "logprob": -0.005420685, "text": "e" }, { "id": 13, "logprob": -7.2226562, "text": "," }, { "id": 285, "logprob": -0.26879883, "text": " and" }, { "id": 752, "logprob": -2.1992188, "text": " what" }, { "id": 434, "logprob": -5.46875, "text": "'s" }, { "id": 253, "logprob": -0.8017578, "text": " the" }, { "id": 2892, "logprob": -6.6796875, "text": " history" }, { "id": 3212, "logprob": -2.1972656, "text": " behind" }, { "id": 436, "logprob": -11.4453125, "text": " this" }, { "id": 3159, "logprob": -2.1933594, "text": " word" }, { "id": 32, "logprob": -0.007858276, "text": "?" 
}, { "id": 0, "logprob": -2.328125, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.21875, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.6201172, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.546875, "special": false, "text": " word" }, { "id": 346, "logprob": -0.051879883, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.0020179749, "special": false, "text": "mem" }, { "id": 70, "logprob": -9.059906e-06, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00096797943, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.07940674, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12182617, "special": false, "text": " first" }, { "id": 908, "logprob": -0.017227173, "special": false, "text": " used" }, { "id": 275, "logprob": -0.44482422, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1640625, "text": " a" }, { "id": 1167, "logprob": -5.40625, "text": " mem" }, { "id": 70, "logprob": -0.005420685, "text": "e" }, { "id": 13, "logprob": -7.2226562, "text": "," }, { "id": 285, "logprob": -0.26879883, "text": " and" }, { "id": 752, "logprob": -2.1992188, "text": " what" }, { "id": 434, "logprob": -5.46875, "text": "'s" }, { "id": 253, "logprob": -0.8017578, "text": " the" }, { "id": 2892, "logprob": -6.6796875, "text": " history" }, { "id": 3212, "logprob": -2.1972656, "text": " behind" }, { "id": 436, "logprob": -11.4453125, "text": " this" }, { "id": 3159, "logprob": -2.1933594, "text": " word" }, { "id": 32, "logprob": -0.007858276, "text": "?" 
}, { "id": 0, "logprob": -2.328125, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.21875, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.6201172, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.546875, "special": false, "text": " word" }, { "id": 346, "logprob": -0.051879883, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.0020179749, "special": false, "text": "mem" }, { "id": 70, "logprob": -9.059906e-06, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00096797943, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.07940674, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12182617, "special": false, "text": " first" }, { "id": 908, "logprob": -0.017227173, "special": false, "text": " used" }, { "id": 275, "logprob": -0.44482422, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1640625, "text": " a" }, { "id": 1167, "logprob": -5.40625, "text": " mem" }, { "id": 70, "logprob": -0.005420685, "text": "e" }, { "id": 13, "logprob": -7.2226562, "text": "," }, { "id": 285, "logprob": -0.26879883, "text": " and" }, { "id": 752, "logprob": -2.1992188, "text": " what" }, { "id": 434, "logprob": -5.46875, "text": "'s" }, { "id": 253, "logprob": -0.8017578, "text": " the" }, { "id": 2892, "logprob": -6.6796875, "text": " history" }, { "id": 3212, "logprob": -2.1972656, "text": " behind" }, { "id": 436, "logprob": -11.4453125, "text": " this" }, { "id": 3159, "logprob": -2.1933594, "text": " word" }, { "id": 32, "logprob": -0.007858276, "text": "?" }, { "id": 0, "logprob": -2.328125, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.21875, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.6201172, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.546875, "special": false, "text": " word" }, { "id": 346, "logprob": -0.051879883, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.0020179749, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.04904175e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0009560585, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.08557129, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12084961, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.4025879, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.4179688, "text": " is" }, { "id": 247, "logprob": -2.1542969, "text": " a" }, { "id": 1167, "logprob": -5.359375, "text": " mem" }, { "id": 70, "logprob": -0.006038666, "text": "e" }, { "id": 13, "logprob": -7.328125, "text": "," }, { "id": 285, "logprob": -0.3173828, "text": " and" }, { "id": 752, "logprob": -2.0625, "text": " what" }, { "id": 434, "logprob": -5.7734375, "text": "'s" }, { "id": 253, "logprob": -0.74072266, "text": " the" }, { "id": 2892, "logprob": -6.5898438, "text": " history" }, { "id": 3212, "logprob": -2.2949219, "text": " behind" }, { "id": 436, "logprob": -11.40625, "text": " this" }, { "id": 3159, "logprob": -2.1113281, "text": " word" }, { "id": 32, "logprob": -0.008056641, "text": "?" }, { "id": 0, "logprob": -2.3300781, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.28125, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.5878906, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5449219, "special": false, "text": " word" }, { "id": 346, "logprob": -0.05038452, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.002292633, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.3828278e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0010242462, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.090270996, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12719727, "special": false, "text": " first" }, { "id": 908, "logprob": -0.016571045, "special": false, "text": " used" }, { "id": 275, "logprob": -0.43432617, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }
0
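The snapshot above shows the response shape these test fixtures pin down: details.prefill holds the prompt tokens (the very first one with a null logprob), details.tokens holds the generated tokens with their log-probabilities and a special flag, and generated_text is the decoded continuation. As a rough, hedged sketch (not a file from the repository; the path below is only a placeholder), the invariants visible in the data can be checked with a few lines of Python: generated_tokens should equal the length of tokens, and joining the generated token texts should reproduce generated_text when the prompt is not echoed back.

import json

# Placeholder path -- any single-response snapshot from this dump will do.
with open("test_neox.json") as f:
    snapshot = json.load(f)

details = snapshot["details"]
generated = details["tokens"]

# The reported token count should match the token list.
assert details["generated_tokens"] == len(generated)

# Prefill entries describe the prompt; the very first one has a null logprob.
assert details["prefill"][0]["logprob"] is None

# Joining the generated token texts reproduces generated_text
# (this holds when the snapshot does not echo the prompt back).
reconstructed = "".join(token["text"] for token in generated)
assert reconstructed == snapshot["generated_text"]
print(details["finish_reason"], repr(reconstructed))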
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 3735, "logprob": -12.9140625, "text": "Test" }, { "id": 2159, "logprob": -10.7578125, "text": "request" } ], "seed": 0, "tokens": [ { "id": 28747, "logprob": 0.0, "special": false, "text": ":" }, { "id": 3169, "logprob": -0.1307373, "special": false, "text": " Let" }, { "id": 332, "logprob": -2.3359375, "special": false, "text": " u" }, { "id": 347, "logprob": 0.0, "special": false, "text": " be" }, { "id": 325, "logprob": -1.0234375, "special": false, "text": " (" }, { "id": 28734, "logprob": -2.0292969, "special": false, "text": "0" }, { "id": 648, "logprob": -1.0439453, "special": false, "text": " +" }, { "id": 28705, "logprob": -0.24499512, "special": false, "text": " " }, { "id": 28770, "logprob": -0.5073242, "special": false, "text": "3" }, { "id": 387, "logprob": -1.5507812, "special": false, "text": " -" } ], "top_tokens": null }, "generated_text": "Test request: Let u be (0 + 3 -" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 3735, "logprob": -12.9140625, "text": "Test" }, { "id": 2159, "logprob": -10.7578125, "text": "request" } ], "seed": null, "tokens": [ { "id": 28747, "logprob": -0.55078125, "special": false, "text": ":" }, { "id": 3169, "logprob": -1.4140625, "special": false, "text": " Let" }, { "id": 307, "logprob": -3.0273438, "special": false, "text": " n" }, { "id": 327, "logprob": -0.94140625, "special": false, "text": " =" }, { "id": 28705, "logprob": -0.8173828, "special": false, "text": " " }, { "id": 28740, "logprob": -1.2978516, "special": false, "text": "1" }, { "id": 28734, "logprob": -2.0664062, "special": false, "text": "0" }, { "id": 387, "logprob": -1.9560547, "special": false, "text": " -" }, { "id": 28705, "logprob": -0.5078125, "special": false, "text": " " }, { "id": 28740, "logprob": -1.1787109, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": ": Let n = 10 - 1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 3735, "logprob": -12.9140625, "text": "Test" }, { "id": 2159, "logprob": -10.7578125, "text": "request" } ], "seed": null, "tokens": [ { "id": 28747, "logprob": -0.54785156, "special": false, "text": ":" }, { "id": 3169, "logprob": -1.4111328, "special": false, "text": " Let" }, { "id": 307, "logprob": -3.0292969, "special": false, "text": " n" }, { "id": 327, "logprob": -0.94433594, "special": false, "text": " =" }, { "id": 28705, "logprob": -0.8178711, "special": false, "text": " " }, { "id": 28740, "logprob": -1.2939453, "special": false, "text": "1" }, { "id": 28734, "logprob": -2.0644531, "special": false, "text": "0" }, { "id": 387, "logprob": -1.9550781, "special": false, "text": " -" }, { "id": 28705, "logprob": -0.5078125, "special": false, "text": " " }, { "id": 28740, "logprob": -1.1796875, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": ": Let n = 10 - 1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 3735, "logprob": -12.9140625, "text": "Test" }, { "id": 2159, "logprob": -10.7578125, "text": "request" } ], "seed": null, "tokens": [ { "id": 28747, "logprob": -0.55078125, "special": false, "text": ":" }, { "id": 3169, "logprob": -1.4140625, "special": false, "text": " Let" }, { "id": 307, "logprob": -3.0273438, "special": false, "text": " n" }, { "id": 327, "logprob": -0.94140625, "special": false, "text": " =" }, { "id": 28705, "logprob": -0.8173828, "special": false, "text": " " }, { "id": 28740, "logprob": -1.2978516, "special": false, "text": "1" }, { "id": 28734, "logprob": -2.0664062, "special": false, "text": "0" }, { "id": 387, "logprob": -1.9560547, "special": false, "text": " -" }, { "id": 28705, "logprob": -0.5078125, "special": false, "text": " " }, { "id": 28740, "logprob": -1.1787109, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": ": Let n = 10 - 1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 3735, "logprob": -12.9140625, "text": "Test" }, { "id": 2159, "logprob": -10.7578125, "text": "request" } ], "seed": null, "tokens": [ { "id": 28747, "logprob": -0.55078125, "special": 
false, "text": ":" }, { "id": 3169, "logprob": -1.4140625, "special": false, "text": " Let" }, { "id": 307, "logprob": -3.0273438, "special": false, "text": " n" }, { "id": 327, "logprob": -0.94140625, "special": false, "text": " =" }, { "id": 28705, "logprob": -0.8173828, "special": false, "text": " " }, { "id": 28740, "logprob": -1.2978516, "special": false, "text": "1" }, { "id": 28734, "logprob": -2.0664062, "special": false, "text": "0" }, { "id": 387, "logprob": -1.9560547, "special": false, "text": " -" }, { "id": 28705, "logprob": -0.5078125, "special": false, "text": " " }, { "id": 28740, "logprob": -1.1787109, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": ": Let n = 10 - 1" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 3735, "logprob": -12.9140625, "text": "Test" }, { "id": 2159, "logprob": -10.7578125, "text": "request" } ], "seed": null, "tokens": [ { "id": 28747, "logprob": -0.54785156, "special": false, "text": ":" }, { "id": 3169, "logprob": -1.4091797, "special": false, "text": " Let" }, { "id": 307, "logprob": -3.0273438, "special": false, "text": " n" }, { "id": 327, "logprob": -0.94433594, "special": false, "text": " =" }, { "id": 28705, "logprob": -0.81347656, "special": false, "text": " " }, { "id": 28740, "logprob": -1.2958984, "special": false, "text": "1" }, { "id": 28734, "logprob": -2.0644531, "special": false, "text": "0" }, { "id": 387, "logprob": -1.9580078, "special": false, "text": " -" }, { "id": 28705, "logprob": -0.5073242, "special": false, "text": " " }, { "id": 28740, "logprob": -1.1816406, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": ": Let n = 10 - 1" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5351562, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5722656, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2714844, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.03414917, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.95996094, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.3635254, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.013031006, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.1523438, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.43701172, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9394531, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": " for /api/v1/projects/1" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5351562, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5566406, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2519531, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.03414917, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.3647461, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.012901306, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.1542969, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.4362793, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9394531, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": " for /api/v1/projects/1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5332031, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5625, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2617188, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.033996582, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.9609375, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36572266, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.0129776, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.4362793, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9394531, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": " for /api/v1/projects/1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5332031, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5625, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2617188, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.033996582, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.9609375, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36572266, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.0129776, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.4362793, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9394531, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": " for /api/v1/projects/1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": 
-1.5332031, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5625, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2617188, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.033996582, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.9609375, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36572266, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.0129776, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.4362793, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9394531, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": " for /api/v1/projects/1" } ]
0
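In the load snapshot above, the four concurrent requests decode the same token ids while their log-probabilities drift slightly between entries (for example -2.5566406 versus -2.5625 on the " /" token). A small, hedged sketch for quantifying that drift across the entries of such a *_load.json file is given below; the file name is a placeholder, not a path from the repository.

import json

# Placeholder name -- any of the *_load.json snapshots in this dump.
with open("test_flash_llama_load.json") as f:
    entries = json.load(f)

reference = entries[0]["details"]["tokens"]
worst = 0.0
for entry in entries[1:]:
    tokens = entry["details"]["tokens"]
    # The decoded token ids should agree across concurrent requests ...
    assert [t["id"] for t in tokens] == [t["id"] for t in reference]
    # ... while the per-token log-probabilities may differ by a small margin.
    worst = max(
        worst,
        max(abs(a["logprob"] - b["logprob"]) for a, b in zip(tokens, reference)),
    )
print(f"max per-token logprob deviation across {len(entries)} entries: {worst:.6f}")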
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "stop_sequence", "generated_tokens": 5, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.546875, "text": "request" } ], "seed": 0, "tokens": [ { "id": 5229, "logprob": -2.5839844, "special": false, "text": " failed" }, { "id": 29901, "logprob": -0.44970703, "special": false, "text": ":" }, { "id": 4829, "logprob": -1.8339844, "special": false, "text": " Error" }, { "id": 297, "logprob": -1.0556641, "special": false, "text": " in" }, { "id": 1243, "logprob": 0.0, "special": false, "text": " test" } ], "top_tokens": null }, "generated_text": "Test request failed: Error in test" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.21875, "text": " your" }, { "id": 12315, "logprob": -9.9375, "text": " mood" }, { "id": 3063, "logprob": -4.1015625, "text": " today" }, { "id": 32, "logprob": -0.15319824, "text": "?" }, { "id": 50279, "logprob": -0.2614746, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8886719, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.98046875, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2265625, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3479004, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.0117188, "special": false, "text": "," }, { "id": 534, "logprob": -0.67871094, "special": false, "text": " which" }, { "id": 310, "logprob": -1.421875, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7382812, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.051330566, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0390625, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.1054688, "text": " your" }, { "id": 12315, "logprob": -9.953125, "text": " mood" }, { "id": 3063, "logprob": -4.0820312, "text": " today" }, { "id": 32, "logprob": -0.15148926, "text": "?" }, { "id": 50279, "logprob": -0.27026367, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.88378906, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9819336, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2421875, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3474121, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.078125, "special": false, "text": "," }, { "id": 534, "logprob": -0.69140625, "special": false, "text": " which" }, { "id": 310, "logprob": -1.4072266, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7041016, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.053375244, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0351562, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.21875, "text": " your" }, { "id": 12315, "logprob": -9.9375, "text": " mood" }, { "id": 3063, "logprob": -4.1015625, "text": " today" }, { "id": 32, "logprob": -0.15319824, "text": "?" 
}, { "id": 50279, "logprob": -0.2614746, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8886719, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.98046875, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2265625, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3479004, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.0117188, "special": false, "text": "," }, { "id": 534, "logprob": -0.67871094, "special": false, "text": " which" }, { "id": 310, "logprob": -1.421875, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7382812, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.051330566, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0390625, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.21875, "text": " your" }, { "id": 12315, "logprob": -9.9375, "text": " mood" }, { "id": 3063, "logprob": -4.1015625, "text": " today" }, { "id": 32, "logprob": -0.15319824, "text": "?" }, { "id": 50279, "logprob": -0.2614746, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8886719, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.98046875, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2265625, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3479004, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.0117188, "special": false, "text": "," }, { "id": 534, "logprob": -0.67871094, "special": false, "text": " which" }, { "id": 310, "logprob": -1.421875, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7382812, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.051330566, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0390625, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.1054688, "text": " your" }, { "id": 12315, "logprob": -9.953125, "text": " mood" }, { "id": 3063, "logprob": -4.0820312, "text": " today" }, { "id": 32, "logprob": -0.15148926, "text": "?" }, { "id": 50279, "logprob": -0.27026367, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.88378906, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.94921875, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2402344, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3725586, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.078125, "special": false, "text": "," }, { "id": 534, "logprob": -0.67822266, "special": false, "text": " which" }, { "id": 310, "logprob": -1.3837891, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7050781, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.052001953, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0390625, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1953125, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8828125, "text": " mood" }, { "id": 3063, "logprob": -3.9980469, "text": " today" }, { "id": 32, "logprob": -0.14672852, "text": "?" }, { "id": 50279, "logprob": -0.26489258, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8618164, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9506836, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1738281, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.0758667, "special": false, "text": "," }, { "id": 1394, "logprob": -0.9135742, "special": false, "text": "You" }, { "id": 452, "logprob": -1.1445312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4375, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.1103516, "special": false, "text": " choice" }, { "id": 273, "logprob": -1.0058594, "special": false, "text": " of" }, { "id": 752, "logprob": -1.921875, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1953125, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8828125, "text": " mood" }, { "id": 3063, "logprob": -3.9980469, "text": " today" }, { "id": 32, "logprob": -0.14672852, "text": "?" }, { "id": 50279, "logprob": -0.26489258, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8618164, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9506836, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1738281, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.0758667, "special": false, "text": "," }, { "id": 1394, "logprob": -0.9135742, "special": false, "text": "You" }, { "id": 452, "logprob": -1.1445312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4375, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.1103516, "special": false, "text": " choice" }, { "id": 273, "logprob": -1.0058594, "special": false, "text": " of" }, { "id": 752, "logprob": -1.921875, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1953125, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8828125, "text": " mood" }, { "id": 3063, "logprob": -3.9980469, "text": " today" }, { "id": 32, "logprob": -0.14672852, "text": "?" 
}, { "id": 50279, "logprob": -0.26489258, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8618164, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9506836, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1738281, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.0758667, "special": false, "text": "," }, { "id": 1394, "logprob": -0.9135742, "special": false, "text": "You" }, { "id": 452, "logprob": -1.1445312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4375, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.1103516, "special": false, "text": " choice" }, { "id": 273, "logprob": -1.0058594, "special": false, "text": " of" }, { "id": 752, "logprob": -1.921875, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1953125, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8828125, "text": " mood" }, { "id": 3063, "logprob": -3.9980469, "text": " today" }, { "id": 32, "logprob": -0.14672852, "text": "?" }, { "id": 50279, "logprob": -0.26489258, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8618164, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9506836, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1738281, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.0758667, "special": false, "text": "," }, { "id": 1394, "logprob": -0.9135742, "special": false, "text": "You" }, { "id": 452, "logprob": -1.1445312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4375, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.1103516, "special": false, "text": " choice" }, { "id": 273, "logprob": -1.0058594, "special": false, "text": " of" }, { "id": 752, "logprob": -1.921875, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1992188, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8984375, "text": " mood" }, { "id": 3063, "logprob": -4.0976562, "text": " today" }, { "id": 32, "logprob": -0.14562988, "text": "?" }, { "id": 50279, "logprob": -0.26733398, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.86279297, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.94921875, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1835938, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.074035645, "special": false, "text": "," }, { "id": 1394, "logprob": -0.86376953, "special": false, "text": "You" }, { "id": 452, "logprob": -1.2070312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4365234, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.109375, "special": false, "text": " choice" }, { "id": 273, "logprob": -0.93408203, "special": false, "text": " of" }, { "id": 752, "logprob": -1.8808594, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5039062, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.04837036, "text": " with" }, { "id": 26680, "logprob": -3.9960938, "text": " gir" }, { "id": 1903, "logprob": -0.07525635, "text": "aff" }, { "id": 255, "logprob": -0.006790161, "text": "es" }, { "id": 23, "logprob": -1.546875, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.109375, "text": " glorious" }, { "id": 5985, "logprob": -2.09375, "text": " animal" }, { "id": 313, "logprob": -1.1845703, "text": " on" }, { "id": 248, "logprob": -0.77734375, "text": " the" }, { "id": 1936, "logprob": -2.3828125, "text": " face" }, { "id": 275, "logprob": -0.0044403076, "text": " of" }, { "id": 414, "logprob": -1.9667969, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." }, { "id": 401, "logprob": -7.921875, "text": " G" }, { "id": 6013, "logprob": -2.2714844, "text": "ira" }, { "id": 694, "logprob": -0.62353516, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5507812, "text": " believes" }, { "id": 455, "logprob": -4.5625, "text": " all" }, { "id": 599, "logprob": -2.7402344, "text": " other" }, { "id": 5632, "logprob": -0.21899414, "text": " animals" }, { "id": 362, "logprob": -0.76708984, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.103515625, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6796875, "text": " glorious" }, { "id": 64398, "logprob": -1.8222656, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.3544922, "text": " the" }, { "id": 26680, "logprob": -0.24609375, "text": " gir" }, { "id": 23226, "logprob": -0.02960205, "text": "affe" }, { "id": 25, "logprob": -0.17358398, "text": "." }, { "id": 193, "logprob": -1.3925781, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.5898438, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99365234, "text": "," }, { "id": 29033, "logprob": -2.2304688, "text": " Gir" }, { "id": 1622, "logprob": -0.107788086, "text": "af" }, { "id": 249, "logprob": -0.04257202, "text": "at" }, { "id": 1480, "logprob": -0.0024871826, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" 
}, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056915283, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.0071105957, "text": "af" }, { "id": 249, "logprob": -0.008453369, "text": "at" }, { "id": 1480, "logprob": -0.0006928444, "text": "ron" }, { "id": 37, "logprob": -0.0074920654, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.828125, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.3178711, "special": false, "text": "," }, { "id": 8156, "logprob": -0.23925781, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" }, { "id": 193, "logprob": -0.61279297, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.4177246, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.0023345947, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5283203, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007965088, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07531738, "text": "aff" }, { "id": 255, "logprob": -0.006793976, "text": "es" }, { "id": 23, "logprob": -1.5478516, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.1132812, "text": " glorious" }, { "id": 5985, "logprob": -2.0957031, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3808594, "text": " face" }, { "id": 275, "logprob": -0.004436493, "text": " of" }, { "id": 414, "logprob": -1.9638672, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." 
}, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2734375, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5546875, "text": " believes" }, { "id": 455, "logprob": -4.5703125, "text": " all" }, { "id": 599, "logprob": -2.7382812, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7661133, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.10357666, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6816406, "text": " glorious" }, { "id": 64398, "logprob": -1.8203125, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24572754, "text": " gir" }, { "id": 23226, "logprob": -0.029586792, "text": "affe" }, { "id": 25, "logprob": -0.17346191, "text": "." }, { "id": 193, "logprob": -1.3945312, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99316406, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10797119, "text": "af" }, { "id": 249, "logprob": -0.04248047, "text": "at" }, { "id": 1480, "logprob": -0.0024814606, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" }, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056884766, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.007095337, "text": "af" }, { "id": 249, "logprob": -0.00844574, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074768066, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.31762695, "special": false, "text": "," }, { "id": 8156, "logprob": -0.2388916, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" 
}, { "id": 193, "logprob": -0.6152344, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.42211914, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.002336502, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007926941, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07531738, "text": "aff" }, { "id": 255, "logprob": -0.006793976, "text": "es" }, { "id": 23, "logprob": -1.5478516, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.1132812, "text": " glorious" }, { "id": 5985, "logprob": -2.0957031, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3808594, "text": " face" }, { "id": 275, "logprob": -0.004436493, "text": " of" }, { "id": 414, "logprob": -1.9638672, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." }, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2734375, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5546875, "text": " believes" }, { "id": 455, "logprob": -4.5703125, "text": " all" }, { "id": 599, "logprob": -2.7382812, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7661133, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.10357666, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6816406, "text": " glorious" }, { "id": 64398, "logprob": -1.8203125, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24572754, "text": " gir" }, { "id": 23226, "logprob": -0.029586792, "text": "affe" }, { "id": 25, "logprob": -0.17346191, "text": "." }, { "id": 193, "logprob": -1.3945312, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99316406, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10797119, "text": "af" }, { "id": 249, "logprob": -0.04248047, "text": "at" }, { "id": 1480, "logprob": -0.0024814606, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" 
}, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056884766, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.007095337, "text": "af" }, { "id": 249, "logprob": -0.00844574, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074768066, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.31762695, "special": false, "text": "," }, { "id": 8156, "logprob": -0.2388916, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" }, { "id": 193, "logprob": -0.6152344, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.42211914, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.002336502, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007926941, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07531738, "text": "aff" }, { "id": 255, "logprob": -0.006793976, "text": "es" }, { "id": 23, "logprob": -1.5478516, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.1132812, "text": " glorious" }, { "id": 5985, "logprob": -2.0957031, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3808594, "text": " face" }, { "id": 275, "logprob": -0.004436493, "text": " of" }, { "id": 414, "logprob": -1.9638672, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." 
}, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2734375, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5546875, "text": " believes" }, { "id": 455, "logprob": -4.5703125, "text": " all" }, { "id": 599, "logprob": -2.7382812, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7661133, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.10357666, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6816406, "text": " glorious" }, { "id": 64398, "logprob": -1.8203125, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24572754, "text": " gir" }, { "id": 23226, "logprob": -0.029586792, "text": "affe" }, { "id": 25, "logprob": -0.17346191, "text": "." }, { "id": 193, "logprob": -1.3945312, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99316406, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10797119, "text": "af" }, { "id": 249, "logprob": -0.04248047, "text": "at" }, { "id": 1480, "logprob": -0.0024814606, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" }, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056884766, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.007095337, "text": "af" }, { "id": 249, "logprob": -0.00844574, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074768066, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.31762695, "special": false, "text": "," }, { "id": 8156, "logprob": -0.2388916, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" }, { "id": 193, "logprob": -0.6152344, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.42211914, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.002336502, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007926941, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 330, "logprob": null, "text": "ir" }, { "id": 1622, "logprob": -7.8125, "text": "af" }, { "id": 249, "logprob": -4.5, "text": "at" }, { "id": 1480, "logprob": -10.875, "text": "ron" }, { "id": 37, "logprob": -3.6875, "text": ":" } ], "seed": 0, "tokens": [ { "id": 836, "logprob": -1.265625, "special": false, "text": " i" }, { "id": 18, "logprob": -0.119628906, "special": false, "text": "'" }, { "id": 298, "logprob": -2.265625, "special": false, "text": "ve" }, { "id": 650, "logprob": -0.49804688, "special": false, "text": " been" }, { "id": 1241, "logprob": 0.0, "special": false, "text": " using" }, { "id": 334, "logprob": 0.0, "special": false, "text": " it" }, { "id": 312, "logprob": -1.2421875, "special": false, "text": " for" }, { "id": 909, "logprob": -0.99609375, "special": false, "text": " years" }, { "id": 193, "logprob": -0.30273438, "special": false, "text": "\n" }, { "id": 807, "logprob": -1.078125, "special": false, "text": "ik" } ] }, "generated_text": "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron: i've been using it for years\nik" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6132812, "text": "af" }, { "id": 249, "logprob": -6.5039062, "text": "at" }, { "id": 1480, "logprob": -8.078125, "text": "ron" }, { "id": 304, "logprob": -2.3261719, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07556152, "text": "aff" }, { "id": 255, "logprob": -0.0067749023, "text": "es" }, { "id": 23, "logprob": -1.546875, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.734375, "text": " most" }, { "id": 21735, "logprob": -5.109375, "text": " glorious" }, { "id": 5985, "logprob": -2.09375, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3828125, "text": " face" }, { "id": 275, "logprob": -0.004432678, "text": " of" }, { "id": 414, "logprob": -1.9677734, "text": " this" }, { "id": 6490, "logprob": -2.046875, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." }, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2753906, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20874023, "text": "ron" }, { "id": 9369, "logprob": -4.5507812, "text": " believes" }, { "id": 455, "logprob": -4.5664062, "text": " all" }, { "id": 599, "logprob": -2.7402344, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7675781, "text": " are" }, { "id": 23981, "logprob": -5.0, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.103637695, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6835938, "text": " glorious" }, { "id": 64398, "logprob": -1.8173828, "text": " majesty" }, { "id": 275, "logprob": -0.23510742, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24633789, "text": " gir" }, { "id": 23226, "logprob": -0.02960205, "text": "affe" }, { "id": 25, "logprob": -0.17333984, "text": "." }, { "id": 193, "logprob": -1.3935547, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99365234, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10809326, "text": "af" }, { "id": 249, "logprob": -0.042663574, "text": "at" }, { "id": 1480, "logprob": -0.0024776459, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" 
}, { "id": 193, "logprob": -1.1015625, "text": "\n" }, { "id": 50, "logprob": -0.05709839, "text": "G" }, { "id": 330, "logprob": -0.13208008, "text": "ir" }, { "id": 1622, "logprob": -0.0071487427, "text": "af" }, { "id": 249, "logprob": -0.008468628, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074691772, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.3173828, "special": false, "text": "," }, { "id": 8156, "logprob": -0.23803711, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.56933594, "special": false, "text": "!" }, { "id": 193, "logprob": -0.61279297, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.41967773, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.0023403168, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007904053, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq_sharded/test_flash_llama_awq_sharded.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.6914062, "text": "What" }, { "id": 338, "logprob": -1.4746094, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8623047, "text": "Learning" }, { "id": 29973, "logprob": -0.7558594, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9228516, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4609375, "special": false, "text": "What" }, { "id": 338, "logprob": -0.57177734, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5722656, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5927734, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.026428223, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4267578, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.16015625, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17382812, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62060547, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }
0
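Because every generated token in the snapshot above carries its log-probability, the total log-probability and the perplexity of the continuation can be recovered directly from the file. The sketch below is illustrative only (the file name is a placeholder) and treats a bare response object and a *_load.json array uniformly.

import json
import math

# Placeholder name -- works for single snapshots and *_load.json arrays alike.
with open("test_flash_llama_awq_sharded.json") as f:
    snapshot = json.load(f)

entries = snapshot if isinstance(snapshot, list) else [snapshot]
for entry in entries:
    tokens = entry["details"]["tokens"]
    # Sum of per-token log-probabilities of the generated continuation.
    total = sum(t["logprob"] for t in tokens)
    # Perplexity of the continuation under the serving model.
    ppl = math.exp(-total / len(tokens))
    print(f"{entry['generated_text']!r}: logprob={total:.3f} ppl={ppl:.2f}")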
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq_sharded/test_flash_llama_awq_load_sharded.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.6914062, "text": "What" }, { "id": 338, "logprob": -1.4746094, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8623047, "text": "Learning" }, { "id": 29973, "logprob": -0.7558594, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9228516, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4609375, "special": false, "text": "What" }, { "id": 338, "logprob": -0.57177734, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5722656, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5859375, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.02633667, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4335938, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15991211, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62060547, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.6914062, "text": "What" }, { "id": 338, "logprob": -1.4746094, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8623047, "text": "Learning" }, { "id": 29973, "logprob": -0.7558594, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9228516, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4609375, "special": false, "text": "What" }, { "id": 338, "logprob": -0.57177734, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5722656, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5859375, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.02633667, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4335938, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15991211, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62060547, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.6914062, "text": "What" }, { "id": 338, "logprob": -1.4746094, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8623047, "text": "Learning" }, { "id": 29973, "logprob": -0.7558594, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9228516, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4609375, "special": false, "text": "What" }, { "id": 338, "logprob": -0.57177734, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5722656, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5859375, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.02633667, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4335938, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15991211, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62060547, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.6914062, "text": "What" }, { "id": 338, "logprob": -1.4746094, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8623047, "text": "Learning" }, { "id": 29973, "logprob": -0.7558594, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9228516, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4609375, "special": false, "text": "What" }, { "id": 338, "logprob": -0.57177734, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5722656, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5859375, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.02633667, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4335938, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15991211, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62060547, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 20, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.09375, "text": "ometric" }, { "id": 81, "logprob": -0.25976562, "text": "_" }, { "id": 6009, "logprob": -2.2148438, "text": "mean" }, { "id": 26, "logprob": -0.3010254, "text": "(" }, { "id": 62, "logprob": -5.6757812, "text": "L" }, { "id": 44, "logprob": -3.0898438, "text": ":" }, { "id": 1682, "logprob": -0.6791992, "text": " List" }, { "id": 77, "logprob": -0.38891602, "text": "[" }, { "id": 1808, "logprob": -0.92041016, "text": "float" }, { "id": 10794, "logprob": -2.5390625, "text": "]):" } ], "seed": 0, "tokens": [ { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 442, "logprob": 0.0, "special": false, "text": " return" }, { "id": 11665, "logprob": -1.6005859, "special": false, "text": " reduce" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 5962, "logprob": 0.0, "special": false, "text": "lambda" }, { "id": 816, "logprob": 0.0, "special": false, "text": " x" }, { "id": 30, "logprob": 0.0, "special": false, "text": "," }, { "id": 533, "logprob": 0.0, "special": false, "text": " y" }, { "id": 44, "logprob": 0.0, "special": false, "text": ":" }, { "id": 816, "logprob": 0.0, "special": false, "text": " x" }, { "id": 319, "logprob": 0.0, "special": false, "text": " *" }, { "id": 533, "logprob": 0.0, "special": false, "text": " y" }, { "id": 30, "logprob": 0.0, "special": false, "text": "," }, { "id": 498, "logprob": 0.0, "special": false, "text": " L" }, { "id": 27, "logprob": 0.0, "special": false, "text": ")" }, { "id": 203, "logprob": -0.11968994, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": 0.0, "special": false, "text": "def" }, { "id": 3226, "logprob": 0.0, "special": false, "text": " ge" }, { "id": 21017, "logprob": 0.0, "special": false, "text": "ometric" } ] }, "generated_text": "\n return reduce(lambda x, y: x * y, L)\n\ndef geometric" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json
{ "generated_text": "\n return sum(L) / len(L)\n\n\ndef geometric_mean(L", "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 20, "seed": null, "prefill": [ { "id": 589, "text": "def", "logprob": null }, { "id": 3226, "text": " ge", "logprob": -9.0234375 }, { "id": 21017, "text": "ometric", "logprob": -9.0859375 }, { "id": 81, "text": "_", "logprob": -0.25878906 }, { "id": 6009, "text": "mean", "logprob": -2.2109375 }, { "id": 26, "text": "(", "logprob": -0.30371094 }, { "id": 62, "text": "L", "logprob": -5.6054688 }, { "id": 44, "text": ":", "logprob": -3.0722656 }, { "id": 1682, "text": " List", "logprob": -0.6879883 }, { "id": 77, "text": "[", "logprob": -0.38500977 }, { "id": 1808, "text": "float", "logprob": -0.984375 }, { "id": 10794, "text": "]):", "logprob": -2.5351562 } ], "tokens": [ { "id": 284, "text": "\n ", "logprob": -1.1738281, "special": false }, { "id": 442, "text": " return", "logprob": -0.95947266, "special": false }, { "id": 3632, "text": " sum", "logprob": -1.4199219, "special": false }, { "id": 26, "text": "(", "logprob": -0.085876465, "special": false }, { "id": 62, "text": "L", "logprob": -0.09875488, "special": false }, { "id": 27, "text": ")", "logprob": -0.30517578, "special": false }, { "id": 517, "text": " /", "logprob": -0.42089844, "special": false }, { "id": 2069, "text": " len", "logprob": -0.042053223, "special": false }, { "id": 26, "text": "(", "logprob": -0.0011806488, "special": false }, { "id": 62, "text": "L", "logprob": -0.0005259514, "special": false }, { "id": 27, "text": ")", "logprob": -0.0017633438, "special": false }, { "id": 478, "text": "\n\n", "logprob": -0.69189453, "special": false }, { "id": 203, "text": "\n", "logprob": -0.041870117, "special": false }, { "id": 589, "text": "def", "logprob": -0.27856445, "special": false }, { "id": 3226, "text": " ge", "logprob": -1.7255859, "special": false }, { "id": 21017, "text": "ometric", "logprob": -0.011291504, "special": false }, { "id": 81, "text": "_", "logprob": -0.008430481, "special": false }, { "id": 6009, "text": "mean", "logprob": -0.025787354, "special": false }, { "id": 26, "text": "(", "logprob": -0.073913574, "special": false }, { "id": 62, "text": "L", "logprob": -0.09967041, "special": false } ] } }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25927734, "text": "_" }, { "id": 6009, "logprob": -2.25, "text": "mean" }, { "id": 26, "logprob": -0.30126953, "text": "(" }, { "id": 62, "logprob": -5.7539062, "text": "L" }, { "id": 44, "logprob": -3.0878906, "text": ":" }, { "id": 1682, "logprob": -0.6845703, "text": " List" }, { "id": 77, "logprob": -0.3918457, "text": "[" }, { "id": 1808, "logprob": -0.8798828, "text": "float" }, { "id": 10794, "logprob": -2.4980469, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1533203, "special": false, "text": "\n " }, { "id": 442, "logprob": -0.91796875, "special": false, "text": " return" }, { "id": 3632, "logprob": -1.3291016, "special": false, "text": " sum" }, { "id": 26, "logprob": -0.08062744, "special": false, "text": "(" }, { "id": 62, "logprob": -0.097717285, "special": false, "text": "L" }, { "id": 27, "logprob": -0.29003906, "special": false, "text": ")" }, { "id": 517, "logprob": -0.34958984, "special": false, "text": " /" }, { "id": 2069, "logprob": -0.03829956, "special": false, "text": " len" }, { "id": 26, "logprob": -0.0011987686, "special": false, "text": "(" }, { "id": 62, "logprob": -0.00050878525, "special": false, "text": "L" } ] }, "generated_text": "\n return sum(L) / len(L" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25878906, "text": "_" }, { "id": 6009, "logprob": -2.2109375, "text": "mean" }, { "id": 26, "logprob": -0.30371094, "text": "(" }, { "id": 62, "logprob": -5.6054688, "text": "L" }, { "id": 44, "logprob": -3.0722656, "text": ":" }, { "id": 1682, "logprob": -0.6879883, "text": " List" }, { "id": 77, "logprob": -0.38500977, "text": "[" }, { "id": 1808, "logprob": -0.984375, "text": "float" }, { "id": 10794, "logprob": -2.5351562, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1738281, "special": false, "text": "\n " }, { "id": 442, "logprob": -0.9584961, "special": false, "text": " return" }, { "id": 3632, "logprob": -1.4169922, "special": false, "text": " sum" }, { "id": 26, "logprob": -0.085876465, "special": false, "text": "(" }, { "id": 62, "logprob": -0.0982666, "special": false, "text": "L" }, { "id": 27, "logprob": -0.3022461, "special": false, "text": ")" }, { "id": 517, "logprob": -0.40504883, "special": false, "text": " /" }, { "id": 2069, "logprob": -0.041656494, "special": false, "text": " len" }, { "id": 26, "logprob": -0.0011844635, "special": false, "text": "(" }, { "id": 62, "logprob": -0.0005264282, "special": false, "text": "L" } ] }, "generated_text": "\n return sum(L) / len(L" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25927734, "text": "_" }, { "id": 6009, "logprob": -2.25, "text": "mean" }, { "id": 26, "logprob": -0.30126953, "text": "(" }, { "id": 62, "logprob": -5.7539062, "text": "L" }, { "id": 44, 
"logprob": -3.0878906, "text": ":" }, { "id": 1682, "logprob": -0.6845703, "text": " List" }, { "id": 77, "logprob": -0.3918457, "text": "[" }, { "id": 1808, "logprob": -0.8798828, "text": "float" }, { "id": 10794, "logprob": -2.4980469, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1533203, "special": false, "text": "\n " }, { "id": 442, "logprob": -0.9165039, "special": false, "text": " return" }, { "id": 3632, "logprob": -1.328125, "special": false, "text": " sum" }, { "id": 26, "logprob": -0.07946777, "special": false, "text": "(" }, { "id": 62, "logprob": -0.09820557, "special": false, "text": "L" }, { "id": 27, "logprob": -0.28930664, "special": false, "text": ")" }, { "id": 517, "logprob": -0.34592773, "special": false, "text": " /" }, { "id": 2069, "logprob": -0.038330078, "special": false, "text": " len" }, { "id": 26, "logprob": -0.0011940002, "special": false, "text": "(" }, { "id": 62, "logprob": -0.00050878525, "special": false, "text": "L" } ] }, "generated_text": "\n return sum(L) / len(L" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25927734, "text": "_" }, { "id": 6009, "logprob": -2.25, "text": "mean" }, { "id": 26, "logprob": -0.30126953, "text": "(" }, { "id": 62, "logprob": -5.7539062, "text": "L" }, { "id": 44, "logprob": -3.0878906, "text": ":" }, { "id": 1682, "logprob": -0.6845703, "text": " List" }, { "id": 77, "logprob": -0.3918457, "text": "[" }, { "id": 1808, "logprob": -0.8798828, "text": "float" }, { "id": 10794, "logprob": -2.4980469, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1533203, "special": false, "text": "\n " }, { "id": 442, "logprob": -0.91259766, "special": false, "text": " return" }, { "id": 3632, "logprob": -1.3251953, "special": false, "text": " sum" }, { "id": 26, "logprob": -0.08062744, "special": false, "text": "(" }, { "id": 62, "logprob": -0.09906006, "special": false, "text": "L" }, { "id": 27, "logprob": -0.28979492, "special": false, "text": ")" }, { "id": 517, "logprob": -0.35958984, "special": false, "text": " /" }, { "id": 2069, "logprob": -0.038604736, "special": false, "text": " len" }, { "id": 26, "logprob": -0.0011901855, "special": false, "text": "(" }, { "id": 62, "logprob": -0.0005078316, "special": false, "text": "L" } ] }, "generated_text": "\n return sum(L) / len(L" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2119141, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2480469, "special": false, "text": " " }, { "id": 1956, "logprob": -0.33203125, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19250488, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030166626, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2119141, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2480469, "special": false, "text": " " }, { "id": 1956, "logprob": -0.33203125, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19250488, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030166626, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2119141, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2480469, "special": false, "text": " " }, { "id": 1956, "logprob": -0.33203125, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19250488, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030166626, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2099609, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2451172, "special": false, "text": " " }, { "id": 1956, "logprob": -0.3322754, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19213867, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030151367, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2099609, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2451172, "special": false, "text": " " }, { "id": 1956, "logprob": -0.3322754, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19213867, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030151367, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5625, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4609375, "text": " ort" }, { "id": 35567, "logprob": -7.5585938, "text": "olan" }, { "id": 15, "logprob": -1.4003906, "text": "," }, { "id": 1669, "logprob": -1.5673828, "text": " il" }, { "id": 11580, "logprob": -0.94628906, "text": " faut" }, { "id": 3913, "logprob": -3.703125, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7646484, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.6113281, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5263672, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.2119141, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.40844727, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.0037841797, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0195312, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.53125, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4140625, "text": " ort" }, { "id": 35567, "logprob": -7.5234375, "text": "olan" }, { "id": 15, "logprob": -1.3613281, "text": "," }, { "id": 1669, "logprob": -1.5458984, "text": " il" }, { "id": 11580, "logprob": -0.94189453, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7548828, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.578125, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5117188, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11004639, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4506836, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.53125, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4140625, "text": " ort" }, { "id": 35567, "logprob": -7.5234375, "text": "olan" }, { 
"id": 15, "logprob": -1.3613281, "text": "," }, { "id": 1669, "logprob": -1.5458984, "text": " il" }, { "id": 11580, "logprob": -0.94189453, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7548828, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.578125, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5117188, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11004639, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4506836, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.53125, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4140625, "text": " ort" }, { "id": 35567, "logprob": -7.5234375, "text": "olan" }, { "id": 15, "logprob": -1.3613281, "text": "," }, { "id": 1669, "logprob": -1.5458984, "text": " il" }, { "id": 11580, "logprob": -0.94189453, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7548828, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.578125, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5117188, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11004639, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4506836, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 15, "logprob": null, "text": "," }, { "id": 1669, "logprob": -5.4414062, "text": " il" }, { "id": 11580, "logprob": -2.3378906, "text": " faut" }, { "id": 3913, "logprob": -4.3554688, "text": " tout" }, { "id": 39261, "logprob": -2.9238281, "text": " d'abord" } ], "seed": 0, "tokens": [ { "id": 408, "logprob": -0.07891846, "special": false, "text": " que" }, { "id": 366, "logprob": -1.2939453, "special": false, "text": " la" }, { "id": 8769, "logprob": -0.3708496, "special": false, "text": " personne" }, { "id": 1479, "logprob": -2.2871094, "special": false, "text": " qui" }, { "id": 2997, "logprob": -0.8671875, "special": false, "text": " vous" }, { "id": 35977, "logprob": -1.5097656, "special": false, "text": " suit" }, { "id": 21558, "logprob": -0.07891846, "special": false, "text": " ait" }, { "id": 447, "logprob": -0.12695312, "special": false, "text": " un" }, { "id": 78606, "logprob": -2.21875, "special": false, "text": " profil" }, { "id": 3899, "logprob": -1.3535156, "special": false, "text": " bien" } ] }, "generated_text": "Pour déguster un ortolan, il faut tout d'abord que la personne qui vous suit ait un profil bien" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5625, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4609375, "text": " ort" }, { "id": 35567, "logprob": -7.5585938, "text": "olan" }, { "id": 15, "logprob": -1.4003906, "text": "," }, { "id": 1669, "logprob": -1.5673828, "text": " il" }, { "id": 11580, "logprob": -0.94628906, "text": " faut" }, { "id": 3913, "logprob": -3.703125, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": 0, "tokens": [ { "id": 578, "logprob": -1.6591797, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.4492188, "special": false, "text": " faire" }, { "id": 159570, "logprob": -6.6835938, "special": false, "text": " réch" }, { "id": 810, "logprob": 0.0, "special": false, "text": "au" }, { "id": 12736, "logprob": 0.0, "special": false, "text": "ffer" }, { "id": 1742, "logprob": -2.5175781, "special": false, "text": " au" }, { "id": 6105, "logprob": -2.0078125, "special": false, "text": " bain" }, { "id": 88254, "logprob": -0.12695312, "special": false, "text": "-mar" }, { "id": 641, "logprob": 0.0, "special": false, "text": "ie" }, { "id": 2940, "logprob": -3.5175781, "special": false, "text": " avec" } ] }, "generated_text": " le faire réchauffer au bain-marie avec" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.59375, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": null, "tokens": [ { "id": 29918, "logprob": -2.3867188, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.8183594, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6367188, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0527344, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.6542969, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.056121826, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.01600647, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.87939453, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.7529297, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.2980957, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": 0, "tokens": [ { "id": 29899, "logprob": -1.1640625, "special": false, "text": "-" }, { "id": 1454, "logprob": -0.07543945, "special": false, "text": "for" }, { "id": 29899, "logprob": 0.0, "special": false, "text": "-" }, { "id": 9342, "logprob": 0.0, "special": false, "text": "comment" }, { "id": 29901, "logprob": 0.0, "special": false, "text": ":" }, { "id": 396, "logprob": -0.2956543, "special": false, "text": " #" }, { "id": 29906, "logprob": -0.52734375, "special": false, "text": "2" }, { "id": 29900, "logprob": -0.6899414, "special": false, "text": "0" }, { "id": 29896, "logprob": 0.0, "special": false, "text": "1" }, { "id": 29946, "logprob": -1.5068359, "special": false, "text": "4" } ] }, "generated_text": "Test request-for-comment: #2014" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.671875, "text": "request" } ], "seed": null, "tokens": [ { "id": 29918, "logprob": -2.3828125, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.8105469, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6396484, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0546875, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.6513672, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.056365967, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.016082764, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.87841797, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.7548828, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.29711914, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": null, "tokens": [ { "id": 29918, "logprob": -2.3828125, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.828125, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6386719, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0527344, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.6542969, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.055877686, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.016021729, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.8769531, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.7583008, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.29833984, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.671875, "text": "request" } ], "seed": null, "tokens": [ { "id": 29918, "logprob": -2.3847656, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.8144531, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6396484, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0527344, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.65478516, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.056243896, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.016143799, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.8808594, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.75341797, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.2956543, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": null, "tokens": [ { 
"id": 29918, "logprob": -2.3769531, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.8183594, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6396484, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0546875, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.65478516, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.05557251, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.01612854, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.8730469, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.7519531, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.29785156, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mpt/test_mpt_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5117188, "text": " is" }, { "id": 18147, "logprob": -8.96875, "text": " Deep" }, { "id": 20727, "logprob": -1.953125, "text": " Learning" }, { "id": 32, "logprob": -0.94189453, "text": "?" } ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5830078, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3183594, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.32617188, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5742188, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.6015625, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.67822266, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5395508, "special": false, "text": " a" }, { "id": 749, "logprob": -1.8623047, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6020508, "special": false, "text": "field" }, { "id": 273, "logprob": -0.0552063, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0742188, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.011405945, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9165039, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4501953, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.4960938, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.02116394, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5, "text": " is" }, { "id": 18147, "logprob": -8.984375, "text": " Deep" }, { "id": 20727, "logprob": -1.96875, "text": " Learning" }, { "id": 32, "logprob": -0.93359375, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5800781, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3242188, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.31835938, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5644531, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.5957031, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.68603516, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5258789, "special": false, "text": " a" }, { "id": 749, "logprob": -1.859375, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6166992, "special": false, "text": "field" }, { "id": 273, "logprob": -0.056762695, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0703125, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.011428833, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9213867, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4726562, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.5039062, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021652222, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5, "text": " is" }, { "id": 18147, "logprob": -8.984375, "text": " Deep" }, { "id": 20727, "logprob": -1.96875, "text": " Learning" }, { "id": 32, "logprob": -0.93359375, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5800781, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3242188, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.31835938, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5644531, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.5957031, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.68603516, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5258789, "special": false, "text": " a" }, { "id": 749, "logprob": -1.859375, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6166992, "special": false, "text": "field" }, { "id": 273, "logprob": -0.056762695, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0703125, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.011428833, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9213867, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4726562, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.5039062, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021652222, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5, "text": " is" }, { "id": 18147, "logprob": -8.984375, "text": " Deep" }, { "id": 20727, "logprob": -1.96875, "text": " Learning" }, { "id": 32, "logprob": -0.93359375, "text": "?" } ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5800781, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3242188, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.31835938, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5644531, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.5957031, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.68603516, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5258789, "special": false, "text": " a" }, { "id": 749, "logprob": -1.859375, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6166992, "special": false, "text": "field" }, { "id": 273, "logprob": -0.056762695, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0703125, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.011428833, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9213867, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4726562, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.5039062, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021652222, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mpt/test_mpt.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5117188, "text": " is" }, { "id": 18147, "logprob": -8.96875, "text": " Deep" }, { "id": 20727, "logprob": -1.953125, "text": " Learning" }, { "id": 32, "logprob": -0.94189453, "text": "?" } ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5830078, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3105469, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.3215332, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5566406, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.6074219, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.6923828, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5263672, "special": false, "text": " a" }, { "id": 749, "logprob": -1.8544922, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6118164, "special": false, "text": "field" }, { "id": 273, "logprob": -0.055877686, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0537109, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.0115737915, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9111328, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4589844, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.4853516, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021636963, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 338, "logprob": -9.0859375, "text": "is" }, { "id": 21784, "logprob": -10.90625, "text": "Deep" }, { "id": 29257, "logprob": -2.65625, "text": "Learning" }, { "id": 29973, "logprob": -4.8085938, "text": "?" } ], "seed": 0, "tokens": [ { "id": 13, "logprob": -0.19958496, "special": false, "text": "\n" }, { "id": 4013, "logprob": -2.203125, "special": false, "text": "This" }, { "id": 1139, "logprob": -0.23693848, "special": false, "text": " question" }, { "id": 756, "logprob": 0.0, "special": false, "text": " has" }, { "id": 1063, "logprob": -0.076538086, "special": false, "text": " been" }, { "id": 4433, "logprob": 0.0, "special": false, "text": " asked" }, { "id": 1784, "logprob": -1.1367188, "special": false, "text": " many" }, { "id": 3064, "logprob": 0.0, "special": false, "text": " times" }, { "id": 322, "logprob": -1.7460938, "special": false, "text": " and" }, { "id": 306, "logprob": 0.0, "special": false, "text": " I" } ], "top_tokens": null }, "generated_text": "What is Deep Learning?\nThis question has been asked many times and I" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.703125, "text": "What" }, { "id": 338, "logprob": -1.4765625, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8583984, "text": "Learning" }, { "id": 29973, "logprob": -0.7548828, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9306641, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4550781, "special": false, "text": "What" }, { "id": 338, "logprob": -0.5732422, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5761719, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5888672, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.026504517, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4287109, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15856934, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62646484, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.703125, "text": "What" }, { "id": 338, "logprob": -1.4765625, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8652344, "text": "Learning" }, { "id": 29973, "logprob": -0.7548828, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9306641, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4550781, "special": false, "text": "What" }, { "id": 338, "logprob": -0.5732422, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5761719, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5888672, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.026504517, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4287109, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15856934, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62646484, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.703125, "text": "What" }, { "id": 338, "logprob": -1.4765625, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8583984, "text": "Learning" }, { "id": 29973, "logprob": -0.7548828, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9306641, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4550781, "special": false, "text": "What" }, { "id": 338, "logprob": -0.5732422, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5761719, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5888672, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.026504517, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4287109, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15856934, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62646484, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.703125, "text": "What" }, { "id": 338, "logprob": -1.4765625, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8652344, "text": "Learning" }, { "id": 29973, "logprob": -0.7548828, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9306641, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4550781, "special": false, "text": "What" }, { "id": 338, "logprob": -0.5732422, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5761719, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5888672, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.026504517, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4287109, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15856934, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62646484, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.703125, "text": "What" }, { "id": 338, "logprob": -1.4765625, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8652344, "text": "Learning" }, { "id": 29973, "logprob": -0.7548828, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9306641, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4550781, "special": false, "text": "What" }, { "id": 338, "logprob": -0.5732422, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5761719, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5888672, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.026504517, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4287109, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15856934, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62646484, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox_sharded/test_flash_neox_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" }, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" 
}, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" 
}, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" }, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox_sharded/test_flash_neox.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" }, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5390625, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.002090454, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.3589859e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0009455681, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.088012695, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12585449, "special": false, "text": " first" }, { "id": 908, "logprob": -0.017196655, "special": false, "text": " used" }, { "id": 275, "logprob": -0.49731445, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/proto/generate.proto
syntax = "proto3"; package generate.v1; service TextGenerationService { /// Model Info rpc Info (InfoRequest) returns (InfoResponse) {} /// Service discovery rpc ServiceDiscovery (ServiceDiscoveryRequest) returns (ServiceDiscoveryResponse) {} /// Empties batch cache rpc ClearCache (ClearCacheRequest) returns (ClearCacheResponse); /// Remove requests from a cached batch rpc FilterBatch (FilterBatchRequest) returns (FilterBatchResponse); /// Warmup the model and compute max cache size rpc Warmup (WarmupRequest) returns (WarmupResponse); /// Prefill batch and decode first token rpc Prefill (PrefillRequest) returns (PrefillResponse); /// Decode token for a list of prefilled batches rpc Decode (DecodeRequest) returns (DecodeResponse); /// Health check rpc Health (HealthRequest) returns (HealthResponse); } message HealthRequest {} message HealthResponse {} /// Empty request message InfoRequest {} message InfoResponse { bool requires_padding = 1; string dtype = 2; string device_type = 3; optional uint32 window_size = 4; } /// Empty request message ServiceDiscoveryRequest {} message ServiceDiscoveryResponse { /// Other shards urls repeated string urls = 1; } message ClearCacheRequest { /// Optional batch id optional uint64 id = 1; } /// Empty response message ClearCacheResponse {} message NextTokenChooserParameters { /// exponential scaling output probability distribution float temperature = 1; /// restricting to the k highest probability elements uint32 top_k = 2; /// restricting to top tokens summing to prob_cut_off <= prob_cut_off float top_p = 3; /// restricting to top tokens summing to prob_cut_off <= prob_cut_off float typical_p = 4; /// apply sampling on the logits bool do_sample = 5; /// random seed for sampling uint64 seed = 6; /// repetition penalty float repetition_penalty = 7; /// token watermarking using "A Watermark for Large Language Models" bool watermark = 8; } message StoppingCriteriaParameters { /// Maximum number of generated tokens uint32 max_new_tokens = 1; /// Optional stopping sequences repeated string stop_sequences = 2; /// Ignore end of sequence token /// used for benchmarking bool ignore_eos_token = 3; } message Request { /// Request ID uint64 id = 1; /// The generation context string inputs = 2; /// Context truncation uint32 truncate = 3; /// Next Token Chooser Parameters NextTokenChooserParameters parameters = 4; /// Stopping Criteria Parameters StoppingCriteriaParameters stopping_parameters = 5; /// Return prefill logprobs bool prefill_logprobs = 6; /// Return most likely n tokens uint32 top_n_tokens = 7; } message Batch { /// Batch ID uint64 id = 1; /// Individual requests repeated Request requests = 2; /// Batch size (==len(requests)) uint32 size = 3; /// Maximum number of tokens this batch will grow to uint32 max_tokens = 4; } message CachedBatch { /// Batch ID uint64 id = 1; /// Individual requests ids repeated uint64 request_ids = 2; /// Batch size (==len(requests)) uint32 size = 3; /// Maximum number of tokens this batch will grow to uint32 max_tokens = 4; } enum FinishReason { FINISH_REASON_LENGTH = 0; FINISH_REASON_EOS_TOKEN = 1; FINISH_REASON_STOP_SEQUENCE = 2; } message GeneratedText { /// Output string text = 1; /// Number of generated tokens uint32 generated_tokens = 2; /// Finish reason FinishReason finish_reason = 3; /// Seed optional uint64 seed = 4; } message PrefillTokens { /// Prefill Token IDs repeated uint32 ids = 1; /// Prefill Logprobs repeated float logprobs = 2; /// Prefill tokens repeated string texts = 3; } message TopTokens { /// Top Token 
IDs repeated uint32 ids = 1; /// Top Logprobs repeated float logprobs = 2; /// Top Token Texts repeated string texts = 3; /// If the tokens are special repeated bool is_special = 6; } message Generation { /// Request ID uint64 request_id = 1; /// Prefill tokens (optional) PrefillTokens prefill_tokens = 2; /// Token ID uint32 token_id = 3; /// Logprob float token_logprob = 4; /// Text string token_text = 5; /// Is it a special token bool token_is_special = 6; /// Complete generated text optional GeneratedText generated_text = 7; /// Top tokens TopTokens top_tokens = 8; } message FilterBatchRequest { /// Batch ID uint64 batch_id = 1; /// Requests to keep repeated uint64 request_ids = 2; } message FilterBatchResponse { /// Filtered Batch (cached) CachedBatch batch = 1; } message PrefillRequest { /// Batch Batch batch = 1; } message PrefillResponse { /// Generation repeated Generation generations = 1; /// Next batch (cached) optional CachedBatch batch = 2; } message DecodeRequest { /// Cached batches repeated CachedBatch batches = 1; } message DecodeResponse { /// Decodes repeated Generation generations = 1; /// Next batch (cached) optional CachedBatch batch = 2; } message WarmupRequest { /// Batch to warmup on Batch batch = 1; } /// Empty response message WarmupResponse { /// Maximum number of tokens supported by the model optional uint32 max_supported_total_tokens = 1; }
0
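The service and message definitions in generate.proto are what the router uses to talk to the shards over gRPC. Purely as an illustration, the sketch below builds the same messages from Python, assuming stubs have been generated with grpcio-tools into a module named `generate_pb2` (that module name and every parameter value here are assumptions for the example, not something this file defines).

```python
# Hypothetical module name: stubs generated from generate.proto via grpcio-tools.
import generate_pb2 as pb


def make_batch(prompt: str, batch_size: int = 1, truncate: int = 1024,
               max_new_tokens: int = 32) -> "pb.Batch":
    # One Request per slot in the batch; the sampling values below are arbitrary.
    requests = [
        pb.Request(
            id=i,
            inputs=prompt,
            truncate=truncate,
            prefill_logprobs=False,
            top_n_tokens=0,
            parameters=pb.NextTokenChooserParameters(
                temperature=1.0, top_k=0, top_p=1.0, typical_p=1.0,
                do_sample=False, seed=0, repetition_penalty=1.0, watermark=False,
            ),
            stopping_parameters=pb.StoppingCriteriaParameters(
                max_new_tokens=max_new_tokens, stop_sequences=[], ignore_eos_token=False,
            ),
        )
        for i in range(batch_size)
    ]
    # size mirrors len(requests); max_tokens caps how large the batch may grow.
    return pb.Batch(id=0, requests=requests, size=len(requests),
                    max_tokens=batch_size * (truncate + max_new_tokens))
```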
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/load_tests/starcoder_load.js
import {check} from 'k6'; import http from 'k6/http'; import {Trend} from 'k6/metrics'; const host = __ENV.HOST || '127.0.0.1:3000'; const totalTime = new Trend('total_time', true); const validationTime = new Trend('validation_time', true); const queueTime = new Trend('queue_time', true); const inferenceTime = new Trend('inference_time', true); const timePerToken = new Trend('time_per_token', true); const example = { payload: JSON.stringify({ inputs: '# This is a fibonacci function written in the Python programming language.' + 'def fibonacci', parameters: { details: true, max_new_tokens: 60, temperature: 0.2, top_p: 0.95, seed: 0, }, }), generated_tokens: 60 }; export const options = { thresholds: { http_req_failed: ['rate==0'], time_per_token: ['p(95)<90'], queue_time: ['p(95)<1500'], }, scenarios: { load_test: { executor: 'constant-arrival-rate', duration: '60s', preAllocatedVUs: 100, rate: 10, timeUnit: '1s', }, }, }; export default function () { const headers = {'Content-Type': 'application/json'}; const res = http.post(`http://${host}/generate`, example.payload, { headers, }); check(res, { 'Post status is 200': (r) => res.status === 200, 'Post response generated tokens': (r) => res.status === 200 && res.json().details.generated_tokens === example.generated_tokens, }); if (res.status === 200) { totalTime.add(res.headers["X-Total-Time"]); validationTime.add(res.headers["X-Validation-Time"]); queueTime.add(res.headers["X-Queue-Time"]); inferenceTime.add(res.headers["X-Inference-Time"]); timePerToken.add(res.headers["X-Time-Per-Token"]); } }
0
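For a quick manual check outside of k6, a single request equivalent to one iteration of starcoder_load.js can be issued from Python. The host, prompt, and parameters simply mirror the constants in the script; using the `requests` package here is an assumption of the sketch.

```python
# One hand-rolled request against /generate, mirroring the k6 payload above.
import requests

HOST = "127.0.0.1:3000"
payload = {
    "inputs": "# This is a fibonacci function written in the Python programming language."
              "def fibonacci",
    "parameters": {"details": True, "max_new_tokens": 60, "temperature": 0.2,
                   "top_p": 0.95, "seed": 0},
}
res = requests.post(f"http://{HOST}/generate", json=payload)
res.raise_for_status()
assert res.json()["details"]["generated_tokens"] == 60
# The same timing headers the k6 trends record:
for h in ("X-Total-Time", "X-Validation-Time", "X-Queue-Time",
          "X-Inference-Time", "X-Time-Per-Token"):
    print(h, res.headers.get(h))
```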
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/load_tests/vllm.js
import { get_options, run } from "./common.js"; const reference_latency_ms = 22; const host = __ENV.HOST || '127.0.0.1:8000'; const max_new_tokens = 50; function generate_payload(gpt){ const input = gpt["conversations"][0]["value"]; return {"prompt": input, "temperature": 0.5, "ignore_eos": true} } export const options = get_options(reference_latency_ms); export default function(){ run(host, generate_payload, max_new_tokens); }
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/load_tests/tgi.js
import { get_options, run } from "./common.js"; const reference_latency_ms = 30; const host = __ENV.HOST || '127.0.0.1:8000'; const max_new_tokens = 50; function generate_payload(gpt){ const input = gpt["conversations"][0]["value"]; return {"inputs": input, "parameters": {"max_new_tokens": max_new_tokens, "temperature" : 0.5}} } export const options = get_options(reference_latency_ms); export default function(){ run(host, generate_payload, max_new_tokens); }
0
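The vllm.js and tgi.js scripts differ only in the payload shape they build for the same ShareGPT prompt. A side-by-side sketch in Python, taking one `gpt` entry from the JSON file that common.js loads:

```python
# Payload builders equivalent to generate_payload in vllm.js and tgi.js.
def vllm_payload(gpt):
    return {"prompt": gpt["conversations"][0]["value"],
            "temperature": 0.5, "ignore_eos": True}


def tgi_payload(gpt, max_new_tokens=50):
    return {"inputs": gpt["conversations"][0]["value"],
            "parameters": {"max_new_tokens": max_new_tokens, "temperature": 0.5}}
```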
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/load_tests/common.js
import { check, randomSeed } from 'k6'; import http from 'k6/http'; import { Trend, Counter } from 'k6/metrics'; import { randomItem } from 'https://jslib.k6.io/k6-utils/1.2.0/index.js'; const seed = 0; const host = __ENV.HOST || '127.0.0.1:8000'; const timePerToken = new Trend('time_per_token', true); const throughput = new Counter('tokens_per_s'); randomSeed(seed); // const shareGPT = JSON.parse(open("ShareGPT_V3_unfiltered_cleaned_split.json")) const shareGPT = JSON.parse(open("small.json")) export function get_options(reference_latency_ms){ return { thresholds: { http_req_failed: ['rate==0'], time_per_token: [{ threshold: `p(50)<${3 * reference_latency_ms}`, abortOnFail: true, delayAbortEval: '10s' }], }, scenarios: { load_test: { executor: 'constant-arrival-rate', duration: '60s', preAllocatedVUs: 100, rate: 10, timeUnit: '1s', }, }, }; } export function run(host, generate_payload, max_new_tokens) { const headers = {'Content-Type': 'application/json'}; const query = randomItem(shareGPT); const payload = JSON.stringify(generate_payload(query)); const res = http.post(`http://${host}/generate`, payload, { headers, }); if(res.status >= 400 && res.status < 500){ return; } check(res, { 'Post status is 200': (r) => res.status === 200, }); const n_tokens = max_new_tokens; const timings = res.timings.duration; if (res.status === 200) { const latency_ms_per_token = timings / n_tokens; timePerToken.add(latency_ms_per_token); const latency_in_s = latency_ms_per_token / 1000; const individual_throughput = 1 / latency_in_s; throughput.add(individual_throughput); } }
0
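The per-token metrics that common.js feeds into its k6 trends reduce to two lines of arithmetic; restated in Python below (the example numbers are made up).

```python
# Per-request metrics derived the same way as in common.js, assuming the request
# duration is measured in milliseconds like k6's res.timings.duration.
def per_token_metrics(duration_ms: float, max_new_tokens: int):
    latency_ms_per_token = duration_ms / max_new_tokens
    tokens_per_s = 1000.0 / latency_ms_per_token  # inverse of the per-token latency
    return latency_ms_per_token, tokens_per_s


# e.g. a 1500 ms request generating 50 tokens -> 30 ms/token, ~33.3 tokens/s
print(per_token_metrics(1500.0, 50))
```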
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/benchmark/README.md
<div align="center"> # Text Generation Inference benchmarking tool ![benchmark](../assets/benchmark.png) </div> A lightweight benchmarking tool inspired by [oha](https://github.com/hatoo/oha) and powered by [tui](https://github.com/tui-rs-revival/ratatui). ## Install ```shell make install-benchmark ``` ## Run First, start `text-generation-inference`: ```shell text-generation-launcher --model-id bigscience/bloom-560m ``` Then run the benchmarking tool: ```shell text-generation-benchmark --tokenizer-name bigscience/bloom-560m ```
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/benchmark/Cargo.toml
[package] name = "text-generation-benchmark" description = "Text Generation Benchmarking tool" version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true [lib] path = "src/lib.rs" [[bin]] name = "text-generation-benchmark" path = "src/main.rs" [dependencies] average = "0.14" clap = { version = "4.4.5", features = ["derive", "env"] } crossterm = "0.27" float-ord = "0.3.2" serde = {version = "1.0.188", features = ["derive"]} serde_json = "1.0" tabled = "0.14.0" text-generation-client = { path = "../router/client" } thiserror = "1.0.48" tokenizers = { version = "0.14.0", features = ["http"] } tokio = { version = "1.32.0", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync", "macros"] } tui = {package = "ratatui", version = "0.23", default-features = false, features = ["crossterm"]} tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] } hf-hub = "0.3.1"
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/event.rs
/// Inspired by https://github.com/orhun/rust-tui-template/blob/472aa515119d4c94903eac12d9784417281dc7f5/src/event.rs use crossterm::event; use std::time::{Duration, Instant}; use tokio::sync::{broadcast, mpsc}; /// Events #[derive(Debug)] pub(crate) enum Event { /// Terminal tick. Tick, /// Key press. Key(event::KeyEvent), /// Terminal resize. Resize(u16, u16), } pub(crate) async fn terminal_event_task( fps: u32, event_sender: mpsc::Sender<Event>, mut shutdown_receiver: broadcast::Receiver<()>, _shutdown_guard_sender: mpsc::Sender<()>, ) { // End task if a message is received on shutdown_receiver // _shutdown_guard_sender will be dropped once the task is finished tokio::select! { _ = event_loop(fps, event_sender) => { }, _ = shutdown_receiver.recv() => {} } } /// Main event loop async fn event_loop(fps: u32, event_sender: mpsc::Sender<Event>) { // Frame budget let per_frame = Duration::from_secs(1) / fps; // When was last frame executed let mut last_frame = Instant::now(); loop { // Sleep to avoid blocking the thread for too long if let Some(sleep) = per_frame.checked_sub(last_frame.elapsed()) { tokio::time::sleep(sleep).await; } // Get crossterm event and send a new one over the channel if event::poll(Duration::from_secs(0)).expect("no events available") { match event::read().expect("unable to read event") { event::Event::Key(e) => event_sender.send(Event::Key(e)).await.unwrap_or(()), event::Event::Resize(w, h) => { event_sender.send(Event::Resize(w, h)).await.unwrap_or(()) } _ => (), } } // Frame budget exceeded if last_frame.elapsed() >= per_frame { // Send tick event_sender.send(Event::Tick).await.unwrap_or(()); // Reset last_frame time last_frame = Instant::now(); } } }
0
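The event loop in event.rs paces itself with a frame budget of one second divided by the target fps, sleeping off the remainder of each frame before emitting a tick. Below is a simplified, synchronous Python analogue of that pacing, purely illustrative, without the crossterm polling or shutdown plumbing.

```python
# Fixed-rate tick loop: sleep off the remaining frame budget, then emit a tick.
import time


def tick_loop(fps: int, on_tick, frames: int = 10):
    per_frame = 1.0 / fps            # frame budget in seconds
    last_frame = time.monotonic()
    for _ in range(frames):
        remaining = per_frame - (time.monotonic() - last_frame)
        if remaining > 0:
            time.sleep(remaining)    # avoid busy-waiting inside the budget
        if time.monotonic() - last_frame >= per_frame:
            on_tick()                # budget exceeded: send the tick
            last_frame = time.monotonic()


tick_loop(4, lambda: print("tick"))
```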
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/table.rs
use crate::app::Data; use tabled::settings::Merge; use tabled::{builder::Builder, settings::Style, Table}; #[allow(clippy::too_many_arguments)] pub(crate) fn parameters_table( tokenizer_name: String, sequence_length: u32, decode_length: u32, top_n_tokens: Option<u32>, n_runs: usize, warmups: usize, temperature: Option<f32>, top_k: Option<u32>, top_p: Option<f32>, typical_p: Option<f32>, repetition_penalty: Option<f32>, watermark: bool, do_sample: bool, ) -> Table { let mut builder = Builder::default(); builder.set_header(["Parameter", "Value"]); builder.push_record(["Model", &tokenizer_name]); builder.push_record(["Sequence Length", &sequence_length.to_string()]); builder.push_record(["Decode Length", &decode_length.to_string()]); builder.push_record(["Top N Tokens", &format!("{top_n_tokens:?}")]); builder.push_record(["N Runs", &n_runs.to_string()]); builder.push_record(["Warmups", &warmups.to_string()]); builder.push_record(["Temperature", &format!("{temperature:?}")]); builder.push_record(["Top K", &format!("{top_k:?}")]); builder.push_record(["Top P", &format!("{top_p:?}")]); builder.push_record(["Typical P", &format!("{typical_p:?}")]); builder.push_record(["Repetition Penalty", &format!("{repetition_penalty:?}")]); builder.push_record(["Watermark", &watermark.to_string()]); builder.push_record(["Do Sample", &do_sample.to_string()]); let mut table = builder.build(); table.with(Style::markdown()); table } pub(crate) fn latency_table(data: &Data) -> Table { let mut builder = Builder::default(); builder.set_header([ "Step", "Batch Size", "Average", "Lowest", "Highest", "p50", "p90", "p99", ]); add_latencies( &mut builder, "Prefill", &data.batch_size, &data.prefill_latencies, ); add_latencies( &mut builder, "Decode (token)", &data.batch_size, &data.decode_token_latencies, ); add_latencies( &mut builder, "Decode (total)", &data.batch_size, &data.decode_latencies, ); let mut table = builder.build(); table.with(Style::markdown()).with(Merge::vertical()); table } pub(crate) fn throughput_table(data: &Data) -> Table { let mut builder = Builder::default(); builder.set_header(["Step", "Batch Size", "Average", "Lowest", "Highest"]); add_throuhgputs( &mut builder, "Prefill", &data.batch_size, &data.prefill_throughputs, ); add_throuhgputs( &mut builder, "Decode", &data.batch_size, &data.decode_throughputs, ); let mut table = builder.build(); table.with(Style::markdown()).with(Merge::vertical()); table } fn add_latencies( builder: &mut Builder, step: &'static str, batch_size: &[u32], batch_latencies: &[Vec<f64>], ) { for (i, b) in batch_size.iter().enumerate() { let latencies = &batch_latencies[i]; let (avg, min, max) = avg_min_max(latencies); let row = [ step, &b.to_string(), &format_value(avg, "ms"), &format_value(min, "ms"), &format_value(max, "ms"), &format_value(px(latencies, 50), "ms"), &format_value(px(latencies, 90), "ms"), &format_value(px(latencies, 99), "ms"), ]; builder.push_record(row); } } fn add_throuhgputs( builder: &mut Builder, step: &'static str, batch_size: &[u32], batch_throughputs: &[Vec<f64>], ) { for (i, b) in batch_size.iter().enumerate() { let throughputs = &batch_throughputs[i]; let (avg, min, max) = avg_min_max(throughputs); let row = [ step, &b.to_string(), &format_value(avg, "tokens/secs"), &format_value(min, "tokens/secs"), &format_value(max, "tokens/secs"), ]; builder.push_record(row); } } fn avg_min_max(data: &Vec<f64>) -> (f64, f64, f64) { let average = data.iter().sum::<f64>() / data.len() as f64; let min = data .iter() .min_by(|a, b| a.total_cmp(b)) 
.unwrap_or(&std::f64::NAN); let max = data .iter() .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); (average, *min, *max) } fn px(data: &Vec<f64>, p: u32) -> f64 { let i = (f64::from(p) / 100.0 * data.len() as f64) as usize; *data.get(i).unwrap_or(&std::f64::NAN) } fn format_value(value: f64, unit: &'static str) -> String { format!("{:.2} {unit}", value) }
0
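The summary tables in table.rs rely on a plain average/min/max plus an index-based percentile over pre-sorted data. The same helpers sketched in Python (the sample latencies are made up):

```python
# Average/min/max and the same nearest-index percentile rule as table.rs's `px`.
import math


def avg_min_max(data):
    return (sum(data) / len(data), min(data), max(data))


def px(sorted_data, p):
    # Index p% of the way into the sorted data, falling back to NaN out of range.
    i = int(p / 100 * len(sorted_data))
    return sorted_data[i] if i < len(sorted_data) else math.nan


lat = sorted([12.1, 9.8, 10.4, 11.0, 10.9])
print(avg_min_max(lat), px(lat, 50), px(lat, 90))
```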
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/generation.rs
use std::time::{Duration, Instant}; use text_generation_client::{ Batch, CachedBatch, ClientError, NextTokenChooserParameters, Request, ShardedClient, StoppingCriteriaParameters, }; use tokenizers::{Tokenizer, TruncationDirection}; use tokio::sync::{broadcast, mpsc}; const LOREM_IPSUM: &str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."; #[derive(Debug, Clone)] pub(crate) struct Prefill { pub(crate) latency: Duration, pub(crate) throughput: f64, } #[derive(Debug, Clone)] pub(crate) struct Decode { pub(crate) latency: Duration, pub(crate) token_latency: Duration, pub(crate) throughput: f64, } #[derive(Debug)] pub(crate) enum Message { Warmup, Prefill(Prefill), Decode(Decode), EndRun, EndBatch, } /// Benchmarking task #[allow(clippy::too_many_arguments)] pub(crate) async fn generation_task( tokenizer: Tokenizer, batch_size: Vec<u32>, sequence_length: u32, decode_length: u32, top_n_tokens: Option<u32>, n_runs: usize, warmups: usize, parameters: NextTokenChooserParameters, client: ShardedClient, run_sender: mpsc::Sender<Result<Message, ClientError>>, mut shutdown_receiver: broadcast::Receiver<()>, _shutdown_guard_sender: mpsc::Sender<()>, ) { // End task if a message is received on shutdown_receiver // _shutdown_guard_sender will be dropped once the task is finished tokio::select! { res = generate_runs(tokenizer, batch_size, sequence_length, decode_length, top_n_tokens, n_runs, warmups, parameters, client, run_sender.clone()) => { if let Err(err) = res { run_sender.send(Err(err)).await.unwrap_or(()); } }, _ = shutdown_receiver.recv() => {} } } /// Benchmark prefill/decode #[allow(clippy::too_many_arguments)] async fn generate_runs( tokenizer: Tokenizer, batch_size: Vec<u32>, sequence_length: u32, decode_length: u32, top_n_tokens: Option<u32>, n_runs: usize, warmups: usize, parameters: NextTokenChooserParameters, mut client: ShardedClient, run_sender: mpsc::Sender<Result<Message, ClientError>>, ) -> Result<(), ClientError> { // Create a dummy sequence let sequence = create_sequence(sequence_length, tokenizer); for b in batch_size { // Warmups on batch size for _ in 0..warmups { let (_, decode_batch) = prefill( sequence.clone(), sequence_length, b, decode_length, parameters.clone(), top_n_tokens, &mut client, ) .await?; let _ = decode(decode_batch, &mut client).await?; // Send warmup message run_sender.send(Ok(Message::Warmup)).await.unwrap_or(()); } for _ in 0..n_runs { let (prefill, decode_batch) = prefill( sequence.clone(), sequence_length, b, decode_length, parameters.clone(), top_n_tokens, &mut client, ) .await?; // Send prefill message run_sender .send(Ok(Message::Prefill(prefill))) .await .unwrap_or(()); let decode = decode(decode_batch, &mut client).await?; // Send decode message run_sender .send(Ok(Message::Decode(decode))) .await .unwrap_or(()); // Send run ended message run_sender.send(Ok(Message::EndRun)).await.unwrap_or(()); } // Batch ended run_sender.send(Ok(Message::EndBatch)).await.unwrap_or(()); } Ok(()) } // Run a prefill step async fn prefill( sequence: String, sequence_length: u32, batch_size: u32, decode_length: u32, parameters: NextTokenChooserParameters, top_n_tokens: 
Option<u32>, client: &mut ShardedClient, ) -> Result<(Prefill, CachedBatch), ClientError> { // Create requests let requests = (0..batch_size) .map(|id| Request { id: id.into(), prefill_logprobs: false, inputs: sequence.clone(), truncate: sequence_length, parameters: Some(parameters.clone()), stopping_parameters: Some(StoppingCriteriaParameters { max_new_tokens: decode_length, stop_sequences: vec![], ignore_eos_token: true, // Will not stop even if a eos token is generated }), top_n_tokens: top_n_tokens.unwrap_or(0), }) .collect(); let batch = Batch { id: 0, requests, size: batch_size, max_tokens: batch_size * (sequence_length + decode_length), }; // Run prefill let start_time = Instant::now(); let (_, decode_batch) = client.prefill(batch.clone()).await?; // Get latency let latency = start_time.elapsed(); // Compute throughput from latency and batch size let throughput = batch_size as f64 / latency.as_secs_f64(); // Decode batch cannot be empty let decode_batch = decode_batch.expect("decode_batch is None. This is a bug."); let step = Prefill { latency, throughput, }; Ok((step, decode_batch)) } /// Run a full decode async fn decode(batch: CachedBatch, client: &mut ShardedClient) -> Result<Decode, ClientError> { let mut decode_length = 0; let batch_size = batch.size; let start_time = Instant::now(); // Full decode over decode length let mut next_batch = Some(batch); while let Some(batch) = next_batch { let result = client.decode(vec![batch]).await?; next_batch = result.1; decode_length += 1; } // Get latency let latency = start_time.elapsed(); let token_latency = latency / decode_length; // Compute throughput from latency, batch size and decode length let throughput = (batch_size * decode_length) as f64 / latency.as_secs_f64(); let step = Decode { latency, token_latency, throughput, }; Ok(step) } /// Create a dummy sequence of the correct length fn create_sequence(sequence_length: u32, tokenizer: Tokenizer) -> String { let lorem_ipsum_length = tokenizer.encode(LOREM_IPSUM, true).unwrap().len(); // Repeat lorem ipsum to cover sequence length let string_sequence = LOREM_IPSUM.repeat((0..sequence_length).step_by(lorem_ipsum_length).len()); // Encode sequence let mut encoding = tokenizer.encode(string_sequence, true).unwrap(); // Truncate to sequence_length encoding.truncate(sequence_length as usize, 0, TruncationDirection::Left); // Decode tokenizer.decode(encoding.get_ids(), false).unwrap() }
0
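The Prefill and Decode structs in generation.rs carry throughputs computed directly from latency: prefill counts requests per second, while decode counts generated tokens per second across the whole batch, with the per-token latency obtained by dividing the total decode latency by the number of decode steps. Restated in Python with made-up numbers:

```python
# The throughput arithmetic from generation.rs, written out for clarity.
def prefill_throughput(batch_size: int, latency_s: float) -> float:
    # One prefill step processes every request in the batch once.
    return batch_size / latency_s


def decode_metrics(batch_size: int, decode_length: int, latency_s: float):
    # Decode runs `decode_length` steps, producing one token per request per step.
    token_latency_s = latency_s / decode_length
    throughput = (batch_size * decode_length) / latency_s
    return token_latency_s, throughput


# e.g. batch of 8: 0.05 s prefill -> 160 req/s; 64 decode steps in 2 s -> 256 tokens/s
print(prefill_throughput(8, 0.05), decode_metrics(8, 64, 2.0))
```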
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/lib.rs
mod app; mod event; mod generation; mod table; mod utils; use crate::app::App; use crate::event::Event; use crossterm::ExecutableCommand; use std::io; use text_generation_client::{NextTokenChooserParameters, ShardedClient}; use tokenizers::Tokenizer; use tokio::sync::{broadcast, mpsc}; use tui::backend::CrosstermBackend; use tui::Terminal; /// Run benchmarking app #[allow(clippy::too_many_arguments)] pub async fn run( tokenizer_name: String, tokenizer: Tokenizer, batch_size: Vec<u32>, sequence_length: u32, decode_length: u32, top_n_tokens: Option<u32>, n_runs: usize, warmups: usize, temperature: Option<f32>, top_k: Option<u32>, top_p: Option<f32>, typical_p: Option<f32>, repetition_penalty: Option<f32>, watermark: bool, do_sample: bool, client: ShardedClient, ) -> Result<(), std::io::Error> { let parameters = NextTokenChooserParameters { temperature: temperature.unwrap_or(1.0), top_k: top_k.unwrap_or(0), top_p: top_p.unwrap_or(1.0), typical_p: typical_p.unwrap_or(1.0), do_sample, seed: 0, repetition_penalty: repetition_penalty.unwrap_or(1.0), watermark, }; // Initialize terminal properties crossterm::terminal::enable_raw_mode()?; io::stdout().execute(crossterm::terminal::EnterAlternateScreen)?; io::stdout().execute(crossterm::cursor::Hide)?; // Initialize terminal let mut terminal = { let backend = CrosstermBackend::new(io::stdout()); Terminal::new(backend)? }; // Create message channel between generation_task and app let (run_sender, run_receiver) = mpsc::channel(8); // Crossterm event channel let (event_sender, mut event_receiver) = mpsc::channel(8); // Shutdown channel to terminate tasks let (shutdown_sender, _) = broadcast::channel(1); // Channel to check if tasks terminated let (shutdown_guard_sender, mut shutdown_guard_receiver) = mpsc::channel(1); // Create generation task tokio::spawn(generation::generation_task( tokenizer, batch_size.clone(), sequence_length, decode_length, top_n_tokens, n_runs, warmups, parameters, client, run_sender, shutdown_sender.subscribe(), shutdown_guard_sender.clone(), )); // Create event task tokio::spawn(event::terminal_event_task( 250, event_sender, shutdown_sender.subscribe(), shutdown_guard_sender.clone(), )); // Drop our end of shutdown sender drop(shutdown_guard_sender); // Create App let mut app = App::new( run_receiver, tokenizer_name.clone(), sequence_length, decode_length, n_runs, batch_size, ); while app.running { // Draw frame terminal.draw(|frame| app.render(frame))?; // Await a new event from event handling task match event_receiver.recv().await { None => break, // Update app state Some(event) => match event { Event::Tick => app.tick(), Event::Key(key_event) => app.handle_key_event(key_event), _ => {} }, } } // Ask tasks to shutdown let _ = shutdown_sender.send(()); // Wait for tasks to shutdown let _ = shutdown_guard_receiver.recv().await; // Revert terminal to original view io::stdout().execute(crossterm::terminal::LeaveAlternateScreen)?; crossterm::terminal::disable_raw_mode()?; io::stdout().execute(crossterm::cursor::Show)?; let parameters_table = table::parameters_table( tokenizer_name, sequence_length, decode_length, top_n_tokens, n_runs, warmups, temperature, top_k, top_p, typical_p, repetition_penalty, watermark, do_sample, ); println!("\n{parameters_table}\n"); let latency_table = table::latency_table(&app.data); println!("\n{latency_table}\n"); let throughput_table = table::throughput_table(&app.data); println!("\n{throughput_table}\n"); Ok(()) }
0
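Before spawning the generation task, lib.rs collapses the optional CLI parameters into a fully populated NextTokenChooserParameters with neutral defaults. The same defaulting written out in Python for clarity (the dict is only an illustration of the mapping, not an API):

```python
# Unset options fall back to values that leave sampling effectively disabled.
def resolve_parameters(temperature=None, top_k=None, top_p=None, typical_p=None,
                       repetition_penalty=None, watermark=False, do_sample=False):
    return {
        "temperature": temperature if temperature is not None else 1.0,
        "top_k": top_k if top_k is not None else 0,
        "top_p": top_p if top_p is not None else 1.0,
        "typical_p": typical_p if typical_p is not None else 1.0,
        "do_sample": do_sample,
        "seed": 0,
        "repetition_penalty": repetition_penalty if repetition_penalty is not None else 1.0,
        "watermark": watermark,
    }
```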
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/main.rs
/// Text Generation Inference benchmarking tool /// /// Inspired by the great Oha app: https://github.com/hatoo/oha /// and: https://github.com/orhun/rust-tui-template use clap::Parser; use std::path::Path; use text_generation_client::ShardedClient; use tokenizers::{FromPretrainedParameters, Tokenizer}; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; /// App Configuration #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Args { /// The name of the tokenizer (as in model_id on the huggingface hub, or local path). #[clap(short, long, env)] tokenizer_name: String, /// The revision to use for the tokenizer if on the hub. #[clap(default_value = "main", long, env)] revision: String, /// The various batch sizes to benchmark for, the idea is to get enough /// batching to start seeing increased latency, this usually means you're /// moving from memory bound (usual as BS=1) to compute bound, and this is /// a sweet spot for the maximum batch size for the model under test #[clap(short, long)] batch_size: Option<Vec<u32>>, /// This is the initial prompt sent to the text-generation-server length /// in token. Longer prompt will slow down the benchmark. Usually the /// latency grows somewhat linearly with this for the prefill step. /// /// Most importantly, the prefill step is usually not the one dominating /// your runtime, so it's ok to keep it short. #[clap(default_value = "10", short, long, env)] sequence_length: u32, /// This is how many tokens will be generated by the server and averaged out /// to give the `decode` latency. This is the *critical* number you want to optimize for /// LLM spend most of their time doing decoding. /// /// Decode latency is usually quite stable. #[clap(default_value = "8", short, long, env)] decode_length: u32, ///How many runs should we average from #[clap(default_value = "10", short, long, env)] runs: usize, /// Number of warmup cycles #[clap(default_value = "1", short, long, env)] warmups: usize, /// The location of the grpc socket. 
This benchmark tool bypasses the router /// completely and directly talks to the gRPC processes #[clap(default_value = "/tmp/text-generation-server-0", short, long, env)] master_shard_uds_path: String, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] temperature: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] top_k: Option<u32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] top_p: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] typical_p: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] repetition_penalty: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] watermark: bool, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] do_sample: bool, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] top_n_tokens: Option<u32>, } fn main() -> Result<(), Box<dyn std::error::Error>> { init_logging(); // Get args let args = Args::parse(); // Pattern match configuration let Args { tokenizer_name, revision, batch_size, sequence_length, decode_length, runs, warmups, temperature, top_k, top_p, typical_p, repetition_penalty, watermark, do_sample, master_shard_uds_path, top_n_tokens, } = args; let batch_size = batch_size.unwrap_or(vec![1, 2, 4, 8, 16, 32]); // Tokenizer instance // This will only be used to validate payloads tracing::info!("Loading tokenizer"); let local_path = Path::new(&tokenizer_name); let tokenizer = if local_path.exists() && local_path.is_dir() && local_path.join("tokenizer.json").exists() { // Load local tokenizer tracing::info!("Found local tokenizer"); Tokenizer::from_file(local_path.join("tokenizer.json")).unwrap() } else { tracing::info!("Downloading tokenizer"); // Parse Huggingface hub token let auth_token = std::env::var("HUGGING_FACE_HUB_TOKEN").ok(); // Download and instantiate tokenizer // We need to download it outside of the Tokio runtime let params = FromPretrainedParameters { revision, auth_token, ..Default::default() }; Tokenizer::from_pretrained(tokenizer_name.clone(), Some(params)).unwrap() }; tracing::info!("Tokenizer loaded"); // Launch Tokio runtime tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap() .block_on(async { // Instantiate sharded client from the master unix socket tracing::info!("Connect to model server"); let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path) .await .expect("Could not connect to server"); // Clear the cache; useful if the webserver rebooted sharded_client .clear_cache(None) .await .expect("Unable to clear cache"); tracing::info!("Connected"); // Run app 
text_generation_benchmark::run( tokenizer_name, tokenizer, batch_size, sequence_length, decode_length, top_n_tokens, runs, warmups, temperature, top_k, top_p, typical_p, repetition_penalty, watermark, do_sample, sharded_client, ) .await .unwrap(); }); Ok(()) } /// Init logging using LOG_LEVEL fn init_logging() { // STDOUT/STDERR layer let fmt_layer = tracing_subscriber::fmt::layer() .with_file(true) .with_line_number(true); // Filter events with LOG_LEVEL let env_filter = EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info")); tracing_subscriber::registry() .with(env_filter) .with(fmt_layer) .init(); }
0
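main.rs resolves the tokenizer by first looking for a local directory containing tokenizer.json and otherwise downloading from the Hub with the given revision and the HUGGING_FACE_HUB_TOKEN environment variable. A sketch of the same resolution using the Python `tokenizers` package (the package and its from_pretrained keyword arguments are assumptions of this example):

```python
# Prefer a local tokenizer.json, otherwise pull the tokenizer from the Hub.
import os
from pathlib import Path
from tokenizers import Tokenizer


def load_tokenizer(name_or_path: str, revision: str = "main") -> Tokenizer:
    local = Path(name_or_path)
    if local.is_dir() and (local / "tokenizer.json").exists():
        return Tokenizer.from_file(str(local / "tokenizer.json"))
    token = os.environ.get("HUGGING_FACE_HUB_TOKEN")  # same env var main.rs reads
    return Tokenizer.from_pretrained(name_or_path, revision=revision, auth_token=token)
```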
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/app.rs
/// Inspired by https://github.com/hatoo/oha/blob/bb989ea3cd77727e7743e7daa60a19894bb5e901/src/monitor.rs use crate::generation::{Decode, Message, Prefill}; use crossterm::event::{KeyCode, KeyEvent, KeyModifiers}; use text_generation_client::ClientError; use tokio::sync::mpsc; use tui::backend::Backend; use tui::layout::{Alignment, Constraint, Direction, Layout}; use tui::style::{Color, Modifier, Style}; use tui::text::{Line, Span}; use tui::widgets::{ Axis, BarChart, Block, Borders, Chart, Dataset, Gauge, GraphType, Paragraph, Tabs, }; use tui::{symbols, Frame}; /// TUI powered App pub(crate) struct App { pub(crate) running: bool, pub(crate) data: Data, completed_runs: Vec<usize>, completed_batch: usize, current_batch: usize, current_tab: usize, touched_tab: bool, zoom: bool, is_error: bool, tokenizer_name: String, sequence_length: u32, decode_length: u32, n_run: usize, receiver: mpsc::Receiver<Result<Message, ClientError>>, } impl App { pub(crate) fn new( receiver: mpsc::Receiver<Result<Message, ClientError>>, tokenizer_name: String, sequence_length: u32, decode_length: u32, n_run: usize, batch_size: Vec<u32>, ) -> Self { let current_tab = 0; let completed_runs: Vec<usize> = (0..batch_size.len()).map(|_| 0).collect(); let completed_batch = 0; let current_batch = 0; let is_error = false; let data = Data::new(n_run, batch_size); Self { running: true, data, completed_runs, completed_batch, current_batch, current_tab, touched_tab: false, zoom: false, is_error, tokenizer_name, sequence_length, decode_length, n_run, receiver, } } /// Handle crossterm key events pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) { match key_event { // Increase and wrap tab KeyEvent { code: KeyCode::Right, .. } | KeyEvent { code: KeyCode::Tab, .. } => { self.touched_tab = true; self.current_tab = (self.current_tab + 1) % self.data.batch_size.len(); } // Decrease and wrap tab KeyEvent { code: KeyCode::Left, .. } => { self.touched_tab = true; if self.current_tab > 0 { self.current_tab -= 1; } else { self.current_tab = self.data.batch_size.len() - 1; } } // Zoom on throughput/latency fig KeyEvent { code: KeyCode::Char('+'), .. } => { self.zoom = true; } // Unzoom on throughput/latency fig KeyEvent { code: KeyCode::Char('-'), .. } => { self.zoom = false; } // Quit KeyEvent { code: KeyCode::Char('q'), .. } | KeyEvent { code: KeyCode::Char('c'), modifiers: KeyModifiers::CONTROL, .. 
} => { self.running = false; } _ => (), } } /// Get all pending messages from generation task pub(crate) fn tick(&mut self) { while let Ok(message) = self.receiver.try_recv() { match message { Ok(message) => match message { Message::Prefill(step) => self.data.push_prefill(step, self.current_batch), Message::Decode(step) => self.data.push_decode(step, self.current_batch), Message::EndRun => { self.completed_runs[self.current_batch] += 1; } Message::EndBatch => { self.data.end_batch(self.current_batch); self.completed_batch += 1; if self.current_batch < self.data.batch_size.len() - 1 { // Only go to next tab if the user never touched the tab keys if !self.touched_tab { self.current_tab += 1; } self.current_batch += 1; } } Message::Warmup => {} }, Err(_) => self.is_error = true, } } } /// Render frame pub fn render<B: Backend>(&mut self, f: &mut Frame<'_, B>) { let batch_progress = (self.completed_batch as f64 / self.data.batch_size.len() as f64).clamp(0.0, 1.0); let run_progress = (self.completed_runs[self.current_batch] as f64 / self.n_run as f64).clamp(0.0, 1.0); // Vertical layout let row5 = Layout::default() .direction(Direction::Vertical) .constraints( [ Constraint::Length(1), Constraint::Length(3), Constraint::Length(3), Constraint::Length(13), Constraint::Min(10), ] .as_ref(), ) .split(f.size()); // Top row horizontal layout let top = Layout::default() .direction(Direction::Horizontal) .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) .split(row5[2]); // Mid row horizontal layout let mid = Layout::default() .direction(Direction::Horizontal) .constraints( [ Constraint::Percentage(25), Constraint::Percentage(25), Constraint::Percentage(25), Constraint::Percentage(25), ] .as_ref(), ) .split(row5[3]); // Left mid row vertical layout let prefill_text = Layout::default() .direction(Direction::Vertical) .constraints([Constraint::Length(8), Constraint::Length(5)].as_ref()) .split(mid[0]); // Right mid row vertical layout let decode_text = Layout::default() .direction(Direction::Vertical) .constraints([Constraint::Length(8), Constraint::Length(5)].as_ref()) .split(mid[2]); let decode_text_latency = Layout::default() .direction(Direction::Horizontal) .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) .split(decode_text[0]); // Bottom row horizontal layout let bottom = Layout::default() .direction(Direction::Horizontal) .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) .split(row5[4]); // Title let title = Block::default() .borders(Borders::NONE) .title(format!( "Model: {} | Sequence Length: {} | Decode Length: {}", self.tokenizer_name, self.sequence_length, self.decode_length )) .style( Style::default() .add_modifier(Modifier::BOLD) .fg(Color::White), ); f.render_widget(title, row5[0]); // Helper let helper = Block::default() .borders(Borders::NONE) .title("<- | tab | ->: change batch tab | q / CTRL + c: quit | +/-: zoom") .title_alignment(Alignment::Right) .style(Style::default().fg(Color::White)); f.render_widget(helper, row5[0]); // Batch tabs let titles = self .data .batch_size .iter() .map(|b| { Line::from(vec![Span::styled( format!("Batch: {b}"), Style::default().fg(Color::White), )]) }) .collect(); let tabs = Tabs::new(titles) .block(Block::default().borders(Borders::ALL).title("Tabs")) .select(self.current_tab) .style(Style::default().fg(Color::LightCyan)) .highlight_style( Style::default() .add_modifier(Modifier::BOLD) .bg(Color::Black), ); f.render_widget(tabs, row5[1]); // Total progress bar let 
color = if self.is_error { Color::Red } else { Color::LightGreen }; let batch_gauge = progress_gauge( "Total Progress", format!("{} / {}", self.completed_batch, self.data.batch_size.len()), batch_progress, color, ); f.render_widget(batch_gauge, top[0]); // Batch progress Bar let color = if self.is_error { Color::Red } else { Color::LightBlue }; let run_gauge = progress_gauge( "Batch Progress", format!( "{} / {}", self.completed_runs[self.current_batch], self.n_run ), run_progress, color, ); f.render_widget(run_gauge, top[1]); // Prefill text infos let prefill_latency_block = latency_paragraph( &mut self.data.prefill_latencies[self.current_tab], "Prefill", ); let prefill_throughput_block = throughput_paragraph(&self.data.prefill_throughputs[self.current_tab], "Prefill"); f.render_widget(prefill_latency_block, prefill_text[0]); f.render_widget(prefill_throughput_block, prefill_text[1]); // Prefill latency histogram let histo_width = 7; let bins = if mid[1].width < 2 { 0 } else { (mid[1].width as usize - 2) / (histo_width + 1) } .max(2); let histo_data = latency_histogram_data(&self.data.prefill_latencies[self.current_tab], bins); let histo_data_str: Vec<(&str, u64)> = histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect(); let prefill_histogram = latency_histogram(&histo_data_str, "Prefill").bar_width(histo_width as u16); f.render_widget(prefill_histogram, mid[1]); // Decode text info let decode_latency_block = latency_paragraph( &mut self.data.decode_latencies[self.current_tab], "Decode Total", ); let decode_token_latency_block = latency_paragraph( &mut self.data.decode_token_latencies[self.current_tab], "Decode Token", ); let decode_throughput_block = throughput_paragraph(&self.data.decode_throughputs[self.current_tab], "Decode"); f.render_widget(decode_latency_block, decode_text_latency[0]); f.render_widget(decode_token_latency_block, decode_text_latency[1]); f.render_widget(decode_throughput_block, decode_text[1]); // Decode latency histogram let histo_data = latency_histogram_data(&self.data.decode_latencies[self.current_tab], bins); let histo_data_str: Vec<(&str, u64)> = histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect(); let decode_histogram = latency_histogram(&histo_data_str, "Decode").bar_width(histo_width as u16); f.render_widget(decode_histogram, mid[3]); // Prefill latency/throughput chart let prefill_latency_throughput_chart = latency_throughput_chart( &self.data.prefill_batch_latency_throughput, &self.data.batch_size, self.zoom, "Prefill", ); f.render_widget(prefill_latency_throughput_chart, bottom[0]); // Decode latency/throughput chart let decode_latency_throughput_chart = latency_throughput_chart( &self.data.decode_batch_latency_throughput, &self.data.batch_size, self.zoom, "Decode", ); f.render_widget(decode_latency_throughput_chart, bottom[1]); } } /// App internal data struct pub(crate) struct Data { pub(crate) batch_size: Vec<u32>, pub(crate) prefill_latencies: Vec<Vec<f64>>, pub(crate) prefill_throughputs: Vec<Vec<f64>>, pub(crate) decode_latencies: Vec<Vec<f64>>, pub(crate) decode_token_latencies: Vec<Vec<f64>>, pub(crate) decode_throughputs: Vec<Vec<f64>>, pub(crate) prefill_batch_latency_throughput: Vec<(f64, f64)>, pub(crate) decode_batch_latency_throughput: Vec<(f64, f64)>, } impl Data { fn new(n_run: usize, batch_size: Vec<u32>) -> Self { let prefill_latencies: Vec<Vec<f64>> = (0..batch_size.len()) .map(|_| Vec::with_capacity(n_run)) .collect(); let prefill_throughputs: Vec<Vec<f64>> = prefill_latencies.clone(); let decode_latencies: Vec<Vec<f64>> = 
prefill_latencies.clone(); let decode_token_latencies: Vec<Vec<f64>> = decode_latencies.clone(); let decode_throughputs: Vec<Vec<f64>> = prefill_throughputs.clone(); let prefill_batch_latency_throughput: Vec<(f64, f64)> = Vec::with_capacity(batch_size.len()); let decode_batch_latency_throughput: Vec<(f64, f64)> = prefill_batch_latency_throughput.clone(); Self { batch_size, prefill_latencies, prefill_throughputs, decode_latencies, decode_token_latencies, decode_throughputs, prefill_batch_latency_throughput, decode_batch_latency_throughput, } } fn push_prefill(&mut self, prefill: Prefill, batch_idx: usize) { let latency = prefill.latency.as_micros() as f64 / 1000.0; self.prefill_latencies[batch_idx].push(latency); self.prefill_throughputs[batch_idx].push(prefill.throughput); } fn push_decode(&mut self, decode: Decode, batch_idx: usize) { let latency = decode.latency.as_micros() as f64 / 1000.0; let token_latency = decode.token_latency.as_micros() as f64 / 1000.0; self.decode_latencies[batch_idx].push(latency); self.decode_token_latencies[batch_idx].push(token_latency); self.decode_throughputs[batch_idx].push(decode.throughput); } fn end_batch(&mut self, batch_idx: usize) { self.prefill_batch_latency_throughput.push(( self.prefill_latencies[batch_idx].iter().sum::<f64>() / self.prefill_latencies[batch_idx].len() as f64, self.prefill_throughputs[batch_idx].iter().sum::<f64>() / self.prefill_throughputs[batch_idx].len() as f64, )); self.decode_batch_latency_throughput.push(( self.decode_latencies[batch_idx].iter().sum::<f64>() / self.decode_latencies[batch_idx].len() as f64, self.decode_throughputs[batch_idx].iter().sum::<f64>() / self.decode_throughputs[batch_idx].len() as f64, )); } } /// Progress bar fn progress_gauge(title: &str, label: String, progress: f64, color: Color) -> Gauge { Gauge::default() .block(Block::default().title(title).borders(Borders::ALL)) .gauge_style(Style::default().fg(color)) .label(Span::raw(label)) .ratio(progress) } /// Throughput paragraph fn throughput_paragraph<'a>(throughput: &Vec<f64>, name: &'static str) -> Paragraph<'a> { // Throughput average/high/low texts let throughput_texts = statis_spans(throughput, "tokens/secs"); // Throughput block Paragraph::new(throughput_texts).block( Block::default() .title(Span::raw(format!("{name} Throughput"))) .borders(Borders::ALL), ) } /// Latency paragraph fn latency_paragraph<'a>(latency: &mut Vec<f64>, name: &'static str) -> Paragraph<'a> { // Latency average/high/low texts let mut latency_texts = statis_spans(latency, "ms"); // Sort latency for percentiles float_ord::sort(latency); let latency_percentiles = crate::utils::percentiles(latency, &[50, 90, 99]); // Latency p50/p90/p99 texts let colors = vec![Color::LightGreen, Color::LightYellow, Color::LightRed]; for (i, (name, value)) in latency_percentiles.iter().enumerate() { let span = Line::from(vec![Span::styled( format!("{name}: {value:.2} ms"), Style::default().fg(colors[i]), )]); latency_texts.push(span); } Paragraph::new(latency_texts).block( Block::default() .title(Span::raw(format!("{name} Latency"))) .borders(Borders::ALL), ) } /// Average/High/Low spans fn statis_spans<'a>(data: &Vec<f64>, unit: &'static str) -> Vec<Line<'a>> { vec![ Line::from(vec![Span::styled( format!( "Average: {:.2} {unit}", data.iter().sum::<f64>() / data.len() as f64 ), Style::default().fg(Color::LightBlue), )]), Line::from(vec![Span::styled( format!( "Lowest: {:.2} {unit}", data.iter() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN) ), Style::default().fg(Color::Reset), 
)]), Line::from(vec![Span::styled( format!( "Highest: {:.2} {unit}", data.iter() .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN) ), Style::default().fg(Color::Reset), )]), ] } /// Latency histogram data fn latency_histogram_data(latency: &[f64], bins: usize) -> Vec<(String, u64)> { let histo_data: Vec<(String, u64)> = { let histo = crate::utils::histogram(latency, bins); histo .into_iter() .map(|(label, v)| (format!("{label:.2}"), v as u64)) .collect() }; histo_data } /// Latency Histogram fn latency_histogram<'a>( histo_data_str: &'a Vec<(&'a str, u64)>, name: &'static str, ) -> BarChart<'a> { BarChart::default() .block( Block::default() .title(format!("{name} latency histogram")) .style(Style::default().fg(Color::LightYellow).bg(Color::Reset)) .borders(Borders::ALL), ) .data(histo_data_str.as_slice()) } /// Latency/Throughput chart fn latency_throughput_chart<'a>( latency_throughput: &'a Vec<(f64, f64)>, batch_sizes: &'a [u32], zoom: bool, name: &'static str, ) -> Chart<'a> { let latency_iter = latency_throughput.iter().map(|(l, _)| l); let throughput_iter = latency_throughput.iter().map(|(_, t)| t); // Get extreme values let min_latency: f64 = *latency_iter .clone() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); let max_latency: f64 = *latency_iter .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); let min_throughput: f64 = *throughput_iter .clone() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); let max_throughput: f64 = *throughput_iter .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); // Char min max values let min_x = if zoom { ((min_latency - 0.05 * min_latency) / 100.0).floor() * 100.0 } else { 0.0 }; let max_x = ((max_latency + 0.05 * max_latency) / 100.0).ceil() * 100.0; let step_x = (max_x - min_x) / 4.0; // Chart min max values let min_y = if zoom { ((min_throughput - 0.05 * min_throughput) / 100.0).floor() * 100.0 } else { 0.0 }; let max_y = ((max_throughput + 0.05 * max_throughput) / 100.0).ceil() * 100.0; let step_y = (max_y - min_y) / 4.0; // Labels let mut x_labels = vec![Span::styled( format!("{min_x:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )]; for i in 0..3 { x_labels.push(Span::styled( format!("{:.2}", min_x + ((i + 1) as f64 * step_x)), Style::default().fg(Color::Gray).bg(Color::Reset), )); } x_labels.push(Span::styled( format!("{max_x:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )); // Labels let mut y_labels = vec![Span::styled( format!("{min_y:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )]; for i in 0..3 { y_labels.push(Span::styled( format!("{:.2}", min_y + ((i + 1) as f64 * step_y)), Style::default().fg(Color::Gray).bg(Color::Reset), )); } y_labels.push(Span::styled( format!("{max_y:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )); // Chart dataset let colors = color_vec(); let datasets: Vec<Dataset> = (0..latency_throughput.len()) .map(|i| { let color_idx = i % colors.len(); Dataset::default() .name(batch_sizes[i].to_string()) .marker(symbols::Marker::Block) .style(Style::default().fg(colors[color_idx])) .graph_type(GraphType::Scatter) .data(&latency_throughput[i..(i + 1)]) }) .collect(); // Chart Chart::new(datasets) .style(Style::default().fg(Color::Cyan).bg(Color::Reset)) .block( Block::default() .title(Span::styled( format!("{name} throughput over latency"), Style::default().fg(Color::Gray).bg(Color::Reset), )) 
.borders(Borders::ALL), ) .x_axis( Axis::default() .title("ms") .style(Style::default().fg(Color::Gray).bg(Color::Reset)) .labels(x_labels) .bounds([min_x, max_x]), ) .y_axis( Axis::default() .title("tokens/secs") .style(Style::default().fg(Color::Gray).bg(Color::Reset)) .labels(y_labels) .bounds([min_y, max_y]), ) } // Colors for latency/throughput chart fn color_vec() -> Vec<Color> { vec![ Color::Red, Color::Green, Color::Yellow, Color::Blue, Color::Magenta, Color::Cyan, Color::Gray, Color::DarkGray, Color::LightRed, Color::LightGreen, Color::LightYellow, Color::LightBlue, Color::LightMagenta, Color::LightCyan, ] }
0
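Data::end_batch in app.rs reduces each batch size to a single (average latency, average throughput) point for the scatter charts. The same reduction in Python, with made-up numbers:

```python
# One scatter point per batch size: mean latency (ms) paired with mean throughput.
def end_batch_point(latencies_ms, throughputs):
    return (sum(latencies_ms) / len(latencies_ms),
            sum(throughputs) / len(throughputs))


print(end_batch_point([101.2, 98.7, 100.4], [79.0, 81.1, 80.3]))
```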
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/utils.rs
/// MIT License // // Copyright (c) 2020 hatoo // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. use std::collections::BTreeMap; pub(crate) fn histogram(values: &[f64], bins: usize) -> Vec<(f64, usize)> { assert!(bins >= 2); let mut bucket: Vec<usize> = vec![0; bins]; let min = values.iter().collect::<average::Min>().min(); let max = values.iter().collect::<average::Max>().max(); let step = (max - min) / (bins - 1) as f64; for &v in values { let i = std::cmp::min(((v - min) / step).ceil() as usize, bins - 1); bucket[i] += 1; } bucket .into_iter() .enumerate() .map(|(i, v)| (min + step * i as f64, v)) .collect() } pub(crate) fn percentiles(values: &[f64], pecents: &[i32]) -> BTreeMap<String, f64> { pecents .iter() .map(|&p| { let i = (f64::from(p) / 100.0 * values.len() as f64) as usize; (format!("p{p}"), *values.get(i).unwrap_or(&std::f64::NAN)) }) .collect() }
0
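The histogram helper in utils.rs spreads values over `bins` buckets whose left edges start at the minimum and advance by (max - min) / (bins - 1). A direct Python rendering, assuming at least two distinct input values so the step is non-zero:

```python
# Same binning rule as utils.rs: clamp the ceil'd bucket index to the last bin.
import math


def histogram(values, bins):
    assert bins >= 2
    lo, hi = min(values), max(values)
    step = (hi - lo) / (bins - 1)
    bucket = [0] * bins
    for v in values:
        i = min(math.ceil((v - lo) / step), bins - 1)
        bucket[i] += 1
    return [(lo + step * i, count) for i, count in enumerate(bucket)]


print(histogram([1.0, 1.5, 2.0, 3.5, 4.0], bins=4))
```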
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/docs/index.html
<html> <head> <!-- Load the latest Swagger UI code and style from npm using unpkg.com --> <script src="https://unpkg.com/swagger-ui-dist@3/swagger-ui-bundle.js"></script> <link rel="stylesheet" type="text/css" href="https://unpkg.com/swagger-ui-dist@3/swagger-ui.css"/> <title>Text Generation Inference API</title> </head> <body> <div id="swagger-ui"></div> <!-- Div to hold the UI component --> <script> window.onload = function () { // Begin Swagger UI call region const ui = SwaggerUIBundle({ url: "openapi.json", //Location of Open API spec in the repo dom_id: '#swagger-ui', deepLinking: true, supportedSubmitMethods: [], presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], plugins: [ SwaggerUIBundle.plugins.DownloadUrl ], }) window.ui = ui } </script> </body> </html>
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/docs/openapi.json
{ "openapi": "3.0.3", "info": { "title": "Text Generation Inference", "description": "Text Generation Webserver", "contact": { "name": "Olivier Dehaene" }, "license": { "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0" }, "version": "1.1.1" }, "paths": { "/": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate tokens if `stream == false` or a stream of token if `stream == true`", "description": "Generate tokens if `stream == false` or a stream of token if `stream == true`", "operationId": "compat_generate", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/CompatGenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Text", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateResponse" } }, "text/event-stream": { "schema": { "$ref": "#/components/schemas/StreamResponse" } } } }, "422": { "description": "Input validation error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/generate": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate tokens", "description": "Generate tokens", "operationId": "generate", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Text", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateResponse" } } } }, "422": { "description": "Input validation error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/generate_stream": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate a stream of token using Server-Sent Events", "description": "Generate a stream of token using Server-Sent Events", "operationId": "generate_stream", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Text", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/StreamResponse" } } 
} }, "422": { "description": "Input validation error", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/health": { "get": { "tags": [ "Text Generation Inference" ], "summary": "Health check method", "description": "Health check method", "operationId": "health", "responses": { "200": { "description": "Everything is working fine" }, "503": { "description": "Text generation inference is down", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "unhealthy", "error_type": "healthcheck" } } } } } } }, "/info": { "get": { "tags": [ "Text Generation Inference" ], "summary": "Text Generation Inference endpoint info", "description": "Text Generation Inference endpoint info", "operationId": "get_model_info", "responses": { "200": { "description": "Served model info", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/Info" } } } } } } }, "/metrics": { "get": { "tags": [ "Text Generation Inference" ], "summary": "Prometheus metrics scrape endpoint", "description": "Prometheus metrics scrape endpoint", "operationId": "metrics", "responses": { "200": { "description": "Prometheus Metrics", "content": { "text/plain": { "schema": { "type": "string" } } } } } } } }, "components": { "schemas": { "BestOfSequence": { "type": "object", "required": [ "generated_text", "finish_reason", "generated_tokens", "prefill", "tokens" ], "properties": { "finish_reason": { "$ref": "#/components/schemas/FinishReason" }, "generated_text": { "type": "string", "example": "test" }, "generated_tokens": { "type": "integer", "format": "int32", "example": 1, "minimum": 0 }, "prefill": { "type": "array", "items": { "$ref": "#/components/schemas/PrefillToken" } }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0 }, "tokens": { "type": "array", "items": { "$ref": "#/components/schemas/Token" } }, "top_tokens": { "type": "array", "items": { "type": "array", "items": { "$ref": "#/components/schemas/Token" } } } } }, "CompatGenerateRequest": { "type": "object", "required": [ "inputs" ], "properties": { "inputs": { "type": "string", "example": "My name is Olivier and I" }, "parameters": { "$ref": "#/components/schemas/GenerateParameters" }, "stream": { "type": "boolean", "default": "false" } } }, "Details": { "type": "object", "required": [ "finish_reason", "generated_tokens", "prefill", "tokens" ], "properties": { "best_of_sequences": { "type": "array", "items": { "$ref": "#/components/schemas/BestOfSequence" }, "nullable": true }, "finish_reason": { "$ref": "#/components/schemas/FinishReason" }, "generated_tokens": { "type": "integer", "format": "int32", "example": 1, "minimum": 0 }, "prefill": { "type": "array", "items": { "$ref": "#/components/schemas/PrefillToken" } }, "seed": { "type": "integer", 
"format": "int64", "example": 42, "nullable": true, "minimum": 0 }, "tokens": { "type": "array", "items": { "$ref": "#/components/schemas/Token" } }, "top_tokens": { "type": "array", "items": { "type": "array", "items": { "$ref": "#/components/schemas/Token" } } } } }, "ErrorResponse": { "type": "object", "required": [ "error", "error_type" ], "properties": { "error": { "type": "string" }, "error_type": { "type": "string" } } }, "FinishReason": { "type": "string", "enum": [ "length", "eos_token", "stop_sequence" ] }, "GenerateParameters": { "type": "object", "properties": { "best_of": { "type": "integer", "default": "null", "example": 1, "nullable": true, "minimum": 0, "exclusiveMinimum": 0 }, "decoder_input_details": { "type": "boolean", "default": "true" }, "details": { "type": "boolean", "default": "true" }, "do_sample": { "type": "boolean", "default": "false", "example": true }, "max_new_tokens": { "type": "integer", "format": "int32", "default": "null", "example": "20", "nullable": true, "minimum": 0 }, "repetition_penalty": { "type": "number", "format": "float", "default": "null", "example": 1.03, "nullable": true, "exclusiveMinimum": 0 }, "return_full_text": { "type": "boolean", "default": "null", "example": false, "nullable": true }, "seed": { "type": "integer", "format": "int64", "default": "null", "example": "null", "nullable": true, "minimum": 0, "exclusiveMinimum": 0 }, "stop": { "type": "array", "items": { "type": "string" }, "example": [ "photographer" ], "maxItems": 4 }, "temperature": { "type": "number", "format": "float", "default": "null", "example": 0.5, "nullable": true, "exclusiveMinimum": 0 }, "top_k": { "type": "integer", "format": "int32", "default": "null", "example": 10, "nullable": true, "exclusiveMinimum": 0 }, "top_n_tokens": { "type": "integer", "format": "int32", "default": "null", "example": 5, "nullable": true, "minimum": 0, "exclusiveMinimum": 0 }, "top_p": { "type": "number", "format": "float", "default": "null", "example": 0.95, "nullable": true, "maximum": 1, "exclusiveMinimum": 0 }, "truncate": { "type": "integer", "default": "null", "example": "null", "nullable": true, "minimum": 0 }, "typical_p": { "type": "number", "format": "float", "default": "null", "example": 0.95, "nullable": true, "maximum": 1, "exclusiveMinimum": 0 }, "watermark": { "type": "boolean", "default": "false", "example": true } } }, "GenerateRequest": { "type": "object", "required": [ "inputs" ], "properties": { "inputs": { "type": "string", "example": "My name is Olivier and I" }, "parameters": { "$ref": "#/components/schemas/GenerateParameters" } } }, "GenerateResponse": { "type": "object", "required": [ "generated_text" ], "properties": { "details": { "allOf": [ { "$ref": "#/components/schemas/Details" } ], "nullable": true }, "generated_text": { "type": "string", "example": "test" } } }, "Info": { "type": "object", "required": [ "model_id", "model_dtype", "model_device_type", "max_concurrent_requests", "max_best_of", "max_stop_sequences", "max_input_length", "max_total_tokens", "waiting_served_ratio", "max_batch_total_tokens", "max_waiting_tokens", "validation_workers", "version" ], "properties": { "docker_label": { "type": "string", "example": "null", "nullable": true }, "max_batch_total_tokens": { "type": "integer", "format": "int32", "example": "32000", "minimum": 0 }, "max_best_of": { "type": "integer", "example": "2", "minimum": 0 }, "max_concurrent_requests": { "type": "integer", "description": "Router Parameters", "example": "128", "minimum": 0 }, "max_input_length": { 
"type": "integer", "example": "1024", "minimum": 0 }, "max_stop_sequences": { "type": "integer", "example": "4", "minimum": 0 }, "max_total_tokens": { "type": "integer", "example": "2048", "minimum": 0 }, "max_waiting_tokens": { "type": "integer", "example": "20", "minimum": 0 }, "model_device_type": { "type": "string", "example": "cuda" }, "model_dtype": { "type": "string", "example": "torch.float16" }, "model_id": { "type": "string", "description": "Model info", "example": "bigscience/blomm-560m" }, "model_pipeline_tag": { "type": "string", "example": "text-generation", "nullable": true }, "model_sha": { "type": "string", "example": "e985a63cdc139290c5f700ff1929f0b5942cced2", "nullable": true }, "sha": { "type": "string", "example": "null", "nullable": true }, "validation_workers": { "type": "integer", "example": "2", "minimum": 0 }, "version": { "type": "string", "description": "Router Info", "example": "0.5.0" }, "waiting_served_ratio": { "type": "number", "format": "float", "example": "1.2" } } }, "PrefillToken": { "type": "object", "required": [ "id", "text", "logprob" ], "properties": { "id": { "type": "integer", "format": "int32", "example": 0, "minimum": 0 }, "logprob": { "type": "number", "format": "float", "example": -0.34, "nullable": true }, "text": { "type": "string", "example": "test" } } }, "StreamDetails": { "type": "object", "required": [ "finish_reason", "generated_tokens" ], "properties": { "finish_reason": { "$ref": "#/components/schemas/FinishReason" }, "generated_tokens": { "type": "integer", "format": "int32", "example": 1, "minimum": 0 }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0 } } }, "StreamResponse": { "type": "object", "required": [ "token" ], "properties": { "details": { "allOf": [ { "$ref": "#/components/schemas/StreamDetails" } ], "default": "null", "nullable": true }, "generated_text": { "type": "string", "default": "null", "example": "test", "nullable": true }, "token": { "$ref": "#/components/schemas/Token" }, "top_tokens": { "type": "array", "items": { "$ref": "#/components/schemas/Token" } } } }, "Token": { "type": "object", "required": [ "id", "text", "logprob", "special" ], "properties": { "id": { "type": "integer", "format": "int32", "example": 0, "minimum": 0 }, "logprob": { "type": "number", "format": "float", "example": -0.34, "nullable": true }, "special": { "type": "boolean", "example": "false" }, "text": { "type": "string", "example": "test" } } } } }, "tags": [ { "name": "Text Generation Inference", "description": "Hugging Face Text Generation Inference API" } ] }
0
hf_public_repos/text-generation-inference/docs
hf_public_repos/text-generation-inference/docs/source/supported_models.md
# Supported Models and Hardware Text Generation Inference enables serving optimized models on specific hardware for the highest performance. The following sections list which models and hardware are supported. ## Supported Models The following models are optimized and can be served with TGI, which uses custom CUDA kernels for better inference. You can add the flag `--disable-custom-kernels` at the end of the `docker run` command if you wish to disable them. - [BLOOM](https://huggingface.co/bigscience/bloom) - [FLAN-T5](https://huggingface.co/google/flan-t5-xxl) - [Galactica](https://huggingface.co/facebook/galactica-120b) - [GPT-Neox](https://huggingface.co/EleutherAI/gpt-neox-20b) - [Llama](https://github.com/facebookresearch/llama) - [OPT](https://huggingface.co/facebook/opt-66b) - [SantaCoder](https://huggingface.co/bigcode/santacoder) - [Starcoder](https://huggingface.co/bigcode/starcoder) - [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b) - [Falcon 40B](https://huggingface.co/tiiuae/falcon-40b) - [MPT](https://huggingface.co/mosaicml/mpt-30b) - [Llama V2](https://huggingface.co/meta-llama) - [Code Llama](https://huggingface.co/codellama) - [Mistral](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) If the above list lacks the model you would like to serve, depending on the model's pipeline type, you can try to initialize and serve the model anyway to see how well it performs, but performance isn't guaranteed for non-optimized models: ```python # for causal LMs/text-generation models AutoModelForCausalLM.from_pretrained(<model>, device_map="auto") # or, for text-to-text generation models AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto") ``` If you wish to serve a supported model that already exists on a local folder, just point to the local folder. ```bash text-generation-launcher --model-id <PATH-TO-LOCAL-BLOOM> ``` ## Supported Hardware TGI optimized models are supported on NVIDIA [A100](https://www.nvidia.com/en-us/data-center/a100/), [A10G](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) and [T4](https://www.nvidia.com/en-us/data-center/tesla-t4/) GPUs with CUDA 11.8+. Note that you have to install [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) to use it. For other NVIDIA GPUs, continuous batching will still apply, but some operations like flash attention and paged attention will not be executed. TGI also supports RoCm-enabled AMD Instinct MI210 and MI250 GPUs, with paged attention and flash attention v2 support. The following features are missing from the RoCm version of TGI: quantization and flash [layer norm kernel](https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm). TGI is also supported on the following AI hardware accelerators: - *Habana first-gen Gaudi and Gaudi2:* check out this [example](https://github.com/huggingface/optimum-habana/tree/main/text-generation-inference) showing how to serve models with TGI on Gaudi and Gaudi2 with [Optimum Habana](https://huggingface.co/docs/optimum/habana/index)
0
hf_public_repos/text-generation-inference/docs
hf_public_repos/text-generation-inference/docs/source/installation.md
# Installation This section explains how to install the CLI tool as well as how to install TGI from source. **The strongly recommended approach is to use Docker, as it does not require much setup. Check [the Quick Tour](./quicktour) to learn how to run TGI with Docker.** ## Install CLI You can use the TGI command-line interface (CLI) to download weights, serve and quantize models, or get information on serving parameters. To install the CLI, you need to first clone the TGI repository and then run `make`. ```bash git clone https://github.com/huggingface/text-generation-inference.git && cd text-generation-inference make install ``` If you would like to serve models with custom kernels, run ```bash BUILD_EXTENSIONS=True make install ``` ## Local Installation from Source Before you start, you will need to set up your environment and install Text Generation Inference. Text Generation Inference is tested on **Python 3.9+**. Text Generation Inference is available on PyPI, conda and GitHub. To install and launch locally, first [install Rust](https://rustup.rs/) and create a Python virtual environment with at least Python 3.9, e.g. using conda: ```bash curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh conda create -n text-generation-inference python=3.9 conda activate text-generation-inference ``` You may also need to install Protoc. On Linux: ```bash PROTOC_ZIP=protoc-21.12-linux-x86_64.zip curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*' rm -f $PROTOC_ZIP ``` On macOS, using Homebrew: ```bash brew install protobuf ``` Then run the following to install Text Generation Inference: ```bash git clone https://github.com/huggingface/text-generation-inference.git && cd text-generation-inference BUILD_EXTENSIONS=True make install ``` <Tip warning={true}> On some machines, you may also need the OpenSSL libraries and gcc. On Linux machines, run: ```bash sudo apt-get install libssl-dev gcc -y ``` </Tip> Once installation is done, simply run: ```bash make run-falcon-7b-instruct ``` This will serve the Falcon 7B Instruct model on port 8080, which you can then query.
0
hf_public_repos/text-generation-inference/docs
hf_public_repos/text-generation-inference/docs/source/_toctree.yml
- sections: - local: index title: Text Generation Inference - local: quicktour title: Quick Tour - local: installation title: Installation - local: supported_models title: Supported Models and Hardware title: Getting started - sections: - local: basic_tutorials/consuming_tgi title: Consuming TGI - local: basic_tutorials/preparing_model title: Preparing Model for Serving - local: basic_tutorials/gated_model_access title: Serving Private & Gated Models - local: basic_tutorials/using_cli title: Using TGI CLI - local: basic_tutorials/launcher title: All TGI CLI options - local: basic_tutorials/non_core_models title: Non-core Model Serving title: Tutorials - sections: - local: conceptual/streaming title: Streaming - local: conceptual/quantization title: Quantization - local: conceptual/tensor_parallelism title: Tensor Parallelism - local: conceptual/paged_attention title: PagedAttention - local: conceptual/safetensors title: Safetensors - local: conceptual/flash_attention title: Flash Attention title: Conceptual Guides
0
hf_public_repos/text-generation-inference/docs
hf_public_repos/text-generation-inference/docs/source/index.md
# Text Generation Inference Text Generation Inference (TGI) is a toolkit for deploying and serving Large Language Models (LLMs). TGI enables high-performance text generation for the most popular open-source LLMs, including Llama, Falcon, StarCoder, BLOOM, GPT-NeoX, and T5. ![Text Generation Inference](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/TGI.png) Text Generation Inference implements many optimizations and features, such as: - Simple launcher to serve most popular LLMs - Production ready (distributed tracing with Open Telemetry, Prometheus metrics) - Tensor Parallelism for faster inference on multiple GPUs - Token streaming using Server-Sent Events (SSE) - Continuous batching of incoming requests for increased total throughput - Optimized transformers code for inference using [Flash Attention](https://github.com/HazyResearch/flash-attention) and [Paged Attention](https://github.com/vllm-project/vllm) on the most popular architectures - Quantization with [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) and [GPT-Q](https://arxiv.org/abs/2210.17323) - [Safetensors](https://github.com/huggingface/safetensors) weight loading - Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) - Logits warper (temperature scaling, top-p, top-k, repetition penalty) - Stop sequences - Log probabilities - Custom Prompt Generation: Easily generate text by providing custom prompts to guide the model's output. - Fine-tuning Support: Utilize fine-tuned models for specific tasks to achieve higher accuracy and performance. Text Generation Inference is used in production by multiple projects, such as: - [Hugging Chat](https://github.com/huggingface/chat-ui), an open-source interface for open-access models, such as Open Assistant and Llama - [OpenAssistant](https://open-assistant.io/), an open-source community effort to train LLMs in the open - [nat.dev](http://nat.dev/), a playground to explore and compare LLMs.
0
hf_public_repos/text-generation-inference/docs
hf_public_repos/text-generation-inference/docs/source/quicktour.md
# Quick Tour The easiest way of getting started is using the official Docker container. Install Docker following [their installation instructions](https://docs.docker.com/get-docker/). Let's say you want to deploy [Falcon-7B Instruct](https://huggingface.co/tiiuae/falcon-7b-instruct) model with TGI. Here is an example on how to do that: ```bash model=tiiuae/falcon-7b-instruct volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.1.1 --model-id $model ``` <Tip warning={true}> To use GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) . We also recommend using NVIDIA drivers with CUDA version 11.8 or higher. To use TGI on RoCm-enabled AMD GPUs (only MI210 and MI250 are tested), please use the image `ghcr.io/huggingface/text-generation-inference:1.1.1+rocm` instead. For details about the usage on RoCm, please refer to the [Supported Hardware section](./supported_models#supported-hardware) and [AMD documentation](https://rocm.docs.amd.com/en/latest/deploy/docker.html). </Tip> Once TGI is running, you can use the `generate` endpoint by doing requests. To learn more about how to query the endpoints, check the [Consuming TGI](./basic_tutorials/consuming_tgi) section, where we show examples with utility libraries and UIs. Below you can see a simple snippet to query the endpoint. <inferencesnippet> <python> ```python import requests headers = { "Content-Type": "application/json", } data = { 'inputs': 'What is Deep Learning?', 'parameters': { 'max_new_tokens': 20, }, } response = requests.post('http://127.0.0.1:8080/generate', headers=headers, json=data) print(response.json()) # {'generated_text': '\n\nDeep Learning is a subset of Machine Learning that is concerned with the development of algorithms that can'} ``` </python> <js> ```js async function query() { const response = await fetch( 'http://127.0.0.1:8080/generate', { method: 'POST', headers: { 'Content-Type': 'application/json'}, body: JSON.stringify({ 'inputs': 'What is Deep Learning?', 'parameters': { 'max_new_tokens': 20 } }) } ); } query().then((response) => { console.log(JSON.stringify(response)); }); /// {"generated_text":"\n\nDeep Learning is a subset of Machine Learning that is concerned with the development of algorithms that can"} ``` </js> <curl> ```curl curl 127.0.0.1:8080/generate \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \ -H 'Content-Type: application/json' ``` </curl> </inferencesnippet> <Tip> To see all possible deploy flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more. ```bash docker run ghcr.io/huggingface/text-generation-inference:1.1.1 --help ``` </Tip>
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/basic_tutorials/non_core_models.md
# Non-core Model Serving TGI supports various LLM architectures (see the full list [here](../supported_models)). If you wish to serve a model that is not one of the supported models, TGI will fall back to the `transformers` implementation of that model. This means you will be unable to use some of the features introduced by TGI, such as tensor-parallel sharding or flash attention. However, you can still get many benefits of TGI, such as continuous batching or streaming outputs. You can serve these models using the same Docker command-line invocation as with fully supported models 👇 ```bash docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id gpt2 ``` If the model you wish to serve is a custom transformers model, and its weights and implementation are available on the Hub, you can still serve the model by passing the `--trust-remote-code` flag to the `docker run` command like below 👇 ```bash docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id <CUSTOM_MODEL_ID> --trust-remote-code ``` Finally, if the model is not on the Hugging Face Hub but available locally, you can pass the path to the folder that contains your model like below 👇 ```bash # Make sure your model is in the $volume directory docker run --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/<PATH-TO-FOLDER> ``` You can refer to the [transformers docs on custom models](https://huggingface.co/docs/transformers/main/en/custom_models) for more information.
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/basic_tutorials/launcher.md
# Text-generation-launcher arguments <!-- WRAP CODE BLOCKS --> ```shell Text Generation Launcher Usage: text-generation-launcher [OPTIONS] Options: ``` ## MODEL_ID ```shell --model-id <MODEL_ID> The name of the model to load. Can be a MODEL_ID as listed on <https://hf.co/models> like `gpt2` or `OpenAssistant/oasst-sft-1-pythia-12b`. Or it can be a local directory containing the necessary files as saved by `save_pretrained(...)` methods of transformers [env: MODEL_ID=] [default: bigscience/bloom-560m] ``` ## REVISION ```shell --revision <REVISION> The actual revision of the model if you're referring to a model on the hub. You can use a specific commit id or a branch like `refs/pr/2` [env: REVISION=] ``` ## VALIDATION_WORKERS ```shell --validation-workers <VALIDATION_WORKERS> The number of tokenizer workers used for payload validation and truncation inside the router [env: VALIDATION_WORKERS=] [default: 2] ``` ## SHARDED ```shell --sharded <SHARDED> Whether to shard the model across multiple GPUs By default text-generation-inference will use all available GPUs to run the model. Setting it to `false` deactivates `num_shard` [env: SHARDED=] [possible values: true, false] ``` ## NUM_SHARD ```shell --num-shard <NUM_SHARD> The number of shards to use if you don't want to use all GPUs on a given machine. You can use `CUDA_VISIBLE_DEVICES=0,1 text-generation-launcher... --num_shard 2` and `CUDA_VISIBLE_DEVICES=2,3 text-generation-launcher... --num_shard 2` to launch 2 copies with 2 shard each on a given machine with 4 GPUs for instance [env: NUM_SHARD=] ``` ## QUANTIZE ```shell --quantize <QUANTIZE> Whether you want the model to be quantized [env: QUANTIZE=] Possible values: - awq: 4 bit quantization. Requires a specific GTPQ quantized model: https://hf.co/models?search=awq. Should replace GPTQ models whereever possible because of the better latency - eetq: 8 bit quantization, doesn't require specific model. Should be a drop-in replacement to bitsandbytes with much better performance. Kernels are from https://github.com/NetEase-FuXi/EETQ.git - gptq: 4 bit quantization. Requires a specific GTPQ quantized model: https://hf.co/models?search=gptq. text-generation-inference will use exllama (faster) kernels whereever possible, and use triton kernel (wider support) when it's not. AWQ has faster kernels - bitsandbytes: Bitsandbytes 8bit. Can be applied on any model, will cut the memory requirement in half, but it is known that the model will be much slower to run than the native f16 - bitsandbytes-nf4: Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x, but it is known that the model will be much slower to run than the native f16 - bitsandbytes-fp4: Bitsandbytes 4bit. nf4 should be preferred in most cases but maybe this one has better perplexity performance for you model ``` ## DTYPE ```shell --dtype <DTYPE> The dtype to be forced upon the model. This option cannot be used with `--quantize` [env: DTYPE=] [possible values: float16, bfloat16] ``` ## TRUST_REMOTE_CODE ```shell --trust-remote-code Whether you want to execute hub modelling code. Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure no malicious code has been contributed in a newer revision [env: TRUST_REMOTE_CODE=] ``` ## MAX_CONCURRENT_REQUESTS ```shell --max-concurrent-requests <MAX_CONCURRENT_REQUESTS> The maximum amount of concurrent requests for this particular deployment. 
Having a low limit will refuse clients requests instead of having them wait for too long and is usually good to handle backpressure correctly [env: MAX_CONCURRENT_REQUESTS=] [default: 128] ``` ## MAX_BEST_OF ```shell --max-best-of <MAX_BEST_OF> This is the maximum allowed value for clients to set `best_of`. Best of makes `n` generations at the same time, and return the best in terms of overall log probability over the entire generated sequence [env: MAX_BEST_OF=] [default: 2] ``` ## MAX_STOP_SEQUENCES ```shell --max-stop-sequences <MAX_STOP_SEQUENCES> This is the maximum allowed value for clients to set `stop_sequences`. Stop sequences are used to allow the model to stop on more than just the EOS token, and enable more complex "prompting" where users can preprompt the model in a specific way and define their "own" stop token aligned with their prompt [env: MAX_STOP_SEQUENCES=] [default: 4] ``` ## MAX_TOP_N_TOKENS ```shell --max-top-n-tokens <MAX_TOP_N_TOKENS> This is the maximum allowed value for clients to set `top_n_tokens`. `top_n_tokens is used to return information about the the `n` most likely tokens at each generation step, instead of just the sampled token. This information can be used for downstream tasks like for classification or ranking [env: MAX_TOP_N_TOKENS=] [default: 5] ``` ## MAX_INPUT_LENGTH ```shell --max-input-length <MAX_INPUT_LENGTH> This is the maximum allowed input length (expressed in number of tokens) for users. The larger this value, the longer prompt users can send which can impact the overall memory required to handle the load. Please note that some models have a finite range of sequence they can handle [env: MAX_INPUT_LENGTH=] [default: 1024] ``` ## MAX_TOTAL_TOKENS ```shell --max-total-tokens <MAX_TOTAL_TOKENS> This is the most important value to set as it defines the "memory budget" of running clients requests. Clients will send input sequences and ask to generate `max_new_tokens` on top. with a value of `1512` users can send either a prompt of `1000` and ask for `512` new tokens, or send a prompt of `1` and ask for `1511` max_new_tokens. The larger this value, the larger amount each request will be in your RAM and the less effective batching can be [env: MAX_TOTAL_TOKENS=] [default: 2048] ``` ## WAITING_SERVED_RATIO ```shell --waiting-served-ratio <WAITING_SERVED_RATIO> This represents the ratio of waiting queries vs running queries where you want to start considering pausing the running queries to include the waiting ones into the same batch. `waiting_served_ratio=1.2` Means when 12 queries are waiting and there's only 10 queries left in the current batch we check if we can fit those 12 waiting queries into the batching strategy, and if yes, then batching happens delaying the 10 running queries by a `prefill` run. This setting is only applied if there is room in the batch as defined by `max_batch_total_tokens`. [env: WAITING_SERVED_RATIO=] [default: 1.2] ``` ## MAX_BATCH_PREFILL_TOKENS ```shell --max-batch-prefill-tokens <MAX_BATCH_PREFILL_TOKENS> Limits the number of tokens for the prefill operation. Since this operation take the most memory and is compute bound, it is interesting to limit the number of requests that can be sent [env: MAX_BATCH_PREFILL_TOKENS=] [default: 4096] ``` ## MAX_BATCH_TOTAL_TOKENS ```shell --max-batch-total-tokens <MAX_BATCH_TOTAL_TOKENS> **IMPORTANT** This is one critical control to allow maximum usage of the available hardware. This represents the total amount of potential tokens within a batch. 
When using padding (not recommended) this would be equivalent of `batch_size` * `max_total_tokens`. However in the non-padded (flash attention) version this can be much finer. For `max_batch_total_tokens=1000`, you could fit `10` queries of `total_tokens=100` or a single query of `1000` tokens. Overall this number should be the largest possible amount that fits the remaining memory (after the model is loaded). Since the actual memory overhead depends on other parameters like if you're using quantization, flash attention or the model implementation, text-generation-inference cannot infer this number automatically. [env: MAX_BATCH_TOTAL_TOKENS=] ``` ## MAX_WAITING_TOKENS ```shell --max-waiting-tokens <MAX_WAITING_TOKENS> This setting defines how many tokens can be passed before forcing the waiting queries to be put on the batch (if the size of the batch allows for it). New queries require 1 `prefill` forward, which is different from `decode` and therefore you need to pause the running batch in order to run `prefill` to create the correct values for the waiting queries to be able to join the batch. With a value too small, queries will always "steal" the compute to run `prefill` and running queries will be delayed by a lot. With a value too big, waiting queries could wait for a very long time before being allowed a slot in the running batch. If your server is busy that means that requests that could run in ~2s on an empty server could end up running in ~20s because the query had to wait for 18s. This number is expressed in number of tokens to make it a bit more "model" agnostic, but what should really matter is the overall latency for end users. [env: MAX_WAITING_TOKENS=] [default: 20] ``` ## HOSTNAME ```shell --hostname <HOSTNAME> The IP address to listen on [env: HOSTNAME=] [default: 0.0.0.0] ``` ## PORT ```shell -p, --port <PORT> The port to listen on [env: PORT=] [default: 3000] ``` ## SHARD_UDS_PATH ```shell --shard-uds-path <SHARD_UDS_PATH> The name of the socket for gRPC communication between the webserver and the shards [env: SHARD_UDS_PATH=] [default: /tmp/text-generation-server] ``` ## MASTER_ADDR ```shell --master-addr <MASTER_ADDR> The address the master shard will listen on. (setting used by torch distributed) [env: MASTER_ADDR=] [default: localhost] ``` ## MASTER_PORT ```shell --master-port <MASTER_PORT> The address the master port will listen on. (setting used by torch distributed) [env: MASTER_PORT=] [default: 29500] ``` ## HUGGINGFACE_HUB_CACHE ```shell --huggingface-hub-cache <HUGGINGFACE_HUB_CACHE> The location of the huggingface hub cache. Used to override the location if you want to provide a mounted disk for instance [env: HUGGINGFACE_HUB_CACHE=] ``` ## WEIGHTS_CACHE_OVERRIDE ```shell --weights-cache-override <WEIGHTS_CACHE_OVERRIDE> The location of the huggingface hub cache. Used to override the location if you want to provide a mounted disk for instance [env: WEIGHTS_CACHE_OVERRIDE=] ``` ## DISABLE_CUSTOM_KERNELS ```shell --disable-custom-kernels For some models (like bloom), text-generation-inference implemented custom cuda kernels to speed up inference. Those kernels were only tested on A100. Use this flag to disable them if you're running on different hardware and encounter issues [env: DISABLE_CUSTOM_KERNELS=] ``` ## CUDA_MEMORY_FRACTION ```shell --cuda-memory-fraction <CUDA_MEMORY_FRACTION> Limit the CUDA available memory. 
The allowed value equals the total visible memory multiplied by cuda-memory-fraction [env: CUDA_MEMORY_FRACTION=] [default: 1.0] ``` ## ROPE_SCALING ```shell --rope-scaling <ROPE_SCALING> Rope scaling will only be used for RoPE models and allow rescaling the position rotary to accomodate for larger prompts. Goes together with `rope_factor`. `--rope-factor 2.0` gives linear scaling with a factor of 2.0 `--rope-scaling dynamic` gives dynamic scaling with a factor of 1.0 `--rope-scaling linear` gives linear scaling with a factor of 1.0 (Nothing will be changed basically) `--rope-scaling linear --rope-factor` fully describes the scaling you want [env: ROPE_SCALING=] [possible values: linear, dynamic] ``` ## ROPE_FACTOR ```shell --rope-factor <ROPE_FACTOR> Rope scaling will only be used for RoPE models See `rope_scaling` [env: ROPE_FACTOR=] ``` ## JSON_OUTPUT ```shell --json-output Outputs the logs in JSON format (useful for telemetry) [env: JSON_OUTPUT=] ``` ## OTLP_ENDPOINT ```shell --otlp-endpoint <OTLP_ENDPOINT> [env: OTLP_ENDPOINT=] ``` ## CORS_ALLOW_ORIGIN ```shell --cors-allow-origin <CORS_ALLOW_ORIGIN> [env: CORS_ALLOW_ORIGIN=] ``` ## WATERMARK_GAMMA ```shell --watermark-gamma <WATERMARK_GAMMA> [env: WATERMARK_GAMMA=] ``` ## WATERMARK_DELTA ```shell --watermark-delta <WATERMARK_DELTA> [env: WATERMARK_DELTA=] ``` ## NGROK ```shell --ngrok Enable ngrok tunneling [env: NGROK=] ``` ## NGROK_AUTHTOKEN ```shell --ngrok-authtoken <NGROK_AUTHTOKEN> ngrok authentication token [env: NGROK_AUTHTOKEN=] ``` ## NGROK_EDGE ```shell --ngrok-edge <NGROK_EDGE> ngrok edge [env: NGROK_EDGE=] ``` ## ENV ```shell -e, --env Display a lot of information about your runtime environment ``` ## HELP ```shell -h, --help Print help (see a summary with '-h') ``` ## VERSION ```shell -V, --version Print version ```
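Putting several of these options together, a typical launch might look like the sketch below. The model ID and numeric values are placeholders to adapt to your model and hardware; every flag shown is documented above.

```shell
# Sketch combining several of the flags documented above; the model ID and
# values are placeholders, not recommendations.
text-generation-launcher \
    --model-id tiiuae/falcon-7b-instruct \
    --num-shard 2 \
    --quantize bitsandbytes \
    --max-input-length 1024 \
    --max-total-tokens 2048 \
    --max-batch-prefill-tokens 4096 \
    --port 8080
```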
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/basic_tutorials/consuming_tgi.md
# Consuming Text Generation Inference There are many ways you can consume the Text Generation Inference server in your applications. After launching, you can use the `/generate` route and make a `POST` request to get results from the server. You can also use the `/generate_stream` route if you want TGI to return a stream of tokens. You can make the requests using the tool of your preference, such as curl, Python or TypeScript. For a complete end-to-end experience, we also open-sourced ChatUI, a chat interface for open-source models. ## curl After the launch, you can query the model using either the `/generate` or `/generate_stream` routes: ```bash curl 127.0.0.1:8080/generate \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \ -H 'Content-Type: application/json' ``` ## Inference Client [`huggingface-hub`](https://huggingface.co/docs/huggingface_hub/main/en/index) is a Python library to interact with the Hugging Face Hub, including its endpoints. It provides a nice high-level class, [`~huggingface_hub.InferenceClient`], which makes it easy to make calls to a TGI endpoint. `InferenceClient` also takes care of parameter validation and provides a simple-to-use interface. You can simply install the `huggingface-hub` package with pip. ```bash pip install huggingface-hub ``` Once you start the TGI server, instantiate `InferenceClient()` with the URL to the endpoint serving the model. You can then call `text_generation()` to hit the endpoint through Python. ```python from huggingface_hub import InferenceClient client = InferenceClient(model="http://127.0.0.1:8080") client.text_generation(prompt="Write a code for snake game") ``` You can do streaming with `InferenceClient` by passing `stream=True`. Streaming will return tokens as they are being generated on the server. To use streaming, you can do as follows: ```python for token in client.text_generation("How do you make cheese?", max_new_tokens=12, stream=True): print(token) ``` Another parameter you can use with the TGI backend is `details`. You can get more details on generation (tokens, probabilities, etc.) by setting `details` to `True`. When it's specified, TGI will return a `TextGenerationResponse` or `TextGenerationStreamResponse` rather than a string or stream. ```python output = client.text_generation(prompt="Meaning of life is", details=True) print(output) # TextGenerationResponse(generated_text=' a complex concept that is not always clear to the individual. It is a concept that is not always', details=Details(finish_reason=<FinishReason.Length: 'length'>, generated_tokens=20, seed=None, prefill=[], tokens=[Token(id=267, text=' a', logprob=-2.0723474, special=False), Token(id=11235, text=' complex', logprob=-3.1272552, special=False), Token(id=17908, text=' concept', logprob=-1.3632495, special=False),..)) ``` You can see how to stream below. ```python output = client.text_generation(prompt="Meaning of life is", stream=True, details=True) print(next(iter(output))) # TextGenerationStreamResponse(token=Token(id=267, text=' a', logprob=-2.0723474, special=False), generated_text=None, details=None) ``` You can check out the details of the function [here](https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation). There is also an async version of the client, `AsyncInferenceClient`, based on `asyncio` and `aiohttp`. 
You can find docs for it [here](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.AsyncInferenceClient) ## ChatUI ChatUI is an open-source interface built for LLM serving. It offers many customization options, such as web search with SERP API and more. ChatUI can automatically consume the TGI server and even provides an option to switch between different TGI endpoints. You can try it out at [Hugging Chat](https://huggingface.co/chat/), or use the [ChatUI Docker Space](https://huggingface.co/new-space?template=huggingchat/chat-ui-template) to deploy your own Hugging Chat to Spaces. To serve both ChatUI and TGI in same environment, simply add your own endpoints to the `MODELS` variable in `.env.local` file inside the `chat-ui` repository. Provide the endpoints pointing to where TGI is served. ``` { // rest of the model config here "endpoints": [{"url": "https://HOST:PORT/generate_stream"}] } ``` ![ChatUI](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chatui_screen.png) ## Gradio Gradio is a Python library that helps you build web applications for your machine learning models with a few lines of code. It has a `ChatInterface` wrapper that helps create neat UIs for chatbots. Let's take a look at how to create a chatbot with streaming mode using TGI and Gradio. Let's install Gradio and Hub Python library first. ```bash pip install huggingface-hub gradio ``` Assume you are serving your model on port 8080, we will query through [InferenceClient](consuming_tgi#inference-client). ```python import gradio as gr from huggingface_hub import InferenceClient client = InferenceClient(model="http://127.0.0.1:8080") def inference(message, history): partial_message = "" for token in client.text_generation(message, max_new_tokens=20, stream=True): partial_message += token yield partial_message gr.ChatInterface( inference, chatbot=gr.Chatbot(height=300), textbox=gr.Textbox(placeholder="Chat with me!", container=False, scale=7), description="This is the demo for Gradio UI consuming TGI endpoint with LLaMA 7B-Chat model.", title="Gradio 🤝 TGI", examples=["Are tomatoes vegetables?"], retry_btn="Retry", undo_btn="Undo", clear_btn="Clear", ).queue().launch() ``` The UI looks like this 👇 <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/gradio-tgi.png" /> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/gradio-tgi-dark.png" /> </div> You can try the demo directly here 👇 <div class="block dark:hidden"> <iframe src="https://merve-gradio-tgi-2.hf.space?__theme=light" width="850" height="750" ></iframe> </div> <div class="hidden dark:block"> <iframe src="https://merve-gradio-tgi-2.hf.space?__theme=dark" width="850" height="750" ></iframe> </div> You can disable streaming mode using `return` instead of `yield` in your inference function, like below. ```python def inference(message, history): return client.text_generation(message, max_new_tokens=20) ``` You can read more about how to customize a `ChatInterface` [here](https://www.gradio.app/guides/creating-a-chatbot-fast). ## API documentation You can consult the OpenAPI documentation of the `text-generation-inference` REST API using the `/docs` route. The Swagger UI is also available [here](https://huggingface.github.io/text-generation-inference).
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/basic_tutorials/gated_model_access.md
# Serving Private & Gated Models If the model you wish to serve is behind gated access or the model repository on Hugging Face Hub is private, and you have access to the model, you can provide your Hugging Face Hub access token. You can generate and copy a read token from [Hugging Face Hub tokens page](https://huggingface.co/settings/tokens) If you're using the CLI, set the `HUGGING_FACE_HUB_TOKEN` environment variable. For example: ``` export HUGGING_FACE_HUB_TOKEN=<YOUR READ TOKEN> ``` If you would like to do it through Docker, you can provide your token by specifying `HUGGING_FACE_HUB_TOKEN` as shown below. ```bash model=meta-llama/Llama-2-7b-chat-hf volume=$PWD/data token=<your READ token> docker run --gpus all \ --shm-size 1g \ -e HUGGING_FACE_HUB_TOKEN=$token \ -p 8080:80 \ -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.1.1 \ --model-id $model ```
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/basic_tutorials/using_cli.md
# Using TGI CLI You can use the TGI command-line interface (CLI) to download weights, serve and quantize models, or get information on serving parameters. To install the CLI, please refer to [the installation section](./installation#install-cli). `text-generation-server` lets you download the model with the `download-weights` command like below 👇 ```bash text-generation-server download-weights MODEL_HUB_ID ``` You can also use it to quantize models like below 👇 ```bash text-generation-server quantize MODEL_HUB_ID OUTPUT_DIR ``` You can use `text-generation-launcher` to serve models. ```bash text-generation-launcher --model-id MODEL_HUB_ID --port 8080 ``` There are many options and parameters you can pass to `text-generation-launcher`. The documentation for the CLI is kept minimal and relies on self-generating documentation, which you can view by running ```bash text-generation-launcher --help ``` You can also find it hosted in this [Swagger UI](https://huggingface.github.io/text-generation-inference/). The same documentation is available for `text-generation-server`. ```bash text-generation-server --help ```
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/basic_tutorials/preparing_model.md
# Preparing the Model Text Generation Inference improves the model in several aspects. ## Quantization TGI supports [bits-and-bytes](https://github.com/TimDettmers/bitsandbytes#bitsandbytes), [GPT-Q](https://arxiv.org/abs/2210.17323) and [AWQ](https://arxiv.org/abs/2306.00978) quantization. To speed up inference with quantization, simply set the `quantize` flag to `bitsandbytes`, `gptq` or `awq` depending on the quantization technique you wish to use. When using GPT-Q quantization, you need to point to one of the models [here](https://huggingface.co/models?search=gptq); when using AWQ quantization, you need to point to one of the models [here](https://huggingface.co/models?search=awq). To get more information about quantization, please refer to the [quantization guide](./../conceptual/quantization). ## RoPE Scaling RoPE scaling can be used to increase the sequence length of the model at inference time without necessarily fine-tuning it. To enable RoPE scaling, simply pass the `--rope-scaling`, `--max-input-length` and `--rope-factor` flags when running through the CLI. `--rope-scaling` can take the values `linear` or `dynamic`. If your model is not fine-tuned to a longer sequence length, use `dynamic`. `--rope-factor` is the ratio between the intended max sequence length and the model's original max sequence length. Make sure to pass `--max-input-length` to provide the maximum input length for extension. <Tip> We recommend using `dynamic` RoPE scaling. </Tip> ## Safetensors [Safetensors](https://github.com/huggingface/safetensors) is a fast and safe persistence format for deep learning models, and is required for tensor parallelism. TGI supports `safetensors` model loading under the hood. By default, given a repository with `safetensors` and `pytorch` weights, TGI will always load `safetensors`. If there are no `safetensors` weights, TGI will convert the `pytorch` weights to `safetensors` format.
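As a concrete illustration of the RoPE scaling flags described above, a launch command could look like the sketch below. The model ID and the numeric values are placeholders; choose a factor that matches the prompt length you actually need.

```bash
# Hedged example: extend a model to longer prompts with dynamic RoPE scaling.
# The model ID and values are placeholders to adapt to your own setup.
text-generation-launcher \
    --model-id meta-llama/Llama-2-7b-chat-hf \
    --rope-scaling dynamic \
    --rope-factor 2.0 \
    --max-input-length 4096 \
    --max-total-tokens 8192
```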
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/conceptual/quantization.md
# Quantization TGI offers GPTQ and bits-and-bytes quantization to quantize large language models. ## Quantization with GPTQ GPTQ is a post-training quantization method to make the model smaller. It quantizes each layer by finding a compressed version of its weights that yields the minimum mean squared error, as below 👇 Given a layer \\(l\\) with weight matrix \\(W_{l}\\) and layer input \\(X_{l}\\), find the quantized weight \\(\\hat{W}_{l}\\): $$\hat{W}_{l}^{*} = \underset{\hat{W}_{l}}{\text{argmin}} \lVert W_{l}X_{l}-\hat{W}_{l}X_{l} \rVert^{2}_{2}$$ TGI allows you to either run an already GPTQ-quantized model (see available models [here](https://huggingface.co/models?search=gptq)) or quantize a model of your choice using the quantization script. You can run a quantized model by simply passing `--quantize` like below 👇 ```bash docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --quantize gptq ``` Note that TGI's GPTQ implementation doesn't use [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) under the hood. However, models quantized using AutoGPTQ or Optimum can still be served by TGI. To quantize a given model using GPTQ with a calibration dataset, simply run ```bash text-generation-server quantize tiiuae/falcon-40b /data/falcon-40b-gptq # Add --upload-to-model-id MYUSERNAME/falcon-40b to push the created model to the hub directly ``` This will create a new directory with the quantized files, which you can then serve with: ```bash text-generation-launcher --model-id /data/falcon-40b-gptq/ --sharded true --num-shard 2 --quantize gptq ``` You can learn more about the quantization options by running `text-generation-server quantize --help`. If you wish to do more with GPTQ models (e.g. train an adapter on top), you can read about the transformers GPTQ integration [here](https://huggingface.co/blog/gptq-integration). You can learn more about GPTQ from the [paper](https://arxiv.org/pdf/2210.17323.pdf).
In TGI, you can use 4-bit quantization by adding `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` like below 👇 ```bash docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --quantize bitsandbytes-nf4 ``` You can get more information about 8-bit quantization by reading this [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), and 4-bit quantization by reading [this blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes).
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/conceptual/paged_attention.md
# PagedAttention LLMs struggle with memory limitations during generation. In the decoding part of generation, all the attention keys and values generated for previous tokens are stored in GPU memory for reuse. This is called _KV cache_, and it may take up a large amount of memory for large models and long sequences. PagedAttention attempts to optimize memory use by partitioning the KV cache into blocks that are accessed through a lookup table. Thus, the KV cache does not need to be stored in contiguous memory, and blocks are allocated as needed. The memory efficiency can increase GPU utilization on memory-bound workloads, so more inference batches can be supported. The use of a lookup table to access the memory blocks can also help with KV sharing across multiple generations. This is helpful for techniques such as _parallel sampling_, where multiple outputs are generated simultaneously for the same prompt. In this case, the cached KV blocks can be shared among the generations. TGI's PagedAttention implementation leverages the custom cuda kernels developed by the [vLLM Project](https://github.com/vllm-project/vllm). You can learn more about this technique in the [project's page](https://vllm.ai/).
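To make the block-table bookkeeping concrete, here is a deliberately simplified Python sketch of a paged KV cache. It is only an illustration of the idea; TGI's actual implementation lives in the custom CUDA kernels mentioned above, and the names and sizes used here are invented for the example.

```python
# Schematic illustration of the block-table idea behind PagedAttention.
# This is a simplified sketch, not TGI's actual implementation.
import numpy as np

BLOCK_SIZE = 4      # tokens per KV block
NUM_BLOCKS = 16     # total blocks in the (toy) KV cache pool
HEAD_DIM = 8

# One shared pool of KV blocks; sequences reference blocks through a table.
kv_pool = np.zeros((NUM_BLOCKS, BLOCK_SIZE, HEAD_DIM))
free_blocks = list(range(NUM_BLOCKS))

class Sequence:
    def __init__(self):
        self.block_table = []   # logical block index -> physical block index
        self.length = 0         # number of cached tokens

    def append_kv(self, kv_vector):
        # Allocate a new physical block only when the last one is full,
        # so the cache does not need to live in contiguous memory.
        if self.length % BLOCK_SIZE == 0:
            self.block_table.append(free_blocks.pop())
        block = self.block_table[self.length // BLOCK_SIZE]
        kv_pool[block, self.length % BLOCK_SIZE] = kv_vector
        self.length += 1

    def gather_kv(self):
        # Reassemble the (non-contiguous) cache for this sequence via the table.
        blocks = kv_pool[self.block_table].reshape(-1, HEAD_DIM)
        return blocks[: self.length]

seq = Sequence()
for t in range(6):
    seq.append_kv(np.full(HEAD_DIM, t, dtype=float))
print(seq.block_table)        # two physical blocks cover 6 tokens
print(seq.gather_kv().shape)  # (6, 8)
```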
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/conceptual/flash_attention.md
# Flash Attention Scaling the transformer architecture is heavily bottlenecked by the self-attention mechanism, which has quadratic time and memory complexity. Recent developments in accelerator hardware have mainly focused on increasing compute capacity rather than memory bandwidth or data transfer, which leaves the attention operation memory-bound. **Flash Attention** is an attention algorithm used to reduce this problem and scale transformer-based models more efficiently, enabling faster training and inference. The standard attention mechanism uses High Bandwidth Memory (HBM) to store, read and write keys, queries and values. HBM is large but slow, whereas on-chip SRAM is much smaller but considerably faster. In the standard attention implementation, the cost of loading and writing keys, queries, and values from HBM is high: it loads keys, queries, and values from HBM to GPU on-chip SRAM, performs a single step of the attention mechanism, writes the result back to HBM, and repeats this for every attention step. Instead, Flash Attention loads keys, queries, and values once, fuses the operations of the attention mechanism, and writes the result back. ![Flash Attention](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/flash-attn.png) It is implemented for supported models. You can check out the complete list of models that support Flash Attention [here](https://github.com/huggingface/text-generation-inference/tree/main/server/text_generation_server/models); they are the models whose implementations carry the `flash` prefix. You can learn more about Flash Attention by reading the [paper](https://arxiv.org/abs/2205.14135).
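The core trick, processing keys and values tile by tile while maintaining a running (online) softmax, can be illustrated outside of a GPU kernel. The NumPy sketch below is a toy blockwise attention that produces the same result as the naive implementation; the real Flash Attention kernel additionally fuses these steps inside SRAM, which this sketch does not attempt to model.

```python
# Toy blockwise attention with an online softmax; illustration only.
import numpy as np

def tiled_attention(q, k, v, tile=64):
    # q: (n, d), k/v: (m, d); never materializes the full (n, m) score matrix.
    scale = 1.0 / np.sqrt(q.shape[-1])
    out = np.zeros((q.shape[0], v.shape[-1]))
    running_max = np.full(q.shape[0], -np.inf)
    running_sum = np.zeros(q.shape[0])
    for start in range(0, k.shape[0], tile):
        k_t, v_t = k[start:start + tile], v[start:start + tile]
        scores = (q @ k_t.T) * scale
        new_max = np.maximum(running_max, scores.max(axis=-1))
        # Rescale previously accumulated results to the new running maximum.
        correction = np.exp(running_max - new_max)
        p = np.exp(scores - new_max[:, None])
        out = out * correction[:, None] + p @ v_t
        running_sum = running_sum * correction + p.sum(axis=-1)
        running_max = new_max
    return out / running_sum[:, None]

# Sanity check against the naive implementation.
rng = np.random.default_rng(0)
q = rng.normal(size=(8, 16))
k = rng.normal(size=(200, 16))
v = rng.normal(size=(200, 16))

scores = (q @ k.T) / np.sqrt(16)
weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
naive = (weights / weights.sum(axis=-1, keepdims=True)) @ v
assert np.allclose(tiled_attention(q, k, v, tile=64), naive)
```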
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/conceptual/safetensors.md
# Safetensors Safetensors is a model serialization format for deep learning models. It is [faster](https://huggingface.co/docs/safetensors/speed) and safer compared to other serialization formats like pickle (which is used under the hood in many deep learning libraries). TGI depends on safetensors format mainly to enable [tensor parallelism sharding](./tensor_parallelism). For a given model repository during serving, TGI looks for safetensors weights. If there are no safetensors weights, TGI converts the PyTorch weights to safetensors format. You can learn more about safetensors by reading the [safetensors documentation](https://huggingface.co/docs/safetensors/index).
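If you want to produce or inspect safetensors files yourself, the `safetensors` Python library exposes simple save and load helpers. The snippet below is a minimal example; the file name and tensor names are arbitrary placeholders.

```python
import torch
from safetensors.torch import save_file, load_file

# Save a dictionary of tensors to a single safetensors file.
weights = {"embedding.weight": torch.randn(10, 4), "lm_head.weight": torch.randn(4, 10)}
save_file(weights, "model.safetensors")

# Load it back; tensors are returned as a plain dict.
reloaded = load_file("model.safetensors")
print(reloaded["embedding.weight"].shape)  # torch.Size([10, 4])
```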
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/conceptual/streaming.md
# Streaming ## What is Streaming? Token streaming is the mode in which the server returns the tokens one by one as the model generates them. This enables showing progressive generations to the user rather than waiting for the whole generation. Streaming is an essential aspect of the end-user experience as it reduces latency, one of the most critical aspects of a smooth experience. <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/streaming-generation-visual_360.gif" /> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/streaming-generation-visual-dark_360.gif" /> </div> With token streaming, the server can start returning the tokens one by one before having to generate the whole response. Users can have a sense of the generation's quality earlier than the end of the generation. This has different positive effects: * Users can get results orders of magnitude earlier for extremely long queries. * Seeing something in progress allows users to stop the generation if it's not going in the direction they expect. * Perceived latency is lower when results are shown in the early stages. * When used in conversational UIs, the experience feels more natural. For example, a system can generate 100 tokens per second. If the system generates 1000 tokens, with the non-streaming setup, users need to wait 10 seconds to get results. On the other hand, with the streaming setup, users get initial results immediately, and although end-to-end latency will be the same, they can see half of the generation after five seconds. Below you can see an interactive demo that shows non-streaming vs streaming side-by-side. Click **generate** below. <div class="block dark:hidden"> <iframe src="https://osanseviero-streaming-vs-non-streaming.hf.space?__theme=light" width="850" height="350" ></iframe> </div> <div class="hidden dark:block"> <iframe src="https://osanseviero-streaming-vs-non-streaming.hf.space?__theme=dark" width="850" height="350" ></iframe> </div> ## How to use Streaming? ### Streaming with Python To stream tokens with `InferenceClient`, simply pass `stream=True` and iterate over the response. ```python from huggingface_hub import InferenceClient client = InferenceClient("http://127.0.0.1:8080") for token in client.text_generation("How do you make cheese?", max_new_tokens=12, stream=True): print(token) # To # make # cheese #, # you # need # to # start # with # milk #. ``` If you want additional details, you can add `details=True`. In this case, you get a `TextGenerationStreamResponse` which contains additional information such as the probabilities and the tokens. For the final response in the stream, it also returns the full generated text. ```python for details in client.text_generation("How do you make cheese?", max_new_tokens=12, details=True, stream=True): print(details) #TextGenerationStreamResponse(token=Token(id=193, text='\n', logprob=-0.007358551, special=False), generated_text=None, details=None) #TextGenerationStreamResponse(token=Token(id=2044, text='To', logprob=-1.1357422, special=False), generated_text=None, details=None) #TextGenerationStreamResponse(token=Token(id=717, text=' make', logprob=-0.009841919, special=False), generated_text=None, details=None) #... 
#TextGenerationStreamResponse(token=Token(id=25, text='.', logprob=-1.3408203, special=False), generated_text='\nTo make cheese, you need to start with milk.', details=StreamDetails(finish_reason=<FinishReason.Length: 'length'>, generated_tokens=12, seed=None)) ``` The `huggingface_hub` library also comes with an `AsyncInferenceClient` in case you need to handle the requests concurrently. ```python from huggingface_hub import AsyncInferenceClient client = AsyncInferenceClient("http://127.0.0.1:8080") async for token in await client.text_generation("How do you make cheese?", stream=True): print(token) # To # make # cheese #, # you # need # to # start # with # milk #. ``` ### Streaming with cURL To use the `generate_stream` endpoint with curl, you can add the `-N` flag, which disables curl's default buffering and shows data as it arrives from the server: ```curl curl -N 127.0.0.1:8080/generate_stream \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \ -H 'Content-Type: application/json' ``` ### Streaming with JavaScript First, we need to install the `@huggingface/inference` library. `npm install @huggingface/inference` If you're using the free Inference API, you can use `HfInference`. If you're using inference endpoints, you can use `HfInferenceEndpoint`. We can create a `HfInferenceEndpoint` by providing our endpoint URL and credentials. ```js import { HfInferenceEndpoint } from '@huggingface/inference' const hf = new HfInferenceEndpoint('https://YOUR_ENDPOINT.endpoints.huggingface.cloud', 'hf_YOUR_TOKEN') // prompt const prompt = 'What can you do in Nuremberg, Germany? Give me 3 Tips' const stream = hf.textGenerationStream({ inputs: prompt }) for await (const r of stream) { // yield the generated token process.stdout.write(r.token.text) } ``` ## How does Streaming work under the hood? Under the hood, TGI uses Server-Sent Events (SSE). In an SSE setup, a client sends a request with the data, opening an HTTP connection and subscribing to updates. Afterward, the server sends data to the client. There is no need for further requests; the server will keep sending the data. SSEs are unidirectional, meaning the client does not send other requests to the server. SSE sends data over HTTP, making it easy to use. SSEs are different from: * Polling: where the client keeps calling the server to get data. This means that the server might return empty responses and cause overhead. * Webhooks: where there is a bi-directional connection. The server can send information to the client, but the client can also send data to the server after the first request. Webhooks are more complex to operate as they don’t only use HTTP. If there are too many requests at the same time, TGI returns an HTTP error with an `overloaded` error type (`huggingface_hub` raises an `OverloadedError`). This allows the client to manage the overloaded server (e.g., it could display a busy error to the user or retry with a new request). To configure the maximum number of concurrent requests, you can specify `--max_concurrent_requests`, allowing clients to handle backpressure.
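Because each event arrives as a plain `data: {...}` line over HTTP, you can also consume `/generate_stream` without any client library. The snippet below is a rough sketch using only the `requests` package; the URL and payload mirror the cURL example above and the field names follow the `StreamResponse` schema, so treat it as illustrative rather than official client code.

```python
# Rough sketch: read the SSE stream from /generate_stream with plain requests.
# Assumes a TGI server is listening on 127.0.0.1:8080.
import json

import requests

payload = {"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20}}
with requests.post(
    "http://127.0.0.1:8080/generate_stream", json=payload, stream=True
) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines():
        # SSE payload lines start with "data:"; skip keep-alives and blanks.
        if not line or not line.startswith(b"data:"):
            continue
        event = json.loads(line[len(b"data:"):])
        print(event["token"]["text"], end="", flush=True)
print()
```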
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/conceptual/tensor_parallelism.md
# Tensor Parallelism Tensor parallelism is a technique used to fit a large model across multiple GPUs. For example, when multiplying the input tensors with the first weight tensor, the matrix multiplication is equivalent to splitting the weight tensor column-wise, multiplying each column with the input separately, and then concatenating the separate outputs. These outputs are then transferred from the GPUs and concatenated together to get the final result, like below 👇 ![Image courtesy of Anton Lozkhov](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/TP.png) <Tip warning={true}> Tensor parallelism only works for [models officially supported](../supported_models); it will not work when falling back to `transformers`. You can get more information about unsupported models [here](../basic_tutorials/non_core_models). </Tip> You can learn many more details about tensor parallelism from [the `transformers` docs](https://huggingface.co/docs/transformers/main/en/perf_train_gpu_many#tensor-parallelism).
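The equivalence described above is easy to check numerically. The following NumPy snippet is purely illustrative (it is not TGI code): it splits a weight matrix column-wise across two simulated devices and verifies that concatenating the partial outputs reproduces the full matrix multiplication.

```python
# Illustrative check of column-wise tensor parallelism on two simulated devices.
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((2, 8))  # input activations: (batch, hidden)
w = rng.standard_normal((8, 6))  # weight matrix: (hidden, out)

# Reference: the full matrix multiplication on a single device.
full = x @ w

# Shard the weight column-wise, multiply each shard independently,
# then concatenate the partial outputs along the output dimension.
w_shard_0, w_shard_1 = np.split(w, 2, axis=1)
sharded = np.concatenate([x @ w_shard_0, x @ w_shard_1], axis=1)

assert np.allclose(full, sharded)
print("column-parallel result matches the full matmul")
```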
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/router/README.md
# Router Also named `webserver` throughout the docs. This router handles most of the batching logic: it decides when to run new `prefill` requests, when to pause `decode` requests, which requests go into which batch, etc. It uses gRPC to communicate with the shards, which can therefore be kept much simpler and focus on having the most efficient forward passes possible. ## Continuous batching One important feature of `text-generation-inference` is enabled by this `router`. Continuous batching is the act of regularly adding queries to the same `forward` step of the LLM (a "batch") and removing them when they are finished. For continuous batching to be useful, you need to have more compute available relative to the memory requirements of your model. This is essentially true for LLMs, and the larger the model, the truer it gets (since you have to pool multiple GPUs to load the model, you effectively have a lot of compute power at hand). Static batching is the act of running several queries at the same time, but the amount of batching is usually controlled by the client and therefore decided beforehand. For text generation, and for LLMs which are memory bound, we can use the available compute much more efficiently by having clients send us single queries and letting the router mix & match queries into or out of batches. This is possible because, for LLMs, the total compute for running the model is much larger than the cost of mixing and matching the batches themselves. ### Simple continuous batching Text generation works by feeding a prompt to a model and iteratively calling `forward` on the model to produce new text, one token at a time. The first idea is simple: when a query arrives, we start working on it directly. When new queries arrive, we wait for the current `forward` to finish, batch the currently running prompt with the new query, and call `forward` again. Whenever a query is finished (either the model produces an EOS (end of sequence) token or the query reaches its allowed limit), we simply drop it from the batch, free its allocated memory, and continue with the rest until nothing is left. This simple idea generalizes very well, and we can potentially stack many requests in the same batch. One thing to note is that queries can be run with different parameters, meaning different ways to choose the next token (sampling or not, temperature, top_k, etc.). This is not a problem for the proposed approach; we just need to do the sampling independently for each member of the batch. ### Prefill, decode and past key values To make LLMs and text generation efficient, there is a very powerful trick that can be used: the "caching" of some attention matrices. [More on that in the first part of this blog](https://huggingface.co/blog/accelerated-inference#getting-to-the-first-10x-speedup) What this means is that the first "pass" over a prompt is different from the subsequent `forward` passes: the first one has to compute the entire attention matrix, whereas the follow-ups only need to compute the attention for the new token. The first pass is called `prefill` throughout this codebase, whereas the follow-ups are called `decode`. Since `prefill` is much more expensive than `decode`, we don't want to run it all the time, but a currently running query is probably doing `decode`. If we want to do continuous batching as explained previously, we need to run `prefill` at some point in order to build the cached attention values required to join the `decode` group. `text-generation-inference` uses a number of different strategies and parameters to let you find the sweet spot between exploiting the hardware and perceived latency. With no continuous batching at all, latency is going to be excellent, but throughput (meaning the total number of requests served in a given timeframe) is going to be very poor (since it's essentially 1). With static batching, you can probably reach the maximum throughput (by using the maximum total batch size applicable to your hardware), but latency suffers, since to reach maximum throughput you need to wait for requests to come in before processing. With continuous batching you can find a sweet spot. In general, latency is the parameter users care about most, but a 2x latency slowdown for 10x more users on the same hardware is an acceptable tradeoff. ## Token streaming This is a very important aspect of client UX. As mentioned above, latency is the most critical perceived quality of an LLM API. With token streaming, the server can start answering right after the first `prefill` pass, without waiting for the whole generation to be done. For extremely long queries this means clients can start to see something happening orders of magnitude before the work is done. Seeing something in progress allows them to cut the generation short if it's not going in the direction they want, and it also simply "feels" better.
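To make the scheduling idea more tangible, here is a toy Python sketch of continuous batching. It is deliberately simplified and is not the actual Rust implementation in this router: waiting requests are prefilled and merged between decode steps, and finished requests leave the batch immediately so their slots can be reused.

```python
# Toy simulation of continuous batching; illustrative only, not the real router.
from collections import deque
from dataclasses import dataclass, field


@dataclass
class Request:
    prompt: str
    max_new_tokens: int
    generated: list = field(default_factory=list)


def prefill(request: Request) -> Request:
    # Stand-in for the expensive first pass that builds the attention cache.
    return request


def decode_step(batch: list) -> None:
    # Stand-in for one forward pass producing one token per running request.
    for request in batch:
        request.generated.append("tok")


def serve(incoming: deque, max_batch_size: int = 8) -> None:
    batch: list = []
    while incoming or batch:
        # Merge waiting requests into the running batch (prefill happens here).
        while incoming and len(batch) < max_batch_size:
            batch.append(prefill(incoming.popleft()))
        decode_step(batch)
        # Drop finished requests immediately so new ones can take their place.
        batch = [r for r in batch if len(r.generated) < r.max_new_tokens]


requests = deque(Request(f"prompt {i}", max_new_tokens=i + 1) for i in range(4))
serve(requests)
```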
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/router/build.rs
use std::error::Error; use vergen::EmitBuilder; fn main() -> Result<(), Box<dyn Error>> { // Try to get the git sha from the local git repository if EmitBuilder::builder() .fail_on_error() .git_sha(false) .emit() .is_err() { // Unable to get the git sha if let Ok(sha) = std::env::var("GIT_SHA") { // Set it from an env var println!("cargo:rustc-env=VERGEN_GIT_SHA={sha}"); } } // Set docker label if present if let Ok(label) = std::env::var("DOCKER_LABEL") { // Set it from an env var println!("cargo:rustc-env=DOCKER_LABEL={label}"); } Ok(()) }
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/router/Cargo.toml
[package] name = "text-generation-router" description = "Text Generation Webserver" build = "build.rs" version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true [lib] path = "src/lib.rs" [[bin]] name = "text-generation-router" path = "src/main.rs" [dependencies] async-stream = "0.3.5" axum = { version = "0.6.20", features = ["json"] } axum-tracing-opentelemetry = "0.14.1" text-generation-client = { path = "client" } clap = { version = "4.4.5", features = ["derive", "env"] } futures = "0.3.28" metrics = "0.21.1" metrics-exporter-prometheus = { version = "0.12.1", features = [] } nohash-hasher = "0.2.0" opentelemetry = { version = "0.20.0", features = ["rt-tokio"] } opentelemetry-otlp = "0.13.0" rand = "0.8.5" reqwest = { version = "0.11.20", features = [] } serde = "1.0.188" serde_json = "1.0.107" thiserror = "1.0.48" tokenizers = { version = "0.14.0", features = ["http"] } tokio = { version = "1.32.0", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync"] } tokio-stream = "0.1.14" tower-http = { version = "0.4.4", features = ["cors"] } tracing = "0.1.37" tracing-opentelemetry = "0.21.0" tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] } utoipa = { version = "3.5.0", features = ["axum_extras"] } utoipa-swagger-ui = { version = "3.1.5", features = ["axum"] } ngrok = { version = "0.13.1", features = ["axum"], optional = true } hf-hub = "0.3.1" init-tracing-opentelemetry = { version = "0.14.1", features = ["opentelemetry-otlp"] } [build-dependencies] vergen = { version = "8.2.5", features = ["build", "git", "gitcl"] } [features] default = ["ngrok"] ngrok = ["dep:ngrok"]
0
hf_public_repos/text-generation-inference/router
hf_public_repos/text-generation-inference/router/src/health.rs
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use text_generation_client::{ Batch, NextTokenChooserParameters, Request, ShardedClient, StoppingCriteriaParameters, }; // Note: Request ids and batch ids cannot collide. const LIVENESS_ID: u64 = u64::MAX; const BATCH_ID: u64 = u64::MAX; #[derive(Clone, Debug)] pub(crate) struct Health { client: ShardedClient, generation_health: Arc<AtomicBool>, } impl Health { pub(crate) fn new(client: ShardedClient, generation_health: Arc<AtomicBool>) -> Self { Self { client, generation_health, } } pub(crate) async fn check(&mut self) -> bool { if self.generation_health.load(Ordering::SeqCst) { // Generation is healthy, we only check that the shards are answering gRPC calls self.client.health().await.is_ok() } else { // Generation is unhealthy or have not sent any generation request yet // Dummy batch of 1 token and 1 generated token let liveness_request = Request { id: LIVENESS_ID, inputs: "liveness".to_string(), truncate: 10, prefill_logprobs: false, parameters: Some(NextTokenChooserParameters { temperature: 1.0, top_k: 0, top_p: 1.0, typical_p: 1.0, do_sample: false, seed: 0, repetition_penalty: 1.0, watermark: false, }), stopping_parameters: Some(StoppingCriteriaParameters { max_new_tokens: 1, stop_sequences: vec![], ignore_eos_token: false, }), top_n_tokens: 0, }; let batch = Batch { id: BATCH_ID, requests: vec![liveness_request], size: 1, max_tokens: 2, }; // Skips the queue let value = self.client.prefill(batch).await.is_ok(); // Update generation health self.generation_health.store(value, Ordering::SeqCst); value } } }
0
hf_public_repos/text-generation-inference/router
hf_public_repos/text-generation-inference/router/src/queue.rs
use crate::infer::InferError; use crate::infer::InferStreamResponse; use crate::validation::ValidGenerateRequest; use nohash_hasher::{BuildNoHashHasher, IntMap}; use std::cmp::min; use std::collections::VecDeque; use text_generation_client::{Batch, Request}; use tokio::sync::{mpsc, oneshot}; use tokio::time::Instant; use tracing::{info_span, instrument, Span}; /// Queue entry #[derive(Debug)] pub(crate) struct Entry { /// Request pub request: ValidGenerateRequest, /// Response sender to communicate between the Infer struct and the batching_task pub response_tx: mpsc::UnboundedSender<Result<InferStreamResponse, InferError>>, /// Span that will live as long as entry pub span: Span, /// Temporary span used as a guard when logging inference, wait times... pub temp_span: Option<Span>, /// Instant when this entry was queued pub queue_time: Instant, /// Instant when this entry was added to a batch pub batch_time: Option<Instant>, } /// Request Queue #[derive(Debug, Clone)] pub(crate) struct Queue { /// Channel to communicate with the background queue task queue_sender: mpsc::UnboundedSender<QueueCommand>, } impl Queue { pub(crate) fn new(requires_padding: bool, block_size: u32, window_size: Option<u32>) -> Self { // Create channel let (queue_sender, queue_receiver) = mpsc::unbounded_channel(); // Launch background queue task tokio::spawn(queue_task( requires_padding, block_size, window_size, queue_receiver, )); Self { queue_sender } } /// Append an entry to the queue #[instrument(skip_all)] pub(crate) fn append(&self, entry: Entry) { // Send append command to the background task managing the state // Unwrap is safe here self.queue_sender .send(QueueCommand::Append(Box::new(entry), Span::current())) .unwrap(); } // Get the next batch #[instrument(skip(self))] pub(crate) async fn next_batch( &self, min_size: Option<usize>, prefill_token_budget: u32, token_budget: u32, ) -> Option<NextBatch> { // Create response channel let (response_sender, response_receiver) = oneshot::channel(); // Send next batch command to the background task managing the state // Unwrap is safe here self.queue_sender .send(QueueCommand::NextBatch { min_size, prefill_token_budget, token_budget, response_sender, span: Span::current(), }) .unwrap(); // Await on response channel // Unwrap is safe here response_receiver.await.unwrap() } } // Background task responsible of the queue state async fn queue_task( requires_padding: bool, block_size: u32, window_size: Option<u32>, mut receiver: mpsc::UnboundedReceiver<QueueCommand>, ) { let mut state = State::new(requires_padding, block_size, window_size); while let Some(cmd) = receiver.recv().await { match cmd { QueueCommand::Append(entry, span) => { span.in_scope(|| state.append(*entry)); metrics::increment_gauge!("tgi_queue_size", 1.0); } QueueCommand::NextBatch { min_size, prefill_token_budget, token_budget, response_sender, span, } => span.in_scope(|| { let next_batch = state.next_batch(min_size, prefill_token_budget, token_budget); response_sender.send(next_batch).unwrap(); metrics::gauge!("tgi_queue_size", state.entries.len() as f64); }), } } } /// Queue State #[derive(Debug)] struct State { /// Queue entries organized in a Vec entries: VecDeque<(u64, Entry)>, /// Id of the next entry next_id: u64, /// Id of the next batch next_batch_id: u64, /// Whether the model is using padding requires_padding: bool, /// Paged Attention block size block_size: u32, /// Sliding window window_size: Option<u32>, } impl State { fn new(requires_padding: bool, block_size: u32, window_size: 
Option<u32>) -> Self { Self { entries: VecDeque::with_capacity(128), next_id: 0, next_batch_id: 0, requires_padding, block_size, window_size, } } /// Append an entry to the queue fn append(&mut self, mut entry: Entry) { // Create a span that will live as long as the entry is in the queue waiting to be batched let queue_span = info_span!(parent: &entry.span, "queued"); entry.temp_span = Some(queue_span); // Push entry in the queue self.entries.push_back((self.next_id, entry)); self.next_id += 1; } // Get the next batch fn next_batch( &mut self, min_size: Option<usize>, prefill_token_budget: u32, token_budget: u32, ) -> Option<NextBatch> { if self.entries.is_empty() { return None; } // Check if we have enough entries if let Some(min_size) = min_size { if self.entries.len() < min_size { return None; } } // Create span for this batch to add context to inference calls let next_batch_span = info_span!(parent: None, "batch", batch_size = tracing::field::Empty); next_batch_span.follows_from(&Span::current()); let mut batch_requests = Vec::with_capacity(self.entries.len()); let mut batch_entries = IntMap::with_capacity_and_hasher(self.entries.len(), BuildNoHashHasher::default()); let mut max_input_length = 0; let mut prefill_tokens: u32 = 0; let mut decode_tokens: u32 = 0; // Pop entries starting from the front of the queue while let Some((id, mut entry)) = self.entries.pop_front() { // Filter entries where the response receiver was dropped (== entries where the request // was dropped by the client) if entry.response_tx.is_closed() { metrics::increment_counter!("tgi_request_failure", "err" => "dropped"); continue; } if self.requires_padding { // We pad to max input length in the Python shards // We need to take these padding tokens into the equation max_input_length = max_input_length.max(entry.request.input_length); prefill_tokens = (batch_requests.len() + 1) as u32 * max_input_length } else { // pad to block size prefill_tokens += ((entry.request.input_length + self.block_size - 1) / self.block_size) * self.block_size; } if self.requires_padding { decode_tokens += entry.request.stopping_parameters.max_new_tokens; } else { let max_new_tokens = match self.window_size { None => entry.request.stopping_parameters.max_new_tokens, Some(window_size) => min( window_size.saturating_sub(entry.request.input_length), entry.request.stopping_parameters.max_new_tokens, ), }; // pad to block size decode_tokens += ((max_new_tokens + self.block_size - 1) / self.block_size) * self.block_size; } if prefill_tokens > prefill_token_budget || (prefill_tokens + decode_tokens) > token_budget { // Entry is over budget // Add it back to the front self.entries.push_front((id, entry)); break; } // Create a new span to link the batch back to this entry let entry_batch_span = info_span!(parent: &entry.span, "infer"); // Add relationships next_batch_span.follows_from(&entry_batch_span); entry_batch_span.follows_from(&next_batch_span); // Update entry entry.temp_span = Some(entry_batch_span); batch_requests.push(Request { id, prefill_logprobs: entry.request.decoder_input_details, inputs: entry.request.inputs.clone(), truncate: entry.request.truncate, parameters: Some(entry.request.parameters.clone()), stopping_parameters: Some(entry.request.stopping_parameters.clone()), top_n_tokens: entry.request.top_n_tokens, }); // Set batch_time entry.batch_time = Some(Instant::now()); // Insert in batch_entries IntMap batch_entries.insert(id, entry); } // Empty batch if batch_requests.is_empty() { return None; } // Check if our batch is big 
enough if let Some(min_size) = min_size { // Batch is too small if batch_requests.len() < min_size { // Add back entries to the queue in the correct order for r in batch_requests.into_iter().rev() { let id = r.id; let entry = batch_entries.remove(&id).unwrap(); self.entries.push_front((id, entry)); } return None; } } // Final batch size let size = batch_requests.len() as u32; next_batch_span.record("batch_size", size); let batch = Batch { id: self.next_batch_id, requests: batch_requests, size, max_tokens: (prefill_tokens + decode_tokens), }; // Increment batch id self.next_batch_id += 1; metrics::histogram!("tgi_batch_next_size", batch.size as f64); Some((batch_entries, batch, next_batch_span)) } } type NextBatch = (IntMap<u64, Entry>, Batch, Span); #[derive(Debug)] enum QueueCommand { Append(Box<Entry>, Span), NextBatch { min_size: Option<usize>, prefill_token_budget: u32, token_budget: u32, response_sender: oneshot::Sender<Option<NextBatch>>, span: Span, }, } #[cfg(test)] mod tests { use super::*; use text_generation_client::{NextTokenChooserParameters, StoppingCriteriaParameters}; use tracing::info_span; fn default_entry() -> ( Entry, mpsc::UnboundedReceiver<Result<InferStreamResponse, InferError>>, ) { let (response_tx, receiver_tx) = mpsc::unbounded_channel(); let entry = Entry { request: ValidGenerateRequest { inputs: "".to_string(), input_length: 0, truncate: 0, decoder_input_details: false, parameters: NextTokenChooserParameters { temperature: 0.0, top_k: 0, top_p: 0.0, typical_p: 0.0, do_sample: false, seed: 0, repetition_penalty: 0.0, watermark: false, }, stopping_parameters: StoppingCriteriaParameters { ignore_eos_token: false, max_new_tokens: 1, stop_sequences: vec![], }, top_n_tokens: 0, }, response_tx, span: info_span!("entry"), temp_span: None, queue_time: Instant::now(), batch_time: None, }; (entry, receiver_tx) } #[test] fn test_append() { let mut state = State::new(false, 1, None); let (entry, _guard) = default_entry(); assert_eq!(state.next_id, 0); assert_eq!(state.entries.len(), 0); state.append(entry); assert_eq!(state.next_id, 1); assert_eq!(state.entries.len(), 1); let (id, _) = state.entries.remove(0).unwrap(); assert_eq!(id, 0); } #[test] fn test_next_batch_empty() { let mut state = State::new(false, 1, None); assert!(state.next_batch(None, 1, 1).is_none()); assert!(state.next_batch(Some(1), 1, 1).is_none()); } #[test] fn test_next_batch_min_size() { let mut state = State::new(false, 1, None); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); state.append(entry1); state.append(entry2); let (entries, batch, _) = state.next_batch(None, 2, 2).unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert!(entries.get(&1).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); assert_eq!(state.next_id, 2); assert_eq!(state.entries.len(), 0); assert_eq!(state.next_batch_id, 1); let (entry3, _guard3) = default_entry(); state.append(entry3); assert!(state.next_batch(Some(2), 2, 2).is_none()); assert_eq!(state.next_id, 3); assert_eq!(state.entries.len(), 1); let (id, _) = state.entries.remove(0).unwrap(); assert_eq!(id, 2); } #[test] fn test_next_batch_token_budget() { let mut state = State::new(false, 1, None); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); state.append(entry1); state.append(entry2); let (entries, batch, _) = state.next_batch(None, 1, 1).unwrap(); 
assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); assert_eq!(state.next_id, 2); assert_eq!(state.entries.len(), 1); assert_eq!(state.next_batch_id, 1); let (entry3, _guard3) = default_entry(); state.append(entry3); let (entries, batch, _) = state.next_batch(None, 3, 3).unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&1)); assert!(entries.contains_key(&2)); assert_eq!(batch.id, 1); assert_eq!(batch.size, 2); assert_eq!(state.next_id, 3); assert_eq!(state.entries.len(), 0); assert_eq!(state.next_batch_id, 2); } #[tokio::test] async fn test_queue_append() { let queue = Queue::new(false, 1, None); let (entry, _guard) = default_entry(); queue.append(entry); } #[tokio::test] async fn test_queue_next_batch_empty() { let queue = Queue::new(false, 1, None); assert!(queue.next_batch(None, 1, 1).await.is_none()); assert!(queue.next_batch(Some(1), 1, 1).await.is_none()); } #[tokio::test] async fn test_queue_next_batch_min_size() { let queue = Queue::new(false, 1, None); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, 2, 2).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert!(entries.get(&1).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); let (entry3, _guard3) = default_entry(); queue.append(entry3); // Not enough requests pending assert!(queue.next_batch(Some(2), 2, 2).await.is_none()); // Not enough token budget assert!(queue.next_batch(Some(1), 0, 0).await.is_none()); // Ok let (entries2, batch2, _) = queue.next_batch(Some(1), 2, 2).await.unwrap(); assert_eq!(entries2.len(), 1); assert!(entries2.contains_key(&2)); assert!(entries2.get(&2).unwrap().batch_time.is_some()); assert_eq!(batch2.id, 1); assert_eq!(batch2.size, 1); } #[tokio::test] async fn test_queue_next_batch_token_budget() { let queue = Queue::new(false, 1, None); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, 1, 1).await.unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); let (entry3, _guard3) = default_entry(); queue.append(entry3); let (entries, batch, _) = queue.next_batch(None, 3, 3).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&1)); assert!(entries.contains_key(&2)); assert_eq!(batch.id, 1); assert_eq!(batch.size, 2); } #[tokio::test] async fn test_queue_next_batch_dropped_receiver() { let queue = Queue::new(false, 1, None); let (entry, _) = default_entry(); queue.append(entry); assert!(queue.next_batch(None, 1, 1).await.is_none()); } }
0
hf_public_repos/text-generation-inference/router
hf_public_repos/text-generation-inference/router/src/infer.rs
/// Batching and inference logic use crate::validation::{Validation, ValidationError}; use crate::{Entry, Queue, Token}; use crate::{GenerateRequest, PrefillToken}; use futures::future::try_join_all; use nohash_hasher::IntMap; use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, }; use text_generation_client::{ Batch, CachedBatch, ClientError, GeneratedText, Generation, PrefillTokens, ShardedClient, }; use thiserror::Error; use tokio::sync::mpsc::error::SendError; use tokio::sync::{mpsc, Notify, OwnedSemaphorePermit, Semaphore, TryAcquireError}; use tokio::time::Instant; use tokio_stream::wrappers::UnboundedReceiverStream; use tokio_stream::StreamExt; use tracing::{info_span, instrument, Instrument, Span}; /// Inference struct #[derive(Clone)] pub struct Infer { /// Validation validation: Validation, /// Request queue queue: Queue, /// Shared state shared: Arc<Shared>, /// Inference limit limit_concurrent_requests: Arc<Semaphore>, } /// Infer shared state struct Shared { /// Batching background Tokio task notifier batching_task: Notify, } impl Infer { #[allow(clippy::too_many_arguments)] pub(crate) fn new( client: ShardedClient, validation: Validation, waiting_served_ratio: f32, max_batch_prefill_tokens: u32, max_batch_total_tokens: u32, max_waiting_tokens: usize, max_concurrent_requests: usize, requires_padding: bool, window_size: Option<u32>, generation_health: Arc<AtomicBool>, ) -> Self { // Infer shared state let queue = Queue::new(requires_padding, 16, window_size); let shared = Arc::new(Shared { batching_task: Notify::new(), }); // Spawn batching background task that contains all the inference logic tokio::spawn(batching_task( client, waiting_served_ratio, max_batch_prefill_tokens, max_batch_total_tokens, max_waiting_tokens, queue.clone(), shared.clone(), generation_health, )); // Inference limit with a semaphore let semaphore = Arc::new(Semaphore::new(max_concurrent_requests)); Self { validation, queue, shared, limit_concurrent_requests: semaphore, } } /// Add a new request to the queue and return a stream of InferStreamResponse #[instrument(skip_all)] pub(crate) async fn generate_stream( &self, request: GenerateRequest, ) -> Result< ( OwnedSemaphorePermit, UnboundedReceiverStream<Result<InferStreamResponse, InferError>>, ), InferError, > { // Limit concurrent requests by acquiring a permit from the semaphore let permit = self .clone() .limit_concurrent_requests .try_acquire_owned() .map_err(|err| { metrics::increment_counter!("tgi_request_failure", "err" => "overloaded"); tracing::error!("{err}"); err })?; // Validate request let valid_request = self.validation.validate(request).await.map_err(|err| { metrics::increment_counter!("tgi_request_failure", "err" => "validation"); tracing::error!("{err}"); err })?; // MPSC channel to communicate with the background batching task let (response_tx, response_rx) = mpsc::unbounded_channel(); // Append the request to the queue self.queue.append(Entry { request: valid_request, response_tx, span: Span::current(), temp_span: None, queue_time: Instant::now(), batch_time: None, }); // Notify the background task that we have a new entry in the queue that needs // to be batched self.shared.batching_task.notify_one(); // Return stream Ok((permit, UnboundedReceiverStream::new(response_rx))) } /// Add a new request to the queue and return a InferResponse #[instrument(skip_all)] pub(crate) async fn generate( &self, request: GenerateRequest, ) -> Result<InferResponse, InferError> { let use_top_tokens = request.parameters.top_n_tokens.is_some_and(|x| x > 
0); // Create stream and keep semaphore permit as long as generate lives let (_permit, mut stream) = self.generate_stream(request).await?; // Return values let mut result_prefill = Vec::new(); let mut result_tokens = Vec::new(); let mut result_top_tokens = Vec::new(); let mut result_generated_text = None; let mut result_start = None; let mut result_queued = None; // Iterate on stream while let Some(response) = stream.next().await { match response? { // Add prefill tokens InferStreamResponse::Prefill(tokens) => { // Create Token objects // We do that here instead of in the Python code as Rust for loops are faster result_prefill = tokens .ids .into_iter() .zip(tokens.logprobs.into_iter()) .zip(tokens.texts.into_iter()) .map(|((id, logprob), text)| PrefillToken { id, text, logprob }) .collect(); } // Push last token InferStreamResponse::Intermediate { token, top_tokens } => { result_tokens.push(token); result_top_tokens.push(top_tokens); } // Final message // Set return values InferStreamResponse::End { token, generated_text, start, queued, top_tokens, } => { result_tokens.push(token); result_top_tokens.push(top_tokens); result_generated_text = Some(generated_text); result_start = Some(start); result_queued = Some(queued) } } } // Check that we received a `InferStreamResponse::End` message if let (Some(generated_text), Some(queued), Some(start)) = (result_generated_text, result_queued, result_start) { Ok(InferResponse { prefill: result_prefill, tokens: result_tokens, generated_text, queued, start, top_tokens: if use_top_tokens { result_top_tokens } else { Vec::new() }, }) } else { let err = InferError::IncompleteGeneration; metrics::increment_counter!("tgi_request_failure", "err" => "incomplete"); tracing::error!("{err}"); Err(err) } } /// Add best_of new requests to the queue and return a InferResponse of the sequence with /// the highest log probability per token #[instrument(skip(self, request))] pub(crate) async fn generate_best_of( &self, request: GenerateRequest, best_of: usize, ) -> Result<(InferResponse, Vec<InferResponse>), InferError> { // validate best_of parameter separately let best_of = self.validation.validate_best_of(best_of)?; // create multiple generate requests let mut infer_responses: Vec<InferResponse> = try_join_all((0..best_of).map(|_| self.generate(request.clone()))).await?; // get the sequence with the highest log probability per token let mut max_index = 0; let mut max_logprob: f32 = f32::MIN; for (i, response) in infer_responses.iter().enumerate() { // mean logprobs of the generated tokens let sequence_logprob = response .tokens .iter() .map(|token| token.logprob) .sum::<f32>() / response.tokens.len() as f32; // set best sequence if sequence_logprob > max_logprob { max_index = i; max_logprob = sequence_logprob; } } let best_response = infer_responses.remove(max_index); Ok((best_response, infer_responses)) } } /// Batching logic /// Will be launched in a background Tokio task /// /// Batches requests and sends them to the inference server #[allow(clippy::too_many_arguments)] async fn batching_task( mut client: ShardedClient, waiting_served_ratio: f32, max_batch_prefill_tokens: u32, max_batch_total_tokens: u32, max_waiting_tokens: usize, queue: Queue, shared: Arc<Shared>, generation_health: Arc<AtomicBool>, ) { // Infinite loop loop { // Wait for a notification from the Infer struct shared.batching_task.notified().await; // Get the next batch from the queue // This batch might be smaller than the maximum batch size if there are not enough requests // waiting in the 
queue while let Some((mut entries, batch, span)) = queue .next_batch(None, max_batch_prefill_tokens, max_batch_total_tokens) .await { let mut cached_batch = prefill(&mut client, batch, &mut entries, &generation_health) .instrument(span) .await; let mut waiting_tokens = 1; // We loop until we do not receive any cached batch from the inference server (== until // all requests have met their stopping criteria) while let Some(batch) = cached_batch { // Get current batch info let batch_size = batch.size; let batch_max_tokens = batch.max_tokens; let mut batches = vec![batch]; metrics::gauge!("tgi_batch_current_size", batch_size as f64); metrics::gauge!("tgi_batch_current_max_tokens", batch_max_tokens as f64); let min_size = if waiting_tokens >= max_waiting_tokens { // If we didn't onboard any new requests since >= max_waiting_tokens, we try // to add a new batch even though its size might be small None } else { // Minimum batch size Some((batch_size as f32 * waiting_served_ratio).floor() as usize) }; let token_budget = max_batch_total_tokens.saturating_sub(batch_max_tokens); // Try to get a new batch if let Some((mut new_entries, new_batch, span)) = queue .next_batch(min_size, max_batch_prefill_tokens, token_budget) .await { // Tracking metrics if min_size.is_some() { metrics::increment_counter!("tgi_batch_concat", "reason" => "backpressure"); } else { metrics::increment_counter!("tgi_batch_concat", "reason" => "wait_exceeded"); } entries.iter_mut().for_each(|(_, entry)| { // Create a new span to add the info that this entry is waiting // because a new batch is being computed let entry_waiting_span = info_span!(parent: &entry.span, "waiting"); // Add relationships span.follows_from(&entry_waiting_span); entry_waiting_span.follows_from(&span); // Update entry entry.temp_span = Some(entry_waiting_span); }); // Generate one token for this new batch to have the attention past in cache let new_cached_batch = prefill(&mut client, new_batch, &mut new_entries, &generation_health) .instrument(span) .await; // Reset waiting counter waiting_tokens = 1; // Extend current batch with the new batch if let Some(new_cached_batch) = new_cached_batch { entries.extend(new_entries); batches.push(new_cached_batch); } } // Create span for this batch to add context to inference calls let next_batch_size = entries.len(); let next_batch_span = info_span!(parent: None, "batch", batch_size = next_batch_size); entries.iter_mut().for_each(|(_, entry)| { // Create a new span to link the batch back to this entry let entry_batch_span = info_span!(parent: &entry.span, "infer"); // Add relationships next_batch_span.follows_from(&entry_batch_span); entry_batch_span.follows_from(&next_batch_span); // Update entry entry.temp_span = Some(entry_batch_span); }); cached_batch = decode(&mut client, batches, &mut entries, &generation_health) .instrument(next_batch_span) .await; waiting_tokens += 1; } metrics::gauge!("tgi_batch_current_size", 0.0); metrics::gauge!("tgi_batch_current_max_tokens", 0.0); } } } #[instrument(skip_all)] async fn prefill( client: &mut ShardedClient, batch: Batch, entries: &mut IntMap<u64, Entry>, generation_health: &Arc<AtomicBool>, ) -> Option<CachedBatch> { let start_time = Instant::now(); let batch_id = batch.id; metrics::increment_counter!("tgi_batch_inference_count", "method" => "prefill"); match client.prefill(batch).await { Ok((generations, next_batch)) => { // Update health generation_health.store(true, Ordering::SeqCst); // Send generated tokens and filter stopped entries 
filter_send_generations(generations, entries); // Filter next batch and remove requests that were stopped let next_batch = filter_batch(client, next_batch, entries).await; metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed().as_secs_f64(), "method" => "prefill"); metrics::increment_counter!("tgi_batch_inference_success", "method" => "prefill"); next_batch } // If we have an error, we discard the whole batch Err(err) => { // Update health generation_health.store(false, Ordering::SeqCst); let _ = client.clear_cache(Some(batch_id)).await; send_errors(err, entries); metrics::increment_counter!("tgi_batch_inference_failure", "method" => "prefill"); None } } } #[instrument(skip_all)] async fn decode( client: &mut ShardedClient, batches: Vec<CachedBatch>, entries: &mut IntMap<u64, Entry>, generation_health: &Arc<AtomicBool>, ) -> Option<CachedBatch> { let start_time = Instant::now(); let batch_ids: Vec<u64> = batches.iter().map(|b| b.id).collect(); metrics::increment_counter!("tgi_batch_inference_count", "method" => "decode"); match client.decode(batches).await { Ok((generations, next_batch)) => { // Update health generation_health.store(true, Ordering::SeqCst); // Send generated tokens and filter stopped entries filter_send_generations(generations, entries); // Filter next batch and remove requests that were stopped let next_batch = filter_batch(client, next_batch, entries).await; metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed().as_secs_f64(), "method" => "decode"); metrics::increment_counter!("tgi_batch_inference_success", "method" => "decode"); next_batch } // If we have an error, we discard the whole batch Err(err) => { generation_health.store(false, Ordering::SeqCst); for id in batch_ids { let _ = client.clear_cache(Some(id)).await; } send_errors(err, entries); metrics::increment_counter!("tgi_batch_inference_failure", "method" => "decode"); None } } } /// Filter a `batch` and remove all requests not present in `entries` #[instrument(skip_all)] async fn filter_batch( client: &mut ShardedClient, next_batch: Option<CachedBatch>, entries: &IntMap<u64, Entry>, ) -> Option<CachedBatch> { let mut batch = next_batch?; // No need to filter if batch.size as usize == entries.len() { return Some(batch); } let id = batch.id; // Retain only requests that are still in entries batch.request_ids.retain(|id| entries.contains_key(id)); if batch.request_ids.is_empty() { // All requests have been filtered out // Next batch is now empty // Clear it from the Python shards cache // We unwrap here as we need to panic since we cannot recover if this method fails client.clear_cache(Some(id)).await.unwrap(); None } else { // Filter Python shard cache // We unwrap here as we need to panic since we cannot recover if this method fails client.filter_batch(id, batch.request_ids).await.unwrap() } } /// Send one or multiple `InferStreamResponse` to Infer for all `entries` /// and filter entries #[instrument(skip_all)] fn filter_send_generations(generations: Vec<Generation>, entries: &mut IntMap<u64, Entry>) { generations.into_iter().for_each(|generation| { let id = generation.request_id; // Get entry // We can `expect` here as the request id should always be in the entries let entry = entries .get(&id) .expect("ID not found in entries. This is a bug."); // Create and enter a span to link this function back to the entry let _span = info_span!(parent: entry.temp_span.as_ref().expect("batch_span is None. 
This is a bug."), "send_generation", generation = ?generation).entered(); // Send generation responses back to the infer task // If the receive an error from the Flume channel, it means that the client dropped the // request and we need to stop generating hence why we unwrap_or(true) let stopped = send_responses(generation, entry).map_err(|err| { tracing::error!("Entry response channel error."); metrics::increment_counter!("tgi_request_failure", "err" => "dropped"); err }).unwrap_or(true); if stopped { entries.remove(&id).expect("ID not found in entries. This is a bug."); } }); } /// Send responses through the `entry` response channel fn send_responses( generation: Generation, entry: &Entry, ) -> Result<bool, Box<SendError<Result<InferStreamResponse, InferError>>>> { // Return directly if the channel is disconnected if entry.response_tx.is_closed() { metrics::increment_counter!("tgi_request_failure", "err" => "dropped"); return Ok(true); } let mut stopped = false; if let Some(prefill_tokens) = generation.prefill_tokens { // Send message entry .response_tx .send(Ok(InferStreamResponse::Prefill(prefill_tokens)))?; } // Create last Token let token = Token { id: generation.token_id, text: generation.token_text, logprob: generation.token_logprob, special: generation.token_is_special, }; // generation.top_tokens let mut top_tokens = Vec::new(); if let Some(top_tokens_) = generation.top_tokens { top_tokens.extend( top_tokens_ .ids .into_iter() .zip(top_tokens_.logprobs.into_iter()) .zip(top_tokens_.texts.into_iter()) .zip(top_tokens_.is_special.into_iter()) .map(|(((id, logprob), text), special)| Token { id, text, logprob, special, }), ) } if let Some(generated_text) = generation.generated_text { // Generation has ended stopped = true; // Send message entry.response_tx.send(Ok(InferStreamResponse::End { token, top_tokens, generated_text, queued: entry.queue_time, start: entry.batch_time.unwrap(), }))?; } else { // Send message entry .response_tx .send(Ok(InferStreamResponse::Intermediate { token, top_tokens }))?; } Ok(stopped) } /// Send errors to Infer for all `entries` #[instrument(skip_all)] fn send_errors(error: ClientError, entries: &mut IntMap<u64, Entry>) { entries.drain().for_each(|(_, entry)| { // Create and enter a span to link this function back to the entry let _send_error_span = info_span!(parent: entry.temp_span.as_ref().expect("batch_span is None. This is a bug."), "send_error").entered(); let err = InferError::GenerationError(error.to_string()); metrics::increment_counter!("tgi_request_failure", "err" => "generation"); tracing::error!("{err}"); // unwrap_or is valid here as we don't care if the receiver is gone. 
entry .response_tx .send(Err(err)) .unwrap_or(()); }); } #[derive(Debug)] pub(crate) enum InferStreamResponse { // Optional first message Prefill(PrefillTokens), // Intermediate messages Intermediate { token: Token, top_tokens: Vec<Token>, }, // Last message End { token: Token, top_tokens: Vec<Token>, generated_text: GeneratedText, start: Instant, queued: Instant, }, } #[derive(Debug)] pub(crate) struct InferResponse { pub(crate) prefill: Vec<PrefillToken>, pub(crate) tokens: Vec<Token>, pub(crate) generated_text: GeneratedText, pub(crate) queued: Instant, pub(crate) start: Instant, pub(crate) top_tokens: Vec<Vec<Token>>, } #[derive(Debug, Error)] pub enum InferError { #[error("Request failed during generation: {0}")] GenerationError(String), #[error("Model is overloaded")] Overloaded(#[from] TryAcquireError), #[error("Input validation error: {0}")] ValidationError(#[from] ValidationError), #[error("Incomplete generation")] IncompleteGeneration, } impl InferError { pub(crate) fn error_type(&self) -> &str { match self { InferError::GenerationError(_) => "generation", InferError::Overloaded(_) => "overloaded", InferError::ValidationError(_) => "validation", InferError::IncompleteGeneration => "incomplete_generation", } } }
0
hf_public_repos/text-generation-inference/router
hf_public_repos/text-generation-inference/router/src/server.rs
/// HTTP Server logic use crate::health::Health; use crate::infer::{InferError, InferResponse, InferStreamResponse}; use crate::validation::ValidationError; use crate::{ BestOfSequence, CompatGenerateRequest, Details, ErrorResponse, FinishReason, GenerateParameters, GenerateRequest, GenerateResponse, HubModelInfo, Infer, Info, PrefillToken, StreamDetails, StreamResponse, Token, Validation, }; use axum::extract::Extension; use axum::http::{HeaderMap, Method, StatusCode}; use axum::response::sse::{Event, KeepAlive, Sse}; use axum::response::{IntoResponse, Response}; use axum::routing::{get, post}; use axum::{http, Json, Router}; use axum_tracing_opentelemetry::middleware::OtelAxumLayer; use futures::stream::StreamExt; use futures::Stream; use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle}; use std::convert::Infallible; use std::net::SocketAddr; use std::sync::atomic::AtomicBool; use std::sync::Arc; use text_generation_client::{ShardInfo, ShardedClient}; use tokenizers::Tokenizer; use tokio::signal; use tokio::time::Instant; use tower_http::cors::{AllowOrigin, CorsLayer}; use tracing::{info_span, instrument, Instrument}; use utoipa::OpenApi; use utoipa_swagger_ui::SwaggerUi; /// Generate tokens if `stream == false` or a stream of token if `stream == true` #[utoipa::path( post, tag = "Text Generation Inference", path = "/", request_body = CompatGenerateRequest, responses( (status = 200, description = "Generated Text", content( ("application/json" = GenerateResponse), ("text/event-stream" = StreamResponse), )), (status = 424, description = "Generation Error", body = ErrorResponse, example = json ! ({"error": "Request failed during generation"})), (status = 429, description = "Model is overloaded", body = ErrorResponse, example = json ! ({"error": "Model is overloaded"})), (status = 422, description = "Input validation error", body = ErrorResponse, example = json ! ({"error": "Input validation error"})), (status = 500, description = "Incomplete generation", body = ErrorResponse, example = json ! ({"error": "Incomplete generation"})), ) )] #[instrument(skip(infer, req))] async fn compat_generate( Extension(default_return_full_text): Extension<bool>, infer: Extension<Infer>, Json(mut req): Json<CompatGenerateRequest>, ) -> Result<Response, (StatusCode, Json<ErrorResponse>)> { // default return_full_text given the pipeline_tag if req.parameters.return_full_text.is_none() { req.parameters.return_full_text = Some(default_return_full_text) } // switch on stream if req.stream { Ok(generate_stream(infer, Json(req.into())) .await .into_response()) } else { let (headers, Json(generation)) = generate(infer, Json(req.into())).await?; // wrap generation inside a Vec to match api-inference Ok((headers, Json(vec![generation])).into_response()) } } /// Text Generation Inference endpoint info #[utoipa::path( get, tag = "Text Generation Inference", path = "/info", responses((status = 200, description = "Served model info", body = Info)) )] #[instrument] async fn get_model_info(info: Extension<Info>) -> Json<Info> { Json(info.0) } #[utoipa::path( get, tag = "Text Generation Inference", path = "/health", responses( (status = 200, description = "Everything is working fine"), (status = 503, description = "Text generation inference is down", body = ErrorResponse, example = json ! 
({"error": "unhealthy", "error_type": "healthcheck"})), ) )] #[instrument(skip(health))] /// Health check method async fn health(mut health: Extension<Health>) -> Result<(), (StatusCode, Json<ErrorResponse>)> { match health.check().await { true => Ok(()), false => Err(( StatusCode::SERVICE_UNAVAILABLE, Json(ErrorResponse { error: "unhealthy".to_string(), error_type: "healthcheck".to_string(), }), )), } } /// Generate tokens #[utoipa::path( post, tag = "Text Generation Inference", path = "/generate", request_body = GenerateRequest, responses( (status = 200, description = "Generated Text", body = GenerateResponse), (status = 424, description = "Generation Error", body = ErrorResponse, example = json ! ({"error": "Request failed during generation"})), (status = 429, description = "Model is overloaded", body = ErrorResponse, example = json ! ({"error": "Model is overloaded"})), (status = 422, description = "Input validation error", body = ErrorResponse, example = json ! ({"error": "Input validation error"})), (status = 500, description = "Incomplete generation", body = ErrorResponse, example = json ! ({"error": "Incomplete generation"})), ) )] #[instrument( skip_all, fields( parameters = ? req.parameters, total_time, validation_time, queue_time, inference_time, time_per_token, seed, ) )] async fn generate( infer: Extension<Infer>, Json(req): Json<GenerateRequest>, ) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> { let span = tracing::Span::current(); let start_time = Instant::now(); metrics::increment_counter!("tgi_request_count"); tracing::debug!("Input: {}", req.inputs); let compute_characters = req.inputs.chars().count(); let mut add_prompt = None; if req.parameters.return_full_text.unwrap_or(false) { add_prompt = Some(req.inputs.clone()); } let details: bool = req.parameters.details || req.parameters.decoder_input_details; // Inference let (response, best_of_responses) = match req.parameters.best_of { Some(best_of) if best_of > 1 => { let (response, best_of_responses) = infer.generate_best_of(req, best_of).await?; (response, Some(best_of_responses)) } _ => (infer.generate(req).await?, None), }; // Token details let details = match details { true => { // convert best_of_responses let best_of_sequences = best_of_responses.map(|responses: Vec<InferResponse>| { responses .into_iter() .map(|response: InferResponse| { // Add prompt if return_full_text let mut output_text = response.generated_text.text; if let Some(prompt) = &add_prompt { output_text = prompt.clone() + &output_text; } BestOfSequence { generated_text: output_text, finish_reason: FinishReason::from( response.generated_text.finish_reason, ), generated_tokens: response.generated_text.generated_tokens, prefill: response.prefill, tokens: response.tokens, top_tokens: response.top_tokens, seed: response.generated_text.seed, } }) .collect() }); Some(Details { finish_reason: FinishReason::from(response.generated_text.finish_reason), generated_tokens: response.generated_text.generated_tokens, prefill: response.prefill, tokens: response.tokens, seed: response.generated_text.seed, best_of_sequences, top_tokens: response.top_tokens, }) } false => None, }; // Timings let total_time = start_time.elapsed(); let validation_time = response.queued - start_time; let queue_time = response.start - response.queued; let inference_time = Instant::now() - response.start; let time_per_token = inference_time / response.generated_text.generated_tokens; // Tracing metadata span.record("total_time", format!("{total_time:?}")); 
span.record("validation_time", format!("{validation_time:?}")); span.record("queue_time", format!("{queue_time:?}")); span.record("inference_time", format!("{inference_time:?}")); span.record("time_per_token", format!("{time_per_token:?}")); span.record("seed", format!("{:?}", response.generated_text.seed)); // Headers let mut headers = HeaderMap::new(); headers.insert("x-compute-type", "gpu+optimized".parse().unwrap()); headers.insert( "x-compute-time", total_time.as_millis().to_string().parse().unwrap(), ); headers.insert( "x-compute-characters", compute_characters.to_string().parse().unwrap(), ); headers.insert( "x-total-time", total_time.as_millis().to_string().parse().unwrap(), ); headers.insert( "x-validation-time", validation_time.as_millis().to_string().parse().unwrap(), ); headers.insert( "x-queue-time", queue_time.as_millis().to_string().parse().unwrap(), ); headers.insert( "x-inference-time", inference_time.as_millis().to_string().parse().unwrap(), ); headers.insert( "x-time-per-token", time_per_token.as_millis().to_string().parse().unwrap(), ); // Metrics metrics::increment_counter!("tgi_request_success"); metrics::histogram!("tgi_request_duration", total_time.as_secs_f64()); metrics::histogram!( "tgi_request_validation_duration", validation_time.as_secs_f64() ); metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64()); metrics::histogram!( "tgi_request_inference_duration", inference_time.as_secs_f64() ); metrics::histogram!( "tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64() ); metrics::histogram!( "tgi_request_generated_tokens", response.generated_text.generated_tokens as f64 ); // Send response let mut output_text = response.generated_text.text; if let Some(prompt) = add_prompt { output_text = prompt + &output_text; } tracing::debug!("Output: {}", output_text); tracing::info!("Success"); let response = GenerateResponse { generated_text: output_text, details, }; Ok((headers, Json(response))) } /// Generate a stream of token using Server-Sent Events #[utoipa::path( post, tag = "Text Generation Inference", path = "/generate_stream", request_body = GenerateRequest, responses( (status = 200, description = "Generated Text", body = StreamResponse, content_type = "text/event-stream"), (status = 424, description = "Generation Error", body = ErrorResponse, example = json ! ({"error": "Request failed during generation"}), content_type = "text/event-stream"), (status = 429, description = "Model is overloaded", body = ErrorResponse, example = json ! ({"error": "Model is overloaded"}), content_type = "text/event-stream"), (status = 422, description = "Input validation error", body = ErrorResponse, example = json ! ({"error": "Input validation error"}), content_type = "text/event-stream"), (status = 500, description = "Incomplete generation", body = ErrorResponse, example = json ! ({"error": "Incomplete generation"}), content_type = "text/event-stream"), ) )] #[instrument( skip_all, fields( parameters = ? 
req.parameters, total_time, validation_time, queue_time, inference_time, time_per_token, seed, ) )] async fn generate_stream( Extension(infer): Extension<Infer>, Json(req): Json<GenerateRequest>, ) -> ( HeaderMap, Sse<impl Stream<Item = Result<Event, Infallible>>>, ) { let span = tracing::Span::current(); let start_time = Instant::now(); metrics::increment_counter!("tgi_request_count"); tracing::debug!("Input: {}", req.inputs); let compute_characters = req.inputs.chars().count(); let mut headers = HeaderMap::new(); headers.insert("x-compute-type", "gpu+optimized".parse().unwrap()); headers.insert( "x-compute-characters", compute_characters.to_string().parse().unwrap(), ); headers.insert("X-Accel-Buffering", "no".parse().unwrap()); let stream = async_stream::stream! { // Inference let mut end_reached = false; let mut error = false; let mut add_prompt = None; if req.parameters.return_full_text.unwrap_or(false) { add_prompt = Some(req.inputs.clone()); } let details = req.parameters.details; let best_of = req.parameters.best_of.unwrap_or(1); if best_of != 1 { let err = InferError::from(ValidationError::BestOfStream); metrics::increment_counter!("tgi_request_failure", "err" => "validation"); tracing::error!("{err}"); yield Ok(Event::from(err)); } else if req.parameters.decoder_input_details { let err = InferError::from(ValidationError::PrefillDetailsStream); metrics::increment_counter!("tgi_request_failure", "err" => "validation"); tracing::error!("{err}"); yield Ok(Event::from(err)); } else { match infer.generate_stream(req).instrument(info_span!(parent: &span, "async_stream")).await { // Keep permit as long as generate_stream lives Ok((_permit, mut response_stream)) => { // Server-Sent Event stream while let Some(response) = response_stream.next().await { match response { Ok(response) => { match response { // Prefill is ignored InferStreamResponse::Prefill(_) => {} // Yield event for every new token InferStreamResponse::Intermediate{ token, top_tokens, } => { tracing::debug!(parent: &span, "Token: {:?}", token); // StreamResponse let stream_token = StreamResponse { token, top_tokens, generated_text: None, details: None, }; yield Ok(Event::default().json_data(stream_token).unwrap()) } // Yield event for last token and compute timings InferStreamResponse::End { token, generated_text, start, queued, top_tokens, } => { // Token details let details = match details { true => Some(StreamDetails { finish_reason: FinishReason::from(generated_text.finish_reason), generated_tokens: generated_text.generated_tokens, seed: generated_text.seed, }), false => None, }; // Timings let total_time = start_time.elapsed(); let validation_time = queued - start_time; let queue_time = start - queued; let inference_time = Instant::now() - start; let time_per_token = inference_time / generated_text.generated_tokens; // Tracing metadata span.record("total_time", format!("{total_time:?}")); span.record("validation_time", format!("{validation_time:?}")); span.record("queue_time", format!("{queue_time:?}")); span.record("inference_time", format!("{inference_time:?}")); span.record("time_per_token", format!("{time_per_token:?}")); span.record("seed", format!("{:?}", generated_text.seed)); // Metrics metrics::increment_counter!("tgi_request_success"); metrics::histogram!("tgi_request_duration", total_time.as_secs_f64()); metrics::histogram!("tgi_request_validation_duration", validation_time.as_secs_f64()); metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64()); 
metrics::histogram!("tgi_request_inference_duration", inference_time.as_secs_f64()); metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64()); metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64); // StreamResponse end_reached = true; let mut output_text = generated_text.text; if let Some(prompt) = add_prompt { output_text = prompt + &output_text; } tracing::debug!(parent: &span, "Output: {}", output_text); tracing::info!(parent: &span, "Success"); let stream_token = StreamResponse { token, top_tokens, generated_text: Some(output_text), details }; yield Ok(Event::default().json_data(stream_token).unwrap()); break; } } } // yield error Err(err) => { error = true; yield Ok(Event::from(err)); break; } } } }, // yield error Err(err) => { error = true; yield Ok(Event::from(err)); } } // Check if generation reached the end // Skip if we already sent an error if !end_reached && !error { let err = InferError::IncompleteGeneration; metrics::increment_counter!("tgi_request_failure", "err" => "incomplete"); tracing::error!("{err}"); yield Ok(Event::from(err)); } } }; (headers, Sse::new(stream).keep_alive(KeepAlive::default())) } /// Prometheus metrics scrape endpoint #[utoipa::path( get, tag = "Text Generation Inference", path = "/metrics", responses((status = 200, description = "Prometheus Metrics", body = String)) )] async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String { prom_handle.render() } /// Serving method #[allow(clippy::too_many_arguments)] pub async fn run( model_info: HubModelInfo, shard_info: ShardInfo, compat_return_full_text: bool, max_concurrent_requests: usize, max_best_of: usize, max_stop_sequences: usize, max_top_n_tokens: u32, max_input_length: usize, max_total_tokens: usize, waiting_served_ratio: f32, max_batch_prefill_tokens: u32, max_batch_total_tokens: u32, max_waiting_tokens: usize, client: ShardedClient, tokenizer: Option<Tokenizer>, validation_workers: usize, addr: SocketAddr, allow_origin: Option<AllowOrigin>, ngrok: bool, ngrok_authtoken: Option<String>, ngrok_edge: Option<String>, ) -> Result<(), axum::BoxError> { // OpenAPI documentation #[derive(OpenApi)] #[openapi( paths( health, get_model_info, compat_generate, generate, generate_stream, metrics, ), components( schemas( Info, CompatGenerateRequest, GenerateRequest, GenerateParameters, PrefillToken, Token, GenerateResponse, BestOfSequence, Details, FinishReason, StreamResponse, StreamDetails, ErrorResponse, ) ), tags( (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API") ), info( title = "Text Generation Inference", license( name = "Apache 2.0", url = "https://www.apache.org/licenses/LICENSE-2.0" ) ) )] struct ApiDoc; // Create state let validation = Validation::new( validation_workers, tokenizer, max_best_of, max_stop_sequences, max_top_n_tokens, max_input_length, max_total_tokens, ); let generation_health = Arc::new(AtomicBool::new(false)); let health_ext = Health::new(client.clone(), generation_health.clone()); let infer = Infer::new( client, validation, waiting_served_ratio, max_batch_prefill_tokens, max_batch_total_tokens, max_waiting_tokens, max_concurrent_requests, shard_info.requires_padding, shard_info.window_size, generation_health, ); // Duration buckets let duration_matcher = Matcher::Suffix(String::from("duration")); let n_duration_buckets = 35; let mut duration_buckets = Vec::with_capacity(n_duration_buckets); // Minimum duration in seconds let mut value = 0.0001; for _ 
in 0..n_duration_buckets { // geometric sequence value *= 1.5; duration_buckets.push(value); } // Input Length buckets let input_length_matcher = Matcher::Full(String::from("tgi_request_input_length")); let input_length_buckets: Vec<f64> = (0..100) .map(|x| (max_input_length as f64 / 100.0) * (x + 1) as f64) .collect(); // Generated tokens buckets let generated_tokens_matcher = Matcher::Full(String::from("tgi_request_generated_tokens")); let generated_tokens_buckets: Vec<f64> = (0..100) .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64) .collect(); // Input Length buckets let max_new_tokens_matcher = Matcher::Full(String::from("tgi_request_max_new_tokens")); let max_new_tokens_buckets: Vec<f64> = (0..100) .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64) .collect(); // Batch size buckets let batch_size_matcher = Matcher::Full(String::from("tgi_batch_next_size")); let batch_size_buckets: Vec<f64> = (0..1024).map(|x| (x + 1) as f64).collect(); // Prometheus handler let builder = PrometheusBuilder::new() .set_buckets_for_metric(duration_matcher, &duration_buckets) .unwrap() .set_buckets_for_metric(input_length_matcher, &input_length_buckets) .unwrap() .set_buckets_for_metric(generated_tokens_matcher, &generated_tokens_buckets) .unwrap() .set_buckets_for_metric(max_new_tokens_matcher, &max_new_tokens_buckets) .unwrap() .set_buckets_for_metric(batch_size_matcher, &batch_size_buckets) .unwrap(); let prom_handle = builder .install_recorder() .expect("failed to install metrics recorder"); // CORS layer let allow_origin = allow_origin.unwrap_or(AllowOrigin::any()); let cors_layer = CorsLayer::new() .allow_methods([Method::GET, Method::POST]) .allow_headers([http::header::CONTENT_TYPE]) .allow_origin(allow_origin); // Endpoint info let info = Info { model_id: model_info.model_id, model_sha: model_info.sha, model_dtype: shard_info.dtype, model_device_type: shard_info.device_type, model_pipeline_tag: model_info.pipeline_tag, max_concurrent_requests, max_best_of, max_stop_sequences, max_input_length, max_total_tokens, waiting_served_ratio, max_batch_total_tokens, max_waiting_tokens, validation_workers, version: env!("CARGO_PKG_VERSION"), sha: option_env!("VERGEN_GIT_SHA"), docker_label: option_env!("DOCKER_LABEL"), }; // Create router let app = Router::new() .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi())) // Base routes .route("/", post(compat_generate)) .route("/info", get(get_model_info)) .route("/generate", post(generate)) .route("/generate_stream", post(generate_stream)) // AWS Sagemaker route .route("/invocations", post(compat_generate)) // Base Health route .route("/health", get(health)) // Inference API health route .route("/", get(health)) // AWS Sagemaker health route .route("/ping", get(health)) // Prometheus metrics route .route("/metrics", get(metrics)) .layer(Extension(info)) .layer(Extension(health_ext.clone())) .layer(Extension(compat_return_full_text)) .layer(Extension(infer)) .layer(Extension(prom_handle.clone())) .layer(OtelAxumLayer::default()) .layer(cors_layer); if ngrok { #[cfg(feature = "ngrok")] { use ngrok::config::TunnelBuilder; let _ = addr; let authtoken = ngrok_authtoken.expect("`ngrok-authtoken` must be set when using ngrok tunneling"); let edge = ngrok_edge.expect("`ngrok-edge` must be set when using ngrok tunneling"); let tunnel = ngrok::Session::builder() .authtoken(authtoken) .connect() .await .unwrap() .labeled_tunnel() .label("edge", edge); let listener = tunnel.listen().await.unwrap(); // Run prom metrics and 
health locally too
            tokio::spawn(
                axum::Server::bind(&addr)
                    .serve(
                        Router::new()
                            .route("/health", get(health))
                            .route("/metrics", get(metrics))
                            .layer(Extension(health_ext))
                            .layer(Extension(prom_handle))
                            .into_make_service(),
                    )
                    // Wait until all requests are finished to shut down
                    .with_graceful_shutdown(shutdown_signal()),
            );

            // Run server
            axum::Server::builder(listener)
                .serve(app.into_make_service())
                // Wait until all requests are finished to shut down
                .with_graceful_shutdown(shutdown_signal())
                .await?;
        }
        #[cfg(not(feature = "ngrok"))]
        {
            // Bind the ngrok arguments so they are not reported as unused
            // when the `ngrok` feature is disabled.
            let _ngrok_authtoken = ngrok_authtoken;
            let _ngrok_edge = ngrok_edge;

            panic!("`text-generation-router` was compiled without the `ngrok` feature");
        }
    } else {
        // Run server
        axum::Server::bind(&addr)
            .serve(app.into_make_service())
            // Wait until all requests are finished to shut down
            .with_graceful_shutdown(shutdown_signal())
            .await?;
    }
    Ok(())
}

/// Shutdown signal handler
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
    opentelemetry::global::shutdown_tracer_provider();
}

impl From<i32> for FinishReason {
    fn from(finish_reason: i32) -> Self {
        let finish_reason = text_generation_client::FinishReason::try_from(finish_reason).unwrap();
        match finish_reason {
            text_generation_client::FinishReason::Length => FinishReason::Length,
            text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
        }
    }
}

/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            })
            .unwrap()
    }
}
0
hf_public_repos/text-generation-inference/router
hf_public_repos/text-generation-inference/router/src/lib.rs
mod health;
/// Text Generation Inference Webserver
mod infer;
mod queue;
pub mod server;
mod validation;

use infer::Infer;
use queue::{Entry, Queue};
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
use validation::Validation;

/// Hub type
#[derive(Clone, Debug, Deserialize)]
pub struct HubModelInfo {
    #[serde(rename(deserialize = "id"))]
    pub model_id: String,
    pub sha: Option<String>,
    pub pipeline_tag: Option<String>,
}

#[derive(Clone, Debug, Serialize, ToSchema)]
pub struct Info {
    /// Model info
    #[schema(example = "bigscience/bloom-560m")]
    pub model_id: String,
    #[schema(nullable = true, example = "e985a63cdc139290c5f700ff1929f0b5942cced2")]
    pub model_sha: Option<String>,
    #[schema(example = "torch.float16")]
    pub model_dtype: String,
    #[schema(example = "cuda")]
    pub model_device_type: String,
    #[schema(nullable = true, example = "text-generation")]
    pub model_pipeline_tag: Option<String>,
    /// Router Parameters
    #[schema(example = "128")]
    pub max_concurrent_requests: usize,
    #[schema(example = "2")]
    pub max_best_of: usize,
    #[schema(example = "4")]
    pub max_stop_sequences: usize,
    #[schema(example = "1024")]
    pub max_input_length: usize,
    #[schema(example = "2048")]
    pub max_total_tokens: usize,
    #[schema(example = "1.2")]
    pub waiting_served_ratio: f32,
    #[schema(example = "32000")]
    pub max_batch_total_tokens: u32,
    #[schema(example = "20")]
    pub max_waiting_tokens: usize,
    #[schema(example = "2")]
    pub validation_workers: usize,
    /// Router Info
    #[schema(example = "0.5.0")]
    pub version: &'static str,
    #[schema(nullable = true, example = "null")]
    pub sha: Option<&'static str>,
    #[schema(nullable = true, example = "null")]
    pub docker_label: Option<&'static str>,
}

#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct GenerateParameters {
    #[serde(default)]
    #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 1)]
    pub best_of: Option<usize>,
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        nullable = true,
        default = "null",
        example = 0.5
    )]
    pub temperature: Option<f32>,
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        nullable = true,
        default = "null",
        example = 1.03
    )]
    pub repetition_penalty: Option<f32>,
    #[serde(default)]
    #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 10)]
    pub top_k: Option<i32>,
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        maximum = 1.0,
        nullable = true,
        default = "null",
        example = 0.95
    )]
    pub top_p: Option<f32>,
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        maximum = 1.0,
        nullable = true,
        default = "null",
        example = 0.95
    )]
    pub typical_p: Option<f32>,
    #[serde(default)]
    #[schema(default = "false", example = true)]
    pub do_sample: bool,
    #[serde(default = "default_max_new_tokens")]
    #[schema(nullable = true, default = "null", example = "20")]
    pub max_new_tokens: Option<u32>,
    #[serde(default)]
    #[schema(nullable = true, default = "null", example = false)]
    pub return_full_text: Option<bool>,
    #[serde(default)]
    #[schema(inline, max_items = 4, example = json !
(["photographer"]))] pub stop: Vec<String>, #[serde(default)] #[schema(nullable = true, default = "null", example = "null")] pub truncate: Option<usize>, #[serde(default)] #[schema(default = "false", example = true)] pub watermark: bool, #[serde(default)] #[schema(default = "true")] pub details: bool, #[serde(default)] #[schema(default = "true")] pub decoder_input_details: bool, #[serde(default)] #[schema( exclusive_minimum = 0, nullable = true, default = "null", example = "null" )] pub seed: Option<u64>, #[serde(default)] #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 5)] pub top_n_tokens: Option<u32>, } fn default_max_new_tokens() -> Option<u32> { None } fn default_parameters() -> GenerateParameters { GenerateParameters { best_of: None, temperature: None, repetition_penalty: None, top_k: None, top_p: None, typical_p: None, do_sample: false, max_new_tokens: default_max_new_tokens(), return_full_text: None, stop: Vec::new(), truncate: None, watermark: false, details: false, decoder_input_details: false, seed: None, top_n_tokens: None, } } #[derive(Clone, Debug, Deserialize, ToSchema)] pub(crate) struct GenerateRequest { #[schema(example = "My name is Olivier and I")] pub inputs: String, #[serde(default = "default_parameters")] pub parameters: GenerateParameters, } #[derive(Clone, Debug, Deserialize, ToSchema)] pub(crate) struct CompatGenerateRequest { #[schema(example = "My name is Olivier and I")] pub inputs: String, #[serde(default = "default_parameters")] pub parameters: GenerateParameters, #[serde(default)] #[schema(default = "false")] pub stream: bool, } impl From<CompatGenerateRequest> for GenerateRequest { fn from(req: CompatGenerateRequest) -> Self { Self { inputs: req.inputs, parameters: req.parameters, } } } #[derive(Debug, Serialize, ToSchema)] pub struct PrefillToken { #[schema(example = 0)] id: u32, #[schema(example = "test")] text: String, #[schema(nullable = true, example = - 0.34)] logprob: f32, } #[derive(Debug, Serialize, ToSchema)] pub struct Token { #[schema(example = 0)] id: u32, #[schema(example = "test")] text: String, #[schema(nullable = true, example = - 0.34)] logprob: f32, #[schema(example = "false")] special: bool, } #[derive(Serialize, ToSchema)] #[serde(rename_all(serialize = "snake_case"))] pub(crate) enum FinishReason { #[schema(rename = "length")] Length, #[serde(rename = "eos_token")] #[schema(rename = "eos_token")] EndOfSequenceToken, #[schema(rename = "stop_sequence")] StopSequence, } #[derive(Serialize, ToSchema)] pub(crate) struct BestOfSequence { #[schema(example = "test")] pub generated_text: String, #[schema(example = "length")] pub finish_reason: FinishReason, #[schema(example = 1)] pub generated_tokens: u32, #[schema(nullable = true, example = 42)] pub seed: Option<u64>, pub prefill: Vec<PrefillToken>, pub tokens: Vec<Token>, #[serde(skip_serializing_if = "Vec::is_empty")] pub top_tokens: Vec<Vec<Token>>, } #[derive(Serialize, ToSchema)] pub(crate) struct Details { #[schema(example = "length")] pub finish_reason: FinishReason, #[schema(example = 1)] pub generated_tokens: u32, #[schema(nullable = true, example = 42)] pub seed: Option<u64>, pub prefill: Vec<PrefillToken>, pub tokens: Vec<Token>, #[serde(skip_serializing_if = "Option::is_none")] pub best_of_sequences: Option<Vec<BestOfSequence>>, #[serde(skip_serializing_if = "Vec::is_empty")] pub top_tokens: Vec<Vec<Token>>, } #[derive(Serialize, ToSchema)] pub(crate) struct GenerateResponse { #[schema(example = "test")] pub generated_text: String, 
#[serde(skip_serializing_if = "Option::is_none")] pub details: Option<Details>, } #[derive(Serialize, ToSchema)] pub(crate) struct StreamDetails { #[schema(example = "length")] pub finish_reason: FinishReason, #[schema(example = 1)] pub generated_tokens: u32, #[schema(nullable = true, example = 42)] pub seed: Option<u64>, } #[derive(Serialize, ToSchema)] pub(crate) struct StreamResponse { pub token: Token, #[serde(skip_serializing_if = "Vec::is_empty")] pub top_tokens: Vec<Token>, #[schema(nullable = true, default = "null", example = "test")] pub generated_text: Option<String>, #[schema(nullable = true, default = "null")] pub details: Option<StreamDetails>, } #[derive(Serialize, ToSchema)] pub(crate) struct ErrorResponse { pub error: String, pub error_type: String, } #[cfg(test)] mod tests { use std::io::Write; use tokenizers::Tokenizer; pub(crate) async fn get_tokenizer() -> Tokenizer { let filename = std::path::Path::new("tokenizer.json"); if !filename.exists() { let content = reqwest::get("https://huggingface.co/gpt2/raw/main/tokenizer.json") .await .unwrap() .bytes() .await .unwrap(); let tmp_filename = "tokenizer.json.temp"; let mut file = std::fs::File::create(tmp_filename).unwrap(); file.write_all(&content).unwrap(); // Re-check if another process has written this file maybe. if !filename.exists() { std::fs::rename(tmp_filename, filename).unwrap() } } Tokenizer::from_file("tokenizer.json").unwrap() } }
0
hf_public_repos/text-generation-inference/router
hf_public_repos/text-generation-inference/router/src/main.rs
/// Text Generation Inference webserver entrypoint use axum::http::HeaderValue; use clap::Parser; use opentelemetry::sdk::propagation::TraceContextPropagator; use opentelemetry::sdk::trace; use opentelemetry::sdk::trace::Sampler; use opentelemetry::sdk::Resource; use opentelemetry::{global, KeyValue}; use opentelemetry_otlp::WithExportConfig; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::Path; use std::time::Duration; use text_generation_client::{ClientError, ShardedClient}; use text_generation_router::{server, HubModelInfo}; use thiserror::Error; use tokenizers::{FromPretrainedParameters, Tokenizer}; use tower_http::cors::AllowOrigin; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, Layer}; /// App Configuration #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Args { #[clap(default_value = "128", long, env)] max_concurrent_requests: usize, #[clap(default_value = "2", long, env)] max_best_of: usize, #[clap(default_value = "4", long, env)] max_stop_sequences: usize, #[clap(default_value = "5", long, env)] max_top_n_tokens: u32, #[clap(default_value = "1024", long, env)] max_input_length: usize, #[clap(default_value = "2048", long, env)] max_total_tokens: usize, #[clap(default_value = "1.2", long, env)] waiting_served_ratio: f32, #[clap(default_value = "4096", long, env)] max_batch_prefill_tokens: u32, #[clap(long, env)] max_batch_total_tokens: Option<u32>, #[clap(default_value = "20", long, env)] max_waiting_tokens: usize, #[clap(default_value = "0.0.0.0", long, env)] hostname: String, #[clap(default_value = "3000", long, short, env)] port: u16, #[clap(default_value = "/tmp/text-generation-server-0", long, env)] master_shard_uds_path: String, #[clap(default_value = "bigscience/bloom", long, env)] tokenizer_name: String, #[clap(long, env)] revision: Option<String>, #[clap(default_value = "2", long, env)] validation_workers: usize, #[clap(long, env)] json_output: bool, #[clap(long, env)] otlp_endpoint: Option<String>, #[clap(long, env)] cors_allow_origin: Option<Vec<String>>, #[clap(long, env)] ngrok: bool, #[clap(long, env)] ngrok_authtoken: Option<String>, #[clap(long, env)] ngrok_edge: Option<String>, } fn main() -> Result<(), RouterError> { // Get args let args = Args::parse(); // Pattern match configuration let Args { max_concurrent_requests, max_best_of, max_stop_sequences, max_top_n_tokens, max_input_length, max_total_tokens, waiting_served_ratio, max_batch_prefill_tokens, max_batch_total_tokens, max_waiting_tokens, hostname, port, master_shard_uds_path, tokenizer_name, revision, validation_workers, json_output, otlp_endpoint, cors_allow_origin, ngrok, ngrok_authtoken, ngrok_edge, } = args; // Validate args if max_input_length >= max_total_tokens { return Err(RouterError::ArgumentValidation( "`max_input_length` must be < `max_total_tokens`".to_string(), )); } if max_input_length as u32 > max_batch_prefill_tokens { return Err(RouterError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be >= `max_input_length`. Given: {max_batch_prefill_tokens} and {max_input_length}"))); } if validation_workers == 0 { return Err(RouterError::ArgumentValidation( "`validation_workers` must be > 0".to_string(), )); } if let Some(ref max_batch_total_tokens) = max_batch_total_tokens { if max_batch_prefill_tokens > *max_batch_total_tokens { return Err(RouterError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. 
Given: {max_batch_prefill_tokens} and {max_batch_total_tokens}"))); } if max_total_tokens as u32 > *max_batch_total_tokens { return Err(RouterError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {max_total_tokens} and {max_batch_total_tokens}"))); } } // CORS allowed origins // map to go inside the option and then map to parse from String to HeaderValue // Finally, convert to AllowOrigin let cors_allow_origin: Option<AllowOrigin> = cors_allow_origin.map(|cors_allow_origin| { AllowOrigin::list( cors_allow_origin .iter() .map(|origin| origin.parse::<HeaderValue>().unwrap()), ) }); // Parse Huggingface hub token let authorization_token = std::env::var("HUGGING_FACE_HUB_TOKEN").ok(); // Tokenizer instance // This will only be used to validate payloads let local_path = Path::new(&tokenizer_name); let local_model = local_path.exists() && local_path.is_dir(); let tokenizer = if local_model { // Load local tokenizer Tokenizer::from_file(local_path.join("tokenizer.json")).ok() } else { // Download and instantiate tokenizer // We need to download it outside of the Tokio runtime let params = FromPretrainedParameters { revision: revision.clone().unwrap_or("main".to_string()), auth_token: authorization_token.clone(), ..Default::default() }; Tokenizer::from_pretrained(tokenizer_name.clone(), Some(params)).ok() }; // Launch Tokio runtime tokio::runtime::Builder::new_multi_thread() .enable_all() .build()? .block_on(async { init_logging(otlp_endpoint, json_output); if tokenizer.is_none() { tracing::warn!( "Could not find a fast tokenizer implementation for {tokenizer_name}" ); tracing::warn!("Rust input length validation and truncation is disabled"); } // Get Model info let model_info = match local_model { true => HubModelInfo { model_id: tokenizer_name.clone(), sha: None, pipeline_tag: None, }, false => get_model_info(&tokenizer_name, revision, authorization_token) .await .unwrap_or_else(|| { tracing::warn!("Could not retrieve model info from the Hugging Face hub."); HubModelInfo { model_id: tokenizer_name.to_string(), sha: None, pipeline_tag: None, } }), }; // if pipeline-tag == text-generation we default to return_full_text = true let compat_return_full_text = match &model_info.pipeline_tag { None => { tracing::warn!("no pipeline tag found for model {tokenizer_name}"); false } Some(pipeline_tag) => pipeline_tag.as_str() == "text-generation", }; // Instantiate sharded client from the master unix socket let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path) .await .map_err(RouterError::Connection)?; // Clear the cache; useful if the webserver rebooted sharded_client .clear_cache(None) .await .map_err(RouterError::Cache)?; // Get info from the shard let shard_info = sharded_client.info().await.map_err(RouterError::Info)?; // Warmup model tracing::info!("Warming up model"); let max_supported_batch_total_tokens = match sharded_client .warmup(max_input_length as u32, max_batch_prefill_tokens, max_total_tokens as u32) .await .map_err(RouterError::Warmup)? 
{ // Older models do not support automatic max-batch-total-tokens None => { let max_batch_total_tokens = max_batch_total_tokens.unwrap_or( 16000.max((max_total_tokens as u32).max(max_batch_prefill_tokens)), ); tracing::warn!("Model does not support automatic max batch total tokens"); max_batch_total_tokens } // Flash attention models return their max supported total tokens Some(max_supported_batch_total_tokens) => { // Warn if user added his own max-batch-total-tokens as we will ignore it if max_batch_total_tokens.is_some() { tracing::warn!( "`--max-batch-total-tokens` is deprecated for Flash \ Attention models." ); tracing::warn!( "Inferred max batch total tokens: {max_supported_batch_total_tokens}" ); } if max_total_tokens as u32 > max_supported_batch_total_tokens { return Err(RouterError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {max_total_tokens} and {max_supported_batch_total_tokens}"))); } max_supported_batch_total_tokens } }; tracing::info!("Setting max batch total tokens to {max_supported_batch_total_tokens}"); tracing::info!("Connected"); let addr = match hostname.parse() { Ok(ip) => SocketAddr::new(ip, port), Err(_) => { tracing::warn!("Invalid hostname, defaulting to 0.0.0.0"); SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port) } }; // Run server server::run( model_info, shard_info, compat_return_full_text, max_concurrent_requests, max_best_of, max_stop_sequences, max_top_n_tokens, max_input_length, max_total_tokens, waiting_served_ratio, max_batch_prefill_tokens, max_supported_batch_total_tokens, max_waiting_tokens, sharded_client, tokenizer, validation_workers, addr, cors_allow_origin, ngrok, ngrok_authtoken, ngrok_edge, ) .await?; Ok(()) }) } /// Init logging using env variables LOG_LEVEL and LOG_FORMAT: /// - otlp_endpoint is an optional URL to an Open Telemetry collector /// - LOG_LEVEL may be TRACE, DEBUG, INFO, WARN or ERROR (default to INFO) /// - LOG_FORMAT may be TEXT or JSON (default to TEXT) fn init_logging(otlp_endpoint: Option<String>, json_output: bool) { let mut layers = Vec::new(); // STDOUT/STDERR layer let fmt_layer = tracing_subscriber::fmt::layer() .with_file(true) .with_line_number(true); let fmt_layer = match json_output { true => fmt_layer.json().flatten_event(true).boxed(), false => fmt_layer.boxed(), }; layers.push(fmt_layer); // OpenTelemetry tracing layer if let Some(otlp_endpoint) = otlp_endpoint { global::set_text_map_propagator(TraceContextPropagator::new()); let tracer = opentelemetry_otlp::new_pipeline() .tracing() .with_exporter( opentelemetry_otlp::new_exporter() .tonic() .with_endpoint(otlp_endpoint), ) .with_trace_config( trace::config() .with_resource(Resource::new(vec![KeyValue::new( "service.name", "text-generation-inference.router", )])) .with_sampler(Sampler::AlwaysOn), ) .install_batch(opentelemetry::runtime::Tokio); if let Ok(tracer) = tracer { layers.push(tracing_opentelemetry::layer().with_tracer(tracer).boxed()); init_tracing_opentelemetry::init_propagator().unwrap(); }; } // Filter events with LOG_LEVEL let env_filter = EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info")); tracing_subscriber::registry() .with(env_filter) .with(layers) .init(); } /// get model info from the Huggingface Hub pub async fn get_model_info( model_id: &str, revision: Option<String>, token: Option<String>, ) -> Option<HubModelInfo> { let revision = match revision { None => { tracing::warn!("`--revision` is not set"); tracing::warn!("We strongly advise to set it to a known 
supported commit.");
            "main".to_string()
        }
        Some(revision) => revision,
    };

    let client = reqwest::Client::new();
    // Poor man's urlencode
    let revision = revision.replace('/', "%2F");
    let url = format!("https://huggingface.co/api/models/{model_id}/revision/{revision}");
    let mut builder = client.get(url).timeout(Duration::from_secs(5));
    if let Some(token) = token {
        builder = builder.bearer_auth(token);
    }

    let response = builder.send().await.ok()?;

    if response.status().is_success() {
        let hub_model_info: HubModelInfo =
            serde_json::from_str(&response.text().await.ok()?).ok()?;
        if let Some(sha) = &hub_model_info.sha {
            tracing::info!(
                "Serving revision {sha} of model {}",
                hub_model_info.model_id
            );
        }
        Some(hub_model_info)
    } else {
        None
    }
}

#[derive(Debug, Error)]
enum RouterError {
    #[error("Argument validation error: {0}")]
    ArgumentValidation(String),
    #[error("Unable to connect to the Python model shards: {0}")]
    Connection(ClientError),
    #[error("Unable to clear the Python model shards cache: {0}")]
    Cache(ClientError),
    #[error("Unable to get the Python model shards info: {0}")]
    Info(ClientError),
    #[error("Unable to warmup the Python model shards: {0}")]
    Warmup(ClientError),
    #[error("Tokio runtime failed to start: {0}")]
    Tokio(#[from] std::io::Error),
    #[error("Axum webserver failed: {0}")]
    Axum(#[from] axum::BoxError),
}
0
hf_public_repos/text-generation-inference/router
hf_public_repos/text-generation-inference/router/src/validation.rs
/// Payload validation logic use crate::validation::ValidationError::{BestOfSampling, BestOfSeed, EmptyInput}; use crate::{GenerateParameters, GenerateRequest}; use rand::{thread_rng, Rng}; use text_generation_client::{NextTokenChooserParameters, StoppingCriteriaParameters}; use thiserror::Error; use tokenizers::tokenizer::Tokenizer; use tokenizers::TruncationDirection; use tokio::sync::mpsc; use tokio::sync::oneshot; use tracing::{instrument, Span}; /// Validation #[derive(Debug, Clone)] pub struct Validation { /// Validation parameters max_best_of: usize, max_stop_sequences: usize, max_top_n_tokens: u32, max_input_length: usize, max_total_tokens: usize, /// Channel to communicate with the background tokenization task sender: Option<mpsc::UnboundedSender<TokenizerRequest>>, } impl Validation { pub(crate) fn new( workers: usize, tokenizer: Option<Tokenizer>, max_best_of: usize, max_stop_sequences: usize, max_top_n_tokens: u32, max_input_length: usize, max_total_tokens: usize, ) -> Self { // If we have a fast tokenizer let sender = if let Some(tokenizer) = tokenizer { // Create round robin channel let (validation_sender, validation_round_robin_receiver) = mpsc::unbounded_channel(); let mut senders = Vec::with_capacity(workers); // Create workers for _ in 0..workers { let tokenizer_clone = tokenizer.clone(); let (tokenizer_sender, tokenizer_receiver) = mpsc::unbounded_channel(); senders.push(tokenizer_sender); // Spawn worker tokio::task::spawn_blocking(move || { tokenizer_worker(tokenizer_clone, tokenizer_receiver) }); } // Create tokenization round robin task tokio::spawn(round_robin_task(validation_round_robin_receiver, senders)); Some(validation_sender) } else { None }; Self { max_best_of, sender, max_stop_sequences, max_top_n_tokens, max_input_length, max_total_tokens, } } #[instrument(skip(self, inputs))] async fn validate_input( &self, inputs: String, truncate: Option<usize>, max_new_tokens: Option<u32>, ) -> Result<(String, usize, u32), ValidationError> { // If we have a fast tokenizer if let Some(sender) = &self.sender { // Create response channel let (response_sender, response_receiver) = oneshot::channel(); // Send request to the background validation task // Unwrap is safe here sender .send(((inputs, truncate), response_sender, Span::current())) .unwrap(); // Await on response channel // Unwrap is safe here let (inputs, input_length) = response_receiver.await.unwrap()?; // Get total tokens let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens { max_new_tokens } else { self.max_total_tokens.saturating_sub(input_length) as u32 }; let total_tokens = input_length + max_new_tokens as usize; // Validate MaxTotalTokens if total_tokens > self.max_total_tokens { return Err(ValidationError::MaxTotalTokens( self.max_total_tokens, input_length, max_new_tokens, )); } // Validate InputLength if input_length > self.max_input_length { return Err(ValidationError::InputLength( self.max_input_length, input_length, )); } metrics::histogram!("tgi_request_input_length", input_length as f64); Ok((inputs, input_length, max_new_tokens)) } // Return inputs without validation else { // In this case, we don't know the real length in tokens of the inputs // However, the inputs will be truncated by the python servers // We make sure that truncate + max_new_tokens <= self.max_total_tokens let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens { max_new_tokens } else if let Some(truncate) = truncate { self.max_total_tokens.saturating_sub(truncate) as u32 } else { return 
Err(ValidationError::UnsetMaxNewTokens); }; let input_length = truncate.unwrap_or(self.max_input_length); // Validate MaxNewTokens if (input_length as u32 + max_new_tokens) > self.max_total_tokens as u32 { return Err(ValidationError::MaxNewTokens( self.max_total_tokens - self.max_input_length, max_new_tokens, )); } Ok((inputs, input_length, max_new_tokens)) } } /// Validate a payload and get the number of tokens in the input #[instrument(skip_all)] pub(crate) async fn validate( &self, request: GenerateRequest, ) -> Result<ValidGenerateRequest, ValidationError> { let GenerateParameters { best_of, temperature, repetition_penalty, top_k, top_p, typical_p, do_sample, max_new_tokens, stop: stop_sequences, truncate, seed, watermark, decoder_input_details, top_n_tokens, .. } = request.parameters; // sampling must be true when best_of > 1 let best_of = best_of.unwrap_or(1); let sampling = do_sample || temperature.is_some() || top_k.is_some() || top_p.is_some() || typical_p.is_some(); if best_of > 1 && !sampling { return Err(BestOfSampling); } let temperature = temperature.unwrap_or(1.0); if temperature <= 0.0 { return Err(ValidationError::Temperature); } let repetition_penalty = repetition_penalty.unwrap_or(1.0); if repetition_penalty <= 0.0 { return Err(ValidationError::RepetitionPenalty); } // Different because the proto default value is not a valid value // for the user let top_p = top_p .map(|value| { if value <= 0.0 || value >= 1.0 { return Err(ValidationError::TopP); } Ok(value) }) .unwrap_or(Ok(1.0))?; let typical_p = typical_p .map(|value| { if value <= 0.0 || value >= 1.0 { return Err(ValidationError::TypicalP); } Ok(value) }) .unwrap_or(Ok(1.0))?; let top_k: u32 = top_k .map(|value| { if value <= 0 { return Err(ValidationError::TopK); } Ok(value as u32) }) .unwrap_or(Ok(0))?; if max_new_tokens == Some(0) { return Err(ValidationError::NegativeMaxNewTokens); } if stop_sequences.len() > self.max_stop_sequences { return Err(ValidationError::StopSequence( self.max_stop_sequences, stop_sequences.len(), )); } // If seed is None, assign a random one let seed = match seed { None => thread_rng().gen(), Some(seed) => { if best_of > 1 { return Err(BestOfSeed); } seed } }; let top_n_tokens = top_n_tokens .map(|value| { if value > self.max_top_n_tokens { return Err(ValidationError::TopNTokens(self.max_top_n_tokens, value)); } Ok(value) }) .unwrap_or(Ok(0))?; // Check if inputs is empty if request.inputs.is_empty() { return Err(EmptyInput); } // Check if truncate is strictly positive and less than max_input_length let truncate = truncate .map(|value| { if value == 0 || value > self.max_input_length { return Err(ValidationError::Truncate(self.max_input_length, value)); } Ok(Some(value)) }) .unwrap_or(Ok(None))?; // Validate inputs let (inputs, input_length, max_new_tokens) = self .validate_input(request.inputs, truncate, max_new_tokens) .await?; let parameters = NextTokenChooserParameters { temperature, repetition_penalty, top_k, top_p, typical_p, do_sample, seed, watermark, }; let stopping_parameters = StoppingCriteriaParameters { max_new_tokens, stop_sequences, ignore_eos_token: false, }; metrics::histogram!("tgi_request_max_new_tokens", max_new_tokens as f64); Ok(ValidGenerateRequest { inputs, decoder_input_details, input_length: input_length as u32, truncate: truncate.unwrap_or(self.max_input_length) as u32, parameters, stopping_parameters, top_n_tokens, }) } /// Validate the best_of parameter #[instrument(skip_all)] pub(crate) fn validate_best_of(&self, best_of: usize) -> Result<usize, 
ValidationError> { if self.max_best_of == 1 && best_of != 1 { return Err(ValidationError::BestOfDisabled); } if best_of > self.max_best_of { return Err(ValidationError::BestOf(self.max_best_of, best_of)); } Ok(best_of) } } /// Round robin tokenization task async fn round_robin_task( mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>, senders: Vec<mpsc::UnboundedSender<TokenizerRequest>>, ) { loop { for sender in &senders { match receiver.recv().await { None => return, Some(request) => sender.send(request).unwrap(), }; } } } /// Start tokenization workers fn tokenizer_worker(tokenizer: Tokenizer, mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>) { // Loop over requests while let Some(((inputs, truncate), response_tx, parent_span)) = receiver.blocking_recv() { parent_span.in_scope(|| { response_tx .send(prepare_input(inputs, truncate, &tokenizer)) .unwrap_or(()) }) } } /// Get input length and optionally truncate it fn prepare_input( inputs: String, truncate: Option<usize>, tokenizer: &Tokenizer, ) -> Result<(String, usize), ValidationError> { // Get the number of tokens in the input let mut encoding = tokenizer .encode(inputs.clone(), true) .map_err(|err| ValidationError::Tokenizer(err.to_string()))?; // Optionally truncate let (inputs, input_length) = match truncate { // Truncate is some and < encoding length Some(truncate) if truncate < encoding.len() => { // truncate encoding and decode new inputs encoding.truncate(truncate, 0, TruncationDirection::Left); let inputs = tokenizer .decode(encoding.get_ids(), false) .map_err(|err| ValidationError::Tokenizer(err.to_string()))?; (inputs, encoding.len()) } // Nothing to do _ => (inputs, encoding.len()), }; Ok((inputs, input_length)) } type TokenizerRequest = ( (String, Option<usize>), oneshot::Sender<Result<(String, usize), ValidationError>>, Span, ); #[derive(Debug)] pub(crate) struct ValidGenerateRequest { pub inputs: String, pub input_length: u32, pub truncate: u32, pub decoder_input_details: bool, pub parameters: NextTokenChooserParameters, pub stopping_parameters: StoppingCriteriaParameters, pub top_n_tokens: u32, } #[derive(Error, Debug)] pub enum ValidationError { #[error("`best_of` must be > 0 and <= {0}. Given: {1}")] BestOf(usize, usize), #[error("`best_of` != 1 is not allowed for this endpoint")] BestOfDisabled, #[error("you must use sampling when `best_of` is > 1")] BestOfSampling, #[error("`seed` must not be set when `best_of` > 1")] BestOfSeed, #[error("`best_of` != 1 is not supported when streaming tokens")] BestOfStream, #[error("`top_n_tokens` must be >= 0 and <= {0}. Given: {1}")] TopNTokens(u32, u32), #[error("`top_n_tokens` != 0 is not allowed for this endpoint")] TopNTokensDisabled, #[error("`decoder_input_details` == true is not supported when streaming tokens")] PrefillDetailsStream, #[error("`temperature` must be strictly positive")] Temperature, #[error("`repetition_penalty` must be strictly positive")] RepetitionPenalty, #[error("`top_p` must be > 0.0 and < 1.0")] TopP, #[error("`top_k` must be strictly positive")] TopK, #[error("`truncate` must be strictly positive and less than {0}. Given: {1}")] Truncate(usize, usize), #[error("`typical_p` must be > 0.0 and < 1.0")] TypicalP, #[error("one of `max_new_tokens` or `truncate` must be set if a fast tokenizer is not in use")] UnsetMaxNewTokens, #[error("`max_new_tokens` must be strictly positive")] NegativeMaxNewTokens, #[error("`max_new_tokens` must be <= {0}. Given: {1}")] MaxNewTokens(usize, u32), #[error("`inputs` tokens + `max_new_tokens` must be <= {0}. 
Given: {1} `inputs` tokens and {2} `max_new_tokens`")] MaxTotalTokens(usize, usize, u32), #[error("`inputs` must have less than {0} tokens. Given: {1}")] InputLength(usize, usize), #[error("`inputs` cannot be empty")] EmptyInput, #[error("`stop` supports up to {0} stop sequences. Given: {1}")] StopSequence(usize, usize), #[error("tokenizer error {0}")] Tokenizer(String), } #[cfg(test)] mod tests { use super::*; use crate::default_parameters; use crate::tests::get_tokenizer; #[tokio::test] async fn test_validation_max_new_tokens() { let tokenizer = None; let max_best_of = 2; let max_stop_sequence = 3; let max_top_n_tokens = 4; let max_input_length = 5; let max_total_tokens = 6; let workers = 1; let validation = Validation::new( workers, tokenizer, max_best_of, max_stop_sequence, max_top_n_tokens, max_input_length, max_total_tokens, ); let max_new_tokens = 10; match validation .validate_input("Hello".to_string(), None, Some(max_new_tokens)) .await { Err(ValidationError::MaxNewTokens(1, 10)) => (), _ => panic!("Unexpected not max new tokens"), } } #[tokio::test] async fn test_validation_input_length() { let tokenizer = Some(get_tokenizer().await); let max_best_of = 2; let max_stop_sequence = 3; let max_top_n_tokens = 4; let max_input_length = 5; let max_total_tokens = 6; let workers = 1; let validation = Validation::new( workers, tokenizer, max_best_of, max_stop_sequence, max_top_n_tokens, max_input_length, max_total_tokens, ); let max_new_tokens = 10; match validation .validate_input("Hello".to_string(), None, Some(max_new_tokens)) .await { Err(ValidationError::MaxTotalTokens(6, 1, 10)) => (), _ => panic!("Unexpected not max new tokens"), } } #[tokio::test] async fn test_validation_best_of_sampling() { let tokenizer = Some(get_tokenizer().await); let max_best_of = 2; let max_stop_sequence = 3; let max_top_n_tokens = 4; let max_input_length = 5; let max_total_tokens = 6; let workers = 1; let validation = Validation::new( workers, tokenizer, max_best_of, max_stop_sequence, max_top_n_tokens, max_input_length, max_total_tokens, ); match validation .validate(GenerateRequest { inputs: "Hello".to_string(), parameters: GenerateParameters { best_of: Some(2), do_sample: false, ..default_parameters() }, }) .await { Err(ValidationError::BestOfSampling) => (), _ => panic!("Unexpected not best of sampling"), } } #[tokio::test] async fn test_validation_top_p() { let tokenizer = Some(get_tokenizer().await); let max_best_of = 2; let max_stop_sequence = 3; let max_top_n_tokens = 4; let max_input_length = 5; let max_total_tokens = 6; let workers = 1; let validation = Validation::new( workers, tokenizer, max_best_of, max_stop_sequence, max_top_n_tokens, max_input_length, max_total_tokens, ); match validation .validate(GenerateRequest { inputs: "Hello".to_string(), parameters: GenerateParameters { top_p: Some(1.0), ..default_parameters() }, }) .await { Err(ValidationError::TopP) => (), _ => panic!("Unexpected top_p"), } match validation .validate(GenerateRequest { inputs: "Hello".to_string(), parameters: GenerateParameters { top_p: Some(0.99), ..default_parameters() }, }) .await { Ok(_) => (), _ => panic!("Unexpected top_p error"), } let valid_request = validation .validate(GenerateRequest { inputs: "Hello".to_string(), parameters: GenerateParameters { top_p: None, ..default_parameters() }, }) .await .unwrap(); // top_p == 1.0 is invalid for users to ask for but it's the default resolved value. 
assert_eq!(valid_request.parameters.top_p, 1.0); } #[tokio::test] async fn test_validation_top_n_tokens() { let tokenizer = Some(get_tokenizer().await); let max_best_of = 2; let max_stop_sequences = 3; let max_top_n_tokens = 4; let max_input_length = 5; let max_total_tokens = 6; let workers = 1; let validation = Validation::new( workers, tokenizer, max_best_of, max_stop_sequences, max_top_n_tokens, max_input_length, max_total_tokens, ); match validation .validate(GenerateRequest { inputs: "Hello".to_string(), parameters: GenerateParameters { top_n_tokens: Some(5), ..default_parameters() }, }) .await { Err(ValidationError::TopNTokens(4, 5)) => (), _ => panic!("Unexpected top_n_tokens"), } validation .validate(GenerateRequest { inputs: "Hello".to_string(), parameters: GenerateParameters { top_n_tokens: Some(4), ..default_parameters() }, }) .await .unwrap(); validation .validate(GenerateRequest { inputs: "Hello".to_string(), parameters: GenerateParameters { top_n_tokens: Some(0), ..default_parameters() }, }) .await .unwrap(); let valid_request = validation .validate(GenerateRequest { inputs: "Hello".to_string(), parameters: GenerateParameters { top_n_tokens: None, ..default_parameters() }, }) .await .unwrap(); assert_eq!(valid_request.top_n_tokens, 0); } }
0
hf_public_repos/text-generation-inference/router
hf_public_repos/text-generation-inference/router/client/build.rs
use std::fs;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("cargo:rerun-if-changed=../../proto/generate.proto");
    fs::create_dir("src/pb").unwrap_or(());

    let mut config = prost_build::Config::new();
    config.protoc_arg("--experimental_allow_proto3_optional");

    tonic_build::configure()
        .build_client(true)
        .build_server(false)
        .out_dir("src/pb")
        .include_file("mod.rs")
        .compile_with_config(config, &["../../proto/generate.proto"], &["../../proto"])
        .unwrap_or_else(|e| panic!("protobuf compilation failed: {e}"));

    Ok(())
}
0
hf_public_repos/text-generation-inference/router
hf_public_repos/text-generation-inference/router/client/Cargo.toml
[package]
name = "text-generation-client"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true

[dependencies]
futures = "^0.3"
grpc-metadata = { path = "../grpc-metadata" }
prost = "^0.12"
thiserror = "^1.0"
tokio = { version = "^1.32", features = ["sync"] }
tonic = "^0.10"
tower = "^0.4"
tracing = "^0.1"

[build-dependencies]
tonic-build = "0.10.1"
prost-build = "0.12.1"
0
hf_public_repos/text-generation-inference/router/client
hf_public_repos/text-generation-inference/router/client/src/sharded_client.rs
/// Multi shard Client use crate::{Batch, CachedBatch, Client, Generation, HealthResponse, ShardInfo}; use crate::{ClientError, Result}; use futures::future::join_all; use tonic::transport::Uri; use tracing::instrument; #[derive(Debug, Clone)] /// Text Generation Inference gRPC multi client pub struct ShardedClient { clients: Vec<Client>, } impl ShardedClient { fn new(clients: Vec<Client>) -> Self { Self { clients } } /// Create a new ShardedClient from a master client. The master client will communicate with /// the other shards and returns all uris/unix sockets with the `service_discovery` gRPC method. async fn from_master_client(mut master_client: Client) -> Result<Self> { // Get all uris/unix sockets from the master client let uris = master_client.service_discovery().await?; let futures = uris.into_iter().map(Client::connect_uds); let clients: Result<Vec<Client>> = join_all(futures).await.into_iter().collect(); Ok(Self::new(clients?)) } /// Returns a client connected to the given uri pub async fn connect(uri: Uri) -> Result<Self> { let master_client = Client::connect(uri).await?; Self::from_master_client(master_client).await } /// Returns a client connected to the given unix socket pub async fn connect_uds(path: String) -> Result<Self> { let master_client = Client::connect_uds(path).await?; Self::from_master_client(master_client).await } /// Get the model info #[instrument(skip(self))] pub async fn info(&mut self) -> Result<ShardInfo> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| client.info()) .collect(); join_all(futures).await.pop().unwrap() } /// GRPC health check #[instrument(skip(self))] pub async fn health(&mut self) -> Result<HealthResponse> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| client.health()) .collect(); join_all(futures).await.pop().unwrap() } /// Clear the past generations cache #[instrument(skip(self))] pub async fn clear_cache(&mut self, batch_id: Option<u64>) -> Result<()> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| client.clear_cache(batch_id)) .collect(); join_all(futures).await.into_iter().collect() } /// Filter a cached batch #[instrument(skip(self))] pub async fn filter_batch( &mut self, batch_id: u64, request_ids: Vec<u64>, ) -> Result<Option<CachedBatch>> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| Box::pin(client.filter_batch(batch_id, request_ids.clone()))) .collect(); // all shards return the same message join_all(futures).await.pop().unwrap() } /// Warmup on a max size batch /// /// Returns the maximum amount of tokens supported by the hardware #[instrument(skip(self))] pub async fn warmup( &mut self, max_input_length: u32, max_prefill_tokens: u32, max_total_tokens: u32, ) -> Result<Option<u32>> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| { Box::pin(client.warmup(max_input_length, max_prefill_tokens, max_total_tokens)) }) .collect(); // Take the minimum value let results = join_all(futures) .await .into_iter() .collect::<Result<Vec<Option<u32>>>>()?; Ok(results.into_iter().flatten().min()) } /// Generate one token for each request in the given batch /// /// Returns Generation for each request in batch /// and the next cached batch #[instrument(skip_all, fields(id = &batch.id, size = &batch.size))] pub async fn prefill( &mut self, batch: Batch, ) -> Result<(Vec<Generation>, Option<CachedBatch>)> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| Box::pin(client.prefill(batch.clone()))) .collect(); let results: Result<Vec<(Vec<Generation>, 
Option<CachedBatch>)>> = join_all(futures).await.into_iter().collect(); merge_generations(results?) } /// Generate one token for each request in the given cached batches /// /// Returns Generation for each request in batches /// and the next cached batch #[instrument(skip_all, fields(size = batches.iter().map(|batch|{batch.size}).sum::<u32>()))] pub async fn decode( &mut self, batches: Vec<CachedBatch>, ) -> Result<(Vec<Generation>, Option<CachedBatch>)> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| Box::pin(client.decode(batches.clone()))) .collect(); let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>)>> = join_all(futures).await.into_iter().collect(); merge_generations(results?) } } /// Merge generations from the different model shards fn merge_generations( mut results: Vec<(Vec<Generation>, Option<CachedBatch>)>, ) -> Result<(Vec<Generation>, Option<CachedBatch>)> { let (mut generations, next_batch) = results.pop().ok_or(ClientError::EmptyResults)?; for (mut shard_generations, _) in results.into_iter() { generations.append(&mut shard_generations); } Ok((generations, next_batch)) }
0
hf_public_repos/text-generation-inference/router/client
hf_public_repos/text-generation-inference/router/client/src/lib.rs
//! Text Generation gRPC client library

mod client;
#[allow(clippy::derive_partial_eq_without_eq)]
mod pb;
mod sharded_client;

pub use client::Client;
pub use pb::generate::v1::HealthResponse;
pub use pb::generate::v1::InfoResponse as ShardInfo;
pub use pb::generate::v1::{
    Batch, CachedBatch, FinishReason, GeneratedText, Generation, NextTokenChooserParameters,
    PrefillTokens, Request, StoppingCriteriaParameters,
};
pub use sharded_client::ShardedClient;
use thiserror::Error;
use tonic::transport;
use tonic::Status;

#[derive(Error, Debug, Clone)]
pub enum ClientError {
    #[error("Could not connect to Text Generation server: {0}")]
    Connection(String),
    #[error("Server error: {0}")]
    Generation(String),
    #[error("Sharded results are empty")]
    EmptyResults,
}

impl From<Status> for ClientError {
    fn from(err: Status) -> Self {
        let err = Self::Generation(err.message().to_string());
        tracing::error!("{err}");
        err
    }
}

impl From<transport::Error> for ClientError {
    fn from(err: transport::Error) -> Self {
        let err = Self::Connection(err.to_string());
        tracing::error!("{err}");
        err
    }
}

pub type Result<T> = std::result::Result<T, ClientError>;
0