#!/usr/bin/env bash
# Launch the Hugging Face Text Embeddings Inference (TEI) server in Docker,
# serving the BAAI/bge-large-en-v1.5 embedding model on all available GPUs.
# Host port 8081 maps to the container's HTTP port 80; model weights are
# cached under ./data on the host so repeated runs skip the download.
set -euo pipefail

model=BAAI/bge-large-en-v1.5   # embedding model to serve (HF Hub id)
volume="$PWD/data"             # host dir mounted as the model cache

docker run --gpus all -p 8081:80 -v "$volume:/data" \
  --pull always ghcr.io/huggingface/text-embeddings-inference:1.7 \
  --model-id "$model" \
  --max-concurrent-requests 512 \
  --max-batch-requests 32 \
  --max-batch-tokens 16384 \
  --tokenization-workers 4 \
  --dtype float16              # halve memory vs float32; fine for inference