Buckets:

hf-doc-build/doc / optimum-neuron /main /en /inference_tutorials /sentence_transformers.html
rtrm's picture
download
raw
34.1 kB
<meta charset="utf-8" /><meta name="hf:doc:metadata" content="{&quot;title&quot;:&quot;Sentence Transformers on AWS Inferentia with Optimum Neuron&quot;,&quot;local&quot;:&quot;sentence-transformers-on-aws-inferentia-with-optimum-neuron&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Text Models&quot;,&quot;local&quot;:&quot;text-models&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Convert Sentence Transformers model to AWS Inferentia2&quot;,&quot;local&quot;:&quot;convert-sentence-transformers-model-to-aws-inferentia2&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Load compiled Sentence Transformers model and run inference&quot;,&quot;local&quot;:&quot;load-compiled-sentence-transformers-model-and-run-inference&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Production Usage&quot;,&quot;local&quot;:&quot;production-usage&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3}],&quot;depth&quot;:2},{&quot;title&quot;:&quot;CLIP&quot;,&quot;local&quot;:&quot;clip&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Compile CLIP for AWS Inferentia2&quot;,&quot;local&quot;:&quot;compile-clip-for-aws-inferentia2&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Load compiled Sentence Transformers model and run inference&quot;,&quot;local&quot;:&quot;load-compiled-sentence-transformers-model-and-run-inference&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3}],&quot;depth&quot;:2}],&quot;depth&quot;:1}">
<link href="/docs/optimum.neuron/main/en/_app/immutable/assets/0.e3b0c442.css" rel="modulepreload">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/entry/start.7c62bda0.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/chunks/scheduler.56725da7.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/chunks/singletons.cc9cad5c.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/chunks/paths.a4116ff2.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/entry/app.7db1057b.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/chunks/preload-helper.ec99a452.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/chunks/index.18a26576.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/nodes/0.2218415c.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/chunks/each.e59479a4.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/nodes/22.ea1e8a3a.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/chunks/Tip.5b941656.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/chunks/CopyLLMTxtMenu.22408ad0.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/chunks/globals.7f7f1b26.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/chunks/MermaidChart.svelte_svelte_type_style_lang.8f5f1e9e.js">
<link rel="modulepreload" href="/docs/optimum.neuron/main/en/_app/immutable/chunks/CodeBlock.9417983d.js"><!-- HEAD_svelte-u9bgzb_START --><meta name="hf:doc:metadata" content="{&quot;title&quot;:&quot;Sentence Transformers on AWS Inferentia with Optimum Neuron&quot;,&quot;local&quot;:&quot;sentence-transformers-on-aws-inferentia-with-optimum-neuron&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Text Models&quot;,&quot;local&quot;:&quot;text-models&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Convert Sentence Transformers model to AWS Inferentia2&quot;,&quot;local&quot;:&quot;convert-sentence-transformers-model-to-aws-inferentia2&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Load compiled Sentence Transformers model and run inference&quot;,&quot;local&quot;:&quot;load-compiled-sentence-transformers-model-and-run-inference&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Production Usage&quot;,&quot;local&quot;:&quot;production-usage&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3}],&quot;depth&quot;:2},{&quot;title&quot;:&quot;CLIP&quot;,&quot;local&quot;:&quot;clip&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Compile CLIP for AWS Inferentia2&quot;,&quot;local&quot;:&quot;compile-clip-for-aws-inferentia2&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Load compiled Sentence Transformers model and run inference&quot;,&quot;local&quot;:&quot;load-compiled-sentence-transformers-model-and-run-inference&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3}],&quot;depth&quot;:2}],&quot;depth&quot;:1}"><!-- HEAD_svelte-u9bgzb_END --> <p></p> <div class="items-center shrink-0 min-w-[100px] max-sm:min-w-[50px] justify-end ml-auto flex" style="float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"><div class="inline-flex rounded-md max-sm:rounded-sm"><button class="inline-flex items-center gap-1 h-7 max-sm:h-7 px-2 max-sm:px-1.5 text-sm font-medium text-gray-800 border border-r-0 rounded-l-md max-sm:rounded-l-sm border-gray-200 bg-white hover:shadow-inner dark:border-gray-850 dark:bg-gray-950 dark:text-gray-200 dark:hover:bg-gray-800" aria-live="polite"><span class="inline-flex items-center justify-center rounded-md p-0.5 max-sm:p-0 hover:text-gray-800 dark:hover:text-gray-200"><svg class="sm:size-3.5 size-3" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg></span> <span>Copy page</span></button> <button class="inline-flex items-center justify-center w-6 max-sm:w-5 h-7 max-sm:h-7 disabled:pointer-events-none text-sm text-gray-500 hover:text-gray-700 dark:hover:text-white rounded-r-md max-sm:rounded-r-sm border border-l transition border-gray-200 bg-white hover:shadow-inner dark:border-gray-850 dark:bg-gray-950 dark:text-gray-200 dark:hover:bg-gray-800" aria-haspopup="menu" aria-expanded="false" aria-label="Open copy menu"><svg class="transition-transform text-gray-400 overflow-visible sm:size-3.5 size-3 rotate-0" width="1em" height="1em" viewBox="0 0 12 7" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M1 1L6 6L11 1" stroke="currentColor"></path></svg></button></div> </div> <h1 class="relative group"><a id="sentence-transformers-on-aws-inferentia-with-optimum-neuron" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#sentence-transformers-on-aws-inferentia-with-optimum-neuron"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Sentence Transformers on AWS Inferentia with Optimum Neuron</span></h1> <h2 class="relative group"><a id="text-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#text-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Text Models</span></h2> <p data-svelte-h="svelte-z9n49l"><em>There is a notebook version of that tutorial <a href="https://github.com/huggingface/optimum-neuron/blob/main/notebooks/sentence-transformers/getting-started.ipynb" rel="nofollow">here</a>.</em></p> <p data-svelte-h="svelte-12xwboz">This guide explains how to compile, load, and use <a href="https://www.sbert.net/" rel="nofollow">Sentence Transformers (SBERT)</a> models on AWS Inferentia2 with Optimum Neuron, enabling efficient calculation of embeddings. Sentence Transformers are powerful models for generating sentence embeddings. You can use this Sentence Transformers to compute sentence / text embeddings for more than 100 languages. These embeddings can then be compared e.g. with cosine-similarity to find sentences with a similar meaning. This can be useful for semantic textual similarity, semantic search, or paraphrase mining.</p> <h3 class="relative group"><a id="convert-sentence-transformers-model-to-aws-inferentia2" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#convert-sentence-transformers-model-to-aws-inferentia2"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Convert Sentence Transformers model to AWS Inferentia2</span></h3> <p data-svelte-h="svelte-172kcew">First, you need to convert your Sentence Transformers model to a format compatible with AWS Inferentia2. You can compile Sentence Transformers models with Optimum Neuron using the <code>optimum-cli</code> or <code>NeuronSentenceTransformers</code> class. Below you will find an example for both approaches. We have to make sure <code>sentence-transformers</code> is installed. That’s only needed for exporting the model.</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->pip install sentence-transformers<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-188wjd">Here we will use the <code>NeuronSentenceTransformers</code>, which can be used to convert any Sentence Transformers model to a format compatible with AWS Inferentia2 or load already converted models. When exporting models with the <code>NeuronSentenceTransformers</code> you need to set <code>export=True</code> and define the input shape and batch size. The input shape is defined by the <code>sequence_length</code> and the batch size by <code>batch_size</code>.</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> optimum.neuron <span class="hljs-keyword">import</span> NeuronSentenceTransformers
<span class="hljs-comment"># Sentence Transformers model from HuggingFace</span>
model_id = <span class="hljs-string">&quot;BAAI/bge-small-en-v1.5&quot;</span>
input_shapes = {<span class="hljs-string">&quot;batch_size&quot;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&quot;sequence_length&quot;</span>: <span class="hljs-number">384</span>} <span class="hljs-comment"># mandatory shapes</span>
<span class="hljs-comment"># Load Transformers model and export it to AWS Inferentia2</span>
model = NeuronSentenceTransformers.from_pretrained(model_id, export=<span class="hljs-literal">True</span>, **input_shapes)
<span class="hljs-comment"># Save model to disk</span>
model.save_pretrained(<span class="hljs-string">&quot;bge_emb_inf2/&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-11dpxl7">Here we will use the <code>optimum-cli</code> to convert the model. Similar to the <code>NeuronSentenceTransformers</code> we need to define our input shape and batch size. The input shape is defined by the <code>sequence_length</code> and the batch size by <code>batch_size</code>. The <code>optimum-cli</code> will automatically convert the model to a format compatible with AWS Inferentia2 and save it to the specified output directory.</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->optimum-cli <span class="hljs-built_in">export</span> neuron -m BAAI/bge-small-en-v1.5 --sequence_length 384 --batch_size 1 --task feature-extraction bge_emb_inf2/<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="load-compiled-sentence-transformers-model-and-run-inference" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-compiled-sentence-transformers-model-and-run-inference"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load compiled Sentence Transformers model and run inference</span></h3> <p data-svelte-h="svelte-f0pjfl">Once we have a compiled Sentence Transformers model, which we either exported ourselves or is available on the Hugging Face Hub, we can load it and run inference. For loading the model we can use the <code>NeuronSentenceTransformers</code> class, which is an abstraction layer for the <code>SentenceTransformer</code> class. The <code>NeuronSentenceTransformers</code> class will automatically pad the input to the specified <code>sequence_length</code> and run inference on AWS Inferentia2.</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> optimum.neuron <span class="hljs-keyword">import</span> NeuronSentenceTransformers
model_id_or_path = <span class="hljs-string">&quot;bge_emb_inf2/&quot;</span>
<span class="hljs-comment"># Load model and tokenizer</span>
model = NeuronSentenceTransformers.from_pretrained(model_id_or_path)
<span class="hljs-comment"># Run inference</span>
token_embeddings = model.encode(output_value=<span class="hljs-string">&quot;token_embeddings&quot;</span>)
sentence_embedding = model.encode(output_value=<span class="hljs-string">&quot;sentence_embedding&quot;</span>)<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="production-usage" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#production-usage"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Production Usage</span></h3> <p data-svelte-h="svelte-qum7z">For deploying these models in a production environment, refer to the <a href="https://www.philschmid.de/inferentia2-embeddings" rel="nofollow">Amazon SageMaker Blog</a>.</p> <h2 class="relative group"><a id="clip" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#clip"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>CLIP</span></h2> <h3 class="relative group"><a id="compile-clip-for-aws-inferentia2" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#compile-clip-for-aws-inferentia2"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Compile CLIP for AWS Inferentia2</span></h3> <p data-svelte-h="svelte-106rdam">You can compile CLIP models with Optimum Neuron either by using the <code>optimum-cli</code> or <code>NeuronSentenceTransformers</code> class. Adopt one approach that you prefer:</p> <ul data-svelte-h="svelte-1f07ihj"><li>With the Optimum CLI</li></ul> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->optimum-cli <span class="hljs-built_in">export</span> neuron -m sentence-transformers/clip-ViT-B-32 --sequence_length 64 --text_batch_size 3 --image_batch_size 1 --num_channels 3 --height 224 --width 224 --task feature-extraction --subfolder 0_CLIPModel clip_emb/<!-- HTML_TAG_END --></pre></div> <ul data-svelte-h="svelte-gd7ph7"><li>With the <code>NeuronSentenceTransformers</code> class</li></ul> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> optimum.neuron <span class="hljs-keyword">import</span> NeuronSentenceTransformers
model_id = <span class="hljs-string">&quot;sentence-transformers/clip-ViT-B-32&quot;</span>
<span class="hljs-comment"># configs for compiling model</span>
input_shapes = {
<span class="hljs-string">&quot;num_channels&quot;</span>: <span class="hljs-number">3</span>,
<span class="hljs-string">&quot;height&quot;</span>: <span class="hljs-number">224</span>,
<span class="hljs-string">&quot;width&quot;</span>: <span class="hljs-number">224</span>,
<span class="hljs-string">&quot;text_batch_size&quot;</span>: <span class="hljs-number">3</span>,
<span class="hljs-string">&quot;image_batch_size&quot;</span>: <span class="hljs-number">1</span>,
<span class="hljs-string">&quot;sequence_length&quot;</span>: <span class="hljs-number">64</span>,
}
emb_model = NeuronSentenceTransformers.from_pretrained(
model_id, subfolder=<span class="hljs-string">&quot;0_CLIPModel&quot;</span>, export=<span class="hljs-literal">True</span>, library_name=<span class="hljs-string">&quot;sentence_transformers&quot;</span>, dynamic_batch_size=<span class="hljs-literal">False</span>, **input_shapes
)
<span class="hljs-comment"># Save locally or upload to the HuggingFace Hub</span>
save_directory = <span class="hljs-string">&quot;clip_emb/&quot;</span>
emb_model.save_pretrained(save_directory)<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="load-compiled-sentence-transformers-model-and-run-inference" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-compiled-sentence-transformers-model-and-run-inference"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load compiled Sentence Transformers model and run inference</span></h3> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image
<span class="hljs-keyword">from</span> sentence_transformers <span class="hljs-keyword">import</span> util
<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor
<span class="hljs-keyword">from</span> optimum.neuron <span class="hljs-keyword">import</span> NeuronSentenceTransformers
save_directory = <span class="hljs-string">&quot;clip_emb&quot;</span>
emb_model = NeuronSentenceTransformers.from_pretrained(save_directory)
processor = CLIPProcessor.from_pretrained(save_directory)
inputs = processor(
text=[<span class="hljs-string">&quot;Two dogs in the snow&quot;</span>, <span class="hljs-string">&#x27;A cat on a table&#x27;</span>, <span class="hljs-string">&#x27;A picture of London at night&#x27;</span>], images=Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;two_dogs_in_snow.jpg&quot;</span>), return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>
)
outputs = emb_model(**inputs)
<span class="hljs-comment"># Compute cosine similarities</span>
cos_scores = util.cos_sim(outputs.image_embeds, outputs.text_embeds)
<span class="hljs-built_in">print</span>(cos_scores)
<span class="hljs-comment"># tensor([[0.3072, 0.1016, 0.1095]])</span><!-- HTML_TAG_END --></pre></div> <blockquote class="tip"><p data-svelte-h="svelte-6rj1id"><strong>Caveat</strong></p> <p data-svelte-h="svelte-174308j">Since compiled models with dynamic batching enabled only accept input tensors with the same batch size, we cannot set <code>dynamic_batch_size=True</code> if the input texts and images have different batch sizes. And as <code>NeuronSentenceTransformers</code> class pads the inputs to the batch sizes (<code>text_batch_size</code> and <code>image_batch_size</code>) used during the compilation, you could use relatively larger batch sizes during the compilation for flexibility with the trade-off of compute.</p> <p data-svelte-h="svelte-7gdj48">eg. if you want to encode 3 or 4 or 5 texts and 1 image, you could set <code>text_batch_size = 5 = max(3, 4, 5)</code> and <code>image_batch_size = 1</code> during the compilation.</p></blockquote> <p></p>
<script>
{
__sveltekit_rx9oz5 = {
assets: "/docs/optimum.neuron/main/en",
base: "/docs/optimum.neuron/main/en",
env: {}
};
const element = document.currentScript.parentElement;
const data = [null,null];
Promise.all([
import("/docs/optimum.neuron/main/en/_app/immutable/entry/start.7c62bda0.js"),
import("/docs/optimum.neuron/main/en/_app/immutable/entry/app.7db1057b.js")
]).then(([kit, app]) => {
kit.start(app, element, {
node_ids: [0, 22],
data,
form: null,
error: null
});
});
}
</script>

Xet Storage Details

Size:
34.1 kB
·
Xet hash:
e1762c920f2296756257e4c6b9905e94641888de1b905fa40bc37552227c2bca

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.