Buckets:
| <meta charset="utf-8" /><meta name="hf:doc:metadata" content="{"title":"Esporta modelli 🤗 Transformers","local":"esporta-modelli--transformers","sections":[{"title":"ONNX","local":"onnx","sections":[{"title":"Esportazione di un modello in ONNX","local":"esportazione-di-un-modello-in-onnx","sections":[],"depth":3},{"title":"Selezione delle caratteristiche per diverse topologie di modello","local":"selezione-delle-caratteristiche-per-diverse-topologie-di-modello","sections":[],"depth":3},{"title":"Esportazione di un modello per un’architettura non supportata","local":"esportazione-di-un-modello-per-unarchitettura-non-supportata","sections":[{"title":"Implementazione di una configurazione ONNX personalizzata","local":"implementazione-di-una-configurazione-onnx-personalizzata","sections":[],"depth":4},{"title":"Esportazione del modello","local":"esportazione-del-modello","sections":[],"depth":4},{"title":"Convalida degli output del modello","local":"convalida-degli-output-del-modello","sections":[],"depth":4}],"depth":3},{"title":"Contribuire con una nuova configurazione a 🤗 Transformers","local":"contribuire-con-una-nuova-configurazione-a--transformers","sections":[],"depth":3}],"depth":2},{"title":"TorchScript","local":"torchscript","sections":[{"title":"Flag TorchScript e pesi legati","local":"flag-torchscript-e-pesi-legati","sections":[],"depth":3},{"title":"Input fittizi e standard lengths","local":"input-fittizi-e-standard-lengths","sections":[],"depth":3},{"title":"Usare TorchSscript in Python","local":"usare-torchsscript-in-python","sections":[{"title":"Salvare un modello","local":"salvare-un-modello","sections":[],"depth":4},{"title":"Caricare un modello","local":"caricare-un-modello","sections":[],"depth":4},{"title":"Utilizzare un modello tracciato per l’inferenza","local":"utilizzare-un-modello-tracciato-per-linferenza","sections":[],"depth":4}],"depth":3},{"title":"Implementare modelli HuggingFace TorchScript su AWS utilizzando Neuron 
SDK","local":"implementare-modelli-huggingface-torchscript-su-aws-utilizzando-neuron-sdk","sections":[{"title":"Implicazioni","local":"implicazioni","sections":[],"depth":4},{"title":"Dipendenze","local":"dipendenze","sections":[],"depth":4},{"title":"Convertire un modello per AWS Neuron","local":"convertire-un-modello-per-aws-neuron","sections":[],"depth":4}],"depth":3}],"depth":2}],"depth":1}"> | |
| <link href="/docs/transformers/main/it/_app/immutable/assets/0.e3b0c442.css" rel="modulepreload"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/entry/start.dc9544fe.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/chunks/scheduler.90743ba6.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/chunks/singletons.f14607cf.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/chunks/index.06cbfa56.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/chunks/paths.99b61a45.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/entry/app.9ee4d62e.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/chunks/preload-helper.252ff27f.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/chunks/index.eb8303a6.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/nodes/0.7075e84a.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/chunks/each.e59479a4.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/nodes/30.127aefb5.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/chunks/Tip.94f3ec86.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/chunks/CodeBlock.5315ec0a.js"> | |
| <link rel="modulepreload" href="/docs/transformers/main/it/_app/immutable/chunks/MermaidChart.svelte_svelte_type_style_lang.f20f0146.js"><!-- HEAD_svelte-u9bgzb_START --><meta name="hf:doc:metadata" content="{"title":"Esporta modelli 🤗 Transformers","local":"esporta-modelli--transformers","sections":[{"title":"ONNX","local":"onnx","sections":[{"title":"Esportazione di un modello in ONNX","local":"esportazione-di-un-modello-in-onnx","sections":[],"depth":3},{"title":"Selezione delle caratteristiche per diverse topologie di modello","local":"selezione-delle-caratteristiche-per-diverse-topologie-di-modello","sections":[],"depth":3},{"title":"Esportazione di un modello per un’architettura non supportata","local":"esportazione-di-un-modello-per-unarchitettura-non-supportata","sections":[{"title":"Implementazione di una configurazione ONNX personalizzata","local":"implementazione-di-una-configurazione-onnx-personalizzata","sections":[],"depth":4},{"title":"Esportazione del modello","local":"esportazione-del-modello","sections":[],"depth":4},{"title":"Convalida degli output del modello","local":"convalida-degli-output-del-modello","sections":[],"depth":4}],"depth":3},{"title":"Contribuire con una nuova configurazione a 🤗 Transformers","local":"contribuire-con-una-nuova-configurazione-a--transformers","sections":[],"depth":3}],"depth":2},{"title":"TorchScript","local":"torchscript","sections":[{"title":"Flag TorchScript e pesi legati","local":"flag-torchscript-e-pesi-legati","sections":[],"depth":3},{"title":"Input fittizi e standard lengths","local":"input-fittizi-e-standard-lengths","sections":[],"depth":3},{"title":"Usare TorchSscript in Python","local":"usare-torchsscript-in-python","sections":[{"title":"Salvare un modello","local":"salvare-un-modello","sections":[],"depth":4},{"title":"Caricare un modello","local":"caricare-un-modello","sections":[],"depth":4},{"title":"Utilizzare un modello tracciato per 
l’inferenza","local":"utilizzare-un-modello-tracciato-per-linferenza","sections":[],"depth":4}],"depth":3},{"title":"Implementare modelli HuggingFace TorchScript su AWS utilizzando Neuron SDK","local":"implementare-modelli-huggingface-torchscript-su-aws-utilizzando-neuron-sdk","sections":[{"title":"Implicazioni","local":"implicazioni","sections":[],"depth":4},{"title":"Dipendenze","local":"dipendenze","sections":[],"depth":4},{"title":"Convertire un modello per AWS Neuron","local":"convertire-un-modello-per-aws-neuron","sections":[],"depth":4}],"depth":3}],"depth":2}],"depth":1}"><!-- HEAD_svelte-u9bgzb_END --> <p></p> <h1 class="relative group"><a id="esporta-modelli--transformers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#esporta-modelli--transformers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Esporta modelli 🤗 Transformers</span></h1> <p data-svelte-h="svelte-do19d6">Se devi implementare 🤗 modelli Transformers in ambienti di produzione, noi | |
| consigliamo di esportarli in un formato serializzato che può essere caricato ed eseguito | |
| su runtime e hardware specializzati. In questa guida ti mostreremo come | |
| esportare 🤗 modelli Transformers in due formati ampiamente utilizzati: ONNX e TorchScript.</p> <p data-svelte-h="svelte-1f9txfv">Una volta esportato, un modello può essere ottimizzato per l’inferenza tramite tecniche come | |
| la quantizzazione e il pruning. Se sei interessato a ottimizzare i tuoi modelli per l’esecuzione | |
| con la massima efficienza, dai un’occhiata a <a href="https://github.com/huggingface/optimum" rel="nofollow">🤗 Optimum | |
| library</a>.</p> <h2 class="relative group"><a id="onnx" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#onnx"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ONNX</span></h2> <p data-svelte-h="svelte-1gdh4oo">Il progetto <a href="http://onnx.ai" rel="nofollow">ONNX (Open Neural Network eXchange)</a> è uno standard aperto | |
| che definisce un insieme comune di operatori e un formato di file comune per | |
| rappresentare modelli di deep learning in un’ampia varietà di framework, tra cui | |
| PyTorch e TensorFlow. Quando un modello viene esportato nel formato ONNX, questi | |
| operatori sono usati per costruire un grafico computazionale (spesso chiamato | |
| <em>intermediate representation</em>) che rappresenta il flusso di dati attraverso la | |
| rete neurale.</p> <p data-svelte-h="svelte-z64a5j">Esponendo un grafico con operatori e tipi di dati standardizzati, ONNX rende | |
| più facile passare da un framework all’altro. Ad esempio, un modello allenato in PyTorch può | |
| essere esportato in formato ONNX e quindi importato in TensorFlow (e viceversa).</p> <p data-svelte-h="svelte-7rl71e">🤗 Transformers fornisce un pacchetto <code>transformers.onnx</code> che ti consente di | |
| convertire i checkpoint del modello in un grafico ONNX sfruttando gli oggetti di configurazione. | |
| Questi oggetti di configurazione sono già pronti per una serie di architetture di modelli, | |
| e sono progettati per essere facilmente estensibili ad altre architetture.</p> <p data-svelte-h="svelte-sr712u">Le configurazioni pronte includono le seguenti architetture:</p> <ul data-svelte-h="svelte-vozkex"><li>ALBERT</li> <li>BART</li> <li>BEiT</li> <li>BERT</li> <li>BigBird</li> <li>BigBird-Pegasus</li> <li>Blenderbot</li> <li>BlenderbotSmall</li> <li>CamemBERT</li> <li>ConvBERT</li> <li>Data2VecText</li> <li>Data2VecVision</li> <li>DeiT</li> <li>DistilBERT</li> <li>ELECTRA</li> <li>FlauBERT</li> <li>GPT Neo</li> <li>GPT-J</li> <li>I-BERT</li> <li>LayoutLM</li> <li>M2M100</li> <li>Marian</li> <li>mBART</li> <li>MobileBERT</li> <li>OpenAI GPT-2</li> <li>Perceiver</li> <li>PLBart</li> <li>RoBERTa</li> <li>RoFormer</li> <li>SqueezeBERT</li> <li>T5</li> <li>ViT</li> <li>XLM</li> <li>XLM-RoBERTa</li> <li>XLM-RoBERTa-XL</li></ul> <p data-svelte-h="svelte-1sn7n41">Nelle prossime due sezioni, ti mostreremo come:</p> <ul data-svelte-h="svelte-1g8hsur"><li>Esporta un modello supportato usando il pacchetto <code>transformers.onnx</code>.</li> <li>Esporta un modello personalizzato per un’architettura non supportata.</li></ul> <h3 class="relative group"><a id="esportazione-di-un-modello-in-onnx" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#esportazione-di-un-modello-in-onnx"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Esportazione di un modello in ONNX</span></h3> <p data-svelte-h="svelte-4psczp">Per esportare un modello 🤗 Transformers in ONNX, dovrai prima installarne alcune | |
| dipendenze extra:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->pip install transformers[onnx]<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1j72zvu">Il pacchetto <code>transformers.onnx</code> può essere usato come modulo Python:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->python -m transformers.onnx --<span class="hljs-built_in">help</span> | |
| usage: Hugging Face Transformers ONNX exporter [-h] -m MODEL [--feature {causal-lm, ...}] [--opset OPSET] [--atol ATOL] output | |
| positional arguments: | |
| output Path indicating <span class="hljs-built_in">where</span> to store generated ONNX model. | |
| optional arguments: | |
| -h, --<span class="hljs-built_in">help</span> show this <span class="hljs-built_in">help</span> message and <span class="hljs-built_in">exit</span> | |
| -m MODEL, --model MODEL | |
| Model ID on huggingface.co or path on disk to load model from. | |
| --feature {causal-lm, ...} | |
| The <span class="hljs-built_in">type</span> of features to <span class="hljs-built_in">export</span> the model with. | |
| --opset OPSET ONNX opset version to <span class="hljs-built_in">export</span> the model with. | |
| --atol ATOL Absolute difference tolerance when validating the model.<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-13jbmoq">L’esportazione di un checkpoint utilizzando una configurazione già pronta può essere eseguita come segue:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->python -m transformers.onnx --model=distilbert/distilbert-base-uncased onnx/<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-wxqvf3">che dovrebbe mostrare i seguenti log:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" 
fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->Validating ONNX model... | |
| -[✓] ONNX model output names match reference model ({<span class="hljs-string">'last_hidden_state'</span>}) | |
| - Validating ONNX Model output <span class="hljs-string">"last_hidden_state"</span>: | |
| -[✓] (2, 8, 768) matches (2, 8, 768) | |
| -[✓] all values close (atol: 1e-05) | |
| All good, model saved at: onnx/model.onnx<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-16irxk2">Questo esporta un grafico ONNX del checkpoint definito dall’argomento <code>--model</code>. | |
| In questo esempio è <code>distilbert/distilbert-base-uncased</code>, ma può essere qualsiasi checkpoint | |
| dell’Hugging Face Hub o uno memorizzato localmente.</p> <p data-svelte-h="svelte-rdoavc">Il file risultante <code>model.onnx</code> può quindi essere eseguito su uno dei <a href="https://onnx.ai/supported-tools.html#deployModel" rel="nofollow">tanti | |
| acceleratori</a> che supportano | |
| lo standard ONNX. Ad esempio, possiamo caricare ed eseguire il modello con <a href="https://onnxruntime.ai/" rel="nofollow">ONNX | |
| Runtime</a> come segue:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> onnxruntime <span class="hljs-keyword">import</span> InferenceSession | |
| <span class="hljs-meta">>>> </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">"distilbert/distilbert-base-uncased"</span>) | |
| <span class="hljs-meta">>>> </span>session = InferenceSession(<span class="hljs-string">"onnx/model.onnx"</span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># ONNX Runtime expects NumPy arrays as input</span> | |
| <span class="hljs-meta">>>> </span>inputs = tokenizer(<span class="hljs-string">"Using DistilBERT with ONNX Runtime!"</span>, return_tensors=<span class="hljs-string">"np"</span>) | |
| <span class="hljs-meta">>>> </span>outputs = session.run(output_names=[<span class="hljs-string">"last_hidden_state"</span>], input_feed=<span class="hljs-built_in">dict</span>(inputs))<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1t39lef">I nomi di output richiesti (cioè <code>["last_hidden_state"]</code>) possono essere ottenuti | |
| dando un’occhiata alla configurazione ONNX di ogni modello. Ad esempio, per | |
| DistilBERT abbiamo:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> transformers.models.distilbert <span class="hljs-keyword">import</span> DistilBertConfig, DistilBertOnnxConfig | |
| <span class="hljs-meta">>>> </span>config = DistilBertConfig() | |
| <span class="hljs-meta">>>> </span>onnx_config = DistilBertOnnxConfig(config) | |
| <span class="hljs-meta">>>> </span><span class="hljs-built_in">print</span>(<span class="hljs-built_in">list</span>(onnx_config.outputs.keys())) | |
| [<span class="hljs-string">"last_hidden_state"</span>]<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-ej8fgq">Il processo è identico per i checkpoint TensorFlow sull’hub. Ad esempio, noi | |
| possiamo esportare un checkpoint TensorFlow puro da <a href="https://huggingface.co/keras-io" rel="nofollow">Keras | |
| organizzazione</a> come segue:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->python -m transformers.onnx --model=keras-io/transformers-qa onnx/<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-h5zd9u">Per esportare un modello memorizzato localmente, devi disporre dei pesi del modello | |
| e file tokenizer memorizzati in una directory. Ad esempio, possiamo caricare e salvare un | |
| checkpoint come segue:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSequenceClassification | |
| <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Load tokenizer and PyTorch weights from the Hub</span> | |
| <span class="hljs-meta">>>> </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">"distilbert/distilbert-base-uncased"</span>) | |
| <span class="hljs-meta">>>> </span>pt_model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">"distilbert/distilbert-base-uncased"</span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Save to disk</span> | |
| <span class="hljs-meta">>>> </span>tokenizer.save_pretrained(<span class="hljs-string">"local-pt-checkpoint"</span>) | |
| <span class="hljs-meta">>>> </span>pt_model.save_pretrained(<span class="hljs-string">"local-pt-checkpoint"</span>)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1rjvspr">Una volta salvato il checkpoint, possiamo esportarlo su ONNX puntando l’argomento <code>--model</code> | |
| del pacchetto <code>transformers.onnx</code> nella directory desiderata:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->python -m transformers.onnx --model=local-pt-checkpoint onnx/<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="selezione-delle-caratteristiche-per-diverse-topologie-di-modello" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#selezione-delle-caratteristiche-per-diverse-topologie-di-modello"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 
67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Selezione delle caratteristiche per diverse topologie di modello</span></h3> <p data-svelte-h="svelte-ne5myz">Ogni configurazione già pronta viene fornita con una serie di <em>caratteristiche</em> che ti consentono di | |
| esportare modelli per diversi tipi di topologie o attività. Come mostrato nella tabella | |
| di seguito, ogni caratteristica è associata a una diversa Auto Class:</p> <table data-svelte-h="svelte-1n68aa"><thead><tr><th>Caratteristica</th> <th>Auto Class</th></tr></thead> <tbody><tr><td><code>causal-lm</code>, <code>causal-lm-with-past</code></td> <td><code>AutoModelForCausalLM</code></td></tr> <tr><td><code>default</code>, <code>default-with-past</code></td> <td><code>AutoModel</code></td></tr> <tr><td><code>masked-lm</code></td> <td><code>AutoModelForMaskedLM</code></td></tr> <tr><td><code>question-answering</code></td> <td><code>AutoModelForQuestionAnswering</code></td></tr> <tr><td><code>seq2seq-lm</code>, <code>seq2seq-lm-with-past</code></td> <td><code>AutoModelForSeq2SeqLM</code></td></tr> <tr><td><code>sequence-classification</code></td> <td><code>AutoModelForSequenceClassification</code></td></tr> <tr><td><code>token-classification</code></td> <td><code>AutoModelForTokenClassification</code></td></tr></tbody></table> <p data-svelte-h="svelte-6wtiyv">Per ciascuna configurazione, puoi trovare l’elenco delle funzionalità supportate tramite il | |
| <code>FeaturesManager</code>. Ad esempio, per DistilBERT abbiamo:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> transformers.onnx.features <span class="hljs-keyword">import</span> FeaturesManager | |
| <span class="hljs-meta">>>> </span>distilbert_features = <span class="hljs-built_in">list</span>(FeaturesManager.get_supported_features_for_model_type(<span class="hljs-string">"distilbert"</span>).keys()) | |
| <span class="hljs-meta">>>> </span><span class="hljs-built_in">print</span>(distilbert_features) | |
| [<span class="hljs-string">"default"</span>, <span class="hljs-string">"masked-lm"</span>, <span class="hljs-string">"causal-lm"</span>, <span class="hljs-string">"sequence-classification"</span>, <span class="hljs-string">"token-classification"</span>, <span class="hljs-string">"question-answering"</span>]<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-3qfl60">Puoi quindi passare una di queste funzionalità all’argomento <code>--feature</code> nel | |
| pacchetto <code>transformers.onnx</code>. Ad esempio, per esportare un modello di classificazione del testo | |
| possiamo scegliere un modello ottimizzato dall’Hub ed eseguire:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->python -m transformers.onnx --model=distilbert/distilbert-base-uncased-finetuned-sst-2-english \ | |
| --feature=sequence-classification onnx/<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-lgu95w">che visualizzerà i seguenti registri:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->Validating ONNX model... | |
| -[✓] ONNX model output names match reference model ({<span class="hljs-string">'logits'</span>}) | |
| - Validating ONNX Model output <span class="hljs-string">"logits"</span>: | |
| -[✓] (2, 2) matches (2, 2) | |
| -[✓] all values close (atol: 1e-05) | |
| All good, model saved at: onnx/model.onnx<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-13p6mbz">Puoi notare che in questo caso, i nomi di output del modello ottimizzato sono | |
| <code>logits</code> invece di <code>last_hidden_state</code> che abbiamo visto con il | |
| checkpoint <code>distilbert/distilbert-base-uncased</code> precedente. Questo è previsto dal | |
| modello ottimizzato visto che ha una testa di classificazione per sequenze.</p> <blockquote class="tip"><p data-svelte-h="svelte-1on6rqr">Le caratteristiche che hanno un suffisso <code>with-past</code> (ad es. <code>causal-lm-with-past</code>) | |
| corrispondono a topologie di modello con stati nascosti precalcolati (chiave e valori | |
| nei blocchi di attenzione) che possono essere utilizzati per la decodifica autoregressiva veloce.</p></blockquote> <h3 class="relative group"><a id="esportazione-di-un-modello-per-unarchitettura-non-supportata" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#esportazione-di-un-modello-per-unarchitettura-non-supportata"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Esportazione di un modello per un’architettura non supportata</span></h3> <p data-svelte-h="svelte-17r0w1i">Se desideri esportare un modello la cui architettura non è nativamente supportata dalla | |
| libreria, ci sono tre passaggi principali da seguire:</p> <ol data-svelte-h="svelte-z67jz4"><li>Implementare una configurazione ONNX personalizzata.</li> <li>Esportare il modello in ONNX.</li> <li>Convalidare gli output di PyTorch e dei modelli esportati.</li></ol> <p data-svelte-h="svelte-10yqqq8">In questa sezione, vedremo come DistilBERT è stato implementato per mostrare cosa è | |
| coinvolto in ogni passaggio.</p> <h4 class="relative group"><a id="implementazione-di-una-configurazione-onnx-personalizzata" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#implementazione-di-una-configurazione-onnx-personalizzata"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Implementazione di una configurazione ONNX personalizzata</span></h4> <p data-svelte-h="svelte-trahak">Iniziamo con l’oggetto di configurazione ONNX. Forniamo tre classi | |
| astratte da cui ereditare, a seconda del tipo di architettura | |
| del modello che desideri esportare:</p> <ul data-svelte-h="svelte-j9of43"><li>I modelli basati su encoder ereditano da <code>OnnxConfig</code></li> <li>I modelli basati su decoder ereditano da <code>OnnxConfigWithPast</code></li> <li>I modelli encoder-decoder ereditano da <code>OnnxSeq2SeqConfigWithPast</code></li></ul> <blockquote class="tip"><p data-svelte-h="svelte-k15enw">Un buon modo per implementare una configurazione ONNX personalizzata è guardare l’implementazione | |
| esistente nel file <code>configuration_<model_name>.py</code> di un’architettura simile.</p></blockquote> <p data-svelte-h="svelte-1jb3jgn">Poiché DistilBERT è un modello basato su encoder, la sua configurazione eredita da | |
| <code>OnnxConfig</code>:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> Mapping, OrderedDict | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> transformers.onnx <span class="hljs-keyword">import</span> OnnxConfig | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">DistilBertOnnxConfig</span>(<span class="hljs-title class_ inherited__">OnnxConfig</span>): | |
| <span class="hljs-meta">... </span> @<span class="hljs-built_in">property</span> | |
| <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">inputs</span>(<span class="hljs-params">self</span>) -> Mapping[<span class="hljs-built_in">str</span>, Mapping[<span class="hljs-built_in">int</span>, <span class="hljs-built_in">str</span>]]: | |
| <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> OrderedDict( | |
| <span class="hljs-meta">... </span> [ | |
| <span class="hljs-meta">... </span> (<span class="hljs-string">"input_ids"</span>, {<span class="hljs-number">0</span>: <span class="hljs-string">"batch"</span>, <span class="hljs-number">1</span>: <span class="hljs-string">"sequence"</span>}), | |
| <span class="hljs-meta">... </span> (<span class="hljs-string">"attention_mask"</span>, {<span class="hljs-number">0</span>: <span class="hljs-string">"batch"</span>, <span class="hljs-number">1</span>: <span class="hljs-string">"sequence"</span>}), | |
| <span class="hljs-meta">... </span> ] | |
| <span class="hljs-meta">... </span> )<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-13xj4dt">Ogni oggetto di configurazione deve implementare la proprietà <code>inputs</code> e restituire una | |
| mappatura, dove ogni chiave corrisponde a un input previsto e ogni valore | |
| indica l’asse di quell’input. Per DistilBERT, possiamo vedere che sono richiesti | |
| due input: <code>input_ids</code> e <code>attention_mask</code>. Questi input hanno la stessa forma di | |
| <code>(batch_size, sequence_length)</code> per questo motivo vediamo gli stessi assi usati nella | |
| configurazione.</p> <blockquote class="tip"><p data-svelte-h="svelte-9pdb1s">Puoi notare che la proprietà <code>inputs</code> per <code>DistilBertOnnxConfig</code> restituisce un | |
| <code>OrderedDict</code>. Ciò garantisce che gli input corrispondano alla loro posizione | |
| relativa all’interno del metodo <code>PreTrainedModel.forward()</code> durante il tracciamento del grafico. | |
| Raccomandiamo di usare un <code>OrderedDict</code> per le proprietà <code>inputs</code> e <code>outputs</code> | |
| quando si implementano configurazioni ONNX personalizzate.</p></blockquote> <p data-svelte-h="svelte-w0rck2">Dopo aver implementato una configurazione ONNX, è possibile istanziarla | |
| fornendo alla configurazione del modello base come segue:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig | |
| <span class="hljs-meta">>>> </span>config = AutoConfig.from_pretrained(<span class="hljs-string">"distilbert/distilbert-base-uncased"</span>) | |
| <span class="hljs-meta">>>> </span>onnx_config = DistilBertOnnxConfig(config)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-6wsqkw">L’oggetto risultante ha diverse proprietà utili. Ad esempio è possibile visualizzare il | |
| Set operatore ONNX che verrà utilizzato durante l’esportazione:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-meta">>>> </span><span class="hljs-built_in">print</span>(onnx_config.default_onnx_opset) | |
| <span class="hljs-number">11</span><!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1ggtlqz">È inoltre possibile visualizzare gli output associati al modello come segue:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-meta">>>> </span><span class="hljs-built_in">print</span>(onnx_config.outputs) | |
| OrderedDict([(<span class="hljs-string">"last_hidden_state"</span>, {<span class="hljs-number">0</span>: <span class="hljs-string">"batch"</span>, <span class="hljs-number">1</span>: <span class="hljs-string">"sequence"</span>})])<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1kn83vw">Puoi notare che la proprietà degli output segue la stessa struttura degli input; esso | |
| restituisce un <code>OrderedDict</code> di output con nome e le loro forme. La struttura di output | |
| è legata alla scelta della funzionalità con cui viene inizializzata la configurazione. | |
| Per impostazione predefinita, la configurazione ONNX viene inizializzata con la funzionalità <code>default</code> | |
| che corrisponde all’esportazione di un modello caricato con la classe <code>AutoModel</code>. Se tu | |
| desideri esportare una topologia di modello diversa, è sufficiente fornire una funzionalità diversa per | |
| l’argomento <code>task</code> quando inizializzi la configurazione ONNX. Ad esempio, se | |
| volevamo esportare DistilBERT con una testa di classificazione per sequenze, potremmo | |
| usare:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig | |
| <span class="hljs-meta">>>> </span>config = AutoConfig.from_pretrained(<span class="hljs-string">"distilbert/distilbert-base-uncased"</span>) | |
| <span class="hljs-meta">>>> </span>onnx_config_for_seq_clf = DistilBertOnnxConfig(config, task=<span class="hljs-string">"sequence-classification"</span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-built_in">print</span>(onnx_config_for_seq_clf.outputs) | |
| OrderedDict([(<span class="hljs-string">'logits'</span>, {<span class="hljs-number">0</span>: <span class="hljs-string">'batch'</span>})])<!-- HTML_TAG_END --></pre></div> <blockquote class="tip"><p data-svelte-h="svelte-8je27i">Tutte le proprietà e i metodi di base associati a <code>OnnxConfig</code> e le | |
| altre classi di configurazione possono essere sovrascritte se necessario. Guarda | |
| <code>BartOnnxConfig</code> per un esempio avanzato.</p></blockquote> <h4 class="relative group"><a id="esportazione-del-modello" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#esportazione-del-modello"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Esportazione del modello</span></h4> <p data-svelte-h="svelte-1ocrgze">Una volta implementata la configurazione ONNX, il passaggio successivo consiste nell’esportare il | |
| modello. Qui possiamo usare la funzione <code>export()</code> fornita dal | |
| pacchetto <code>transformers.onnx</code>. Questa funzione prevede la configurazione ONNX, insieme | |
| con il modello base e il tokenizer e il percorso per salvare il file esportato:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> pathlib <span class="hljs-keyword">import</span> Path | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> transformers.onnx <span class="hljs-keyword">import</span> export | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModel | |
| <span class="hljs-meta">>>> </span>onnx_path = Path(<span class="hljs-string">"model.onnx"</span>) | |
| <span class="hljs-meta">>>> </span>model_ckpt = <span class="hljs-string">"distilbert/distilbert-base-uncased"</span> | |
| <span class="hljs-meta">>>> </span>base_model = AutoModel.from_pretrained(model_ckpt) | |
| <span class="hljs-meta">>>> </span>tokenizer = AutoTokenizer.from_pretrained(model_ckpt) | |
| <span class="hljs-meta">>>> </span>onnx_inputs, onnx_outputs = export(tokenizer, base_model, onnx_config, onnx_config.default_onnx_opset, onnx_path)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-kxw9bj">Gli <code>onnx_inputs</code> e <code>onnx_outputs</code> restituiti dalla funzione <code>export()</code> sono | |
| liste di chiavi definite nelle proprietà <code>inputs</code> e <code>outputs</code> della | |
| configurazione. Una volta esportato il modello, puoi verificare che il modello sia ben | |
| formato come segue:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> onnx | |
| <span class="hljs-meta">>>> </span>onnx_model = onnx.load(<span class="hljs-string">"model.onnx"</span>) | |
| <span class="hljs-meta">>>> </span>onnx.checker.check_model(onnx_model)<!-- HTML_TAG_END --></pre></div> <blockquote class="tip"><p data-svelte-h="svelte-zgeibt">Se il tuo modello è più grande di 2 GB, vedrai che molti file aggiuntivi sono | |
| creati durante l’esportazione. Questo è <em>previsto</em> perché ONNX utilizza <a href="https://developers.google.com/protocol-buffers/" rel="nofollow">Protocol | |
| Buffer</a> per memorizzare il modello e | |
| questi hanno un limite di dimensione 2 GB. Vedi la <a href="https://github.com/onnx/onnx/blob/master/docs/ExternalData.md" rel="nofollow">Documentazione | |
| ONNX</a> | |
| per istruzioni su come caricare modelli con dati esterni.</p></blockquote> <h4 class="relative group"><a id="convalida-degli-output-del-modello" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#convalida-degli-output-del-modello"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Convalida degli output del modello</span></h4> <p data-svelte-h="svelte-zllt9o">Il passaggio finale consiste nel convalidare gli output dal modello di base e quello esportato | |
| verificando che corrispondano entro una soglia di tolleranza assoluta. Qui possiamo usare la | |
| Funzione <code>validate_model_outputs()</code> fornita dal pacchetto <code>transformers.onnx</code> | |
| come segue:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> transformers.onnx <span class="hljs-keyword">import</span> validate_model_outputs | |
| <span class="hljs-meta">>>> </span>validate_model_outputs( | |
| <span class="hljs-meta">... </span> onnx_config, tokenizer, base_model, onnx_path, onnx_outputs, onnx_config.atol_for_validation | |
| <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1c44gg3">Questa funzione usa il metodo <code>OnnxConfig.generate_dummy_inputs()</code> per generare | |
| input per il modello di base e quello esportato e la tolleranza assoluta può essere | |
| definita nella configurazione. Generalmente troviamo una corrispondenza numerica nell’intervallo da 1e-6 | |
| a 1e-4, anche se è probabile che qualsiasi cosa inferiore a 1e-3 vada bene.</p> <h3 class="relative group"><a id="contribuire-con-una-nuova-configurazione-a--transformers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#contribuire-con-una-nuova-configurazione-a--transformers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Contribuire con una nuova configurazione a 🤗 Transformers</span></h3> <p data-svelte-h="svelte-lrvm1j">Stiamo cercando di espandere l’insieme di configurazioni già pronte e di accettare | |
| contributi della community! Se vuoi contribuire con la tua aggiunta | |
| nella libreria, dovrai:</p> <ul data-svelte-h="svelte-a8pd1a"><li>Implementare la configurazione ONNX nel file corrispondente <code>configuration_<model_name>.py</code></li> <li>Includere l’architettura del modello e le funzioni corrispondenti in <code>~onnx.features.FeatureManager</code></li> <li>Aggiungere la tua architettura del modello ai test in <code>test_onnx_v2.py</code></li></ul> <p data-svelte-h="svelte-6wlbs5">Scopri come è stata contribuita la configurazione per <a href="https://github.com/huggingface/transformers/pull/14868/files" rel="nofollow">IBERT</a> per | |
| avere un’idea di cosa è coinvolto.</p> <h2 class="relative group"><a id="torchscript" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#torchscript"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TorchScript</span></h2> <blockquote class="tip"><p data-svelte-h="svelte-1puyypy">Questo è l’inizio dei nostri esperimenti con TorchScript e stiamo ancora esplorando le sue capacità con | |
| modelli con variable-input-size. È una nostra priorità e approfondiremo le nostre analisi nelle prossime versioni, | |
| con più esempi di codici, un’implementazione più flessibile e benchmark che confrontano i codici basati su Python con quelli compilati con | |
| TorchScript.</p></blockquote> <p data-svelte-h="svelte-wgebc9">Secondo la documentazione di Pytorch: “TorchScript è un modo per creare modelli serializzabili e ottimizzabili da codice | |
| Pytorch”. I due moduli di Pytorch <a href="https://pytorch.org/docs/stable/jit.html" rel="nofollow">JIT e TRACE</a> consentono allo sviluppatore di esportare | |
| il proprio modello da riutilizzare in altri programmi, come i programmi C++ orientati all’efficienza.</p> <p data-svelte-h="svelte-9p2ixq">Abbiamo fornito un’interfaccia che consente l’esportazione di modelli 🤗 Transformers in TorchScript in modo che possano essere riutilizzati | |
| in un ambiente diverso rispetto a un programma Python basato su Pytorch. Qui spieghiamo come esportare e utilizzare i nostri modelli utilizzando | |
| TorchScript.</p> <p data-svelte-h="svelte-6xn7nf">Esportare un modello richiede due cose:</p> <ul data-svelte-h="svelte-oy2125"><li>Un passaggio in avanti con input fittizi.</li> <li>Istanziazione del modello con flag <code>torchscript</code>.</li></ul> <p data-svelte-h="svelte-m48voh">Queste necessità implicano diverse cose a cui gli sviluppatori dovrebbero prestare attenzione. Questi dettagli sono mostrati sotto.</p> <h3 class="relative group"><a id="flag-torchscript-e-pesi-legati" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#flag-torchscript-e-pesi-legati"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Flag TorchScript e pesi legati</span></h3> <p data-svelte-h="svelte-tz9w6u">Questo flag è necessario perché la maggior parte dei modelli linguistici in questo repository hanno pesi legati tra il loro | |
| strato “Embedding” e lo strato “Decoding”. TorchScript non consente l’esportazione di modelli che hanno pesi | |
| legati, quindi è necessario prima slegare e clonare i pesi.</p> <p data-svelte-h="svelte-1mq7t8t">Ciò implica che i modelli istanziati con il flag <code>torchscript</code> hanno il loro strato <code>Embedding</code> e strato <code>Decoding</code> | |
| separati, il che significa che non dovrebbero essere addestrati in futuro. L’allenamento de-sincronizza i due | |
| strati, portando a risultati inaspettati.</p> <p data-svelte-h="svelte-1jmydvh">Questo non è il caso per i modelli che non hanno una testa del modello linguistico, poiché quelli non hanno pesi legati. Questi modelli | |
| possono essere esportati in sicurezza senza il flag <code>torchscript</code>.</p> <h3 class="relative group"><a id="input-fittizi-e-standard-lengths" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#input-fittizi-e-standard-lengths"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Input fittizi e standard lengths</span></h3> <p data-svelte-h="svelte-c132kv">Gli input fittizi sono usati per eseguire un passaggio in avanti del modello. Mentre i valori degli input si propagano attraverso gli strati, | |
| Pytorch tiene traccia delle diverse operazioni eseguite su ciascun tensore. Queste operazioni registrate vengono quindi utilizzate per | |
| creare la “traccia” del modello.</p> <p data-svelte-h="svelte-1bggopo">La traccia viene creata relativamente alle dimensioni degli input. È quindi vincolato dalle dimensioni dell’input | |
| fittizio e non funzionerà per altre lunghezze di sequenza o dimensioni batch. Quando si proverà con una dimensione diversa, si verificherà un errore | |
| come:</p> <p data-svelte-h="svelte-vyea1g"><code>La dimensione espansa del tensore (3) deve corrispondere alla dimensione esistente (7) nella dimensione non singleton 2</code></p> <p data-svelte-h="svelte-1yce0u1">Si consiglia pertanto di tracciare il modello con una dimensione di input fittizia grande almeno quanto il più grande | |
| input che verrà fornito al modello durante l’inferenza. È possibile eseguire il padding per riempire i valori mancanti. Il modello | |
| sarà tracciato con una grande dimensione di input, tuttavia, anche le dimensioni delle diverse matrici saranno grandi, | |
| risultando in più calcoli.</p> <p data-svelte-h="svelte-1sr04ao">Si raccomanda di prestare attenzione al numero totale di operazioni eseguite su ciascun input e di seguire da vicino le prestazioni | |
| durante l’esportazione di modelli di sequenza-lunghezza variabili.</p> <h3 class="relative group"><a id="usare-torchsscript-in-python" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#usare-torchsscript-in-python"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Usare TorchSscript in Python</span></h3> <p data-svelte-h="svelte-q23m3c">Di seguito è riportato un esempio, che mostra come salvare, caricare modelli e come utilizzare la traccia per l’inferenza.</p> <h4 class="relative group"><a id="salvare-un-modello" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#salvare-un-modello"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Salvare un modello</span></h4> <p data-svelte-h="svelte-k16qri">Questo frammento di codice mostra come usare TorchScript per esportare un <code>BertModel</code>. Qui il <code>BertModel</code> è istanziato secondo | |
| una classe <code>BertConfig</code> e quindi salvato su disco con il nome del file <code>traced_bert.pt</code></p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertModel, BertTokenizer, BertConfig | |
| <span class="hljs-keyword">import</span> torch | |
| enc = BertTokenizer.from_pretrained(<span class="hljs-string">"google-bert/bert-base-uncased"</span>) | |
| <span class="hljs-comment"># Tokenizing input text</span> | |
| text = <span class="hljs-string">"[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"</span> | |
| tokenized_text = enc.tokenize(text) | |
| <span class="hljs-comment"># Masking one of the input tokens</span> | |
| masked_index = <span class="hljs-number">8</span> | |
| tokenized_text[masked_index] = <span class="hljs-string">"[MASK]"</span> | |
| indexed_tokens = enc.convert_tokens_to_ids(tokenized_text) | |
| segments_ids = [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>] | |
| <span class="hljs-comment"># Creating a dummy input</span> | |
| tokens_tensor = torch.tensor([indexed_tokens]) | |
| segments_tensors = torch.tensor([segments_ids]) | |
| dummy_input = [tokens_tensor, segments_tensors] | |
| <span class="hljs-comment"># Initializing the model with the torchscript flag</span> | |
| <span class="hljs-comment"># Flag set to True even though it is not necessary as this model does not have an LM Head.</span> | |
| config = BertConfig( | |
| vocab_size_or_config_json_file=<span class="hljs-number">32000</span>, | |
| hidden_size=<span class="hljs-number">768</span>, | |
| num_hidden_layers=<span class="hljs-number">12</span>, | |
| num_attention_heads=<span class="hljs-number">12</span>, | |
| intermediate_size=<span class="hljs-number">3072</span>, | |
| torchscript=<span class="hljs-literal">True</span>, | |
| ) | |
| <span class="hljs-comment"># Instantiating the model</span> | |
| model = BertModel(config) | |
| <span class="hljs-comment"># The model needs to be in evaluation mode</span> | |
| model.<span class="hljs-built_in">eval</span>() | |
| <span class="hljs-comment"># If you are instantiating the model with *from_pretrained* you can also easily set the TorchScript flag</span> | |
| model = BertModel.from_pretrained(<span class="hljs-string">"google-bert/bert-base-uncased"</span>, torchscript=<span class="hljs-literal">True</span>) | |
| <span class="hljs-comment"># Creating the trace</span> | |
| traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors]) | |
| torch.jit.save(traced_model, <span class="hljs-string">"traced_bert.pt"</span>)<!-- HTML_TAG_END --></pre></div> <h4 class="relative group"><a id="caricare-un-modello" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#caricare-un-modello"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Caricare un modello</span></h4> <p data-svelte-h="svelte-wh65i">Questo frammento di codice mostra come caricare il <code>BertModel</code> che era stato precedentemente salvato su disco con il nome <code>traced_bert.pt</code>. | |
| Stiamo riutilizzando il <code>dummy_input</code> precedentemente inizializzato.</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->loaded_model = torch.jit.load(<span class="hljs-string">"traced_bert.pt"</span>) | |
| loaded_model.<span class="hljs-built_in">eval</span>() | |
| all_encoder_layers, pooled_output = loaded_model(*dummy_input)<!-- HTML_TAG_END --></pre></div> <h4 class="relative group"><a id="utilizzare-un-modello-tracciato-per-linferenza" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#utilizzare-un-modello-tracciato-per-linferenza"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilizzare un modello tracciato per l’inferenza</span></h4> <p data-svelte-h="svelte-1gnslzu">Usare il modello tracciato per l’inferenza è semplice come usare il suo metodo dunder <code>__call__</code>:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->traced_model(tokens_tensor, segments_tensors)<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="implementare-modelli-huggingface-torchscript-su-aws-utilizzando-neuron-sdk" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#implementare-modelli-huggingface-torchscript-su-aws-utilizzando-neuron-sdk"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Implementare modelli HuggingFace TorchScript su AWS utilizzando Neuron SDK</span></h3> <p data-svelte-h="svelte-fjq776">AWS ha introdotto <a 
href="https://aws.amazon.com/ec2/instance-types/inf1/" rel="nofollow">Amazon EC2 Inf1</a> | |
| famiglia di istanze per l’inferenza di machine learning a basso costo e ad alte prestazioni nel cloud. | |
| Le istanze Inf1 sono alimentate dal chip AWS Inferentia, un acceleratore hardware personalizzato, | |
| specializzato in carichi di lavoro di inferenza di deep learning. | |
| <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/#" rel="nofollow">AWS Neuron</a> | |
| è l’SDK per Inferentia che supporta il tracciamento e l’ottimizzazione dei modelli transformers per | |
| distribuzione su Inf1. L’SDK Neuron fornisce:</p> <ol data-svelte-h="svelte-121gcdu"><li>API di facile utilizzo con una riga di modifica del codice per tracciare e ottimizzare un modello TorchScript per l’inferenza nel cloud.</li> <li>Ottimizzazioni delle prestazioni pronte all’uso per <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/%3E" rel="nofollow">miglioramento dei costi-prestazioni</a></li> <li>Supporto per i modelli di trasformatori HuggingFace costruiti con <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/bert_tutorial/tutorial_pretrained_bert.html" rel="nofollow">PyTorch</a> | |
| o <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/tensorflow/huggingface_bert/huggingface_bert.html" rel="nofollow">TensorFlow</a>.</li></ol> <h4 class="relative group"><a id="implicazioni" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#implicazioni"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Implicazioni</span></h4> <p data-svelte-h="svelte-nfv9dr">Modelli Transformers basati su architettura <a href="https://huggingface.co/docs/transformers/main/model_doc/bert" rel="nofollow">BERT (Bidirectional Encoder Representations from Transformers)</a>, | |
| o sue varianti come <a href="https://huggingface.co/docs/transformers/main/model_doc/distilbert" rel="nofollow">distilBERT</a> | |
| e <a href="https://huggingface.co/docs/transformers/main/model_doc/roberta" rel="nofollow">roBERTa</a> | |
| funzioneranno meglio su Inf1 per attività non generative come il question answering estrattivo, | |
| la classificazione della sequenza e la classificazione dei token. In alternativa, le attività di generazione di testo | |
| possono essere adattate per essere eseguite su Inf1, secondo questo <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/transformers-marianmt.html" rel="nofollow">tutorial AWS Neuron MarianMT</a>. | |
| Ulteriori informazioni sui modelli che possono essere convertiti direttamente (out-of-the-box) su Inferentia possono essere | |
| trovati nella <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.html#models-inferentia" rel="nofollow">sezione Model Architecture Fit della documentazione Neuron</a>.</p> <h4 class="relative group"><a id="dipendenze" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#dipendenze"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Dipendenze</span></h4> <p data-svelte-h="svelte-clfp9m">L’utilizzo di AWS Neuron per convertire i modelli richiede le seguenti dipendenze e l’ambiente:</p> <ul data-svelte-h="svelte-1yjzy16"><li>A <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/index.html#installation-guide" rel="nofollow">Neuron SDK environment</a>, | |
| which comes pre-configured on <a href="https://docs.aws.amazon.com/dlami/latest/devguide/tutorial-inferentia-launching.html" rel="nofollow">AWS Deep Learning AMI</a>.</li></ul> <h4 class="relative group"><a id="convertire-un-modello-per-aws-neuron" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#convertire-un-modello-per-aws-neuron"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Convertire un modello per AWS Neuron</span></h4> <p data-svelte-h="svelte-wberyk">Usando lo stesso script come in <a href="https://huggingface.co/docs/transformers/main/en/serialization#using-torchscript-in-python" rel="nofollow">Usando TorchScipt in Python</a> | |
| per tracciare un “BertModel”, importa l’estensione del framework <code>torch.neuron</code> per accedere | |
| i componenti di Neuron SDK tramite un’API Python.</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertModel, BertTokenizer, BertConfig | |
| <span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">import</span> torch.neuron<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-e7n7t3">E modificare solo la riga di codice di traccia</p> <p data-svelte-h="svelte-yedjtf">Da:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->torch.jit.trace(model, [tokens_tensor, segments_tensors])<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-a9ap49">A:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->torch.neuron.trace(model, [tokens_tensor, segments_tensors])<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1pjct15">Questa modifica consente a Neuron SDK di tracciare il modello e ottimizzarlo per l’esecuzione nelle istanze Inf1.</p> <p data-svelte-h="svelte-1do2s1n">Per ulteriori informazioni sulle funzionalità, gli strumenti, i tutorial di esempio e gli ultimi aggiornamenti di AWS Neuron SDK, | |
| consultare la <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html" rel="nofollow">documentazione AWS NeuronSDK</a>.</p> <a class="!text-gray-400 !no-underline text-sm flex items-center not-prose mt-4" href="https://github.com/huggingface/transformers/blob/main/docs/source/it/serialization.md" target="_blank"><span data-svelte-h="svelte-1kd6by1"><</span> <span data-svelte-h="svelte-x0xyl0">></span> <span data-svelte-h="svelte-1dajgef"><span class="underline ml-1.5">Update</span> on GitHub</span></a> <p></p> | |
| <script> | |
| // SvelteKit client-side bootstrap for this docs page (presumably emitted by the docs build — do not edit by hand). | |
| { | |
| // Global runtime config: asset/base URL paths for this locale's build. | |
| __sveltekit_ezmzz1 = { | |
| assets: "/docs/transformers/main/it", | |
| base: "/docs/transformers/main/it", | |
| env: {} | |
| }; | |
| // Hydration target: the element that contains this inline script. | |
| const element = document.currentScript.parentElement; | |
| // Pre-serialized route data (none for this static page). | |
| const data = [null,null]; | |
| // Load the two runtime entry modules in parallel, then start the SvelteKit client. | |
| Promise.all([ | |
| import("/docs/transformers/main/it/_app/immutable/entry/start.dc9544fe.js"), | |
| import("/docs/transformers/main/it/_app/immutable/entry/app.9ee4d62e.js") | |
| ]).then(([kit, app]) => { | |
| kit.start(app, element, { | |
| node_ids: [0, 30], | |
| data, | |
| form: null, | |
| error: null | |
| }); | |
| }); | |
| } | |
| </script> | |
Xet Storage Details
- Size:
- 102 kB
- Xet hash:
- 4d556173d0ff11a5e97f0cb58d1a45e223c97cdac26a8cfbddabcbad1027d725
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.