Buckets:
| import{s as Ls,o as Bs}from"../chunks/scheduler.37c15a92.js";import{S as Fs,i as Ps,g as f,s as p,r as T,A as Xs,h as y,f as n,c,j as Vs,u as h,x as b,k as Tl,y as qs,a,v as J,t as m,b as B,d as M,w as U,m as Ss,n as Hs,p as F}from"../chunks/index.2bf4358c.js";import{T as Zs}from"../chunks/Tip.363c041f.js";import{Y as Rs}from"../chunks/Youtube.1e50a667.js";import{C as w}from"../chunks/CodeBlock.4e987730.js";import{D as Ws}from"../chunks/DocNotebookDropdown.efc1fb7c.js";import{F as Ys}from"../chunks/FrameworkSwitchCourse.8d4d4ab6.js";import{H as ft,E as Ks}from"../chunks/getInferenceSnippets.ebf8be91.js";function Os(d){let l,i;return l=new Ws({props:{classNames:"absolute z-10 right-0 top-0",options:[{label:"Google Colab",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/it/chapter2/section2_tf.ipynb"},{label:"Aws Studio",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/it/chapter2/section2_tf.ipynb"}]}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function en(d){let l,i;return l=new Ws({props:{classNames:"absolute z-10 right-0 top-0",options:[{label:"Google Colab",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/it/chapter2/section2_pt.ipynb"},{label:"Aws Studio",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/it/chapter2/section2_pt.ipynb"}]}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function tn(d){let l;return{c(){l=Ss("Questa è la prima sezione in cui il contenuto è leggermente diverso a seconda che si utilizzi PyTorch o TensorFlow. 
Attivate lo switch sopra il titolo per selezionare la tua piattaforma preferita!")},l(i){l=Hs(i,"Questa è la prima sezione in cui il contenuto è leggermente diverso a seconda che si utilizzi PyTorch o TensorFlow. Attivate lo switch sopra il titolo per selezionare la tua piattaforma preferita!")},m(i,t){a(i,l,t)},d(i){i&&n(l)}}}function ln(d){let l,i;return l=new Rs({props:{id:"wVN12smEvqg"}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function sn(d){let l,i;return l=new Rs({props:{id:"1pedAIvTWXk"}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function nn(d){let l,i;return l=new w({props:{code:"cmF3X2lucHV0cyUyMCUzRCUyMCU1QiUwQSUyMCUyMCUyMCUyMCUyMkkndmUlMjBiZWVuJTIwd2FpdGluZyUyMGZvciUyMGElMjBIdWdnaW5nRmFjZSUyMGNvdXJzZSUyMG15JTIwd2hvbGUlMjBsaWZlLiUyMiUyQyUwQSUyMCUyMCUyMCUyMCUyMkklMjBoYXRlJTIwdGhpcyUyMHNvJTIwbXVjaCElMjIlMkMlMEElNUQlMEFpbnB1dHMlMjAlM0QlMjB0b2tlbml6ZXIocmF3X2lucHV0cyUyQyUyMHBhZGRpbmclM0RUcnVlJTJDJTIwdHJ1bmNhdGlvbiUzRFRydWUlMkMlMjByZXR1cm5fdGVuc29ycyUzRCUyMnRmJTIyKSUwQXByaW50KGlucHV0cyk=",highlighted:`raw_inputs = [ | |
| <span class="hljs-string">"I've been waiting for a HuggingFace course my whole life."</span>, | |
| <span class="hljs-string">"I hate this so much!"</span>, | |
| ] | |
| inputs = tokenizer(raw_inputs, padding=<span class="hljs-literal">True</span>, truncation=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">"tf"</span>) | |
| <span class="hljs-built_in">print</span>(inputs)`,wrap:!1}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function an(d){let l,i;return l=new w({props:{code:"cmF3X2lucHV0cyUyMCUzRCUyMCU1QiUwQSUyMCUyMCUyMCUyMCUyMkkndmUlMjBiZWVuJTIwd2FpdGluZyUyMGZvciUyMGElMjBIdWdnaW5nRmFjZSUyMGNvdXJzZSUyMG15JTIwd2hvbGUlMjBsaWZlLiUyMiUyQyUwQSUyMCUyMCUyMCUyMCUyMkklMjBoYXRlJTIwdGhpcyUyMHNvJTIwbXVjaCElMjIlMkMlMEElNUQlMEFpbnB1dHMlMjAlM0QlMjB0b2tlbml6ZXIocmF3X2lucHV0cyUyQyUyMHBhZGRpbmclM0RUcnVlJTJDJTIwdHJ1bmNhdGlvbiUzRFRydWUlMkMlMjByZXR1cm5fdGVuc29ycyUzRCUyMnB0JTIyKSUwQXByaW50KGlucHV0cyk=",highlighted:`raw_inputs = [ | |
| <span class="hljs-string">"I've been waiting for a HuggingFace course my whole life."</span>, | |
| <span class="hljs-string">"I hate this so much!"</span>, | |
| ] | |
| inputs = tokenizer(raw_inputs, padding=<span class="hljs-literal">True</span>, truncation=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">"pt"</span>) | |
| <span class="hljs-built_in">print</span>(inputs)`,wrap:!1}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function on(d){let l,i="Ecco come appaiono i risultati come tensori TensorFlow:",t,r,u;return r=new w({props:{code:"JTdCJTBBJTIwJTIwJTIwJTIwJ2lucHV0X2lkcyclM0ElMjAlM0N0Zi5UZW5zb3IlM0ElMjBzaGFwZSUzRCgyJTJDJTIwMTYpJTJDJTIwZHR5cGUlM0RpbnQzMiUyQyUyMG51bXB5JTNEJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwYXJyYXkoJTVCJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTVCJTIwJTIwMTAxJTJDJTIwJTIwMTA0NSUyQyUyMCUyMDEwMDUlMkMlMjAlMjAyMzEwJTJDJTIwJTIwMjA0MiUyQyUyMCUyMDM0MDMlMkMlMjAlMjAyMDA1JTJDJTIwJTIwMTAzNyUyQyUyMDE3NjYyJTJDJTIwMTIxNzIlMkMlMjAlMjAyNjA3JTJDJTIwJTIwMjAyNiUyQyUyMCUyMDI4NzglMkMlMjAlMjAyMTY2JTJDJTIwJTIwMTAxMiUyQyUyMCUyMCUyMDEwMiU1RCUyQyUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCU1QiUyMCUyMDEwMSUyQyUyMCUyMDEwNDUlMkMlMjAlMjA1MjIzJTJDJTIwJTIwMjAyMyUyQyUyMCUyMDIwNjElMkMlMjAlMjAyMTcyJTJDJTIwJTIwJTIwOTk5JTJDJTIwJTIwJTIwMTAyJTJDJTIwJTIwJTIwJTIwJTIwMCUyQyUyMCUyMCUyMCUyMCUyMDAlMkMlMjAlMjAlMjAlMjAlMjAwJTJDJTIwJTIwJTIwJTIwJTIwMCUyQyUyMCUyMCUyMCUyMCUyMDAlMkMlMjAlMjAlMjAlMjAlMjAwJTJDJTIwJTIwJTIwJTIwJTIwMCUyQyUyMCUyMCUyMCUyMCUyMDAlNUQlMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlNUQlMkMlMjBkdHlwZSUzRGludDMyKSUzRSUyQyUyMCUwQSUyMCUyMCUyMCUyMCdhdHRlbnRpb25fbWFzayclM0ElMjAlM0N0Zi5UZW5zb3IlM0ElMjBzaGFwZSUzRCgyJTJDJTIwMTYpJTJDJTIwZHR5cGUlM0RpbnQzMiUyQyUyMG51bXB5JTNEJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwYXJyYXkoJTVCJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTVCMSUyQyUyMDElMkMlMjAxJTJDJTIwMSUyQyUyMDElMkMlMjAxJTJDJTIwMSUyQyUyMDElMkMlMjAxJTJDJTIwMSUyQyUyMDElMkMlMjAxJTJDJTIwMSUyQyUyMDElMkMlMjAxJTJDJTIwMSU1RCUyQyUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCU1QjElMkMlMjAxJTJDJTIwMSUyQyUyMDElMkMlMjAxJTJDJTIwMSUyQyUyMDElMkMlMjAwJTJDJTIwMCUyQyUyMDAlMkMlMjAwJTJDJTIwMCUyQyUyMDAlMkMlMjAwJTJDJTIwMCUyQyUyMDAlNUQlMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlNUQlMkMlMjBkdHlwZSUzRGludDMyKSUzRSUw
QSU3RA==",highlighted:`{ | |
| <span class="hljs-string">'input_ids'</span>: <tf.Tensor: shape=(<span class="hljs-number">2</span>, <span class="hljs-number">16</span>), dtype=int32, numpy= | |
| array([ | |
| [ <span class="hljs-number">101</span>, <span class="hljs-number">1045</span>, <span class="hljs-number">1005</span>, <span class="hljs-number">2310</span>, <span class="hljs-number">2042</span>, <span class="hljs-number">3403</span>, <span class="hljs-number">2005</span>, <span class="hljs-number">1037</span>, <span class="hljs-number">17662</span>, <span class="hljs-number">12172</span>, <span class="hljs-number">2607</span>, <span class="hljs-number">2026</span>, <span class="hljs-number">2878</span>, <span class="hljs-number">2166</span>, <span class="hljs-number">1012</span>, <span class="hljs-number">102</span>], | |
| [ <span class="hljs-number">101</span>, <span class="hljs-number">1045</span>, <span class="hljs-number">5223</span>, <span class="hljs-number">2023</span>, <span class="hljs-number">2061</span>, <span class="hljs-number">2172</span>, <span class="hljs-number">999</span>, <span class="hljs-number">102</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>] | |
| ], dtype=int32)>, | |
| <span class="hljs-string">'attention_mask'</span>: <tf.Tensor: shape=(<span class="hljs-number">2</span>, <span class="hljs-number">16</span>), dtype=int32, numpy= | |
| array([ | |
| [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], | |
| [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>] | |
| ], dtype=int32)> | |
| }`,wrap:!1}}),{c(){l=f("p"),l.textContent=i,t=p(),T(r.$$.fragment)},l(o){l=y(o,"P",{"data-svelte-h":!0}),b(l)!=="svelte-rb7hpw"&&(l.textContent=i),t=c(o),h(r.$$.fragment,o)},m(o,j){a(o,l,j),a(o,t,j),J(r,o,j),u=!0},i(o){u||(M(r.$$.fragment,o),u=!0)},o(o){m(r.$$.fragment,o),u=!1},d(o){o&&(n(l),n(t)),U(r,o)}}}function rn(d){let l,i="Ecco come appaiono i risultati come tensori PyTorch:",t,r,u;return r=new w({props:{code:"JTdCJTBBJTIwJTIwJTIwJTIwJ2lucHV0X2lkcyclM0ElMjB0ZW5zb3IoJTVCJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTVCJTIwJTIwMTAxJTJDJTIwJTIwMTA0NSUyQyUyMCUyMDEwMDUlMkMlMjAlMjAyMzEwJTJDJTIwJTIwMjA0MiUyQyUyMCUyMDM0MDMlMkMlMjAlMjAyMDA1JTJDJTIwJTIwMTAzNyUyQyUyMDE3NjYyJTJDJTIwMTIxNzIlMkMlMjAyNjA3JTJDJTIwJTIwMjAyNiUyQyUyMCUyMDI4NzglMkMlMjAlMjAyMTY2JTJDJTIwJTIwMTAxMiUyQyUyMCUyMCUyMDEwMiU1RCUyQyUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCU1QiUyMCUyMDEwMSUyQyUyMCUyMDEwNDUlMkMlMjAlMjA1MjIzJTJDJTIwJTIwMjAyMyUyQyUyMCUyMDIwNjElMkMlMjAlMjAyMTcyJTJDJTIwJTIwJTIwOTk5JTJDJTIwJTIwJTIwMTAyJTJDJTIwJTIwJTIwJTIwJTIwMCUyQyUyMCUyMCUyMCUyMCUyMDAlMkMlMjAlMjAlMjAlMjAlMjAwJTJDJTIwJTIwJTIwJTIwJTIwMCUyQyUyMCUyMCUyMCUyMCUyMDAlMkMlMjAlMjAlMjAlMjAlMjAwJTJDJTIwJTIwJTIwJTIwJTIwMCUyQyUyMCUyMCUyMCUyMCUyMDAlNUQlMEElMjAlMjAlMjAlMjAlNUQpJTJDJTIwJTBBJTIwJTIwJTIwJTIwJ2F0dGVudGlvbl9tYXNrJyUzQSUyMHRlbnNvciglNUIlMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlNUIxJTJDJTIwMSUyQyUyMDElMkMlMjAxJTJDJTIwMSUyQyUyMDElMkMlMjAxJTJDJTIwMSUyQyUyMDElMkMlMjAxJTJDJTIwMSUyQyUyMDElMkMlMjAxJTJDJTIwMSUyQyUyMDElMkMlMjAxJTVEJTJDJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTVCMSUyQyUyMDElMkMlMjAxJTJDJTIwMSUyQyUyMDElMkMlMjAxJTJDJTIwMSUyQyUyMDAlMkMlMjAwJTJDJTIwMCUyQyUyMDAlMkMlMjAwJTJDJTIwMCUyQyUyMDAlMkMlMjAwJTJDJTIwMCU1RCUwQSUyMCUyMCUyMCUyMCU1RCklMEElN0Q=",highlighted:`{ | |
| <span class="hljs-string">'input_ids'</span>: tensor([ | |
| [ <span class="hljs-number">101</span>, <span class="hljs-number">1045</span>, <span class="hljs-number">1005</span>, <span class="hljs-number">2310</span>, <span class="hljs-number">2042</span>, <span class="hljs-number">3403</span>, <span class="hljs-number">2005</span>, <span class="hljs-number">1037</span>, <span class="hljs-number">17662</span>, <span class="hljs-number">12172</span>, <span class="hljs-number">2607</span>, <span class="hljs-number">2026</span>, <span class="hljs-number">2878</span>, <span class="hljs-number">2166</span>, <span class="hljs-number">1012</span>, <span class="hljs-number">102</span>], | |
| [ <span class="hljs-number">101</span>, <span class="hljs-number">1045</span>, <span class="hljs-number">5223</span>, <span class="hljs-number">2023</span>, <span class="hljs-number">2061</span>, <span class="hljs-number">2172</span>, <span class="hljs-number">999</span>, <span class="hljs-number">102</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>] | |
| ]), | |
| <span class="hljs-string">'attention_mask'</span>: tensor([ | |
| [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], | |
| [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>] | |
| ]) | |
| }`,wrap:!1}}),{c(){l=f("p"),l.textContent=i,t=p(),T(r.$$.fragment)},l(o){l=y(o,"P",{"data-svelte-h":!0}),b(l)!=="svelte-1e7hvq4"&&(l.textContent=i),t=c(o),h(r.$$.fragment,o)},m(o,j){a(o,l,j),a(o,t,j),J(r,o,j),u=!0},i(o){u||(M(r.$$.fragment,o),u=!0)},o(o){m(r.$$.fragment,o),u=!1},d(o){o&&(n(l),n(t)),U(r,o)}}}function pn(d){let l,i="Possiamo scaricare il nostro modello preaddestrato nello stesso modo in cui abbiamo fatto con il nostro tokenizer. 🤗 Transformers fornisce una classe <code>TFAutoModel</code> che ha anche un metodo <code>from_pretrained</code>:",t,r,u;return r=new w({props:{code:"ZnJvbSUyMHRyYW5zZm9ybWVycyUyMGltcG9ydCUyMFRGQXV0b01vZGVsJTBBJTBBY2hlY2twb2ludCUyMCUzRCUyMCUyMmRpc3RpbGJlcnQtYmFzZS11bmNhc2VkLWZpbmV0dW5lZC1zc3QtMi1lbmdsaXNoJTIyJTBBbW9kZWwlMjAlM0QlMjBURkF1dG9Nb2RlbC5mcm9tX3ByZXRyYWluZWQoY2hlY2twb2ludCk=",highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModel | |
| checkpoint = <span class="hljs-string">"distilbert-base-uncased-finetuned-sst-2-english"</span> | |
| model = TFAutoModel.from_pretrained(checkpoint)`,wrap:!1}}),{c(){l=f("p"),l.innerHTML=i,t=p(),T(r.$$.fragment)},l(o){l=y(o,"P",{"data-svelte-h":!0}),b(l)!=="svelte-168vw5"&&(l.innerHTML=i),t=c(o),h(r.$$.fragment,o)},m(o,j){a(o,l,j),a(o,t,j),J(r,o,j),u=!0},i(o){u||(M(r.$$.fragment,o),u=!0)},o(o){m(r.$$.fragment,o),u=!1},d(o){o&&(n(l),n(t)),U(r,o)}}}function cn(d){let l,i="Possiamo scaricare il nostro modello preaddestrato nello stesso modo in cui abbiamo fatto con il nostro tokenizer. 🤗 Transformers fornisce una classe <code>AutoModel</code> che ha anche un metodo <code>from_pretrained()</code>:",t,r,u;return r=new w({props:{code:"ZnJvbSUyMHRyYW5zZm9ybWVycyUyMGltcG9ydCUyMEF1dG9Nb2RlbCUwQSUwQWNoZWNrcG9pbnQlMjAlM0QlMjAlMjJkaXN0aWxiZXJ0LWJhc2UtdW5jYXNlZC1maW5ldHVuZWQtc3N0LTItZW5nbGlzaCUyMiUwQW1vZGVsJTIwJTNEJTIwQXV0b01vZGVsLmZyb21fcHJldHJhaW5lZChjaGVja3BvaW50KQ==",highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel | |
| checkpoint = <span class="hljs-string">"distilbert-base-uncased-finetuned-sst-2-english"</span> | |
| model = AutoModel.from_pretrained(checkpoint)`,wrap:!1}}),{c(){l=f("p"),l.innerHTML=i,t=p(),T(r.$$.fragment)},l(o){l=y(o,"P",{"data-svelte-h":!0}),b(l)!=="svelte-19bwkio"&&(l.innerHTML=i),t=c(o),h(r.$$.fragment,o)},m(o,j){a(o,l,j),a(o,t,j),J(r,o,j),u=!0},i(o){u||(M(r.$$.fragment,o),u=!0)},o(o){m(r.$$.fragment,o),u=!1},d(o){o&&(n(l),n(t)),U(r,o)}}}function un(d){let l,i,t,r;return l=new w({props:{code:"b3V0cHV0cyUyMCUzRCUyMG1vZGVsKGlucHV0cyklMEFwcmludChvdXRwdXRzLmxhc3RfaGlkZGVuX3N0YXRlLnNoYXBlKQ==",highlighted:`outputs = model(inputs) | |
| <span class="hljs-built_in">print</span>(outputs.last_hidden_state.shape)`,wrap:!1}}),t=new w({props:{code:"KDIlMkMlMjAxNiUyQyUyMDc2OCk=",highlighted:'(<span class="hljs-number">2</span>, <span class="hljs-number">16</span>, <span class="hljs-number">768</span>)',wrap:!1}}),{c(){T(l.$$.fragment),i=p(),T(t.$$.fragment)},l(u){h(l.$$.fragment,u),i=c(u),h(t.$$.fragment,u)},m(u,o){J(l,u,o),a(u,i,o),J(t,u,o),r=!0},i(u){r||(M(l.$$.fragment,u),M(t.$$.fragment,u),r=!0)},o(u){m(l.$$.fragment,u),m(t.$$.fragment,u),r=!1},d(u){u&&n(i),U(l,u),U(t,u)}}}function mn(d){let l,i,t,r;return l=new w({props:{code:"b3V0cHV0cyUyMCUzRCUyMG1vZGVsKCoqaW5wdXRzKSUwQXByaW50KG91dHB1dHMubGFzdF9oaWRkZW5fc3RhdGUuc2hhcGUp",highlighted:`outputs = model(**inputs) | |
| <span class="hljs-built_in">print</span>(outputs.last_hidden_state.shape)`,wrap:!1}}),t=new w({props:{code:"dG9yY2guU2l6ZSglNUIyJTJDJTIwMTYlMkMlMjA3NjglNUQp",highlighted:'torch.Size([<span class="hljs-number">2</span>, <span class="hljs-number">16</span>, <span class="hljs-number">768</span>])',wrap:!1}}),{c(){T(l.$$.fragment),i=p(),T(t.$$.fragment)},l(u){h(l.$$.fragment,u),i=c(u),h(t.$$.fragment,u)},m(u,o){J(l,u,o),a(u,i,o),J(t,u,o),r=!0},i(u){r||(M(l.$$.fragment,u),M(t.$$.fragment,u),r=!0)},o(u){m(l.$$.fragment,u),m(t.$$.fragment,u),r=!1},d(u){u&&n(i),U(l,u),U(t,u)}}}function Mn(d){let l,i="Per il nostro esempio, avremo bisogno di un modello con una classificazion head della sequenza (per poter classificare le frasi come positive o negative). Quindi, non useremo la classe <code>TFAutoModel</code>, ma <code>TFAutoModelForSequenceClassification</code>:",t,r,u;return r=new w({props:{code:"ZnJvbSUyMHRyYW5zZm9ybWVycyUyMGltcG9ydCUyMFRGQXV0b01vZGVsRm9yU2VxdWVuY2VDbGFzc2lmaWNhdGlvbiUwQSUwQWNoZWNrcG9pbnQlMjAlM0QlMjAlMjJkaXN0aWxiZXJ0LWJhc2UtdW5jYXNlZC1maW5ldHVuZWQtc3N0LTItZW5nbGlzaCUyMiUwQW1vZGVsJTIwJTNEJTIwVEZBdXRvTW9kZWxGb3JTZXF1ZW5jZUNsYXNzaWZpY2F0aW9uLmZyb21fcHJldHJhaW5lZChjaGVja3BvaW50KSUwQW91dHB1dHMlMjAlM0QlMjBtb2RlbChpbnB1dHMp",highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification | |
| checkpoint = <span class="hljs-string">"distilbert-base-uncased-finetuned-sst-2-english"</span> | |
| model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) | |
| outputs = model(inputs)`,wrap:!1}}),{c(){l=f("p"),l.innerHTML=i,t=p(),T(r.$$.fragment)},l(o){l=y(o,"P",{"data-svelte-h":!0}),b(l)!=="svelte-1jd8c81"&&(l.innerHTML=i),t=c(o),h(r.$$.fragment,o)},m(o,j){a(o,l,j),a(o,t,j),J(r,o,j),u=!0},i(o){u||(M(r.$$.fragment,o),u=!0)},o(o){m(r.$$.fragment,o),u=!1},d(o){o&&(n(l),n(t)),U(r,o)}}}function dn(d){let l,i="Per il nostro esempio, avremo bisogno di un modello con una classificazion head della sequenza (per poter classificare le frasi come positive o negative). Quindi, non useremo la classe <code>AutoModel</code>, ma <code>AutoModelForSequenceClassification</code>:",t,r,u;return r=new w({props:{code:"ZnJvbSUyMHRyYW5zZm9ybWVycyUyMGltcG9ydCUyMEF1dG9Nb2RlbEZvclNlcXVlbmNlQ2xhc3NpZmljYXRpb24lMEElMEFjaGVja3BvaW50JTIwJTNEJTIwJTIyZGlzdGlsYmVydC1iYXNlLXVuY2FzZWQtZmluZXR1bmVkLXNzdC0yLWVuZ2xpc2glMjIlMEFtb2RlbCUyMCUzRCUyMEF1dG9Nb2RlbEZvclNlcXVlbmNlQ2xhc3NpZmljYXRpb24uZnJvbV9wcmV0cmFpbmVkKGNoZWNrcG9pbnQpJTBBb3V0cHV0cyUyMCUzRCUyMG1vZGVsKCoqaW5wdXRzKQ==",highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification | |
| checkpoint = <span class="hljs-string">"distilbert-base-uncased-finetuned-sst-2-english"</span> | |
| model = AutoModelForSequenceClassification.from_pretrained(checkpoint) | |
| outputs = model(**inputs)`,wrap:!1}}),{c(){l=f("p"),l.innerHTML=i,t=p(),T(r.$$.fragment)},l(o){l=y(o,"P",{"data-svelte-h":!0}),b(l)!=="svelte-btrap5"&&(l.innerHTML=i),t=c(o),h(r.$$.fragment,o)},m(o,j){a(o,l,j),a(o,t,j),J(r,o,j),u=!0},i(o){u||(M(r.$$.fragment,o),u=!0)},o(o){m(r.$$.fragment,o),u=!1},d(o){o&&(n(l),n(t)),U(r,o)}}}function fn(d){let l,i;return l=new w({props:{code:"KDIlMkMlMjAyKQ==",highlighted:'(<span class="hljs-number">2</span>, <span class="hljs-number">2</span>)',wrap:!1}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function yn(d){let l,i;return l=new w({props:{code:"dG9yY2guU2l6ZSglNUIyJTJDJTIwMiU1RCk=",highlighted:'torch.Size([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>])',wrap:!1}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function bn(d){let l,i;return l=new w({props:{code:"JTNDdGYuVGVuc29yJTNBJTIwc2hhcGUlM0QoMiUyQyUyMDIpJTJDJTIwZHR5cGUlM0RmbG9hdDMyJTJDJTIwbnVtcHklM0QlMEElMjAlMjAlMjAlMjBhcnJheSglNUIlNUItMS41NjA2OTkxJTJDJTIwJTIwMS42MTIyODQyJTVEJTJDJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTVCJTIwNC4xNjkyMzElMjAlMkMlMjAtMy4zNDY0NDcyJTVEJTVEJTJDJTIwZHR5cGUlM0RmbG9hdDMyKSUzRQ==",highlighted:`<tf.Tensor: shape=(<span class="hljs-number">2</span>, <span class="hljs-number">2</span>), dtype=float32, numpy= | |
| array([[-<span class="hljs-number">1.5606991</span>, <span class="hljs-number">1.6122842</span>], | |
| [ <span class="hljs-number">4.169231</span> , -<span class="hljs-number">3.3464472</span>]], dtype=float32)>`,wrap:!1}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function Tn(d){let l,i;return l=new w({props:{code:"dGVuc29yKCU1QiU1Qi0xLjU2MDclMkMlMjAlMjAxLjYxMjMlNUQlMkMlMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlNUIlMjA0LjE2OTIlMkMlMjAtMy4zNDY0JTVEJTVEJTJDJTIwZ3JhZF9mbiUzRCUzQ0FkZG1tQmFja3dhcmQlM0Up",highlighted:`tensor([[-<span class="hljs-number">1.5607</span>, <span class="hljs-number">1.6123</span>], | |
| [ <span class="hljs-number">4.1692</span>, -<span class="hljs-number">3.3464</span>]], grad_fn=<AddmmBackward>)`,wrap:!1}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function hn(d){let l,i;return l=new w({props:{code:"aW1wb3J0JTIwdGVuc29yZmxvdyUyMGFzJTIwdGYlMEElMEFwcmVkaWN0aW9ucyUyMCUzRCUyMHRmLm1hdGguc29mdG1heChvdXRwdXRzLmxvZ2l0cyUyQyUyMGF4aXMlM0QtMSklMEFwcmludChwcmVkaWN0aW9ucyk=",highlighted:`<span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf | |
| predictions = tf.math.softmax(outputs.logits, axis=-<span class="hljs-number">1</span>) | |
| <span class="hljs-built_in">print</span>(predictions)`,wrap:!1}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function Jn(d){let l,i;return l=new w({props:{code:"aW1wb3J0JTIwdG9yY2glMEElMEFwcmVkaWN0aW9ucyUyMCUzRCUyMHRvcmNoLm5uLmZ1bmN0aW9uYWwuc29mdG1heChvdXRwdXRzLmxvZ2l0cyUyQyUyMGRpbSUzRC0xKSUwQXByaW50KHByZWRpY3Rpb25zKQ==",highlighted:`<span class="hljs-keyword">import</span> torch | |
| predictions = torch.nn.functional.softmax(outputs.logits, dim=-<span class="hljs-number">1</span>) | |
| <span class="hljs-built_in">print</span>(predictions)`,wrap:!1}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function Un(d){let l,i;return l=new w({props:{code:"dGYuVGVuc29yKCUwQSU1QiU1QjQuMDE5NTE2NzFlLTAyJTIwOS41OTgwNDgzM2UtMDElNUQlMEElMjAlNUI5Ljk5NDU1ODdlLTAxJTIwNS40NDE4NDI0ZS0wNCU1RCU1RCUyQyUyMHNoYXBlJTNEKDIlMkMlMjAyKSUyQyUyMGR0eXBlJTNEZmxvYXQzMik=",highlighted:`tf.Tensor( | |
| [[<span class="hljs-number">4.01951671e-02</span> <span class="hljs-number">9.59804833e-01</span>] | |
| [<span class="hljs-number">9.9945587e-01</span> <span class="hljs-number">5.4418424e-04</span>]], shape=(<span class="hljs-number">2</span>, <span class="hljs-number">2</span>), dtype=float32)`,wrap:!1}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function jn(d){let l,i;return l=new w({props:{code:"dGVuc29yKCU1QiU1QjQuMDE5NWUtMDIlMkMlMjA5LjU5ODBlLTAxJTVEJTJDJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTVCOS45OTQ2ZS0wMSUyQyUyMDUuNDQxOGUtMDQlNUQlNUQlMkMlMjBncmFkX2ZuJTNEJTNDU29mdG1heEJhY2t3YXJkJTNFKQ==",highlighted:`tensor([[<span class="hljs-number">4.0195e-02</span>, <span class="hljs-number">9.5980e-01</span>], | |
| [<span class="hljs-number">9.9946e-01</span>, <span class="hljs-number">5.4418e-04</span>]], grad_fn=<SoftmaxBackward>)`,wrap:!1}}),{c(){T(l.$$.fragment)},l(t){h(l.$$.fragment,t)},m(t,r){J(l,t,r),i=!0},i(t){i||(M(l.$$.fragment,t),i=!0)},o(t){m(l.$$.fragment,t),i=!1},d(t){U(l,t)}}}function wn(d){let l;return{c(){l=Ss("✏️ **Provaci anche tu!** Scegli due (o più) testi di tua proprietà e lanciali all'interno della pipeline `sentiment-analysis`. Successivamente, replica i passi che hai visto qui e verifica di aver ottenuto gli stessi risultati!")},l(i){l=Hs(i,"✏️ **Provaci anche tu!** Scegli due (o più) testi di tua proprietà e lanciali all'interno della pipeline `sentiment-analysis`. Successivamente, replica i passi che hai visto qui e verifica di aver ottenuto gli stessi risultati!")},m(i,t){a(i,l,t)},d(i){i&&n(l)}}}function _n(d){let l,i,t,r,u,o,j,bt,_,$,nt,P,Tt,C,k,at,K,vl='Cominciamo con un esempio completo, dando un’occhiata a ciò che è successo dietro le quinte quando abbiamo eseguito il seguente codice nel <a href="/course/chapter1">Capitolo 1</a>:',ht,O,Jt,ee,Al="e ottenuto:",Ut,te,jt,le,xl='Come abbiamo visto nel <a href="/course/chapter1">Capitolo 1</a>, questa pipeline raggruppa tre fasi: la pre-elaborazione, il passaggio degli input attraverso il modello e la post-elaborazione:',wt,X,zl='<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/full_nlp_pipeline.svg" alt="La pipeline NLP completa: tokenizzazione del testo, conversione in ID e inferenza attraverso il modello Transformer ed il modello head."/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/full_nlp_pipeline-dark.svg" alt="La pipeline NLP completa: tokenizzazione del testo, conversione in ID e inferenza attraverso il modello Transformer ed il modello head.."/>',_t,se,Ql="Esaminiamo rapidamente ciascuno di essi.",$t,ne,Ct,ae,Dl="Come 
altre reti neurali, i modelli Transformer non possono elaborare direttamente il testo non elaborato, quindi la prima fase della nostra pipeline consiste nel convertire gli input testuali in numeri che il modello possa interpretare. Per fare ciò, utilizziamo un <em>tokenizer</em>, che sarà responsabile di:",kt,ie,Nl="<li>Suddivisione dell’input in parole, sottoparole o simboli (come la punteggiatura) che vengono chiamati <em>token</em>.</li> <li>Mappare ogni token in un numero intero</li> <li>Aggiunta di ulteriori input che possono essere utili per il modello</li>",gt,oe,El='Tutta questa preelaborazione deve essere fatta esattamente nello stesso modo in cui è stato preaddestrato il modello, quindi dobbiamo prima scaricare queste informazioni dal <a href="https://huggingface.co/models" rel="nofollow">Model Hub</a>. Per farlo, si usa la classe <code>AutoTokenizer</code> e il suo metodo <code>from_pretrained()</code>. Utilizzando il nome del checkpoint del nostro modello, recupererà automaticamente i dati associati al tokenizer del modello e li metterà in cache (in modo che vengano scaricati solo la prima volta che si esegue il codice sottostante).',It,re,Gl='Poiché il checkpoint predefinito della pipeline <code>sentiment-analysis</code> è <code>distilbert-base-uncased-finetuned-sst-2-english</code> (si può vedere la sua scheda modello <a href="https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english" rel="nofollow">qui</a>), eseguiamo quanto segue:',vt,pe,At,ce,Vl="Una volta che abbiamo il tokenizer, possiamo passargli direttamente le nostre frasi e otterremo un dizionario pronto per il nostro modello! L’unica cosa che resta da fare è convertire l’elenco degli ID in ingresso in tensori.",xt,ue,Zl="È possibile utilizzare i 🤗 Transformer senza doversi preoccupare di quale framework ML venga utilizzato come backend;potrebbe essere PyTorch o TensorFlow, o Flax per alcuni modelli. Tuttavia, i modelli Transformer accettano solo <em>tensors</em> come input. 
Se è la prima volta che sentite parlare di tensori, potete pensare a loro come array NumPy. Un array NumPy può essere uno scalare (0D), un vettore (1D), una matrice (2D) o avere più dimensioni. Si tratta effettivamente di un tensore; i tensori di altri framework ML si comportano in modo simile e di solito sono semplici da istanziare come gli array NumPy.",zt,me,Sl="Per specificare il tipo di tensori che vogliamo ottenere (PyTorch, TensorFlow o NumPy), usiamo l’argomento <code>return_tensors</code>:",Qt,g,I,it,Me,Hl="Non preoccupatevi ancora di padding e truncation; li spiegheremo più avanti.Le cose principali da ricordare sono che si può passare una frase o un elenco di frasi, oltre a specificare il tipo di tensori che si desidera ottenere (se non viene passato alcun tipo, si otterrà una lista di liste come risultato).",Dt,v,A,ot,de,Rl="L’output stesso è un dizionario contenente due chiavi, <code>input_ids</code> e <code>attention_mask</code>. <code>input_ids</code> contiene due righe di interi (uno per ogni frase) che sono gli identificatori unici dei token in ogni frase. Spiegheremo cosa sia la <code>attention_mask</code> più avanti in questo capitolo.",Nt,fe,Et,x,z,rt,ye,Wl="In questo frammento di codice, abbiamo scaricato lo stesso checkpoint usato in precedenza nella nostra pipeline (in realtà dovrebbe essere già nella cache) e abbiamo istanziato un modello con esso.",Gt,be,Ll="Questa architettura contiene solo il modulo Transformer di base: dati alcuni input, produce quelli che chiameremo <em>hidden states</em>, noti anche come <em>features</em>. Per ogni input del modello, recupereremo un vettore ad alta dimensionalità che rappresenta la <strong>comprensione contestuale di quell’input da parte del modello Transformer</strong>.",Vt,Te,Bl="Se per te tutto questo non ha senso, non preoccuparti. 
Ti spiegheremo tutto più avanti.",Zt,he,Fl='Anche se questi stati nascosti possono essere utili da soli, di solito sono input di un’altra parte del modello, nota come <em>head</em>. Nel <a href="/course/chapter1">Capitolo 1</a>, i diversi compiti potrebbero essere eseguiti con la stessa architettura, ma a ciascuno di essi sarà associata una head diversa.',St,Je,Ht,Ue,Pl="Il vettore emesso dal modulo Transformer è solitamente di grandi dimensioni. In genere ha tre dimensioni:",Rt,je,Xl="<li><strong>Dimensione del batch</strong>: Il numero di sequenze elaborate alla volta (2 nel nostro esempio).</li> <li><strong>Lunghezza della sequenza</strong>: La lunghezza della rappresentazione numerica della sequenza (16 nel nostro esempio).</li> <li><strong>Dimensione nascosta</strong>: La dimensione del vettore di ciascun ingresso del modello.</li>",Wt,we,ql="Si dice che è “ad alta dimensionalità” a causa dell’ultimo valore. La dimensione nascosta può essere molto grande (768 è comune per i modelli più piccoli, mentre nei modelli più grandi può arrivare a 3072 o più).",Lt,_e,Yl="Lo possiamo vedere se alimentiamo il nostro modello con gli input che abbiamo preelaborato:",Bt,Q,D,pt,$e,Kl="Si noti che gli output dei modelli 🤗 Transformers si comportano come <code>namedtuple</code> o dizionari. Si può accedere agli elementi per attributi (come abbiamo fatto noi) sia per chiave (<code>outputs["last_hidden_state"]</code>), sia per indice se si sa esattamente dove si trova ciò che si sta cercando (<code>outputs[0]</code>).",Ft,Ce,Pt,ke,Ol="Le model head prendono in input il vettore ad alta dimensione degli stati nascosti e lo proiettano su una dimensione diversa. 
Di solito sono composte da uno o pochi strati lineari:",Xt,q,es='<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/transformer_and_head.svg" alt="Una rete di Transformer accanto alla sua head."/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/transformer_and_head-dark.svg" alt="Una rete di Transformer accanto alla sua head."/>',qt,ge,ts="Gli output del modello Transformer vengono inviati direttamente alla model head per essere elaborati.",Yt,Ie,ls="In questo diagramma, il modello è rappresentato dallo strato embeddings e dagli strati successivi. Il livello embeddings converte ogni ID dell’input tokenizzato in un vettore che rappresenta il token associato. I livelli successivi manipolano questi vettori utilizzando il meccanismo di attenzione per produrre la rappresentazione finale delle frasi.",Kt,ve,ss="Esistono diverse architetture disponibili nei 🤗 Transformers, ognuna delle quali è stata progettata per affrontare un compito specifico. Ecco un elenco non esaustivo:",Ot,Ae,ns="<li><code>*Model</code> (retrieve the hidden states)</li> <li><code>*ForCausalLM</code></li> <li><code>*ForMaskedLM</code></li> <li><code>*ForMultipleChoice</code></li> <li><code>*ForQuestionAnswering</code></li> <li><code>*ForSequenceClassification</code></li> <li><code>*ForTokenClassification</code></li> <li>e altre 🤗</li>",el,N,E,ct,xe,as="Ora, se osserviamo la forma dei nostri output, la dimensionalità sarà molto più bassa: la model head prende in input i vettori ad alta dimensionalità che abbiamo visto prima e produce vettori contenenti due valori (uno per etichetta):",tl,ze,ll,G,V,ut,Qe,is="Dato che abbiamo solo due frasi e due etichette, il risultato che otteniamo dal nostro modello è di forma 2 x 2.",sl,De,nl,Ne,os="I valori che otteniamo come output dal nostro modello non hanno necessariamente senso da soli. 
Diamo un’occhiata:",al,Ee,il,Z,S,mt,Ge,rs='Il nostro modello ha previsto <code>[-1.5607, 1.6123]</code> per la prima frase e <code>[ 4.1692, -3.3464]</code> per la seconda. Non si tratta di probabilità ma di <em>logit</em>, i punteggi non normalizzati emessi dall’ultimo livello del modello. Per poterli convertire in probabilità, devono passare attraverso un layer <a href="https://en.wikipedia.org/wiki/Softmax_function" rel="nofollow">SoftMax</a> (tutti i modelli 🤗 Transformers producono i logits, poiché la funzione di perdita per l’addestramento generalmente fonde l’ultima funzione di attivazione, come SoftMax, con la funzione di perdita effettiva, come la cross entropy):',ol,H,R,Mt,W,L,dt,Ve,ps="Ora possiamo vedere che il modello ha previsto <code>[0,0402, 0,9598]</code> per la prima frase e <code>[0,9995, 0,0005]</code> per la seconda. Si tratta di punteggi di probabilità riconoscibili.",rl,Ze,cs="Per ottenere le etichette corrispondenti a ogni posizione, si può ispezionare l’attributo <code>id2label</code> della configurazione del modello (si veda la prossima sezione):",pl,Se,cl,He,ul,Re,us="Ora possiamo concludere che il modello ha previsto quanto segue:",ml,We,ms="<li>Prima frase: NEGATIVE: 0.0402, POSITIVE: 0.9598</li> <li>Seconda frase: NEGATIVE: 0.9995, POSITIVE: 0.0005</li>",Ml,Le,Ms="Abbiamo riprodotto con successo le tre fasi della pipeline: preelaborazione con i tokenizer, passaggio degli input attraverso il modello e postelaborazione! 
Ora prendiamoci un po’ di tempo per approfondire ognuna di queste fasi.",dl,Y,fl,Be,yl,yt,bl;u=new Ys({props:{fw:d[0]}}),j=new ft({props:{title:"Dietro la pipeline",local:"dietro-la-pipeline",headingTag:"h1"}});const ds=[en,Os],Fe=[];function fs(e,s){return e[0]==="pt"?0:1}_=fs(d),$=Fe[_]=ds[_](d),P=new Zs({props:{$$slots:{default:[tn]},$$scope:{ctx:d}}});const ys=[sn,ln],Pe=[];function bs(e,s){return e[0]==="pt"?0:1}C=bs(d),k=Pe[C]=ys[C](d),O=new w({props:{code:"ZnJvbSUyMHRyYW5zZm9ybWVycyUyMGltcG9ydCUyMHBpcGVsaW5lJTBBJTBBY2xhc3NpZmllciUyMCUzRCUyMHBpcGVsaW5lKCUyMnNlbnRpbWVudC1hbmFseXNpcyUyMiklMEFjbGFzc2lmaWVyKCUwQSUyMCUyMCUyMCUyMCU1QiUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMkkndmUlMjBiZWVuJTIwd2FpdGluZyUyMGZvciUyMGElMjBIdWdnaW5nRmFjZSUyMGNvdXJzZSUyMG15JTIwd2hvbGUlMjBsaWZlLiUyMiUyQyUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMkklMjBoYXRlJTIwdGhpcyUyMHNvJTIwbXVjaCElMjIlMkMlMEElMjAlMjAlMjAlMjAlNUQlMEEp",highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline | |
| classifier = pipeline(<span class="hljs-string">"sentiment-analysis"</span>) | |
| classifier( | |
| [ | |
| <span class="hljs-string">"I've been waiting for a HuggingFace course my whole life."</span>, | |
| <span class="hljs-string">"I hate this so much!"</span>, | |
| ] | |
| )`,wrap:!1}}),te=new w({props:{code:"JTVCJTdCJ2xhYmVsJyUzQSUyMCdQT1NJVElWRSclMkMlMjAnc2NvcmUnJTNBJTIwMC45NTk4MDQ3MTM3MjYwNDM3JTdEJTJDJTBBJTIwJTdCJ2xhYmVsJyUzQSUyMCdORUdBVElWRSclMkMlMjAnc2NvcmUnJTNBJTIwMC45OTk0NTU4MDk1OTMyMDA3JTdEJTVE",highlighted:`[{<span class="hljs-string">'label'</span>: <span class="hljs-string">'POSITIVE'</span>, <span class="hljs-string">'score'</span>: <span class="hljs-number">0.9598047137260437</span>}, | |
| {<span class="hljs-string">'label'</span>: <span class="hljs-string">'NEGATIVE'</span>, <span class="hljs-string">'score'</span>: <span class="hljs-number">0.9994558095932007</span>}]`,wrap:!1}}),ne=new ft({props:{title:"Preelaborazione con un tokenizer",local:"preelaborazione-con-un-tokenizer",headingTag:"h2"}}),pe=new w({props:{code:"ZnJvbSUyMHRyYW5zZm9ybWVycyUyMGltcG9ydCUyMEF1dG9Ub2tlbml6ZXIlMEElMEFjaGVja3BvaW50JTIwJTNEJTIwJTIyZGlzdGlsYmVydC1iYXNlLXVuY2FzZWQtZmluZXR1bmVkLXNzdC0yLWVuZ2xpc2glMjIlMEF0b2tlbml6ZXIlMjAlM0QlMjBBdXRvVG9rZW5pemVyLmZyb21fcHJldHJhaW5lZChjaGVja3BvaW50KQ==",highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer | |
| checkpoint = <span class="hljs-string">"distilbert-base-uncased-finetuned-sst-2-english"</span> | |
| tokenizer = AutoTokenizer.from_pretrained(checkpoint)`,wrap:!1}});const Ts=[an,nn],Xe=[];function hs(e,s){return e[0]==="pt"?0:1}g=hs(d),I=Xe[g]=Ts[g](d);const Js=[rn,on],qe=[];function Us(e,s){return e[0]==="pt"?0:1}v=Us(d),A=qe[v]=Js[v](d),fe=new ft({props:{title:"Passare attraverso il modello",local:"passare-attraverso-il-modello",headingTag:"h2"}});const js=[cn,pn],Ye=[];function ws(e,s){return e[0]==="pt"?0:1}x=ws(d),z=Ye[x]=js[x](d),Je=new ft({props:{title:"Un vettore ad alta dimensionalità?",local:"un-vettore-ad-alta-dimensionalità",headingTag:"h3"}});const _s=[mn,un],Ke=[];function $s(e,s){return e[0]==="pt"?0:1}Q=$s(d),D=Ke[Q]=_s[Q](d),Ce=new ft({props:{title:"Model heads: Dare un senso ai numeri",local:"model-heads-dare-un-senso-ai-numeri",headingTag:"h3"}});const Cs=[dn,Mn],Oe=[];function ks(e,s){return e[0]==="pt"?0:1}N=ks(d),E=Oe[N]=Cs[N](d),ze=new w({props:{code:"cHJpbnQob3V0cHV0cy5sb2dpdHMuc2hhcGUp",highlighted:'<span class="hljs-built_in">print</span>(outputs.logits.shape)',wrap:!1}});const gs=[yn,fn],et=[];function Is(e,s){return e[0]==="pt"?0:1}G=Is(d),V=et[G]=gs[G](d),De=new ft({props:{title:"Postprocessing the output",local:"postprocessing-the-output",headingTag:"h2"}}),Ee=new w({props:{code:"cHJpbnQob3V0cHV0cy5sb2dpdHMp",highlighted:'<span class="hljs-built_in">print</span>(outputs.logits)',wrap:!1}});const vs=[Tn,bn],tt=[];function As(e,s){return e[0]==="pt"?0:1}Z=As(d),S=tt[Z]=vs[Z](d);const xs=[Jn,hn],lt=[];function zs(e,s){return e[0]==="pt"?0:1}H=zs(d),R=lt[H]=xs[H](d);const Qs=[jn,Un],st=[];function Ds(e,s){return e[0]==="pt"?0:1}return W=Ds(d),L=st[W]=Qs[W](d),Se=new w({props:{code:"bW9kZWwuY29uZmlnLmlkMmxhYmVs",highlighted:"model.config.id2label",wrap:!1}}),He=new w({props:{code:"JTdCMCUzQSUyMCdORUdBVElWRSclMkMlMjAxJTNBJTIwJ1BPU0lUSVZFJyU3RA==",highlighted:'{<span class="hljs-number">0</span>: <span class="hljs-string">'NEGATIVE'</span>, <span class="hljs-number">1</span>: <span class="hljs-string">'POSITIVE'</span>}',wrap:!1}}),Y=new 
Zs({props:{$$slots:{default:[wn]},$$scope:{ctx:d}}}),Be=new Ks({props:{source:"https://github.com/huggingface/course/blob/main/chapters/it/chapter2/2.mdx"}}),{c(){l=f("meta"),i=p(),t=f("p"),r=p(),T(u.$$.fragment),o=p(),T(j.$$.fragment),bt=p(),$.c(),nt=p(),T(P.$$.fragment),Tt=p(),k.c(),at=p(),K=f("p"),K.innerHTML=vl,ht=p(),T(O.$$.fragment),Jt=p(),ee=f("p"),ee.textContent=Al,Ut=p(),T(te.$$.fragment),jt=p(),le=f("p"),le.innerHTML=xl,wt=p(),X=f("div"),X.innerHTML=zl,_t=p(),se=f("p"),se.textContent=Ql,$t=p(),T(ne.$$.fragment),Ct=p(),ae=f("p"),ae.innerHTML=Dl,kt=p(),ie=f("ul"),ie.innerHTML=Nl,gt=p(),oe=f("p"),oe.innerHTML=El,It=p(),re=f("p"),re.innerHTML=Gl,vt=p(),T(pe.$$.fragment),At=p(),ce=f("p"),ce.textContent=Vl,xt=p(),ue=f("p"),ue.innerHTML=Zl,zt=p(),me=f("p"),me.innerHTML=Sl,Qt=p(),I.c(),it=p(),Me=f("p"),Me.textContent=Hl,Dt=p(),A.c(),ot=p(),de=f("p"),de.innerHTML=Rl,Nt=p(),T(fe.$$.fragment),Et=p(),z.c(),rt=p(),ye=f("p"),ye.textContent=Wl,Gt=p(),be=f("p"),be.innerHTML=Ll,Vt=p(),Te=f("p"),Te.textContent=Bl,Zt=p(),he=f("p"),he.innerHTML=Fl,St=p(),T(Je.$$.fragment),Ht=p(),Ue=f("p"),Ue.textContent=Pl,Rt=p(),je=f("ul"),je.innerHTML=Xl,Wt=p(),we=f("p"),we.textContent=ql,Lt=p(),_e=f("p"),_e.textContent=Yl,Bt=p(),D.c(),pt=p(),$e=f("p"),$e.innerHTML=Kl,Ft=p(),T(Ce.$$.fragment),Pt=p(),ke=f("p"),ke.textContent=Ol,Xt=p(),q=f("div"),q.innerHTML=es,qt=p(),ge=f("p"),ge.textContent=ts,Yt=p(),Ie=f("p"),Ie.textContent=ls,Kt=p(),ve=f("p"),ve.textContent=ss,Ot=p(),Ae=f("ul"),Ae.innerHTML=ns,el=p(),E.c(),ct=p(),xe=f("p"),xe.textContent=as,tl=p(),T(ze.$$.fragment),ll=p(),V.c(),ut=p(),Qe=f("p"),Qe.textContent=is,sl=p(),T(De.$$.fragment),nl=p(),Ne=f("p"),Ne.textContent=os,al=p(),T(Ee.$$.fragment),il=p(),S.c(),mt=p(),Ge=f("p"),Ge.innerHTML=rs,ol=p(),R.c(),Mt=p(),L.c(),dt=p(),Ve=f("p"),Ve.innerHTML=ps,rl=p(),Ze=f("p"),Ze.innerHTML=cs,pl=p(),T(Se.$$.fragment),cl=p(),T(He.$$.fragment),ul=p(),Re=f("p"),Re.textContent=us,ml=p(),We=f("ul"),We.innerHTML=ms,Ml=p(),Le=f("p"),Le.textContent=Ms,dl=p()
,T(Y.$$.fragment),fl=p(),T(Be.$$.fragment),yl=p(),yt=f("p"),this.h()},l(e){const s=Xs("svelte-u9bgzb",document.head);l=y(s,"META",{name:!0,content:!0}),s.forEach(n),i=c(e),t=y(e,"P",{}),Vs(t).forEach(n),r=c(e),h(u.$$.fragment,e),o=c(e),h(j.$$.fragment,e),bt=c(e),$.l(e),nt=c(e),h(P.$$.fragment,e),Tt=c(e),k.l(e),at=c(e),K=y(e,"P",{"data-svelte-h":!0}),b(K)!=="svelte-151gkcr"&&(K.innerHTML=vl),ht=c(e),h(O.$$.fragment,e),Jt=c(e),ee=y(e,"P",{"data-svelte-h":!0}),b(ee)!=="svelte-exo8xh"&&(ee.textContent=Al),Ut=c(e),h(te.$$.fragment,e),jt=c(e),le=y(e,"P",{"data-svelte-h":!0}),b(le)!=="svelte-1qe8xdh"&&(le.innerHTML=xl),wt=c(e),X=y(e,"DIV",{class:!0,"data-svelte-h":!0}),b(X)!=="svelte-1bcdsk7"&&(X.innerHTML=zl),_t=c(e),se=y(e,"P",{"data-svelte-h":!0}),b(se)!=="svelte-1gy2qph"&&(se.textContent=Ql),$t=c(e),h(ne.$$.fragment,e),Ct=c(e),ae=y(e,"P",{"data-svelte-h":!0}),b(ae)!=="svelte-9pzy30"&&(ae.innerHTML=Dl),kt=c(e),ie=y(e,"UL",{"data-svelte-h":!0}),b(ie)!=="svelte-6217uj"&&(ie.innerHTML=Nl),gt=c(e),oe=y(e,"P",{"data-svelte-h":!0}),b(oe)!=="svelte-nu4tuu"&&(oe.innerHTML=El),It=c(e),re=y(e,"P",{"data-svelte-h":!0}),b(re)!=="svelte-1kbz7hd"&&(re.innerHTML=Gl),vt=c(e),h(pe.$$.fragment,e),At=c(e),ce=y(e,"P",{"data-svelte-h":!0}),b(ce)!=="svelte-x437dh"&&(ce.textContent=Vl),xt=c(e),ue=y(e,"P",{"data-svelte-h":!0}),b(ue)!=="svelte-mg2zx7"&&(ue.innerHTML=Zl),zt=c(e),me=y(e,"P",{"data-svelte-h":!0}),b(me)!=="svelte-5yo1cd"&&(me.innerHTML=Sl),Qt=c(e),I.l(e),it=c(e),Me=y(e,"P",{"data-svelte-h":!0}),b(Me)!=="svelte-1ry6kqr"&&(Me.textContent=Hl),Dt=c(e),A.l(e),ot=c(e),de=y(e,"P",{"data-svelte-h":!0}),b(de)!=="svelte-1w6kfn9"&&(de.innerHTML=Rl),Nt=c(e),h(fe.$$.fragment,e),Et=c(e),z.l(e),rt=c(e),ye=y(e,"P",{"data-svelte-h":!0}),b(ye)!=="svelte-savfl5"&&(ye.textContent=Wl),Gt=c(e),be=y(e,"P",{"data-svelte-h":!0}),b(be)!=="svelte-lczy69"&&(be.innerHTML=Ll),Vt=c(e),Te=y(e,"P",{"data-svelte-h":!0}),b(Te)!=="svelte-7ig53h"&&(Te.textContent=Bl),Zt=c(e),he=y(e,"P",{"data-svelte-h":!0}),b(he)!=="s
velte-eqteqf"&&(he.innerHTML=Fl),St=c(e),h(Je.$$.fragment,e),Ht=c(e),Ue=y(e,"P",{"data-svelte-h":!0}),b(Ue)!=="svelte-12i73ac"&&(Ue.textContent=Pl),Rt=c(e),je=y(e,"UL",{"data-svelte-h":!0}),b(je)!=="svelte-1tsv60u"&&(je.innerHTML=Xl),Wt=c(e),we=y(e,"P",{"data-svelte-h":!0}),b(we)!=="svelte-1d9p2vz"&&(we.textContent=ql),Lt=c(e),_e=y(e,"P",{"data-svelte-h":!0}),b(_e)!=="svelte-lrpjbq"&&(_e.textContent=Yl),Bt=c(e),D.l(e),pt=c(e),$e=y(e,"P",{"data-svelte-h":!0}),b($e)!=="svelte-1fa661h"&&($e.innerHTML=Kl),Ft=c(e),h(Ce.$$.fragment,e),Pt=c(e),ke=y(e,"P",{"data-svelte-h":!0}),b(ke)!=="svelte-16uw2ui"&&(ke.textContent=Ol),Xt=c(e),q=y(e,"DIV",{class:!0,"data-svelte-h":!0}),b(q)!=="svelte-d4xqhj"&&(q.innerHTML=es),qt=c(e),ge=y(e,"P",{"data-svelte-h":!0}),b(ge)!=="svelte-1w20y4v"&&(ge.textContent=ts),Yt=c(e),Ie=y(e,"P",{"data-svelte-h":!0}),b(Ie)!=="svelte-19gy4mg"&&(Ie.textContent=ls),Kt=c(e),ve=y(e,"P",{"data-svelte-h":!0}),b(ve)!=="svelte-1ufibh0"&&(ve.textContent=ss),Ot=c(e),Ae=y(e,"UL",{"data-svelte-h":!0}),b(Ae)!=="svelte-1vbui3x"&&(Ae.innerHTML=ns),el=c(e),E.l(e),ct=c(e),xe=y(e,"P",{"data-svelte-h":!0}),b(xe)!=="svelte-1s9g5wu"&&(xe.textContent=as),tl=c(e),h(ze.$$.fragment,e),ll=c(e),V.l(e),ut=c(e),Qe=y(e,"P",{"data-svelte-h":!0}),b(Qe)!=="svelte-1h2plev"&&(Qe.textContent=is),sl=c(e),h(De.$$.fragment,e),nl=c(e),Ne=y(e,"P",{"data-svelte-h":!0}),b(Ne)!=="svelte-61xkfb"&&(Ne.textContent=os),al=c(e),h(Ee.$$.fragment,e),il=c(e),S.l(e),mt=c(e),Ge=y(e,"P",{"data-svelte-h":!0}),b(Ge)!=="svelte-seb2ta"&&(Ge.innerHTML=rs),ol=c(e),R.l(e),Mt=c(e),L.l(e),dt=c(e),Ve=y(e,"P",{"data-svelte-h":!0}),b(Ve)!=="svelte-67oj2y"&&(Ve.innerHTML=ps),rl=c(e),Ze=y(e,"P",{"data-svelte-h":!0}),b(Ze)!=="svelte-1oq61oo"&&(Ze.innerHTML=cs),pl=c(e),h(Se.$$.fragment,e),cl=c(e),h(He.$$.fragment,e),ul=c(e),Re=y(e,"P",{"data-svelte-h":!0}),b(Re)!=="svelte-16tfjws"&&(Re.textContent=us),ml=c(e),We=y(e,"UL",{"data-svelte-h":!0}),b(We)!=="svelte-vj1gli"&&(We.innerHTML=ms),Ml=c(e),Le=y(e,"P",{"data-svelte-h":!0}
),b(Le)!=="svelte-4jy107"&&(Le.textContent=Ms),dl=c(e),h(Y.$$.fragment,e),fl=c(e),h(Be.$$.fragment,e),yl=c(e),yt=y(e,"P",{}),Vs(yt).forEach(n),this.h()},h(){Tl(l,"name","hf:doc:metadata"),Tl(l,"content",$n),Tl(X,"class","flex justify-center"),Tl(q,"class","flex justify-center")},m(e,s){qs(document.head,l),a(e,i,s),a(e,t,s),a(e,r,s),J(u,e,s),a(e,o,s),J(j,e,s),a(e,bt,s),Fe[_].m(e,s),a(e,nt,s),J(P,e,s),a(e,Tt,s),Pe[C].m(e,s),a(e,at,s),a(e,K,s),a(e,ht,s),J(O,e,s),a(e,Jt,s),a(e,ee,s),a(e,Ut,s),J(te,e,s),a(e,jt,s),a(e,le,s),a(e,wt,s),a(e,X,s),a(e,_t,s),a(e,se,s),a(e,$t,s),J(ne,e,s),a(e,Ct,s),a(e,ae,s),a(e,kt,s),a(e,ie,s),a(e,gt,s),a(e,oe,s),a(e,It,s),a(e,re,s),a(e,vt,s),J(pe,e,s),a(e,At,s),a(e,ce,s),a(e,xt,s),a(e,ue,s),a(e,zt,s),a(e,me,s),a(e,Qt,s),Xe[g].m(e,s),a(e,it,s),a(e,Me,s),a(e,Dt,s),qe[v].m(e,s),a(e,ot,s),a(e,de,s),a(e,Nt,s),J(fe,e,s),a(e,Et,s),Ye[x].m(e,s),a(e,rt,s),a(e,ye,s),a(e,Gt,s),a(e,be,s),a(e,Vt,s),a(e,Te,s),a(e,Zt,s),a(e,he,s),a(e,St,s),J(Je,e,s),a(e,Ht,s),a(e,Ue,s),a(e,Rt,s),a(e,je,s),a(e,Wt,s),a(e,we,s),a(e,Lt,s),a(e,_e,s),a(e,Bt,s),Ke[Q].m(e,s),a(e,pt,s),a(e,$e,s),a(e,Ft,s),J(Ce,e,s),a(e,Pt,s),a(e,ke,s),a(e,Xt,s),a(e,q,s),a(e,qt,s),a(e,ge,s),a(e,Yt,s),a(e,Ie,s),a(e,Kt,s),a(e,ve,s),a(e,Ot,s),a(e,Ae,s),a(e,el,s),Oe[N].m(e,s),a(e,ct,s),a(e,xe,s),a(e,tl,s),J(ze,e,s),a(e,ll,s),et[G].m(e,s),a(e,ut,s),a(e,Qe,s),a(e,sl,s),J(De,e,s),a(e,nl,s),a(e,Ne,s),a(e,al,s),J(Ee,e,s),a(e,il,s),tt[Z].m(e,s),a(e,mt,s),a(e,Ge,s),a(e,ol,s),lt[H].m(e,s),a(e,Mt,s),st[W].m(e,s),a(e,dt,s),a(e,Ve,s),a(e,rl,s),a(e,Ze,s),a(e,pl,s),J(Se,e,s),a(e,cl,s),J(He,e,s),a(e,ul,s),a(e,Re,s),a(e,ml,s),a(e,We,s),a(e,Ml,s),a(e,Le,s),a(e,dl,s),J(Y,e,s),a(e,fl,s),J(Be,e,s),a(e,yl,s),a(e,yt,s),bl=!0},p(e,[s]){const Ns={};s&1&&(Ns.fw=e[0]),u.$set(Ns);let hl=_;_=fs(e),_!==hl&&(F(),m(Fe[hl],1,1,()=>{Fe[hl]=null}),B(),$=Fe[_],$||($=Fe[_]=ds[_](e),$.c()),M($,1),$.m(nt.parentNode,nt));const Es={};s&2&&(Es.$$scope={dirty:s,ctx:e}),P.$set(Es);let 
Jl=C;C=bs(e),C!==Jl&&(F(),m(Pe[Jl],1,1,()=>{Pe[Jl]=null}),B(),k=Pe[C],k||(k=Pe[C]=ys[C](e),k.c()),M(k,1),k.m(at.parentNode,at));let Ul=g;g=hs(e),g!==Ul&&(F(),m(Xe[Ul],1,1,()=>{Xe[Ul]=null}),B(),I=Xe[g],I||(I=Xe[g]=Ts[g](e),I.c()),M(I,1),I.m(it.parentNode,it));let jl=v;v=Us(e),v!==jl&&(F(),m(qe[jl],1,1,()=>{qe[jl]=null}),B(),A=qe[v],A||(A=qe[v]=Js[v](e),A.c()),M(A,1),A.m(ot.parentNode,ot));let wl=x;x=ws(e),x!==wl&&(F(),m(Ye[wl],1,1,()=>{Ye[wl]=null}),B(),z=Ye[x],z||(z=Ye[x]=js[x](e),z.c()),M(z,1),z.m(rt.parentNode,rt));let _l=Q;Q=$s(e),Q!==_l&&(F(),m(Ke[_l],1,1,()=>{Ke[_l]=null}),B(),D=Ke[Q],D||(D=Ke[Q]=_s[Q](e),D.c()),M(D,1),D.m(pt.parentNode,pt));let $l=N;N=ks(e),N!==$l&&(F(),m(Oe[$l],1,1,()=>{Oe[$l]=null}),B(),E=Oe[N],E||(E=Oe[N]=Cs[N](e),E.c()),M(E,1),E.m(ct.parentNode,ct));let Cl=G;G=Is(e),G!==Cl&&(F(),m(et[Cl],1,1,()=>{et[Cl]=null}),B(),V=et[G],V||(V=et[G]=gs[G](e),V.c()),M(V,1),V.m(ut.parentNode,ut));let kl=Z;Z=As(e),Z!==kl&&(F(),m(tt[kl],1,1,()=>{tt[kl]=null}),B(),S=tt[Z],S||(S=tt[Z]=vs[Z](e),S.c()),M(S,1),S.m(mt.parentNode,mt));let gl=H;H=zs(e),H!==gl&&(F(),m(lt[gl],1,1,()=>{lt[gl]=null}),B(),R=lt[H],R||(R=lt[H]=xs[H](e),R.c()),M(R,1),R.m(Mt.parentNode,Mt));let Il=W;W=Ds(e),W!==Il&&(F(),m(st[Il],1,1,()=>{st[Il]=null}),B(),L=st[W],L||(L=st[W]=Qs[W](e),L.c()),M(L,1),L.m(dt.parentNode,dt));const 
Gs={};s&2&&(Gs.$$scope={dirty:s,ctx:e}),Y.$set(Gs)},i(e){bl||(M(u.$$.fragment,e),M(j.$$.fragment,e),M($),M(P.$$.fragment,e),M(k),M(O.$$.fragment,e),M(te.$$.fragment,e),M(ne.$$.fragment,e),M(pe.$$.fragment,e),M(I),M(A),M(fe.$$.fragment,e),M(z),M(Je.$$.fragment,e),M(D),M(Ce.$$.fragment,e),M(E),M(ze.$$.fragment,e),M(V),M(De.$$.fragment,e),M(Ee.$$.fragment,e),M(S),M(R),M(L),M(Se.$$.fragment,e),M(He.$$.fragment,e),M(Y.$$.fragment,e),M(Be.$$.fragment,e),bl=!0)},o(e){m(u.$$.fragment,e),m(j.$$.fragment,e),m($),m(P.$$.fragment,e),m(k),m(O.$$.fragment,e),m(te.$$.fragment,e),m(ne.$$.fragment,e),m(pe.$$.fragment,e),m(I),m(A),m(fe.$$.fragment,e),m(z),m(Je.$$.fragment,e),m(D),m(Ce.$$.fragment,e),m(E),m(ze.$$.fragment,e),m(V),m(De.$$.fragment,e),m(Ee.$$.fragment,e),m(S),m(R),m(L),m(Se.$$.fragment,e),m(He.$$.fragment,e),m(Y.$$.fragment,e),m(Be.$$.fragment,e),bl=!1},d(e){e&&(n(i),n(t),n(r),n(o),n(bt),n(nt),n(Tt),n(at),n(K),n(ht),n(Jt),n(ee),n(Ut),n(jt),n(le),n(wt),n(X),n(_t),n(se),n($t),n(Ct),n(ae),n(kt),n(ie),n(gt),n(oe),n(It),n(re),n(vt),n(At),n(ce),n(xt),n(ue),n(zt),n(me),n(Qt),n(it),n(Me),n(Dt),n(ot),n(de),n(Nt),n(Et),n(rt),n(ye),n(Gt),n(be),n(Vt),n(Te),n(Zt),n(he),n(St),n(Ht),n(Ue),n(Rt),n(je),n(Wt),n(we),n(Lt),n(_e),n(Bt),n(pt),n($e),n(Ft),n(Pt),n(ke),n(Xt),n(q),n(qt),n(ge),n(Yt),n(Ie),n(Kt),n(ve),n(Ot),n(Ae),n(el),n(ct),n(xe),n(tl),n(ll),n(ut),n(Qe),n(sl),n(nl),n(Ne),n(al),n(il),n(mt),n(Ge),n(ol),n(Mt),n(dt),n(Ve),n(rl),n(Ze),n(pl),n(cl),n(ul),n(Re),n(ml),n(We),n(Ml),n(Le),n(dl),n(fl),n(yl),n(yt)),n(l),U(u,e),U(j,e),Fe[_].d(e),U(P,e),Pe[C].d(e),U(O,e),U(te,e),U(ne,e),U(pe,e),Xe[g].d(e),qe[v].d(e),U(fe,e),Ye[x].d(e),U(Je,e),Ke[Q].d(e),U(Ce,e),Oe[N].d(e),U(ze,e),et[G].d(e),U(De,e),U(Ee,e),tt[Z].d(e),lt[H].d(e),st[W].d(e),U(Se,e),U(He,e),U(Y,e),U(Be,e)}}}const $n='{"title":"Dietro la pipeline","local":"dietro-la-pipeline","sections":[{"title":"Preelaborazione con un tokenizer","local":"preelaborazione-con-un-tokenizer","sections":[],"depth":2},{"title":"Passare attraverso il 
modello","local":"passare-attraverso-il-modello","sections":[{"title":"Un vettore ad alta dimensionalità?","local":"un-vettore-ad-alta-dimensionalità","sections":[],"depth":3},{"title":"Model heads: Dare un senso ai numeri","local":"model-heads-dare-un-senso-ai-numeri","sections":[],"depth":3}],"depth":2},{"title":"Postprocessing the output","local":"postprocessing-the-output","sections":[],"depth":2}],"depth":1}';function Cn(d,l,i){let t="pt";return Bs(()=>{const r=new URLSearchParams(window.location.search);i(0,t=r.get("fw")||"pt")}),[t]}class Dn extends Fs{constructor(l){super(),Ps(this,l,Cn,_n,Ls,{})}}export{Dn as component}; | |
Xet Storage Details
- Size: 52.6 kB
- Xet hash: a7db3fc2e448ad69740345320f011f3961e81445e78bc8b9c405e4c6c0753908
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.