# Kandinsky 2.2

<Tip warning={true}>

This script is experimental, and it's easy to overfit and run into issues like catastrophic forgetting. Try exploring different hyperparameters to get the best results on your dataset.

</Tip>

Kandinsky 2.2 is a multilingual text-to-image model capable of producing more photorealistic images. The model includes an image prior model for creating image embeddings from text prompts, and a decoder model that generates images based on the prior model's embeddings. That's why you'll find two separate scripts in Diffusers for Kandinsky 2.2, one for training the prior model and one for training the decoder model. You can train the two models separately, but to get the best results, you should train both the prior and decoder models.

Depending on your GPU, you may need to enable `gradient_checkpointing` (⚠️ not supported for the prior model!), `mixed_precision`, and `gradient_accumulation_steps` to help fit the model into memory and to speed up training. You can reduce your memory usage even more by enabling memory-efficient attention with [xFormers](../optimization/xformers) (version [v0.0.16](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212) fails for training on some GPUs, so you may need to install a development version instead).
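For instance, the memory-saving options can be combined on the decoder training command. This is only a sketch: it assumes the decoder script exposes the same flags as the other Diffusers text-to-image training scripts, and remember that `--gradient_checkpointing` is not available for the prior script.

```bash
# Hypothetical combination of memory-saving flags for the decoder script.
accelerate launch train_text_to_image_decoder.py \
  --mixed_precision="fp16" \
  --gradient_checkpointing \
  --gradient_accumulation_steps=4 \
  --enable_xformers_memory_efficient_attention
```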
This guide explores the [train_text_to_image_prior.py](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py) and [train_text_to_image_decoder.py](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py) scripts to help you become more familiar with them, and how you can adapt them for your own use case.

Before running the scripts, make sure you install the library from source:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then navigate to the example folder containing the training scripts and install the required dependencies:

```bash
cd examples/kandinsky2_2/text_to_image
pip install -r requirements.txt
```

<Tip>

🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed precision. It automatically configures your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.

</Tip>

Initialize a 🤗 Accelerate environment:

```bash
accelerate config
```

To set up a default 🤗 Accelerate environment without choosing any configurations:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell, like a notebook, you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.

<Tip>

The following sections highlight the parts of the training script that are important for understanding how to modify it, but they don't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the scripts and let us know if you have any questions or concerns.

</Tip>

## Script parameters

The training scripts provide many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L190) function. The training script provides default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like.
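To list every available parameter and its default value, you can print the script's built-in help (the scripts parse their arguments with `argparse`, so `--help` is available):

```bash
python train_text_to_image_prior.py --help
```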
For example, to speed up training with mixed precision using the fp16 format, add the `--mixed_precision` parameter to the training command:

```bash
accelerate launch train_text_to_image_prior.py \
  --mixed_precision="fp16"
```

Most of the parameters are identical to the parameters in the [Text-to-image](text2image#script-parameters) training guide, so let's get straight to a walkthrough of the Kandinsky training scripts!

### Min-SNR weighting

The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting either `epsilon` (noise) or `v_prediction`, and Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script.

Add the `--snr_gamma` parameter and set it to the recommended value of 5.0:

```bash
accelerate launch train_text_to_image_prior.py \
  --snr_gamma=5.0
```
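Conceptually, the strategy clamps the per-timestep loss weight at `min(SNR(t), snr_gamma)` so that easy, low-noise timesteps don't dominate the objective. The sketch below illustrates the idea for `epsilon` prediction; it is a simplified illustration rather than the scripts' exact code, and it assumes the `compute_snr` helper from `diffusers.training_utils`:

```py
import torch
import torch.nn.functional as F
from diffusers.training_utils import compute_snr


def min_snr_loss(model_pred, target, noise_scheduler, timesteps, snr_gamma=5.0):
    """Per-sample MSE rebalanced with Min-SNR weighting (epsilon prediction)."""
    loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
    loss = loss.mean(dim=list(range(1, len(loss.shape))))
    snr = compute_snr(noise_scheduler, timesteps)
    # Clamp the per-timestep weight at snr_gamma so low-noise timesteps don't dominate.
    mse_loss_weights = torch.clamp(snr, max=snr_gamma) / snr
    return (loss * mse_loss_weights).mean()
```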
## Training script

The training script is also similar to the [Text-to-image](text2image#training-script) training guide, but it's been modified to support training the prior and decoder models. This guide focuses on the code that is unique to the Kandinsky 2.2 training scripts.

<hfoptions id="script">
<hfoption id="prior model">

The [`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L441) function contains the code for preparing the dataset and training the model.

One of the main differences you'll notice right away is that, in addition to a scheduler and a tokenizer, the training script loads a `CLIPImageProcessor` for preprocessing images and a `CLIPVisionModelWithProjection` model for encoding the images:

```py
noise_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", prediction_type="sample")
image_processor = CLIPImageProcessor.from_pretrained(
    args.pretrained_prior_model_name_or_path, subfolder="image_processor"
)
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="tokenizer")

with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
    image_encoder = CLIPVisionModelWithProjection.from_pretrained(
        args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype
    ).eval()
    text_encoder = CLIPTextModelWithProjection.from_pretrained(
        args.pretrained_prior_model_name_or_path, subfolder="text_encoder", torch_dtype=weight_dtype
    ).eval()
```

Kandinsky uses a `PriorTransformer` to generate the image embeddings, so you'll want to set up the optimizer to learn the prior model's parameters:

```py
prior = PriorTransformer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="prior")
prior.train()
optimizer = optimizer_cls(
    prior.parameters(),
    lr=args.learning_rate,
    betas=(args.adam_beta1, args.adam_beta2),
    weight_decay=args.adam_weight_decay,
    eps=args.adam_epsilon,
)
```

Next, the input captions are tokenized and the images are [preprocessed](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L632) by the `CLIPImageProcessor`:

```py
def preprocess_train(examples):
    images = [image.convert("RGB") for image in examples[image_column]]
    examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values
    examples["text_input_ids"], examples["text_mask"] = tokenize_captions(examples)
    return examples
```

Finally, the [training loop](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L718) converts the input images into latents, adds noise to the image embeddings, and makes a prediction:

```py
model_pred = prior(
    noisy_latents,
    timestep=timesteps,
    proj_embedding=prompt_embeds,
    encoder_hidden_states=text_encoder_hidden_states,
    attention_mask=text_mask,
).predicted_image_embedding
```
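Because the scheduler above is created with `prediction_type="sample"`, the prediction is compared against the clean image embedding rather than the added noise. Below is a minimal sketch of the loss step under that assumption; `clean_image_embeds` is an illustrative name for the un-noised CLIP image embeddings computed earlier in the loop, not necessarily the script's exact variable:

```py
import torch.nn.functional as F

# With prediction_type="sample", the prior is trained to recover the clean
# CLIP image embedding directly, so the target is the un-noised embedding.
target = clean_image_embeds
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
```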
If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process.

</hfoption>
<hfoption id="decoder model">

The [`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L440) function contains the code for preparing the dataset and training the model.

Unlike the prior model, the decoder initializes a `VQModel` for decoding the latents into images, and it uses a `UNet2DConditionModel`:

```py
with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
    vae = VQModel.from_pretrained(
        args.pretrained_decoder_model_name_or_path, subfolder="movq", torch_dtype=weight_dtype
    ).eval()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained(
        args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype
    ).eval()
unet = UNet2DConditionModel.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="unet")
```

Next, the script includes several image transforms and a [preprocessing](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L622) function for applying the transforms to the images and returning the pixel values:

```py
def preprocess_train(examples):
    images = [image.convert("RGB") for image in examples[image_column]]
    examples["pixel_values"] = [train_transforms(image) for image in images]
    examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values
    return examples
```

Lastly, the [training loop](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L706) handles converting the images to latents, adding noise, and predicting the noise residual:

```py
model_pred = unet(noisy_latents, timesteps, None, added_cond_kwargs=added_cond_kwargs).sample[:, :4]
```

If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process.

</hfoption>
</hfoptions>

## Launch the script

Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀

You'll train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters, but you can also create and train on your own dataset by following the [Create a dataset for training](create_dataset) guide. Set the environment variable `DATASET_NAME` to the name of the dataset on the Hub, or if you're training on your own files, set the environment variable `TRAIN_DIR` to the path to your dataset.

If you're training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command.
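For instance, a run on your own image files across two GPUs might look like the following. This is a sketch: the `--train_data_dir` argument and the placeholder path are assumptions based on the other Diffusers text-to-image scripts, so check `--help` for the exact option names.

```bash
export TRAIN_DIR="path/to/your/dataset"

accelerate launch --multi_gpu --num_processes=2 --mixed_precision="fp16" train_text_to_image_prior.py \
  --train_data_dir=$TRAIN_DIR \
  --resolution=768 \
  --train_batch_size=1 \
  --output_dir="kandi2-prior-custom-model"
```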
<Tip>

To monitor training progress with Weights & Biases, add the `--report_to=wandb` parameter to the training command. You'll also need to add the `--validation_prompt` parameter to the training command to keep track of results. This can be really useful for debugging the model and viewing intermediate results.

</Tip>

<hfoptions id="training-inference">
<hfoption id="prior model">

```bash
export DATASET_NAME="lambdalabs/naruto-blip-captions"

accelerate launch --mixed_precision="fp16" train_text_to_image_prior.py \
  --dataset_name=$DATASET_NAME \
  --resolution=768 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --checkpoints_total_limit=3 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --validation_prompts="A robot naruto, 4k photo" \
  --report_to="wandb" \
  --push_to_hub \
  --output_dir="kandi2-prior-naruto-model"
```

</hfoption>
<hfoption id="decoder model">

```bash
export DATASET_NAME="lambdalabs/naruto-blip-captions"

accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \
  --dataset_name=$DATASET_NAME \
  --resolution=768 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --checkpoints_total_limit=3 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --validation_prompts="A robot naruto, 4k photo" \
  --report_to="wandb" \
  --push_to_hub \
  --output_dir="kandi2-decoder-naruto-model"
```

</hfoption>
</hfoptions>
Once training is finished, you can use your newly trained model for inference!

<hfoptions id="training-inference">
<hfoption id="prior model">

```py
from diffusers import AutoPipelineForText2Image, DiffusionPipeline
import torch

prior_pipeline = DiffusionPipeline.from_pretrained(output_dir, torch_dtype=torch.float16)
prior_components = {"prior_" + k: v for k, v in prior_pipeline.components.items()}
pipeline = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", **prior_components, torch_dtype=torch.float16
)
pipeline.enable_model_cpu_offload()

prompt = "A robot naruto, 4k photo"
negative_prompt = "low quality, bad quality"
image = pipeline(prompt=prompt, negative_prompt=negative_prompt).images[0]
```

<Tip>

Feel free to replace `kandinsky-community/kandinsky-2-2-decoder` with your own trained decoder checkpoint!

</Tip>

</hfoption>
<hfoption id="decoder model">

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16)
pipeline.enable_model_cpu_offload()

prompt = "A robot naruto, 4k photo"
image = pipeline(prompt=prompt).images[0]
```

For the decoder model, you can also perform inference from a saved checkpoint, which can be useful for viewing intermediate results. In this case, load the checkpoint into the UNet:

```py
from diffusers import AutoPipelineForText2Image, UNet2DConditionModel
import torch

unet = UNet2DConditionModel.from_pretrained("path/to/saved/model" + "/checkpoint-<N>/unet")

pipeline = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", unet=unet, torch_dtype=torch.float16
)
pipeline.enable_model_cpu_offload()

image = pipeline(prompt="A robot naruto, 4k photo").images[0]
```

</hfoption>
</hfoptions>
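As the tip above notes, your trained prior and decoder checkpoints can also be combined. Below is a hedged sketch of loading both of your own trained models, assuming the output directories from the launch commands above (`kandi2-prior-naruto-model` and `kandi2-decoder-naruto-model`) each contain a full saved pipeline:

```py
from diffusers import AutoPipelineForText2Image, DiffusionPipeline
import torch

# Reuse the components of your trained prior pipeline, prefixed with "prior_"
# so they override the prior inside the combined text-to-image pipeline.
prior_pipeline = DiffusionPipeline.from_pretrained("kandi2-prior-naruto-model", torch_dtype=torch.float16)
prior_components = {"prior_" + k: v for k, v in prior_pipeline.components.items()}

# Load your trained decoder and plug in the trained prior components.
pipeline = AutoPipelineForText2Image.from_pretrained(
    "kandi2-decoder-naruto-model", **prior_components, torch_dtype=torch.float16
)
pipeline.enable_model_cpu_offload()

image = pipeline(prompt="A robot naruto, 4k photo").images[0]
```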
## Next steps

Congratulations on training a Kandinsky 2.2 model! To learn more about how to use your new model, the following guides may be helpful:

- Read the [Kandinsky](../using-diffusers/kandinsky) guide to learn how to use it for a variety of different tasks (text-to-image, image-to-image, inpainting, interpolation), and how it can be combined with a ControlNet.
- Check out the [DreamBooth](dreambooth) and [LoRA](lora) training guides to learn how to train a personalized Kandinsky model with just a few example images. The two training techniques can even be combined!
