Buckets:

rtrm's picture
download
raw
43 kB
import{s as Ee,o as xe,n as Ne}from"../chunks/scheduler.94020406.js";import{S as Ye,i as Le,g as a,s,r as m,E as Fe,h as p,f as e,c as i,j as We,u as c,x as o,k as Xe,y as Qe,a as n,v as r,d as M,t as u,w as f}from"../chunks/index.a08c8d92.js";import{T as ke}from"../chunks/Tip.3b0aeee8.js";import{C as U}from"../chunks/CodeBlock.b23cf525.js";import{H as b,E as Se}from"../chunks/EditOnGithub.b1bceb47.js";function Ae(Wt){let d,h="가장 최신 버전의 예시 스크립트를 성공적으로 실행하기 위해서는, 소스에서 설치하고 최신 버전의 설치를 유지하는 것을 강력하게 추천합니다. 우리는 예시 스크립트들을 자주 업데이트하고 예시에 맞춘 특정한 요구사항을 설치합니다.";return{c(){d=a("p"),d.textContent=h},l(g){d=p(g,"P",{"data-svelte-h":!0}),o(d)!=="svelte-89o4gq"&&(d.textContent=h)},m(g,It){n(g,d,It)},p:Ne,d(g){g&&e(d)}}}function He(Wt){let d,h,g,It,T,Xt,y,zl='<a href="https://arxiv.org/abs/2302.05543" rel="nofollow">Adding Conditional Control to Text-to-Image Diffusion Models</a> (ControlNet)은 Lvmin Zhang과 Maneesh Agrawala에 의해 쓰여졌습니다.',Et,_,Pl='이 예시는 <a href="https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md" rel="nofollow">원본 ControlNet 리포지토리에서 예시 학습하기</a>에 기반합니다. ControlNet은 원들을 채우기 위해 <a href="https://huggingface.co/datasets/fusing/fill50k" rel="nofollow">small synthetic dataset</a>을 사용해서 학습됩니다.',xt,w,Nt,j,Ol="아래의 스크립트를 실행하기 전에, 라이브러리의 학습 의존성을 설치해야 합니다.",Yt,J,Lt,Z,Kl="위 사항을 만족시키기 위해서, 새로운 가상환경에서 다음 일련의 스텝을 실행하세요:",Ft,R,Qt,V,te='그 다음에는 <a href="https://github.com/huggingface/diffusers/tree/main/examples/controlnet" rel="nofollow">예시 폴더</a>으로 이동합니다.',kt,v,St,$,le="이제 실행하세요:",At,C,Ht,G,ee='<a href="https://github.com/huggingface/accelerate/" rel="nofollow">🤗Accelerate</a> 환경을 초기화 합니다:',Dt,I,qt,B,ne="혹은 여러분의 환경이 무엇인지 몰라도 기본적인 🤗Accelerate 구성으로 초기화할 수 있습니다:",zt,W,Pt,X,se="혹은 당신의 환경이 노트북 같은 상호작용하는 쉘을 지원하지 않는다면, 아래의 코드로 초기화 할 수 있습니다:",Ot,E,Kt,x,tl,N,ie='원본 데이터셋은 ControlNet <a href="https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip" rel="nofollow">repo</a>에 올라와있지만, 우리는 <a href="https://huggingface.co/datasets/fusing/fill50k" rel="nofollow">여기</a>에 새롭게 다시 올려서 🤗 Datasets 과 호환가능합니다. 그래서 학습 스크립트 상에서 데이터 불러오기를 다룰 수 있습니다.',ll,Y,ae='우리의 학습 예시는 원래 ControlNet의 학습에 쓰였던 <a href="https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5" rel="nofollow"><code>stable-diffusion-v1-5/stable-diffusion-v1-5</code></a>을 사용합니다. 그렇지만 ControlNet은 대응되는 어느 Stable Diffusion 모델(<a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" rel="nofollow"><code>CompVis/stable-diffusion-v1-4</code></a>) 혹은 <a href="https://huggingface.co/stabilityai/stable-diffusion-2-1" rel="nofollow"><code>stabilityai/stable-diffusion-2-1</code></a>의 증가를 위해 학습될 수 있습니다.',el,L,pe='자체 데이터셋을 사용하기 위해서는 <a href="create_dataset">학습을 위한 데이터셋 생성하기</a> 가이드를 확인하세요.',nl,F,sl,Q,oe="이 학습에 사용될 다음 이미지들을 다운로드하세요:",il,k,al,S,me='<code>MODEL_NAME</code> 환경 변수 (Hub 모델 리포지토리 아이디 혹은 모델 가중치가 있는 디렉토리로 가는 주소)를 명시하고 <a href="https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path" rel="nofollow"><code>pretrained_model_name_or_path</code></a> 인자로 환경변수를 보냅니다.',pl,A,ce="학습 스크립트는 당신의 리포지토리에 <code>diffusion_pytorch_model.bin</code> 파일을 생성하고 저장합니다.",ol,H,ml,D,re="이 기본적인 설정으로는 ~38GB VRAM이 필요합니다.",cl,q,Me="기본적으로 학습 스크립트는 결과를 텐서보드에 기록합니다. 가중치(weight)와 편향(bias)을 사용하기 위해 <code>--report_to wandb</code> 를 전달합니다.",rl,z,ue="더 작은 batch(배치) 크기로 gradient accumulation(기울기 누적)을 하면 학습 요구사항을 ~20 GB VRAM으로 줄일 수 있습니다.",Ml,P,ul,O,fl,K,fe=`<code>accelerate</code> 은 seamless multi-GPU 학습을 고려합니다. <code>accelerate</code>과 함께 분산된 학습을 실행하기 위해 <a href="https://huggingface.co/docs/accelerate/basic_tutorials/launch" rel="nofollow">여기</a>
의 설명을 확인하세요. 아래는 예시 명령어입니다:`,dl,tt,Ul,lt,bl,et,gl,nt,de='<thead><tr><th></th> <th align="center"></th></tr></thead> <tbody><tr><td></td> <td align="center">푸른 배경과 빨간 원</td></tr> <tr><td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png" alt="conditioning image"/></td> <td align="center"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_300_steps.png" alt="푸른 배경과 빨간 원"/></td></tr> <tr><td></td> <td align="center">갈색 꽃 배경과 청록색 원</td></tr> <tr><td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png" alt="conditioning image"/></td> <td align="center"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_300_steps.png" alt="갈색 꽃 배경과 청록색 원"/></td></tr></tbody>',Jl,st,hl,it,Ue='<thead><tr><th></th> <th align="center"></th></tr></thead> <tbody><tr><td></td> <td align="center">푸른 배경과 빨간 원</td></tr> <tr><td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png" alt="conditioning image"/></td> <td align="center"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_6000_steps.png" alt="푸른 배경과 빨간 원"/></td></tr> <tr><td></td> <td align="center">갈색 꽃 배경과 청록색 원</td></tr> <tr><td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png" alt="conditioning image"/></td> <td align="center"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_6000_steps.png" alt="갈색 꽃 배경과 청록색 원"/></td></tr></tbody>',Tl,at,yl,pt,be="16GB GPU에서 학습하기 위해 다음의 최적화를 진행하세요:",_l,ot,ge='<li>기울기 체크포인트 저장하기</li> <li>bitsandbyte의 <a href="https://github.com/TimDettmers/bitsandbytes#requirements--installation" rel="nofollow">8-bit optimizer</a>가 설치되지 않았다면 링크에 연결된 설명서를 보세요.</li>',wl,mt,Je="이제 학습 스크립트를 시작할 수 있습니다:",jl,ct,Zl,rt,Rl,Mt,he="12GB GPU에서 실행하기 위해 다음의 최적화를 진행하세요:",Vl,ut,Te='<li>기울기 체크포인트 저장하기</li> <li>bitsandbyte의 8-bit <a href="https://github.com/TimDettmers/bitsandbytes#requirements--installation" rel="nofollow">optimizer</a>(가 설치되지 않았다면 링크에 연결된 설명서를 보세요)</li> <li><a href="https://huggingface.co/docs/diffusers/training/optimization/xformers" rel="nofollow">xFormers</a>(가 설치되지 않았다면 링크에 연결된 설명서를 보세요)</li> <li>기울기를 <code>None</code>으로 설정</li>',vl,ft,$l,dt,ye="<code>pip install xformers</code>으로 <code>xformers</code>을 확실히 설치하고 <code>enable_xformers_memory_efficient_attention</code>을 사용하세요.",Cl,Ut,Gl,bt,_e=`우리는 ControlNet을 지원하기 위한 DeepSpeed를 철저하게 테스트하지 않았습니다. 환경설정이 메모리를 저장할 때,
그 환경이 성공적으로 학습했는지를 확정하지 않았습니다. 성공한 학습 실행을 위해 설정을 변경해야 할 가능성이 높습니다.`,Il,gt,we="8GB GPU에서 실행하기 위해 다음의 최적화를 진행하세요:",Bl,Jt,je='<li>기울기 체크포인트 저장하기</li> <li>bitsandbyte의 8-bit <a href="https://github.com/TimDettmers/bitsandbytes#requirements--installation" rel="nofollow">optimizer</a>(가 설치되지 않았다면 링크에 연결된 설명서를 보세요)</li> <li><a href="https://huggingface.co/docs/diffusers/training/optimization/xformers" rel="nofollow">xFormers</a>(가 설치되지 않았다면 링크에 연결된 설명서를 보세요)</li> <li>기울기를 <code>None</code>으로 설정</li> <li>DeepSpeed stage 2 변수와 optimizer 없에기</li> <li>fp16 혼합 정밀도(precision)</li>',Wl,ht,Ze=`<a href="https://www.deepspeed.ai/" rel="nofollow">DeepSpeed</a>는 CPU 또는 NVME로 텐서를 VRAM에서 오프로드할 수 있습니다.
이를 위해서 훨씬 더 많은 RAM(약 25 GB)가 필요합니다.`,Xl,Tt,Re="DeepSpeed stage 2를 활성화하기 위해서 <code>accelerate config</code>로 환경을 구성해야합니다.",El,yt,Ve="구성(configuration) 파일은 이런 모습이어야 합니다:",xl,_t,Nl,wt,ve="<팁>",Yl,jt,$e='<a href="https://huggingface.co/docs/accelerate/usage_guides/deepspeed" rel="nofollow">문서</a>를 더 많은 DeepSpeed 설정 옵션을 위해 보세요.',Ll,Zt,Ce="<팁>",Fl,Rt,Ge=`기본 Adam optimizer를 DeepSpeed’의 Adam
<code>deepspeed.ops.adam.DeepSpeedCPUAdam</code> 으로 바꾸면 상당한 속도 향상을 이룰수 있지만,
Pytorch와 같은 버전의 CUDA toolchain이 필요합니다. 8-비트 optimizer는 현재 DeepSpeed와
호환되지 않는 것 같습니다.`,Ql,Vt,kl,vt,Sl,$t,Ie=`학습된 모델은 <code>StableDiffusionControlNetPipeline</code>과 함께 실행될 수 있습니다.
<code>base_model_path</code>와 <code>controlnet_path</code> 에 값을 지정하세요 <code>--pretrained_model_name_or_path</code> 와
<code>--output_dir</code> 는 학습 스크립트에 개별적으로 지정됩니다.`,Al,Ct,Hl,Gt,Dl,Bt,ql;return T=new b({props:{title:"ControlNet",local:"controlnet",headingTag:"h1"}}),w=new b({props:{title:"의존성 설치하기",local:"의존성-설치하기",headingTag:"h2"}}),J=new ke({props:{warning:!0,$$slots:{default:[Ae]},$$scope:{ctx:Wt}}}),R=new U({props:{code:"Z2l0JTIwY2xvbmUlMjBodHRwcyUzQSUyRiUyRmdpdGh1Yi5jb20lMkZodWdnaW5nZmFjZSUyRmRpZmZ1c2VycyUwQWNkJTIwZGlmZnVzZXJzJTBBcGlwJTIwaW5zdGFsbCUyMC1lJTIwLg==",highlighted:`git <span class="hljs-built_in">clone</span> https://github.com/huggingface/diffusers
<span class="hljs-built_in">cd</span> diffusers
pip install -e .`,wrap:!1}}),v=new U({props:{code:"Y2QlMjBleGFtcGxlcyUyRmNvbnRyb2xuZXQ=",highlighted:'<span class="hljs-built_in">cd</span> examples/controlnet',wrap:!1}}),C=new U({props:{code:"cGlwJTIwaW5zdGFsbCUyMC1yJTIwcmVxdWlyZW1lbnRzLnR4dA==",highlighted:"pip install -r requirements.txt",wrap:!1}}),I=new U({props:{code:"YWNjZWxlcmF0ZSUyMGNvbmZpZw==",highlighted:"accelerate config",wrap:!1}}),W=new U({props:{code:"YWNjZWxlcmF0ZSUyMGNvbmZpZyUyMGRlZmF1bHQ=",highlighted:"accelerate config default",wrap:!1}}),E=new U({props:{code:"ZnJvbSUyMGFjY2VsZXJhdGUudXRpbHMlMjBpbXBvcnQlMjB3cml0ZV9iYXNpY19jb25maWclMEElMEF3cml0ZV9iYXNpY19jb25maWcoKQ==",highlighted:`<span class="hljs-keyword">from</span> accelerate.utils <span class="hljs-keyword">import</span> write_basic_config
write_basic_config()`,wrap:!1}}),x=new b({props:{title:"원을 채우는 데이터셋",local:"원을-채우는-데이터셋",headingTag:"h2"}}),F=new b({props:{title:"학습",local:"학습",headingTag:"h2"}}),k=new U({props:{code:"d2dldCUyMGh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmh1Z2dpbmdmYWNlJTJGZG9jdW1lbnRhdGlvbi1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRmRpZmZ1c2VycyUyRmNvbnRyb2xuZXRfdHJhaW5pbmclMkZjb25kaXRpb25pbmdfaW1hZ2VfMS5wbmclMEElMEF3Z2V0JTIwaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGaHVnZ2luZ2ZhY2UlMkZkb2N1bWVudGF0aW9uLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGZGlmZnVzZXJzJTJGY29udHJvbG5ldF90cmFpbmluZyUyRmNvbmRpdGlvbmluZ19pbWFnZV8yLnBuZw==",highlighted:`wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png`,wrap:!1}}),H=new U({props:{code:"ZXhwb3J0JTIwTU9ERUxfRElSJTNEJTIyc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTBBZXhwb3J0JTIwT1VUUFVUX0RJUiUzRCUyMnBhdGglMjB0byUyMHNhdmUlMjBtb2RlbCUyMiUwQSUwQWFjY2VsZXJhdGUlMjBsYXVuY2glMjB0cmFpbl9jb250cm9sbmV0LnB5JTIwJTVDJTBBJTIwLS1wcmV0cmFpbmVkX21vZGVsX25hbWVfb3JfcGF0aCUzRCUyNE1PREVMX0RJUiUyMCU1QyUwQSUyMC0tb3V0cHV0X2RpciUzRCUyNE9VVFBVVF9ESVIlMjAlNUMlMEElMjAtLWRhdGFzZXRfbmFtZSUzRGZ1c2luZyUyRmZpbGw1MGslMjAlNUMlMEElMjAtLXJlc29sdXRpb24lM0Q1MTIlMjAlNUMlMEElMjAtLWxlYXJuaW5nX3JhdGUlM0QxZS01JTIwJTVDJTBBJTIwLS12YWxpZGF0aW9uX2ltYWdlJTIwJTIyLiUyRmNvbmRpdGlvbmluZ19pbWFnZV8xLnBuZyUyMiUyMCUyMi4lMkZjb25kaXRpb25pbmdfaW1hZ2VfMi5wbmclMjIlMjAlNUMlMEElMjAtLXZhbGlkYXRpb25fcHJvbXB0JTIwJTIycmVkJTIwY2lyY2xlJTIwd2l0aCUyMGJsdWUlMjBiYWNrZ3JvdW5kJTIyJTIwJTIyY3lhbiUyMGNpcmNsZSUyMHdpdGglMjBicm93biUyMGZsb3JhbCUyMGJhY2tncm91bmQlMjIlMjAlNUMlMEElMjAtLXRyYWluX2JhdGNoX3NpemUlM0Q0JTIwJTVDJTBBJTIwLS1wdXNoX3RvX2h1Yg==",highlighted:`<span class="hljs-built_in">export</span> MODEL_DIR=<span class="hljs-string">&quot;stable-diffusion-v1-5/stable-diffusion-v1-5&quot;</span>
<span class="hljs-built_in">export</span> OUTPUT_DIR=<span class="hljs-string">&quot;path to save model&quot;</span>
accelerate launch train_controlnet.py \\
--pretrained_model_name_or_path=<span class="hljs-variable">$MODEL_DIR</span> \\
--output_dir=<span class="hljs-variable">$OUTPUT_DIR</span> \\
--dataset_name=fusing/fill50k \\
--resolution=512 \\
--learning_rate=1e-5 \\
--validation_image <span class="hljs-string">&quot;./conditioning_image_1.png&quot;</span> <span class="hljs-string">&quot;./conditioning_image_2.png&quot;</span> \\
--validation_prompt <span class="hljs-string">&quot;red circle with blue background&quot;</span> <span class="hljs-string">&quot;cyan circle with brown floral background&quot;</span> \\
--train_batch_size=4 \\
--push_to_hub`,wrap:!1}}),P=new U({props:{code:"ZXhwb3J0JTIwTU9ERUxfRElSJTNEJTIyc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTBBZXhwb3J0JTIwT1VUUFVUX0RJUiUzRCUyMnBhdGglMjB0byUyMHNhdmUlMjBtb2RlbCUyMiUwQSUwQWFjY2VsZXJhdGUlMjBsYXVuY2glMjB0cmFpbl9jb250cm9sbmV0LnB5JTIwJTVDJTBBJTIwLS1wcmV0cmFpbmVkX21vZGVsX25hbWVfb3JfcGF0aCUzRCUyNE1PREVMX0RJUiUyMCU1QyUwQSUyMC0tb3V0cHV0X2RpciUzRCUyNE9VVFBVVF9ESVIlMjAlNUMlMEElMjAtLWRhdGFzZXRfbmFtZSUzRGZ1c2luZyUyRmZpbGw1MGslMjAlNUMlMEElMjAtLXJlc29sdXRpb24lM0Q1MTIlMjAlNUMlMEElMjAtLWxlYXJuaW5nX3JhdGUlM0QxZS01JTIwJTVDJTBBJTIwLS12YWxpZGF0aW9uX2ltYWdlJTIwJTIyLiUyRmNvbmRpdGlvbmluZ19pbWFnZV8xLnBuZyUyMiUyMCUyMi4lMkZjb25kaXRpb25pbmdfaW1hZ2VfMi5wbmclMjIlMjAlNUMlMEElMjAtLXZhbGlkYXRpb25fcHJvbXB0JTIwJTIycmVkJTIwY2lyY2xlJTIwd2l0aCUyMGJsdWUlMjBiYWNrZ3JvdW5kJTIyJTIwJTIyY3lhbiUyMGNpcmNsZSUyMHdpdGglMjBicm93biUyMGZsb3JhbCUyMGJhY2tncm91bmQlMjIlMjAlNUMlMEElMjAtLXRyYWluX2JhdGNoX3NpemUlM0QxJTIwJTVDJTBBJTIwLS1ncmFkaWVudF9hY2N1bXVsYXRpb25fc3RlcHMlM0Q0JTIwJTVDJTBBJTIwJTIwLS1wdXNoX3RvX2h1Yg==",highlighted:`<span class="hljs-built_in">export</span> MODEL_DIR=<span class="hljs-string">&quot;stable-diffusion-v1-5/stable-diffusion-v1-5&quot;</span>
<span class="hljs-built_in">export</span> OUTPUT_DIR=<span class="hljs-string">&quot;path to save model&quot;</span>
accelerate launch train_controlnet.py \\
--pretrained_model_name_or_path=<span class="hljs-variable">$MODEL_DIR</span> \\
--output_dir=<span class="hljs-variable">$OUTPUT_DIR</span> \\
--dataset_name=fusing/fill50k \\
--resolution=512 \\
--learning_rate=1e-5 \\
--validation_image <span class="hljs-string">&quot;./conditioning_image_1.png&quot;</span> <span class="hljs-string">&quot;./conditioning_image_2.png&quot;</span> \\
--validation_prompt <span class="hljs-string">&quot;red circle with blue background&quot;</span> <span class="hljs-string">&quot;cyan circle with brown floral background&quot;</span> \\
--train_batch_size=1 \\
--gradient_accumulation_steps=4 \\
--push_to_hub`,wrap:!1}}),O=new b({props:{title:"여러개 GPU로 학습하기",local:"여러개-gpu로-학습하기",headingTag:"h2"}}),tt=new U({props:{code:"ZXhwb3J0JTIwTU9ERUxfRElSJTNEJTIyc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTBBZXhwb3J0JTIwT1VUUFVUX0RJUiUzRCUyMnBhdGglMjB0byUyMHNhdmUlMjBtb2RlbCUyMiUwQSUwQWFjY2VsZXJhdGUlMjBsYXVuY2glMjAtLW1peGVkX3ByZWNpc2lvbiUzRCUyMmZwMTYlMjIlMjAtLW11bHRpX2dwdSUyMHRyYWluX2NvbnRyb2xuZXQucHklMjAlNUMlMEElMjAtLXByZXRyYWluZWRfbW9kZWxfbmFtZV9vcl9wYXRoJTNEJTI0TU9ERUxfRElSJTIwJTVDJTBBJTIwLS1vdXRwdXRfZGlyJTNEJTI0T1VUUFVUX0RJUiUyMCU1QyUwQSUyMC0tZGF0YXNldF9uYW1lJTNEZnVzaW5nJTJGZmlsbDUwayUyMCU1QyUwQSUyMC0tcmVzb2x1dGlvbiUzRDUxMiUyMCU1QyUwQSUyMC0tbGVhcm5pbmdfcmF0ZSUzRDFlLTUlMjAlNUMlMEElMjAtLXZhbGlkYXRpb25faW1hZ2UlMjAlMjIuJTJGY29uZGl0aW9uaW5nX2ltYWdlXzEucG5nJTIyJTIwJTIyLiUyRmNvbmRpdGlvbmluZ19pbWFnZV8yLnBuZyUyMiUyMCU1QyUwQSUyMC0tdmFsaWRhdGlvbl9wcm9tcHQlMjAlMjJyZWQlMjBjaXJjbGUlMjB3aXRoJTIwYmx1ZSUyMGJhY2tncm91bmQlMjIlMjAlMjJjeWFuJTIwY2lyY2xlJTIwd2l0aCUyMGJyb3duJTIwZmxvcmFsJTIwYmFja2dyb3VuZCUyMiUyMCU1QyUwQSUyMC0tdHJhaW5fYmF0Y2hfc2l6ZSUzRDQlMjAlNUMlMEElMjAtLW1peGVkX3ByZWNpc2lvbiUzRCUyMmZwMTYlMjIlMjAlNUMlMEElMjAtLXRyYWNrZXJfcHJvamVjdF9uYW1lJTNEJTIyY29udHJvbG5ldC1kZW1vJTIyJTIwJTVDJTBBJTIwLS1yZXBvcnRfdG8lM0R3YW5kYiUyMCU1QyUwQSUyMCUyMC0tcHVzaF90b19odWI=",highlighted:`<span class="hljs-built_in">export</span> MODEL_DIR=<span class="hljs-string">&quot;stable-diffusion-v1-5/stable-diffusion-v1-5&quot;</span>
<span class="hljs-built_in">export</span> OUTPUT_DIR=<span class="hljs-string">&quot;path to save model&quot;</span>
accelerate launch --mixed_precision=<span class="hljs-string">&quot;fp16&quot;</span> --multi_gpu train_controlnet.py \\
--pretrained_model_name_or_path=<span class="hljs-variable">$MODEL_DIR</span> \\
--output_dir=<span class="hljs-variable">$OUTPUT_DIR</span> \\
--dataset_name=fusing/fill50k \\
--resolution=512 \\
--learning_rate=1e-5 \\
--validation_image <span class="hljs-string">&quot;./conditioning_image_1.png&quot;</span> <span class="hljs-string">&quot;./conditioning_image_2.png&quot;</span> \\
--validation_prompt <span class="hljs-string">&quot;red circle with blue background&quot;</span> <span class="hljs-string">&quot;cyan circle with brown floral background&quot;</span> \\
--train_batch_size=4 \\
--mixed_precision=<span class="hljs-string">&quot;fp16&quot;</span> \\
--tracker_project_name=<span class="hljs-string">&quot;controlnet-demo&quot;</span> \\
--report_to=wandb \\
--push_to_hub`,wrap:!1}}),lt=new b({props:{title:"예시 결과",local:"예시-결과",headingTag:"h2"}}),et=new b({props:{title:"배치 사이즈 8로 300 스텝 이후:",local:"배치-사이즈-8로-300-스텝-이후",headingTag:"h4"}}),st=new b({props:{title:"배치 사이즈 8로 6000 스텝 이후:",local:"배치-사이즈-8로-6000-스텝-이후",headingTag:"h4"}}),at=new b({props:{title:"16GB GPU에서 학습하기",local:"16gb-gpu에서-학습하기",headingTag:"h2"}}),ct=new U({props:{code:"ZXhwb3J0JTIwTU9ERUxfRElSJTNEJTIyc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTBBZXhwb3J0JTIwT1VUUFVUX0RJUiUzRCUyMnBhdGglMjB0byUyMHNhdmUlMjBtb2RlbCUyMiUwQSUwQWFjY2VsZXJhdGUlMjBsYXVuY2glMjB0cmFpbl9jb250cm9sbmV0LnB5JTIwJTVDJTBBJTIwLS1wcmV0cmFpbmVkX21vZGVsX25hbWVfb3JfcGF0aCUzRCUyNE1PREVMX0RJUiUyMCU1QyUwQSUyMC0tb3V0cHV0X2RpciUzRCUyNE9VVFBVVF9ESVIlMjAlNUMlMEElMjAtLWRhdGFzZXRfbmFtZSUzRGZ1c2luZyUyRmZpbGw1MGslMjAlNUMlMEElMjAtLXJlc29sdXRpb24lM0Q1MTIlMjAlNUMlMEElMjAtLWxlYXJuaW5nX3JhdGUlM0QxZS01JTIwJTVDJTBBJTIwLS12YWxpZGF0aW9uX2ltYWdlJTIwJTIyLiUyRmNvbmRpdGlvbmluZ19pbWFnZV8xLnBuZyUyMiUyMCUyMi4lMkZjb25kaXRpb25pbmdfaW1hZ2VfMi5wbmclMjIlMjAlNUMlMEElMjAtLXZhbGlkYXRpb25fcHJvbXB0JTIwJTIycmVkJTIwY2lyY2xlJTIwd2l0aCUyMGJsdWUlMjBiYWNrZ3JvdW5kJTIyJTIwJTIyY3lhbiUyMGNpcmNsZSUyMHdpdGglMjBicm93biUyMGZsb3JhbCUyMGJhY2tncm91bmQlMjIlMjAlNUMlMEElMjAtLXRyYWluX2JhdGNoX3NpemUlM0QxJTIwJTVDJTBBJTIwLS1ncmFkaWVudF9hY2N1bXVsYXRpb25fc3RlcHMlM0Q0JTIwJTVDJTBBJTIwLS1ncmFkaWVudF9jaGVja3BvaW50aW5nJTIwJTVDJTBBJTIwLS11c2VfOGJpdF9hZGFtJTIwJTVDJTBBJTIwJTIwLS1wdXNoX3RvX2h1Yg==",highlighted:`<span class="hljs-built_in">export</span> MODEL_DIR=<span class="hljs-string">&quot;stable-diffusion-v1-5/stable-diffusion-v1-5&quot;</span>
<span class="hljs-built_in">export</span> OUTPUT_DIR=<span class="hljs-string">&quot;path to save model&quot;</span>
accelerate launch train_controlnet.py \\
--pretrained_model_name_or_path=<span class="hljs-variable">$MODEL_DIR</span> \\
--output_dir=<span class="hljs-variable">$OUTPUT_DIR</span> \\
--dataset_name=fusing/fill50k \\
--resolution=512 \\
--learning_rate=1e-5 \\
--validation_image <span class="hljs-string">&quot;./conditioning_image_1.png&quot;</span> <span class="hljs-string">&quot;./conditioning_image_2.png&quot;</span> \\
--validation_prompt <span class="hljs-string">&quot;red circle with blue background&quot;</span> <span class="hljs-string">&quot;cyan circle with brown floral background&quot;</span> \\
--train_batch_size=1 \\
--gradient_accumulation_steps=4 \\
--gradient_checkpointing \\
--use_8bit_adam \\
--push_to_hub`,wrap:!1}}),rt=new b({props:{title:"12GB GPU에서 학습하기",local:"12gb-gpu에서-학습하기",headingTag:"h2"}}),ft=new U({props:{code:"ZXhwb3J0JTIwTU9ERUxfRElSJTNEJTIyc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTBBZXhwb3J0JTIwT1VUUFVUX0RJUiUzRCUyMnBhdGglMjB0byUyMHNhdmUlMjBtb2RlbCUyMiUwQSUwQWFjY2VsZXJhdGUlMjBsYXVuY2glMjB0cmFpbl9jb250cm9sbmV0LnB5JTIwJTVDJTBBJTIwLS1wcmV0cmFpbmVkX21vZGVsX25hbWVfb3JfcGF0aCUzRCUyNE1PREVMX0RJUiUyMCU1QyUwQSUyMC0tb3V0cHV0X2RpciUzRCUyNE9VVFBVVF9ESVIlMjAlNUMlMEElMjAtLWRhdGFzZXRfbmFtZSUzRGZ1c2luZyUyRmZpbGw1MGslMjAlNUMlMEElMjAtLXJlc29sdXRpb24lM0Q1MTIlMjAlNUMlMEElMjAtLWxlYXJuaW5nX3JhdGUlM0QxZS01JTIwJTVDJTBBJTIwLS12YWxpZGF0aW9uX2ltYWdlJTIwJTIyLiUyRmNvbmRpdGlvbmluZ19pbWFnZV8xLnBuZyUyMiUyMCUyMi4lMkZjb25kaXRpb25pbmdfaW1hZ2VfMi5wbmclMjIlMjAlNUMlMEElMjAtLXZhbGlkYXRpb25fcHJvbXB0JTIwJTIycmVkJTIwY2lyY2xlJTIwd2l0aCUyMGJsdWUlMjBiYWNrZ3JvdW5kJTIyJTIwJTIyY3lhbiUyMGNpcmNsZSUyMHdpdGglMjBicm93biUyMGZsb3JhbCUyMGJhY2tncm91bmQlMjIlMjAlNUMlMEElMjAtLXRyYWluX2JhdGNoX3NpemUlM0QxJTIwJTVDJTBBJTIwLS1ncmFkaWVudF9hY2N1bXVsYXRpb25fc3RlcHMlM0Q0JTIwJTVDJTBBJTIwLS1ncmFkaWVudF9jaGVja3BvaW50aW5nJTIwJTVDJTBBJTIwLS11c2VfOGJpdF9hZGFtJTIwJTVDJTBBJTIwLS1lbmFibGVfeGZvcm1lcnNfbWVtb3J5X2VmZmljaWVudF9hdHRlbnRpb24lMjAlNUMlMEElMjAtLXNldF9ncmFkc190b19ub25lJTIwJTVDJTBBJTIwJTIwLS1wdXNoX3RvX2h1Yg==",highlighted:`<span class="hljs-built_in">export</span> MODEL_DIR=<span class="hljs-string">&quot;stable-diffusion-v1-5/stable-diffusion-v1-5&quot;</span>
<span class="hljs-built_in">export</span> OUTPUT_DIR=<span class="hljs-string">&quot;path to save model&quot;</span>
accelerate launch train_controlnet.py \\
--pretrained_model_name_or_path=<span class="hljs-variable">$MODEL_DIR</span> \\
--output_dir=<span class="hljs-variable">$OUTPUT_DIR</span> \\
--dataset_name=fusing/fill50k \\
--resolution=512 \\
--learning_rate=1e-5 \\
--validation_image <span class="hljs-string">&quot;./conditioning_image_1.png&quot;</span> <span class="hljs-string">&quot;./conditioning_image_2.png&quot;</span> \\
--validation_prompt <span class="hljs-string">&quot;red circle with blue background&quot;</span> <span class="hljs-string">&quot;cyan circle with brown floral background&quot;</span> \\
--train_batch_size=1 \\
--gradient_accumulation_steps=4 \\
--gradient_checkpointing \\
--use_8bit_adam \\
--enable_xformers_memory_efficient_attention \\
--set_grads_to_none \\
--push_to_hub`,wrap:!1}}),Ut=new b({props:{title:"8GB GPU에서 학습하기",local:"8gb-gpu에서-학습하기",headingTag:"h2"}}),_t=new U({props:{code:"Y29tcHV0ZV9lbnZpcm9ubWVudCUzQSUyMExPQ0FMX01BQ0hJTkUlMEFkZWVwc3BlZWRfY29uZmlnJTNBJTBBJTIwJTIwZ3JhZGllbnRfYWNjdW11bGF0aW9uX3N0ZXBzJTNBJTIwNCUwQSUyMCUyMG9mZmxvYWRfb3B0aW1pemVyX2RldmljZSUzQSUyMGNwdSUwQSUyMCUyMG9mZmxvYWRfcGFyYW1fZGV2aWNlJTNBJTIwY3B1JTBBJTIwJTIwemVybzNfaW5pdF9mbGFnJTNBJTIwZmFsc2UlMEElMjAlMjB6ZXJvX3N0YWdlJTNBJTIwMiUwQWRpc3RyaWJ1dGVkX3R5cGUlM0ElMjBERUVQU1BFRUQ=",highlighted:`<span class="hljs-attr">compute_environment:</span> <span class="hljs-string">LOCAL_MACHINE</span>
<span class="hljs-attr">deepspeed_config:</span>
<span class="hljs-attr">gradient_accumulation_steps:</span> <span class="hljs-number">4</span>
<span class="hljs-attr">offload_optimizer_device:</span> <span class="hljs-string">cpu</span>
<span class="hljs-attr">offload_param_device:</span> <span class="hljs-string">cpu</span>
<span class="hljs-attr">zero3_init_flag:</span> <span class="hljs-literal">false</span>
<span class="hljs-attr">zero_stage:</span> <span class="hljs-number">2</span>
<span class="hljs-attr">distributed_type:</span> <span class="hljs-string">DEEPSPEED</span>`,wrap:!1}}),Vt=new U({props:{code:"ZXhwb3J0JTIwTU9ERUxfRElSJTNEJTIyc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTBBZXhwb3J0JTIwT1VUUFVUX0RJUiUzRCUyMnBhdGglMjB0byUyMHNhdmUlMjBtb2RlbCUyMiUwQSUwQWFjY2VsZXJhdGUlMjBsYXVuY2glMjB0cmFpbl9jb250cm9sbmV0LnB5JTIwJTVDJTBBJTIwLS1wcmV0cmFpbmVkX21vZGVsX25hbWVfb3JfcGF0aCUzRCUyNE1PREVMX0RJUiUyMCU1QyUwQSUyMC0tb3V0cHV0X2RpciUzRCUyNE9VVFBVVF9ESVIlMjAlNUMlMEElMjAtLWRhdGFzZXRfbmFtZSUzRGZ1c2luZyUyRmZpbGw1MGslMjAlNUMlMEElMjAtLXJlc29sdXRpb24lM0Q1MTIlMjAlNUMlMEElMjAtLXZhbGlkYXRpb25faW1hZ2UlMjAlMjIuJTJGY29uZGl0aW9uaW5nX2ltYWdlXzEucG5nJTIyJTIwJTIyLiUyRmNvbmRpdGlvbmluZ19pbWFnZV8yLnBuZyUyMiUyMCU1QyUwQSUyMC0tdmFsaWRhdGlvbl9wcm9tcHQlMjAlMjJyZWQlMjBjaXJjbGUlMjB3aXRoJTIwYmx1ZSUyMGJhY2tncm91bmQlMjIlMjAlMjJjeWFuJTIwY2lyY2xlJTIwd2l0aCUyMGJyb3duJTIwZmxvcmFsJTIwYmFja2dyb3VuZCUyMiUyMCU1QyUwQSUyMC0tdHJhaW5fYmF0Y2hfc2l6ZSUzRDElMjAlNUMlMEElMjAtLWdyYWRpZW50X2FjY3VtdWxhdGlvbl9zdGVwcyUzRDQlMjAlNUMlMEElMjAtLWdyYWRpZW50X2NoZWNrcG9pbnRpbmclMjAlNUMlMEElMjAtLWVuYWJsZV94Zm9ybWVyc19tZW1vcnlfZWZmaWNpZW50X2F0dGVudGlvbiUyMCU1QyUwQSUyMC0tc2V0X2dyYWRzX3RvX25vbmUlMjAlNUMlMEElMjAtLW1peGVkX3ByZWNpc2lvbiUyMGZwMTYlMjAlNUMlMEElMjAtLXB1c2hfdG9faHVi",highlighted:`<span class="hljs-built_in">export</span> MODEL_DIR=<span class="hljs-string">&quot;stable-diffusion-v1-5/stable-diffusion-v1-5&quot;</span>
<span class="hljs-built_in">export</span> OUTPUT_DIR=<span class="hljs-string">&quot;path to save model&quot;</span>
accelerate launch train_controlnet.py \\
--pretrained_model_name_or_path=<span class="hljs-variable">$MODEL_DIR</span> \\
--output_dir=<span class="hljs-variable">$OUTPUT_DIR</span> \\
--dataset_name=fusing/fill50k \\
--resolution=512 \\
--validation_image <span class="hljs-string">&quot;./conditioning_image_1.png&quot;</span> <span class="hljs-string">&quot;./conditioning_image_2.png&quot;</span> \\
--validation_prompt <span class="hljs-string">&quot;red circle with blue background&quot;</span> <span class="hljs-string">&quot;cyan circle with brown floral background&quot;</span> \\
--train_batch_size=1 \\
--gradient_accumulation_steps=4 \\
--gradient_checkpointing \\
--enable_xformers_memory_efficient_attention \\
--set_grads_to_none \\
--mixed_precision fp16 \\
--push_to_hub`,wrap:!1}}),vt=new b({props:{title:"추론",local:"추론",headingTag:"h2"}}),Ct=new U({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMFN0YWJsZURpZmZ1c2lvbkNvbnRyb2xOZXRQaXBlbGluZSUyQyUyMENvbnRyb2xOZXRNb2RlbCUyQyUyMFVuaVBDTXVsdGlzdGVwU2NoZWR1bGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGxvYWRfaW1hZ2UlMEFpbXBvcnQlMjB0b3JjaCUwQSUwQWJhc2VfbW9kZWxfcGF0aCUyMCUzRCUyMCUyMnBhdGglMjB0byUyMG1vZGVsJTIyJTBBY29udHJvbG5ldF9wYXRoJTIwJTNEJTIwJTIycGF0aCUyMHRvJTIwY29udHJvbG5ldCUyMiUwQSUwQWNvbnRyb2xuZXQlMjAlM0QlMjBDb250cm9sTmV0TW9kZWwuZnJvbV9wcmV0cmFpbmVkKGNvbnRyb2xuZXRfcGF0aCUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlJTIwJTNEJTIwU3RhYmxlRGlmZnVzaW9uQ29udHJvbE5ldFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjBiYXNlX21vZGVsX3BhdGglMkMlMjBjb250cm9sbmV0JTNEY29udHJvbG5ldCUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEElMEElMjMlMjAlRUIlOEQlOTQlMjAlRUIlQjklQTAlRUIlQTUlQjglMjAlRUMlOEElQTQlRUMlQkMlODAlRUMlQTQlODQlRUIlOUYlQUMlRUMlOTklODAlMjAlRUIlQTklOTQlRUIlQUElQTglRUIlQTYlQUMlMjAlRUMlQjUlOUMlRUMlQTAlODElRUQlOTklOTQlRUIlQTElOUMlMjBkaWZmdXNpb24lMjAlRUQlOTQlODQlRUIlQTElOUMlRUMlODQlQjglRUMlOEElQTQlMjAlRUMlODYlOEQlRUIlOEYlODQlMjAlRUMlOTglQUMlRUIlQTYlQUMlRUElQjglQjAlMEFwaXBlLnNjaGVkdWxlciUyMCUzRCUyMFVuaVBDTXVsdGlzdGVwU2NoZWR1bGVyLmZyb21fY29uZmlnKHBpcGUuc2NoZWR1bGVyLmNvbmZpZyklMEElMjMlMjB4Zm9ybWVycyVFQSVCMCU4MCUyMCVFQyU4NCVBNCVFQyVCOSU5OCVFQiU5MCU5OCVFQyVBNyU4MCUyMCVFQyU5NSU4QSVFQyU5QyVCQyVFQiVBOSVCNCUyMCVFQyU5NSU4NCVFQiU5RSU5OCUyMCVFQyVBNCU4NCVFQyU5RCU4NCUyMCVFQyU4MiVBRCVFQyVBMCU5QyVFRCU5NSU5OCVFQSVCOCVCMCUwQXBpcGUuZW5hYmxlX3hmb3JtZXJzX21lbW9yeV9lZmZpY2llbnRfYXR0ZW50aW9uKCklMEElMEFwaXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBY29udHJvbF9pbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UoJTIyLiUyRmNvbmRpdGlvbmluZ19pbWFnZV8xLnBuZyUyMiklMEFwcm9tcHQlMjAlM0QlMjAlMjJwYWxlJTIwZ29sZGVuJTIwcm9kJTIwY2lyY2xlJTIwd2l0aCUyMG9sZCUyMGxhY2UlMjBiYWNrZ3JvdW5kJTIyJTBBJTBBJTIzJTIwJUVDJTlEJUI0JUVCJUFGJUI4JUVDJUE3JTgwJTIwJUVDJTgzJTlEJUVDJTg0JUIxJUVEJTk1JTk4JUVBJUI4JUIwJTBBZ2VuZXJhdG9yJTIwJTNEJTIwdG9yY2gubWFudWFsX3NlZWQoMCklMEFpbWFnZSUyMCUzRCUyMHBpcGUocHJvbXB0JTJDJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDIwJTJDJTIwZ2VuZXJhdG9yJTNEZ2VuZXJhdG9yJTJDJTIwaW1hZ2UlM0Rjb250cm9sX2ltYWdlKS5pbWFnZXMlNUIwJTVEJTBBJTBBaW1hZ2Uuc2F2ZSglMjIuJTJGb3V0cHV0LnBuZyUyMik=",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
<span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image
<span class="hljs-keyword">import</span> torch
base_model_path = <span class="hljs-string">&quot;path to model&quot;</span>
controlnet_path = <span class="hljs-string">&quot;path to controlnet&quot;</span>
controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
base_model_path, controlnet=controlnet, torch_dtype=torch.float16
)
<span class="hljs-comment"># 더 빠른 스케줄러와 메모리 최적화로 diffusion 프로세스 속도 올리기</span>
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
<span class="hljs-comment"># xformers가 설치되지 않으면 아래 줄을 삭제하기</span>
pipe.enable_xformers_memory_efficient_attention()
pipe.enable_model_cpu_offload()
control_image = load_image(<span class="hljs-string">&quot;./conditioning_image_1.png&quot;</span>)
prompt = <span class="hljs-string">&quot;pale golden rod circle with old lace background&quot;</span>
<span class="hljs-comment"># 이미지 생성하기</span>
generator = torch.manual_seed(<span class="hljs-number">0</span>)
image = pipe(prompt, num_inference_steps=<span class="hljs-number">20</span>, generator=generator, image=control_image).images[<span class="hljs-number">0</span>]
image.save(<span class="hljs-string">&quot;./output.png&quot;</span>)`,wrap:!1}}),Gt=new Se({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/ko/training/controlnet.md"}}),{c(){d=a("meta"),h=s(),g=a("p"),It=s(),m(T.$$.fragment),Xt=s(),y=a("p"),y.innerHTML=zl,Et=s(),_=a("p"),_.innerHTML=Pl,xt=s(),m(w.$$.fragment),Nt=s(),j=a("p"),j.textContent=Ol,Yt=s(),m(J.$$.fragment),Lt=s(),Z=a("p"),Z.textContent=Kl,Ft=s(),m(R.$$.fragment),Qt=s(),V=a("p"),V.innerHTML=te,kt=s(),m(v.$$.fragment),St=s(),$=a("p"),$.textContent=le,At=s(),m(C.$$.fragment),Ht=s(),G=a("p"),G.innerHTML=ee,Dt=s(),m(I.$$.fragment),qt=s(),B=a("p"),B.textContent=ne,zt=s(),m(W.$$.fragment),Pt=s(),X=a("p"),X.textContent=se,Ot=s(),m(E.$$.fragment),Kt=s(),m(x.$$.fragment),tl=s(),N=a("p"),N.innerHTML=ie,ll=s(),Y=a("p"),Y.innerHTML=ae,el=s(),L=a("p"),L.innerHTML=pe,nl=s(),m(F.$$.fragment),sl=s(),Q=a("p"),Q.textContent=oe,il=s(),m(k.$$.fragment),al=s(),S=a("p"),S.innerHTML=me,pl=s(),A=a("p"),A.innerHTML=ce,ol=s(),m(H.$$.fragment),ml=s(),D=a("p"),D.textContent=re,cl=s(),q=a("p"),q.innerHTML=Me,rl=s(),z=a("p"),z.textContent=ue,Ml=s(),m(P.$$.fragment),ul=s(),m(O.$$.fragment),fl=s(),K=a("p"),K.innerHTML=fe,dl=s(),m(tt.$$.fragment),Ul=s(),m(lt.$$.fragment),bl=s(),m(et.$$.fragment),gl=s(),nt=a("table"),nt.innerHTML=de,Jl=s(),m(st.$$.fragment),hl=s(),it=a("table"),it.innerHTML=Ue,Tl=s(),m(at.$$.fragment),yl=s(),pt=a("p"),pt.textContent=be,_l=s(),ot=a("ul"),ot.innerHTML=ge,wl=s(),mt=a("p"),mt.textContent=Je,jl=s(),m(ct.$$.fragment),Zl=s(),m(rt.$$.fragment),Rl=s(),Mt=a("p"),Mt.textContent=he,Vl=s(),ut=a("ul"),ut.innerHTML=Te,vl=s(),m(ft.$$.fragment),$l=s(),dt=a("p"),dt.innerHTML=ye,Cl=s(),m(Ut.$$.fragment),Gl=s(),bt=a("p"),bt.textContent=_e,Il=s(),gt=a("p"),gt.textContent=we,Bl=s(),Jt=a("ul"),Jt.innerHTML=je,Wl=s(),ht=a("p"),ht.innerHTML=Ze,Xl=s(),Tt=a("p"),Tt.innerHTML=Re,El=s(),yt=a("p"),yt.textContent=Ve,xl=s(),m(_t.$$.fragment),Nl=s(),wt=a("p"),wt.textContent=ve,Yl=s(),jt=a("p"),jt.innerHTML=$e,Ll=s(),Zt=a("p"),Zt.textContent=Ce,Fl=s(),Rt=a("p"),Rt.innerHTML=Ge,Ql=s(),m(Vt.$$.fragment),kl=s(),m(vt.$$.fragment),Sl=s(),$t=a("p"),$t.innerHTML=Ie,Al=s(),m(Ct.$$.fragment),Hl=s(),m(Gt.$$.fragment),Dl=s(),Bt=a("p"),this.h()},l(t){const l=Fe("svelte-u9bgzb",document.head);d=p(l,"META",{name:!0,content:!0}),l.forEach(e),h=i(t),g=p(t,"P",{}),We(g).forEach(e),It=i(t),c(T.$$.fragment,t),Xt=i(t),y=p(t,"P",{"data-svelte-h":!0}),o(y)!=="svelte-9fufis"&&(y.innerHTML=zl),Et=i(t),_=p(t,"P",{"data-svelte-h":!0}),o(_)!=="svelte-srp9tb"&&(_.innerHTML=Pl),xt=i(t),c(w.$$.fragment,t),Nt=i(t),j=p(t,"P",{"data-svelte-h":!0}),o(j)!=="svelte-audt4e"&&(j.textContent=Ol),Yt=i(t),c(J.$$.fragment,t),Lt=i(t),Z=p(t,"P",{"data-svelte-h":!0}),o(Z)!=="svelte-enix0a"&&(Z.textContent=Kl),Ft=i(t),c(R.$$.fragment,t),Qt=i(t),V=p(t,"P",{"data-svelte-h":!0}),o(V)!=="svelte-1k8zlp9"&&(V.innerHTML=te),kt=i(t),c(v.$$.fragment,t),St=i(t),$=p(t,"P",{"data-svelte-h":!0}),o($)!=="svelte-1fgpi95"&&($.textContent=le),At=i(t),c(C.$$.fragment,t),Ht=i(t),G=p(t,"P",{"data-svelte-h":!0}),o(G)!=="svelte-12bqcqg"&&(G.innerHTML=ee),Dt=i(t),c(I.$$.fragment,t),qt=i(t),B=p(t,"P",{"data-svelte-h":!0}),o(B)!=="svelte-wd3dp4"&&(B.textContent=ne),zt=i(t),c(W.$$.fragment,t),Pt=i(t),X=p(t,"P",{"data-svelte-h":!0}),o(X)!=="svelte-1gibgaz"&&(X.textContent=se),Ot=i(t),c(E.$$.fragment,t),Kt=i(t),c(x.$$.fragment,t),tl=i(t),N=p(t,"P",{"data-svelte-h":!0}),o(N)!=="svelte-u51kgh"&&(N.innerHTML=ie),ll=i(t),Y=p(t,"P",{"data-svelte-h":!0}),o(Y)!=="svelte-1854nza"&&(Y.innerHTML=ae),el=i(t),L=p(t,"P",{"data-svelte-h":!0}),o(L)!=="svelte-18vs9m4"&&(L.innerHTML=pe),nl=i(t),c(F.$$.fragment,t),sl=i(t),Q=p(t,"P",{"data-svelte-h":!0}),o(Q)!=="svelte-64o031"&&(Q.textContent=oe),il=i(t),c(k.$$.fragment,t),al=i(t),S=p(t,"P",{"data-svelte-h":!0}),o(S)!=="svelte-ky63v0"&&(S.innerHTML=me),pl=i(t),A=p(t,"P",{"data-svelte-h":!0}),o(A)!=="svelte-1v3g5fv"&&(A.innerHTML=ce),ol=i(t),c(H.$$.fragment,t),ml=i(t),D=p(t,"P",{"data-svelte-h":!0}),o(D)!=="svelte-on7i15"&&(D.textContent=re),cl=i(t),q=p(t,"P",{"data-svelte-h":!0}),o(q)!=="svelte-j3jf4w"&&(q.innerHTML=Me),rl=i(t),z=p(t,"P",{"data-svelte-h":!0}),o(z)!=="svelte-ibpfse"&&(z.textContent=ue),Ml=i(t),c(P.$$.fragment,t),ul=i(t),c(O.$$.fragment,t),fl=i(t),K=p(t,"P",{"data-svelte-h":!0}),o(K)!=="svelte-kn2k8f"&&(K.innerHTML=fe),dl=i(t),c(tt.$$.fragment,t),Ul=i(t),c(lt.$$.fragment,t),bl=i(t),c(et.$$.fragment,t),gl=i(t),nt=p(t,"TABLE",{"data-svelte-h":!0}),o(nt)!=="svelte-7n8ul8"&&(nt.innerHTML=de),Jl=i(t),c(st.$$.fragment,t),hl=i(t),it=p(t,"TABLE",{"data-svelte-h":!0}),o(it)!=="svelte-197rfee"&&(it.innerHTML=Ue),Tl=i(t),c(at.$$.fragment,t),yl=i(t),pt=p(t,"P",{"data-svelte-h":!0}),o(pt)!=="svelte-1a3ynwi"&&(pt.textContent=be),_l=i(t),ot=p(t,"UL",{"data-svelte-h":!0}),o(ot)!=="svelte-gud7gs"&&(ot.innerHTML=ge),wl=i(t),mt=p(t,"P",{"data-svelte-h":!0}),o(mt)!=="svelte-fmnd9z"&&(mt.textContent=Je),jl=i(t),c(ct.$$.fragment,t),Zl=i(t),c(rt.$$.fragment,t),Rl=i(t),Mt=p(t,"P",{"data-svelte-h":!0}),o(Mt)!=="svelte-ulemqz"&&(Mt.textContent=he),Vl=i(t),ut=p(t,"UL",{"data-svelte-h":!0}),o(ut)!=="svelte-ezl5xe"&&(ut.innerHTML=Te),vl=i(t),c(ft.$$.fragment,t),$l=i(t),dt=p(t,"P",{"data-svelte-h":!0}),o(dt)!=="svelte-m0yrgw"&&(dt.innerHTML=ye),Cl=i(t),c(Ut.$$.fragment,t),Gl=i(t),bt=p(t,"P",{"data-svelte-h":!0}),o(bt)!=="svelte-1bj00p0"&&(bt.textContent=_e),Il=i(t),gt=p(t,"P",{"data-svelte-h":!0}),o(gt)!=="svelte-sh9kz6"&&(gt.textContent=we),Bl=i(t),Jt=p(t,"UL",{"data-svelte-h":!0}),o(Jt)!=="svelte-xjm66k"&&(Jt.innerHTML=je),Wl=i(t),ht=p(t,"P",{"data-svelte-h":!0}),o(ht)!=="svelte-2hnilk"&&(ht.innerHTML=Ze),Xl=i(t),Tt=p(t,"P",{"data-svelte-h":!0}),o(Tt)!=="svelte-b5or85"&&(Tt.innerHTML=Re),El=i(t),yt=p(t,"P",{"data-svelte-h":!0}),o(yt)!=="svelte-1y0d6k"&&(yt.textContent=Ve),xl=i(t),c(_t.$$.fragment,t),Nl=i(t),wt=p(t,"P",{"data-svelte-h":!0}),o(wt)!=="svelte-1terwo1"&&(wt.textContent=ve),Yl=i(t),jt=p(t,"P",{"data-svelte-h":!0}),o(jt)!=="svelte-1p0c8l1"&&(jt.innerHTML=$e),Ll=i(t),Zt=p(t,"P",{"data-svelte-h":!0}),o(Zt)!=="svelte-1terwo1"&&(Zt.textContent=Ce),Fl=i(t),Rt=p(t,"P",{"data-svelte-h":!0}),o(Rt)!=="svelte-1en0huo"&&(Rt.innerHTML=Ge),Ql=i(t),c(Vt.$$.fragment,t),kl=i(t),c(vt.$$.fragment,t),Sl=i(t),$t=p(t,"P",{"data-svelte-h":!0}),o($t)!=="svelte-1erw0pq"&&($t.innerHTML=Ie),Al=i(t),c(Ct.$$.fragment,t),Hl=i(t),c(Gt.$$.fragment,t),Dl=i(t),Bt=p(t,"P",{}),We(Bt).forEach(e),this.h()},h(){Xe(d,"name","hf:doc:metadata"),Xe(d,"content",De)},m(t,l){Qe(document.head,d),n(t,h,l),n(t,g,l),n(t,It,l),r(T,t,l),n(t,Xt,l),n(t,y,l),n(t,Et,l),n(t,_,l),n(t,xt,l),r(w,t,l),n(t,Nt,l),n(t,j,l),n(t,Yt,l),r(J,t,l),n(t,Lt,l),n(t,Z,l),n(t,Ft,l),r(R,t,l),n(t,Qt,l),n(t,V,l),n(t,kt,l),r(v,t,l),n(t,St,l),n(t,$,l),n(t,At,l),r(C,t,l),n(t,Ht,l),n(t,G,l),n(t,Dt,l),r(I,t,l),n(t,qt,l),n(t,B,l),n(t,zt,l),r(W,t,l),n(t,Pt,l),n(t,X,l),n(t,Ot,l),r(E,t,l),n(t,Kt,l),r(x,t,l),n(t,tl,l),n(t,N,l),n(t,ll,l),n(t,Y,l),n(t,el,l),n(t,L,l),n(t,nl,l),r(F,t,l),n(t,sl,l),n(t,Q,l),n(t,il,l),r(k,t,l),n(t,al,l),n(t,S,l),n(t,pl,l),n(t,A,l),n(t,ol,l),r(H,t,l),n(t,ml,l),n(t,D,l),n(t,cl,l),n(t,q,l),n(t,rl,l),n(t,z,l),n(t,Ml,l),r(P,t,l),n(t,ul,l),r(O,t,l),n(t,fl,l),n(t,K,l),n(t,dl,l),r(tt,t,l),n(t,Ul,l),r(lt,t,l),n(t,bl,l),r(et,t,l),n(t,gl,l),n(t,nt,l),n(t,Jl,l),r(st,t,l),n(t,hl,l),n(t,it,l),n(t,Tl,l),r(at,t,l),n(t,yl,l),n(t,pt,l),n(t,_l,l),n(t,ot,l),n(t,wl,l),n(t,mt,l),n(t,jl,l),r(ct,t,l),n(t,Zl,l),r(rt,t,l),n(t,Rl,l),n(t,Mt,l),n(t,Vl,l),n(t,ut,l),n(t,vl,l),r(ft,t,l),n(t,$l,l),n(t,dt,l),n(t,Cl,l),r(Ut,t,l),n(t,Gl,l),n(t,bt,l),n(t,Il,l),n(t,gt,l),n(t,Bl,l),n(t,Jt,l),n(t,Wl,l),n(t,ht,l),n(t,Xl,l),n(t,Tt,l),n(t,El,l),n(t,yt,l),n(t,xl,l),r(_t,t,l),n(t,Nl,l),n(t,wt,l),n(t,Yl,l),n(t,jt,l),n(t,Ll,l),n(t,Zt,l),n(t,Fl,l),n(t,Rt,l),n(t,Ql,l),r(Vt,t,l),n(t,kl,l),r(vt,t,l),n(t,Sl,l),n(t,$t,l),n(t,Al,l),r(Ct,t,l),n(t,Hl,l),r(Gt,t,l),n(t,Dl,l),n(t,Bt,l),ql=!0},p(t,[l]){const Be={};l&2&&(Be.$$scope={dirty:l,ctx:t}),J.$set(Be)},i(t){ql||(M(T.$$.fragment,t),M(w.$$.fragment,t),M(J.$$.fragment,t),M(R.$$.fragment,t),M(v.$$.fragment,t),M(C.$$.fragment,t),M(I.$$.fragment,t),M(W.$$.fragment,t),M(E.$$.fragment,t),M(x.$$.fragment,t),M(F.$$.fragment,t),M(k.$$.fragment,t),M(H.$$.fragment,t),M(P.$$.fragment,t),M(O.$$.fragment,t),M(tt.$$.fragment,t),M(lt.$$.fragment,t),M(et.$$.fragment,t),M(st.$$.fragment,t),M(at.$$.fragment,t),M(ct.$$.fragment,t),M(rt.$$.fragment,t),M(ft.$$.fragment,t),M(Ut.$$.fragment,t),M(_t.$$.fragment,t),M(Vt.$$.fragment,t),M(vt.$$.fragment,t),M(Ct.$$.fragment,t),M(Gt.$$.fragment,t),ql=!0)},o(t){u(T.$$.fragment,t),u(w.$$.fragment,t),u(J.$$.fragment,t),u(R.$$.fragment,t),u(v.$$.fragment,t),u(C.$$.fragment,t),u(I.$$.fragment,t),u(W.$$.fragment,t),u(E.$$.fragment,t),u(x.$$.fragment,t),u(F.$$.fragment,t),u(k.$$.fragment,t),u(H.$$.fragment,t),u(P.$$.fragment,t),u(O.$$.fragment,t),u(tt.$$.fragment,t),u(lt.$$.fragment,t),u(et.$$.fragment,t),u(st.$$.fragment,t),u(at.$$.fragment,t),u(ct.$$.fragment,t),u(rt.$$.fragment,t),u(ft.$$.fragment,t),u(Ut.$$.fragment,t),u(_t.$$.fragment,t),u(Vt.$$.fragment,t),u(vt.$$.fragment,t),u(Ct.$$.fragment,t),u(Gt.$$.fragment,t),ql=!1},d(t){t&&(e(h),e(g),e(It),e(Xt),e(y),e(Et),e(_),e(xt),e(Nt),e(j),e(Yt),e(Lt),e(Z),e(Ft),e(Qt),e(V),e(kt),e(St),e($),e(At),e(Ht),e(G),e(Dt),e(qt),e(B),e(zt),e(Pt),e(X),e(Ot),e(Kt),e(tl),e(N),e(ll),e(Y),e(el),e(L),e(nl),e(sl),e(Q),e(il),e(al),e(S),e(pl),e(A),e(ol),e(ml),e(D),e(cl),e(q),e(rl),e(z),e(Ml),e(ul),e(fl),e(K),e(dl),e(Ul),e(bl),e(gl),e(nt),e(Jl),e(hl),e(it),e(Tl),e(yl),e(pt),e(_l),e(ot),e(wl),e(mt),e(jl),e(Zl),e(Rl),e(Mt),e(Vl),e(ut),e(vl),e($l),e(dt),e(Cl),e(Gl),e(bt),e(Il),e(gt),e(Bl),e(Jt),e(Wl),e(ht),e(Xl),e(Tt),e(El),e(yt),e(xl),e(Nl),e(wt),e(Yl),e(jt),e(Ll),e(Zt),e(Fl),e(Rt),e(Ql),e(kl),e(Sl),e($t),e(Al),e(Hl),e(Dl),e(Bt)),e(d),f(T,t),f(w,t),f(J,t),f(R,t),f(v,t),f(C,t),f(I,t),f(W,t),f(E,t),f(x,t),f(F,t),f(k,t),f(H,t),f(P,t),f(O,t),f(tt,t),f(lt,t),f(et,t),f(st,t),f(at,t),f(ct,t),f(rt,t),f(ft,t),f(Ut,t),f(_t,t),f(Vt,t),f(vt,t),f(Ct,t),f(Gt,t)}}}const De='{"title":"ControlNet","local":"controlnet","sections":[{"title":"의존성 설치하기","local":"의존성-설치하기","sections":[],"depth":2},{"title":"원을 채우는 데이터셋","local":"원을-채우는-데이터셋","sections":[],"depth":2},{"title":"학습","local":"학습","sections":[],"depth":2},{"title":"여러개 GPU로 학습하기","local":"여러개-gpu로-학습하기","sections":[],"depth":2},{"title":"예시 결과","local":"예시-결과","sections":[{"title":"배치 사이즈 8로 300 스텝 이후:","local":"배치-사이즈-8로-300-스텝-이후","sections":[],"depth":4},{"title":"배치 사이즈 8로 6000 스텝 이후:","local":"배치-사이즈-8로-6000-스텝-이후","sections":[],"depth":4}],"depth":2},{"title":"16GB GPU에서 학습하기","local":"16gb-gpu에서-학습하기","sections":[],"depth":2},{"title":"12GB GPU에서 학습하기","local":"12gb-gpu에서-학습하기","sections":[],"depth":2},{"title":"8GB GPU에서 학습하기","local":"8gb-gpu에서-학습하기","sections":[],"depth":2},{"title":"추론","local":"추론","sections":[],"depth":2}],"depth":1}';function qe(Wt){return xe(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ln extends Ye{constructor(d){super(),Le(this,d,qe,He,Ee,{})}}export{ln as component};

Xet Storage Details

Size:
43 kB
·
Xet hash:
da79f4d9850e2f8e95636ed4f1cd25b74c9169bf733a4e465721caf16ed75c8e

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.