# Gradio Space: DreamBooth LoRA fine-tuning + inference demo
# Training-script inputs, mirroring the shell exports from the upstream example:
# https://github.com/huggingface/diffusers/tree/main/examples/dreambooth
MODEL_NAME = "stabilityai/stable-diffusion-2-1-base"   # base checkpoint to fine-tune
INSTANCE_DIR = "./data_example"                        # folder of instance images
OUTPUT_DIR = "./output_example"                        # where training artifacts land
| from diffusers import StableDiffusionPipeline | |
| from lora_diffusion import monkeypatch_lora, tune_lora_scale | |
| import torch | |
| import os | |
| import gradio as gr | |
| #os.system('python file.py') | |
| import subprocess | |
| # If your shell script has shebang, | |
| # you can omit shell=True argument. | |
| #subprocess.run("./run_lora_db.sh", shell=True) | |
| ##### | |
# Load the base Stable Diffusion pipeline once at startup; both UI callbacks
# below reuse this module-level `pipe`.
model_id = "stabilityai/stable-diffusion-2-1-base"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

# Fixed seed so repeated inference clicks are reproducible.
torch.manual_seed(1)

# Prompt used for inference; "sks" is the rare-token identifier from DreamBooth training.
prompt = "style of sks, baby lion"

# Path where the fine-tuned LoRA weights are expected after training.
finetuned_lora_weights = "./lora_weight.pt"
| ##### | |
| #my fine tuned weights | |
def monkeypatching(alpha):
    """Apply the fine-tuned LoRA weights to the global `pipe` and run inference.

    Args:
        alpha: LoRA scale in [0.1, 1.0] from the UI slider — higher values
            weight the fine-tuned behavior more strongly.

    Returns:
        The generated PIL image (also saved to ./illust_lora.jpg).
    """
    # Fix: monkeypatch_lora wraps the UNet's attention modules; re-applying it on
    # every button click would stack patches and corrupt outputs. Patch once,
    # then only re-tune the scale on subsequent calls.
    if not getattr(pipe.unet, "_lora_patched", False):
        monkeypatch_lora(pipe.unet, torch.load(finetuned_lora_weights))
        pipe.unet._lora_patched = True
    tune_lora_scale(pipe.unet, alpha)
    image = pipe(prompt, num_inference_steps=50, guidance_scale=7).images[0]
    image.save("./illust_lora.jpg")
    return image
def accelerate_train_lora(steps):
    """Launch DreamBooth LoRA training as a subprocess via `accelerate launch`.

    Args:
        steps: number of training steps (Gradio Number delivers a float;
            coerced to int for the CLI flag).

    Returns:
        Path to the trained LoRA weight file, so the Gradio File output
        (`out_file`) actually receives something — the original returned None.

    Raises:
        subprocess.CalledProcessError: if the training process exits non-zero.
    """
    print("***********inside accelerate_train_lora 11111***********")
    # Fix: the original built a shell string with os.system(f"...{steps}...") —
    # no error checking and shell-injection prone. An argument list with
    # shell=False is safe and lets check=True surface failures.
    cmd = [
        "accelerate", "launch", "./train_lora_dreambooth.py",
        f"--pretrained_model_name_or_path={MODEL_NAME}",
        f"--instance_data_dir={INSTANCE_DIR}",
        f"--output_dir={OUTPUT_DIR}",
        "--instance_prompt=style of sks",
        "--resolution=512",
        "--train_batch_size=1",
        "--gradient_accumulation_steps=1",
        "--learning_rate=1e-4",
        "--lr_scheduler=constant",
        "--lr_warmup_steps=0",
        f"--max_train_steps={int(steps)}",
    ]
    subprocess.run(cmd, check=True)
    print("***********inside accelerate_train_lora 22222***********")
    # NOTE(review): inference loads "./lora_weight.pt" (finetuned_lora_weights);
    # confirm the training script writes the weights to this path rather than
    # under OUTPUT_DIR.
    return "./lora_weight.pt"
# Build and launch the two-button UI: one row of inputs, one of actions,
# one of outputs. Training feeds the File output; inference feeds the Image.
with gr.Blocks() as demo:
    with gr.Row():
        in_images = gr.File(label="Upload images to fine-tune for LORA", file_count="multiple")
        in_steps = gr.Number(label="Enter number of steps")
        in_alpha = gr.Slider(0.1, 1.0, step=0.01, label="Set Alpha level - higher value has more chances to overfit")
    with gr.Row():
        train_button = gr.Button(value="Train LORA model")
        infer_button = gr.Button(value="Inference using LORA model")
    with gr.Row():
        out_image = gr.Image(label="Image generated by LORA model")
        out_file = gr.File(label="Lora trained model weights")

    # Wire the buttons to the two module-level callbacks defined above.
    train_button.click(fn=accelerate_train_lora, inputs=in_steps, outputs=out_file)
    infer_button.click(fn=monkeypatching, inputs=in_alpha, outputs=out_image)

demo.launch(debug=True, show_error=True)