MRamzan committed
Commit 8917366 · 1 Parent(s): 5133079

Update files

Files changed (3)
  1. app.py +104 -12
  2. qwe cat_20240307-085154.json +115 -0
  3. requirements.txt +4 -2
app.py CHANGED
@@ -1,18 +1,110 @@
  import gradio as gr
- from transformers import pipeline

- pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

- def predict(input_img):
-     predictions = pipeline(input_img)
-     return input_img, {p["label"]: p["score"] for p in predictions}

- gradio_app = gr.Interface(
-     predict,
-     inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
-     outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
-     title="Hot Dog? Or Not?",
- )

  if __name__ == "__main__":
-     gradio_app.launch()
+ # import gradio as gr
+ # from transformers import pipeline
+
+ # pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
+
+ # def predict(input_img):
+ #     predictions = pipeline(input_img)
+ #     return input_img, {p["label"]: p["score"] for p in predictions}
+
+ # gradio_app = gr.Interface(
+ #     predict,
+ #     inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
+ #     outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
+ #     title="Hot Dog? Or Not?",
+ # )
+
+ import random
  import gradio as gr
+ from PIL import Image
+ import torch
+ import uuid
+ import numpy as np
+ from diffusers import AutoPipelineForText2Image, AutoPipelineForInpainting, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline
+
+
+ model_id = "stabilityai/stable-diffusion-xl-base-1.0"
+
+ # name of the LoRA weight file loaded from the 'lora_weights' folder below
+ adapter_id = "qwe cat_20240307-085154.json"
+
+ pipe = AutoPipelineForText2Image.from_pretrained(model_id,
+                                                  torch_dtype=torch.float16,
+                                                  variant="fp16")
+ # pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
+
+ pipe.to("cuda")
+


+ pipe.load_lora_weights('lora_weights',
+                        weight_name=adapter_id,
+                        adapter_name="qwe")
+ pipe.fuse_lora()  # lora_scale=0.7

+ def set_lora_weight(lora_scale):
+     pipe.unfuse_lora(True)
+     pipe.load_lora_weights('lora_weights',
+                            weight_name=adapter_id,
+                            adapter_name="qwe")
+     pipe.fuse_lora(lora_scale=lora_scale)  # lora_scale=0.7
+     print('DONE')
+
+ def generate(text, guidance_scale, num_images_per_prompt, height, width, generator_seed):
+
+     generator = torch.Generator("cuda").manual_seed(generator_seed)
+
+
+     prompt = text
+     image = pipe(prompt=prompt,
+                  negative_prompt='worst quality, normal quality, low quality, low res, blurry, text, watermark, logo, banner, extra digits, cropped, jpeg artifacts, signature, username, error, sketch ,duplicate, ugly, monochrome, horror, geometry, mutation, disgusting',
+                  guidance_scale=guidance_scale,
+                  num_images_per_prompt=num_images_per_prompt,
+                  height=height,
+                  width=width,
+                  num_inference_steps=20,
+                  generator=generator).images
+     return image
+
+
+
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         with gr.Column():
+
+             gallery = gr.Gallery(
+                 label="Generate",
+                 object_fit="contain", height="512")
+
+             text = gr.Textbox(
+                 label="Enter Prompt...")
+             btn = gr.Button("Generate", scale=0)
+             guidance_scale = gr.Slider(minimum=0, maximum=15, value=7.5, label='guidance scale')
+             num_images_per_prompt = gr.Slider(minimum=1, maximum=4, value=2, step=1, label='number of images per prompt')
+             height = gr.Slider(minimum=512, maximum=2048, value=1024, label='Image height')
+             width = gr.Slider(minimum=512, maximum=2048, value=1024, step=8, label='Image width')
+             lora_scale = gr.Slider(minimum=0.1, maximum=1, value=1, step=0.01, label='Lora scale')
+             generator_seed = gr.Slider(minimum=-1, maximum=100, value=1, step=1, label='generator_seed')
+
+
+
+     # with gr.Column():
+     #     gallery = gr.Gallery(
+     #         label="Generate",
+     #         object_fit="contain", height="2048")
+
+
+     btn.click(generate,
+               inputs=[text, guidance_scale, num_images_per_prompt, height, width, generator_seed],
+               outputs=gallery)
+
+     lora_scale.change(set_lora_weight,
+                       inputs=lora_scale)
+

  if __name__ == "__main__":
+     demo.launch(share=True)
+
+ # if __name__ == "__main__":
+ #     gradio_app.launch()
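For context, the Lora scale slider in the new app.py re-applies the adapter at a different strength by unfusing and fusing again. Below is a minimal standalone sketch of that cycle with diffusers, assuming a hypothetical qwe.safetensors adapter file in a lora_weights/ folder (the weight file name is illustrative, not the one shipped in this commit); it also simplifies set_lora_weight() by not reloading the weights on every change:

import torch
from diffusers import AutoPipelineForText2Image

# SDXL base pipeline in fp16, same model id as app.py.
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# Load and fuse the adapter once at start-up; "qwe.safetensors" is a
# hypothetical file name used only for illustration.
pipe.load_lora_weights("lora_weights",
                       weight_name="qwe.safetensors",
                       adapter_name="qwe")
pipe.fuse_lora()

def set_adapter_scale(lora_scale):
    # Undo the previous fuse (restores the base weights), then fuse the
    # still-loaded adapter again at the requested scale.
    pipe.unfuse_lora()
    pipe.fuse_lora(lora_scale=lora_scale)

set_adapter_scale(0.7)
image = pipe("a qwe cat sitting on a sofa", num_inference_steps=20).images[0]
image.save("sample.png")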
qwe cat_20240307-085154.json ADDED
@@ -0,0 +1,115 @@
+ {
+   "LoRA_type": "Standard",
+   "LyCORIS_preset": "full",
+   "adaptive_noise_scale": 0,
+   "additional_parameters": "",
+   "block_alphas": "",
+   "block_dims": "",
+   "block_lr_zero_threshold": "",
+   "bucket_no_upscale": false,
+   "bucket_reso_steps": 32,
+   "cache_latents": true,
+   "cache_latents_to_disk": true,
+   "caption_dropout_every_n_epochs": 0.0,
+   "caption_dropout_rate": 0,
+   "caption_extension": ".txt",
+   "clip_skip": "1",
+   "color_aug": false,
+   "conv_alpha": 32,
+   "conv_block_alphas": "",
+   "conv_block_dims": "",
+   "conv_dim": 32,
+   "debiased_estimation_loss": false,
+   "decompose_both": false,
+   "dim_from_weights": false,
+   "down_lr_weight": "",
+   "enable_bucket": true,
+   "epoch": 3,
+   "factor": -1,
+   "flip_aug": false,
+   "full_bf16": false,
+   "full_fp16": false,
+   "gradient_accumulation_steps": 1,
+   "gradient_checkpointing": true,
+   "keep_tokens": "0",
+   "learning_rate": 0.0001,
+   "logging_dir": "/home/rohan/Projects/Lora/doll/log",
+   "lora_network_weights": "",
+   "lr_scheduler": "constant",
+   "lr_scheduler_args": "",
+   "lr_scheduler_num_cycles": "1",
+   "lr_scheduler_power": "",
+   "lr_warmup": 0,
+   "max_bucket_reso": 2048,
+   "max_data_loader_n_workers": "0",
+   "max_resolution": "1024,1024",
+   "max_timestep": 1000,
+   "max_token_length": "75",
+   "max_train_epochs": "1",
+   "max_train_steps": "",
+   "mem_eff_attn": false,
+   "mid_lr_weight": "",
+   "min_bucket_reso": 256,
+   "min_snr_gamma": 5,
+   "min_timestep": 0,
+   "mixed_precision": "bf16",
+   "model_list": "custom",
+   "module_dropout": 0,
+   "multires_noise_discount": 0,
+   "multires_noise_iterations": 0,
+   "network_alpha": 13,
+   "network_dim": 128,
+   "network_dropout": 0,
+   "no_token_padding": false,
+   "noise_offset": 0,
+   "noise_offset_type": "Original",
+   "num_cpu_threads_per_process": 2,
+   "optimizer": "AdamW8bit",
+   "optimizer_args": "",
+   "output_dir": "/home/rohan/Projects/Lora/qwe_toy/model/",
+   "output_name": "qwe cat",
+   "persistent_data_loader_workers": false,
+   "pretrained_model_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
+   "prior_loss_weight": 1.0,
+   "random_crop": false,
+   "rank_dropout": 0,
+   "reg_data_dir": "/home/rohan/Projects/Lora/qwe_toy/log/",
+   "resume": "",
+   "sample_every_n_epochs": 0,
+   "sample_every_n_steps": 0,
+   "sample_prompts": "",
+   "sample_sampler": "euler_a",
+   "save_every_n_epochs": 2,
+   "save_every_n_steps": 0,
+   "save_last_n_steps": 0,
+   "save_last_n_steps_state": 0,
+   "save_model_as": "safetensors",
+   "save_precision": "bf16",
+   "save_state": false,
+   "scale_v_pred_loss_like_noise_pred": false,
+   "scale_weight_norms": 2.5,
+   "sdxl": true,
+   "sdxl_cache_text_encoder_outputs": false,
+   "sdxl_no_half_vae": true,
+   "seed": "",
+   "shuffle_caption": false,
+   "stop_text_encoder_training_pct": 0,
+   "text_encoder_lr": 0.0001,
+   "train_batch_size": 1,
+   "train_data_dir": "/home/rohan/Projects/Lora/qwe_toy/img/",
+   "train_on_input": true,
+   "training_comment": "trigger: playboy centerfold",
+   "unet_lr": 0.0001,
+   "unit": 1,
+   "up_lr_weight": "",
+   "use_cp": false,
+   "use_wandb": false,
+   "v2": false,
+   "v_parameterization": false,
+   "v_pred_like_loss": 0,
+   "vae": "",
+   "vae_batch_size": 0,
+   "wandb_api_key": "",
+   "weighted_captions": false,
+   "xformers": "xformers"
+ }
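The JSON added above looks like a kohya_ss-style LoRA training configuration (the hyperparameters of the run that produced the adapter), not the adapter weights themselves. A quick way to inspect the recorded settings using only the standard library:

import json

# File added in this commit (note the space in the file name).
with open("qwe cat_20240307-085154.json", encoding="utf-8") as f:
    cfg = json.load(f)

# A few of the fields that characterise the training run.
for key in ("pretrained_model_name_or_path", "LoRA_type", "network_dim",
            "network_alpha", "learning_rate", "text_encoder_lr", "unet_lr",
            "train_batch_size", "max_resolution", "mixed_precision",
            "optimizer", "output_name"):
    print(f"{key}: {cfg.get(key)!r}")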
requirements.txt CHANGED
@@ -1,2 +1,4 @@
- transformers
- torch
+ torch
+ torchvision
+ torchaudio
+ diffusers
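The updated requirements.txt does not list gradio, Pillow, or numpy, which app.py also imports; on a Gradio Space these usually come with the SDK image, but a local run needs them as well. A small sanity check (a sketch, not part of the commit) that every top-level module app.py imports can be resolved:

import importlib

# Top-level modules imported by app.py.
modules = ("random", "uuid", "gradio", "PIL", "torch", "numpy", "diffusers")

for name in modules:
    try:
        importlib.import_module(name)
        print(f"{name}: OK")
    except ImportError as err:
        print(f"{name}: MISSING ({err})")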