Upload The_Vram_Goes_Brrrrrr.cfg
#2
by
Pinguin - opened
- The_Vram_Goes_Brrrrrr.cfg +98 -0
The_Vram_Goes_Brrrrrr.cfg
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
#This settings file can be loaded back into Latent Majesty Diffusion. If you like your settings, consider sharing them to the settings library at https://github.com/multimodalart/MajestyDiffusion
|
| 3 |
+
[model]
|
| 4 |
+
latent_diffusion_model = finetuned
|
| 5 |
+
|
| 6 |
+
[clip_list]
|
| 7 |
+
perceptors = ['[clip - mlfoundations - ViT-B-16--openai]', '[clip - mlfoundations - RN50x16--openai]', '[clip - mlfoundations - ViT-L-14--laion400m_e32]', '[clip - mlfoundations - ViT-B-16-plus-240--laion400m_e32]', '[clip - mlfoundations - ViT-B-32--laion2b_e16]']
|
| 8 |
+
|
| 9 |
+
[basic_settings]
|
| 10 |
+
#Perceptor things
|
| 11 |
+
|
| 12 |
+
width = 256
|
| 13 |
+
height = 256
|
| 14 |
+
|
| 15 |
+
latent_diffusion_guidance_scale = 10
|
| 16 |
+
clip_guidance_scale = 135000
|
| 17 |
+
aesthetic_loss_scale = 400
|
| 18 |
+
augment_cuts=True
|
| 19 |
+
|
| 20 |
+
#Init image settings
|
| 21 |
+
starting_timestep = 0.02
|
| 22 |
+
init_scale = 1000
|
| 23 |
+
init_brightness = 0.0
|
| 24 |
+
|
| 25 |
+
[advanced_settings]
|
| 26 |
+
#Add CLIP Guidance and all the flavors or just run normal Latent Diffusion
|
| 27 |
+
use_cond_fn = True
|
| 28 |
+
|
| 29 |
+
#Custom schedules for cuts. Check out the schedules documentation here
|
| 30 |
+
custom_schedule_setting = [[30, 1000, 8], 'gfpgan:1.5', 'scale:.9', [20, 200, 8], 'gfpgan:1', 'scale:.9', [50, 220, 2], 'gfpgan:1']
|
| 31 |
+
|
| 32 |
+
#Cut settings
|
| 33 |
+
clamp_index = [2.4, 2.1]
|
| 34 |
+
cut_overview = [8]*500 + [4]*500
|
| 35 |
+
cut_innercut = [0]*500 + [4]*500
|
| 36 |
+
cut_blur_n = [0]*1300
|
| 37 |
+
cut_blur_kernel = 3
|
| 38 |
+
cut_ic_pow = 5.6
|
| 39 |
+
cut_icgray_p = [0.1]*300 + [0]*1000
|
| 40 |
+
cutn_batches = 1
|
| 41 |
+
range_index = [0]*200 + [50000.0]*400 + [0]*1000
|
| 42 |
+
active_function = "softsign"
|
| 43 |
+
ths_method= "clamp"
|
| 44 |
+
tv_scales = [150]*1 + [0]*3
|
| 45 |
+
|
| 46 |
+
#This line schedules the CLIP guidance across the steps; comment it out to use clip_guidance_scale instead
|
| 47 |
+
clip_guidance_schedule = [16000]*1000
|
| 48 |
+
|
| 49 |
+
#Apply symmetric loss (force symmetry in your results)
|
| 50 |
+
symmetric_loss_scale = 0
|
| 51 |
+
|
| 52 |
+
#Latent Diffusion Advanced Settings
|
| 53 |
+
#Use when doing latent upscaling to correct the saturation problem
|
| 54 |
+
scale_div = 1
|
| 55 |
+
#Magnify grad before clamping by how many times
|
| 56 |
+
opt_mag_mul = 20
|
| 57 |
+
opt_ddim_eta = 1.3
|
| 58 |
+
opt_eta_end = 1.1
|
| 59 |
+
opt_temperature = 0.98
|
| 60 |
+
|
| 61 |
+
#Grad advanced settings
|
| 62 |
+
grad_center = False
|
| 63 |
+
#Lower values give a more coherent and detailed result; higher values make it focus on the most dominant concept
|
| 64 |
+
grad_scale=0.25
|
| 65 |
+
score_modifier = True
|
| 66 |
+
threshold_percentile = 0.85
|
| 67 |
+
threshold = 1
|
| 68 |
+
var_index = [2]*300 + [0]*700
|
| 69 |
+
var_range = 0.5
|
| 70 |
+
mean_index = [0]*1000
|
| 71 |
+
mean_range = 0.75
|
| 72 |
+
|
| 73 |
+
#Init image advanced settings
|
| 74 |
+
init_rotate=False
|
| 75 |
+
mask_rotate=False
|
| 76 |
+
init_magnitude = 0.18215
|
| 77 |
+
|
| 78 |
+
#More settings
|
| 79 |
+
RGB_min = -0.95
|
| 80 |
+
RGB_max = 0.95
|
| 81 |
+
#How to pad the image with cut_overview
|
| 82 |
+
padargs = {'mode': 'constant', 'value': -1}
|
| 83 |
+
flip_aug=False
|
| 84 |
+
|
| 85 |
+
#Experimental aesthetic embeddings; works only with OpenAI ViT-B/32 and ViT-L/14
|
| 86 |
+
experimental_aesthetic_embeddings = True
|
| 87 |
+
#How much you want this to influence your result
|
| 88 |
+
experimental_aesthetic_embeddings_weight = 0.3
|
| 89 |
+
#9 are good aesthetic embeddings, 0 are bad ones
|
| 90 |
+
experimental_aesthetic_embeddings_score = 8
|
| 91 |
+
|
| 92 |
+
# For fun; don't change unless you really know what you are doing
|
| 93 |
+
grad_blur = False
|
| 94 |
+
compress_steps = 200
|
| 95 |
+
compress_factor = 0.1
|
| 96 |
+
punish_steps = 200
|
| 97 |
+
punish_factor = 0.5
|
| 98 |
+
|