diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..646e1014c11e7ddc949669c877c9fbd314d35e97 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,161 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +Lora/2Dfy[[:space:]]DarkSky[[:space:]]XL_v1.1[[:space:]]test.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/3domain.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/AgateLightvaleIllustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Aliza_Horrortale_IlluXL-000018.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/AmberLightvaleIllustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Anal_Gape_ILXL_V1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/BettyNoireIllustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Bg_maki.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Body[[:space:]]weight[[:space:]]slider[[:space:]]IXL[[:space:]]1.0_alpha16.0_rank32_full_last.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Breasts[[:space:]]size[[:space:]]slider[[:space:]]NNFFS_alpha16.0_rank32_full_last.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/BrokenGlass_illusXL_Incrs_v1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Chara_IL_v1-05.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Chara_Storyfell_IL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/CinderFall-IL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/CopperLightvaleIllustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Creative-Angle_v1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/D-art_Style.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/DC_v1_NAI-XL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/DarkSlider1.2_alpha1.0_rank16_full_300steps.preview.png filter=lfs diff=lfs merge=lfs -text 
+Lora/Darkynsfw_Style__Pony_XL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Detail_Tweaker.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Dishwasher1910.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Dishwasher1910_Style_IllustriousXL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Dishwasher_1910_Style-IL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/E7BBAAE584BF20E58E9AE6B682E69E84E59BBE.gkED.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/FriskIllustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Frisk_new_ill.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Grimphantom_-_Illustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Gwen_Tennyson_Ben_10_LoRA_Illustrious_Edition.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/H1_hhh_1.0_style-ILXL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Hips[[:space:]]size[[:space:]]slider[[:space:]]IXL4_alpha16.0_rank32_full_last.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Hyper-SDXL-12steps-CFG-lora.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Hyper-SDXL-8steps-CFG-lora.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/IllustriousTwerkT2.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Illustrious_Amechan_Style.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Intruder-guy90-Illust-Lycorisv1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/JauneArc.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/JauneArcIL.jpg filter=lfs diff=lfs merge=lfs -text +Lora/Jaune_Arc.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Jlullaby[[:space:]]Illust.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Jlullaby[[:space:]]v2[[:space:]]Illust.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/KMS_RWBY_RR_IL-000005.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/KarasuChan1204.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/KarasuChanIllustriousXL_byKonan.preview.png filter=lfs diff=lfs merge=lfs 
-text +Lora/Kissing_Penis.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Krekkov[[:space:]]v2[[:space:]]Illust.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Krekkov_Style-000009.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/LCM&TurboMix.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/LCMTurboMix_DPM_SDE_Karras.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/LCMTurboMix_Euler_A_fix.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/LCMTurboMix_LCM_Sampler.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/LCMV2-PONYplus-PAseer.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/LCM_LoRA_Weights_SDXL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Light-skinned_male.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/LoRA2komaNoobAIXLvPred.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/LoRAMuryoKushoNoobAIXLvPred.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Marceline_ILL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Marceline_The_Star__Adventure_Time_Fionna__Cake.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/NARUTO_illustriousXL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Nami_shocked_eye_IL_v1-000012.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Nayuta_Illustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/NoobV065sHyperDmd.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Obui_1215767.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Ohogao_illustrious_v1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/POV_mirror_fellatio_Illustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/PandoraIllustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Phone_Exposure.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Poper[[:space:]]IL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Ratetaso_artist_style__CharaUndertale_character.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Ruby_Illustrious-Copy1.preview.png 
filter=lfs diff=lfs merge=lfs -text +Lora/Ruby_Illustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/SSAMBAteaIL-style.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Sans.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Shadman_Comic.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Shexyo_-_Illustrious_2025_style-000014.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Shirt_lift-_Assisted_exposure_Illustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Smooth_Booster_v3.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Ssambatea[[:space:]]IL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/StS_Age_Slider_Illustrious_v1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Summer_Rose_-_IL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Takatsu-Keita-Style-IL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Tan[[:space:]]slider_alpha1.0_rank4_full_last.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/The_after_show_Illustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Twitching_Eye.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/WSSKX_WAI.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Weiss_Illus.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Xipa.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/Your_Turns_Next_Illustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/_jlullaby_NAIXL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/add_detail.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/ahemaru_777.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/aidmaImageUpgrader-v0.3-IL.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/aidmaImageUpgraderSDXL-v0.3.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/anna_nishikinomiya.V-1.0.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/b10-lucy.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/backlighting_il_2_d16.preview.png filter=lfs diff=lfs merge=lfs -text 
+Lora/black-souls-style-noobai-epspred11.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/buttjob.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/cfg_scale_boost.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/checkpoint-e18_s738.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/cheekbulge.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/classic_gwen.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/contract_controller_illus01_v1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/darkness_xl_v20.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/dashedeyes_Illustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/deal360acv[[:space:]]illustrious[[:space:]]006.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/dish[[:space:]]white[[:space:]]skin_noobai_ep110_v1-000010.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/dishXL_il_lokr_V53P1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/excessivesaliva.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/eyelashext_v10.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/fixed_perspective.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/fixed_point_v2.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/fluffy.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/grabbed-breast-over-shoulder-illustriousxl-lora-nochekaiser.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/gwen.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/gwentenE28090illus13.QCtZ.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/highresbodyfix_v1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/hinaIDidSurgeryAlongSlider_x215c.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/il_brightness_slider_d1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/il_color_temp_slider_d1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/il_contrast_slider_d1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/il_saturation_slider_d1.preview.png filter=lfs diff=lfs 
merge=lfs -text +Lora/illustriousXLv01_stabilizer_v1.121.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/illustriousXLv01_stabilizer_v1.152.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/illustriousXLv01_stabilizer_v1.164.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/illustriousXLv01_stable_dark_v0.3.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/illustriousXLv11_stabilizer_v1.113.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/in-dark.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/jujutsu_kaisen_style_ilxl.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/kelvinhiu-10.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/krekkov-ByChiAi_.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/krekkov_style_ilxl_goofy_1179195.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/marl_texture_v0.4-illu10_done.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/masturbation_h.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/midis_performancejuxtaposition_V0.51\[IL\].preview.png filter=lfs diff=lfs merge=lfs -text +Lora/minimal_design_slider.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/mtu_virusIllustrious.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/multipleviews.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/nipple_size_slider_pdxl_goofy.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/noobai_ep11_stabilizer_v0.114_fp16.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/noobai_ep11_stabilizer_v0.205_fp16.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/omni-09.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/omniverse_gwen.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/ovgwen-10.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/photo_background.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/prompt_extend.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/ryosios_GodMode.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/sd_xl_dpo_lora_v1.preview.png 
filter=lfs diff=lfs merge=lfs -text +Lora/sdxl_lightning_8step_lora.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/sgb_ilxl_v1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/style_strength_controller_nbep11_v1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/takedaXL_il_lokr_V6311P.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/tedain777ILLUS.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/theodyss.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/togawagatame_ilxl_v1.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/touching_grass_v0.2.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/transformation_onahole_noobai.preview.png filter=lfs diff=lfs merge=lfs -text +Lora/wai-Rectified-s.preview.png filter=lfs diff=lfs merge=lfs -text diff --git a/Lora/1261748_training_data.json b/Lora/1261748_training_data.json new file mode 100644 index 0000000000000000000000000000000000000000..672dd4515d326a0f7dc3bf04d12b15675f77cb95 --- /dev/null +++ b/Lora/1261748_training_data.json @@ -0,0 +1,8 @@ +{ + "sha256": "317141980AB99189599DF6B0B719FA3C38B9566755880CAC4C95386EDD4171D4", + "modelId": 1122629, + "modelVersionId": 1261748, + "activation text": "Jaune Arc, First Outfit, Long Hair, Blonde Hair, Blue Eyes, Brown Gloves, Armor, Jeans, Pants, Hoodie, Shoulder Armor, Jaune Arc, Second Outfit, Long Hair, Blonde Hair, Blue Eyes, Brown Gloves, Breastplate, Armor, Hoodie, Jeans, Pants, Jaune Arc, Third Outfit, Short Hair, Blonde Hair, Blue Eyes, Brown Gloves, Armor, Jeans, Pants, Jaune Arc, Uniform, Long Hair, Blonde Hair, Blue Eyes, Pants", + "description": "Jaune Arc from RWBY", + "sd version": "Other" +} \ No newline at end of file diff --git a/Lora/2Dfy DarkSky XL_v1.1 test.html b/Lora/2Dfy DarkSky XL_v1.1 test.html new file mode 100644 index 0000000000000000000000000000000000000000..dbb541a4ea9cbb034085bcf96d40224757d30c9a --- /dev/null +++ b/Lora/2Dfy DarkSky XL_v1.1 test.html @@ -0,0 +1,75 @@ + +
+ + + + +
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Important: Use the version of the LoRA depending on the resolution you are going to use. Stylize generated images from 2.5D or 2.25D models as 2D. Don't use tags like "perfect hands" for the 1st version, it will confuse most models.
Trained LoRA stylized as 2D anime for the landscape/wallpaper resolution. It will work as a mini-model that will change the style of the model you'll use. For some models, it will be in a subtle way. The best use is in 2.25 to 2.5D models with good anatomy. Do not use with 3D models. Safest weights: 0.2 and 0.1.
Tips for better use: Try different weights for each model, start with 0.2 and slowly increase it or decrease it until you find the right spot. Try my negatives of my first two images that work in most of the models I've tried. Use a model that mixes 2D and 3D so this LoRA will tune down the 3D. Recommended use of Adetailer (face fix).
Basically: For 2.5D models it will need more weight, for 2D models it will need less weight. For some 2D models, 0.2 weight will be enough. Do not use with 3D models or 2.75D models.
It will be heavily influenced by the main model!! So be careful which one you use, as it will enhance its capabilities. It can greatly help eyes and you will get good results with "shining eyes" and "detailed eyes" and maybe it will help hand generation.
Some models would need "semirealistic" in prompt to work, for others my LoRA won't work as great.
It will change your gen a lot; it is not like other HD enhancers that will not change the composition. Why should you use it? Well, because it shows an amazing style that is my favorite. Trained over HD/detailed and most of the time a blurry style, you can force it with prompts like "blurry background". The focus will be on the character and the depth of field is used a lot for this 2Dfy edition.
This mini-model was trained with AnyLoRA so it should create good quality 2D anime. V1 is trained with half of my anime gallery, so that means 2271 images.
V1 was trained with resolution x640. It will work with resolution 768x512 / 832x448.
HD v1 was trained with resolution x1024. It uses resolution x1280 and x1024. HD v1 shows the best results and it seems to change the image even more than v1.
HD v1.1 will be trained on resolution x896. It should be better on resolution x1024 to x1152.
I'll try to train a version for XL models in the future.
Weight is very variable, but the best results I had are from 0,4 to 0,7, with 0,7 being the default for 2.5D models and 0,6 for 2.25D models. You can also use from 0,1 to 1. Weight will depend heavily on the model, so test it yourself. I like to use from 0,6 to 0,75. This LoRA can tune down the 3D of some models and make the image better.
Remember this is a LoRA for ANIME WALLPAPER STYLE, with characters. So it would show best results for that purpose. I also recommend a way to fix eyes. I recommend Adetailer in Stable Diffusion.
★ New: Version v1.1 (working on it until v2) includes new tags & most used tags (negatives too, like low quality or bad quality). Deleted some bad images. Actually 360/2228 images. Quality tags: masterpiece, UHD2Dfy, HD wallpaper (in this order). For eyes: detailed eyes.
Version v1.2: Will have a better tag to choose between normal quality and painted style, something like 2DfyPainted. For the best quality we'll still use UHD2Dfy. More bad images were deleted. I'll also try to delete images from more than 2 characters, because SD 1.5 normally works better with 1 or 2 characters. I'm not talking about characters in background.
For V2 I'll retrain the LoRA to manually include new tags (like anime names/character tags), include poses, effects, fix tags, hand/image qualities and add new tags like "dynamic pose", "movement effect", "anime painted style", "blurry vignette", "blurry background", "glow"... deleting bad images and adding a few new images as well. The result should be better with advanced prompts and more consistent than V1. Current state: ~360/2250
All intended custom tags (for V2): UHD2Dfy, Bright2Dfy, Blur2Dfy, Vintage2Dfy (Old filter), Painted2Dfy, Retro2Dfy (Retro anime style), Special2Dfy (An special amazing chosen style for certain images)
FAQ:
What about SFW or NSFW?
It was trained over SFW artwork so I don't know if it will work for NSFW.
Can I use it for 2D?
I only use this LoRA for 2.5D or 2.25D models, because for 2D models it can make some gens weird, but it can still work if the model is great.
LoRA is not giving me the best results.
If you want better results, try less complicated tags (so more generic ones), fewer "perfect" tags, and try different negatives that can be good for the base model. If you use a lot of complicated tags, the combination can make the result worse. Also try another model; this LoRA is good in very specific scenarios. My intention for V2 will be to make it more consistent.
~
The images you see as examples were done mainly with my model Everything-Mix v2.1 / V3 :)
This was my 2nd trained LoRA and I decided to share with you. Test it and let me know if it works for you! Share to me your images so I can see the results. You can use this LoRA as you want, but give me the proper credit. Don't sell it.
Note: Changed name from DarkSky Diffusion to 2Dfy, as I'll try to make a model out of this LoRA in the future and maybe I will give it that name. Since I can do more editions of this kind of LoRAs, this will be the "DarkSky" one.
-
Most used tags for V1:
solo (1821) 1girl (1791) long hair (1355) looking at viewer (1228) smile (800) dress (727) short hair (718) red eyes (715) bangs (692) breasts (663) holding (608) weapon (601) black hair (566) blue eyes (550) ribbon (523) very long hair (500) gloves (496) skirt (495) sky (466) long sleeves (456) blonde hair (454) sword (418) flower (415) open mouth (401) shirt (401) upper body (397) hair ornament (397) bare shoulders (392) thighhighs (379) 1boy (379) white hair (372) hat (359) hair between eyes (356) blush (345) outdoors (344) petals (330) medium breasts (323) bow (321) blue hair (309) twintails (306) cloud (306) cleavage (296) brown hair (296) holding weapon (292) wings (289) closed mouth (285) hair ribbon (283) male focus (270) yellow eyes (261) detached sleeves (253)
Most used tags for V2:
[WIP]
-
Upcoming 2Dfy editions:
ClearSky Edition: The other half of my anime gallery.
Masterpiece Edition: The best images from a collection of 5000.
From all the images deleted from the LoRAs I'll try to make a new edition of the LoRA (Weird Edition) after I finish each one, so it would be a total of 4. This edition won't be revised and won't be shared... probably.
Also, due to limitations of SD 1.5 models with various characters I'll try to create a new LoRA for more than 3 characters.
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
it's fine if you don't use it, but the character might get mixed.
(FIRST TIME MAKING LORAS, MIGHT BE BAD)
Sometimes it keeps generating greyscale images; you can maybe use the loras from the example image to kinda fix that (I don't know how to fix that ;-;) (The one with blade stelle and lumine)
Recommended aspect ratio: 1216 x 832. You can change it if you want to experiment, but quality won't be guaranteed.
Recommended strength: 1 (you can kinda play around with it, but it's probably going to be bad :P)
Trained and tested on noob v-pred 1.0; don't know about other checkpoints :P
Example: 3domain, character_1, character_2, character_3,
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
If you like my work you can drop a positive review and follow me, it's free and it helps me a lot :D
You can also buy me a coffee to support me :D
-
Commissions:https://ko-fi.com/shadowxart/commissions
Early Access:https://tensor.art/u/650355879652217325/models
Thanks :)
-
Support the Artist:https://x.com/afrobullart
-
Prompts
Trigger Word:BullStyle
Do not use:score_9, score_8_up, score_7_up in positive prompts.
Do not use:score_6, score_5, score_4 in negative prompts.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Works best at 0.75 weight for SD1, 1 for SDXL
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Do "black holes" pull you in like a gravitational force you can’t escape? Are you ready to dive into the cosmic unknown and explore the deepest mysteries of the analverse? 🚀
Introducing: 🌌 Anal Gape Slider for PonyXL! 🌌
Slide your way deep into the rabbit hole (or should we say black hole) of discovery! Adjust the sliders, tweak the size, and let your imagination expand faster than the universe itself.
Works seamlessly in 0.10 increments for fine-tuned adjustments.
Tested extensively between -0.5 and 3 for consistent, high-quality results.
No activation prompt required!
🔭 Warning: Adjusting the slider beyond 3 might bend time and space. This feature remains untested, so proceed with caution—you could accidentally create a wormhole or end up procrastinating for hours while contemplating your choices for hours!
🚨 Disclaimer 🚨
This slider is not brought to you by Neil deGrasse Tyson, Stephen Hawking, or any other super-genius astrophysicist.
Enjoy the slider? Leave a like, share your results, and let me know how it worked for you! Feedback and requests are always welcome—I’d love to hear your ideas!
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Works best at 0.75 weight for 1.0, 0.8 for 2.0
0.85 for Pony version.
Illustrious works best at 0.95 weight.
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Anime
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Trained on Pony Diffusion V6 XL.
V2 works from -5 to 5+
LORA weight: -4 to 6+
Use Positive weight to add weight to a character
This is a slider, Lora, so you can choose whatever weight you want, so experiment and find what suits you best. Negative weight makes the character skinnier.

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
IL version
Try experimenting adding the tag: chara \(undertale\)
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Training: 20 images at 2000 steps
Base model: NoobAI-XL_Eplison_predv1.1
Recommended weights: 0.9 - 1
Recommended Aspect Ratio: 1024 x 1024 or 1216 x 812
Trigger words:
domain clash, split screen, character_1, character_2Extras:
english text
+ 
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
(commission) Lighting intensity slider
Makes light sources in the image brighter or darker for a very natural brightness/lighting control.
IMPORTANT:
This lora is POTENT if used correctly.
-Add any lighting descriptor (highly recommend "blue hour" or "late night") but things like "dark" or "dimly lit" work fine as well
Literally any keywords that are dark related will make it DARK. try -0.5 to -1 with a "dark" word in your prompt.
-You may get blue eyes on dark scenes, simply specify eye color
.
Feel free to reach out to me for commission requests ✌️
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
A first attempt at recreating Dark artstyle
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
HF: https://huggingface.co/OedoSoldier/detail-tweaker-lora
This is a LoRA for enhancing/diminishing detail while keeping the overall style/character; it works well with all kinds of base models (incl anime & realistic models)/style LoRA/character LoRA, etc.
Apply your own weight; this LoRA can be utilized for any weight up/down to 2/-2!
Note: use a negative weight to reduce details!
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
This model depicts Dishwasher1910's Style, credit to the artist
Trained on Illustrious V0.1
Tag: dishwasher1910
Enjoy and share results!
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Diswasher1910 Style
Promt: d1shw4ash3r
Strength: 0.6-1
Have fun.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Dishwasher1910 Style Lora.
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Lora designed to generate the induction where a finger is placed in the middle of a person's forehead to trigger the trance.
First 2 triggers are mandatory, the rest are to decide the style, disembodied_limb will add a floating hand for the induction, mind_control & empty eyes add a pretty good effect to the eyes and sometimes a shining finger.
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Recommended setting is 1
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Trigger words:
gwentennyson, orange hair, short hair, green eyes, earrings
Outfit:
blue shirt, hairclip, white pants
Tested and used with WAI-SHUFFLE-NOOB:
https://civitai.com/models/989367?modelVersionId=1202076
For on-site generations, use my Tensor Art page: https://tensor.art/u/830771749061779884
Order a commission here!
https://ko-fi.com/c/38aa4a973d
Support my work by buying me a ko-fi:
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Doesn't require any trigger words.
Strength : 1.0
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Please do not use this LORA to claim the art as your own or for profit.
This model is good for replicating Amechan's art style.
Use the activation tag: amechanstyle

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Trigger: pyramidheadSDXL
Strength: 1 or 0.8
Use Adetailer

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Tested at 0.8-1.0 strength
Activator
RWBY_RR,Body
ruby rose, 1girl, grey eyes, short hair, bangs, black hair, gradient hair, two-tone hair, huge breasts, wide hips, (thick thighs:1.2),'Fit
black choker, shrug (clothing), scarf, hooded cloak, red cape, bandolier, short red dress, cleavage, sleeveless, bare shoulders, corset, red miniskirt, pleated skirt, belt buckle, multiple belts, sheer black pantyhose, black thighhigh boots, deep skin, skindentation, thigh strap, black elbow gloves, fingerless gloves,lingerie, see-through, lace trim, lace, black nightgown, loose nightgown, bottomless, cleavage, bare shoulders, nipples visible through clothes, covered nipples, no panties, female pubic hair, pussy under clothes,wedding dress, overflowing breasts, bursting breasts, cleavage, bridal lingerie, long sleeves, white lace gloves, white lace collar, frilled collar, plunging neckline, white lace harness, white pantyhose, zettai ryouiki,sports bra, cleavage, overflowing breasts, bursting breasts, deep skin, skindentation, sideboob, dolphin shorts, short shorts,deep skin, skindentation, volleyball uniform, track uniform, red and white croptop, sleevless, midriff, red shorts, buruma, short shorts, black elbow pads, black knee pads, red footwear, sneakers,heart-shaped eyewear, eyewear on head, pajamas, black tank top, camisole, sleeveless, heart print, pink pants, polka dot legwear,black camisole, overflowing breasts, sideboob, deep skin, skindentation, sideboob, denim shorts, white thighhigh socks, zettai ryouiki,
+ 
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Trained on Illustrious-XL-v0.1 with 58 pictures.
Best results with a weight between 0.8 and 1.
Main prompts : karasuchan
Style prompts : greyscale with colored background, hatching \(texture\), monochrome
Reviews are really appreciated, i love to see the community use my work, that's why I share it.
If you like my work, you can tip me here.
Got a specific request? I'm open for commissions on my Ko-fi or Fiverr gig! If you provide enough data, OCs are accepted.
If you want to get updates on my projects as they go, you can follow me on X
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Style of the artist "Krekkov"
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
🖥️Welcome to try out the open-source GPT4V-Image-Captioner, developed by my friend and me. It offers a one-click installation and comes integrated with multiple features including image pre-compression, image tagging, and tag statistics. Recently, we also launched the webui plugin version of this tool, everyone is welcome to use it!
🌍欢迎加入QQ群"兔狲·AIGC梦工北厂",群号 :780132897 ;"兔狲·AIGC梦工南厂",群号 :835297318(入群答案:兔狲)。Telegram群聊“兔狲的SDXL百老汇”,链接:https://t.me/+KkflmfLTAdwzMzI1
Quick Use Guide: Compatible with any SDXL standard base model. Weight 1, 8 steps, CFG 2. Different version names correspond to their respective adapted samplers.
极简用法:可与任何常规SDXL大模型搭配使用。权重1,步数8,CFG 2。不同版本名对应各自适配的采样器
This is a fusion LoRA model of SDXL LCM LoRA and SDXL Turbo LoRA. The inspiration for its creation came about when I was developing the LEOSAM HelloWorld Turbo+LCM version and discovered that the combination of these two technologies yielded the best image generation results.
As a result, I merged this fusion model, in which the merge ratio of LCM and Turbo LoRA has been optimized by me. The size of this model has been resized to rank 1, with only 12.3MB. At the same time, the optimal weight for LORA has been adjusted to 1. It's very convenient to use. Everyone is welcome to experience it!
The following demonstrates the image generation results when using the Euler A sampler for 8 steps with HelloWorld 2.0 combined with LCM & TurboMix LoRA, LCM LoRA, and Turbo LoRA respectively.
It can be seen that LCM & TurboMix LoRA achieves the most balanced results between facial influence and image clarity.


The recommended parameters for generating images with this model are:
Sampler: Different version names correspond to their respective adapted samplers. The time needed for the different samplers to produce an image in 8 steps, as well as the quality of the image, varies. Please choose according to your needs.
LoRA Weight: Around 1
CFG scale: 2 (Important! It is recommended to have a CFG scale between 1.5~2.5)
Sampling steps: 8 steps (6~8 steps are acceptable)
Hires algorithm: ESRGAN 4x (Other upscaling algorithms can also be used, not a mandatory option. Please ensure that your GPU memory is sufficient)
Hires Upscale factor: 1.5x
Hires steps: 8 steps
Hires Denoising strength: 0.3
这是一个SDXL LCM LoRA和SDXL Turbo LoRA的融合LoRA模型,起因于我在制作LEOSAM HelloWorld Turbo+LCM版本时,发现这两种技术结合使用时,生图效果是最好的,因此我制作了这个融合版本的模型。
LCM和turbo lora的融合比例经过我的调节寻优,同时这个模型的体积已经resize至rank 1,只有12.3MB大小。同时LORA最佳权重已经调整至1。使用上非常方便。欢迎大家体验!
上方图展示了HelloWorld 2.0分别搭配LCM&TurboMix LoRA、 LCM LoRA、Turbo LoRA时,使用Eular A采样器采样8步时的出图效果。可以看出LCM&TurboMix LoRA拥有脸型影响和图像清晰度之间最平衡的结果。
本模型推荐的生图参数:
采样器:不同版本名对应各自适配的采样器。不同采样器8步出图的时间和画质不同,请根据需要选择。
LoRA 使用权重:1左右
采样步数:8步(6~8步均可)
CFG scale:2(重要!对于LCM模型,CFG scale建议1.5~2.5)
放大算法:ESRGAN 4x(其他放大算法也可以,非必须选项,请确保GPU显存充足)
放大倍数:1.5倍
放大步数:8步
放大降噪系数:0.3
2023.12.2 Update details for the Euler a.fix version.
The Euler a version has an issue where it doesn't display in the sdxl LoRA list of webui. The Euler a.fix version rectifies this problem by modifying the metadata. Additionally, the Euler a.fix version further optimizes the merge steps and proportions of LCM and Turbo.
Euler a版本存在Webui SDXL LoRA列表中不显示的问题。Euler a.fix版本通过更改元数据,修正了这个问题。同时Euler a.fix版本还进一步优化了LCM和Turbo的融合步骤和比例。
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
🖥️Welcome to try out the open-source GPT4V-Image-Captioner, developed by my friend and me. It offers a one-click installation and comes integrated with multiple features including image pre-compression, image tagging, and tag statistics. Recently, we also launched the webui plugin version of this tool, everyone is welcome to use it!
🌍欢迎加入QQ群"兔狲·AIGC梦工北厂",群号 :780132897 ;"兔狲·AIGC梦工南厂",群号 :835297318(入群答案:兔狲)。Telegram群聊“兔狲的SDXL百老汇”,链接:https://t.me/+KkflmfLTAdwzMzI1
Quick Use Guide: Compatible with any SDXL standard base model. Weight 1, 8 steps, CFG 2. Different version names correspond to their respective adapted samplers.
极简用法:可与任何常规SDXL大模型搭配使用。权重1,步数8,CFG 2。不同版本名对应各自适配的采样器
This is a fusion LoRA model of SDXL LCM LoRA and SDXL Turbo LoRA. The inspiration for its creation came about when I was developing the LEOSAM HelloWorld Turbo+LCM version and discovered that the combination of these two technologies yielded the best image generation results.
As a result, I merged this fusion model, in which the merge ratio of LCM and Turbo LoRA has been optimized by me. The size of this model has been resized to rank 1, with only 12.3MB. At the same time, the optimal weight for LORA has been adjusted to 1. It's very convenient to use. Everyone is welcome to experience it!
The following demonstrates the image generation results when using the Euler A sampler for 8 steps with HelloWorld 2.0 combined with LCM & TurboMix LoRA, LCM LoRA, and Turbo LoRA respectively.
It can be seen that LCM & TurboMix LoRA achieves the most balanced results between facial influence and image clarity.


The recommended parameters for generating images with this model are:
Sampler: Different version names correspond to their respective adapted samplers. The time needed for the different samplers to produce an image in 8 steps, as well as the quality of the image, varies. Please choose according to your needs.
LoRA Weight: Around 1
CFG scale: 2 (Important! It is recommended to have a CFG scale between 1.5~2.5)
Sampling steps: 8 steps (6~8 steps are acceptable)
Hires algorithm: ESRGAN 4x (Other upscaling algorithms can also be used, not a mandatory option. Please ensure that your GPU memory is sufficient)
Hires Upscale factor: 1.5x
Hires steps: 8 steps
Hires Denoising strength: 0.3
这是一个SDXL LCM LoRA和SDXL Turbo LoRA的融合LoRA模型,起因于我在制作LEOSAM HelloWorld Turbo+LCM版本时,发现这两种技术结合使用时,生图效果是最好的,因此我制作了这个融合版本的模型。
LCM和turbo lora的融合比例经过我的调节寻优,同时这个模型的体积已经resize至rank 1,只有12.3MB大小。同时LORA最佳权重已经调整至1。使用上非常方便。欢迎大家体验!
上方图展示了HelloWorld 2.0分别搭配LCM&TurboMix LoRA、 LCM LoRA、Turbo LoRA时,使用Eular A采样器采样8步时的出图效果。可以看出LCM&TurboMix LoRA拥有脸型影响和图像清晰度之间最平衡的结果。
本模型推荐的生图参数:
采样器:不同版本名对应各自适配的采样器。不同采样器8步出图的时间和画质不同,请根据需要选择。
LoRA 使用权重:1左右
采样步数:8步(6~8步均可)
CFG scale:2(重要!对于LCM模型,CFG scale建议1.5~2.5)
放大算法:ESRGAN 4x(其他放大算法也可以,非必须选项,请确保GPU显存充足)
放大倍数:1.5倍
放大步数:8步
放大降噪系数:0.3
2023.12.2 Update details for the Euler a.fix version.
The Euler a version has an issue where it doesn't display in the sdxl LoRA list of webui. The Euler a.fix version rectifies this problem by modifying the metadata. Additionally, the Euler a.fix version further optimizes the merge steps and proportions of LCM and Turbo.
Euler a版本存在Webui SDXL LoRA列表中不显示的问题。Euler a.fix版本通过更改元数据,修正了这个问题。同时Euler a.fix版本还进一步优化了LCM和Turbo的融合步骤和比例。
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
🖥️Welcome to try out the open-source GPT4V-Image-Captioner, developed by my friend and me. It offers a one-click installation and comes integrated with multiple features including image pre-compression, image tagging, and tag statistics. Recently, we also launched the webui plugin version of this tool, everyone is welcome to use it!
🌍欢迎加入QQ群"兔狲·AIGC梦工北厂",群号 :780132897 ;"兔狲·AIGC梦工南厂",群号 :835297318(入群答案:兔狲)。Telegram群聊“兔狲的SDXL百老汇”,链接:https://t.me/+KkflmfLTAdwzMzI1
Quick Use Guide: Compatible with any SDXL standard base model. Weight 1, 8 steps, CFG 2. Different version names correspond to their respective adapted samplers.
极简用法:可与任何常规SDXL大模型搭配使用。权重1,步数8,CFG 2。不同版本名对应各自适配的采样器
This is a fusion LoRA model of SDXL LCM LoRA and SDXL Turbo LoRA. The inspiration for its creation came about when I was developing the LEOSAM HelloWorld Turbo+LCM version and discovered that the combination of these two technologies yielded the best image generation results.
As a result, I merged this fusion model, in which the merge ratio of LCM and Turbo LoRA has been optimized by me. The size of this model has been resized to rank 1, with only 12.3MB. At the same time, the optimal weight for LORA has been adjusted to 1. It's very convenient to use. Everyone is welcome to experience it!
The following demonstrates the image generation results when using the Euler A sampler for 8 steps with HelloWorld 2.0 combined with LCM & TurboMix LoRA, LCM LoRA, and Turbo LoRA respectively.
It can be seen that LCM & TurboMix LoRA achieves the most balanced results between facial influence and image clarity.


The recommended parameters for generating images with this model are:
Sampler: Different version names correspond to their respective adapted samplers. The time needed for the different samplers to produce an image in 8 steps, as well as the quality of the image, varies. Please choose according to your needs.
LoRA Weight: Around 1
CFG scale: 2 (Important! It is recommended to have a CFG scale between 1.5~2.5)
Sampling steps: 8 steps (6~8 steps are acceptable)
Hires algorithm: ESRGAN 4x (Other upscaling algorithms can also be used, not a mandatory option. Please ensure that your GPU memory is sufficient)
Hires Upscale factor: 1.5x
Hires steps: 8 steps
Hires Denoising strength: 0.3
这是一个SDXL LCM LoRA和SDXL Turbo LoRA的融合LoRA模型,起因于我在制作LEOSAM HelloWorld Turbo+LCM版本时,发现这两种技术结合使用时,生图效果是最好的,因此我制作了这个融合版本的模型。
LCM和turbo lora的融合比例经过我的调节寻优,同时这个模型的体积已经resize至rank 1,只有12.3MB大小。同时LORA最佳权重已经调整至1。使用上非常方便。欢迎大家体验!
上方图展示了HelloWorld 2.0分别搭配LCM&TurboMix LoRA、 LCM LoRA、Turbo LoRA时,使用Eular A采样器采样8步时的出图效果。可以看出LCM&TurboMix LoRA拥有脸型影响和图像清晰度之间最平衡的结果。
本模型推荐的生图参数:
采样器:不同版本名对应各自适配的采样器。不同采样器8步出图的时间和画质不同,请根据需要选择。
LoRA 使用权重:1左右
采样步数:8步(6~8步均可)
CFG scale:2(重要!对于LCM模型,CFG scale建议1.5~2.5)
放大算法:ESRGAN 4x(其他放大算法也可以,非必须选项,请确保GPU显存充足)
放大倍数:1.5倍
放大步数:8步
放大降噪系数:0.3
2023.12.2 Update details for the Euler a.fix version.
The Euler a version has an issue where it doesn't display in the sdxl LoRA list of webui. The Euler a.fix version rectifies this problem by modifying the metadata. Additionally, the Euler a.fix version further optimizes the merge steps and proportions of LCM and Turbo.
Euler a版本存在Webui SDXL LoRA列表中不显示的问题。Euler a.fix版本通过更改元数据,修正了这个问题。同时Euler a.fix版本还进一步优化了LCM和Turbo的融合步骤和比例。
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
🖥️Welcome to try out the open-source GPT4V-Image-Captioner, developed by my friend and me. It offers a one-click installation and comes integrated with multiple features including image pre-compression, image tagging, and tag statistics. Recently, we also launched the webui plugin version of this tool, everyone is welcome to use it!
🌍欢迎加入QQ群"兔狲·AIGC梦工北厂",群号 :780132897 ;"兔狲·AIGC梦工南厂",群号 :835297318(入群答案:兔狲)。Telegram群聊“兔狲的SDXL百老汇”,链接:https://t.me/+KkflmfLTAdwzMzI1
Quick Use Guide: Compatible with any SDXL standard base model. Weight 1, 8 steps, CFG 2. Different version names correspond to their respective adapted samplers.
极简用法:可与任何常规SDXL大模型搭配使用。权重1,步数8,CFG 2。不同版本名对应各自适配的采样器
This is a fusion LoRA model of SDXL LCM LoRA and SDXL Turbo LoRA. The inspiration for its creation came about when I was developing the LEOSAM HelloWorld Turbo+LCM version and discovered that the combination of these two technologies yielded the best image generation results.
As a result, I merged this fusion model, in which the merge ratio of LCM and Turbo LoRA has been optimized by me. The size of this model has been resized to rank 1, with only 12.3MB. At the same time, the optimal weight for LORA has been adjusted to 1. It's very convenient to use. Everyone is welcome to experience it!
The following demonstrates the image generation results when using the Euler A sampler for 8 steps with HelloWorld 2.0 combined with LCM & TurboMix LoRA, LCM LoRA, and Turbo LoRA respectively.
It can be seen that LCM & TurboMix LoRA achieves the most balanced results between facial influence and image clarity.


The recommended parameters for generating images with this model are:
Sampler: Different version names correspond to their respective adapted samplers. The time needed for the different samplers to produce an image in 8 steps, as well as the quality of the image, varies. Please choose according to your needs.
LoRA Weight: Around 1
CFG scale: 2 (Important! It is recommended to have a CFG scale between 1.5~2.5)
Sampling steps: 8 steps (6~8 steps are acceptable)
Hires algorithm: ESRGAN 4x (Other upscaling algorithms can also be used, not a mandatory option. Please ensure that your GPU memory is sufficient)
Hires Upscale factor: 1.5x
Hires steps: 8 steps
Hires Denoising strength: 0.3
这是一个SDXL LCM LoRA和SDXL Turbo LoRA的融合LoRA模型,起因于我在制作LEOSAM HelloWorld Turbo+LCM版本时,发现这两种技术结合使用时,生图效果是最好的,因此我制作了这个融合版本的模型。
LCM和turbo lora的融合比例经过我的调节寻优,同时这个模型的体积已经resize至rank 1,只有12.3MB大小。同时LORA最佳权重已经调整至1。使用上非常方便。欢迎大家体验!
上方图展示了HelloWorld 2.0分别搭配LCM&TurboMix LoRA、 LCM LoRA、Turbo LoRA时,使用Eular A采样器采样8步时的出图效果。可以看出LCM&TurboMix LoRA拥有脸型影响和图像清晰度之间最平衡的结果。
本模型推荐的生图参数:
采样器:不同版本名对应各自适配的采样器。不同采样器8步出图的时间和画质不同,请根据需要选择。
LoRA 使用权重:1左右
采样步数:8步(6~8步均可)
CFG scale:2(重要!对于LCM模型,CFG scale建议1.5~2.5)
放大算法:ESRGAN 4x(其他放大算法也可以,非必须选项,请确保GPU显存充足)
放大倍数:1.5倍
放大步数:8步
放大降噪系数:0.3
2023.12.2 Update details for the Euler a.fix version.
The Euler a version has an issue where it doesn't display in the sdxl LoRA list of webui. The Euler a.fix version rectifies this problem by modifying the metadata. Additionally, the Euler a.fix version further optimizes the merge steps and proportions of LCM and Turbo.
Euler a版本存在Webui SDXL LoRA列表中不显示的问题。Euler a.fix版本通过更改元数据,修正了这个问题。同时Euler a.fix版本还进一步优化了LCM和Turbo的融合步骤和比例。
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Base on Turbo and LCM LORA
I made this Version for accelerating Checkpoint to generate.
Now you can place this LoRA node wherever you want — no more having to put it at the end of your LoRA stack and hesitating over whether to link the CLIP or not, which made your node setup more complex.
基于最新的ADD训练模式,Turbo和LCM LORA的推出大大加快的出图速度。
我基于最新的两个官方LORA模型,制作了这个微型的LORA版本。
现在,使用我的LORA,你可以随意的将这个24MB的LORA放在你的LORAs栈中的任意位置,而不必再因为需要避开CLIP层而感到迷茫,费时费力的将它放在最后,并且需要链接更复杂的节点了。随便放,放哪里都可以。

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
This LoRA is trained on the Instant loss 2koma(即堕ち2コマ).
NoobAI-XL V-Pred 1.0 was used as the base model for training.
2koma, comic, multiple views
1girl, solo focus, explicit, 2koma, comic, multiple views, speech bubble, 1boy, sex, frown, ahegao
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
This LoRA is trained on the Domain Expansion: Muryokusho from Jujutsu Kaisen.
NoobAI-XL V-Pred 1.0 was used as the base model for training.
muryokusho, domain expansion
muryokusho, domain expansion, hand up, index finger raised
muryokusho, domain expansion, hand up, index finger raised, collar tug
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
ワンピースで見かけるプレイステーションのボタンみたいな怯えた目です。
アニメ系のチェックポイントやスタイルLoRAとの相性がいいです。
プロンプト等は以下を参照してください。
Shocked eyes in One Piece like a button on a PlayStation.
It goes well with anime-style checkpoints and style LoRAs.
Thank you.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Necromalock Style LoRA.
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
If you like my work, drop a 5-star review and hit the heart icon. It's free and keeps me motivated.
This is a LoRA style training based on illustrator Obui 's art.
Obui is a Japanese illustrator best known for his work in adult-oriented content. His art style is detailed and full of energy, with expressive characters that bring a lot of personality and emotion to the table. He’s also been involved in character design for various games, showcasing a versatile and dynamic creative style.
Prompt:
Obui
I recommend setting the weight higher to make the art style lean more toward Obui's distinct aesthetics.
(You can refer to my sample images for reference.)
I’ve created a Telegram group, and everyone’s welcome to join and share their AI-generated images: https://t.me/LepusMindRoom
If you have any favorite characters or art styles you'd like to see, feel free to comment and let me know. If I like them too, I might create a LoRA for them as well!
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
A Lora to pov fellatio with a mirror.
Making illustrious versions of some of my past models.
Trained on 28 images.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
If you like my work you can drop a positive review and follow me, it's free and it helps me a lot :D
You can also buy me a coffee to support me :D
-
Commissions:https://ko-fi.com/shadowxart/commissions
Early Access:https://tensor.art/u/650355879652217325/models
Thanks :)
-
Support the Artist:https://x.com/PoPer211102
-
Prompts
Trigger Word:Poper
Do not use:score_9, score_8_up, score_7_up in positive prompts.
Do not use:score_6, score_5, score_4 in negative prompts.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Recommended for use on weights 0.8-1
Triggered word- ssambatea
Will remove if requested

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
There are already a few shexyo LoRAs, but his style has changed, so here is another one. Trained only on new (2025) images.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Been cooking some tweaks based on feedback:
Should work well on any XL/Illustrious/NoobaAI Checkpoints!
Clothes Details: created and merged Loras that adds more intricate details to clothing texture.
Skin Details: created and merged LoRAs that add a little more detail to skin. Should be more noticeable on realistic Checkpoints.
Armor and Weapons Details: created and merged LoRAs that add detailed engraving and patterns. Better material quality.
The more Loras you use the lower the Lora weight should be!
Less than 2 Loras: Lora weight 0.5 ~ 1.0
More than 2 Loras: Lora weight 0.2 ~ 0.5
A detail tweaker/booster inspired by the work of many awesome creators like Shed_The_Skin's Detail Slider LoRA, w4r10ck's Detail Tweaker XL, and many, MANY, others.
This gives an overhaul improvement to color, lighting, shadows and adds more details.
This LoRA was made to be used with Smooth Mix and Smooth Embeddings, but of course, it should work well with any other Checkpoints/Embeddings/LoRAs. Just try out your favorites and see how it turns out!
I recommend using:
Strength: 0.25~0.8
CFG Scale: 3~5
Adetailer
Hires. fix: Remacri
Have fun!
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
If you like my work you can drop a positive review and follow me, it's free and it helps me a lot :D
You can also buy me a coffee to support me :D
-
Commissions:https://ko-fi.com/shadowxart/commissions
Exclusive Models:https://tensor.art/u/650355879652217325/models
Thanks :)
-
Support the Artist:https://x.com/SSAMBAtea/status/1905897850153951544
-
Trigger Word:ssambstyle
Recommended Prompts:monochrome, motion_lines
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
👇👀 Read me! 👇👀This is a slider for Illustrious XL, trained on SweetMix. The purpose of this slider is to allow control of the age of depicted person(s) in generated images.
The SCALED version is the v1.0 LoRA but with the age representation in +/- 2 input weight instead of the non-Scaled version's +/- 5 input weight for full age representation.
[[ The REALISM version of this slider was removed, and I refunded everyone who bought the download for it what they paid + 500 buzz. I greatly appreciate your support, but CivitAI can not host a realism version of this slider. Consequently, it is now exclusive to TensorArt. ]]
Do not ask me to make this LoRA available for CivitAI's on-site generator. Per CivitAI's Terms of Service, Age adjusting LoRAs can not be used with CivitAI's on-site generator. This is not something I can change. If you want to use this LoRA with an online generation service, it is also available via Tensor Art.
~ How to use v1.0? ~
This is a bipolar slider LoRA. That means it supports both positive and negative input weights. General safe weighting is -5 to 5 , although you can push to +/- 6 with some oddities. No activation tag required. This LoRA is not trained for NoobAI V-Pred models.
If you are using this LoRA in combination with a character LoRA, adding supporting tags can be beneficial to affect age. Since character LoRAs often have a set character age trained, it can be difficult to change that age depending on how intensely the character LoRA wants to maintain the character's "correct" age. To help overcome this, I suggest you use tags like aged up, aged down, petite , mature female, etc... as needed to help change the character's age.
If you run into issues with using this LoRA, please feel free to send me a message on CivitAI, or leave a comment so that I can fix issues that may arise.
~ Known issues (v1.0) ~
At high input weights (> 5), the image subject may have male facial hair. Putting facial hair , mustache , old man , etc... in the negative prompt largely mitigates this.
Very low input weights ( < -4.5) may de-saturate the image. As a temporary workaround, I recommend using a style LoRA to alleviate this. (I like this one a lot)
~ Changelog ~
December 10, 2024: Initial release with Early Access.
December 11, 2024: Early Access goal met. Removed description disclaimer. A copy of it can be found here: https://pastebin.com/F9Cq5b5Q . Updated description.
December 12, 2024: Updated description with tips on how to best use the age slider with character LoRAs.
February 19, 2025: Updated the TensorArt Model link to point to the SCALED version on TensorArt as opposed to the non-scaled version on TensorArt
May 5, 2025: Released v1.1 REALISM version in Early Access. Updated description. CivitAI removed v1.1 Realism. Refunded everyone who purchased it. Updated description.
May 10, 2025: Updated description to link directly to the "v1.1 - REALISM" Tensor Art model page.
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
A Lora to give you a girl having sex while they are on TV in the background.
Making illustrious versions of some of my past models.
Trained on 93 images of side view blowjobs.
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
- Discord: CLICK ME
Xipa's styling loRA for Illustrious. It was trained over Illustrious-XL - v0.1 and tested on WAI-NSFW-illustrious-SDXL - v11.0 (all example images were generated using this model). The model works fine by default and doesn't usually affect the image much, though any review is appreciated.
Trigger Word: xipa
I recommend using the trigger word at the beginning of the prompt to avoid problems.
All images were generated using reForge and applying Hires.fix (x1.5) and ADetailer (over the face).
The model MAY generate artifacts in the eyes (especially if they are squinted or you are using Illustrious v0.1 as your model). The easiest way to fix this is to use aDetailer, Inpaint the eyes, or choose a better model like NTRMix XIII, WAI v11, etc.
If you would like to support me in continuing to invest time in creating LoRAs, donations are always welcome
I may add the option to request models if I see it as optimal. For now, you can request whatever you want, and if I'm interested, I'll probably take a look at it.
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
A Lora to give you a girl/girls watching while a couple have sex.
Making illustrious versions of some of my past models.
Trained on 69 images.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Do you like what I do? Consider supporting me on Patreon 🅿️ or feel free to buy me a coffee ☕
I've been using the (great) Detail Tweaker by CyberAIchemist for a while now, and I was really curious. I really just wanted to know if I was capable of doing something similar.
I had a lot of fun doing this, and I think it came out pretty great. I will definitely incorporate it in most of my work going forward.
You should use this between 0.5 and 1 weight, depending on your preference. You can go lower than 0.5 for a more subtle effect, of course.
I also found out that this gives some interesting results at negative weight, sometimes. See the examples to see what I mean.
How to use LoRA's in auto1111:
Update webui (use git pull like here or redownload it)
Copy the file to stable-diffusion-webui/models/lora
Select your LoRA like in this video
Make sure to change the weight (by default it's :1 which is usually too high)

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Anna Nishikinomiya [Shimoneta], works correctly with a strength of 1, the use of negative prompts for poses is recommended.
Checked with:
-WAI-SHUFFLE-NOOB:
https://civitai.com/models/989367?modelVersionId=1108388
-Images created via ComfyUI, using a much improved version of my workflow model.
📢 Any download or use of this LORA outside of the following web pages is a misappropriation of the material. (ರ╭╮ರ )
- https://civitai.com/user/ShadowPx
- https://pixai.art/@shadowpx/
- https://tensor.art/u/823365344247809917
‼️ support me on TensorArt.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Found her accidentally once.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Trigger base: backlighting
Helpful words: bokeh, lens flare
Hint: Instead of using a slider LoRA, you can use prompts like [blue theme::0.1] (webui syntax) to effectively change the color temperature.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Announcing the release of the CFG Scale Boost LoRA! After much research I've discovered a method which allows you to pseudo-control the CFG scale of your gens using a special LoRA. It is listed under Pony but will work with every SDXL model (including Illustrious XL)! It has the biggest effect on the backgrounds, where it will fill them out and make the scenes feel more complete without frying your gens. Second, it has a detailer-like effect on the main subject, but not always! Experimentation is key. I recommend 0.3-0.7 strength. Released under EA as many sleepless nights were dedicated to this project, which I originally wasn't going to release publicly. Works with negative values as well, allowing you to control the composition and detail of your art to a fine degree, something not possible with typical detail LoRAs.
A collection of various control LoRAs for use in gens, merges and fine tunes.
SDXL Enhance: controls the details and compositional elements of SDXL-base models.
CFG ScaleBoost: boost the CFG scale of your gens using a LORA without burning out your latents.
Update: Finally added a Pony version!
Original description:
This LoRA will make your model either 2D/anime <-> 3D/photorealistic depending on how to use it.
For example <lora:realistic:-1.0> (negative values) will strengthen the 2D style and <lora:realistic:1.0> (positive values) will strengthen the photo style.
Potential uses:
Adjust the style of your model slightly to make it look more original
Make a realistic model more realistic (such as when the skin is too smooth/plastic)
Make a 2D/anime model more stylised by making it more 2D
Combined with the detail LoRA (linked in the resources) you can reduce details in your anime model and make it look more 2D to create a unique cell shaded style
This LoRA works well with blended models. It is neutral and won’t overwrite the style of your gens.
Tip: depending on what model you’re using will determine what range of values will be suitable. 1.0 might be too much for some models and not enough for others, for example. This is especially true of negative values.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
*** Its a Tool - LORA ! ***
...
higher CFG(10-15) images with a plus-lora are not so overexposed or have that cartoon style.
the same applies in the other direction: low CFG (1-3) images are mostly very real but also very boring, so with a negative LoRA weight you can "pimp up" the mood.
...
in short words let me explain...
It seems that this is a kind of CFG slider (but more), for example
CFG ~ 2 -> loraweight:-0.5 (in this combination you get good images)
CFG ~ 12 -> loraweight: 0.5 (in this combination you get good images)
for DPM Sampler
...
for Euler_a it seems some kind of turbo
CFG~4
Steps ~10
loraweight: -0.4 to -0.8
maybe its useful for turbo/lcm models to have a smoother cfg/slider ?!?
for abstract art -> minus is more real, plus is more abstract/fantasy
for face portraits, lets say you have a prompt:
eg
"portrait photo of a 20yo messy blond haired woman. wears an office suit."
cfg 2 - hair is not that messy, and a boring surrounding
cfg 5 - hair is messy
cfg 12 - the hair is even more messy, but somehow overexposed/cartoon
at cfg 2: you can use that lora with a weight around -0.5 to get a more interesting image with very messy hair ;)
at cfg 12: you can use it with a weight around 0.5 to get more real images
.
don't blame me, it's a tool; maybe you can play with it and tell me more ;)
:*

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Control contrast in SDXL just like using a slider on your monitor. Stable, linear, and has zero side effect on style.
Or an U-net output strength regulator, if you know what I am saying.
What does this LoRA do?
It can amplify (strength >0) or reduce (strength <0) the contrast of your base model.
400KiB LoRA file? WTF Is the file broken?
No. It is a fully functional LoRA.
How to use it?
Just apply it as normal LoRA.
If you want to enhance the contrast, recommend to set strength 0.2 and forget. Also recommend lower your CFG scale a little bit for better details.
Working strength is around -0.5~0.5.
You don't have to set the patch strength for text encoder. This LoRA does not patch it.
Supported models?
All Illustrious based models, including NoobAI e-pred, are guaranteed to work. Unless your base model somehow changed U-net OUT8 block, which is very rare.
Other models (like v-pred) maybe work, I just did not check and test.
What's the training data? Why zero side effect on style?
This LoRA does not come from training.
I know how stable diffusion works. There is no contrast. What you said above is BS.
Yes it is BS, so to speak. Because it's simple for most of users to understand.
Here is the description for advance gigachad users:
You may have heard about FreeU. It can manipulate U-net blocks output. But it requires software support. What about using LoRA to do the thing?
So you can think of this LoRA as FreeU, sort of. It manipulates U-net output, mainly in strength.
You no longer have to choose between "creativity/low CFG scale" and "high contrast". Now you can have both.
You can increase unet strength so you can lower CFG scale, and get more creative result and details.
Or vice versa, reduce unet strength so you can use higher CFG to get a stable and clean result, without oversaturation.
Share merges using this LoRA is allowed. However, you must credit the creator and provide a link to this page. Beware that weight pattern will become very unique after this LoRA applied. A normally trained model will never have such kind of pattern.
This LoRA is highly experimental. Remember to leave feedback in comment section. Don't write feedback in Civitai review system, it was poorly designed, literally nobody can find and see the review.
Have fun.

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
(日本語説明は後半にあります)
The popular "Hotaru's LoRA series" (for SD1) is now available for SDXL! More will be released gradually on CivitAI (already available on HuggingFace).
Using only prompts, it is difficult to render characters and backgrounds as dark while keeping only light sources bright (such as lamps, the moon, and stars). This LoRA enables to generate such images.
This LoRA is compatible with Pony / Illustrious based checkpoints since v2.0. Some checkpoints may have less compatibility.
This LoRA is mainly tested on 2D-2.5D illustrations, and may not suitable for photorealistic checkpoints.
My "Breed" series checkpoints will make good outputs. Sample images are generated with "Breed" series listed on "Suggested Resources" section.
SD1 version is also available.
It works simply by enabling the LoRA, so basically no additional prompts are necessary. But adding prompts such as darkness or dark sky to indicate darkness can lead to better results.
If you want to make the character's eyes glow, add glowing eyes.
The level of darkness can be adjusted by the LoRA weight. For a darker effect, increase the weight. The upper limit is around 2.0.
More various other SD1/SDXL LoRA's and checkpoints are available on my HuggingFace repository or CivitAI models list. Please check it out.
CAUTION : Some sample images' prompt is using NegPiP extension for minus weight.
SD1でご好評いただいていた十条蛍のLoRAシリーズが、SDXL対応になって再登場します。HuggingFaceで先行公開中ですが、CivitAIにも順次掲載していきます。
キャラクターや背景を暗くしつつ、灯具や月・星など光源部分だけ明るく描写する、というのはプロンプトだけでは難しいですが、本LoRAを用いることで出力できるようになります。
v2.0からはPony系・Illustrious系のデータモデルに対応しています。一部のデータモデルでは、うまく再現されないことがあります。
2D・2.5Dイラスト系のデータモデル用で主にテストされており、実写系での効果については未確認です(使うことはできるようです)。
拙作データモデル「Breedシリーズ」と組み合わせると、高い効果を発揮します。サンプル画像もBreedシリーズ("Suggested Resources"セクションに掲載)で生成されています。
SD1系対応バージョンも公開しています。
LoRAを有効にするだけで発動します。階層調整済みですので、基本的には階層指定せず強度指定のみで大丈夫です。
必須ではありませんが、darknessやdark skyなど暗さを示すプロンプトを追加すると効果が高まります。
キャラクターの目を光らせる場合はglowing eyesを追加してください。
LoRA適用強度で暗さを調整できます。より暗くするには強度を上げてください。上限は2.0くらいになります。
他にもさまざまなSD1/SDXL用LoRAおよびデータモデルを公開していますので、HuggingFaceリポジトリまたはCivitAIの配布モデルリストをご覧ください。
掲載しているサンプル画像のプロンプトの一部では、NegPiP拡張機能を使ったマイナス強度指定を使用しています。
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
训练底模为noobaiXLNAIXL_epsilonPred11Version,下载前请考虑是否与您使用的模型兼容。
选了15张Dishwasher1910的插画中皮肤表现较白的图片进行训练的模型,所以我讲它命名为white skin
作为参考我把我朋友训练的dish与此lora进行对比,左边为此lora,右边为其他lora



Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
SDXL LoRA/LyCORIS works best on the model that it was trained on. I will release multiple versions for a few popular models. Feel free to request for artist or model.
Version naming convention: MajorVersion.MinorVersion [BaseModel]
MajorVersion: For big updates that can apply to any style LoRA. For example, a training parameter update.
MinorVersion: For small updates that only apply to this LoRA. For example, epoch selection, removing a certain image from dataset.
BaseModel: SDXL LoRA/LyCORIS works best on the model that it was trained on.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
(日本語説明は後半にあります)
The popular "Hotaru's LoRA series (for SD1)" is now also available for SDXL! More will be released gradually on CivitAI and HuggingFace.
This LoRA can be used to increase or decrease character's eyelashes.
This LoRA is compatible with Pony / Illustrious based checkpoints.
This LoRA is suitable for 2D-2.5D illustrations only and not suitable for photorealistic checkpoints. My "Breed" series checkpoints will make good outputs. Sample images are generated with "Breed" series listed on "Suggested Resources" section.
This LoRA is developed by a new training method called "ADDifT" (Japanese written article), implemented in TrainTrain extension.
It works by simply enabling LoRA. No extra prompts are necessary.
Positive weight increases eyelashes while negative weight decreases eyelashes.
More various other SD1/SDXL LoRA's and checkpoints are available on my HuggingFace repository or CivitAI models list. Please check it out.
SD1でご好評いただいていた十条蛍のLoRAシリーズが、SDXL対応や新作も加えて再登場します。HuggingFaceおよびCivitAIにて順次公開していきます。
キャラクターの睫毛を増加させたり減少させたりできます。
Pony系・Illustrious系のデータモデルで動作確認していますが、一部のデータモデルではうまく再現されないことがあります。
2D・2.5Dイラスト系のデータモデル用で、実写系には不向きです。拙作データモデル「Breedシリーズ」と組み合わせると、高い効果を発揮します。サンプル画像もBreedシリーズ("Suggested Resources"セクションに掲載)で生成されています。
このLoRAは、2025年3月発表の新しいLoRA学習方式である「ADDifT」(TrainTrain拡張機能で利用可能)を用いて開発されています。
LoRAを有効にするだけで発動します。階層調整済みですので、基本的には階層指定せず強度指定のみで大丈夫です。
プラス強度で睫毛が増加、マイナス強度で睫毛が減少します。
他にもさまざまなSD1/SDXL用LoRAおよびデータモデルを公開していますので、HuggingFaceリポジトリまたはCivitAIの配布モデルリストをご覧ください。
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
This is a concept LoRA to manipulate the camera angle. It provides a fixed perspective shot of the image. Intended for NSFW content but is not limited to that. Please do post pictures below, as I'm interested to see what everyone is able to make with this tool.
My first attempt at creating a concept LoRA and something I have been collecting images for a while as I come across them. Felt like I had enough to give it a shot and this is the result. I will need to collect more to update it in the future. Requires ADetailer and HiResFix to get good outputs. If prompting more than one subject inpainting will likely be required to fix it up.

I have expanded the size of the dataset used for training and have also categorized a section of the images to attempt to provide trigger words to manipulate the camera further. I was rough with it due to the sheer quantity of images used and limited time to spend pruning and cleaning up the tags. Due to that it could be improved, but I did manage to get part of the control that I wanted.


Removed some images and added new sources to the training dataset to attempt to have the LoRA not alter the style of the output as much as V2 or V1 did. Toyed around with tag weights to try and add further control. V3 is not strictly better than V2.



Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
This is a concept LoRA to manipulate the camera angle. It provides a fixed perspective shot of the image. Intended for NSFW content but is not limited to that. Please do post pictures below, as I'm interested to see what everyone is able to make with this tool.
My first attempt at creating a concept LoRA and something I have been collecting images for a while as I come across them. Felt like I had enough to give it a shot and this is the result. I will need to collect more to update it in the future. Requires ADetailer and HiResFix to get good outputs. If prompting more than one subject inpainting will likely be required to fix it up.

I have expanded the size of the dataset used for training and have also categorized a section of the images to attempt to provide trigger words to manipulate the camera further. I was rough with it due to the sheer quantity of images used and limited time to spend pruning and cleaning up the tags. Due to that it could be improved, but I did manage to get part of the control that I wanted.


Removed some images and added new sources to the training dataset to attempt to have the LoRA not alter the style of the output as much as V2 or V1 did. Toyed around with tag weights to try and add further control. V3 is not strictly better than V2.



Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Works quite well
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Requested model.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
This LoHa can partially fix the bad anatomy in NOOB-series models at high resolutions.
It is known that NOOB-series models are trained at a fixed resolution of 1024x1024. When generating higher-resolution t2i images or during high-definition i2i, the following problems may occur: very long limbs, very long waist, multiple heads, multiple characters, additional hands or feet, and so on.
This model is designed to fix these issues. Note that the model still has some drawbacks and will continue to be improved in the future.
该LoHa模型能够部分改善NOOB系模型在高分辨率下的人体结构问题。
我们知道,NOOB系模型是在固定的分辨率下(1024*1024)训练的,当生成大于该分辨率的图像,或者在高清修复的过程中,会出现以下问题:过长的肢体,过长的腰部,多头,复数人物,额外的手脚,等等问题。
该模型旨在修复这些问题。注意,该模型仍有缺陷,并将在未来继续优化。
このLoHaモデルは、NOOBシリーズのモデルにおける高解像度での解剖学的問題を部分的に解決できます。
NOOBシリーズのモデルは、1024x1024の固定解像度でトレーニングされていることが知られています。高解像度のt2i画像を生成したり、高精細なi2i修復を行う際に、次のような問題が発生することがあります:非常に長い手足、非常に長い腰、複数の頭、複数のキャラクター、余分な手や足など。
このモデルは、これらの問題を修正することを目的としています。ただし、モデルにはまだいくつかの欠点があり、今後改善が続けられる予定です。
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Support me on Ko-fi https://ko-fi.com/hinablue
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Another awesome JJK meme lora again
This was fun, though I couldn't get it to work better on Noob VPRED..
What does this LoRA includes:
Gojo's hollow purple frame from the anime ✔
Manga version ❌
Toji Fushiguro frames ❌
I recommend you to put scanlines on the negative!!
Have fun!
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Update 2025-01-03: Added contrast slider
I noticed that my previous Amnesia + Avilia LoRA always looks too warm, so I made some patch LoRAs to fix that.
No triggers needed. LoRA weights can be set from -2 to 2, depending on your use case and your base model, but please be careful that larger weights may change the result composition.
positive weights make pictures colder
positive weights make pictures less saturated
positive weights make pictures brighter
positive weights make pictures lower-contrast

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Update 2025-01-03: Added contrast slider
I noticed that my previous Amnesia + Avilia LoRA always looks too warm, so I made some patch LoRAs to fix that.
No triggers needed. LoRA weights can be set from -2 to 2, depending on your use case and your base model, but please be careful that larger weights may change the result composition.
positive weights make pictures colder
positive weights make pictures less saturated
positive weights make pictures brighter
positive weights make pictures lower-contrast

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Update 2025-01-03: Added contrast slider
I noticed that my previous Amnesia + Avilia LoRA always looks too warm, so I made some patch LoRAs to fix that.
No triggers needed. LoRA weights can be set from -2 to 2, depending on your use case and your base model, but please be careful that larger weights may change the result composition.
positive weights make pictures colder
positive weights make pictures less saturated
positive weights make pictures brighter
positive weights make pictures lower-contrast

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Update 2025-01-03: Added contrast slider
I noticed that my previous Amnesia + Avilia LoRA always looks too warm, so I made some patch LoRAs to fix that.
No triggers needed. LoRA weights can be set from -2 to 2, depending on your use case and your base model, but please be careful that larger weights may change the result composition.
positive weights make pictures colder
positive weights make pictures less saturated
positive weights make pictures brighter
positive weights make pictures lower-contrast

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Sharing merges using this LoRA, re-printing it to other platforms, are prohibited.
All cover images are directly from the vanilla (the original, not finetuned) base model in a1111, no upscale, no inpaint fixes, no any plugin, even no negative prompt. They demonstrate the effect of the LoRA, not clickbait. You can drop the images into a1111 to reproduce yourself, they have metadata.
(5/19/2025): illus v1.152
Continued to improve lighting, textures, and details.
Added 5K more photographs. Still, contains everything, except human. Covering all lighting conditions as much as possible. (From super bright to super dark)
Refactored my caption pipeline. All images now have natural captions from Google latest LLM. All lighting (brightness, color temperature, etc.) conditions are properly tagged. All anime characters are tagged by wd tagger v3 and Google LLM.
More data, so more training steps, as a result, stronger effect.
FAQ:
If you want 100% of the texture effects, avoid base models with an AI style (trained on AI images), because AI styles are super overfitted and will overwrite the texture instantly. FYI, the cover images are from the vanilla base model, and I only use vanilla models + artist style LoRAs.
How to know if it is AI style. No good method. Personally I look at hair (or other surfaces). The more plastic it feels (no texture, weird shiny reflections), the more AI style it may have.
If you got realistic faces on anime characters. Don't blame this LoRA. What it saw is what it learned. There is zero real human in dataset, so it has zero knowledge of realistic faces. Check whether your base model was merged with other realistic model.
.........
See more in the update log section.
.........
(3/2/2025): You can find the REAL me at TensorArt now. I've reclaimed all my models that were duped and faked by other accounts.
It's an all-in-one finetuned LoRA. If you apply it to NoobAI v1.1, then you will get my personal "finetuned" base model. (Why would you train a 6GB checkpoint if you can just train a 100MB LoRA? And you can also apply it to any model you want in no time.)
Same as full finetuned (trained, not merged) base models
Dataset is not small. (Comparing to a normal LoRA. Can't say big, there are many gigachads who like to finetune their models with millions images... Orz )
This LoRA is also trained in one go. No merging, so no conflicts (at least inside this LoRA).
The dataset only contains high resolution images. Zero AI image. So you can get texture and details beyond pixel level. Instead of a weird smooth plastic feeling.
It does not focus on a very unique art style, and won't dramatically change the image composition.
Cover images are the direct outputs from the vanilla (the original, not finetuned) base model in a1111-sd-webui, no upscale, no inpaint fixes, no negative prompt. They demonstrate the effect of the LoRA, not clickbait. You can drop the images into a1111 to reproduce yourself, they have metadata.
Share merges using this LoRA is prohibited. FYI, there are hidden trigger words to print invisible watermark. It works well even if the merge strength is 0.05. I coded the watermark and detector myself. I don't want to use it, but I can.
Remember to leave feedback in comment section. So everyone can see it. Don't write feedback in Civitai review system, it was so poorly designed, literally nobody can find and see the review.
Have fun.
Just apply it. No trigger words needed. Also it does not patch text encoders. So you don't have to set the patch strength for text encoder (in comfyui, etc.).
Strength 0.4~0.8.
Version prefix:
illus01 = Trained on Illustrious v0.1.
nbep11 = Trained on NoobAI e-pred v1.1
Which version to use?
Hard to tell. You should try both version. Models nowadays are just merges of merges and merges. You would never know what's truly inside your base model. Most model creators don't know either.
Fun fact (5/10/2025): 90% models that labeled as "illustrious" are actually NoobAi, if you calculate their weight similarities.
You can also just use both, with low strength each, many users reported this has noticeable better result.
Every image is hand-picked by me.
Only normal good looking things. No crazy art style.
No AI images, no watermarks, etc.
Only high resolution images. Avg pixels 3.37 MP, ~1800x1800.
2 main dataset:
a 2D/anime dataset with ~1k images. Character-focus. Natural poses. Natural body proportions. No exaggerated art, chibi, jojo pose, etc.
a real world photographs dataset with ~1k images. Contains nature, indoors, animals, buildings...many things, except human.
Why real world images? You can get better background, lighting, pixel level details/textures. There is no human in dataset so it won't affect characters.
I named the dataset Touching Grass. There is also a LoRA that was only trained on this photograph dataset. If you want something pure.
But I got realistic faces on my anime characters.
Well, don't blame this LoRA. What it saw is what it learned. It has zero knowledge of realistic faces. Most likely your base model was mixed with other realistic models.
Some ideas that was going to, or used to, be part of the Stabilizer. Now they are separated LoRAs. For better flexibility. Collection link: https://civitai.com/collections/8274233.
Touching Grass: Trained on and only on the photographs dataset (No anime dataset). Has stronger effect. Useful for gigachad users who like pure concepts and like to balance weights themselves.
Dark: It can fix the high bias in anime models that towards high brightness. Trained on low brightness images in the Touching Grass dataset. Also, no human in dataset. So does not affect style.
Example on WAI v13.

Contrast Controller: Control the contrast like using a slider in your monitor. Unlike other trained "contrast enhancer", the effect of this LoRA is stable, linear, and has zero side effect on style. (Not an exaggeration, it's really mathematically zero and linear. It was not from training.) Example on WAI v13.

Style Strength Controller: Or overfitting effect reducer. Also not from training, so zero side effect on style and mathematically linear effects. Can reduce all kinds of overfitting effects (bias on objects, brightness, etc.).
Effect test on Hassaku XL: The prompt has keyword "dark", but the model almost ignored it. Notice that: at strength 0.25 this LoRA reduces the bias of high brightness, and a weird smooth feeling on every surfaces, so the image feels more natural.
Differences between Stabilizer:
Stabilizer affects style. Because it was trained on real world data. It can "reduce" overfitting effects about texture, details and backgrounds, by adding them back.
Style Controller was not from training. It is more like "undo" the training for base model, so it will less-overfitted. It does not affect style. And can reduce all overfitting effects, like bias on brightness, objects.
New version == new stuff and new attempts != a better version for your base model.
You can check the "Update log" section to find old versions. It's ok to use different versions together just like mixing base models. As long as the sum of strengths does not > 1.
(5/19/2025): illus01 v1.152
Continued to improve lighting, textures, and details.
Added 5K more photographs. Still, contains everything, except human. Covering all lighting conditions as much as possible. (From super bright to super dark)
Refactored my caption pipeline. All images now have natural captions from Google latest LLM. All lighting (brightness, color temperature, etc.) conditions are properly tagged. All anime characters are tagged by wd tagger v3 and Google LLM.
More data, so more training steps, as a result, stronger effect.
(5/9/2025): nbep11 v0.205:
A quick fix of brightness and color issues in v0.198. Now it should not change brightness and colors so dramatically like a real photograph. v0.198 isn't bad, just creative, but too creative.
(5/7/2025): nbep11 v0.198:
Added more dark images. Less deformed body, background in dark environment.
Removed color and contrast enhancement. Because it's not needed anymore. Use Contrast Controller instead.
(4/25/2025): nbep11 v0.172.
Same new things in illus01 v1.93 ~ v1.121. Summary: New photographs dataset "Touching Grass". Better natural texture, background, lighting. Weaker character effects for better compatibility.
Better color accuracy and stability. (Comparing to nbep11 v0.160)
(4/17/2025): illus01 v1.121.
Rolled back to illustrious v0.1. illustrious v1.0 and newer versions were trained with AI images deliberately (maybe 30% of its dataset). Which is not ideal for LoRA training. I didn't notice until I read its paper.
Lower character style effect. Back to v1.23 level. Characters will have less details from this LoRA, but should have better compatibility. This is a trade-off.
Other things just same as below (v1.113).
(4/10/2025): illus11 v1.113 ❌.
Update: use this version only if you know your base model is based on Illustrious v1.1. Otherwise, use illus01 v1.121.
Trained on Illustrious v1.1.
New dataset "Touching Grass" added. Better natural texture, lighting and depth of field effect. Better background structural stability. Less deformed background, like deformed rooms, buildings.
Full natural language captions from LLM.
(3/30/2025): illus01 v1.93.
v1.72 was trained too hard. So I reduced it overall strength. Should have better compatibility.
(3/22/2025): nbep11 v0.160.
Same stuffs in illus v1.72.
(3/15/2025): illus01 v1.72
Same new texture and lighting dataset as mentioned in ani40z v0.4 below. More natural lighting and natural textures.
Added a small ~100 images dataset for hand enhancement, focusing on hand(s) with different tasks, like holding a glass or cup or something.
Removed all "simple background" images from dataset. -200 images.
Switched training tool from kohya to onetrainer. Changed LoRA architecture to DoRA.
(3/4/2025) ani40z v0.4
Trained on Animagine XL 4.0 ani40zero.
Added ~1k dataset focusing on natural dynamic lighting and real world texture.
More natural lighting and natural textures.
Above: Added more real world images. More natural texture and details.
ani04 v0.1
Init version for Animagine XL 4.0. Mainly to fix Animagine 4.0 brightness issues. Better and higher contrast.
illus01 v1.23
nbep11 v0.138
Added some furry/non-human/other images to balance the dataset.
nbep11 v0.129
bad version, effect is too weak, just ignore it
nbep11 v0.114
Implemented "Full range colors". It will automatically balance the things towards "normal and good looking". Think of this as the "one-click photo auto enhance" button in most of photo editing tools. One downside of this optimization: It prevents high bias. For example, you want 95% of the image to be black, and 5% bright, instead of 50/50%
Added a little bit realistic data. More vivid details, lighting, less flat colors.
illus01 v1.7
nbep11 v0.96
More training images.
Then finetuned again on a small "wallpaper" dataset (Real game wallpapers, the highest quality I could find. ~100 images). More improvements in details (noticeable in skin, hair) and contrast.
Above: Has a weak default style.
nbep11 v0.58
More images. Change the training parameters as close as to NoobAI base model.
illus01 v1.3
nbep11 v0.30
More images.
nbep11 v0.11: Trained on NoobAI epsilon pred v1.1.
Improved dataset tags. Improved LoRA structure and weight distribution. Should be more stable and have less impact on image composition.
illus01 v1.1
Trained on illustriousXL v0.1.
nbep10 v0.10
Trained on NoobAI epsilon pred v1.0.

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Sharing merges using this LoRA, re-printing it to other platforms, are prohibited.
All cover images are directly from the vanilla (the original, not finetuned) base model in a1111, no upscale, no inpaint fixes, no any plugin, even no negative prompt. They demonstrate the effect of the LoRA, not clickbait. You can drop the images into a1111 to reproduce yourself, they have metadata.
(5/19/2025): illus v1.152
Continued improving lighting, textures, and details.
Added 5K more photographs. Still, contains everything, except human. Covering all lighting conditions as much as possible. (From super bright to super dark)
Refactored my caption pipeline. All images now have natural captions from Google latest LLM. All lighting (brightness, color temperature, etc.) conditions are properly tagged. All anime characters are tagged by wd tagger v3 and Google LLM.
More data, so more training steps, as a result, stronger effect.
FAQ:
If you want 100% of the texture effect, avoid base models with an AI style (trained on AI images), because AI styles are super-overfitted styles and will override the texture instantly. FYI, the cover images are from the vanilla base model, and I only use vanilla models + artist style LoRAs.
How to know if it is AI style. No good method. Personally I look at hair (or other surfaces). The more plastic it feels (no texture, weird shiny reflections), the more AI style it may have.
If you got realistic faces on anime characters. Don't blame this LoRA. What it saw is what it learned. There is zero real human in dataset, so it has zero knowledge of realistic faces. Check whether your base model was merged with other realistic model.
.........
See more in the update log section.
.........
(3/2/2025): You can find the REAL me at TensorArt now. I've reclaimed all my models that were duped and faked by other accounts.
It's an all-in-one finetuned LoRA. If you apply it to NoobAI v1.1, then you will get my personal "finetuned" base model. (Why would you train a 6GB checkpoint if you can just train a 100MB LoRA? And you can also apply it to any model you want in no time.)
Same as full finetuned (trained, not merged) base models
Dataset is not small. (Comparing to a normal LoRA. Can't say big, there are many gigachads who like to finetune their models with millions images... Orz )
This LoRA is also trained in one go. No merging, so no conflicts (at least inside this LoRA).
The dataset only contains high resolution images. Zero AI image. So you can get texture and details beyond pixel level. Instead of a weird smooth plastic feeling.
It does not focus on a very unique art style, and won't dramatically change the image composition.
Cover images are the direct outputs from the vanilla (the original, not finetuned) base model in a1111-sd-webui, no upscale, no inpaint fixes, no negative prompt. They demonstrate the effect of the LoRA, not clickbait. You can drop the images into a1111 to reproduce yourself, they have metadata.
Share merges using this LoRA is prohibited. FYI, there are hidden trigger words to print invisible watermark. It works well even if the merge strength is 0.05. I coded the watermark and detector myself. I don't want to use it, but I can.
Remember to leave feedback in comment section. So everyone can see it. Don't write feedback in Civitai review system, it was so poorly designed, literally nobody can find and see the review.
Have fun.
Just apply it. No trigger words needed. Also it does not patch text encoders. So you don't have to set the patch strength for text encoder (in comfyui, etc.).
Strength 0.4~0.8.
Version prefix:
illus01 = Trained on Illustrious v0.1.
nbep11 = Trained on NoobAI e-pred v1.1
Which version to use?
Hard to tell. You should try both versions. Models nowadays are just merges of merges of merges. You would never know what's truly inside your base model. Most model creators don't know either.
Fun fact (5/10/2025): 90% of models labeled as "illustrious" are actually NoobAI, if you calculate their weight similarities.
You can also just use both, with low strength each, many users reported this has noticeable better result.
Every image is hand-picked by me.
Only normal good looking things. No crazy art style.
No AI images, no watermarks, etc.
Only high resolution images. Avg pixels 3.37 MP, ~1800x1800.
2 main dataset:
a 2D/anime dataset with ~1k images. Character-focus. Natural poses. Natural body proportions. No exaggerated art, chibi, jojo pose, etc.
a real world photographs dataset with ~1k images. Contains nature, indoors, animals, buildings...many things, except human.
Why real world images? You can get better background, lighting, pixel level details/textures. There is no human in dataset so it won't affect characters.
I named the dataset Touching Grass. There is also a LoRA that was only trained on this photograph dataset. If you want something pure.
But I got realistic faces on my anime characters.
Well, don't blame this LoRA. What it saw is what it learned. It has zero knowledge of realistic faces. Most likely your base model was mixed with other realistic models.
Some ideas that were going to be, or used to be, part of the Stabilizer. Now they are separate LoRAs, for better flexibility. Collection link: https://civitai.com/collections/8274233.
Touching Grass: Trained on and only on the photographs dataset (No anime dataset). Has stronger effect. Useful for gigachad users who like pure concepts and like to balance weights themselves.
Dark: It can fix the high bias in anime models that towards high brightness. Trained on low brightness images in the Touching Grass dataset. Also, no human in dataset. So does not affect style.
Example on WAI v13.

Contrast Controller: Control the contrast like using a slider in your monitor. Unlike other trained "contrast enhancer", the effect of this LoRA is stable, linear, and has zero side effect on style. (Not an exaggeration, it's really mathematically zero and linear. It was not from training.) Example on WAI v13.

Style Strength Controller: Or overfitting effect reducer. Also not from training, so zero side effect on style and mathematically linear effects. Can reduce all kinds of overfitting effects (bias on objects, brightness, etc.).
Effect test on Hassaku XL: The prompt has keyword "dark", but the model almost ignored it. Notice that: at strength 0.25 this LoRA reduces the bias of high brightness, and a weird smooth feeling on every surfaces, so the image feels more natural.
Differences between Stabilizer:
Stabilizer affects style. Because it was trained on real world data. It can "reduce" overfitting effects about texture, details and backgrounds, by adding them back.
Style Controller was not from training. It is more like "undo" the training for base model, so it will less-overfitted. It does not affect style. And can reduce all overfitting effects, like bias on brightness, objects.
New version == new stuff and new attempts != better version for your base model.
You can check the "Update log" section to find old versions. It's ok to use different versions together just like mixing base models. As long as the sum of strengths does not > 1.
(5/19/2025): illus01 v1.152
Continual to improve lighting and textures and details.
Added 5K more photographs. Still, contains everything, except human. Covering all lighting conditions as much as possible. (From super bright to super dark)
Refactored my caption pipeline. All images now have natural captions from Google latest LLM. All lighting (brightness, color temperature, etc.) conditions are properly tagged. All anime characters are tagged by wd tagger v3 and Google LLM.
More data, so more training steps, as a result, stronger effect.
(5/9/2025): nbep11 v0.205:
A quick fix of brightness and color issues in v0.198. Now it should not change brightness and colors so dramatically like a real photograph. v0.198 isn't bad, just creative, but too creative.
(5/7/2025): nbep11 v0.198:
Added more dark images. Less deformed body, background in dark environment.
Removed color and contrast enhancement. Because it's not needed anymore. Use Contrast Controller instead.
(4/25/2025): nbep11 v0.172.
Same new things in illus01 v1.93 ~ v1.121. Summary: New photographs dataset "Touching Grass". Better natural texture, background, lighting. Weaker character effects for better compatibility.
Better color accuracy and stability. (Comparing to nbep11 v0.160)
(4/17/2025): illus01 v1.121.
Rolled back to illustrious v0.1. illustrious v1.0 and newer versions were trained with AI images deliberately (maybe 30% of its dataset). Which is not ideal for LoRA training. I didn't notice until I read its paper.
Lower character style effect. Back to v1.23 level. Characters will have less details from this LoRA, but should have better compatibility. This is a trade-off.
Other things just same as below (v1.113).
(4/10/2025): illus11 v1.113 ❌.
Update: use this version only if you know your base model is based on Illustrious v1.1. Otherwise, use illus01 v1.121.
Trained on Illustrious v1.1.
New dataset "Touching Grass" added. Better natural texture, lighting and depth of field effect. Better background structural stability. Less deformed background, like deformed rooms, buildings.
Full natural language captions from LLM.
(3/30/2025): illus01 v1.93.
v1.72 was trained too hard, so I reduced its overall strength. It should have better compatibility.
(3/22/2025): nbep11 v0.160.
Same stuffs in illus v1.72.
(3/15/2025): illus01 v1.72
Same new texture and lighting dataset as mentioned in ani40z v0.4 below. More natural lighting and natural textures.
Added a small ~100 images dataset for hand enhancement, focusing on hand(s) with different tasks, like holding a glass or cup or something.
Removed all "simple background" images from dataset. -200 images.
Switched training tool from kohya to onetrainer. Changed LoRA architecture to DoRA.
(3/4/2025) ani40z v0.4
Trained on Animagine XL 4.0 ani40zero.
Added ~1k dataset focusing on natural dynamic lighting and real world texture.
More natural lighting and natural textures.
Above: Added more real world images. More natural texture and details.
ani04 v0.1
Init version for Animagine XL 4.0. Mainly to fix Animagine 4.0 brightness issues. Better and higher contrast.
illus01 v1.23
nbep11 v0.138
Added some furry/non-human/other images to balance the dataset.
nbep11 v0.129
bad version, effect is too weak, just ignore it
nbep11 v0.114
Implemented "Full range colors". It will automatically balance the things towards "normal and good looking". Think of this as the "one-click photo auto enhance" button in most of photo editing tools. One downside of this optimization: It prevents high bias. For example, you want 95% of the image to be black, and 5% bright, instead of 50/50%
Added a little bit realistic data. More vivid details, lighting, less flat colors.
illus01 v1.7
nbep11 v0.96
More training images.
Then finetuned again on a small "wallpaper" dataset (Real game wallpapers, the highest quality I could find. ~100 images). More improvements in details (noticeable in skin, hair) and contrast.
Above: Has a weak default style.
nbep11 v0.58
More images. Change the training parameters as close as to NoobAI base model.
illus01 v1.3
nbep11 v0.30
More images.
nbep11 v0.11: Trained on NoobAI epsilon pred v1.1.
Improved dataset tags. Improved LoRA structure and weight distribution. Should be more stable and have less impact on image composition.
illus01 v1.1
Trained on illustriousXL v0.1.
nbep10 v0.10
Trained on NoobAI epsilon pred v1.0.

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Lower the brightness. And make it more photography-ish.
I trained this LoRA because anime models have a very high bias toward bright images. So even if you prompt something dark, it is still not dark enough — most of the image stays very bright.
However, I can't find a "dark" LoRA that is for natural dark. Because existing LoRAs are
Either trained on bright images, so you have to use negative strength to go dark, which is doable, but the quality is really bad.
Or focusing on pure black level, trained on pure black anime images, which causes style shifting, losing details, sometimes deformed faces and backgrounds.
This LoRA is trained on a sub dataset of "Touching Grass", only low brightness real world images.
Only environment, no human in dataset. So it will not "pollute" your base model style. Can be applied on both pure anime and realistic models.
It's dark, not black. The training images still have a quite wide brightness range and are full of details. E.g. cityscape at night and full of small building lights. So the model will know what it should do, just go dark, rather than go crazily black. You will not get deformed bodies, faces or backgrounds. Instead, it makes the dark environment more stable, and adds more details.
Useful if you:
want something very dark, darker than prompts can achieve.
want to lower the overall brightness and create a photography feeling.
Trained on illustrious v0.1, but I tested it on NoobAI as well. The effects are very good, so I don't think we need a separate NoobAI version.
All cover images directly come from a1111, zero modification or fix, no upscale, even no negative prompt.

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Sharing merges using this LoRA, re-printing it to other platforms, are prohibited.
All cover images are directly from the vanilla (the original, not finetuned) base model in a1111, no upscale, no inpaint fixes, no any plugin, even no negative prompt. They demonstrate the effect of the LoRA, not clickbait. You can drop the images into a1111 to reproduce yourself, they have metadata.
(5/19/2025): illus v1.152
Continued improving lighting, textures, and details.
Added 5K more photographs. Still, contains everything, except human. Covering all lighting conditions as much as possible. (From super bright to super dark)
Refactored my caption pipeline. All images now have natural captions from Google latest LLM. All lighting (brightness, color temperature, etc.) conditions are properly tagged. All anime characters are tagged by wd tagger v3 and Google LLM.
More data, so more training steps, as a result, stronger effect.
FAQ:
If you want 100% of the texture effect, avoid base models with an AI style (trained on AI images), because AI styles are super-overfitted styles and will override the texture instantly. FYI, the cover images are from the vanilla base model, and I only use vanilla models + artist style LoRAs.
How to know if it is AI style. No good method. Personally I look at hair (or other surfaces). The more plastic it feels (no texture, weird shiny reflections), the more AI style it may have.
If you got realistic faces on anime characters. Don't blame this LoRA. What it saw is what it learned. There is zero real human in dataset, so it has zero knowledge of realistic faces. Check whether your base model was merged with other realistic model.
.........
See more in the update log section.
.........
(3/2/2025): You can find the REAL me at TensorArt now. I've reclaimed all my models that were duped and faked by other accounts.
It's an all-in-one finetuned LoRA. If you apply it to NoobAI v1.1, then you will get my personal "finetuned" base model. (Why would you train a 6GB checkpoint if you can just train a 100MB LoRA? And you can also apply it to any model you want in no time.)
Same as full finetuned (trained, not merged) base models
Dataset is not small. (Comparing to a normal LoRA. Can't say big, there are many gigachads who like to finetune their models with millions images... Orz )
This LoRA is also trained in one go. No merging, so no confliction (at least inside this LoRA).
The dataset only contains high resolution images. Zero AI image. So you can get texture and details beyond pixel level. Instead of a weird smooth plastic feeling.
It does not focus on a very unique art style, and won't dramatically change the image composition.
Cover images are the direct outputs from the vanilla (the original, not finetuned) base model in a1111-sd-webui, no upscale, no inpaint fixes, no negative prompt. They demonstrate the effect of the LoRA, not clickbait. You can drop the images into a1111 to reproduce yourself, they have metadata.
Share merges using this LoRA is prohibited. FYI, there are hidden trigger words to print invisible watermark. It works well even if the merge strength is 0.05. I coded the watermark and detector myself. I don't want to use it, but I can.
Remember to leave feedback in comment section. So everyone can see it. Don't write feedback in Civitai review system, it was so poorly designed, literally nobody can find and see the review.
Have fun.
Just apply it. No trigger words needed. Also it does not patch text encoders. So you don't have to set the patch strength for text encoder (in comfyui, etc.).
Strength 0.4~0.8.
Version prefix:
illus01 = Trained on Illustrious v0.1.
nbep11 = Trained on NoobAI e-pred v1.1
Which version to use?
Hard to tell. You should try both versions. Models nowadays are just merges of merges of merges. You would never know what's truly inside your base model. Most model creators don't know either.
Fun fact (5/10/2025): 90% of models labeled as "illustrious" are actually NoobAI, if you calculate their weight similarities.
You can also just use both, with low strength each, many users reported this has noticeable better result.
Every image is hand-picked by me.
Only normal good looking things. No crazy art style.
No AI images, no watermarks, etc.
Only high resolution images. Avg pixels 3.37 MP, ~1800x1800.
2 main dataset:
a 2D/anime dataset with ~1k images. Character-focus. Natural poses. Natural body proportions. No exaggerated art, chibi, jojo pose, etc.
a real world photographs dataset with ~1k images. Contains nature, indoors, animals, buildings...many things, except human.
Why real world images? You can get better background, lighting, pixel level details/textures. There is no human in dataset so it won't affect characters.
I named the dataset Touching Grass. There is also a LoRA that was only trained on this photograph dataset. If you want something pure.
But I got realistic faces on my anime characters.
Well, don't blame this LoRA. What it saw is what it learned. It has zero knowledge of realistic faces. Most likely your base model was mixed with other realistic models.
Some ideas that were going to be, or used to be, part of the Stabilizer. Now they are separate LoRAs, for better flexibility. Collection link: https://civitai.com/collections/8274233.
Touching Grass: Trained on and only on the photographs dataset (No anime dataset). Has stronger effect. Useful for gigachad users who like pure concepts and like to balance weights themselves.
Dark: It can fix the high bias in anime models that towards high brightness. Trained on low brightness images in the Touching Grass dataset. Also, no human in dataset. So does not affect style.
Example on WAI v13.

Contrast Controller: Control the contrast like using a slider in your monitor. Unlike other trained "contrast enhancer", the effect of this LoRA is stable, linear, and has zero side effect on style. (Not an exaggeration, it's really mathematically zero and linear. It was not from training.) Example on WAI v13.

Style Strength Controller: Or overfitting effect reducer. Also not from training, so zero side effect on style and mathematically linear effects. Can reduce all kinds of overfitting effects (bias on objects, brightness, etc.).
Effect test on Hassaku XL: The prompt has keyword "dark", but the model almost ignored it. Notice that: at strength 0.25 this LoRA reduces the bias of high brightness, and a weird smooth feeling on every surfaces, so the image feels more natural.
Differences between Stabilizer:
Stabilizer affects style. Because it was trained on real world data. It can "reduce" overfitting effects about texture, details and backgrounds, by adding them back.
Style Controller was not from training. It is more like "undo" the training for base model, so it will less-overfitted. It does not affect style. And can reduce all overfitting effects, like bias on brightness, objects.
New version == new stuff and new attempts != better version for your base model.
You can check the "Update log" section to find old versions. It's ok to use different versions together just like mixing base models. As long as the sum of strengths does not > 1.
(5/19/2025): illus01 v1.152
Continual to improve lighting and textures and details.
Added 5K more photographs. Still, contains everything, except human. Covering all lighting conditions as much as possible. (From super bright to super dark)
Refactored my caption pipeline. All images now have natural captions from Google latest LLM. All lighting (brightness, color temperature, etc.) conditions are properly tagged. All anime characters are tagged by wd tagger v3 and Google LLM.
More data, so more training steps, as a result, stronger effect.
(5/9/2025): nbep11 v0.205:
A quick fix of brightness and color issues in v0.198. Now it should not change brightness and colors so dramatically like a real photograph. v0.198 isn't bad, just creative, but too creative.
(5/7/2025): nbep11 v0.198:
Added more dark images. Less deformed body, background in dark environment.
Removed color and contrast enhancement. Because it's not needed anymore. Use Contrast Controller instead.
(4/25/2025): nbep11 v0.172.
Same new things in illus01 v1.93 ~ v1.121. Summary: New photographs dataset "Touching Grass". Better natural texture, background, lighting. Weaker character effects for better compatibility.
Better color accuracy and stability. (Comparing to nbep11 v0.160)
(4/17/2025): illus01 v1.121.
Rolled back to illustrious v0.1. illustrious v1.0 and newer versions were trained with AI images deliberately (maybe 30% of its dataset). Which is not ideal for LoRA training. I didn't notice until I read its paper.
Lower character style effect. Back to v1.23 level. Characters will have less details from this LoRA, but should have better compatibility. This is a trade-off.
Other things just same as below (v1.113).
(4/10/2025): illus11 v1.113 ❌.
Update: use this version only if you know your base model is based on Illustrious v1.1. Otherwise, use illus01 v1.121.
Trained on Illustrious v1.1.
New dataset "Touching Grass" added. Better natural texture, lighting and depth of field effect. Better background structural stability. Less deformed background, like deformed rooms, buildings.
Full natural language captions from LLM.
(3/30/2025): illus01 v1.93.
v1.72 was trained too hard, so I reduced its overall strength. It should have better compatibility.
(3/22/2025): nbep11 v0.160.
Same stuffs in illus v1.72.
(3/15/2025): illus01 v1.72
Same new texture and lighting dataset as mentioned in ani40z v0.4 below. More natural lighting and natural textures.
Added a small ~100 images dataset for hand enhancement, focusing on hand(s) with different tasks, like holding a glass or cup or something.
Removed all "simple background" images from dataset. -200 images.
Switched training tool from kohya to onetrainer. Changed LoRA architecture to DoRA.
(3/4/2025) ani40z v0.4
Trained on Animagine XL 4.0 ani40zero.
Added ~1k dataset focusing on natural dynamic lighting and real world texture.
More natural lighting and natural textures.
Above: Added more real world images. More natural texture and details.
ani04 v0.1
Init version for Animagine XL 4.0. Mainly to fix Animagine 4.0 brightness issues. Better and higher contrast.
illus01 v1.23
nbep11 v0.138
Added some furry/non-human/other images to balance the dataset.
nbep11 v0.129
bad version, effect is too weak, just ignore it
nbep11 v0.114
Implemented "Full range colors". It will automatically balance things toward "normal and good looking". Think of this as the "one-click photo auto enhance" button in most photo editing tools. One downside of this optimization: it prevents high bias. For example, if you want 95% of the image to be black and 5% bright, it will be pushed toward 50/50 instead.
Added a little bit realistic data. More vivid details, lighting, less flat colors.
illus01 v1.7
nbep11 v0.96
More training images.
Then finetuned again on a small "wallpaper" dataset (Real game wallpapers, the highest quality I could find. ~100 images). More improvements in details (noticeable in skin, hair) and contrast.
Above: Has a weak default style.
nbep11 v0.58
More images. Changed the training parameters to be as close as possible to the NoobAI base model.
illus01 v1.3
nbep11 v0.30
More images.
nbep11 v0.11: Trained on NoobAI epsilon pred v1.1.
Improved dataset tags. Improved LoRA structure and weight distribution. Should be more stable and have less impact on image composition.
illus01 v1.1
Trained on illustriousXL v0.1.
nbep10 v0.10
Trained on NoobAI epsilon pred v1.0.

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
This LoRA for enhancing the depiction of light in darkness.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
If you like my work, drop a 5-star review and hit the heart icon. It's free and keeps me motivated.
Support my work by joining any one of them and get early access to all my upcoming loras and other perks such as fan requests and Discord role.
check the images for prompts
use lora at 0.7-1
Adetailer for faces
Img2img upscale
4x-ultra sharp
comment your idea or request

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
You can support my work by posting your images here, by tipping Buzz, and by leaving a like if you enjoyed it! Get exclusive NSFW models at DeviantArt or Booth.pm. Follow for more!
Simple and subtle marl texture for clothes, extending the concept of my leggings LoRA.
Tags: patterned
E.g. patterned leggings OR patterned, leggings
The training was tailored for Illustrious anime models (e.g. WAI-NSFW, Hassaku XL, Prefect illustrious XL). I recommend using at least the quality tag "masterpiece" in your prompt. Recommended parameters: Euler a, 20-30 steps, CFG 4-7.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Based on the Danbooru tag, the idea is that you see a celeb on tv while being fucked in the room,
Useful extra tags: singing on tv, matching outfits, idol outfit,
see examples:

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Image style slider.
+ Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Not recommended for realistic model
Main tag : mtu virus,
Secondary tag : multiple views,
LoRA str : 1.0-1.2
Denoising str : 0.25
Training Powershell info :
DIM/Alpha : 8/8
Train batch size : 2
resolution : 768
mix/save_precision = BF16
optimizer_type = Lion
GPU : RTX 4060 16GB 160W
Torch 2.51 + cu12.1 + Xformers + gradient_checkpointing
Vram used : 11.3GB
Speed : 1.47sec/it

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
If you like my work, drop a 5-star review and hit the heart icon. It's free and keeps me motivated.
Support my work by joining any one of them and get early access to all my upcoming loras and other perks such as fan requests and Discord role.
check the images for prompts
sweet spot for the lora at -3 to 3
Adetailer for faces
Img2img upscale
4x-ultra sharp
comment your idea or request

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
(4/15/2025): Training on illustrious v1.1 is a mistake.
Next version will roll back to illustrious v0.1.
Guys, I can't test everything before dropping a new release. Next time if something is messed up, spamming the comment section to let me know.
(4/10/2025): illus11 v1.113.
Trained on Illustrious v1.1. Newer models should already be merged with, or based on, Illustrious v1.1, since it became downloadable many days ago.
New dataset "Touching Grass" added. ~1K real world S-tier photographs. Very diverse and creative. Good at lighting and depth of field effect. Same as previous, no human.
Full natural language captions from LLM.
Now contains 90% (learning weight perspective) real world knowledge. Why so much? Because it is the only way to add natural details and add diversity to overtrained anime models.
Slightly higher contrast. For example, the cover image for this version has a (1,1,1) black background and (255,255,255) snow/light dots. Highest contrast achieved. But unfortunately, this isn't guaranteed on all base models.
(4/3/2025): New update plan:
New added dataset/aspect will be trained as "sub" LoRAs. So if things go well, this dataset will join the Stabilizer. If you like raw and new things and like to balance weight yourself, make sure to check them out and leave feedback. I will add those new sub LoRAs to the collection so you can follow it to get notifications. FYI, you won't get notifications by just following this model page when I update the page info.
(4/3/2025): New sub LoRA:
"Touching Grass". This LoRA is trained on a new sub dataset contains ~1K real world S-tier photographs. It contains nature, indoors, animals, buildings...many things, except humans. Has much higher quality and diversity than what's currently in Stabilizer dataset. Also has natural captions from LLM. Depends on your usages. It can bring pixel level details. Can significantly improve background structural stability. Less deformed background, like deformed rooms, buildings. Or "fix" overtrained/overfitted models.
Quick comparison on Wai v13. With/without.


(3/30/2025): illus v1.93. v1.72 was trained too hard. So I reduced its overall strength. Should have better compatibility.
.........
See more in the update log section.
.........
(3/2/2025): You can find the REAL me at TensorArt now. I've reclaimed all my models that were duped and faked by other accounts.
Just a personal fun coding project. The name "stabilizer" is a bait now. There's no such definition of "stable". (The name used to mean "a better and stable default art style for vanilla NoobAI", back when NoobAI was released.)
It can improve character's overall details, hands, backgrounds, natural lighting, brightness, contrast.....
The advantage of doing this all-in-one thing is that you don't have to stack tons of different LoRAs for each of those aspects. So there is no conflict that might burn or blow up your output. (All conflicts in the dataset will be well averaged out to zero in one training)
Also, this LoRA was trained in a way that will only modify final details. It will not dramatically change the base model output. So it's compatible with basically all models.
Cover images might look mediocre or bad, because they are the direct outputs from the vanilla (not finetuned) base model in a1111-sd-webui with default settings, simplest prompts, no pipeline inpaint fixes, even no negative prompt. They demonstrate the effect of the LoRA, not clickbait.
Sharing merges using this LoRA is prohibited. FYI, there are hidden trigger words to print invisible watermark. It works well even if the merge strength is 0.05. I coded the watermark and detector myself. I don't want to use it, but I can.
Recommended strength: 0.4 for 2.5D/realistic models. 0.7 for pure flat colors anime models.
No trigger words needed.
You don't have to set the patch strength for text encoder. This LoRA does not patch it.
Recommended -30% of your current CFG scale. This LoRA will increase contrast so you can lower CFG scale for more details.
Version prefix:
illus01/11 = illustriousXL v0.1/v1.1
nbep11 = NoobAI e-pred v1.1 (compatible with v-pred)
New version == new stuff and a new attempt != a better version for your base model.
You can check the "Update log" section to find old versions.
Tldr: Only normal good looking things. No crazy art style. Not small (3k images, I won't brag it's big, there are many gigachads who like to finetune their models with millions of images).
Every image is hand-picked by me. No AI images, which will cause overfit and wipe out all high frequency details instantly. No watermarks, links, etc...
The main training dataset contains ~1k 2D images:
Character-focus. (The characters take up most of the image. I also cropped and rotated many images if necessary.)
Clear characters. (Clear character lines, hands, faces, eyes, no obstruction, no blur effect, etc.)
Natural poses. Natural body proportions. (No exaggerated art, chibi, jojo pose, etc.)
High quality and full of details. ~ Wallpaper level.
And a sub dataset called "Touching Grass". Contains ~1K real world S-tier photographs. It contains nature, indoors, animals, buildings...many things, except humans. It can balance the main dataset, add more natural details and avoid overfitting. Also with natural language captions from LLM. There is a "sub" LoRA that was only trained on this dataset. (Link above)
Implemented since nbep11 v0.114.
It will automatically balance the things towards "normal and good looking". Think of this as the "one-click photo auto enhance" button in most photo editing tools.
Pros: Big improvements in brightness, contrast and CFG stability. Now you can get high contrast and full color range (almost 0~255) at CFG 4~5 (v-pred is CFG 3~3.5, assuming you are using Euler and Normal Schedule) without oversaturation and deformed image.
Cons: Normal == Not suitable for crazy bias effect. For example, you want 95% of the image to be pure dark and only 5% to be bright, instead of 50/50%.
(3/30/2025): illus v1.93.
v1.72 was trained too hard. So I reduced its overall strength. Should have better compatibility.
(3/22/2025): Noobai epred v0.160.
Same stuffs in illus v1.72.
(3/15/2025): illus v1.72
This is a quite big update.
Same new texture and lighting dataset as mentioned in ani40z v0.4 below. Brings RTX level natural lighting and super detailed texture to 2D characters. Tags that related to some kind of light sources, such as "lamp", "daylight", "soft lighting", may bring better lighting effect.
Other small changes:
Added a small ~100 images dataset for hand enhancement, focusing on hand(s) with different tasks, like holding a glass or cup or something. Can't say whether it's useful, as popular checkpoints already have similar enhancement. Anyways, the more, the better.
Removed all "simple background" images from dataset. -200 images.
Switched training tool from kohya to onetrainer. Changed LoRA architecture to DoRA.
(3/4/2025) ani40z v0.4
Trained on ani40zero.
The plan was to bring RTX natural lighting and super detailed texture to 2D characters.
So I prepared a ~1k dataset focusing on natural dynamic lighting and real world texture.
It was trained in a way that will only add small final details. So it won't blow up your output and create a realistic 3D character.
ani04 v0.1
Let's give Animagine 4.0 a try. Init version. In my test it fixed Animagine 4.0 brightness issues and brings much better contrast. Maybe too optimistic. Anyway this is a LoRA so you always can adjust the strength as you want. CFG 5 and LoRA strength 0.5 seems the best for most cases.
illus v1.23
nbep11 v0.138
Added some furry/non-human/other images to balance the dataset.
nbep11 v0.129
bad version, effect is too weak, just ignore it
nbep11 v0.114
Implemented "Full range colors". See the section above.
Added a little bit realistic data. More vivid details, illumination, less flat colors.
illus v1.7
nbep11 v0.96
More training images. Then finetuned again on a small "wallpaper" dataset (Real wallpapers, the highest quality I could find. Only ~100 images for now). More improvements in details (noticeable in skin, hair) and contrast.
nbep11 v0.58
More images. Changed the training parameters to be as close as possible to the NoobAI base model.
illus v1.3
nbep11 v0.30
More images.
nbep11 v0.11: Trained on NoobAI epsilon pred v1.1.
Improved dataset tags. Improved LoRA structure and weight distribution. Should be more stable and have less impact on image composition.
illus v1.1
Trained on illustriousXL v0.1.
nbep10 v0.10
Trained on NoobAI epsilon pred v1.0.
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Sharing merges using this LoRA, re-printing it to other platforms, are prohibited.
All cover images are directly from the vanilla (the original, not finetuned) base model in a1111, no upscale, no inpaint fixes, no any plugin, even no negative prompt. They demonstrate the effect of the LoRA, not clickbait. You can drop the images into a1111 to reproduce yourself, they have metadata.
(5/19/2025): illus v1.152
Continued improving lighting, textures and details.
Added 5K more photographs. Still, contains everything, except human. Covering all lighting conditions as much as possible. (From super bright to super dark)
Refactored my caption pipeline. All images now have natural captions from Google latest LLM. All lighting (brightness, color temperature, etc.) conditions are properly tagged. All anime characters are tagged by wd tagger v3 and Google LLM.
More data, so more training steps, as a result, stronger effect.
FAQ:
If you want 100% of the texture effects, avoid base models with an AI style (trained on AI images), because AI styles are super overfitted and will override the texture instantly. FYI, cover images are from the vanilla base model. And I only use vanilla models + artist style LoRAs.
How to know if it is AI style. No good method. Personally I look at hair (or other surfaces). The more plastic it feels (no texture, weird shiny reflections), the more AI style it may have.
If you got realistic faces on anime characters. Don't blame this LoRA. What it saw is what it learned. There is zero real human in dataset, so it has zero knowledge of realistic faces. Check whether your base model was merged with other realistic model.
.........
See more in the update log section.
.........
(3/2/2025): You can find the REAL me at TensorArt now. I've reclaimed all my models that were duped and faked by other accounts.
It's an all-in-one finetuned LoRA. If you apply it to NoobAI v1.1, then you will get my personal "finetuned" base model. (Why would you train a 6GB checkpoint if you can just train a 100MB LoRA? And you can also apply it to any model you want in no time.)
Same as full finetuned (trained, not merged) base models
Dataset is not small. (Comparing to a normal LoRA. Can't say big, there are many gigachads who like to finetune their models with millions images... Orz )
This LoRA is also trained in one go. No merging, so no conflicts (at least inside this LoRA).
The dataset only contains high resolution images. Zero AI image. So you can get texture and details beyond pixel level. Instead of a weird smooth plastic feeling.
It does not focus on a very unique art style, and won't dramatically change the image composition.
Cover images are the direct outputs from the vanilla (the original, not finetuned) base model in a1111-sd-webui, no upscale, no inpaint fixes, no negative prompt. They demonstrate the effect of the LoRA, not clickbait. You can drop the images into a1111 to reproduce yourself, they have metadata.
Sharing merges using this LoRA is prohibited. FYI, there are hidden trigger words to print invisible watermark. It works well even if the merge strength is 0.05. I coded the watermark and detector myself. I don't want to use it, but I can.
Remember to leave feedback in comment section. So everyone can see it. Don't write feedback in Civitai review system, it was so poorly designed, literally nobody can find and see the review.
Have fun.
Just apply it. No trigger words needed. Also it does not patch text encoders. So you don't have to set the patch strength for text encoder (in comfyui, etc.).
Strength 0.4~0.8.
Version prefix:
illus01 = Trained on Illustrious v0.1.
nbep11 = Trained on NoobAI e-pred v1.1
Which version to use?
Hard to tell. You should try both version. Models nowadays are just merges of merges and merges. You would never know what's truly inside your base model. Most model creators don't know either.
Fun fact (5/10/2025): 90% of models labeled as "illustrious" are actually NoobAI, if you calculate their weight similarities.
You can also just use both, with low strength each, many users reported this has noticeable better result.
Every image is hand-picked by me.
Only normal good looking things. No crazy art style.
No AI images, no watermarks, etc.
Only high resolution images. Avg pixels 3.37 MP, ~1800x1800.
2 main dataset:
a 2D/anime dataset with ~1k images. Character-focus. Natural poses. Natural body proportions. No exaggerated art, chibi, jojo pose, etc.
a real world photographs dataset with ~1k images. Contains nature, indoors, animals, buildings...many things, except human.
Why real world images? You can get better background, lighting, pixel level details/textures. There is no human in dataset so it won't affect characters.
I named the dataset Touching Grass. There is also a LoRA that was only trained on this photograph dataset. If you want something pure.
But I got realistic faces on my anime characters.
Well, don't blame this LoRA. What it saw is what it learned. It has zero knowledge of realistic faces. Most likely your base model was mixed with other realistic models.
Some ideas that were going to, or used to, be part of the Stabilizer. Now they are separate LoRAs. For better flexibility. Collection link: https://civitai.com/collections/8274233.
Touching Grass: Trained on and only on the photographs dataset (No anime dataset). Has stronger effect. Useful for gigachad users who like pure concepts and like to balance weights themselves.
Dark: It can fix the high bias in anime models that towards high brightness. Trained on low brightness images in the Touching Grass dataset. Also, no human in dataset. So does not affect style.
Example on WAI v13.

Contrast Controller: Control the contrast like using a slider in your monitor. Unlike other trained "contrast enhancer", the effect of this LoRA is stable, linear, and has zero side effect on style. (Not an exaggeration, it's really mathematically zero and linear. It was not from training.) Example on WAI v13.

Style Strength Controller: Or overfitting effect reducer. Also not from training, so zero side effect on style and mathematically linear effects. Can reduce all kinds of overfitting effects (bias on objects, brightness, etc.).
Effect test on Hassaku XL: The prompt has keyword "dark", but the model almost ignored it. Notice that: at strength 0.25 this LoRA reduces the bias of high brightness, and a weird smooth feeling on every surfaces, so the image feels more natural.
Differences between Stabilizer:
Stabilizer affects style. Because it was trained on real world data. It can "reduce" overfitting effects about texture, details and backgrounds, by adding them back.
Style Controller was not from training. It is more like "undoing" the training of the base model, so it will be less overfitted. It does not affect style. And can reduce all overfitting effects, like bias on brightness, objects.
New version == new stuff and a new attempt != a better version for your base model.
You can check the "Update log" section to find old versions. It's ok to use different versions together just like mixing base models. As long as the sum of strengths does not > 1.
(5/19/2025): illus01 v1.152
Continued improving lighting, textures and details.
Added 5K more photographs. Still, contains everything, except human. Covering all lighting conditions as much as possible. (From super bright to super dark)
Refactored my caption pipeline. All images now have natural captions from Google latest LLM. All lighting (brightness, color temperature, etc.) conditions are properly tagged. All anime characters are tagged by wd tagger v3 and Google LLM.
More data, so more training steps, as a result, stronger effect.
(5/9/2025): nbep11 v0.205:
A quick fix of brightness and color issues in v0.198. Now it should not change brightness and colors so dramatically like a real photograph. v0.198 isn't bad, just creative, but too creative.
(5/7/2025): nbep11 v0.198:
Added more dark images. Less deformed body, background in dark environment.
Removed color and contrast enhancement. Because it's not needed anymore. Use Contrast Controller instead.
(4/25/2025): nbep11 v0.172.
Same new things in illus01 v1.93 ~ v1.121. Summary: New photographs dataset "Touching Grass". Better natural texture, background, lighting. Weaker character effects for better compatibility.
Better color accuracy and stability. (Comparing to nbep11 v0.160)
(4/17/2025): illus01 v1.121.
Rolled back to illustrious v0.1. illustrious v1.0 and newer versions were trained with AI images deliberately (maybe 30% of its dataset). Which is not ideal for LoRA training. I didn't notice until I read its paper.
Lower character style effect. Back to v1.23 level. Characters will have less details from this LoRA, but should have better compatibility. This is a trade-off.
Other things just same as below (v1.113).
(4/10/2025): illus11 v1.113 ❌.
Update: use this version only if you know your base model is based on Illustrious v1.1. Otherwise, use illus01 v1.121.
Trained on Illustrious v1.1.
New dataset "Touching Grass" added. Better natural texture, lighting and depth of field effect. Better background structural stability. Less deformed background, like deformed rooms, buildings.
Full natural language captions from LLM.
(3/30/2025): illus01 v1.93.
v1.72 was trained too hard. So I reduced its overall strength. Should have better compatibility.
(3/22/2025): nbep11 v0.160.
Same stuffs in illus v1.72.
(3/15/2025): illus01 v1.72
Same new texture and lighting dataset as mentioned in ani40z v0.4 below. More natural lighting and natural textures.
Added a small ~100 images dataset for hand enhancement, focusing on hand(s) with different tasks, like holding a glass or cup or something.
Removed all "simple background" images from dataset. -200 images.
Switched training tool from kohya to onetrainer. Changed LoRA architecture to DoRA.
(3/4/2025) ani40z v0.4
Trained on Animagine XL 4.0 ani40zero.
Added ~1k dataset focusing on natural dynamic lighting and real world texture.
More natural lighting and natural textures.
Above: Added more real world images. More natural texture and details.
ani04 v0.1
Init version for Animagine XL 4.0. Mainly to fix Animagine 4.0 brightness issues. Better and higher contrast.
illus01 v1.23
nbep11 v0.138
Added some furry/non-human/other images to balance the dataset.
nbep11 v0.129
bad version, effect is too weak, just ignore it
nbep11 v0.114
Implemented "Full range colors". It will automatically balance the things towards "normal and good looking". Think of this as the "one-click photo auto enhance" button in most photo editing tools. One downside of this optimization: It prevents high bias. For example, you want 95% of the image to be black, and 5% bright, instead of 50/50%
Added a little bit realistic data. More vivid details, lighting, less flat colors.
illus01 v1.7
nbep11 v0.96
More training images.
Then finetuned again on a small "wallpaper" dataset (Real game wallpapers, the highest quality I could find. ~100 images). More improvements in details (noticeable in skin, hair) and contrast.
Above: Has a weak default style.
nbep11 v0.58
More images. Changed the training parameters to be as close as possible to the NoobAI base model.
illus01 v1.3
nbep11 v0.30
More images.
nbep11 v0.11: Trained on NoobAI epsilon pred v1.1.
Improved dataset tags. Improved LoRA structure and weight distribution. Should be more stable and have less impact on image composition.
illus01 v1.1
Trained on illustriousXL v0.1.
nbep10 v0.10
Trained on NoobAI epsilon pred v1.0.

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
-------------𝔹𝕖𝕪𝕠𝕟𝕕 𝕆𝕗 𝕐𝕠𝕦𝕣 𝕀𝕞𝕒𝕘𝕚𝕟𝕒𝕥𝕚𝕠𝕟----------NOTE: ryosios text encoder (NoobAI fault, not my lora) is too heavy, try to lower down weight


This is settings for Euler Fan.:
Euler A
+Align Your Steps GITS or Align Your Steps
+Sampling steps: more than 20
+Mahiro CFG for reForge or comfyui Enabled
+CFG Scale: 4-5
+No NegativeThis is settings Another:
Restart
+Align Your Steps GITS
+Sampling steps: 10-20
+Mahiro CFG for reForge or comfyui Enabled
+CFG Scale: 4
+No NegativeThis is settings Best:
https://github.com/pamparamm/sd-perturbed-attention

Restart
+Align Your Steps GITS
+Sampling steps: 10 or 20
+Mahiro CFG for reForge or comfyui Enabled
+CFG Scale: 1-2
+No Negative
+ 
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
事情是这样的,大部分动漫主题的stable diffusion模型是以danbooru tags作为prompt来训练的。danbooru tags是1个非常大的标签集合,从人物的发型,到鞋子的款式应有尽有。但是它并不是很均衡,其中背景内容的标签往往只占很小1部分,例如标签中包含white hair/black hair/blonde hair等各色头发,但是table就只有table这个标签,没有white table,因此用danbooru tags训练出来的模型就不是很容易画背景。
那如果让mllm来给这些数据打上更加丰富的背景物件标签,是不是就能解决这个问题了呢?
让我们来试1试!
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
-------------𝔹𝕖𝕪𝕠𝕟𝕕 𝕆𝕗 𝕐𝕠𝕦𝕣 𝕀𝕞𝕒𝕘𝕚𝕟𝕒𝕥𝕚𝕠𝕟----------NOTE: ryosios text encoder (NoobAI fault, not my lora) is too heavy, try to lower down weight


This is settings for Euler Fan.:
Euler A
+Align Your Steps GITS or Align Your Steps
+Sampling steps: more than 20
+Mahiro CFG for reForge or comfyui Enabled
+CFG Scale: 4-5
+No NegativeThis is settings Another:
Restart
+Align Your Steps GITS
+Sampling steps: 10-20
+Mahiro CFG for reForge or comfyui Enabled
+CFG Scale: 4
+No NegativeThis is settings Best:
https://github.com/pamparamm/sd-perturbed-attention

Restart
+Align Your Steps GITS
+Sampling steps: 10 or 20
+Mahiro CFG for reForge or comfyui Enabled
+CFG Scale: 1-2
+No Negative
+ 
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Or overfitting effect reducer. I really don't know how to name it.
NOTE: Forget about v0.1. It is alpha version.
Use v1. It's much better and stabler. Supports all tools. All info in this page has been updated for v1.
What can this LoRA do?
This LoRA can reduce the style of your base model. Or amplify it if strength < 0 and you want something crazy.
This lora will not bring any side effects to the style.
I like the style, why would I reduce its strength?
The real purpose of this LoRA is to reduce overfitting effects and bring creativity back, by just reducing the style a little bit.
Overfitting effects?
Because model was trained too hard, and the dataset has bias.
E.g.:
noticeable bias. E.g.: Always too bright/dark, generating same faces / things / backgrounds.
too sensitive to some prompt words.
What's the effect of this LoRA?
The effect mainly depends on what your base model looks like. You should test it and feel yourself.
Here is an example on Hassaku XL v2.1fix. Notice that:
This base model has a noticeable bias towards high brightness, (and signs/paintings on wall, shiny reflections...)
So at strength -0.3, the model completely ignores the prompt word "dark". Because you amplified the style and bias as well.
At strength 0.25, the model has much less bias on brightness and feels more natural. Notice the table and wall "wooden" textures. Fewer weird reflections. The style doesn't noticeably change.
Strength 0.5 is for reference, weaker style and less bias (looking at viewer, signs/paintings on wall, etc). More natural.
This LoRA can also stabilize other LoRAs, avoid "burn" effect caused by super overfitted LoRA.
How to use?
Just apply it as normal LoRA.
Find the best strength for your model. Start around 0.2. A super overfitted model may need > 0.5.
Working strength is around -0.5~1.
You don't have to set the patch strength for text encoder. This LoRA does not patch it.
Some styles heavily affect CFG scale. So you may also need to adjust the CFG scales because the style strength changed.
What's the training data? Why it has zero side effect on style?
This LoRA is "calculated/calibrated" directly from SDXL and Illustrious v0.1/NoobAI ep11. No training process.

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Or overfitting effect reducer. I really don't know how to name it.
NOTE: Forget about v0.1. It is alpha version.
Use v1. It's much better and stabler. Supports all tools. All info in this page has been updated for v1.
What can this LoRA do?
This LoRA can reduce the style of your base model. Or amplify it if strength < 0 and you want something crazy.
This lora will not bring any side effects to the style.
I like the style, why would I reduce its strength?
The real purpose of this LoRA is to reduce overfitting effects and bring creativity back, by just reducing the style a little bit.
Overfitting effects?
Because model was trained too hard, and the dataset has bias.
E.g.:
noticeable bias. E.g.: Always too bright/dark, generating same faces / things / backgrounds.
too sensitive to some prompt words.
What's the effect of this LoRA?
The effect mainly depends on what your base model looks like. You should test it and feel yourself.
Here is an example on Hassaku XL v2.1fix. Notice that:
This base model has a noticeable bias towards high brightness, (and signs/paintings on wall, shiny reflections...)
So at strength -0.3, the model completely ignores the prompt word "dark". Because you amplified the style and bias as well.
At strength 0.25, the model has much less bias on brightness and feels more natural. Notice the table and wall "wooden" textures. Fewer weird reflections. The style doesn't noticeably change.
Strength 0.5 is for reference, weaker style and less bias (looking at viewer, signs/paintings on wall, etc). More natural.
This LoRA can also stabilize other LoRAs, avoid "burn" effect caused by super overfitted LoRA.
How to use?
Just apply it as normal LoRA.
Find the best strength for your model. Start around 0.2. A super overfitted model may need > 0.5.
Working strength is around -0.5~1.
You don't have to set the patch strength for text encoder. This LoRA does not patch it.
Some styles heavily affect CFG scale. So you may also need to adjust the CFG scales because the style strength changed.
What's the training data? Why it has zero side effect on style?
This LoRA is "calculated/calibrated" directly from SDXL and Illustrious v0.1/NoobAI ep11. No training process.

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
I will release multiple versions for a few popular models. Feel free to request for artist or model.
Version naming convention: MajorVersion.MinorVersion [BaseModel]
MajorVersion: For big updates that can apply to any style LoRA. For example, a training parameter update.
MinorVersion: For small updates that only apply to this LoRA. For example, epoch selection, removing a certain image from dataset.
BaseModel: SDXL LoRA/LyCORIS works best on the model that it was trained on.
Change notes:
v2.233->v2.233-2: Switching to a new epoch selecting strategy: reducing overfitting and improving hands.

Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Will remove if requested
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Will remove if requested
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
Recommended weight: 1
Trigger word:
togawagatameA posture where one party uses their legs to pin down the other, primarily shown by pressing one’s thigh against the opponent’s thigh to force their legs apart.
Feel free to leave feedback in the comments below, or share images generated using this model!
______________________________________________________________________________________________
建議權重:1
觸發提詞:
togawagatame用腳將另外一方固定的一種姿勢,主要呈現是用大腿壓住另外一方的大腿,讓對方雙腿張開。
歡迎底下留言反饋,或是用這模型產出圖片返圖!
+
Use the model without crediting the creator
Sell images they generate
Run on services that generate images for money
Run on Civitai
Share merges using this model
Sell this model or merges using this model
Have different permissions when sharing merges
The best LoRA for grass details.
Terms of Use: If you merge this model into your own and sell your own models without citing the original author, you acknowledge that 100% of the proceeds will be used to purchase your own coffin for your own future use.
What's this LoRA?
This LoRA is for users who like raw and pure things and like to balance weights themselves.
Useful to fix those poorly trained models, which were trained on only dozens of AI images but for thousands of steps. (aka super overfitted models, which can only generate the same things/faces/backgrounds over and over again.)
What is the difference between this and the Stabilizer LoRA?
Stabilizer has very weak effects. Because the rule is "don't break things". So it may have no effect on super overfitted models. Also Stabilizer has another anime dataset to make anime characters look better.
This LoRA is trained very hard and has much stronger effects, so it can "overwrite" those super overfitted models if you want. 100% real world images.
What's in the dataset?
~1K real world photographs of objects and environment.
No human. So it will not "pollute" your characters. Can be used on both anime and realistic models.
Very diverse and creative. Highest quality images. high contrast, full of details. (That's why they are photographs)
Paired with natural captions from LLM. Mainly because WD tagger v3 is really bad at real world images. Also because natural captions have more diverse vocabularies and can avoid overfitting.
What's the effect?
It really depends on your base model. Here is a quick comparison on WAI v13. This model has a very strong AI style (trained on AI images).
With/without.

Pixel level natural details. A so-called "detailer". But instead of training on AI images to amplify fake details from noise to generate more fake objects. This LoRA focuses on natural texture. Less flat and smooth feelings. Notice the food, clothes, light reflection on the table, depth of field and blurry background.
Significantly improves background structural stability for anime models. Anime datasets don't contain much background knowledge. Most of them are just "simple background". Even if some of them have some kind of background, it may be abstract art and lacking proper tags. So the base model will forget it or learn weird things during training. This LoRA was trained with tons of background/environment images with strong structural features.
How to use?
No trigger word needed.
You don't have to set the patch strength for text encoder. This LoRA does not patch it.
Lower your CFG scales (-30%) for better details.
I got realistic faces on my anime characters.
Don't blame this LoRA, it has zero knowledge of realistic faces. Most likely your base model was mixed with other realistic models, probably for better texture and lighting as well. It was already polluted. This LoRA may just activate the polluted part because the training datasets are similar (both are from the real world).
Share merges using this LoRA is prohibited. FYI, there are hidden trigger words to print invisible watermark. It works well even if the merge strength is 0.05. I coded the watermark and detector myself. I don't want to use it, but I can.
(4/15/2025) v0.2:
+30% images. Because there is a bug causing all avif files not being used in v0.1. Which is 30% of the dataset. lol.
Changed some parameters. Stronger, cleaner and more stable effect.
(4/02/2025) v0.1: init release.