diff --git a/modules_forge/config.py b/modules_forge/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a8970e013639722763d73af1c5b24bca953b7a9
--- /dev/null
+++ b/modules_forge/config.py
@@ -0,0 +1,4 @@
+always_disabled_extensions = [
+ 'sd-webui-controlnet',
+ 'multidiffusion-upscaler-for-automatic1111',
+]
diff --git a/modules_forge/cuda_malloc.py b/modules_forge/cuda_malloc.py
new file mode 100644
index 0000000000000000000000000000000000000000..8179a60ffc6fac1db983fe4eb7a8cff81d0b246b
--- /dev/null
+++ b/modules_forge/cuda_malloc.py
@@ -0,0 +1,92 @@
+import os
+import importlib.util
+
+
+# https://github.com/comfyanonymous/ComfyUI/blob/master/cuda_malloc.py
+def get_gpu_names():
+ if os.name == 'nt':
+ import ctypes
+
+ # Define necessary C structures and types
+ class DISPLAY_DEVICEA(ctypes.Structure):
+ _fields_ = [
+ ('cb', ctypes.c_ulong),
+ ('DeviceName', ctypes.c_char * 32),
+ ('DeviceString', ctypes.c_char * 128),
+ ('StateFlags', ctypes.c_ulong),
+ ('DeviceID', ctypes.c_char * 128),
+ ('DeviceKey', ctypes.c_char * 128)
+ ]
+
+ # Load user32.dll
+ user32 = ctypes.windll.user32
+
+ # Call EnumDisplayDevicesA
+ def enum_display_devices():
+ device_info = DISPLAY_DEVICEA()
+ device_info.cb = ctypes.sizeof(device_info)
+ device_index = 0
+ gpu_names = set()
+
+ while user32.EnumDisplayDevicesA(None, device_index, ctypes.byref(device_info), 0):
+ device_index += 1
+ gpu_names.add(device_info.DeviceString.decode('utf-8'))
+ return gpu_names
+ return enum_display_devices()
+ else:
+ return set()
+
+
+blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
+ "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620",
+ "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
+ "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
+ "GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M",
+ "GeForce GTX 1650", "GeForce GTX 1630"
+ }
+
+
+def cuda_malloc_supported():
+ try:
+ names = get_gpu_names()
+ except Exception:
+ names = set()
+ for x in names:
+ if "NVIDIA" in x:
+ for b in blacklist:
+ if b in x:
+ return False
+ return True
+
+
+def try_cuda_malloc():
+ do_cuda_malloc = False
+
+ try:
+ version = ""
+ torch_spec = importlib.util.find_spec("torch")
+ for folder in torch_spec.submodule_search_locations:
+ ver_file = os.path.join(folder, "version.py")
+ if os.path.isfile(ver_file):
+ spec = importlib.util.spec_from_file_location("torch_version_import", ver_file)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+ version = module.__version__
+ if int(version.split('.')[0]) >= 2:
+ do_cuda_malloc = cuda_malloc_supported()
+ except Exception:
+ pass
+
+ if do_cuda_malloc:
+ env_var = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', None)
+ if env_var is None:
+ env_var = "backend:cudaMallocAsync"
+ else:
+ env_var += ",backend:cudaMallocAsync"
+
+ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = env_var
+
+ print('Using cudaMallocAsync backend.')
+ else:
+ print('Failed to use cudaMallocAsync backend.')
+ return
diff --git a/modules_forge/diffusers_patcher.py b/modules_forge/diffusers_patcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..69f9a09b6c376136b59c5fdf4d06f3bbca1e91a0
--- /dev/null
+++ b/modules_forge/diffusers_patcher.py
@@ -0,0 +1,49 @@
+import torch
+from backend import operations, memory_management
+from backend.patcher.base import ModelPatcher
+
+from transformers import modeling_utils
+
+
+class DiffusersModelPatcher:
+ def __init__(self, pipeline_class, dtype=torch.float16, *args, **kwargs):
+ load_device = memory_management.get_torch_device()
+ offload_device = torch.device("cpu")
+
+ if not memory_management.should_use_fp16(device=load_device):
+ dtype = torch.float32
+
+ self.dtype = dtype
+
+ with operations.using_forge_operations():
+ with modeling_utils.no_init_weights():
+ self.pipeline = pipeline_class.from_pretrained(*args, **kwargs)
+
+ if hasattr(self.pipeline, 'unet'):
+ if hasattr(self.pipeline.unet, 'set_attn_processor'):
+ from diffusers.models.attention_processor import AttnProcessor2_0
+ self.pipeline.unet.set_attn_processor(AttnProcessor2_0())
+ print('Attention optimization applied to DiffusersModelPatcher')
+
+ self.pipeline = self.pipeline.to(device=offload_device)
+
+ if self.dtype == torch.float16:
+ self.pipeline = self.pipeline.half()
+
+ self.pipeline.eval()
+
+ self.patcher = ModelPatcher(
+ model=self.pipeline,
+ load_device=load_device,
+ offload_device=offload_device)
+
+ def prepare_memory_before_sampling(self, batchsize, latent_width, latent_height):
+ area = 2 * batchsize * latent_width * latent_height
+ inference_memory = (((area * 0.6) / 0.9) + 1024) * (1024 * 1024)
+ memory_management.load_models_gpu(
+ models=[self.patcher],
+ memory_required=inference_memory
+ )
+
+ def move_tensor_to_current_device(self, x):
+ return x.to(device=self.patcher.current_device, dtype=self.dtype)
diff --git a/modules_forge/forge_alter_samplers.py b/modules_forge/forge_alter_samplers.py
new file mode 100644
index 0000000000000000000000000000000000000000..b30ddc1f7a2f68d983712ef5fc39f1e86616300d
--- /dev/null
+++ b/modules_forge/forge_alter_samplers.py
@@ -0,0 +1,50 @@
+import logging
+from typing import Callable
+
+import k_diffusion.sampling
+
+from modules import sd_samplers_common, sd_samplers_kdiffusion
+
+
+class AlterSampler(sd_samplers_kdiffusion.KDiffusionSampler):
+ def __init__(self, sd_model, sampler_name):
+ sampler_function: Callable = getattr(k_diffusion.sampling, f"sample_{sampler_name}", None)
+ if sampler_function is None:
+ raise ValueError(f"Unknown sampler: {sampler_name}")
+
+ super().__init__(sampler_function, sd_model, None)
+
+ def sample(self, p, *args, **kwargs):
+ if p.cfg_scale > 2.0:
+ logging.warning("CFG between 1.0 ~ 2.0 is recommended when using CFG++ samplers")
+ return super().sample(p, *args, **kwargs)
+
+ def sample_img2img(self, p, *args, **kwargs):
+ if p.cfg_scale > 2.0:
+ logging.warning("CFG between 1.0 ~ 2.0 is recommended when using CFG++ samplers")
+ return super().sample_img2img(p, *args, **kwargs)
+
+
+def build_constructor(sampler_key: str) -> Callable:
+ def constructor(model):
+ return AlterSampler(model, sampler_key)
+
+ return constructor
+
+
+def create_cfg_pp_sampler(sampler_name: str, sampler_key: str) -> "sd_samplers_common.SamplerData":
+ config = {}
+ base_name = sampler_name.removesuffix(" CFG++")
+ for name, _, _, params in sd_samplers_kdiffusion.samplers_k_diffusion:
+ if name == base_name:
+ config = params.copy()
+ break
+
+ return sd_samplers_common.SamplerData(sampler_name, build_constructor(sampler_key=sampler_key), [sampler_key], config)
+
+
+samplers_data_alter = [
+ create_cfg_pp_sampler("DPM++ 2M CFG++", "dpmpp_2m_cfg_pp"),
+ create_cfg_pp_sampler("Euler a CFG++", "euler_ancestral_cfg_pp"),
+ create_cfg_pp_sampler("Euler CFG++", "euler_cfg_pp"),
+]
diff --git a/modules_forge/forge_canvas/canvas.css b/modules_forge/forge_canvas/canvas.css
new file mode 100644
index 0000000000000000000000000000000000000000..45c5ac01b75f784317e32f00d17cb88eede0d2f5
--- /dev/null
+++ b/modules_forge/forge_canvas/canvas.css
@@ -0,0 +1,160 @@
+.forge-container {
+ width: 100%;
+ height: 512px;
+ position: relative;
+ overflow: hidden;
+ user-select: none;
+}
+
+.forge-image-container:not(.plain) {
+ background-color: #cccccc;
+ background-image:
+ linear-gradient(45deg, #eee 25%, transparent 25%, transparent 75%, #eee 75%, #eee),
+ linear-gradient(45deg, #eee 25%, transparent 25%, transparent 75%, #eee 75%, #eee);
+ background-size: 20px 20px;
+ background-position:
+ 0 0,
+ 10px 10px;
+}
+
+.forge-image-container {
+ width: 100%;
+ height: calc(100% - 6px);
+ position: relative;
+ overflow: hidden;
+}
+
+.forge-image {
+ position: absolute;
+ top: 0;
+ left: 0;
+ background-size: contain;
+ background-repeat: no-repeat;
+ cursor: grab;
+ max-width: unset !important;
+ max-height: unset !important;
+}
+
+.forge-image:active {
+ cursor: grabbing;
+}
+
+.forge-file-upload {
+ display: none;
+}
+
+.forge-toolbar-static {
+ position: absolute;
+ top: 0px;
+ left: 0px;
+ z-index: 10 !important;
+ background: rgba(47, 47, 47, 0.8);
+ padding: 6px 10px;
+ opacity: 1 !important;
+}
+
+.forge-toolbar {
+ position: absolute;
+ top: 0px;
+ left: 0px;
+ z-index: 10;
+ background: rgba(47, 47, 47, 0.8);
+ padding: 6px 10px;
+ opacity: 0;
+ transition: opacity 0.3s ease;
+}
+
+.forge-toolbar .forge-btn,
+.forge-toolbar-static .forge-btn {
+ padding: 2px 6px;
+ border: none;
+ background-color: #4a4a4a;
+ color: white;
+ font-size: 14px;
+ cursor: pointer;
+ transition: background-color 0.3s ease;
+}
+
+.forge-toolbar .forge-btn:hover,
+.forge-toolbar-static .forge-btn:hover {
+ background-color: #5e5e5e;
+}
+
+.forge-toolbar .forge-btn:active,
+.forge-toolbar-static .forge-btn:active {
+ background-color: #3e3e3e;
+}
+
+.forge-toolbar-box-a {
+ flex-wrap: wrap;
+}
+
+.forge-toolbar-box-b {
+ display: flex;
+ flex-wrap: wrap;
+ align-items: center;
+ justify-content: center;
+ gap: 4px;
+}
+
+.forge-color-picker-block {
+ display: flex;
+ align-items: center;
+}
+
+.forge-range-row {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+}
+
+.forge-toolbar-color {
+ border: none;
+ background: none;
+ padding: 3px;
+ border-radius: 50%;
+ width: 20px;
+ height: 20px;
+ -webkit-appearance: none;
+ appearance: none;
+ cursor: pointer;
+}
+
+.forge-toolbar-color::-webkit-color-swatch-wrapper {
+ padding: 0;
+ border-radius: 50%;
+}
+
+.forge-toolbar-color::-webkit-color-swatch {
+ border: none;
+ border-radius: 50%;
+ background: none;
+}
+
+.forge-toolbar-label {
+ color: white !important;
+ padding: 0 4px;
+ display: flex;
+ align-items: center;
+ margin-bottom: 4px;
+}
+
+.forge-scribble-indicator {
+ position: relative;
+ border-radius: 50%;
+ border: 1px solid;
+ pointer-events: none;
+ display: none;
+ width: 80px;
+ height: 80px;
+}
+
+.forge-upload-hint {
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ width: 30%;
+ height: 30%;
+ transform: translate(-50%, -50%);
+}
diff --git a/modules_forge/forge_canvas/canvas.html b/modules_forge/forge_canvas/canvas.html
new file mode 100644
index 0000000000000000000000000000000000000000..8ba60c909022b3fbd686b9b44f214087589634eb
--- /dev/null
+++ b/modules_forge/forge_canvas/canvas.html
@@ -0,0 +1,58 @@
+
+
+
+
+
![]()
+
+
+
+
+
diff --git a/modules_forge/forge_canvas/canvas.js b/modules_forge/forge_canvas/canvas.js
new file mode 100644
index 0000000000000000000000000000000000000000..01004823a1e7ae6769508926947fd06ff1e16780
--- /dev/null
+++ b/modules_forge/forge_canvas/canvas.js
@@ -0,0 +1,840 @@
+class GradioTextAreaBind {
+ constructor(id, className) {
+ this.target = document.querySelector(`#${id}.${className} textarea`);
+ this.sync_lock = false;
+ this.previousValue = "";
+ }
+
+ set_value(value) {
+ if (this.sync_lock) return;
+ this.sync_lock = true;
+ this.target.value = value;
+ this.previousValue = value;
+ const event = new Event("input", { bubbles: true });
+ Object.defineProperty(event, "target", { value: this.target });
+ this.target.dispatchEvent(event);
+ this.previousValue = value;
+ this.sync_lock = false;
+ }
+
+ listen(callback) {
+ setInterval(() => {
+ if (this.target.value !== this.previousValue) {
+ this.previousValue = this.target.value;
+ if (this.sync_lock) return;
+ this.sync_lock = true;
+ callback(this.target.value);
+ this.sync_lock = false;
+ }
+ }, 100);
+ }
+}
+
+class ForgeCanvas {
+ constructor(
+ uuid,
+ no_upload = false,
+ no_scribbles = false,
+ contrast_scribbles = false,
+ initial_height = 512,
+ scribbleColor = "#000000",
+ scribbleColorFixed = false,
+ scribbleWidth = 20,
+ scribbleWidthFixed = false,
+ scribbleWidthConsistent = false,
+ scribbleAlpha = 100,
+ scribbleAlphaFixed = false,
+ scribbleSoftness = 0,
+ scribbleSoftnessFixed = false,
+ ) {
+ this.gradio_config = gradio_config;
+ this.uuid = uuid;
+
+ this.no_upload = no_upload;
+ this.no_scribbles = no_scribbles;
+ this.contrast_scribbles = contrast_scribbles;
+
+ this.img = null;
+ this.imgX = 0;
+ this.imgY = 0;
+ this.orgWidth = 0;
+ this.orgHeight = 0;
+ this.imgScale = 1.0;
+ this.initial_height = initial_height;
+
+ this.dragging = false;
+ this.dragged_just_now = false;
+ this.drawing = false;
+ this.contrast_pattern = null;
+
+ this.scribbleColor = scribbleColor;
+ this.scribbleColorFixed = scribbleColorFixed;
+ this.scribbleWidth = scribbleWidth;
+ this.scribbleWidthFixed = scribbleWidthFixed;
+ this.scribbleWidthConsistent = scribbleWidthConsistent;
+ this.scribbleAlpha = scribbleAlpha;
+ this.scribbleAlphaFixed = scribbleAlphaFixed;
+ this.scribbleSoftness = scribbleSoftness;
+ this.scribbleSoftnessFixed = scribbleSoftnessFixed;
+
+ this.history = [];
+ this.historyIndex = -1;
+ this.maximized = false;
+ this.originalState = {};
+ this.pointerInsideContainer = false;
+ this.temp_canvas = document.createElement("canvas");
+ this.temp_draw_points = [];
+ this.temp_draw_bg = null;
+
+ this.background_gradio_bind = new GradioTextAreaBind(this.uuid, "logical_image_background");
+ this.foreground_gradio_bind = new GradioTextAreaBind(this.uuid, "logical_image_foreground");
+ this.init();
+
+ this._held_W = false;
+ this._held_A = false;
+ this._held_S = false;
+
+ this._original_alpha = null;
+ }
+
+ init() {
+ const self = this;
+
+ const container = document.getElementById(`container_${self.uuid}`);
+ const imageContainer = document.getElementById(`imageContainer_${self.uuid}`);
+ const drawingCanvas = document.getElementById(`drawingCanvas_${self.uuid}`);
+ const toolbar = document.getElementById(`toolbar_${self.uuid}`);
+
+ const maxButton = document.getElementById(`maxButton_${self.uuid}`);
+ const minButton = document.getElementById(`minButton_${self.uuid}`);
+ const uploadButton = document.getElementById(`uploadButton_${self.uuid}`);
+ const removeButton = document.getElementById(`removeButton_${self.uuid}`);
+ const centerButton = document.getElementById(`centerButton_${self.uuid}`);
+ const resetButton = document.getElementById(`resetButton_${self.uuid}`);
+ const undoButton = document.getElementById(`undoButton_${self.uuid}`);
+ const redoButton = document.getElementById(`redoButton_${self.uuid}`);
+
+ const uploadHint = document.getElementById(`uploadHint_${self.uuid}`);
+ const scribbleIndicator = document.getElementById(`scribbleIndicator_${self.uuid}`);
+
+ minButton.style.display = "none";
+ this.maximized = false;
+
+ const scribbleColorBlock = document.getElementById(`scribbleColorBlock_${self.uuid}`);
+ if (self.scribbleColorFixed) scribbleColorBlock.style.display = "none";
+ const scribbleColor = document.getElementById(`scribbleColor_${self.uuid}`);
+ scribbleColor.value = self.scribbleColor;
+
+ const scribbleWidthBlock = document.getElementById(`scribbleWidthBlock_${self.uuid}`);
+ if (self.scribbleWidthFixed) scribbleWidthBlock.style.display = "none";
+ const scribbleWidth = document.getElementById(`scribbleWidth_${self.uuid}`);
+ const scribbleWidthLabel = document.getElementById(`widthLabel_${self.uuid}`);
+ scribbleWidth.value = self.scribbleWidth;
+ scribbleWidthLabel.textContent = `Brush Width (${self.scribbleWidth})`;
+
+ const scribbleAlphaBlock = document.getElementById(`scribbleAlphaBlock_${self.uuid}`);
+ if (self.scribbleAlphaFixed) scribbleAlphaBlock.style.display = "none";
+ const scribbleAlpha = document.getElementById(`scribbleAlpha_${self.uuid}`);
+ const scribbleAlphaLabel = document.getElementById(`alphaLabel_${self.uuid}`);
+ scribbleAlpha.value = self.scribbleAlpha;
+ scribbleAlphaLabel.textContent = `Brush Opacity (${self.scribbleAlpha})`;
+
+ const scribbleSoftnessBlock = document.getElementById(`scribbleSoftnessBlock_${self.uuid}`);
+ if (self.scribbleSoftnessFixed) scribbleSoftnessBlock.style.display = "none";
+ const scribbleSoftness = document.getElementById(`scribbleSoftness_${self.uuid}`);
+ const scribbleSoftnessLabel = document.getElementById(`softnessLabel_${self.uuid}`);
+ scribbleSoftness.value = self.scribbleSoftness;
+ scribbleSoftnessLabel.textContent = `Brush Softness (${self.scribbleSoftness})`;
+
+ const indicatorSize = self.scribbleWidth * 4;
+ scribbleIndicator.style.width = `${indicatorSize}px`;
+ scribbleIndicator.style.height = `${indicatorSize}px`;
+
+ container.style.height = `${self.initial_height}px`;
+ drawingCanvas.width = imageContainer.clientWidth;
+ drawingCanvas.height = imageContainer.clientHeight;
+
+ const drawContext = drawingCanvas.getContext("2d");
+ self.drawingCanvas_ = drawingCanvas;
+
+ if (self.no_scribbles) {
+ toolbar.querySelector(".forge-toolbar-box-b").style.display = "none";
+ toolbar.removeAttribute("title");
+ resetButton.style.display = "none";
+ undoButton.style.display = "none";
+ redoButton.style.display = "none";
+ }
+
+ if (self.no_upload) {
+ uploadButton.style.display = "none";
+ uploadHint.style.display = "none";
+ }
+
+ if (self.contrast_scribbles) {
+ const size = 10;
+ const tempCanvas = self.temp_canvas;
+ tempCanvas.width = size * 2;
+ tempCanvas.height = size * 2;
+ const tempCtx = tempCanvas.getContext("2d");
+ tempCtx.fillStyle = "#ffffff";
+ tempCtx.fillRect(0, 0, size, size);
+ tempCtx.fillRect(size, size, size, size);
+ tempCtx.fillStyle = "#000000";
+ tempCtx.fillRect(size, 0, size, size);
+ tempCtx.fillRect(0, size, size, size);
+ self.contrast_pattern = drawContext.createPattern(tempCanvas, "repeat");
+ drawingCanvas.style.opacity = "0.5";
+ }
+
+ function resetScribble(e, rect) {
+ const indicatorSize = self.scribbleWidth * (self.scribbleWidthConsistent ? 1.0 : self.imgScale) * 4;
+ scribbleIndicator.style.width = `${indicatorSize}px`;
+ scribbleIndicator.style.height = `${indicatorSize}px`;
+ scribbleIndicator.style.left = `${e.clientX - rect.left - indicatorSize / 2}px`;
+ scribbleIndicator.style.top = `${e.clientY - rect.top - indicatorSize / 2}px`;
+ }
+
+ const resizeObserver = new ResizeObserver(() => {
+ self.adjustInitialPositionAndScale();
+ self.drawImage();
+ });
+ resizeObserver.observe(container);
+
+ document.getElementById(`imageInput_${self.uuid}`).addEventListener("change", (e) => {
+ self.handleFileUpload(e.target.files[0]);
+ });
+
+ uploadButton.addEventListener("click", () => {
+ if (self.no_upload) return;
+ document.getElementById(`imageInput_${self.uuid}`).click();
+ });
+
+ removeButton.addEventListener("click", () => {
+ self.resetImage();
+ self.removeImage();
+ });
+
+ centerButton.addEventListener("click", () => {
+ self.adjustInitialPositionAndScale();
+ self.drawImage();
+ });
+
+ resetButton.addEventListener("click", () => {
+ self.resetImage();
+ });
+
+ undoButton.addEventListener("click", () => {
+ self.undo();
+ });
+
+ redoButton.addEventListener("click", () => {
+ self.redo();
+ });
+
+ scribbleColor.addEventListener("input", (e) => {
+ self.scribbleColor = e.target.value;
+ scribbleIndicator.style.borderColor = self.scribbleColor;
+ });
+
+ scribbleWidth.addEventListener("input", (e) => {
+ self.scribbleWidth = e.target.value;
+ scribbleWidthLabel.textContent = `Brush Width (${self.scribbleWidth})`;
+ const indicatorSize = self.scribbleWidth * (self.scribbleWidthConsistent ? 1.0 : self.imgScale) * 4;
+ scribbleIndicator.style.width = `${indicatorSize}px`;
+ scribbleIndicator.style.height = `${indicatorSize}px`;
+ });
+
+ scribbleAlpha.addEventListener("input", (e) => {
+ self.scribbleAlpha = e.target.value;
+ scribbleAlphaLabel.textContent = `Brush Opacity (${self.scribbleAlpha})`;
+ });
+
+ scribbleSoftness.addEventListener("input", (e) => {
+ self.scribbleSoftness = e.target.value;
+ scribbleSoftnessLabel.textContent = `Brush Softness (${self.scribbleSoftness})`;
+ });
+
+ drawingCanvas.addEventListener("pointerdown", (e) => {
+ if (!self.img || e.button !== 0 || self.no_scribbles) return;
+ const rect = drawingCanvas.getBoundingClientRect();
+ self.drawing = true;
+ drawingCanvas.style.cursor = "crosshair";
+ scribbleIndicator.style.display = "none";
+ self.temp_draw_points = [[(e.clientX - rect.left) / self.imgScale, (e.clientY - rect.top) / self.imgScale]];
+ self.temp_draw_bg = drawContext.getImageData(0, 0, drawingCanvas.width, drawingCanvas.height);
+ self.handleDraw(e);
+ });
+
+ drawingCanvas.addEventListener("pointermove", (e) => {
+ if (self.drawing) self.handleDraw(e);
+ if (self.img && !self.drawing && !self.dragging && !self.no_scribbles) {
+ const rect = container.getBoundingClientRect();
+ resetScribble(e, rect);
+ scribbleIndicator.style.display = "inline-block";
+ }
+ });
+
+ toolbar.addEventListener("pointerdown", (e) => {
+ e.stopPropagation();
+ });
+
+ drawingCanvas.addEventListener("pointerup", () => {
+ self.drawing = false;
+ drawingCanvas.style.cursor = "";
+ self.saveState();
+ });
+
+ drawingCanvas.addEventListener("pointerout", () => {
+ self.drawing = false;
+ drawingCanvas.style.cursor = "";
+ scribbleIndicator.style.display = "none";
+ });
+
+ container.addEventListener("pointerdown", (e) => {
+ const rect = container.getBoundingClientRect();
+ const x = e.clientX - rect.left;
+ const y = e.clientY - rect.top;
+ if (e.button === 2 && self.isInsideImage(x, y)) {
+ self.dragging = true;
+ self.offsetX = x - self.imgX;
+ self.offsetY = y - self.imgY;
+ imageContainer.style.cursor = "grabbing";
+ drawingCanvas.style.cursor = "grabbing";
+ scribbleIndicator.style.display = "none";
+ } else if (e.button === 0 && !self.img && !self.no_upload) {
+ document.getElementById(`imageInput_${self.uuid}`).click();
+ }
+ });
+
+ container.addEventListener("pointermove", (e) => {
+ if (self.dragging) {
+ const rect = container.getBoundingClientRect();
+ const x = e.clientX - rect.left;
+ const y = e.clientY - rect.top;
+ self.imgX = x - self.offsetX;
+ self.imgY = y - self.offsetY;
+ self.drawImage();
+ self.dragged_just_now = true;
+ }
+ });
+
+ container.addEventListener("pointerup", (e) => {
+ if (self.dragging) self.handleDragEnd(e, false);
+ });
+
+ container.addEventListener("pointerout", (e) => {
+ if (self.dragging) self.handleDragEnd(e, true);
+ });
+
+ container.addEventListener("wheel", (e) => {
+ if (!self.img) return;
+ e.preventDefault();
+ const delta = e.deltaY * -0.001;
+ let scale = true;
+
+ if (this._held_W) {
+ // Width
+ scribbleWidth.value = parseInt(scribbleWidth.value) - Math.sign(e.deltaY) * 3;
+ updateInput(scribbleWidth);
+ const rect = container.getBoundingClientRect();
+ resetScribble(e, rect);
+ scale = false;
+ }
+ if (this._held_A) {
+ // Alpha (Opacity)
+ scribbleAlpha.value = parseInt(scribbleAlpha.value) - Math.sign(e.deltaY) * 5;
+ updateInput(scribbleAlpha);
+ scale = false;
+ }
+ if (this._held_S) {
+ // Softness
+ scribbleSoftness.value = parseInt(scribbleSoftness.value) - Math.sign(e.deltaY) * 5;
+ updateInput(scribbleSoftness);
+ scale = false;
+ }
+
+ if (!scale) return;
+
+ const rect = container.getBoundingClientRect();
+ const x = e.clientX - rect.left;
+ const y = e.clientY - rect.top;
+ const oldScale = self.imgScale;
+ self.imgScale += delta;
+ self.imgScale = Math.max(0.1, self.imgScale);
+ const newScale = self.imgScale / oldScale;
+ self.imgX = x - (x - self.imgX) * newScale;
+ self.imgY = y - (y - self.imgY) * newScale;
+ self.drawImage();
+ resetScribble(e, rect);
+ });
+
+ container.addEventListener("contextmenu", (e) => {
+ e.preventDefault();
+ self.dragged_just_now = false;
+ return false;
+ });
+
+ container.addEventListener("dragleave", () => {
+ toolbar.style.opacity = "0";
+ imageContainer.style.cursor = "";
+ drawingCanvas.style.cursor = "";
+ container.style.cursor = "";
+ scribbleIndicator.style.display = "none";
+ });
+
+ function preventDefaults(e) {
+ e.preventDefault();
+ e.stopPropagation();
+ }
+
+ for (const e of ["dragenter", "dragover", "dragleave", "drop"]) {
+ container.addEventListener(e, preventDefaults, false);
+ }
+
+ container.addEventListener("dragenter", () => {
+ imageContainer.style.cursor = "copy";
+ drawingCanvas.style.cursor = "copy";
+ });
+
+ container.addEventListener("dragleave", () => {
+ imageContainer.style.cursor = "";
+ drawingCanvas.style.cursor = "";
+ });
+
+ container.addEventListener("drop", (e) => {
+ imageContainer.style.cursor = "";
+ drawingCanvas.style.cursor = "";
+ const dt = e.dataTransfer;
+ const files = dt.files;
+ if (files.length > 0) self.handleFileUpload(files[0]);
+ });
+
+ container.addEventListener("pointerenter", () => {
+ self.pointerInsideContainer = true;
+ toolbar.style.opacity = "1";
+ if (!self.img && !self.no_upload) container.style.cursor = "pointer";
+ });
+
+ container.addEventListener("pointerleave", () => {
+ self.pointerInsideContainer = false;
+ toolbar.style.opacity = "0";
+ });
+
+ document.addEventListener("paste", (e) => {
+ if (self.pointerInsideContainer) self.handlePaste(e);
+ });
+
+ document.addEventListener("keydown", (e) => {
+ if (!self.pointerInsideContainer) return;
+ if (e.shiftKey) {
+ e.preventDefault();
+ if (this._original_alpha === null)
+ this._original_alpha = scribbleAlpha.value;
+ scribbleAlpha.value = 0.0;
+ updateInput(scribbleAlpha);
+ scribbleIndicator.style.border = "2px dotted";
+ return;
+ }
+ if (e.ctrlKey && e.key === "z") {
+ e.preventDefault();
+ this.undo();
+ }
+ if (e.ctrlKey && e.key === "y") {
+ e.preventDefault();
+ this.redo();
+ }
+ if (e.ctrlKey && e.key === "x") {
+ e.preventDefault();
+ this.resetImage();
+ }
+ if (e.key === "e") {
+ scribbleColor.click();
+ }
+ if (e.key === "r") {
+ centerButton.click();
+ }
+ if (e.key === "f") {
+ if (maxButton.style.display === "none")
+ minButton.click();
+ else
+ maxButton.click();
+ }
+
+ if (e.key === "w") this._held_W = true;
+ if (e.key === "a") this._held_A = true;
+ if (e.key === "s") this._held_S = true;
+ });
+
+ document.addEventListener("keyup", () => {
+ this._held_W = false;
+ this._held_A = false;
+ this._held_S = false;
+
+ if (this._original_alpha !== null) {
+ scribbleAlpha.value = this._original_alpha;
+ this._original_alpha = null;
+ updateInput(scribbleAlpha);
+ scribbleIndicator.style.border = "1px solid";
+ }
+ });
+
+ maxButton.addEventListener("click", () => {
+ self.maximize();
+ });
+
+ minButton.addEventListener("click", () => {
+ self.minimize();
+ });
+
+ self.updateUndoRedoButtons();
+
+ self.background_gradio_bind.listen((value) => {
+ self.loadImage(value);
+ });
+
+ self.foreground_gradio_bind.listen((value) => {
+ self.loadDrawing(value);
+ });
+ }
+
+ handleDraw(e) {
+ const canvas = this.drawingCanvas_;
+ const ctx = canvas.getContext("2d");
+ const rect = canvas.getBoundingClientRect();
+ const x = (e.clientX - rect.left) / this.imgScale;
+ const y = (e.clientY - rect.top) / this.imgScale;
+
+ this.temp_draw_points.push([x, y]);
+ ctx.putImageData(this.temp_draw_bg, 0, 0);
+ ctx.beginPath();
+ ctx.moveTo(this.temp_draw_points[0][0], this.temp_draw_points[0][1]);
+
+ for (let i = 1; i < this.temp_draw_points.length; i++) {
+ ctx.lineTo(this.temp_draw_points[i][0], this.temp_draw_points[i][1]);
+ }
+
+ ctx.lineCap = "round";
+ ctx.lineJoin = "round";
+ ctx.lineWidth = this.scribbleWidth / (this.scribbleWidthConsistent ? this.imgScale : 1.0) * 4;
+
+ if (this.scribbleAlpha <= 0) {
+ ctx.globalCompositeOperation = "destination-out";
+ ctx.globalAlpha = 1.0;
+ ctx.stroke();
+ return;
+ }
+
+ ctx.globalCompositeOperation = "source-over";
+
+ if (this.contrast_scribbles) {
+ ctx.strokeStyle = this.contrast_pattern;
+ ctx.stroke();
+ return;
+ }
+
+ ctx.strokeStyle = this.scribbleColor;
+
+ canvas.style.opacity = 1.0;
+ let drawingAlpha = this.scribbleAlpha;
+
+ if (this.scribbleAlphaFixed) {
+ canvas.style.opacity = this.scribbleAlpha / 100.0;
+ drawingAlpha = 100.0;
+ }
+
+ if (this.scribbleSoftness <= 0) {
+ ctx.save();
+ ctx.globalCompositeOperation = "destination-out";
+ ctx.globalAlpha = 1.0;
+ ctx.stroke();
+ ctx.restore();
+
+ ctx.globalCompositeOperation = "source-over";
+ ctx.globalAlpha = drawingAlpha / 100.0;
+ ctx.stroke();
+ return;
+ }
+
+ const innerWidth = ctx.lineWidth * (1 - this.scribbleSoftness / 96);
+ const outerWidth = ctx.lineWidth * (1 + this.scribbleSoftness / 96);
+ const steps = Math.round(5 + this.scribbleSoftness / 5);
+ const stepWidth = (outerWidth - innerWidth) / (steps - 1);
+
+ ctx.globalAlpha = 1.0 - Math.pow(1.0 - Math.min(drawingAlpha / 100, 0.95), 1.0 / steps);
+
+ for (let i = 0; i < steps; i++) {
+ ctx.lineWidth = innerWidth + stepWidth * i;
+ ctx.stroke();
+ }
+ }
+
+ handleFileUpload(file) {
+ if (file && !this.no_upload) {
+ const reader = new FileReader();
+ reader.onload = (e) => {
+ this.loadImage(e.target.result);
+ };
+ reader.readAsDataURL(file);
+ }
+ }
+
+ handlePaste(e) {
+ const items = e.clipboardData.items;
+ for (let i = 0; i < items.length; i++) {
+ const item = items[i];
+ if (item.type.indexOf("image") !== -1) {
+ const file = item.getAsFile();
+ this.handleFileUpload(file);
+ break;
+ }
+ }
+ }
+
+ loadImage(base64) {
+ if (typeof this.gradio_config !== "undefined") {
+ if (!this.gradio_config.version.startsWith("4.")) return;
+ } else {
+ return;
+ }
+
+ const image = new Image();
+ image.onload = () => {
+ this.img = base64;
+ this.orgWidth = image.width;
+ this.orgHeight = image.height;
+ const canvas = document.getElementById(`drawingCanvas_${this.uuid}`);
+ if (canvas.width !== image.width || canvas.height !== image.height) {
+ canvas.width = image.width;
+ canvas.height = image.height;
+ }
+ this.adjustInitialPositionAndScale();
+ this.drawImage();
+ this.updateBackgroundImageData();
+ this.saveState();
+ this.updateUndoRedoButtons();
+ document.getElementById(`imageInput_${this.uuid}`).value = null;
+ document.getElementById(`uploadHint_${this.uuid}`).style.display = "none";
+ };
+
+ if (base64) {
+ image.src = base64;
+ } else {
+ this.img = null;
+ const canvas = document.getElementById(`drawingCanvas_${this.uuid}`);
+ canvas.width = 1;
+ canvas.height = 1;
+ this.adjustInitialPositionAndScale();
+ this.drawImage();
+ this.saveState();
+ this.updateUndoRedoButtons();
+ }
+ }
+
+ loadDrawing(base64) {
+ const image = new Image();
+ image.onload = () => {
+ const canvas = document.getElementById(`drawingCanvas_${this.uuid}`);
+ const ctx = canvas.getContext("2d");
+ ctx.clearRect(0, 0, canvas.width, canvas.height);
+ ctx.drawImage(image, 0, 0);
+ this.saveState();
+ };
+ if (base64) {
+ image.src = base64;
+ } else {
+ const canvas = document.getElementById(`drawingCanvas_${this.uuid}`);
+ const ctx = canvas.getContext("2d");
+ ctx.clearRect(0, 0, canvas.width, canvas.height);
+ this.saveState();
+ }
+ }
+
+ isInsideImage(x, y) {
+ const scaledWidth = this.orgWidth * this.imgScale;
+ const scaledHeight = this.orgHeight * this.imgScale;
+ return x > this.imgX && x < this.imgX + scaledWidth && y > this.imgY && y < this.imgY + scaledHeight;
+ }
+
+ drawImage() {
+ const image = document.getElementById(`image_${this.uuid}`);
+ const drawingCanvas = document.getElementById(`drawingCanvas_${this.uuid}`);
+ if (this.img) {
+ const scaledWidth = this.orgWidth * this.imgScale;
+ const scaledHeight = this.orgHeight * this.imgScale;
+ image.src = this.img;
+ image.style.width = `${scaledWidth}px`;
+ image.style.height = `${scaledHeight}px`;
+ image.style.left = `${this.imgX}px`;
+ image.style.top = `${this.imgY}px`;
+ image.style.display = "block";
+ drawingCanvas.style.width = `${scaledWidth}px`;
+ drawingCanvas.style.height = `${scaledHeight}px`;
+ drawingCanvas.style.left = `${this.imgX}px`;
+ drawingCanvas.style.top = `${this.imgY}px`;
+ } else {
+ image.src = "";
+ image.style.display = "none";
+ }
+ }
+
    // Fit the image inside the container (leaving a 20px margin on each axis)
    // and center it, writing the result into imgScale / imgX / imgY.
    adjustInitialPositionAndScale() {
        const container = document.getElementById(`container_${this.uuid}`);
        const containerWidth = container.clientWidth - 20;
        const containerHeight = container.clientHeight - 20;
        const scaleX = containerWidth / this.orgWidth;
        const scaleY = containerHeight / this.orgHeight;
        // Uniform scale: the tighter axis wins so the whole image is visible.
        this.imgScale = Math.min(scaleX, scaleY);
        const scaledWidth = this.orgWidth * this.imgScale;
        const scaledHeight = this.orgHeight * this.imgScale;
        this.imgX = (container.clientWidth - scaledWidth) / 2;
        this.imgY = (container.clientHeight - scaledHeight) / 2;
    }
+
    // Clear all scribbles and restore the default fit/centering of the image,
    // then snapshot the cleared state into the undo history.
    resetImage() {
        const canvas = document.getElementById(`drawingCanvas_${this.uuid}`);
        const ctx = canvas.getContext("2d");
        ctx.clearRect(0, 0, canvas.width, canvas.height);
        this.adjustInitialPositionAndScale();
        this.drawImage();
        this.saveState();
    }
+
    // Remove the background image entirely: clear scribbles, blank the <img>,
    // re-show the upload hint (unless uploads are disabled) and reset the
    // logical image state.
    removeImage() {
        this.img = null;
        const image = document.getElementById(`image_${this.uuid}`);
        const canvas = document.getElementById(`drawingCanvas_${this.uuid}`);
        const ctx = canvas.getContext("2d");
        ctx.clearRect(0, 0, canvas.width, canvas.height);
        image.src = "";
        image.style.width = "0";
        image.style.height = "0";
        this.saveState();
        if (!this.no_upload) {
            document.getElementById(`uploadHint_${this.uuid}`).style.display = "inline-block";
        }
        // loadImage is defined earlier in this class (outside this chunk);
        // presumably null propagates the removal to the gradio binding — confirm.
        this.loadImage(null);
    }
+
    // Snapshot the current canvas pixels into the undo history.
    // Any redo entries beyond the current index are discarded first (standard
    // undo-stack behavior), then the gradio foreground binding is refreshed.
    saveState() {
        const canvas = document.getElementById(`drawingCanvas_${this.uuid}`);
        const ctx = canvas.getContext("2d");
        const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
        this.history = this.history.slice(0, this.historyIndex + 1);
        this.history.push(imageData);
        this.historyIndex++;
        this.updateUndoRedoButtons();
        this.updateDrawingData();
    }
+
+ undo() {
+ if (this.historyIndex > 0) {
+ this.historyIndex--;
+ this.restoreState();
+ this.updateUndoRedoButtons();
+ }
+ }
+
+ redo() {
+ if (this.historyIndex < this.history.length - 1) {
+ this.historyIndex++;
+ this.restoreState();
+ this.updateUndoRedoButtons();
+ }
+ }
+
    // Paint the history entry at the current index back onto the canvas and
    // push the restored pixels to the gradio binding.
    restoreState() {
        const canvas = document.getElementById(`drawingCanvas_${this.uuid}`);
        const ctx = canvas.getContext("2d");
        const imageData = this.history[this.historyIndex];
        ctx.putImageData(imageData, 0, 0);
        this.updateDrawingData();
    }
+
+ updateUndoRedoButtons() {
+ const undoButton = document.getElementById(`undoButton_${this.uuid}`);
+ const redoButton = document.getElementById(`redoButton_${this.uuid}`);
+ undoButton.disabled = this.historyIndex <= 0;
+ redoButton.disabled = this.historyIndex >= this.history.length - 1;
+ undoButton.style.opacity = undoButton.disabled ? "0.5" : "1";
+ redoButton.style.opacity = redoButton.disabled ? "0.5" : "1";
+ }
+
    // Re-encode the background image at its original (unscaled) resolution and
    // push it to the Python side; an empty string means "no image".
    updateBackgroundImageData() {
        if (!this.img) {
            this.background_gradio_bind.set_value("");
            return;
        }
        const image = document.getElementById(`image_${this.uuid}`);
        // Reuse the shared scratch canvas to avoid allocating one per call.
        const tempCanvas = this.temp_canvas;
        const tempCtx = tempCanvas.getContext("2d");
        tempCanvas.width = this.orgWidth;
        tempCanvas.height = this.orgHeight;
        tempCtx.drawImage(image, 0, 0, this.orgWidth, this.orgHeight);
        const dataUrl = tempCanvas.toDataURL("image/png");
        this.background_gradio_bind.set_value(dataUrl);
    }
+
+ updateDrawingData() {
+ if (!this.img) {
+ this.foreground_gradio_bind.set_value("");
+ return;
+ }
+ const canvas = document.getElementById(`drawingCanvas_${this.uuid}`);
+ const dataUrl = canvas.toDataURL("image/png");
+ this.foreground_gradio_bind.set_value(dataUrl);
+ }
+
    // Expand the canvas container to fill the viewport. The pre-maximize
    // geometry is saved in this.originalState so minimize() can restore it.
    maximize() {
        if (this.maximized) return;
        const container = document.getElementById(`container_${this.uuid}`);
        const maxButton = document.getElementById(`maxButton_${this.uuid}`);
        const minButton = document.getElementById(`minButton_${this.uuid}`);

        this.originalState = {
            width: container.style.width,
            height: container.style.height,
            top: container.style.top,
            left: container.style.left,
            position: container.style.position,
            zIndex: container.style.zIndex,
        };

        container.style.width = "100vw";
        container.style.height = "100vh";
        container.style.top = "0";
        container.style.left = "0";
        container.style.position = "fixed";
        container.style.zIndex = "1000"; // float above the rest of the UI
        maxButton.style.display = "none";
        minButton.style.display = "inline-block";
        this.maximized = true;
    }
+
    // Undo maximize(): restore the container geometry saved in
    // this.originalState and swap the toolbar buttons back.
    minimize() {
        if (!this.maximized) return;
        const container = document.getElementById(`container_${this.uuid}`);
        const maxButton = document.getElementById(`maxButton_${this.uuid}`);
        const minButton = document.getElementById(`minButton_${this.uuid}`);

        container.style.width = this.originalState.width;
        container.style.height = this.originalState.height;
        container.style.top = this.originalState.top;
        container.style.left = this.originalState.left;
        container.style.position = this.originalState.position;
        container.style.zIndex = this.originalState.zIndex;
        maxButton.style.display = "inline-block";
        minButton.style.display = "none";
        this.maximized = false;
    }
+
    // End a pan/drag gesture and restore the "grab" cursor.
    // `e` and `isPointerOut` are unused here but kept so the signature matches
    // the pointer-event call sites (NOTE(review): confirm isPointerOut is
    // intentionally ignored).
    handleDragEnd(e, isPointerOut) {
        const image = document.getElementById(`image_${this.uuid}`);
        const drawingCanvas = document.getElementById(`drawingCanvas_${this.uuid}`);
        this.dragging = false;
        image.style.cursor = "grab";
        drawingCanvas.style.cursor = "grab";
    }
+}
+
// Python-to-JS interop shim: the server interpolates Python booleans directly
// into generated JS (see ForgeCanvas.__init__ in canvas.py), which produces
// the literals `True` / `False`; these aliases make that generated code valid.
const True = true;
const False = false;
diff --git a/modules_forge/forge_canvas/canvas.py b/modules_forge/forge_canvas/canvas.py
new file mode 100644
index 0000000000000000000000000000000000000000..43cb23b3f9853ad4da2c932ec3aaf3b47c5d6395
--- /dev/null
+++ b/modules_forge/forge_canvas/canvas.py
@@ -0,0 +1,138 @@
+"""
+Forge Canvas
+Copyright (C) 2024 lllyasviel
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+"""
+
import gradio.component_meta

# Keep a reference to gradio's original .pyi stub generator so the patched
# version below can delegate to it.
create_or_modify_pyi_org = gradio.component_meta.create_or_modify_pyi


def create_or_modify_pyi_org_patched(component_class, class_name, events):
    """Wrapper for gradio's ``create_or_modify_pyi`` that skips stub generation
    for the custom ``LogicalImage`` component and swallows any error.

    Stub (.pyi) generation is purely cosmetic tooling support; a failure here
    must never break webui startup, hence the broad except.
    """
    try:
        if component_class.__name__ == "LogicalImage":
            return
        return create_or_modify_pyi_org(component_class, class_name, events)
    except Exception:
        return


# Install the patch before any gradio components (below) are defined.
gradio.component_meta.create_or_modify_pyi = create_or_modify_pyi_org_patched
+
+
+import base64
+import os
+import uuid
+from functools import wraps
+from io import BytesIO
+
+import gradio as gr
+import numpy as np
+from gradio.context import Context
+from PIL import Image
+
+from modules.shared import opts
+
+DEBUG_MODE = False
+canvas_js_root_path = os.path.dirname(__file__)
+
+
def web_js(file_name):
    """Return an HTML ``<script>`` tag loading `file_name` from the canvas
    directory through gradio's ``file=`` static-file route."""
    full_path = os.path.join(canvas_js_root_path, file_name)
    # NOTE(review): the original returned only f'\n' and left `full_path`
    # unused -- the markup was evidently stripped (likely by extraction).
    # Restored to the standard Forge canvas script tag; confirm against upstream.
    return f'<script src="file={full_path}"></script>\n'
+
+
def web_css(file_name):
    """Return an HTML ``<link>`` stylesheet tag loading `file_name` from the
    canvas directory through gradio's ``file=`` static-file route."""
    full_path = os.path.join(canvas_js_root_path, file_name)
    # NOTE(review): the original returned only f'\n' and left `full_path`
    # unused -- the markup was evidently stripped (likely by extraction).
    # Restored to the standard Forge canvas stylesheet tag; confirm against upstream.
    return f'<link rel="stylesheet" href="file={full_path}">\n'
+
+
+canvas_html = open(os.path.join(canvas_js_root_path, "canvas.html"), encoding="utf-8").read()
+canvas_head = "".join((web_css("canvas.css"), web_js("canvas.js")))
+
+
def image_to_base64(image_array, numpy=True):
    """Encode an image as a PNG data URL.

    `image_array` is a numpy array when `numpy` is True, otherwise a PIL image;
    it is converted to RGBA before encoding.
    """
    pil_image = Image.fromarray(image_array) if numpy else image_array
    buffer = BytesIO()
    pil_image.convert("RGBA").save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return "data:image/png;base64," + encoded
+
+
def base64_to_image(base64_str, numpy=True):
    """Decode a base64 PNG (with or without the data-URL prefix) into an RGBA
    numpy array, or a PIL image when `numpy` is False."""
    prefix = "data:image/png;base64,"
    if base64_str.startswith(prefix):
        base64_str = base64_str.replace(prefix, "")
    raw = base64.b64decode(base64_str)
    pil_image = Image.open(BytesIO(raw)).convert("RGBA")
    return np.array(pil_image) if numpy else pil_image
+
+
class LogicalImage(gr.Textbox):
    """Hidden ``gr.Textbox`` that transports images between the browser canvas
    and Python as base64 PNG data URLs.

    ``preprocess`` decodes browser -> Python (numpy array or PIL image,
    controlled by `numpy`); ``postprocess`` encodes Python -> browser.
    ``infotext`` carries PNG-info metadata across the round trip.
    """

    @wraps(gr.Textbox.__init__)
    def __init__(self, *args, numpy=True, **kwargs):
        self.numpy = numpy
        self.infotext = dict()

        if "value" in kwargs:
            initial_value = kwargs["value"]
            if initial_value is not None:
                # BUG FIX: was `self.image_to_base64(initial_value)`, but no such
                # method exists on this class (AttributeError on any initial
                # image); the encoder is the module-level helper. Pass
                # numpy=self.numpy for consistency with postprocess().
                kwargs["value"] = image_to_base64(initial_value, numpy=self.numpy)
            else:
                del kwargs["value"]

        super().__init__(*args, **kwargs)

    def preprocess(self, payload):
        """Decode the browser payload; returns None for anything that is not a
        PNG data URL (e.g. the empty string meaning "no image")."""
        if not isinstance(payload, str):
            return None

        if not payload.startswith("data:image/png;base64,"):
            return None

        image = base64_to_image(payload, numpy=self.numpy)
        # Re-attach the metadata captured in the last postprocess() call
        # (only PIL images have .info; numpy arrays do not).
        if hasattr(image, "info"):
            image.info = self.infotext

        return image

    def postprocess(self, value):
        """Encode an outgoing image as a data URL, stashing its metadata."""
        if value is None:
            return None

        if hasattr(value, "info"):
            self.infotext = value.info

        return image_to_base64(value, numpy=self.numpy)

    def get_block_name(self):
        # Render with the plain textbox frontend; visibility is handled by callers.
        return "textbox"
+
+
class ForgeCanvas:
    """Composite gradio widget: an HTML canvas (pan/zoom + scribbles) backed by
    two hidden LogicalImage textboxes for the background and foreground layers.

    The JS side (canvas.js) is instantiated via a Blocks load event; Python
    booleans are interpolated directly into the JS call, which relies on the
    `const True/False` shims defined at the bottom of canvas.js.
    """

    def __init__(self, no_upload=False, no_scribbles=False, contrast_scribbles=False, height=None, scribble_color="#000000", scribble_color_fixed=False, scribble_width=25, scribble_width_fixed=False, scribble_alpha=100, scribble_alpha_fixed=False, scribble_softness=0, scribble_softness_fixed=False, visible=True, numpy=False, initial_image=None, elem_id=None, elem_classes=None):
        # Unique per-instance id; every DOM element id in canvas.html embeds it.
        self.uuid = "uuid_" + uuid.uuid4().hex

        canvas_html_uuid = canvas_html.replace("forge_mixin", self.uuid)

        # Optional appearance tweaks driven by user options.
        if opts.forge_canvas_plain:
            canvas_html_uuid = canvas_html_uuid.replace('class="forge-image-container"', f'class="forge-image-container plain" style="background-color: {opts.forge_canvas_plain_color}"').replace('stroke="white"', "stroke=#444")
        if opts.forge_canvas_toolbar_always:
            canvas_html_uuid = canvas_html_uuid.replace('class="forge-toolbar"', 'class="forge-toolbar-static"')

        self.block = gr.HTML(canvas_html_uuid, visible=visible, elem_id=elem_id, elem_classes=elem_classes)
        self.foreground = LogicalImage(visible=DEBUG_MODE, label="foreground", numpy=numpy, elem_id=self.uuid, elem_classes=["logical_image_foreground"])
        self.background = LogicalImage(visible=DEBUG_MODE, label="background", numpy=numpy, value=initial_image, elem_id=self.uuid, elem_classes=["logical_image_background"])
        # Instantiate the JS ForgeCanvas once the page loads; argument order
        # must match the constructor in canvas.js.
        Context.root_block.load(None, js=f'async ()=>{{new ForgeCanvas("{self.uuid}", {no_upload}, {no_scribbles}, {contrast_scribbles}, {height or opts.forge_canvas_height}, ' f"'{scribble_color}', {scribble_color_fixed}, {scribble_width}, {scribble_width_fixed}, {opts.forge_canvas_consistent_brush}, " f"{scribble_alpha}, {scribble_alpha_fixed}, {scribble_softness}, {scribble_softness_fixed});}}")
diff --git a/modules_forge/forge_version.py b/modules_forge/forge_version.py
new file mode 100644
index 0000000000000000000000000000000000000000..6032f8313fb1c6a8006cebe53bc610bfb7d61984
--- /dev/null
+++ b/modules_forge/forge_version.py
@@ -0,0 +1 @@
+version = "neo"
diff --git a/modules_forge/initialization.py b/modules_forge/initialization.py
new file mode 100644
index 0000000000000000000000000000000000000000..37ec8a8d31e3b1e5b9a33080e574f21435db8f28
--- /dev/null
+++ b/modules_forge/initialization.py
@@ -0,0 +1,80 @@
+import os
+import sys
+
+from modules.timer import startup_timer
+
+INITIALIZED = False
+
+
def initialize_forge():
    """One-time Forge backend initialization.

    Sets up the GPU device selection, optional cudaMalloc allocator, torch
    warmup, HuggingFace cache locations, and base patches. Idempotent: only
    the first call does any work. Imports are deliberately deferred so that
    environment variables (CUDA_VISIBLE_DEVICES, allocator config) are set
    before torch / the backend are imported.
    """
    global INITIALIZED
    if INITIALIZED:
        return

    INITIALIZED = True

    # Make the bundled third-party packages under modules_forge/packages importable.
    sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "modules_forge", "packages"))

    from backend.args import args

    # Must happen before torch is imported for device masking to take effect.
    if args.gpu_device_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_device_id)
        print("Set device to:", args.gpu_device_id)

    if args.cuda_malloc:
        from modules_forge.cuda_malloc import try_cuda_malloc

        try_cuda_malloc()
        startup_timer.record("cuda_malloc")

    from backend import memory_management

    startup_timer.record("memory_management")

    import torch
    import torchvision  # noqa: F401

    startup_timer.record("import torch")

    # Touch the device with a tiny tensor to trigger lazy CUDA initialization
    # now rather than during the first generation.
    device = memory_management.get_torch_device()
    torch.zeros((1, 1)).to(device, torch.float32)
    memory_management.soft_empty_cache()

    startup_timer.record("tensor warmup")

    from backend import stream

    print("CUDA Using Stream:", stream.should_use_stream())

    startup_timer.record("stream")

    from modules_forge.shared import diffusers_dir

    # Point every HuggingFace cache variable at the local diffusers dir,
    # unless the user has already configured them.
    if "HF_HOME" not in os.environ:
        os.environ["HF_HOME"] = diffusers_dir

    if "HF_DATASETS_CACHE" not in os.environ:
        os.environ["HF_DATASETS_CACHE"] = diffusers_dir

    if "HUGGINGFACE_HUB_CACHE" not in os.environ:
        os.environ["HUGGINGFACE_HUB_CACHE"] = diffusers_dir

    if "HUGGINGFACE_ASSETS_CACHE" not in os.environ:
        os.environ["HUGGINGFACE_ASSETS_CACHE"] = diffusers_dir

    if "HF_HUB_CACHE" not in os.environ:
        os.environ["HF_HUB_CACHE"] = diffusers_dir

    startup_timer.record("diffusers_dir")

    from modules_forge import patch_basic

    patch_basic.patch_all_basics()

    startup_timer.record("patch basics")

    from backend.huggingface import process

    process()

    startup_timer.record("decompress tokenizers")
diff --git a/modules_forge/main_entry.py b/modules_forge/main_entry.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b269086ff0ecb06ea4cf68a939999dcd2f4f7f5
--- /dev/null
+++ b/modules_forge/main_entry.py
@@ -0,0 +1,343 @@
+import os
+
+import gradio as gr
+import torch
+from gradio.context import Context
+
+from backend import memory_management, operations, stream
+from backend.args import dynamic_args
+from modules import infotext_utils, paths, processing, sd_models, shared, shared_items, ui_common
+
# Total GPU memory in MB, used to convert between "GPU weights" and
# "inference memory" budgets (the two are complements).
total_vram = int(memory_management.total_vram)

# Gradio components built in make_checkpoint_manager_ui(); kept at module
# level so forge_main_entry() can wire them to the rest of the UI later.
ui_forge_preset: gr.Radio = None

ui_checkpoint: gr.Dropdown = None
ui_vae: gr.Dropdown = None
ui_clip_skip: gr.Slider = None

ui_forge_unet_storage_dtype_options: gr.Radio = None
ui_forge_async_loading: gr.Radio = None
ui_forge_pin_shared_memory: gr.Radio = None
ui_forge_inference_memory: gr.Slider = None


# UI label -> (storage dtype, use-fp16-LoRA flag) for UNet weight storage.
forge_unet_storage_dtype_options = {
    "Automatic": (None, False),
    "Automatic (fp16 LoRA)": (None, True),
    "float8-e4m3fn": (torch.float8_e4m3fn, False),
    "float8-e4m3fn (fp16 LoRA)": (torch.float8_e4m3fn, True),
}

# bitsandbytes quantization options; only offered when bnb is available.
bnb_storage_dtype_options = {
    "bnb-nf4": ("nf4", False),
    "bnb-nf4 (fp16 LoRA)": ("nf4", True),
    "bnb-fp4": ("fp4", False),
    "bnb-fp4 (fp16 LoRA)": ("fp4", True),
}

if operations.bnb_available:
    forge_unet_storage_dtype_options.update(bnb_storage_dtype_options)

# VAE / text-encoder module file name -> full path; populated by refresh_models().
module_list = {}
+
+
def bind_to_opts(comp, k, save=False, callback=None):
    """Wire gradio component `comp` so that changing it writes option `k`,
    optionally persisting the config file and invoking `callback`."""

    def _apply(new_value):
        shared.opts.set(k, new_value)
        if save:
            shared.opts.save(shared.config_filename)
        if callback is not None:
            callback()

    comp.change(_apply, inputs=[comp], queue=False, show_progress=False)
+
+
def make_checkpoint_manager_ui():
    """Build the top-of-page model manager UI (preset, checkpoint, VAE/TE,
    dtype, swap and memory controls) and bind all of its events.

    Components are stored in the module-level ui_* globals so that
    forge_main_entry() can reference them afterwards.
    """
    global ui_checkpoint, ui_vae, ui_clip_skip, ui_forge_unet_storage_dtype_options, ui_forge_async_loading, ui_forge_pin_shared_memory, ui_forge_inference_memory, ui_forge_preset

    # No checkpoint configured yet: pick the first discovered one as default.
    if shared.opts.sd_model_checkpoint in [None, "None", "none", ""]:
        if len(sd_models.checkpoints_list) == 0:
            sd_models.list_models()
        if len(sd_models.checkpoints_list) > 0:
            shared.opts.set("sd_model_checkpoint", next(iter(sd_models.checkpoints_list.values())).name)

    ui_forge_preset = gr.Radio(label="UI Preset", value=lambda: shared.opts.forge_preset, choices=("sd", "xl", "flux", "qwen", "lumina", "wan"), elem_id="forge_ui_preset")

    ui_checkpoint = gr.Dropdown(label="Checkpoint", value=None, choices=None, elem_id="setting_sd_model_checkpoint", elem_classes=["model_selection"])

    ui_vae = gr.Dropdown(label="VAE / Text Encoder", value=None, choices=None, multiselect=True)

    def gr_refresh_models():
        # Refresh button: rescan disk and update both dropdowns' choices.
        ckpt_list, vae_list = refresh_models()
        return gr.update(choices=ckpt_list), gr.update(choices=vae_list)

    refresh_button = ui_common.ToolButton(value=ui_common.refresh_symbol, elem_id="forge_refresh_checkpoint", tooltip="Refresh")
    refresh_button.click(fn=gr_refresh_models, outputs=[ui_checkpoint, ui_vae], queue=False)

    def gr_refresh_on_load():
        # Page load: also select the current values, not just the choices.
        ckpt_list, vae_list = refresh_models()
        refresh_memory_management_settings()
        return [gr.update(value=shared.opts.sd_model_checkpoint, choices=ckpt_list), gr.update(value=[os.path.basename(x) for x in shared.opts.forge_additional_modules], choices=vae_list)]

    Context.root_block.load(fn=gr_refresh_on_load, outputs=[ui_checkpoint, ui_vae], show_progress=False, queue=False)

    ui_forge_unet_storage_dtype_options = gr.Dropdown(label="Diffusion in Low Bits", value=lambda: shared.opts.forge_unet_storage_dtype, choices=list(forge_unet_storage_dtype_options.keys()))
    bind_to_opts(ui_forge_unet_storage_dtype_options, "forge_unet_storage_dtype", save=True, callback=refresh_model_loading_parameters)

    ui_forge_async_loading = gr.Radio(label="Swap Method", value=lambda: shared.opts.forge_async_loading, choices=["Queue", "Async"])
    ui_forge_pin_shared_memory = gr.Radio(label="Swap Location", value=lambda: shared.opts.forge_pin_shared_memory, choices=["CPU", "Shared"])
    # Slider shows the *weights* budget: total VRAM minus inference memory.
    ui_forge_inference_memory = gr.Slider(label="GPU Weights (MB)", value=lambda: total_vram - shared.opts.forge_inference_memory, minimum=0, maximum=int(memory_management.total_vram), step=1)

    mem_comps = [ui_forge_inference_memory, ui_forge_async_loading, ui_forge_pin_shared_memory]

    ui_forge_inference_memory.change(ui_refresh_memory_management_settings, inputs=mem_comps, queue=False, show_progress=False)
    ui_forge_async_loading.change(ui_refresh_memory_management_settings, inputs=mem_comps, queue=False, show_progress=False)
    ui_forge_pin_shared_memory.change(ui_refresh_memory_management_settings, inputs=mem_comps, queue=False, show_progress=False)

    ui_clip_skip = gr.Slider(label="Clip Skip", value=lambda: shared.opts.CLIP_stop_at_last_layers, minimum=1, maximum=12, step=1)
    bind_to_opts(ui_clip_skip, "CLIP_stop_at_last_layers", save=True)

    ui_checkpoint.change(checkpoint_change, inputs=[ui_checkpoint, ui_forge_preset], show_progress=False)
    ui_vae.change(modules_change, inputs=[ui_vae, ui_forge_preset], queue=False, show_progress=False)
+
+
def find_files_with_extensions(base_path, extensions):
    """Recursively scan `base_path` and return ``{file_name: full_path}`` for
    every file whose name ends with one of `extensions`.

    Matching is a raw suffix test (callers pass extensions without a leading
    dot). When the same file name appears in multiple directories, the one
    walked last wins.
    """
    suffixes = tuple(extensions)  # hoisted: one C-level endswith() call per file
    found_files = {}
    for root, _, files in os.walk(base_path):
        for file in files:
            if file.endswith(suffixes):
                found_files[file] = os.path.join(root, file)
    return found_files
+
+
def refresh_models():
    """Rescan checkpoints and VAE / text-encoder modules from disk.

    Rebuilds the global `module_list` (file name -> full path) and returns
    (sorted checkpoint titles, sorted module file names) for the dropdowns.
    """
    global module_list

    shared_items.refresh_checkpoints()
    ckpt_list = shared_items.list_checkpoint_tiles(shared.opts.sd_checkpoint_dropdown_use_short)

    # Raw suffixes, no leading dot (see find_files_with_extensions).
    file_extensions = ("ckpt", "pt", "pth", "bin", "safetensors", "sft", "gguf")

    module_list.clear()

    # De-duplicated set of search roots: default model dirs plus any extra
    # directories supplied via command-line options.
    module_paths: set[str] = {
        os.path.abspath(os.path.join(paths.models_path, "VAE")),
        os.path.abspath(os.path.join(paths.models_path, "text_encoder")),
        *shared.cmd_opts.vae_dirs,
        *shared.cmd_opts.text_encoder_dirs,
    }

    for vae_path in module_paths:
        vae_files = find_files_with_extensions(vae_path, file_extensions)
        module_list.update(vae_files)

    return sorted(ckpt_list), sorted(module_list.keys())
+
+
def ui_refresh_memory_management_settings(model_memory, async_loading, pin_shared_memory):
    """UI adapter for the "GPU Weights" slider: the slider reports the model
    weight budget (MB), so forward it as `model_memory` and let the core
    routine derive the inference memory from it."""
    refresh_memory_management_settings(
        async_loading=async_loading,
        pin_shared_memory=pin_shared_memory,
        model_memory=model_memory,
    )
+
+
def refresh_memory_management_settings(async_loading=None, inference_memory=None, pin_shared_memory=None, model_memory=None):
    """Apply swap/memory settings to the backend and persist them into opts.

    `inference_memory` and `model_memory` are complements (both in MB:
    ``model_memory = total_vram - inference_memory``); pass at most one.
    Arguments left as None fall back to the currently saved option values.
    Flags a global model unload so the next generation uses the new budget.
    """
    # Fallback to defaults if values are not passed
    async_loading = async_loading if async_loading is not None else shared.opts.forge_async_loading
    inference_memory = inference_memory if inference_memory is not None else shared.opts.forge_inference_memory
    pin_shared_memory = pin_shared_memory if pin_shared_memory is not None else shared.opts.forge_pin_shared_memory

    # If model_memory is provided, calculate inference memory accordingly, otherwise use inference_memory directly
    if model_memory is None:
        model_memory = total_vram - inference_memory
    else:
        inference_memory = total_vram - model_memory

    shared.opts.set("forge_async_loading", async_loading)
    shared.opts.set("forge_inference_memory", inference_memory)
    shared.opts.set("forge_pin_shared_memory", pin_shared_memory)

    stream.stream_activated = async_loading == "Async"
    memory_management.current_inference_memory = inference_memory * 1024 * 1024  # Convert MB to bytes
    memory_management.PIN_SHARED_MEMORY = pin_shared_memory == "Shared"

    log_dict = dict(stream=stream.should_use_stream(), inference_memory=memory_management.minimum_inference_memory() / (1024 * 1024), pin_shared_memory=memory_management.PIN_SHARED_MEMORY)

    print(f"Environment vars changed: {log_dict}")

    # Warn when essentially no VRAM is left for computation (< 512 MB or < 5%).
    if inference_memory < min(512, total_vram * 0.05):
        print("------------------")
        print(f"[Low VRAM Warning] You just set Forge to use 100% GPU memory ({model_memory:.2f} MB) to load model weights.")
        print("[Low VRAM Warning] This means you will have 0% GPU memory (0.00 MB) to do matrix computation. Computations may fallback to CPU or go Out of Memory.")
        print("[Low VRAM Warning] In many cases, image generation will be 10x slower.")
        print("[Low VRAM Warning] To solve the problem, you can set the 'GPU Weights' (on the top of page) to a lower value.")
        print("[Low VRAM Warning] If you cannot find 'GPU Weights', you can click the 'all' option in the 'UI' area on the left-top corner of the webpage.")
        print("[Low VRAM Warning] Make sure that you know what you are testing.")
        print("------------------")
    else:
        compute_percentage = (inference_memory / total_vram) * 100.0
        print(f"[GPU Setting] You will use {(100 - compute_percentage):.2f}% GPU memory ({model_memory:.2f} MB) to load weights, and use {compute_percentage:.2f}% GPU memory ({inference_memory:.2f} MB) to do matrix computation.")

    processing.need_global_unload = True
+
+
def refresh_model_loading_parameters():
    """Recompute the loader parameters (checkpoint, extra modules, UNet storage
    dtype, online-LoRA flag) from the current options and flag a global unload
    so the next generation reloads the model with them."""
    from modules.sd_models import model_data, select_checkpoint

    checkpoint_info = select_checkpoint()

    # Map the UI dtype label to (storage dtype, use-fp16-LoRA flag);
    # unknown labels fall back to fully automatic.
    unet_storage_dtype, lora_fp16 = forge_unet_storage_dtype_options.get(shared.opts.forge_unet_storage_dtype, (None, False))

    dynamic_args["online_lora"] = lora_fp16

    model_data.forge_loading_parameters = dict(checkpoint_info=checkpoint_info, additional_modules=shared.opts.forge_additional_modules, unet_storage_dtype=unet_storage_dtype)

    print(f"Model selected: {model_data.forge_loading_parameters}")
    print(f"Using online LoRAs in FP16: {lora_fp16}")
    processing.need_global_unload = True
+
+
def checkpoint_change(ckpt_name: str, preset: str, save=True, refresh=True) -> bool:
    """Select checkpoint `ckpt_name` (aliases accepted); returns True when the
    selection actually changed. Also remembers the choice per UI preset."""
    selected = sd_models.get_closet_checkpoint_match(ckpt_name)
    current = sd_models.get_closet_checkpoint_match(shared.opts.data.get("sd_model_checkpoint", ""))
    if selected == current:
        return False

    shared.opts.set("sd_model_checkpoint", ckpt_name)
    if preset is not None:
        shared.opts.set(f"forge_checkpoint_{preset}", ckpt_name)

    if save:
        shared.opts.save(shared.config_filename)
    if refresh:
        refresh_model_loading_parameters()
    return True
+
+
def modules_change(module_values: list, preset: str, save=True, refresh=True) -> bool:
    """Select the VAE / text-encoder modules; `module_values` accepts file
    paths or bare module names. Returns True when the selection changed."""
    # Resolve names (basename handles full paths) against the known modules;
    # unknown entries are silently dropped.
    modules = [
        module_list[name]
        for name in (os.path.basename(v) for v in module_values)
        if name in module_list
    ]

    # Order-insensitive comparison against the stored selection.
    if sorted(modules) == sorted(shared.opts.data.get("forge_additional_modules", [])):
        return False

    shared.opts.set("forge_additional_modules", modules)
    if preset is not None:
        shared.opts.set(f"forge_additional_modules_{preset}", modules)

    if save:
        shared.opts.save(shared.config_filename)
    if refresh:
        refresh_model_loading_parameters()
    return True
+
+
def get_a1111_ui_component(tab, label):
    """Find the gradio component registered under infotext field `label`
    (matched against either the display label or the API name) on `tab`;
    returns None when no field matches."""
    for field in infotext_utils.paste_fields[tab]["fields"]:
        if label in (field.label, field.api):
            return field.component
    return None
+
+
def forge_main_entry():
    """Wire the preset radio to every preset-dependent A1111 component.

    Looks up the txt2img/img2img components via the infotext registry, then
    binds on_preset_change() to both the radio's change event and page load.
    The order of `output_targets` must match the return order of
    on_preset_change().
    """
    ui_txt2img_width = get_a1111_ui_component("txt2img", "Size-1")
    ui_txt2img_height = get_a1111_ui_component("txt2img", "Size-2")
    ui_txt2img_cfg = get_a1111_ui_component("txt2img", "CFG scale")
    ui_txt2img_distilled_cfg = get_a1111_ui_component("txt2img", "Distilled CFG Scale")
    ui_txt2img_sampler = get_a1111_ui_component("txt2img", "sampler_name")
    ui_txt2img_scheduler = get_a1111_ui_component("txt2img", "scheduler")

    ui_img2img_width = get_a1111_ui_component("img2img", "Size-1")
    ui_img2img_height = get_a1111_ui_component("img2img", "Size-2")
    ui_img2img_cfg = get_a1111_ui_component("img2img", "CFG scale")
    ui_img2img_distilled_cfg = get_a1111_ui_component("img2img", "Distilled CFG Scale")
    ui_img2img_sampler = get_a1111_ui_component("img2img", "sampler_name")
    ui_img2img_scheduler = get_a1111_ui_component("img2img", "scheduler")

    ui_txt2img_hr_cfg = get_a1111_ui_component("txt2img", "Hires CFG Scale")
    ui_txt2img_hr_distilled_cfg = get_a1111_ui_component("txt2img", "Hires Distilled CFG Scale")

    ui_txt2img_batch_size = get_a1111_ui_component("txt2img", "Batch size")
    ui_img2img_batch_size = get_a1111_ui_component("img2img", "Batch size")

    output_targets = [
        ui_checkpoint,
        ui_vae,
        ui_clip_skip,
        ui_forge_unet_storage_dtype_options,
        ui_forge_async_loading,
        ui_forge_pin_shared_memory,
        ui_forge_inference_memory,
        ui_txt2img_width,
        ui_img2img_width,
        ui_txt2img_height,
        ui_img2img_height,
        ui_txt2img_cfg,
        ui_img2img_cfg,
        ui_txt2img_distilled_cfg,
        ui_img2img_distilled_cfg,
        ui_txt2img_sampler,
        ui_img2img_sampler,
        ui_txt2img_scheduler,
        ui_img2img_scheduler,
        ui_txt2img_hr_cfg,
        ui_txt2img_hr_distilled_cfg,
        ui_txt2img_batch_size,
        ui_img2img_batch_size,
    ]

    # After the preset is applied, the LoRA list is refreshed client-side.
    ui_forge_preset.change(on_preset_change, inputs=[ui_forge_preset], outputs=output_targets, queue=False, show_progress=False).then(js="clickLoraRefresh", fn=None, queue=False, show_progress=False)
    Context.root_block.load(on_preset_change, inputs=[ui_forge_preset], outputs=output_targets, queue=False, show_progress=False)

    refresh_model_loading_parameters()
+
+
def on_preset_change(preset: str):
    """Persist the chosen UI preset and return gr.update()s for every
    preset-dependent component, in the exact order of `output_targets` in
    forge_main_entry()."""
    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would be more robust if None can actually reach here.
    assert preset is not None
    shared.opts.set("forge_preset", preset)
    shared.opts.save(shared.config_filename)

    # Per-preset GPU weight budget (MB), clamped to a sane range.
    model_mem = getattr(shared.opts, f"{preset}_gpu_mb", total_vram - 1024)
    if model_mem < 0 or model_mem > total_vram:
        model_mem = total_vram - 1024

    # Which controls are relevant for this model family.
    show_clip_skip = preset not in ("qwen", "lumina", "wan")
    show_basic_mem = preset != "sd"
    show_adv_mem = preset in ("flux", "qwen", "wan")
    distilled = preset in ("flux", "lumina", "wan")
    d_label = "Distilled CFG Scale" if preset == "flux" else "Shift"
    # wan is a video model: the batch slider becomes a frame count.
    batch_args = {"minimum": 1, "maximum": 97, "step": 16, "label": "Frames", "value": 1} if preset == "wan" else {"minimum": 1, "maximum": 8, "step": 1, "label": "Batch size", "value": 1}

    additional_modules = [os.path.basename(x) for x in getattr(shared.opts, f"forge_additional_modules_{preset}", [])]

    return [
        gr.update(value=getattr(shared.opts, f"forge_checkpoint_{preset}", shared.opts.sd_model_checkpoint)),  # ui_checkpoint
        gr.update(value=additional_modules),  # ui_vae
        gr.update(visible=show_clip_skip, value=getattr(shared.opts, "CLIP_stop_at_last_layers", 2)),  # ui_clip_skip
        gr.update(visible=show_basic_mem, value=getattr(shared.opts, "forge_unet_storage_dtype", "Automatic")),  # ui_forge_unet_storage_dtype_options
        gr.update(visible=show_adv_mem, value=getattr(shared.opts, "forge_async_loading", "Queue")),  # ui_forge_async_loading
        gr.update(visible=show_adv_mem, value=getattr(shared.opts, "forge_pin_shared_memory", "CPU")),  # ui_forge_pin_shared_memory
        gr.update(visible=show_basic_mem, value=model_mem),  # ui_forge_inference_memory
        gr.update(value=getattr(shared.opts, f"{preset}_t2i_width", 768)),  # ui_txt2img_width
        gr.update(value=getattr(shared.opts, f"{preset}_i2i_width", 768)),  # ui_img2img_width
        gr.update(value=getattr(shared.opts, f"{preset}_t2i_height", 768)),  # ui_txt2img_height
        gr.update(value=getattr(shared.opts, f"{preset}_i2i_height", 768)),  # ui_img2img_height
        gr.update(value=getattr(shared.opts, f"{preset}_t2i_cfg", 1.0)),  # ui_txt2img_cfg
        gr.update(value=getattr(shared.opts, f"{preset}_i2i_cfg", 1.0)),  # ui_img2img_cfg
        gr.update(visible=distilled, label=d_label, value=getattr(shared.opts, f"{preset}_t2i_d_cfg", 3.0)),  # ui_txt2img_distilled_cfg
        gr.update(visible=distilled, label=d_label, value=getattr(shared.opts, f"{preset}_i2i_d_cfg", 3.0)),  # ui_img2img_distilled_cfg
        gr.update(value=getattr(shared.opts, f"{preset}_t2i_sampler", "Euler")),  # ui_txt2img_sampler
        gr.update(value=getattr(shared.opts, f"{preset}_i2i_sampler", "Euler")),  # ui_img2img_sampler
        gr.update(value=getattr(shared.opts, f"{preset}_t2i_scheduler", "Simple")),  # ui_txt2img_scheduler
        gr.update(value=getattr(shared.opts, f"{preset}_i2i_scheduler", "Simple")),  # ui_img2img_scheduler
        gr.update(value=getattr(shared.opts, f"{preset}_t2i_hr_cfg", 1.0)),  # ui_txt2img_hr_cfg
        gr.update(visible=distilled, label=d_label, value=getattr(shared.opts, f"{preset}_t2i_hr_d_cfg", 3.0)),  # ui_txt2img_hr_distilled_cfg
        gr.update(**batch_args),  # ui_txt2img_batch_size
        gr.update(**batch_args),  # ui_img2img_batch_size
    ]
diff --git a/modules_forge/main_thread.py b/modules_forge/main_thread.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb3e7889de36cbfad39fcabf1c75942c11acb488
--- /dev/null
+++ b/modules_forge/main_thread.py
@@ -0,0 +1,77 @@
+# This file is the main thread that handles all gradio calls for major t2i or i2i processing.
+# Other gradio calls (like those from extensions) are not influenced.
+# By using one single thread to process all major calls, model moving is significantly faster.
+
+
+import time
+import traceback
+import threading
+
+
lock = threading.Lock()  # guards last_id, waiting_list and finished_list
last_id = 0  # monotonically increasing task id
waiting_list = []  # FIFO of Tasks queued for the worker thread
finished_list = []  # completed Tasks awaiting pickup by their caller
last_exception = None  # exception from the most recent task, or None on success
+
+
class Task:
    """A unit of work to be executed on the main processing thread.

    After work() runs, exactly one of `result` / `exception` is set; the
    module-level `last_exception` mirrors the failure state of the most
    recently executed task.
    """

    def __init__(self, task_id, func, args, kwargs):
        self.task_id = task_id
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.result = None
        self.exception = None

    def work(self):
        """Run the wrapped callable, recording its result or its exception."""
        global last_exception
        try:
            outcome = self.func(*self.args, **self.kwargs)
        except Exception as e:
            # Print the full traceback so failures inside queued work are
            # visible in the console, then keep the exception for the caller.
            traceback.print_exc()
            print(e)
            self.exception = e
            last_exception = e
        else:
            self.result = outcome
            self.exception = None
            last_exception = None
+
+
def loop():
    """Worker entry point: forever drain `waiting_list`, running one task at a
    time. The task executes *outside* the lock so queuing new work is never
    blocked by a long-running generation."""
    global lock, last_id, waiting_list, finished_list
    while True:
        time.sleep(0.01)  # 10 ms poll keeps idle CPU usage negligible
        if len(waiting_list) > 0:
            with lock:
                task = waiting_list.pop(0)

            task.work()

            with lock:
                finished_list.append(task)
+
+
def async_run(func, *args, **kwargs):
    """Queue `func(*args, **kwargs)` for the worker thread and return the new
    task's id (use run_and_wait_result to block on completion)."""
    global last_id
    with lock:
        last_id += 1
        task = Task(task_id=last_id, func=func, args=args, kwargs=kwargs)
        waiting_list.append(task)
    return task.task_id
+
+
def run_and_wait_result(func, *args, **kwargs):
    """Queue `func` on the worker thread and block until it finishes, polling
    every 10 ms; returns the task's result.

    NOTE(review): if the task raised, this returns None (the exception is kept
    in `task.exception` / module-level `last_exception`) — callers must
    tolerate a None result.
    """
    global lock, last_id, waiting_list, finished_list
    current_id = async_run(func, *args, **kwargs)
    while True:
        time.sleep(0.01)
        finished_task = None
        for t in finished_list.copy():  # thread safe shallow copy without needing a lock
            if t.task_id == current_id:
                finished_task = t
                break
        if finished_task is not None:
            with lock:
                finished_list.remove(finished_task)
            return finished_task.result
+
diff --git a/modules_forge/packages/comfy/LICENSE b/modules_forge/packages/comfy/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7
--- /dev/null
+++ b/modules_forge/packages/comfy/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/modules_forge/packages/comfy/lora.py b/modules_forge/packages/comfy/lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..96719fb3d3e525a74cbce188f23206bf83da0afd
--- /dev/null
+++ b/modules_forge/packages/comfy/lora.py
@@ -0,0 +1,232 @@
+# https://github.com/comfyanonymous/ComfyUI/blob/v0.3.77/comfy/lora.py
+
+"""
+This file is part of ComfyUI.
+Copyright (C) 2024 Comfy
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import torch
+
+from modules_forge.packages.comfy import weight_adapter
+
+from .utils import flux_to_diffusers, unet_to_diffusers, z_image_to_diffusers
+
+LORA_CLIP_MAP = {
+ "mlp.fc1": "mlp_fc1",
+ "mlp.fc2": "mlp_fc2",
+ "self_attn.k_proj": "self_attn_k_proj",
+ "self_attn.q_proj": "self_attn_q_proj",
+ "self_attn.v_proj": "self_attn_v_proj",
+ "self_attn.out_proj": "self_attn_out_proj",
+}
+
+
+def load_lora(lora, to_load):
+
+ def convert_lora_bfl_control(sd): # BFL loras for Flux
+ sd_out = {}
+ for k in sd:
+ k_to = "diffusion_model.{}".format(k.replace(".lora_B.bias", ".diff_b").replace("_norm.scale", "_norm.scale.set_weight"))
+ sd_out[k_to] = sd[k]
+
+ sd_out["diffusion_model.img_in.reshape_weight"] = torch.tensor([sd["img_in.lora_B.weight"].shape[0], sd["img_in.lora_A.weight"].shape[1]])
+ return sd_out
+
+ if "img_in.lora_A.weight" in lora and "single_blocks.0.norm.key_norm.scale" in lora:
+ lora = convert_lora_bfl_control(lora)
+
+ patch_dict = {}
+ loaded_keys = set()
+ for x in to_load:
+ alpha_name = "{}.alpha".format(x)
+ alpha = None
+ if alpha_name in lora.keys():
+ alpha = lora[alpha_name].item()
+ loaded_keys.add(alpha_name)
+
+ dora_scale_name = "{}.dora_scale".format(x)
+ dora_scale = None
+ if dora_scale_name in lora.keys():
+ dora_scale = lora[dora_scale_name]
+ loaded_keys.add(dora_scale_name)
+
+ for adapter_cls in weight_adapter.adapters:
+ adapter = adapter_cls.load(x, lora, alpha, dora_scale, loaded_keys)
+ if adapter is not None:
+ patch_dict[to_load[x]] = adapter
+ loaded_keys.update(adapter.loaded_keys)
+ continue
+
+ w_norm_name = "{}.w_norm".format(x)
+ b_norm_name = "{}.b_norm".format(x)
+ w_norm = lora.get(w_norm_name, None)
+ b_norm = lora.get(b_norm_name, None)
+
+ if w_norm is not None:
+ loaded_keys.add(w_norm_name)
+ patch_dict[to_load[x]] = ("diff", (w_norm,))
+ if b_norm is not None:
+ loaded_keys.add(b_norm_name)
+ patch_dict["{}.bias".format(to_load[x][: -len(".weight")])] = ("diff", (b_norm,))
+
+ diff_name = "{}.diff".format(x)
+ diff_weight = lora.get(diff_name, None)
+ if diff_weight is not None:
+ patch_dict[to_load[x]] = ("diff", (diff_weight,))
+ loaded_keys.add(diff_name)
+
+ diff_bias_name = "{}.diff_b".format(x)
+ diff_bias = lora.get(diff_bias_name, None)
+ if diff_bias is not None:
+ patch_dict["{}.bias".format(to_load[x][: -len(".weight")])] = ("diff", (diff_bias,))
+ loaded_keys.add(diff_bias_name)
+
+ set_weight_name = "{}.set_weight".format(x)
+ set_weight = lora.get(set_weight_name, None)
+ if set_weight is not None:
+ patch_dict[to_load[x]] = ("set", (set_weight,))
+ loaded_keys.add(set_weight_name)
+
+ remaining_dict = {x: y for x, y in lora.items() if x not in loaded_keys}
+ return patch_dict, remaining_dict
+
+
+def model_lora_keys_clip(model, key_map={}):
+ sdk = model.state_dict().keys()
+ for k in sdk:
+ if k.endswith(".weight"):
+ key_map["text_encoders.{}".format(k[: -len(".weight")])] = k # generic lora format without any weird key names
+
+ text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
+ clip_l_present = False
+ clip_g_present = False
+ for b in range(32): # TODO: clean up
+ for c in LORA_CLIP_MAP:
+ k = "clip_h.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
+ if k in sdk:
+ lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
+ key_map[lora_key] = k
+ lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c])
+ key_map[lora_key] = k
+ lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) # diffusers lora
+ key_map[lora_key] = k
+
+ k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
+ if k in sdk:
+ lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
+ key_map[lora_key] = k
+ lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) # SDXL base
+ key_map[lora_key] = k
+ clip_l_present = True
+ lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) # diffusers lora
+ key_map[lora_key] = k
+
+ k = "clip_g.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
+ if k in sdk:
+ clip_g_present = True
+ if clip_l_present:
+ lora_key = "lora_te2_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) # SDXL base
+ key_map[lora_key] = k
+ lora_key = "text_encoder_2.text_model.encoder.layers.{}.{}".format(b, c) # diffusers lora
+ key_map[lora_key] = k
+ else:
+ lora_key = "lora_te_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) # TODO: test if this is correct for SDXL-Refiner
+ key_map[lora_key] = k
+ lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) # diffusers lora
+ key_map[lora_key] = k
+ lora_key = "lora_prior_te_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) # cascade lora: TODO put lora key prefix in the model config
+ key_map[lora_key] = k
+
+ for k in sdk:
+ if k.endswith(".weight") and k.startswith("t5xxl.transformer."): # OneTrainer SD3 and Flux lora
+ l_key = k[len("t5xxl.transformer.") : -len(".weight")]
+ t5_index = 1
+ if clip_g_present:
+ t5_index += 1
+ if clip_l_present:
+ t5_index += 1
+ if t5_index == 2:
+ key_map["lora_te{}_{}".format(t5_index, l_key.replace(".", "_"))] = k # OneTrainer Flux
+ t5_index += 1
+
+ key_map["lora_te{}_{}".format(t5_index, l_key.replace(".", "_"))] = k
+
+ return key_map
+
+
+def model_lora_keys_unet(model, key_map={}):
+ sd = model.state_dict()
+ sdk = sd.keys()
+
+ for k in sdk:
+ if k.startswith("diffusion_model."):
+ if k.endswith(".weight"):
+ key_lora = k[len("diffusion_model.") : -len(".weight")].replace(".", "_")
+ key_map["lora_unet_{}".format(key_lora)] = k
+ key_map["{}".format(k[: -len(".weight")])] = k # generic lora format without any weird key names
+ else:
+ key_map["{}".format(k)] = k # generic lora format for not .weight without any weird key names
+
+ diffusers_keys = unet_to_diffusers(model.diffusion_model.config)
+ for k in diffusers_keys:
+ if k.endswith(".weight"):
+ unet_key = "diffusion_model.{}".format(diffusers_keys[k])
+ key_lora = k[: -len(".weight")].replace(".", "_")
+ key_map["lora_unet_{}".format(key_lora)] = unet_key
+ key_map["lycoris_{}".format(key_lora)] = unet_key # simpletuner lycoris format
+
+ diffusers_lora_prefix = ["", "unet."]
+ for p in diffusers_lora_prefix:
+ diffusers_lora_key = "{}{}".format(p, k[: -len(".weight")].replace(".to_", ".processor.to_"))
+ if diffusers_lora_key.endswith(".to_out.0"):
+ diffusers_lora_key = diffusers_lora_key[:-2]
+ key_map[diffusers_lora_key] = unet_key
+
+ _model_name: str = model.config.huggingface_repo.lower()
+
+ if "flux" in _model_name or "chroma" in _model_name: # Diffusers lora Flux
+ diffusers_keys = flux_to_diffusers(model.diffusion_model.config, output_prefix="diffusion_model.")
+ for k in diffusers_keys:
+ if k.endswith(".weight"):
+ to = diffusers_keys[k]
+ key_map["transformer.{}".format(k[: -len(".weight")])] = to # simpletrainer and probably regular diffusers flux lora format
+ key_map["lycoris_{}".format(k[: -len(".weight")].replace(".", "_"))] = to # simpletrainer lycoris
+ key_map["lora_transformer_{}".format(k[: -len(".weight")].replace(".", "_"))] = to # onetrainer
+ for k in sdk:
+ hidden_size = model.diffusion_model.config.get("hidden_size", 0)
+ if k.endswith(".weight") and ".linear1." in k:
+ key_map["{}".format(k.replace(".linear1.weight", ".linear1_qkv"))] = (k, (0, 0, hidden_size * 3))
+
+ if "qwen" in _model_name:
+ for k in sdk:
+ if k.startswith("diffusion_model.") and k.endswith(".weight"): # QwenImage lora format
+ key_lora = k[len("diffusion_model.") : -len(".weight")]
+ # Direct mapping for transformer_blocks format (QwenImage LoRA format)
+ key_map["{}".format(key_lora)] = k
+ # Support transformer prefix format
+ key_map["transformer.{}".format(key_lora)] = k
+ key_map["lycoris_{}".format(key_lora.replace(".", "_"))] = k # SimpleTuner lycoris format
+
+ if "lumina" in _model_name or "z-image" in _model_name:
+ diffusers_keys = z_image_to_diffusers(model.diffusion_model.config, output_prefix="diffusion_model.")
+ for k in diffusers_keys:
+ if k.endswith(".weight"):
+ to = diffusers_keys[k]
+ key_lora = k[: -len(".weight")]
+ key_map["diffusion_model.{}".format(key_lora)] = to
+ key_map["lycoris_{}".format(key_lora.replace(".", "_"))] = to
+
+ return key_map
diff --git a/modules_forge/packages/comfy/utils.py b/modules_forge/packages/comfy/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a78adb5116e612d925b9db8359ea476910c5d96
--- /dev/null
+++ b/modules_forge/packages/comfy/utils.py
@@ -0,0 +1,336 @@
+# https://github.com/comfyanonymous/ComfyUI/blob/v0.3.77/comfy/utils.py
+
+"""
+This file is part of ComfyUI.
+Copyright (C) 2024 Comfy
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import torch
+
+UNET_MAP_ATTENTIONS = {
+ "proj_in.weight",
+ "proj_in.bias",
+ "proj_out.weight",
+ "proj_out.bias",
+ "norm.weight",
+ "norm.bias",
+}
+
+TRANSFORMER_BLOCKS = {
+ "norm1.weight",
+ "norm1.bias",
+ "norm2.weight",
+ "norm2.bias",
+ "norm3.weight",
+ "norm3.bias",
+ "attn1.to_q.weight",
+ "attn1.to_k.weight",
+ "attn1.to_v.weight",
+ "attn1.to_out.0.weight",
+ "attn1.to_out.0.bias",
+ "attn2.to_q.weight",
+ "attn2.to_k.weight",
+ "attn2.to_v.weight",
+ "attn2.to_out.0.weight",
+ "attn2.to_out.0.bias",
+ "ff.net.0.proj.weight",
+ "ff.net.0.proj.bias",
+ "ff.net.2.weight",
+ "ff.net.2.bias",
+}
+
+UNET_MAP_RESNET = {
+ "in_layers.2.weight": "conv1.weight",
+ "in_layers.2.bias": "conv1.bias",
+ "emb_layers.1.weight": "time_emb_proj.weight",
+ "emb_layers.1.bias": "time_emb_proj.bias",
+ "out_layers.3.weight": "conv2.weight",
+ "out_layers.3.bias": "conv2.bias",
+ "skip_connection.weight": "conv_shortcut.weight",
+ "skip_connection.bias": "conv_shortcut.bias",
+ "in_layers.0.weight": "norm1.weight",
+ "in_layers.0.bias": "norm1.bias",
+ "out_layers.0.weight": "norm2.weight",
+ "out_layers.0.bias": "norm2.bias",
+}
+
+UNET_MAP_BASIC = {
+ ("label_emb.0.0.weight", "class_embedding.linear_1.weight"),
+ ("label_emb.0.0.bias", "class_embedding.linear_1.bias"),
+ ("label_emb.0.2.weight", "class_embedding.linear_2.weight"),
+ ("label_emb.0.2.bias", "class_embedding.linear_2.bias"),
+ ("label_emb.0.0.weight", "add_embedding.linear_1.weight"),
+ ("label_emb.0.0.bias", "add_embedding.linear_1.bias"),
+ ("label_emb.0.2.weight", "add_embedding.linear_2.weight"),
+ ("label_emb.0.2.bias", "add_embedding.linear_2.bias"),
+ ("input_blocks.0.0.weight", "conv_in.weight"),
+ ("input_blocks.0.0.bias", "conv_in.bias"),
+ ("out.0.weight", "conv_norm_out.weight"),
+ ("out.0.bias", "conv_norm_out.bias"),
+ ("out.2.weight", "conv_out.weight"),
+ ("out.2.bias", "conv_out.bias"),
+ ("time_embed.0.weight", "time_embedding.linear_1.weight"),
+ ("time_embed.0.bias", "time_embedding.linear_1.bias"),
+ ("time_embed.2.weight", "time_embedding.linear_2.weight"),
+ ("time_embed.2.bias", "time_embedding.linear_2.bias"),
+}
+
+
+def unet_to_diffusers(unet_config):
+ if "num_res_blocks" not in unet_config:
+ return {}
+ num_res_blocks = unet_config["num_res_blocks"]
+ channel_mult = unet_config["channel_mult"]
+ transformer_depth = unet_config["transformer_depth"][:]
+ transformer_depth_output = unet_config["transformer_depth_output"][:]
+ num_blocks = len(channel_mult)
+
+ transformers_mid = unet_config.get("transformer_depth_middle", None)
+
+ diffusers_unet_map = {}
+ for x in range(num_blocks):
+ n = 1 + (num_res_blocks[x] + 1) * x
+ for i in range(num_res_blocks[x]):
+ for b in UNET_MAP_RESNET:
+ diffusers_unet_map["down_blocks.{}.resnets.{}.{}".format(x, i, UNET_MAP_RESNET[b])] = "input_blocks.{}.0.{}".format(n, b)
+ num_transformers = transformer_depth.pop(0)
+ if num_transformers > 0:
+ for b in UNET_MAP_ATTENTIONS:
+ diffusers_unet_map["down_blocks.{}.attentions.{}.{}".format(x, i, b)] = "input_blocks.{}.1.{}".format(n, b)
+ for t in range(num_transformers):
+ for b in TRANSFORMER_BLOCKS:
+ diffusers_unet_map["down_blocks.{}.attentions.{}.transformer_blocks.{}.{}".format(x, i, t, b)] = "input_blocks.{}.1.transformer_blocks.{}.{}".format(n, t, b)
+ n += 1
+ for k in ["weight", "bias"]:
+ diffusers_unet_map["down_blocks.{}.downsamplers.0.conv.{}".format(x, k)] = "input_blocks.{}.0.op.{}".format(n, k)
+
+ i = 0
+ for b in UNET_MAP_ATTENTIONS:
+ diffusers_unet_map["mid_block.attentions.{}.{}".format(i, b)] = "middle_block.1.{}".format(b)
+ for t in range(transformers_mid):
+ for b in TRANSFORMER_BLOCKS:
+ diffusers_unet_map["mid_block.attentions.{}.transformer_blocks.{}.{}".format(i, t, b)] = "middle_block.1.transformer_blocks.{}.{}".format(t, b)
+
+ for i, n in enumerate([0, 2]):
+ for b in UNET_MAP_RESNET:
+ diffusers_unet_map["mid_block.resnets.{}.{}".format(i, UNET_MAP_RESNET[b])] = "middle_block.{}.{}".format(n, b)
+
+ num_res_blocks = list(reversed(num_res_blocks))
+ for x in range(num_blocks):
+ n = (num_res_blocks[x] + 1) * x
+ l = num_res_blocks[x] + 1
+ for i in range(l):
+ c = 0
+ for b in UNET_MAP_RESNET:
+ diffusers_unet_map["up_blocks.{}.resnets.{}.{}".format(x, i, UNET_MAP_RESNET[b])] = "output_blocks.{}.0.{}".format(n, b)
+ c += 1
+ num_transformers = transformer_depth_output.pop()
+ if num_transformers > 0:
+ c += 1
+ for b in UNET_MAP_ATTENTIONS:
+ diffusers_unet_map["up_blocks.{}.attentions.{}.{}".format(x, i, b)] = "output_blocks.{}.1.{}".format(n, b)
+ for t in range(num_transformers):
+ for b in TRANSFORMER_BLOCKS:
+ diffusers_unet_map["up_blocks.{}.attentions.{}.transformer_blocks.{}.{}".format(x, i, t, b)] = "output_blocks.{}.1.transformer_blocks.{}.{}".format(n, t, b)
+ if i == l - 1:
+ for k in ["weight", "bias"]:
+ diffusers_unet_map["up_blocks.{}.upsamplers.0.conv.{}".format(x, k)] = "output_blocks.{}.{}.conv.{}".format(n, c, k)
+ n += 1
+
+ for k in UNET_MAP_BASIC:
+ diffusers_unet_map[k[1]] = k[0]
+
+ return diffusers_unet_map
+
+
+def swap_scale_shift(weight):
+ shift, scale = weight.chunk(2, dim=0)
+ new_weight = torch.cat([scale, shift], dim=0)
+ return new_weight
+
+
+def flux_to_diffusers(mmdit_config, output_prefix=""):
+ n_double_layers = mmdit_config.get("depth", 0)
+ n_single_layers = mmdit_config.get("depth_single_blocks", 0)
+ hidden_size = mmdit_config.get("hidden_size", 0)
+
+ key_map = {}
+ for index in range(n_double_layers):
+ prefix_from = "transformer_blocks.{}".format(index)
+ prefix_to = "{}double_blocks.{}".format(output_prefix, index)
+
+ for end in ("weight", "bias"):
+ k = "{}.attn.".format(prefix_from)
+ qkv = "{}.img_attn.qkv.{}".format(prefix_to, end)
+ key_map["{}to_q.{}".format(k, end)] = (qkv, (0, 0, hidden_size))
+ key_map["{}to_k.{}".format(k, end)] = (qkv, (0, hidden_size, hidden_size))
+ key_map["{}to_v.{}".format(k, end)] = (qkv, (0, hidden_size * 2, hidden_size))
+
+ k = "{}.attn.".format(prefix_from)
+ qkv = "{}.txt_attn.qkv.{}".format(prefix_to, end)
+ key_map["{}add_q_proj.{}".format(k, end)] = (qkv, (0, 0, hidden_size))
+ key_map["{}add_k_proj.{}".format(k, end)] = (qkv, (0, hidden_size, hidden_size))
+ key_map["{}add_v_proj.{}".format(k, end)] = (qkv, (0, hidden_size * 2, hidden_size))
+
+ block_map = {
+ "attn.to_out.0.weight": "img_attn.proj.weight",
+ "attn.to_out.0.bias": "img_attn.proj.bias",
+ "norm1.linear.weight": "img_mod.lin.weight",
+ "norm1.linear.bias": "img_mod.lin.bias",
+ "norm1_context.linear.weight": "txt_mod.lin.weight",
+ "norm1_context.linear.bias": "txt_mod.lin.bias",
+ "attn.to_add_out.weight": "txt_attn.proj.weight",
+ "attn.to_add_out.bias": "txt_attn.proj.bias",
+ "ff.net.0.proj.weight": "img_mlp.0.weight",
+ "ff.net.0.proj.bias": "img_mlp.0.bias",
+ "ff.net.2.weight": "img_mlp.2.weight",
+ "ff.net.2.bias": "img_mlp.2.bias",
+ "ff_context.net.0.proj.weight": "txt_mlp.0.weight",
+ "ff_context.net.0.proj.bias": "txt_mlp.0.bias",
+ "ff_context.net.2.weight": "txt_mlp.2.weight",
+ "ff_context.net.2.bias": "txt_mlp.2.bias",
+ "attn.norm_q.weight": "img_attn.norm.query_norm.scale",
+ "attn.norm_k.weight": "img_attn.norm.key_norm.scale",
+ "attn.norm_added_q.weight": "txt_attn.norm.query_norm.scale",
+ "attn.norm_added_k.weight": "txt_attn.norm.key_norm.scale",
+ }
+
+ for k in block_map:
+ key_map["{}.{}".format(prefix_from, k)] = "{}.{}".format(prefix_to, block_map[k])
+
+ for index in range(n_single_layers):
+ prefix_from = "single_transformer_blocks.{}".format(index)
+ prefix_to = "{}single_blocks.{}".format(output_prefix, index)
+
+ for end in ("weight", "bias"):
+ k = "{}.attn.".format(prefix_from)
+ qkv = "{}.linear1.{}".format(prefix_to, end)
+ key_map["{}to_q.{}".format(k, end)] = (qkv, (0, 0, hidden_size))
+ key_map["{}to_k.{}".format(k, end)] = (qkv, (0, hidden_size, hidden_size))
+ key_map["{}to_v.{}".format(k, end)] = (qkv, (0, hidden_size * 2, hidden_size))
+ key_map["{}.proj_mlp.{}".format(prefix_from, end)] = (qkv, (0, hidden_size * 3, hidden_size * 4))
+
+ block_map = {
+ "norm.linear.weight": "modulation.lin.weight",
+ "norm.linear.bias": "modulation.lin.bias",
+ "proj_out.weight": "linear2.weight",
+ "proj_out.bias": "linear2.bias",
+ "attn.norm_q.weight": "norm.query_norm.scale",
+ "attn.norm_k.weight": "norm.key_norm.scale",
+ }
+
+ for k in block_map:
+ key_map["{}.{}".format(prefix_from, k)] = "{}.{}".format(prefix_to, block_map[k])
+
+ MAP_BASIC = {
+ ("final_layer.linear.bias", "proj_out.bias"),
+ ("final_layer.linear.weight", "proj_out.weight"),
+ ("img_in.bias", "x_embedder.bias"),
+ ("img_in.weight", "x_embedder.weight"),
+ ("time_in.in_layer.bias", "time_text_embed.timestep_embedder.linear_1.bias"),
+ ("time_in.in_layer.weight", "time_text_embed.timestep_embedder.linear_1.weight"),
+ ("time_in.out_layer.bias", "time_text_embed.timestep_embedder.linear_2.bias"),
+ ("time_in.out_layer.weight", "time_text_embed.timestep_embedder.linear_2.weight"),
+ ("txt_in.bias", "context_embedder.bias"),
+ ("txt_in.weight", "context_embedder.weight"),
+ ("vector_in.in_layer.bias", "time_text_embed.text_embedder.linear_1.bias"),
+ ("vector_in.in_layer.weight", "time_text_embed.text_embedder.linear_1.weight"),
+ ("vector_in.out_layer.bias", "time_text_embed.text_embedder.linear_2.bias"),
+ ("vector_in.out_layer.weight", "time_text_embed.text_embedder.linear_2.weight"),
+ ("guidance_in.in_layer.bias", "time_text_embed.guidance_embedder.linear_1.bias"),
+ ("guidance_in.in_layer.weight", "time_text_embed.guidance_embedder.linear_1.weight"),
+ ("guidance_in.out_layer.bias", "time_text_embed.guidance_embedder.linear_2.bias"),
+ ("guidance_in.out_layer.weight", "time_text_embed.guidance_embedder.linear_2.weight"),
+ ("final_layer.adaLN_modulation.1.bias", "norm_out.linear.bias", swap_scale_shift),
+ ("final_layer.adaLN_modulation.1.weight", "norm_out.linear.weight", swap_scale_shift),
+ ("pos_embed_input.bias", "controlnet_x_embedder.bias"),
+ ("pos_embed_input.weight", "controlnet_x_embedder.weight"),
+ }
+
+ for k in MAP_BASIC:
+ if len(k) > 2:
+ key_map[k[1]] = ("{}{}".format(output_prefix, k[0]), None, k[2])
+ else:
+ key_map[k[1]] = "{}{}".format(output_prefix, k[0])
+
+ return key_map
+
+
+def z_image_to_diffusers(mmdit_config, output_prefix=""):
+ n_layers = mmdit_config.get("n_layers", 0)
+ hidden_size = mmdit_config.get("dim", 0)
+ n_context_refiner = mmdit_config.get("n_refiner_layers", 2)
+ n_noise_refiner = mmdit_config.get("n_refiner_layers", 2)
+ key_map = {}
+
+ def add_block_keys(prefix_from, prefix_to, has_adaln=True):
+ for end in ("weight", "bias"):
+ k = "{}.attention.".format(prefix_from)
+ qkv = "{}.attention.qkv.{}".format(prefix_to, end)
+ key_map["{}to_q.{}".format(k, end)] = (qkv, (0, 0, hidden_size))
+ key_map["{}to_k.{}".format(k, end)] = (qkv, (0, hidden_size, hidden_size))
+ key_map["{}to_v.{}".format(k, end)] = (qkv, (0, hidden_size * 2, hidden_size))
+
+ block_map = {
+ "attention.norm_q.weight": "attention.q_norm.weight",
+ "attention.norm_k.weight": "attention.k_norm.weight",
+ "attention.to_out.0.weight": "attention.out.weight",
+ "attention.to_out.0.bias": "attention.out.bias",
+ "attention_norm1.weight": "attention_norm1.weight",
+ "attention_norm2.weight": "attention_norm2.weight",
+ "feed_forward.w1.weight": "feed_forward.w1.weight",
+ "feed_forward.w2.weight": "feed_forward.w2.weight",
+ "feed_forward.w3.weight": "feed_forward.w3.weight",
+ "ffn_norm1.weight": "ffn_norm1.weight",
+ "ffn_norm2.weight": "ffn_norm2.weight",
+ }
+ if has_adaln:
+ block_map["adaLN_modulation.0.weight"] = "adaLN_modulation.0.weight"
+ block_map["adaLN_modulation.0.bias"] = "adaLN_modulation.0.bias"
+ for k, v in block_map.items():
+ key_map["{}.{}".format(prefix_from, k)] = "{}.{}".format(prefix_to, v)
+
+ for i in range(n_layers):
+ add_block_keys("layers.{}".format(i), "{}layers.{}".format(output_prefix, i))
+
+ for i in range(n_context_refiner):
+ add_block_keys("context_refiner.{}".format(i), "{}context_refiner.{}".format(output_prefix, i))
+
+ for i in range(n_noise_refiner):
+ add_block_keys("noise_refiner.{}".format(i), "{}noise_refiner.{}".format(output_prefix, i))
+
+ MAP_BASIC = [
+ ("final_layer.linear.weight", "all_final_layer.2-1.linear.weight"),
+ ("final_layer.linear.bias", "all_final_layer.2-1.linear.bias"),
+ ("final_layer.adaLN_modulation.1.weight", "all_final_layer.2-1.adaLN_modulation.1.weight"),
+ ("final_layer.adaLN_modulation.1.bias", "all_final_layer.2-1.adaLN_modulation.1.bias"),
+ ("x_embedder.weight", "all_x_embedder.2-1.weight"),
+ ("x_embedder.bias", "all_x_embedder.2-1.bias"),
+ ("x_pad_token", "x_pad_token"),
+ ("cap_embedder.0.weight", "cap_embedder.0.weight"),
+ ("cap_embedder.1.weight", "cap_embedder.1.weight"),
+ ("cap_embedder.1.bias", "cap_embedder.1.bias"),
+ ("cap_pad_token", "cap_pad_token"),
+ ("t_embedder.mlp.0.weight", "t_embedder.mlp.0.weight"),
+ ("t_embedder.mlp.0.bias", "t_embedder.mlp.0.bias"),
+ ("t_embedder.mlp.2.weight", "t_embedder.mlp.2.weight"),
+ ("t_embedder.mlp.2.bias", "t_embedder.mlp.2.bias"),
+ ]
+
+ for c, diffusers in MAP_BASIC:
+ key_map[diffusers] = "{}{}".format(output_prefix, c)
+
+ return key_map
diff --git a/modules_forge/packages/comfy/weight_adapter/__init__.py b/modules_forge/packages/comfy/weight_adapter/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ff1e7e8a833750234b70cdb743bd698ac19bbc1
--- /dev/null
+++ b/modules_forge/packages/comfy/weight_adapter/__init__.py
@@ -0,0 +1,39 @@
+# https://github.com/comfyanonymous/ComfyUI/tree/v0.3.77/comfy/weight_adapter
+
+from typing import Final
+
+from .base import WeightAdapterBase
+from .boft import BOFTAdapter
+from .glora import GLoRAAdapter
+from .loha import LoHaAdapter
+from .lokr import LoKrAdapter
+from .lora import LoRAAdapter
+from .oft import OFTAdapter
+from .oftv2 import OFTv2Adapter
+
+adapters: Final[list[type[WeightAdapterBase]]] = [
+ BOFTAdapter,
+ GLoRAAdapter,
+ LoHaAdapter,
+ LoKrAdapter,
+ LoRAAdapter,
+ OFTAdapter,
+ OFTv2Adapter,
+]
+
+adapter_maps: Final[dict[str, type[WeightAdapterBase]]] = {
+ "LoRA": LoRAAdapter,
+ "LoHa": LoHaAdapter,
+ "LoKr": LoKrAdapter,
+ "OFT": OFTAdapter,
+ "OFTv2": OFTv2Adapter,
+ # "GLoRA": GLoRAAdapter,
+ # "BOFT": BOFTAdapter,
+}
+
+
+__all__ = [
+ "WeightAdapterBase",
+ "adapters",
+ "adapter_maps",
+] + [a.__name__ for a in adapters]
diff --git a/modules_forge/packages/comfy/weight_adapter/base.py b/modules_forge/packages/comfy/weight_adapter/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cc725f698e5e3268d9c1ad539cdc1729f4c5850
--- /dev/null
+++ b/modules_forge/packages/comfy/weight_adapter/base.py
@@ -0,0 +1,165 @@
+from typing import Optional
+
+import torch
+import torch.nn as nn
+
+from backend import memory_management
+
+
+class WeightAdapterBase:
+ name: str
+ loaded_keys: set[str]
+ weights: list[torch.Tensor]
+
+ @classmethod
+ def load(cls, x: str, lora: dict[str, torch.Tensor], alpha: float, dora_scale: torch.Tensor) -> Optional["WeightAdapterBase"]:
+ raise NotImplementedError
+
+ def to_train(self) -> "WeightAdapterTrainBase":
+ raise NotImplementedError
+
+ @classmethod
+ def create_train(cls, weight, *args) -> "WeightAdapterTrainBase":
+ """
+ weight: The original weight tensor to be modified.
+ *args: Additional arguments for configuration, such as rank, alpha etc.
+ """
+ raise NotImplementedError
+
+ def calculate_weight(
+ self,
+ weight,
+ key,
+ strength,
+ strength_model,
+ offset,
+ function,
+ intermediate_dtype=torch.float32,
+ original_weight=None,
+ ):
+ raise NotImplementedError
+
+
+class WeightAdapterTrainBase(nn.Module):
+ # We follow the scheme of PR #7032
+ def __init__(self):
+ super().__init__()
+
+ def __call__(self, w):
+ """
+ w: The original weight tensor to be modified.
+ """
+ raise NotImplementedError
+
+ def passive_memory_usage(self):
+ raise NotImplementedError("passive_memory_usage is not implemented")
+
+ def move_to(self, device):
+ self.to(device)
+ return self.passive_memory_usage()
+
+
+def weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function):
+ dora_scale = memory_management.cast_to_device(dora_scale, weight.device, intermediate_dtype)
+ lora_diff *= alpha
+ weight_calc = weight + function(lora_diff).type(weight.dtype)
+
+ wd_on_output_axis = dora_scale.shape[0] == weight_calc.shape[0]
+ if wd_on_output_axis:
+ weight_norm = weight.reshape(weight.shape[0], -1).norm(dim=1, keepdim=True).reshape(weight.shape[0], *[1] * (weight.dim() - 1))
+ else:
+ weight_norm = weight_calc.transpose(0, 1).reshape(weight_calc.shape[1], -1).norm(dim=1, keepdim=True).reshape(weight_calc.shape[1], *[1] * (weight_calc.dim() - 1)).transpose(0, 1)
+ weight_norm = weight_norm + torch.finfo(weight.dtype).eps
+
+ weight_calc *= (dora_scale / weight_norm).type(weight.dtype)
+ if strength != 1.0:
+ weight_calc -= weight
+ weight += strength * (weight_calc)
+ else:
+ weight[:] = weight_calc
+ return weight
+
+
+def pad_tensor_to_shape(tensor: torch.Tensor, new_shape: list[int]) -> torch.Tensor:
+ """
+ Pad a tensor to a new shape with zeros.
+
+ Args:
+ tensor (torch.Tensor): The original tensor to be padded.
+ new_shape (List[int]): The desired shape of the padded tensor.
+
+ Returns:
+ torch.Tensor: A new tensor padded with zeros to the specified shape.
+
+ Note:
+ If the new shape is smaller than the original tensor in any dimension,
+ the original tensor will be truncated in that dimension.
+ """
+ if any([new_shape[i] < tensor.shape[i] for i in range(len(new_shape))]):
+ raise ValueError("The new shape must be larger than the original tensor in all dimensions")
+
+ if len(new_shape) != len(tensor.shape):
+ raise ValueError("The new shape must have the same number of dimensions as the original tensor")
+
+ # Create a new tensor filled with zeros
+ padded_tensor = torch.zeros(new_shape, dtype=tensor.dtype, device=tensor.device)
+
+ # Create slicing tuples for both tensors
+ orig_slices = tuple(slice(0, dim) for dim in tensor.shape)
+ new_slices = tuple(slice(0, dim) for dim in tensor.shape)
+
+ # Copy the original tensor into the new tensor
+ padded_tensor[new_slices] = tensor[orig_slices]
+
+ return padded_tensor
+
+
+def tucker_weight_from_conv(up, down, mid):
+ up = up.reshape(up.size(0), up.size(1))
+ down = down.reshape(down.size(0), down.size(1))
+ return torch.einsum("m n ..., i m, n j -> i j ...", mid, up, down)
+
+
+def tucker_weight(wa, wb, t):
+ temp = torch.einsum("i j ..., j r -> i r ...", t, wb)
+ return torch.einsum("i j ..., i r -> r j ...", temp, wa)
+
+
+def factorization(dimension: int, factor: int = -1) -> tuple[int, int]:
+ """
+ return a tuple of two value of input dimension decomposed by the number closest to factor
+ second value is higher or equal than first value.
+
+ examples)
+ factor
+ -1 2 4 8 16 ...
+ 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127
+ 128 -> 8, 16 128 -> 2, 64 128 -> 4, 32 128 -> 8, 16 128 -> 8, 16
+ 250 -> 10, 25 250 -> 2, 125 250 -> 2, 125 250 -> 5, 50 250 -> 10, 25
+ 360 -> 8, 45 360 -> 2, 180 360 -> 4, 90 360 -> 8, 45 360 -> 12, 30
+ 512 -> 16, 32 512 -> 2, 256 512 -> 4, 128 512 -> 8, 64 512 -> 16, 32
+ 1024 -> 32, 32 1024 -> 2, 512 1024 -> 4, 256 1024 -> 8, 128 1024 -> 16, 64
+ """
+
+ if factor > 0 and (dimension % factor) == 0 and dimension >= factor**2:
+ m = factor
+ n = dimension // factor
+ if m > n:
+ n, m = m, n
+ return m, n
+ if factor < 0:
+ factor = dimension
+ m, n = 1, dimension
+ length = m + n
+ while m < n:
+ new_m = m + 1
+ while dimension % new_m != 0:
+ new_m += 1
+ new_n = dimension // new_m
+ if new_m + new_n > length or new_m > factor:
+ break
+ else:
+ m, n = new_m, new_n
+ if m > n:
+ n, m = m, n
+ return m, n
diff --git a/modules_forge/packages/comfy/weight_adapter/boft.py b/modules_forge/packages/comfy/weight_adapter/boft.py
new file mode 100644
index 0000000000000000000000000000000000000000..09937811ea38922066fe78679deb10ed336b0075
--- /dev/null
+++ b/modules_forge/packages/comfy/weight_adapter/boft.py
@@ -0,0 +1,110 @@
+import logging
+from typing import Optional
+
+import torch
+
+from backend import memory_management
+
+from .base import WeightAdapterBase, weight_decompose
+
+
+class BOFTAdapter(WeightAdapterBase):
+ name = "boft"
+
+ def __init__(self, loaded_keys, weights):
+ self.loaded_keys = loaded_keys
+ self.weights = weights
+
+ @classmethod
+ def load(
+ cls,
+ x: str,
+ lora: dict[str, torch.Tensor],
+ alpha: float,
+ dora_scale: torch.Tensor,
+ loaded_keys: set[str] = None,
+ ) -> Optional["BOFTAdapter"]:
+ if loaded_keys is None:
+ loaded_keys = set()
+ blocks_name = "{}.oft_blocks".format(x)
+ rescale_name = "{}.rescale".format(x)
+
+ blocks = None
+ if blocks_name in lora.keys():
+ blocks = lora[blocks_name]
+ if blocks.ndim == 4:
+ loaded_keys.add(blocks_name)
+ else:
+ blocks = None
+ if blocks is None:
+ return None
+
+ rescale = None
+ if rescale_name in lora.keys():
+ rescale = lora[rescale_name]
+ loaded_keys.add(rescale_name)
+
+ weights = (blocks, rescale, alpha, dora_scale)
+ return cls(loaded_keys, weights)
+
+ def calculate_weight(
+ self,
+ weight,
+ key,
+ strength,
+ strength_model,
+ offset,
+ function,
+ intermediate_dtype=torch.float32,
+ original_weight=None,
+ ):
+ v = self.weights
+ blocks = v[0]
+ rescale = v[1]
+ alpha = v[2]
+ dora_scale = v[3]
+
+ blocks = memory_management.cast_to_device(blocks, weight.device, intermediate_dtype)
+ if rescale is not None:
+ rescale = memory_management.cast_to_device(rescale, weight.device, intermediate_dtype)
+
+ boft_m, block_num, boft_b, *_ = blocks.shape
+
+ try:
+ # Get r
+ I = torch.eye(boft_b, device=blocks.device, dtype=blocks.dtype)
+ # for Q = -Q^T
+ q = blocks - blocks.transpose(-1, -2)
+ normed_q = q
+ if alpha > 0: # alpha in boft/bboft is for constraint
+ q_norm = torch.norm(q) + 1e-8
+ if q_norm > alpha:
+ normed_q = q * alpha / q_norm
+ # use float() to prevent unsupported type in .inverse()
+ r = (I + normed_q) @ (I - normed_q).float().inverse()
+ r = r.to(weight)
+ inp = org = weight
+
+ r_b = boft_b // 2
+ for i in range(boft_m):
+ bi = r[i]
+ g = 2
+ k = 2**i * r_b
+ if strength != 1:
+ bi = bi * strength + (1 - strength) * I
+ inp = inp.unflatten(0, (-1, g, k)).transpose(1, 2).flatten(0, 2).unflatten(0, (-1, boft_b))
+ inp = torch.einsum("b i j, b j ...-> b i ...", bi, inp)
+ inp = inp.flatten(0, 1).unflatten(0, (-1, k, g)).transpose(1, 2).flatten(0, 2)
+
+ if rescale is not None:
+ inp = inp * rescale
+
+ lora_diff = inp - org
+ lora_diff = memory_management.cast_to_device(lora_diff, weight.device, intermediate_dtype)
+ if dora_scale is not None:
+ weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function)
+ else:
+ weight += function((strength * lora_diff).type(weight.dtype))
+ except Exception as e:
+ logging.error("ERROR {} {} {}".format(self.name, key, e))
+ return weight
diff --git a/modules_forge/packages/comfy/weight_adapter/glora.py b/modules_forge/packages/comfy/weight_adapter/glora.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3f80aae56c2d74a639718ec84fb38f4ac70a8d6
--- /dev/null
+++ b/modules_forge/packages/comfy/weight_adapter/glora.py
@@ -0,0 +1,95 @@
+import logging
+from typing import Optional
+
+import torch
+
+from backend import memory_management
+
+from .base import WeightAdapterBase, weight_decompose
+
+
class GLoRAAdapter(WeightAdapterBase):
    """GLoRA (generalized LoRA) weight adapter.

    The patch is built from four factor matrices: ``a1``/``a2`` form a
    weight-dependent term (the base weight is multiplied through them) and
    ``b1``/``b2`` form a plain additive low-rank term.
    """

    name = "glora"

    def __init__(self, loaded_keys, weights):
        # loaded_keys: lora-dict keys consumed when this adapter was loaded.
        # weights: (a1, a2, b1, b2, alpha, dora_scale) tuple packed by load().
        self.loaded_keys = loaded_keys
        self.weights = weights

    @classmethod
    def load(
        cls,
        x: str,
        lora: dict[str, torch.Tensor],
        alpha: float,
        dora_scale: torch.Tensor,
        loaded_keys: set[str] = None,
    ) -> Optional["GLoRAAdapter"]:
        """Build a GLoRAAdapter from state dict ``lora`` for key prefix ``x``.

        Returns None when ``x`` is not a GLoRA entry (no ``.a1.weight`` key).
        Consumed keys are added to ``loaded_keys``.
        """
        if loaded_keys is None:
            loaded_keys = set()
        a1_name = "{}.a1.weight".format(x)
        a2_name = "{}.a2.weight".format(x)
        b1_name = "{}.b1.weight".format(x)
        b2_name = "{}.b2.weight".format(x)
        if a1_name in lora:
            weights = (lora[a1_name], lora[a2_name], lora[b1_name], lora[b2_name], alpha, dora_scale)
            loaded_keys.add(a1_name)
            loaded_keys.add(a2_name)
            loaded_keys.add(b1_name)
            loaded_keys.add(b2_name)
            return cls(loaded_keys, weights)
        else:
            return None

    def calculate_weight(
        self,
        weight,
        key,
        strength,
        strength_model,
        offset,
        function,
        intermediate_dtype=torch.float32,
        original_weight=None,
    ):
        """Apply the GLoRA delta to ``weight`` in place and return it.

        Errors are logged (not raised) so one bad key does not abort
        patching of the whole model.
        """
        v = self.weights
        dora_scale = v[5]

        # Old LyCORIS GLoRA and the newer layout store the factors transposed
        # relative to each other; detect which one we have from the shapes.
        old_glora = False
        if v[3].shape[1] == v[2].shape[0] == v[0].shape[0] == v[1].shape[1]:
            rank = v[0].shape[0]
            old_glora = True

        if v[3].shape[0] == v[2].shape[1] == v[0].shape[1] == v[1].shape[0]:
            # Square weights can satisfy both shape tests; keep the old layout
            # only when it is also consistent with the weight's output dim.
            if old_glora and v[1].shape[0] == weight.shape[0] and weight.shape[0] == weight.shape[1]:
                pass
            else:
                old_glora = False
                rank = v[1].shape[0]
        # NOTE(review): if neither shape test matches, ``rank`` stays unbound
        # and the alpha computation below raises outside the try — confirm
        # inputs always satisfy one of the two layouts.

        a1 = memory_management.cast_to_device(v[0].flatten(start_dim=1), weight.device, intermediate_dtype)
        a2 = memory_management.cast_to_device(v[1].flatten(start_dim=1), weight.device, intermediate_dtype)
        b1 = memory_management.cast_to_device(v[2].flatten(start_dim=1), weight.device, intermediate_dtype)
        b2 = memory_management.cast_to_device(v[3].flatten(start_dim=1), weight.device, intermediate_dtype)

        # LyCORIS convention: alpha is divided by the rank.
        if v[4] is not None:
            alpha = v[4] / rank
        else:
            alpha = 1.0

        try:
            if old_glora:
                lora_diff = (torch.mm(b2, b1) + torch.mm(torch.mm(weight.flatten(start_dim=1).to(dtype=intermediate_dtype), a2), a1)).reshape(weight.shape) # old lycoris glora
            else:
                # New layout: weight-dependent term via a1/a2, additive term b1@b2.
                if weight.dim() > 2:
                    lora_diff = torch.einsum("o i ..., i j -> o j ...", torch.einsum("o i ..., i j -> o j ...", weight.to(dtype=intermediate_dtype), a1), a2).reshape(weight.shape)
                else:
                    lora_diff = torch.mm(torch.mm(weight.to(dtype=intermediate_dtype), a1), a2).reshape(weight.shape)
                lora_diff += torch.mm(b1, b2).reshape(weight.shape)

            if dora_scale is not None:
                weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function)
            else:
                weight += function(((strength * alpha) * lora_diff).type(weight.dtype))
        except Exception as e:
            logging.error("ERROR {} {} {}".format(self.name, key, e))
        return weight
diff --git a/modules_forge/packages/comfy/weight_adapter/loha.py b/modules_forge/packages/comfy/weight_adapter/loha.py
new file mode 100644
index 0000000000000000000000000000000000000000..94968fefa3e87eee88fc1723a3acadb6db138e97
--- /dev/null
+++ b/modules_forge/packages/comfy/weight_adapter/loha.py
@@ -0,0 +1,224 @@
+import logging
+from typing import Optional
+
+import torch
+
+from backend import memory_management
+
+from .base import WeightAdapterBase, WeightAdapterTrainBase, weight_decompose
+
+
class HadaWeight(torch.autograd.Function):
    """Hadamard-product low-rank delta: ((w1u @ w1d) * (w2u @ w2d)) * scale.

    Implemented as a custom autograd Function with hand-derived gradients for
    the four factor matrices.
    """

    @staticmethod
    def forward(ctx, w1u, w1d, w2u, w2d, scale=torch.tensor(1)):
        """Return the element-wise product of the two rebuilt low-rank matrices."""
        ctx.save_for_backward(w1d, w1u, w2d, w2u, scale)
        return ((w1u @ w1d) * (w2u @ w2d)) * scale

    @staticmethod
    def backward(ctx, grad_out):
        """Gradients w.r.t. the four factors; None for the scale input."""
        w1d, w1u, w2d, w2u, scale = ctx.saved_tensors
        scaled_grad = grad_out * scale

        # For each branch, the other branch's rebuilt product acts as an
        # element-wise weighting of the incoming gradient.
        branch2 = scaled_grad * (w2u @ w2d)
        g_w1u = branch2 @ w1d.T
        g_w1d = w1u.T @ branch2

        branch1 = scaled_grad * (w1u @ w1d)
        g_w2u = branch1 @ w2d.T
        g_w2d = w2u.T @ branch1

        return g_w1u, g_w1d, g_w2u, g_w2d, None
+
+
class HadaWeightTucker(torch.autograd.Function):
    """Hadamard-product delta with Tucker-decomposed (conv) factors.

    Each branch is rebuilt as einsum("i j ..., j r, i p -> p r ...", t, wd, wu)
    from its core tensor and two factor matrices; the two rebuilt tensors are
    multiplied element-wise and scaled.
    """

    @staticmethod
    def forward(ctx, t1, w1u, w1d, t2, w2u, w2d, scale=torch.tensor(1)):
        ctx.save_for_backward(t1, w1d, w1u, t2, w2d, w2u, scale)

        rebuild1 = torch.einsum("i j ..., j r, i p -> p r ...", t1, w1d, w1u)
        rebuild2 = torch.einsum("i j ..., j r, i p -> p r ...", t2, w2d, w2u)

        return rebuild1 * rebuild2 * scale

    @staticmethod
    def backward(ctx, grad_out):
        """Hand-derived gradients; returns None for the scale input."""
        (t1, w1d, w1u, t2, w2d, w2u, scale) = ctx.saved_tensors
        grad_out = grad_out * scale

        # Branch-1 gradients: weight grad_out by the rebuilt branch 2, then
        # peel off w1u, then w1d / t1, freeing intermediates as we go.
        temp = torch.einsum("i j ..., j r -> i r ...", t2, w2d)
        rebuild = torch.einsum("i j ..., i r -> r j ...", temp, w2u)

        grad_w = rebuild * grad_out
        del rebuild

        grad_w1u = torch.einsum("r j ..., i j ... -> r i", temp, grad_w)
        grad_temp = torch.einsum("i j ..., i r -> r j ...", grad_w, w1u.T)
        del grad_w, temp

        grad_w1d = torch.einsum("i r ..., i j ... -> r j", t1, grad_temp)
        grad_t1 = torch.einsum("i j ..., j r -> i r ...", grad_temp, w1d.T)
        del grad_temp

        # Branch-2 gradients: symmetric to the above, weighted by branch 1.
        temp = torch.einsum("i j ..., j r -> i r ...", t1, w1d)
        rebuild = torch.einsum("i j ..., i r -> r j ...", temp, w1u)

        grad_w = rebuild * grad_out
        del rebuild

        grad_w2u = torch.einsum("r j ..., i j ... -> r i", temp, grad_w)
        grad_temp = torch.einsum("i j ..., i r -> r j ...", grad_w, w2u.T)
        del grad_w, temp

        grad_w2d = torch.einsum("i r ..., i j ... -> r j", t2, grad_temp)
        grad_t2 = torch.einsum("i j ..., j r -> i r ...", grad_temp, w2d.T)
        del grad_temp
        return grad_t1, grad_w1u, grad_w1d, grad_t2, grad_w2u, grad_w2d, None
+
+
class LohaDiff(WeightAdapterTrainBase):
    """Trainable module form of a LoHa adapter.

    Holds the four factor matrices (and optional Tucker cores) as parameters
    and reconstructs ``weight + delta`` on call.
    """

    def __init__(self, weights):
        super().__init__()
        # Unpack weights tuple from LoHaAdapter:
        # (w1a, w1b, alpha, w2a, w2b, t1, t2, dora_scale) — dora_scale unused here.
        w1a, w1b, alpha, w2a, w2b, t1, t2, _ = weights

        # Create trainable parameters
        self.hada_w1_a = torch.nn.Parameter(w1a)
        self.hada_w1_b = torch.nn.Parameter(w1b)
        self.hada_w2_a = torch.nn.Parameter(w2a)
        self.hada_w2_b = torch.nn.Parameter(w2b)

        self.use_tucker = False
        if t1 is not None and t2 is not None:
            self.use_tucker = True
            self.hada_t1 = torch.nn.Parameter(t1)
            self.hada_t2 = torch.nn.Parameter(t2)
        else:
            # Keep the attributes for consistent access
            self.hada_t1 = None
            self.hada_t2 = None

        # Store rank and non-trainable alpha
        self.rank = w1b.shape[0]
        self.alpha = torch.nn.Parameter(torch.tensor(alpha), requires_grad=False)

    def __call__(self, w):
        """Return ``w`` plus the (alpha/rank)-scaled delta, in w's original dtype."""
        org_dtype = w.dtype

        scale = self.alpha / self.rank
        if self.use_tucker:
            diff_weight = HadaWeightTucker.apply(self.hada_t1, self.hada_w1_a, self.hada_w1_b, self.hada_t2, self.hada_w2_a, self.hada_w2_b, scale)
        else:
            diff_weight = HadaWeight.apply(self.hada_w1_a, self.hada_w1_b, self.hada_w2_a, self.hada_w2_b, scale)

        # Add the scaled difference to the original weight
        weight = w.to(diff_weight) + diff_weight.reshape(w.shape)

        return weight.to(org_dtype)

    def passive_memory_usage(self):
        """Calculates memory usage of the trainable parameters."""
        return sum(param.numel() * param.element_size() for param in self.parameters())
+
+
class LoHaAdapter(WeightAdapterBase):
    """LoHa (Hadamard product) weight adapter.

    The delta is (w1a @ w1b) * (w2a @ w2b), optionally rebuilt through Tucker
    cores (t1/t2) for convolutional layers.
    """

    name = "loha"

    def __init__(self, loaded_keys, weights):
        # weights: (w1a, w1b, alpha, w2a, w2b, t1, t2, dora_scale)
        self.loaded_keys = loaded_keys
        self.weights = weights

    @classmethod
    def create_train(cls, weight, rank=1, alpha=1.0):
        """Create a fresh trainable LoHa for ``weight``.

        mat2 is zero-initialized so the initial delta is zero.
        """
        out_dim = weight.shape[0]
        in_dim = weight.shape[1:].numel()
        mat1 = torch.empty(out_dim, rank, device=weight.device, dtype=torch.float32)
        mat2 = torch.empty(rank, in_dim, device=weight.device, dtype=torch.float32)
        torch.nn.init.normal_(mat1, 0.1)
        torch.nn.init.constant_(mat2, 0.0)
        mat3 = torch.empty(out_dim, rank, device=weight.device, dtype=torch.float32)
        mat4 = torch.empty(rank, in_dim, device=weight.device, dtype=torch.float32)
        torch.nn.init.normal_(mat3, 0.1)
        torch.nn.init.normal_(mat4, 0.01)
        return LohaDiff((mat1, mat2, alpha, mat3, mat4, None, None, None))

    def to_train(self):
        """Wrap the loaded weights in a trainable LohaDiff module."""
        return LohaDiff(self.weights)

    @classmethod
    def load(
        cls,
        x: str,
        lora: dict[str, torch.Tensor],
        alpha: float,
        dora_scale: torch.Tensor,
        loaded_keys: set[str] = None,
    ) -> Optional["LoHaAdapter"]:
        """Build a LoHaAdapter from state dict ``lora`` for key prefix ``x``.

        Returns None when ``x`` is not a LoHa entry. Consumed keys are added
        to ``loaded_keys``.
        """
        if loaded_keys is None:
            loaded_keys = set()

        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            # Optional Tucker cores (conv LoHa).
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            weights = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2, dora_scale)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)
            return cls(loaded_keys, weights)
        else:
            return None

    def calculate_weight(
        self,
        weight,
        key,
        strength,
        strength_model,
        offset,
        function,
        intermediate_dtype=torch.float32,
        original_weight=None,
    ):
        """Apply the LoHa delta to ``weight`` in place and return it."""
        v = self.weights
        w1a = v[0]
        w1b = v[1]
        # LyCORIS convention: alpha is divided by the rank (w1b's first dim).
        if v[2] is not None:
            alpha = v[2] / w1b.shape[0]
        else:
            alpha = 1.0

        w2a = v[3]
        w2b = v[4]
        dora_scale = v[7]
        if v[5] is not None: # cp decomposition
            t1 = v[5]
            t2 = v[6]
            m1 = torch.einsum("i j k l, j r, i p -> p r k l", memory_management.cast_to_device(t1, weight.device, intermediate_dtype), memory_management.cast_to_device(w1b, weight.device, intermediate_dtype), memory_management.cast_to_device(w1a, weight.device, intermediate_dtype))

            m2 = torch.einsum("i j k l, j r, i p -> p r k l", memory_management.cast_to_device(t2, weight.device, intermediate_dtype), memory_management.cast_to_device(w2b, weight.device, intermediate_dtype), memory_management.cast_to_device(w2a, weight.device, intermediate_dtype))
        else:
            m1 = torch.mm(memory_management.cast_to_device(w1a, weight.device, intermediate_dtype), memory_management.cast_to_device(w1b, weight.device, intermediate_dtype))
            m2 = torch.mm(memory_management.cast_to_device(w2a, weight.device, intermediate_dtype), memory_management.cast_to_device(w2b, weight.device, intermediate_dtype))

        try:
            # Hadamard product of the two rebuilt matrices is the delta.
            lora_diff = (m1 * m2).reshape(weight.shape)
            if dora_scale is not None:
                weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function)
            else:
                weight += function(((strength * alpha) * lora_diff).type(weight.dtype))
        except Exception as e:
            logging.error("ERROR {} {} {}".format(self.name, key, e))
        return weight
diff --git a/modules_forge/packages/comfy/weight_adapter/lokr.py b/modules_forge/packages/comfy/weight_adapter/lokr.py
new file mode 100644
index 0000000000000000000000000000000000000000..69fee74827c095794676bdc84a6246ace4177047
--- /dev/null
+++ b/modules_forge/packages/comfy/weight_adapter/lokr.py
@@ -0,0 +1,210 @@
+import logging
+from typing import Optional
+
+import torch
+
+from backend import memory_management
+
+from .base import (
+ WeightAdapterBase,
+ WeightAdapterTrainBase,
+ factorization,
+ weight_decompose,
+)
+
+
class LokrDiff(WeightAdapterTrainBase):
    """Trainable module form of a LoKr (Kronecker product) adapter.

    Each Kronecker factor (w1, w2) is stored either directly or as a low-rank
    pair (a @ b), optionally with a Tucker core for w2.
    """

    def __init__(self, weights):
        super().__init__()
        (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2, dora_scale) = weights
        self.use_tucker = False
        if lokr_w1_a is not None:
            # NOTE(review): the second assignment overwrites the first, so the
            # effective rank is lokr_w1_b.shape[0] — confirm this mirrors the
            # intended factor layout.
            _, rank_a = lokr_w1_a.shape[0], lokr_w1_a.shape[1]
            rank_a, _ = lokr_w1_b.shape[0], lokr_w1_b.shape[1]
            self.lokr_w1_a = torch.nn.Parameter(lokr_w1_a)
            self.lokr_w1_b = torch.nn.Parameter(lokr_w1_b)
            self.w1_rebuild = True
            self.ranka = rank_a

        if lokr_w2_a is not None:
            # Same overwrite pattern: effective rank is lokr_w2_b.shape[0].
            _, rank_b = lokr_w2_a.shape[0], lokr_w2_a.shape[1]
            rank_b, _ = lokr_w2_b.shape[0], lokr_w2_b.shape[1]
            self.lokr_w2_a = torch.nn.Parameter(lokr_w2_a)
            self.lokr_w2_b = torch.nn.Parameter(lokr_w2_b)
            if lokr_t2 is not None:
                self.use_tucker = True
                self.lokr_t2 = torch.nn.Parameter(lokr_t2)
            self.w2_rebuild = True
            self.rankb = rank_b

        # Full factors, when provided, take precedence over the low-rank pairs.
        if lokr_w1 is not None:
            self.lokr_w1 = torch.nn.Parameter(lokr_w1)
            self.w1_rebuild = False

        if lokr_w2 is not None:
            self.lokr_w2 = torch.nn.Parameter(lokr_w2)
            self.w2_rebuild = False

        self.alpha = torch.nn.Parameter(torch.tensor(alpha), requires_grad=False)

    @property
    def w1(self):
        # First Kronecker factor, rebuilt from the low-rank pair if needed.
        if self.w1_rebuild:
            return (self.lokr_w1_a @ self.lokr_w1_b) * (self.alpha / self.ranka)
        else:
            return self.lokr_w1

    @property
    def w2(self):
        # Second Kronecker factor; Tucker path rebuilds through the core.
        if self.w2_rebuild:
            if self.use_tucker:
                w2 = torch.einsum("i j k l, j r, i p -> p r k l", self.lokr_t2, self.lokr_w2_b, self.lokr_w2_a)
            else:
                w2 = self.lokr_w2_a @ self.lokr_w2_b
            return w2 * (self.alpha / self.rankb)
        else:
            return self.lokr_w2

    def __call__(self, w):
        """Return ``w`` plus the Kronecker-product delta, reshaped to w."""
        diff = torch.kron(self.w1, self.w2)
        return w + diff.reshape(w.shape).to(w)

    def passive_memory_usage(self):
        """Calculates memory usage of the trainable parameters."""
        return sum(param.numel() * param.element_size() for param in self.parameters())
+
+
class LoKrAdapter(WeightAdapterBase):
    """LoKr weight adapter: delta = kron(w1, w2), each factor possibly low-rank."""

    name = "lokr"

    def __init__(self, loaded_keys, weights):
        # weights: (w1, w2, alpha, w1_a, w1_b, w2_a, w2_b, t2, dora_scale)
        self.loaded_keys = loaded_keys
        self.weights = weights

    @classmethod
    def create_train(cls, weight, rank=1, alpha=1.0):
        """Create a fresh trainable LoKr for ``weight``.

        mat1 is zero-initialized so the initial delta is zero.
        """
        out_dim = weight.shape[0]
        in_dim = weight.shape[1:].numel()
        out1, out2 = factorization(out_dim, rank)
        in1, in2 = factorization(in_dim, rank)
        mat1 = torch.empty(out1, in1, device=weight.device, dtype=torch.float32)
        mat2 = torch.empty(out2, in2, device=weight.device, dtype=torch.float32)
        torch.nn.init.kaiming_uniform_(mat2, a=5**0.5)
        torch.nn.init.constant_(mat1, 0.0)
        return LokrDiff((mat1, mat2, alpha, None, None, None, None, None, None))

    def to_train(self):
        """Wrap the loaded weights in a trainable LokrDiff module."""
        return LokrDiff(self.weights)

    @classmethod
    def load(
        cls,
        x: str,
        lora: dict[str, torch.Tensor],
        alpha: float,
        dora_scale: torch.Tensor,
        loaded_keys: set[str] = None,
    ) -> Optional["LoKrAdapter"]:
        """Build a LoKrAdapter from state dict ``lora`` for key prefix ``x``.

        Any combination of full factors (lokr_w1/w2) and low-rank pairs
        (lokr_w*_a/_b, optional lokr_t2) is accepted; returns None when none
        of them are present. Consumed keys are added to ``loaded_keys``.
        """
        if loaded_keys is None:
            loaded_keys = set()
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            weights = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2, dora_scale)
            return cls(loaded_keys, weights)
        else:
            return None

    def calculate_weight(
        self,
        weight,
        key,
        strength,
        strength_model,
        offset,
        function,
        intermediate_dtype=torch.float32,
        original_weight=None,
    ):
        """Apply the LoKr delta to ``weight`` in place and return it."""
        v = self.weights
        w1 = v[0]
        w2 = v[1]
        w1_a = v[3]
        w1_b = v[4]
        w2_a = v[5]
        w2_b = v[6]
        t2 = v[7]
        dora_scale = v[8]
        # dim: rank used for alpha rescale; only set when a factor is rebuilt
        # from a low-rank pair.
        dim = None

        if w1 is None:
            dim = w1_b.shape[0]
            w1 = torch.mm(memory_management.cast_to_device(w1_a, weight.device, intermediate_dtype), memory_management.cast_to_device(w1_b, weight.device, intermediate_dtype))
        else:
            w1 = memory_management.cast_to_device(w1, weight.device, intermediate_dtype)

        if w2 is None:
            dim = w2_b.shape[0]
            if t2 is None:
                w2 = torch.mm(memory_management.cast_to_device(w2_a, weight.device, intermediate_dtype), memory_management.cast_to_device(w2_b, weight.device, intermediate_dtype))
            else:
                # Tucker path: rebuild the conv factor through the core t2.
                w2 = torch.einsum("i j k l, j r, i p -> p r k l", memory_management.cast_to_device(t2, weight.device, intermediate_dtype), memory_management.cast_to_device(w2_b, weight.device, intermediate_dtype), memory_management.cast_to_device(w2_a, weight.device, intermediate_dtype))
        else:
            w2 = memory_management.cast_to_device(w2, weight.device, intermediate_dtype)

        # Broadcast w1 over the spatial dims when w2 is a conv kernel.
        if len(w2.shape) == 4:
            w1 = w1.unsqueeze(2).unsqueeze(2)
        if v[2] is not None and dim is not None:
            alpha = v[2] / dim
        else:
            alpha = 1.0

        try:
            lora_diff = torch.kron(w1, w2).reshape(weight.shape)
            if dora_scale is not None:
                weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function)
            else:
                weight += function(((strength * alpha) * lora_diff).type(weight.dtype))
        except Exception as e:
            logging.error("ERROR {} {} {}".format(self.name, key, e))
        return weight
diff --git a/modules_forge/packages/comfy/weight_adapter/lora.py b/modules_forge/packages/comfy/weight_adapter/lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..b567806c8838b6146bdca8f7e52e6d212bd7c274
--- /dev/null
+++ b/modules_forge/packages/comfy/weight_adapter/lora.py
@@ -0,0 +1,198 @@
+import logging
+from typing import Optional
+
+import torch
+
+from backend import memory_management
+
+from .base import (
+ WeightAdapterBase,
+ WeightAdapterTrainBase,
+ pad_tensor_to_shape,
+ tucker_weight_from_conv,
+ weight_decompose,
+)
+
+
class LoraDiff(WeightAdapterTrainBase):
    """Trainable module form of a plain LoRA adapter (up @ down)."""

    def __init__(self, weights):
        super().__init__()
        # weights: (mat1=up, mat2=down, alpha, mid, dora_scale, reshape);
        # dora_scale and reshape are unused by the trainable form.
        mat1, mat2, alpha, mid, dora_scale, reshape = weights
        out_dim, rank = mat1.shape[0], mat1.shape[1]
        rank, in_dim = mat2.shape[0], mat2.shape[1]
        if mid is not None:
            # Pick Conv1d/2d/3d to match the mid tensor's spatial rank.
            convdim = mid.ndim - 2
            layer = (torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d)[convdim]
        else:
            layer = torch.nn.Linear
        self.lora_up = layer(rank, out_dim, bias=False)
        self.lora_down = layer(in_dim, rank, bias=False)
        self.lora_up.weight.data.copy_(mat1)
        self.lora_down.weight.data.copy_(mat2)
        if mid is not None:
            # NOTE(review): ``mid`` (a tensor) is passed where the layer ctor
            # expects in_channels — looks suspicious; confirm this Tucker path
            # is actually exercised.
            self.lora_mid = layer(mid, rank, bias=False)
            self.lora_mid.weight.data.copy_(mid)
        else:
            self.lora_mid = None
        self.rank = rank
        self.alpha = torch.nn.Parameter(torch.tensor(alpha), requires_grad=False)

    def __call__(self, w):
        """Return ``w`` plus the (alpha/rank)-scaled delta, in w's original dtype."""
        org_dtype = w.dtype
        if self.lora_mid is None:
            diff = self.lora_up.weight @ self.lora_down.weight
        else:
            diff = tucker_weight_from_conv(self.lora_up.weight, self.lora_down.weight, self.lora_mid.weight)
        scale = self.alpha / self.rank
        weight = w + scale * diff.reshape(w.shape)
        return weight.to(org_dtype)

    def passive_memory_usage(self):
        """Calculates memory usage of the trainable parameters."""
        return sum(param.numel() * param.element_size() for param in self.parameters())
+
+
class LoRAAdapter(WeightAdapterBase):
    """Classic LoRA weight adapter: delta = up @ down (optionally with a
    conv mid/locon tensor), recognizing several key-naming conventions."""

    name = "lora"

    def __init__(self, loaded_keys, weights):
        # weights: (up, down, alpha, mid, dora_scale, reshape)
        self.loaded_keys = loaded_keys
        self.weights = weights

    @classmethod
    def create_train(cls, weight, rank=1, alpha=1.0):
        """Create a fresh trainable LoRA for ``weight``.

        mat2 (down) is zero-initialized so the initial delta is zero.
        """
        out_dim = weight.shape[0]
        in_dim = weight.shape[1:].numel()
        mat1 = torch.empty(out_dim, rank, device=weight.device, dtype=torch.float32)
        mat2 = torch.empty(rank, in_dim, device=weight.device, dtype=torch.float32)
        torch.nn.init.kaiming_uniform_(mat1, a=5**0.5)
        torch.nn.init.constant_(mat2, 0.0)
        return LoraDiff((mat1, mat2, alpha, None, None, None))

    def to_train(self):
        """Wrap the loaded weights in a trainable LoraDiff module."""
        return LoraDiff(self.weights)

    @classmethod
    def load(
        cls,
        x: str,
        lora: dict[str, torch.Tensor],
        alpha: float,
        dora_scale: torch.Tensor,
        loaded_keys: set[str] = None,
    ) -> Optional["LoRAAdapter"]:
        """Build a LoRAAdapter from state dict ``lora`` for key prefix ``x``.

        Tries the known naming schemes (kohya, diffusers, PEFT, mochi,
        transformers, qwen) in order; returns None when none matches.
        Consumed keys are added to ``loaded_keys``.
        """
        if loaded_keys is None:
            loaded_keys = set()

        reshape_name = "{}.reshape_weight".format(x)
        regular_lora = "{}.lora_up.weight".format(x)
        diffusers_lora = "{}_lora.up.weight".format(x)
        diffusers2_lora = "{}.lora_B.weight".format(x)
        diffusers3_lora = "{}.lora.up.weight".format(x)
        mochi_lora = "{}.lora_B".format(x)
        transformers_lora = "{}.lora_linear_layer.up.weight".format(x)
        qwen_default_lora = "{}.lora_B.default.weight".format(x)
        A_name = None

        if regular_lora in lora.keys():
            A_name = regular_lora
            B_name = "{}.lora_down.weight".format(x)
            mid_name = "{}.lora_mid.weight".format(x)
        elif diffusers_lora in lora.keys():
            A_name = diffusers_lora
            B_name = "{}_lora.down.weight".format(x)
            mid_name = None
        elif diffusers2_lora in lora.keys():
            A_name = diffusers2_lora
            B_name = "{}.lora_A.weight".format(x)
            mid_name = None
        elif diffusers3_lora in lora.keys():
            A_name = diffusers3_lora
            B_name = "{}.lora.down.weight".format(x)
            mid_name = None
        elif mochi_lora in lora.keys():
            A_name = mochi_lora
            B_name = "{}.lora_A".format(x)
            mid_name = None
        elif transformers_lora in lora.keys():
            A_name = transformers_lora
            B_name = "{}.lora_linear_layer.down.weight".format(x)
            mid_name = None
        elif qwen_default_lora in lora.keys():
            A_name = qwen_default_lora
            B_name = "{}.lora_A.default.weight".format(x)
            mid_name = None

        if A_name is not None:
            mid = None
            if mid_name is not None and mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            reshape = None
            if reshape_name in lora.keys():
                # Best-effort: a malformed reshape tensor is ignored rather
                # than aborting the load. Narrowed from a bare `except:` so
                # SystemExit/KeyboardInterrupt are not swallowed.
                try:
                    reshape = lora[reshape_name].tolist()
                    loaded_keys.add(reshape_name)
                except Exception:
                    pass
            weights = (lora[A_name], lora[B_name], alpha, mid, dora_scale, reshape)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)
            return cls(loaded_keys, weights)
        else:
            return None

    def calculate_weight(
        self,
        weight,
        key,
        strength,
        strength_model,
        offset,
        function,
        intermediate_dtype=torch.float32,
        original_weight=None,
    ):
        """Apply the LoRA delta to ``weight`` in place and return it.

        Errors are logged (not raised) so one bad key does not abort
        patching of the whole model.
        """
        v = self.weights
        mat1 = memory_management.cast_to_device(v[0], weight.device, intermediate_dtype)
        mat2 = memory_management.cast_to_device(v[1], weight.device, intermediate_dtype)
        dora_scale = v[4]
        reshape = v[5]

        # Some checkpoints expect the base weight padded to a larger shape.
        if reshape is not None:
            weight = pad_tensor_to_shape(weight, reshape)

        # LyCORIS convention: alpha is divided by the rank (down's first dim).
        if v[2] is not None:
            alpha = v[2] / mat2.shape[0]
        else:
            alpha = 1.0

        if v[3] is not None:
            # locon mid weights, hopefully the math is fine because I didn't properly test it
            mat3 = memory_management.cast_to_device(v[3], weight.device, intermediate_dtype)
            final_shape = [mat2.shape[1], mat2.shape[0], mat3.shape[2], mat3.shape[3]]
            mat2 = (
                torch.mm(
                    mat2.transpose(0, 1).flatten(start_dim=1),
                    mat3.transpose(0, 1).flatten(start_dim=1),
                )
                .reshape(final_shape)
                .transpose(0, 1)
            )
        try:
            lora_diff = torch.mm(mat1.flatten(start_dim=1), mat2.flatten(start_dim=1)).reshape(weight.shape)
            del mat1, mat2
            if dora_scale is not None:
                weight = weight_decompose(
                    dora_scale,
                    weight,
                    lora_diff,
                    alpha,
                    strength,
                    intermediate_dtype,
                    function,
                )
            else:
                weight += function(((strength * alpha) * lora_diff).type(weight.dtype))
        except Exception as e:
            logging.error("ERROR {} {} {}".format(self.name, key, e))
        return weight
diff --git a/modules_forge/packages/comfy/weight_adapter/oft.py b/modules_forge/packages/comfy/weight_adapter/oft.py
new file mode 100644
index 0000000000000000000000000000000000000000..b926c049841ec798edbb38b127f92983279adc91
--- /dev/null
+++ b/modules_forge/packages/comfy/weight_adapter/oft.py
@@ -0,0 +1,166 @@
+import logging
+from typing import Optional
+
+import torch
+
+from backend import memory_management
+
+from .base import (
+ WeightAdapterBase,
+ WeightAdapterTrainBase,
+ factorization,
+ weight_decompose,
+)
+
+
class OFTDiff(WeightAdapterTrainBase):
    """Trainable module form of an OFT adapter.

    Stores per-block skew-symmetric parameters and applies the Cayley
    transform to rotate the base weight block-wise.
    """

    def __init__(self, weights):
        super().__init__()
        # Unpack weights tuple from OFTAdapter: (blocks, rescale, alpha, dora_scale).
        blocks, rescale, alpha, _ = weights

        # Create trainable parameters
        self.oft_blocks = torch.nn.Parameter(blocks)
        if rescale is not None:
            self.rescale = torch.nn.Parameter(rescale)
            self.rescaled = True
        else:
            self.rescaled = False
        self.block_num, self.block_size, _ = blocks.shape
        # In OFT, alpha acts as a norm constraint on Q rather than a scale.
        self.constraint = float(alpha)
        self.alpha = torch.nn.Parameter(torch.tensor(alpha), requires_grad=False)

    def __call__(self, w):
        """Return the block-rotated weight, in w's original dtype."""
        org_dtype = w.dtype
        I = torch.eye(self.block_size, device=self.oft_blocks.device)

        ## generate r
        # for Q = -Q^T
        q = self.oft_blocks - self.oft_blocks.transpose(1, 2)
        normed_q = q
        # Constrained OFT: clamp the Frobenius norm of Q to the constraint.
        if self.constraint:
            q_norm = torch.norm(q) + 1e-8
            if q_norm > self.constraint:
                normed_q = q * self.constraint / q_norm
        # use float() to prevent unsupported type
        r = (I + normed_q) @ (I - normed_q).float().inverse()

        ## Apply chunked matmul on weight
        _, *shape = w.shape
        org_weight = w.to(dtype=r.dtype)
        org_weight = org_weight.unflatten(0, (self.block_num, self.block_size))
        # Init R=0, so add I on it to ensure the output of step0 is original model output
        weight = torch.einsum(
            "k n m, k n ... -> k m ...",
            r,
            org_weight,
        ).flatten(0, 1)
        if self.rescaled:
            weight = self.rescale * weight
        return weight.to(org_dtype)

    def passive_memory_usage(self):
        """Calculates memory usage of the trainable parameters."""
        return sum(param.numel() * param.element_size() for param in self.parameters())
+
+
class OFTAdapter(WeightAdapterBase):
    """OFT (orthogonal fine-tuning) weight adapter.

    Loads per-block skew-symmetric parameters and applies a Cayley-transform
    rotation of the base weight as the delta.
    """

    name = "oft"

    def __init__(self, loaded_keys, weights):
        # weights: (blocks, rescale, alpha, dora_scale)
        self.loaded_keys = loaded_keys
        self.weights = weights

    @classmethod
    def create_train(cls, weight, rank=1, alpha=1.0):
        """Create a fresh trainable OFT for ``weight``.

        Blocks start at zero, so the initial rotation is the identity.
        """
        out_dim = weight.shape[0]
        block_size, block_num = factorization(out_dim, rank)
        block = torch.zeros(block_num, block_size, block_size, device=weight.device, dtype=torch.float32)
        return OFTDiff((block, None, alpha, None))

    def to_train(self):
        """Wrap the loaded weights in a trainable OFTDiff module."""
        return OFTDiff(self.weights)

    @classmethod
    def load(
        cls,
        x: str,
        lora: dict[str, torch.Tensor],
        alpha: float,
        dora_scale: torch.Tensor,
        loaded_keys: set[str] = None,
    ) -> Optional["OFTAdapter"]:
        """Build an OFTAdapter from state dict ``lora`` for key prefix ``x``.

        Only 3-D ``oft_blocks`` are accepted (4-D blocks belong to BOFT);
        returns None otherwise. Consumed keys are added to ``loaded_keys``.
        """
        if loaded_keys is None:
            loaded_keys = set()
        blocks_name = "{}.oft_blocks".format(x)
        rescale_name = "{}.rescale".format(x)

        blocks = None
        if blocks_name in lora.keys():
            blocks = lora[blocks_name]
            if blocks.ndim == 3:
                loaded_keys.add(blocks_name)
            else:
                blocks = None
        if blocks is None:
            return None

        rescale = None
        if rescale_name in lora.keys():
            rescale = lora[rescale_name]
            loaded_keys.add(rescale_name)

        weights = (blocks, rescale, alpha, dora_scale)
        return cls(loaded_keys, weights)

    def calculate_weight(
        self,
        weight,
        key,
        strength,
        strength_model,
        offset,
        function,
        intermediate_dtype=torch.float32,
        original_weight=None,
    ):
        """Apply the OFT rotation delta to ``weight`` in place and return it."""
        v = self.weights
        blocks = v[0]
        rescale = v[1]
        alpha = v[2]
        if alpha is None:
            alpha = 0
        dora_scale = v[3]

        blocks = memory_management.cast_to_device(blocks, weight.device, intermediate_dtype)
        # NOTE(review): ``rescale`` is cast here but never applied to
        # lora_diff below — confirm whether rescaled OFT checkpoints are
        # handled correctly by this path.
        if rescale is not None:
            rescale = memory_management.cast_to_device(rescale, weight.device, intermediate_dtype)

        block_num, block_size, *_ = blocks.shape

        try:
            # Get r
            I = torch.eye(block_size, device=blocks.device, dtype=blocks.dtype)
            # for Q = -Q^T
            q = blocks - blocks.transpose(1, 2)
            normed_q = q
            if alpha > 0: # alpha in oft/boft is for constraint
                q_norm = torch.norm(q) + 1e-8
                if q_norm > alpha:
                    normed_q = q * alpha / q_norm
            # use float() to prevent unsupported type in .inverse()
            r = (I + normed_q) @ (I - normed_q).float().inverse()
            r = r.to(weight)
            _, *shape = weight.shape
            # Delta is (R - I) applied block-wise, pre-scaled by strength.
            lora_diff = torch.einsum(
                "k n m, k n ... -> k m ...",
                (r * strength) - strength * I,
                weight.view(block_num, block_size, *shape),
            ).view(-1, *shape)
            if dora_scale is not None:
                weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function)
            else:
                weight += function((strength * lora_diff).type(weight.dtype))
        except Exception as e:
            logging.error("ERROR {} {} {}".format(self.name, key, e))
        return weight
diff --git a/modules_forge/packages/comfy/weight_adapter/oftv2.py b/modules_forge/packages/comfy/weight_adapter/oftv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bef185b132f1a30855d6909d62f73505218b0f1
--- /dev/null
+++ b/modules_forge/packages/comfy/weight_adapter/oftv2.py
@@ -0,0 +1,196 @@
+import logging
+from typing import Optional
+
+import torch
+
+from .base import WeightAdapterBase, weight_decompose
+
+
class OFTRotationUtil:
    """Builds block-wise orthogonal rotations from OFTv2 parameters.

    ``weight`` holds, per block, the packed strict-upper-triangle entries of a
    skew-symmetric matrix Q; the rotation is the Cayley transform of Q,
    computed either exactly (linear solve) or via a truncated Neumann series.
    """

    def __init__(
        self,
        weight: torch.Tensor,
        block_size: int,
        coft: bool = False,
        eps: float = 6e-5,
        use_cayley_neumann: bool = True,
        num_cayley_neumann_terms: int = 5,
    ):
        self.weight = weight
        self.block_size = block_size
        self.coft = coft
        self.eps = eps
        self.use_cayley_neumann = use_cayley_neumann
        self.num_cayley_neumann_terms = num_cayley_neumann_terms
        # Strict upper-triangle coordinates, cached and migrated lazily to the
        # device of whatever tensor they are used with.
        self.rows, self.cols = torch.triu_indices(self.block_size, self.block_size, 1)

    def _get_triu_indices(self, device):
        """Return the cached (rows, cols) index pair on ``device``."""
        if self.rows.device != device:
            self.rows, self.cols = self.rows.to(device), self.cols.to(device)
        return self.rows, self.cols

    def _pytorch_skew_symmetric(self, vec: torch.Tensor) -> torch.Tensor:
        """Expand packed upper-triangle vectors into skew-symmetric matrices."""
        n = self.block_size
        upper = torch.zeros(vec.shape[0], n, n, device=vec.device, dtype=vec.dtype)
        rows, cols = self._get_triu_indices(vec.device)
        upper[:, rows, cols] = vec
        return upper - upper.transpose(-2, -1)

    def _pytorch_skew_symmetric_inv(self, matrix: torch.Tensor) -> torch.Tensor:
        """Pack skew-symmetric matrices back into upper-triangle vectors."""
        rows, cols = self._get_triu_indices(matrix.device)
        return matrix[:, rows, cols]

    def _project_batch(self) -> torch.Tensor:
        """COFT projection: clamp each block's Q onto a Frobenius eps-ball."""
        Q = self._pytorch_skew_symmetric(self.weight)
        radius = self.eps * (1 / torch.sqrt(torch.tensor(Q.shape[0], device=Q.device)))
        origin = torch.zeros_like(Q)
        delta = Q - origin
        delta_norm = torch.norm(delta, dim=(1, 2), keepdim=True)
        inside = (delta_norm <= radius).bool()
        clamped = torch.where(inside, Q, origin + radius * (delta / delta_norm))
        return self._pytorch_skew_symmetric_inv(clamped)

    def _cayley_batch(self, Q: torch.Tensor) -> torch.Tensor:
        """Per-block Cayley transform R = (I - Q)(I + Q)^-1 family.

        Neumann path approximates with R ~= I + 2*(Q + Q^2 + ... + Q^(k-1));
        exact path solves the linear system directly.
        """
        batch = Q.shape[0]
        in_dtype = Q.dtype
        skew = self._pytorch_skew_symmetric(Q)
        if self.use_cayley_neumann:
            R = torch.eye(self.block_size, device=Q.device, dtype=Q.dtype).repeat(batch, 1, 1)
            power = None
            for _ in range(1, self.num_cayley_neumann_terms):
                power = skew if power is None else torch.bmm(power, skew)
                R.add_(power, alpha=2.0)
        else:
            identity = torch.eye(self.block_size, device=skew.device).unsqueeze(0).expand_as(skew)
            R = torch.linalg.solve(identity + skew, identity - skew, left=False)
        return R.to(in_dtype)

    def get_rotation_matrix(self) -> torch.Tensor:
        """Return the batch of rotation matrices (COFT projection first, if on)."""
        params = self.weight
        if self.coft:
            with torch.no_grad():
                params.copy_(self._project_batch())
        return self._cayley_batch(params)
+
+
class OFTv2Adapter(WeightAdapterBase):
    """OFTv2 weight adapter (PEFT-style ``oft_R`` parameterization).

    The loaded ``oft_R.weight`` tensor packs, per block, the upper-triangle
    entries of a skew-symmetric matrix; the delta rotates the input-feature
    blocks of the base weight.
    """

    name = "oftv2"

    def __init__(self, loaded_keys: set[str], weights: tuple):
        # weights: (oft_R_weight, alpha, dora_scale)
        self.loaded_keys = loaded_keys
        self.weights = weights

    @classmethod
    def load(
        cls,
        x: str,
        lora: dict[str, torch.Tensor],
        alpha: float,
        dora_scale: torch.Tensor,
        loaded_keys: Optional[set[str]] = None,
    ) -> Optional["OFTv2Adapter"]:
        """Build an OFTv2Adapter from state dict ``lora`` for key prefix ``x``.

        Returns None when there is no ``.oft_R.weight`` entry. Consumed keys
        are added to ``loaded_keys``.
        """
        if loaded_keys is None:
            loaded_keys = set()
        oft_r_weight_name = f"{x}.oft_R.weight"
        if oft_r_weight_name in lora:
            oft_r_weight = lora[oft_r_weight_name]
            loaded_keys.add(oft_r_weight_name)
            weights = (oft_r_weight, alpha, dora_scale)
            return cls(loaded_keys, weights)
        return None

    def calculate_weight(
        self,
        weight,
        key,
        strength,
        strength_model,
        offset,
        function,
        intermediate_dtype=torch.float32,
        original_weight=None,
    ):
        """Apply the OFTv2 rotation delta to ``weight`` in place and return it.

        Shape/consistency problems are logged and leave ``weight`` untouched.
        """
        if strength == 0.0:
            return weight

        oft_r_weight_orig, alpha, dora_scale = self.weights

        try:
            oft_r_weight_processed = oft_r_weight_orig.to(weight.device, dtype=intermediate_dtype)

            # Recover block_size from the packed upper-triangle length:
            # n_elements = block_size * (block_size - 1) / 2.
            r_loaded, n_elements = oft_r_weight_processed.shape
            block_size_f = (1 + (1 + 8 * n_elements) ** 0.5) / 2
            if abs(block_size_f - round(block_size_f)) > 1e-6:
                logging.error(f"OFTv2: Could not determine integer block_size for {key}. n_elements={n_elements} is invalid.")
                return weight
            block_size = int(round(block_size_f))

            base_weight = original_weight if original_weight is not None else weight
            out_features, *in_dims_tuple = base_weight.shape
            in_features = torch.prod(torch.tensor(in_dims_tuple)).item()

            if in_features % block_size != 0:
                logging.warning(f"OFTv2: in_features ({in_features}) not divisible by block_size ({block_size}) for {key}.")
                return weight

            r_actual = in_features // block_size
            # A single loaded block means one rotation shared by all blocks.
            block_share = r_loaded == 1

            if not block_share and r_loaded != r_actual:
                logging.error(f"OFTv2: Mismatch in number of blocks for {key}. Loaded: {r_loaded}, Expected: {r_actual}.")
                return weight

            # Pass the unscaled weight to the utility to get the full rotation matrix
            util = OFTRotationUtil(oft_r_weight_processed, block_size)
            orth_rotate = util.get_rotation_matrix()

            # For Linear layers, rotates the input (x @ R), equivalent to rotating weights by R.T (W @ R.T).
            # For Conv2d layers, rotates the weights directly (W @ R) to preserve spatial information.

            # Linear delta: W @ (R.T - I)
            # Conv2d delta: W @ (R - I)
            I = torch.eye(block_size, device=orth_rotate.device, dtype=orth_rotate.dtype)

            # Use weight dimension to determine layer type. Linear is 2D, Conv2d is 4D.
            is_conv2d = base_weight.dim() == 4

            if is_conv2d:
                # Use R for Conv2d layers
                rotation_matrix_for_weight = orth_rotate
            else:
                # Use R.T for Linear layers
                rotation_matrix_for_weight = orth_rotate.transpose(-1, -2)

            if block_share:
                diff_matrix = rotation_matrix_for_weight - I.unsqueeze(0)
            else:
                diff_matrix = rotation_matrix_for_weight - I

            # Apply (R - I) per input-feature block and restore the shape.
            w_flat = base_weight.view(out_features, in_features)
            w_reshaped = w_flat.view(out_features, r_actual, block_size).to(intermediate_dtype)

            if block_share:
                w_diff_reshaped = torch.einsum("ork, kc -> orc", w_reshaped, diff_matrix.squeeze(0))
            else:
                w_diff_reshaped = torch.einsum("ork, rkc -> orc", w_reshaped, diff_matrix)

            lora_diff = w_diff_reshaped.reshape(base_weight.shape)

            if dora_scale is not None:
                # NOTE(review): sibling adapters call weight_decompose(dora_scale,
                # weight, lora_diff, alpha, strength, ...); here ``strength``
                # occupies the alpha slot with strength fixed at 1.0 and the
                # loaded ``alpha`` unused — confirm this is intentional.
                weight = weight_decompose(dora_scale, weight, lora_diff, strength, 1.0, intermediate_dtype, function)
            else:
                weight += function((lora_diff * strength).type(weight.dtype))

        except Exception as e:
            logging.error(f"ERROR applying OFTv2 for {key}: {e}", exc_info=True)

        return weight
diff --git a/modules_forge/packages/gguf/LICENSE b/modules_forge/packages/gguf/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..76f67efdc6470081b512a8db5bf2b1d4962d9c3c
--- /dev/null
+++ b/modules_forge/packages/gguf/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Georgi Gerganov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/modules_forge/packages/gguf/README.md b/modules_forge/packages/gguf/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6e7354a43daa1785eb870ff3d39c29d664fd9cb3
--- /dev/null
+++ b/modules_forge/packages/gguf/README.md
@@ -0,0 +1,3 @@
+## Forge's implementation of GGUF
+- Code is based on **llama.cpp**'s GGUF
+- The main difference is that it supports PyTorch quant/dequant
diff --git a/modules_forge/packages/gguf/__init__.py b/modules_forge/packages/gguf/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb289c37ff3d43aa277a5a192a851722a4455608
--- /dev/null
+++ b/modules_forge/packages/gguf/__init__.py
@@ -0,0 +1,8 @@
+from .constants import *
+from .gguf_reader import *
+from .gguf_writer import *
+from .lazy import *
+from .metadata import *
+from .quants import *
+from .tensor_mapping import *
+from .utility import *
diff --git a/modules_forge/packages/gguf/constants.py b/modules_forge/packages/gguf/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..7087a98098ee55b87ee3095921ae9f83b6ce8731
--- /dev/null
+++ b/modules_forge/packages/gguf/constants.py
@@ -0,0 +1,1362 @@
+from __future__ import annotations
+
+from enum import Enum, IntEnum, auto
+from typing import Any
+
+#
+# constants
+#
+
+GGUF_MAGIC = 0x46554747 # "GGUF"
+GGUF_VERSION = 3
+GGUF_DEFAULT_ALIGNMENT = 32
+GGML_QUANT_VERSION = 2 # GGML_QNT_VERSION from ggml.h
+
+#
+# metadata keys
+#
+
+
+class Keys:
+ class General:
+ TYPE = "general.type"
+ ARCHITECTURE = "general.architecture"
+ QUANTIZATION_VERSION = "general.quantization_version"
+ ALIGNMENT = "general.alignment"
+ FILE_TYPE = "general.file_type"
+
+ # Authorship Metadata
+ NAME = "general.name"
+ AUTHOR = "general.author"
+ VERSION = "general.version"
+ ORGANIZATION = "general.organization"
+
+ FINETUNE = "general.finetune"
+ BASENAME = "general.basename"
+
+ DESCRIPTION = "general.description"
+ QUANTIZED_BY = "general.quantized_by"
+
+ SIZE_LABEL = "general.size_label"
+
+ # Licensing details
+ LICENSE = "general.license"
+ LICENSE_NAME = "general.license.name"
+ LICENSE_LINK = "general.license.link"
+
+ # Typically represents the converted GGUF repo (Unless native)
+ URL = "general.url" # Model Website/Paper
+ DOI = "general.doi"
+ UUID = "general.uuid"
+ REPO_URL = "general.repo_url" # Model Source Repository (git/svn/etc...)
+
+ # Model Source during conversion
+ SOURCE_URL = "general.source.url" # Model Website/Paper
+ SOURCE_DOI = "general.source.doi"
+ SOURCE_UUID = "general.source.uuid"
+ SOURCE_REPO_URL = (
+ "general.source.repo_url" # Model Source Repository (git/svn/etc...)
+ )
+
+ # Base Model Source. There can be more than one source if it's a merged
+ # model like with 'Mistral-7B-Merge-14-v0.1'. This will assist in
+    # tracing lineage of models as they are finetuned or merged over time.
+ BASE_MODEL_COUNT = "general.base_model.count"
+ BASE_MODEL_NAME = "general.base_model.{id}.name"
+ BASE_MODEL_AUTHOR = "general.base_model.{id}.author"
+ BASE_MODEL_VERSION = "general.base_model.{id}.version"
+ BASE_MODEL_ORGANIZATION = "general.base_model.{id}.organization"
+ BASE_MODEL_URL = "general.base_model.{id}.url" # Model Website/Paper
+ BASE_MODEL_DOI = "general.base_model.{id}.doi"
+ BASE_MODEL_UUID = "general.base_model.{id}.uuid"
+ BASE_MODEL_REPO_URL = "general.base_model.{id}.repo_url" # Model Source Repository (git/svn/etc...)
+
+ # Array based KV stores
+ TAGS = "general.tags"
+ LANGUAGES = "general.languages"
+ DATASETS = "general.datasets"
+
+ class LLM:
+ VOCAB_SIZE = "{arch}.vocab_size"
+ CONTEXT_LENGTH = "{arch}.context_length"
+ EMBEDDING_LENGTH = "{arch}.embedding_length"
+ BLOCK_COUNT = "{arch}.block_count"
+ LEADING_DENSE_BLOCK_COUNT = "{arch}.leading_dense_block_count"
+ FEED_FORWARD_LENGTH = "{arch}.feed_forward_length"
+ EXPERT_FEED_FORWARD_LENGTH = "{arch}.expert_feed_forward_length"
+ EXPERT_SHARED_FEED_FORWARD_LENGTH = "{arch}.expert_shared_feed_forward_length"
+ USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual"
+ TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout"
+ EXPERT_COUNT = "{arch}.expert_count"
+ EXPERT_USED_COUNT = "{arch}.expert_used_count"
+ EXPERT_SHARED_COUNT = "{arch}.expert_shared_count"
+ EXPERT_WEIGHTS_SCALE = "{arch}.expert_weights_scale"
+ POOLING_TYPE = "{arch}.pooling_type"
+ LOGIT_SCALE = "{arch}.logit_scale"
+ DECODER_START_TOKEN_ID = "{arch}.decoder_start_token_id"
+ ATTN_LOGIT_SOFTCAPPING = "{arch}.attn_logit_softcapping"
+ FINAL_LOGIT_SOFTCAPPING = "{arch}.final_logit_softcapping"
+
+ class Attention:
+ HEAD_COUNT = "{arch}.attention.head_count"
+ HEAD_COUNT_KV = "{arch}.attention.head_count_kv"
+ MAX_ALIBI_BIAS = "{arch}.attention.max_alibi_bias"
+ CLAMP_KQV = "{arch}.attention.clamp_kqv"
+ KEY_LENGTH = "{arch}.attention.key_length"
+ VALUE_LENGTH = "{arch}.attention.value_length"
+ LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon"
+ LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon"
+ CAUSAL = "{arch}.attention.causal"
+ Q_LORA_RANK = "{arch}.attention.q_lora_rank"
+ KV_LORA_RANK = "{arch}.attention.kv_lora_rank"
+ REL_BUCKETS_COUNT = "{arch}.attention.relative_buckets_count"
+ SLIDING_WINDOW = "{arch}.attention.sliding_window"
+
+ class Rope:
+ DIMENSION_COUNT = "{arch}.rope.dimension_count"
+ FREQ_BASE = "{arch}.rope.freq_base"
+ SCALING_TYPE = "{arch}.rope.scaling.type"
+ SCALING_FACTOR = "{arch}.rope.scaling.factor"
+ SCALING_ATTN_FACTOR = "{arch}.rope.scaling.attn_factor"
+ SCALING_ORIG_CTX_LEN = "{arch}.rope.scaling.original_context_length"
+ SCALING_FINETUNED = "{arch}.rope.scaling.finetuned"
+ SCALING_YARN_LOG_MUL = "{arch}.rope.scaling.yarn_log_multiplier"
+
+ class Split:
+ LLM_KV_SPLIT_NO = "split.no"
+ LLM_KV_SPLIT_COUNT = "split.count"
+ LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count"
+
+ class SSM:
+ CONV_KERNEL = "{arch}.ssm.conv_kernel"
+ INNER_SIZE = "{arch}.ssm.inner_size"
+ STATE_SIZE = "{arch}.ssm.state_size"
+ TIME_STEP_RANK = "{arch}.ssm.time_step_rank"
+
+ class Tokenizer:
+ MODEL = "tokenizer.ggml.model"
+ PRE = "tokenizer.ggml.pre"
+ LIST = "tokenizer.ggml.tokens"
+ TOKEN_TYPE = "tokenizer.ggml.token_type"
+ TOKEN_TYPE_COUNT = (
+ "tokenizer.ggml.token_type_count" # for BERT-style token types
+ )
+ SCORES = "tokenizer.ggml.scores"
+ MERGES = "tokenizer.ggml.merges"
+ BOS_ID = "tokenizer.ggml.bos_token_id"
+ EOS_ID = "tokenizer.ggml.eos_token_id"
+ UNK_ID = "tokenizer.ggml.unknown_token_id"
+ SEP_ID = "tokenizer.ggml.seperator_token_id"
+ PAD_ID = "tokenizer.ggml.padding_token_id"
+ CLS_ID = "tokenizer.ggml.cls_token_id"
+ MASK_ID = "tokenizer.ggml.mask_token_id"
+ ADD_BOS = "tokenizer.ggml.add_bos_token"
+ ADD_EOS = "tokenizer.ggml.add_eos_token"
+ ADD_PREFIX = "tokenizer.ggml.add_space_prefix"
+ REMOVE_EXTRA_WS = "tokenizer.ggml.remove_extra_whitespaces"
+ PRECOMPILED_CHARSMAP = "tokenizer.ggml.precompiled_charsmap"
+ HF_JSON = "tokenizer.huggingface.json"
+ RWKV = "tokenizer.rwkv.world"
+ CHAT_TEMPLATE = "tokenizer.chat_template"
+ CHAT_TEMPLATE_N = "tokenizer.chat_template.{name}"
+ CHAT_TEMPLATES = "tokenizer.chat_templates"
+ # FIM/Infill special tokens constants
+ PREFIX_ID = "tokenizer.ggml.prefix_token_id"
+ SUFFIX_ID = "tokenizer.ggml.suffix_token_id"
+ MIDDLE_ID = "tokenizer.ggml.middle_token_id"
+ EOT_ID = "tokenizer.ggml.eot_token_id"
+ EOM_ID = "tokenizer.ggml.eom_token_id"
+
+ class Adapter:
+ TYPE = "adapter.type"
+ LORA_ALPHA = "adapter.lora.alpha"
+
+
+#
+# recommended mapping of model tensor names for storage in gguf
+#
+
+
+class GGUFType:
+ MODEL = "model"
+ ADAPTER = "adapter"
+
+
+class MODEL_ARCH(IntEnum):
+ LLAMA = auto()
+ FALCON = auto()
+ BAICHUAN = auto()
+ GROK = auto()
+ GPT2 = auto()
+ GPTJ = auto()
+ GPTNEOX = auto()
+ MPT = auto()
+ STARCODER = auto()
+ REFACT = auto()
+ BERT = auto()
+ NOMIC_BERT = auto()
+ JINA_BERT_V2 = auto()
+ BLOOM = auto()
+ STABLELM = auto()
+ QWEN = auto()
+ QWEN2 = auto()
+ QWEN2MOE = auto()
+ PHI2 = auto()
+ PHI3 = auto()
+ PLAMO = auto()
+ CODESHELL = auto()
+ ORION = auto()
+ INTERNLM2 = auto()
+ MINICPM = auto()
+ GEMMA = auto()
+ GEMMA2 = auto()
+ STARCODER2 = auto()
+ MAMBA = auto()
+ XVERSE = auto()
+ COMMAND_R = auto()
+ DBRX = auto()
+ OLMO = auto()
+ OPENELM = auto()
+ ARCTIC = auto()
+ DEEPSEEK2 = auto()
+ CHATGLM = auto()
+ BITNET = auto()
+ T5 = auto()
+ T5ENCODER = auto()
+ JAIS = auto()
+
+
+class MODEL_TENSOR(IntEnum):
+ TOKEN_EMBD = auto()
+ TOKEN_EMBD_NORM = auto()
+ TOKEN_TYPES = auto()
+ POS_EMBD = auto()
+ OUTPUT = auto()
+ OUTPUT_NORM = auto()
+ ROPE_FREQS = auto()
+ ROPE_FACTORS_LONG = auto()
+ ROPE_FACTORS_SHORT = auto()
+ ATTN_Q = auto()
+ ATTN_K = auto()
+ ATTN_V = auto()
+ ATTN_QKV = auto()
+ ATTN_OUT = auto()
+ ATTN_NORM = auto()
+ ATTN_NORM_2 = auto()
+ ATTN_OUT_NORM = auto()
+ ATTN_POST_NORM = auto()
+ ATTN_ROT_EMBD = auto()
+ FFN_GATE_INP = auto()
+ FFN_GATE_INP_SHEXP = auto()
+ FFN_NORM = auto()
+ FFN_PRE_NORM = auto()
+ FFN_POST_NORM = auto()
+ FFN_GATE = auto()
+ FFN_DOWN = auto()
+ FFN_UP = auto()
+ FFN_ACT = auto()
+ FFN_NORM_EXP = auto()
+ FFN_GATE_EXP = auto()
+ FFN_DOWN_EXP = auto()
+ FFN_UP_EXP = auto()
+ FFN_GATE_SHEXP = auto()
+ FFN_DOWN_SHEXP = auto()
+ FFN_UP_SHEXP = auto()
+ ATTN_Q_NORM = auto()
+ ATTN_K_NORM = auto()
+ LAYER_OUT_NORM = auto()
+ SSM_IN = auto()
+ SSM_CONV1D = auto()
+ SSM_X = auto()
+ SSM_DT = auto()
+ SSM_A = auto()
+ SSM_D = auto()
+ SSM_OUT = auto()
+ ATTN_Q_A = auto()
+ ATTN_Q_B = auto()
+ ATTN_KV_A_MQA = auto()
+ ATTN_KV_B = auto()
+ ATTN_Q_A_NORM = auto()
+ ATTN_KV_A_NORM = auto()
+ FFN_SUB_NORM = auto()
+ ATTN_SUB_NORM = auto()
+ DEC_ATTN_NORM = auto()
+ DEC_ATTN_Q = auto()
+ DEC_ATTN_K = auto()
+ DEC_ATTN_V = auto()
+ DEC_ATTN_OUT = auto()
+ DEC_ATTN_REL_B = auto()
+ DEC_CROSS_ATTN_NORM = auto()
+ DEC_CROSS_ATTN_Q = auto()
+ DEC_CROSS_ATTN_K = auto()
+ DEC_CROSS_ATTN_V = auto()
+ DEC_CROSS_ATTN_OUT = auto()
+ DEC_CROSS_ATTN_REL_B = auto()
+ DEC_FFN_NORM = auto()
+ DEC_FFN_GATE = auto()
+ DEC_FFN_DOWN = auto()
+ DEC_FFN_UP = auto()
+ DEC_OUTPUT_NORM = auto()
+ ENC_ATTN_NORM = auto()
+ ENC_ATTN_Q = auto()
+ ENC_ATTN_K = auto()
+ ENC_ATTN_V = auto()
+ ENC_ATTN_OUT = auto()
+ ENC_ATTN_REL_B = auto()
+ ENC_FFN_NORM = auto()
+ ENC_FFN_GATE = auto()
+ ENC_FFN_DOWN = auto()
+ ENC_FFN_UP = auto()
+ ENC_OUTPUT_NORM = auto()
+
+
+MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
+ MODEL_ARCH.LLAMA: "llama",
+ MODEL_ARCH.FALCON: "falcon",
+ MODEL_ARCH.BAICHUAN: "baichuan",
+ MODEL_ARCH.GROK: "grok",
+ MODEL_ARCH.GPT2: "gpt2",
+ MODEL_ARCH.GPTJ: "gptj",
+ MODEL_ARCH.GPTNEOX: "gptneox",
+ MODEL_ARCH.MPT: "mpt",
+ MODEL_ARCH.STARCODER: "starcoder",
+ MODEL_ARCH.REFACT: "refact",
+ MODEL_ARCH.BERT: "bert",
+ MODEL_ARCH.NOMIC_BERT: "nomic-bert",
+ MODEL_ARCH.JINA_BERT_V2: "jina-bert-v2",
+ MODEL_ARCH.BLOOM: "bloom",
+ MODEL_ARCH.STABLELM: "stablelm",
+ MODEL_ARCH.QWEN: "qwen",
+ MODEL_ARCH.QWEN2: "qwen2",
+ MODEL_ARCH.QWEN2MOE: "qwen2moe",
+ MODEL_ARCH.PHI2: "phi2",
+ MODEL_ARCH.PHI3: "phi3",
+ MODEL_ARCH.PLAMO: "plamo",
+ MODEL_ARCH.CODESHELL: "codeshell",
+ MODEL_ARCH.ORION: "orion",
+ MODEL_ARCH.INTERNLM2: "internlm2",
+ MODEL_ARCH.MINICPM: "minicpm",
+ MODEL_ARCH.GEMMA: "gemma",
+ MODEL_ARCH.GEMMA2: "gemma2",
+ MODEL_ARCH.STARCODER2: "starcoder2",
+ MODEL_ARCH.MAMBA: "mamba",
+ MODEL_ARCH.XVERSE: "xverse",
+ MODEL_ARCH.COMMAND_R: "command-r",
+ MODEL_ARCH.DBRX: "dbrx",
+ MODEL_ARCH.OLMO: "olmo",
+ MODEL_ARCH.OPENELM: "openelm",
+ MODEL_ARCH.ARCTIC: "arctic",
+ MODEL_ARCH.DEEPSEEK2: "deepseek2",
+ MODEL_ARCH.CHATGLM: "chatglm",
+ MODEL_ARCH.BITNET: "bitnet",
+ MODEL_ARCH.T5: "t5",
+ MODEL_ARCH.T5ENCODER: "t5encoder",
+ MODEL_ARCH.JAIS: "jais",
+}
+
+TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
+ MODEL_TENSOR.TOKEN_EMBD: "token_embd",
+ MODEL_TENSOR.TOKEN_EMBD_NORM: "token_embd_norm",
+ MODEL_TENSOR.TOKEN_TYPES: "token_types",
+ MODEL_TENSOR.POS_EMBD: "position_embd",
+ MODEL_TENSOR.OUTPUT_NORM: "output_norm",
+ MODEL_TENSOR.OUTPUT: "output",
+ MODEL_TENSOR.ROPE_FREQS: "rope_freqs",
+ MODEL_TENSOR.ROPE_FACTORS_LONG: "rope_factors_long",
+ MODEL_TENSOR.ROPE_FACTORS_SHORT: "rope_factors_short",
+ MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm",
+ MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2",
+ MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv",
+ MODEL_TENSOR.ATTN_Q: "blk.{bid}.attn_q",
+ MODEL_TENSOR.ATTN_K: "blk.{bid}.attn_k",
+ MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v",
+ MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
+ MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd",
+ MODEL_TENSOR.ATTN_Q_NORM: "blk.{bid}.attn_q_norm",
+ MODEL_TENSOR.ATTN_K_NORM: "blk.{bid}.attn_k_norm",
+ MODEL_TENSOR.ATTN_OUT_NORM: "blk.{bid}.attn_output_norm",
+ MODEL_TENSOR.ATTN_POST_NORM: "blk.{bid}.post_attention_norm",
+ MODEL_TENSOR.FFN_GATE_INP: "blk.{bid}.ffn_gate_inp",
+ MODEL_TENSOR.FFN_GATE_INP_SHEXP: "blk.{bid}.ffn_gate_inp_shexp",
+ MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm",
+ MODEL_TENSOR.FFN_PRE_NORM: "blk.{bid}.ffn_norm",
+ MODEL_TENSOR.FFN_POST_NORM: "blk.{bid}.post_ffw_norm",
+ MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate",
+ MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
+ MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up",
+ MODEL_TENSOR.FFN_GATE_SHEXP: "blk.{bid}.ffn_gate_shexp",
+ MODEL_TENSOR.FFN_DOWN_SHEXP: "blk.{bid}.ffn_down_shexp",
+ MODEL_TENSOR.FFN_UP_SHEXP: "blk.{bid}.ffn_up_shexp",
+ MODEL_TENSOR.FFN_ACT: "blk.{bid}.ffn",
+ MODEL_TENSOR.FFN_NORM_EXP: "blk.{bid}.ffn_norm_exps",
+ MODEL_TENSOR.FFN_GATE_EXP: "blk.{bid}.ffn_gate_exps",
+ MODEL_TENSOR.FFN_DOWN_EXP: "blk.{bid}.ffn_down_exps",
+ MODEL_TENSOR.FFN_UP_EXP: "blk.{bid}.ffn_up_exps",
+ MODEL_TENSOR.LAYER_OUT_NORM: "blk.{bid}.layer_output_norm",
+ MODEL_TENSOR.SSM_IN: "blk.{bid}.ssm_in",
+ MODEL_TENSOR.SSM_CONV1D: "blk.{bid}.ssm_conv1d",
+ MODEL_TENSOR.SSM_X: "blk.{bid}.ssm_x",
+ MODEL_TENSOR.SSM_DT: "blk.{bid}.ssm_dt",
+ MODEL_TENSOR.SSM_A: "blk.{bid}.ssm_a",
+ MODEL_TENSOR.SSM_D: "blk.{bid}.ssm_d",
+ MODEL_TENSOR.SSM_OUT: "blk.{bid}.ssm_out",
+ MODEL_TENSOR.ATTN_Q_A: "blk.{bid}.attn_q_a",
+ MODEL_TENSOR.ATTN_Q_B: "blk.{bid}.attn_q_b",
+ MODEL_TENSOR.ATTN_KV_A_MQA: "blk.{bid}.attn_kv_a_mqa",
+ MODEL_TENSOR.ATTN_KV_B: "blk.{bid}.attn_kv_b",
+ MODEL_TENSOR.ATTN_Q_A_NORM: "blk.{bid}.attn_q_a_norm",
+ MODEL_TENSOR.ATTN_KV_A_NORM: "blk.{bid}.attn_kv_a_norm",
+ MODEL_TENSOR.ATTN_SUB_NORM: "blk.{bid}.attn_sub_norm",
+ MODEL_TENSOR.FFN_SUB_NORM: "blk.{bid}.ffn_sub_norm",
+ MODEL_TENSOR.DEC_ATTN_NORM: "dec.blk.{bid}.attn_norm",
+ MODEL_TENSOR.DEC_ATTN_Q: "dec.blk.{bid}.attn_q",
+ MODEL_TENSOR.DEC_ATTN_K: "dec.blk.{bid}.attn_k",
+ MODEL_TENSOR.DEC_ATTN_V: "dec.blk.{bid}.attn_v",
+ MODEL_TENSOR.DEC_ATTN_OUT: "dec.blk.{bid}.attn_o",
+ MODEL_TENSOR.DEC_ATTN_REL_B: "dec.blk.{bid}.attn_rel_b",
+ MODEL_TENSOR.DEC_CROSS_ATTN_NORM: "dec.blk.{bid}.cross_attn_norm",
+ MODEL_TENSOR.DEC_CROSS_ATTN_Q: "dec.blk.{bid}.cross_attn_q",
+ MODEL_TENSOR.DEC_CROSS_ATTN_K: "dec.blk.{bid}.cross_attn_k",
+ MODEL_TENSOR.DEC_CROSS_ATTN_V: "dec.blk.{bid}.cross_attn_v",
+ MODEL_TENSOR.DEC_CROSS_ATTN_OUT: "dec.blk.{bid}.cross_attn_o",
+ MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: "dec.blk.{bid}.cross_attn_rel_b",
+ MODEL_TENSOR.DEC_FFN_NORM: "dec.blk.{bid}.ffn_norm",
+ MODEL_TENSOR.DEC_FFN_GATE: "dec.blk.{bid}.ffn_gate",
+ MODEL_TENSOR.DEC_FFN_DOWN: "dec.blk.{bid}.ffn_down",
+ MODEL_TENSOR.DEC_FFN_UP: "dec.blk.{bid}.ffn_up",
+ MODEL_TENSOR.DEC_OUTPUT_NORM: "dec.output_norm",
+ MODEL_TENSOR.ENC_ATTN_NORM: "enc.blk.{bid}.attn_norm",
+ MODEL_TENSOR.ENC_ATTN_Q: "enc.blk.{bid}.attn_q",
+ MODEL_TENSOR.ENC_ATTN_K: "enc.blk.{bid}.attn_k",
+ MODEL_TENSOR.ENC_ATTN_V: "enc.blk.{bid}.attn_v",
+ MODEL_TENSOR.ENC_ATTN_OUT: "enc.blk.{bid}.attn_o",
+ MODEL_TENSOR.ENC_ATTN_REL_B: "enc.blk.{bid}.attn_rel_b",
+ MODEL_TENSOR.ENC_FFN_NORM: "enc.blk.{bid}.ffn_norm",
+ MODEL_TENSOR.ENC_FFN_GATE: "enc.blk.{bid}.ffn_gate",
+ MODEL_TENSOR.ENC_FFN_DOWN: "enc.blk.{bid}.ffn_down",
+ MODEL_TENSOR.ENC_FFN_UP: "enc.blk.{bid}.ffn_up",
+ MODEL_TENSOR.ENC_OUTPUT_NORM: "enc.output_norm",
+}
+
+MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
+ MODEL_ARCH.LLAMA: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_GATE_INP,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.FFN_GATE_EXP,
+ MODEL_TENSOR.FFN_DOWN_EXP,
+ MODEL_TENSOR.FFN_UP_EXP,
+ ],
+ MODEL_ARCH.GROK: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.ATTN_OUT_NORM,
+ MODEL_TENSOR.FFN_GATE_INP,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.FFN_GATE_EXP,
+ MODEL_TENSOR.FFN_DOWN_EXP,
+ MODEL_TENSOR.FFN_UP_EXP,
+ MODEL_TENSOR.LAYER_OUT_NORM,
+ ],
+ MODEL_ARCH.GPTNEOX: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.FALCON: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_NORM_2,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.BAICHUAN: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.STARCODER: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.POS_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.BERT: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.TOKEN_EMBD_NORM,
+ MODEL_TENSOR.TOKEN_TYPES,
+ MODEL_TENSOR.POS_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.ATTN_OUT_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.LAYER_OUT_NORM,
+ ],
+ MODEL_ARCH.NOMIC_BERT: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.TOKEN_EMBD_NORM,
+ MODEL_TENSOR.TOKEN_TYPES,
+ MODEL_TENSOR.POS_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.ATTN_OUT_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.LAYER_OUT_NORM,
+ ],
+ MODEL_ARCH.JINA_BERT_V2: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.TOKEN_EMBD_NORM,
+ MODEL_TENSOR.TOKEN_TYPES,
+ MODEL_TENSOR.ATTN_NORM_2,
+ MODEL_TENSOR.ATTN_OUT_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_Q_NORM,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_K_NORM,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.LAYER_OUT_NORM,
+ ],
+ MODEL_ARCH.MPT: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.FFN_ACT,
+ MODEL_TENSOR.ATTN_Q_NORM,
+ MODEL_TENSOR.ATTN_K_NORM,
+ MODEL_TENSOR.POS_EMBD,
+ ],
+ MODEL_ARCH.GPTJ: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.REFACT: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.BLOOM: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.TOKEN_EMBD_NORM,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.STABLELM: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.ATTN_Q_NORM,
+ MODEL_TENSOR.ATTN_K_NORM,
+ ],
+ MODEL_ARCH.QWEN: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.QWEN2: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.QWEN2MOE: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE_INP,
+ MODEL_TENSOR.FFN_GATE_EXP,
+ MODEL_TENSOR.FFN_DOWN_EXP,
+ MODEL_TENSOR.FFN_UP_EXP,
+ MODEL_TENSOR.FFN_GATE_INP_SHEXP,
+ MODEL_TENSOR.FFN_GATE_SHEXP,
+ MODEL_TENSOR.FFN_DOWN_SHEXP,
+ MODEL_TENSOR.FFN_UP_SHEXP,
+ ],
+ MODEL_ARCH.PLAMO: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.GPT2: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.POS_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.PHI2: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.PHI3: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.CODESHELL: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.POS_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.ORION: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.INTERNLM2: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.MINICPM: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_GATE_INP,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.FFN_GATE_EXP,
+ MODEL_TENSOR.FFN_DOWN_EXP,
+ MODEL_TENSOR.FFN_UP_EXP,
+ ],
+ MODEL_ARCH.GEMMA: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.FFN_NORM,
+ ],
+ MODEL_ARCH.GEMMA2: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_POST_NORM,
+ MODEL_TENSOR.FFN_PRE_NORM,
+ MODEL_TENSOR.FFN_POST_NORM,
+ ],
+ MODEL_ARCH.STARCODER2: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.MAMBA: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.SSM_IN,
+ MODEL_TENSOR.SSM_CONV1D,
+ MODEL_TENSOR.SSM_X,
+ MODEL_TENSOR.SSM_DT,
+ MODEL_TENSOR.SSM_A,
+ MODEL_TENSOR.SSM_D,
+ MODEL_TENSOR.SSM_OUT,
+ ],
+ MODEL_ARCH.XVERSE: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.COMMAND_R: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.ATTN_K_NORM,
+ MODEL_TENSOR.ATTN_Q_NORM,
+ ],
+ MODEL_ARCH.DBRX: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_OUT_NORM,
+ MODEL_TENSOR.FFN_GATE_INP,
+ MODEL_TENSOR.FFN_GATE_EXP,
+ MODEL_TENSOR.FFN_DOWN_EXP,
+ MODEL_TENSOR.FFN_UP_EXP,
+ ],
+ MODEL_ARCH.OLMO: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.OPENELM: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_Q_NORM,
+ MODEL_TENSOR.ATTN_K_NORM,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.ARCTIC: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_GATE_INP,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.FFN_NORM_EXP,
+ MODEL_TENSOR.FFN_GATE_EXP,
+ MODEL_TENSOR.FFN_DOWN_EXP,
+ MODEL_TENSOR.FFN_UP_EXP,
+ ],
+ MODEL_ARCH.DEEPSEEK2: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_Q_A,
+ MODEL_TENSOR.ATTN_Q_B,
+ MODEL_TENSOR.ATTN_KV_A_MQA,
+ MODEL_TENSOR.ATTN_KV_B,
+ MODEL_TENSOR.ATTN_Q_A_NORM,
+ MODEL_TENSOR.ATTN_KV_A_NORM,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_GATE_INP,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.FFN_GATE_EXP,
+ MODEL_TENSOR.FFN_DOWN_EXP,
+ MODEL_TENSOR.FFN_UP_EXP,
+ MODEL_TENSOR.FFN_GATE_SHEXP,
+ MODEL_TENSOR.FFN_DOWN_SHEXP,
+ MODEL_TENSOR.FFN_UP_SHEXP,
+ ],
+ MODEL_ARCH.CHATGLM: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ MODEL_ARCH.BITNET: [
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ MODEL_TENSOR.ATTN_SUB_NORM,
+ MODEL_TENSOR.FFN_SUB_NORM,
+ ],
+ MODEL_ARCH.T5: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.DEC_ATTN_NORM,
+ MODEL_TENSOR.DEC_ATTN_Q,
+ MODEL_TENSOR.DEC_ATTN_K,
+ MODEL_TENSOR.DEC_ATTN_V,
+ MODEL_TENSOR.DEC_ATTN_OUT,
+ MODEL_TENSOR.DEC_ATTN_REL_B,
+ MODEL_TENSOR.DEC_CROSS_ATTN_NORM,
+ MODEL_TENSOR.DEC_CROSS_ATTN_Q,
+ MODEL_TENSOR.DEC_CROSS_ATTN_K,
+ MODEL_TENSOR.DEC_CROSS_ATTN_V,
+ MODEL_TENSOR.DEC_CROSS_ATTN_OUT,
+ MODEL_TENSOR.DEC_CROSS_ATTN_REL_B,
+ MODEL_TENSOR.DEC_FFN_NORM,
+ MODEL_TENSOR.DEC_FFN_GATE,
+ MODEL_TENSOR.DEC_FFN_DOWN,
+ MODEL_TENSOR.DEC_FFN_UP,
+ MODEL_TENSOR.DEC_OUTPUT_NORM,
+ MODEL_TENSOR.ENC_ATTN_NORM,
+ MODEL_TENSOR.ENC_ATTN_Q,
+ MODEL_TENSOR.ENC_ATTN_K,
+ MODEL_TENSOR.ENC_ATTN_V,
+ MODEL_TENSOR.ENC_ATTN_OUT,
+ MODEL_TENSOR.ENC_ATTN_REL_B,
+ MODEL_TENSOR.ENC_FFN_NORM,
+ MODEL_TENSOR.ENC_FFN_GATE,
+ MODEL_TENSOR.ENC_FFN_DOWN,
+ MODEL_TENSOR.ENC_FFN_UP,
+ MODEL_TENSOR.ENC_OUTPUT_NORM,
+ ],
+ MODEL_ARCH.T5ENCODER: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ENC_ATTN_NORM,
+ MODEL_TENSOR.ENC_ATTN_Q,
+ MODEL_TENSOR.ENC_ATTN_K,
+ MODEL_TENSOR.ENC_ATTN_V,
+ MODEL_TENSOR.ENC_ATTN_OUT,
+ MODEL_TENSOR.ENC_ATTN_REL_B,
+ MODEL_TENSOR.ENC_FFN_NORM,
+ MODEL_TENSOR.ENC_FFN_GATE,
+ MODEL_TENSOR.ENC_FFN_DOWN,
+ MODEL_TENSOR.ENC_FFN_UP,
+ MODEL_TENSOR.ENC_OUTPUT_NORM,
+ ],
+ MODEL_ARCH.JAIS: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_UP,
+ ],
+ # TODO
+}
+
+# tensors that will not be serialized
# Per-architecture list of tensors that are skipped when serializing
# (see note above); these are runtime-only rotary-embedding helpers.
MODEL_TENSOR_SKIP: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
    MODEL_ARCH.LLAMA: [
        MODEL_TENSOR.ROPE_FREQS,
        MODEL_TENSOR.ATTN_ROT_EMBD,
    ],
    MODEL_ARCH.BAICHUAN: [
        MODEL_TENSOR.ROPE_FREQS,
        MODEL_TENSOR.ATTN_ROT_EMBD,
    ],
    MODEL_ARCH.QWEN: [
        MODEL_TENSOR.ROPE_FREQS,
        MODEL_TENSOR.ATTN_ROT_EMBD,
    ],
    MODEL_ARCH.CODESHELL: [
        MODEL_TENSOR.ROPE_FREQS,
        MODEL_TENSOR.ATTN_ROT_EMBD,
    ],
    MODEL_ARCH.ORION: [
        MODEL_TENSOR.ROPE_FREQS,
        MODEL_TENSOR.ATTN_ROT_EMBD,
    ],
    MODEL_ARCH.STARCODER2: [
        MODEL_TENSOR.ROPE_FREQS,
        MODEL_TENSOR.ATTN_ROT_EMBD,
    ],
    MODEL_ARCH.XVERSE: [
        MODEL_TENSOR.ROPE_FREQS,
        MODEL_TENSOR.ATTN_ROT_EMBD,
    ],
    MODEL_ARCH.DEEPSEEK2: [
        MODEL_TENSOR.ROPE_FREQS,
        MODEL_TENSOR.ATTN_ROT_EMBD,
    ],
    MODEL_ARCH.CHATGLM: [
        MODEL_TENSOR.ROPE_FREQS,
    ],
}
+
+#
+# types
+#
+
+
class TokenType(IntEnum):
    """Per-token classification written into GGUF tokenizer metadata
    (see Keys.Tokenizer.TOKEN_TYPE below).

    Values are serialized into model files, so they must stay stable;
    presumably they mirror llama.cpp's token types — confirm against llama.h.
    """

    NORMAL = 1
    UNKNOWN = 2
    CONTROL = 3
    USER_DEFINED = 4
    UNUSED = 5
    BYTE = 6
+
+
class RopeScalingType(Enum):
    """String identifiers for RoPE context-length scaling methods,
    stored under Keys.Rope.SCALING_TYPE."""

    NONE = "none"
    LINEAR = "linear"
    YARN = "yarn"
+
+
class PoolingType(IntEnum):
    """How token embeddings are pooled into a single vector (embedding models)."""

    NONE = 0
    MEAN = 1
    CLS = 2
+
+
class GGMLQuantizationType(IntEnum):
    """On-disk GGML tensor encodings: raw floats/ints plus block-quantized
    formats (block/type sizes are listed in GGML_QUANT_SIZES below).

    The numeric ids are part of the GGUF file format — never renumber.
    The gaps at 4 and 5 are presumably removed encodings (the file-type
    enum below notes that Q4_2/Q4_3 support was removed).
    """

    F32 = 0
    F16 = 1
    Q4_0 = 2
    Q4_1 = 3
    Q5_0 = 6
    Q5_1 = 7
    Q8_0 = 8
    Q8_1 = 9
    Q2_K = 10
    Q3_K = 11
    Q4_K = 12
    Q5_K = 13
    Q6_K = 14
    Q8_K = 15
    IQ2_XXS = 16
    IQ2_XS = 17
    IQ3_XXS = 18
    IQ1_S = 19
    IQ4_NL = 20
    IQ3_S = 21
    IQ2_S = 22
    IQ4_XS = 23
    I8 = 24
    I16 = 25
    I32 = 26
    I64 = 27
    F64 = 28
    IQ1_M = 29
    BF16 = 30
    Q4_0_4_4 = 31
    Q4_0_4_8 = 32
    Q4_0_8_8 = 33
+
+
+# TODO: add GGMLFileType from ggml_ftype in ggml.h
+
+
+# from llama_ftype in llama.h
+# ALL VALUES SHOULD BE THE SAME HERE AS THEY ARE OVER THERE.
class LlamaFileType(IntEnum):
    """Overall quantization type of a model file, mirroring `llama_ftype`
    in llama.h — all values must be kept identical to that header."""

    ALL_F32 = 0
    MOSTLY_F16 = 1  # except 1d tensors
    MOSTLY_Q4_0 = 2  # except 1d tensors
    MOSTLY_Q4_1 = 3  # except 1d tensors
    # MOSTLY_Q4_1_SOME_F16 = 4 # tok_embeddings.weight and output.weight are F16
    # MOSTLY_Q4_2 = 5 # support has been removed
    # MOSTLY_Q4_3 = 6 # support has been removed
    MOSTLY_Q8_0 = 7  # except 1d tensors
    MOSTLY_Q5_0 = 8  # except 1d tensors
    MOSTLY_Q5_1 = 9  # except 1d tensors
    MOSTLY_Q2_K = 10  # except 1d tensors
    MOSTLY_Q3_K_S = 11  # except 1d tensors
    MOSTLY_Q3_K_M = 12  # except 1d tensors
    MOSTLY_Q3_K_L = 13  # except 1d tensors
    MOSTLY_Q4_K_S = 14  # except 1d tensors
    MOSTLY_Q4_K_M = 15  # except 1d tensors
    MOSTLY_Q5_K_S = 16  # except 1d tensors
    MOSTLY_Q5_K_M = 17  # except 1d tensors
    MOSTLY_Q6_K = 18  # except 1d tensors
    MOSTLY_IQ2_XXS = 19  # except 1d tensors
    MOSTLY_IQ2_XS = 20  # except 1d tensors
    MOSTLY_Q2_K_S = 21  # except 1d tensors
    MOSTLY_IQ3_XS = 22  # except 1d tensors
    MOSTLY_IQ3_XXS = 23  # except 1d tensors
    MOSTLY_IQ1_S = 24  # except 1d tensors
    MOSTLY_IQ4_NL = 25  # except 1d tensors
    MOSTLY_IQ3_S = 26  # except 1d tensors
    MOSTLY_IQ3_M = 27  # except 1d tensors
    MOSTLY_IQ2_S = 28  # except 1d tensors
    MOSTLY_IQ2_M = 29  # except 1d tensors
    MOSTLY_IQ4_XS = 30  # except 1d tensors
    MOSTLY_IQ1_M = 31  # except 1d tensors
    MOSTLY_BF16 = 32  # except 1d tensors
    MOSTLY_Q4_0_4_4 = 33  # except 1d tensors
    MOSTLY_Q4_0_4_8 = 34  # except 1d tensors
    MOSTLY_Q4_0_8_8 = 35  # except 1d tensors

    GUESSED = 1024  # not specified in the model file
+
+
class GGUFEndian(IntEnum):
    """Byte order tag for a GGUF file."""

    LITTLE = 0
    BIG = 1
+
+
class GGUFValueType(IntEnum):
    """Type tags used to serialize GGUF key/value metadata entries."""

    UINT8 = 0
    INT8 = 1
    UINT16 = 2
    INT16 = 3
    UINT32 = 4
    INT32 = 5
    FLOAT32 = 6
    BOOL = 7
    STRING = 8
    ARRAY = 9
    UINT64 = 10
    INT64 = 11
    FLOAT64 = 12

    @staticmethod
    def get_type(val: Any) -> GGUFValueType:
        """Pick the GGUF type tag used to serialize the Python value *val*."""
        # Order matters: bool must be checked before int (bool subclasses
        # int), and string-likes before the generic cases.
        dispatch = (
            ((str, bytes, bytearray), GGUFValueType.STRING),
            (list, GGUFValueType.ARRAY),
            (float, GGUFValueType.FLOAT32),
            (bool, GGUFValueType.BOOL),
            (int, GGUFValueType.INT32),
            # TODO: need help with 64-bit types in Python
        )
        for kinds, tag in dispatch:
            if isinstance(val, kinds):
                return tag
        raise ValueError(f"Unknown type: {type(val)}")
+
+
+# Items here are (block size, type size)
QK_K = 256  # number of weights per K-quant super-block
# Maps each encoding to (elements per block, bytes per block); used to
# convert between element counts and on-disk byte sizes.
GGML_QUANT_SIZES: dict[GGMLQuantizationType, tuple[int, int]] = {
    GGMLQuantizationType.F32: (1, 4),
    GGMLQuantizationType.F16: (1, 2),
    GGMLQuantizationType.Q4_0: (32, 2 + 16),
    GGMLQuantizationType.Q4_1: (32, 2 + 2 + 16),
    GGMLQuantizationType.Q5_0: (32, 2 + 4 + 16),
    GGMLQuantizationType.Q5_1: (32, 2 + 2 + 4 + 16),
    GGMLQuantizationType.Q8_0: (32, 2 + 32),
    GGMLQuantizationType.Q8_1: (32, 4 + 4 + 32),
    GGMLQuantizationType.Q2_K: (256, 2 + 2 + QK_K // 16 + QK_K // 4),
    GGMLQuantizationType.Q3_K: (256, 2 + QK_K // 4 + QK_K // 8 + 12),
    GGMLQuantizationType.Q4_K: (256, 2 + 2 + QK_K // 2 + 12),
    GGMLQuantizationType.Q5_K: (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12),
    GGMLQuantizationType.Q6_K: (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16),
    GGMLQuantizationType.Q8_K: (256, 4 + QK_K + QK_K // 8),
    GGMLQuantizationType.IQ2_XXS: (256, 2 + QK_K // 4),
    GGMLQuantizationType.IQ2_XS: (256, 2 + QK_K // 4 + QK_K // 32),
    GGMLQuantizationType.IQ3_XXS: (256, 2 + QK_K // 4 + QK_K // 8),
    GGMLQuantizationType.IQ1_S: (256, 2 + QK_K // 8 + QK_K // 16),
    GGMLQuantizationType.IQ4_NL: (32, 2 + 16),
    GGMLQuantizationType.IQ3_S: (256, 2 + QK_K // 4 + QK_K // 8 + QK_K // 32 + 4),
    GGMLQuantizationType.IQ2_S: (256, 2 + QK_K // 4 + QK_K // 16),
    GGMLQuantizationType.IQ4_XS: (256, 2 + 2 + QK_K // 2 + QK_K // 64),
    GGMLQuantizationType.I8: (1, 1),
    GGMLQuantizationType.I16: (1, 2),
    GGMLQuantizationType.I32: (1, 4),
    GGMLQuantizationType.I64: (1, 8),
    GGMLQuantizationType.F64: (1, 8),
    GGMLQuantizationType.IQ1_M: (256, QK_K // 8 + QK_K // 16 + QK_K // 32),
    GGMLQuantizationType.BF16: (1, 2),
    GGMLQuantizationType.Q4_0_4_4: (32, 2 + 16),
    GGMLQuantizationType.Q4_0_4_8: (32, 2 + 16),
    GGMLQuantizationType.Q4_0_8_8: (32, 2 + 16),
}
+
+
# Aliases for backward compatibility.
# These flat module-level names predate the nested `Keys` namespace; keep
# them so older callers continue to work.

# general
KEY_GENERAL_ARCHITECTURE = Keys.General.ARCHITECTURE
KEY_GENERAL_QUANTIZATION_VERSION = Keys.General.QUANTIZATION_VERSION
KEY_GENERAL_ALIGNMENT = Keys.General.ALIGNMENT
KEY_GENERAL_NAME = Keys.General.NAME
KEY_GENERAL_AUTHOR = Keys.General.AUTHOR
KEY_GENERAL_URL = Keys.General.URL
KEY_GENERAL_DESCRIPTION = Keys.General.DESCRIPTION
KEY_GENERAL_LICENSE = Keys.General.LICENSE
KEY_GENERAL_SOURCE_URL = Keys.General.SOURCE_URL
KEY_GENERAL_FILE_TYPE = Keys.General.FILE_TYPE

# LLM
KEY_VOCAB_SIZE = Keys.LLM.VOCAB_SIZE
KEY_CONTEXT_LENGTH = Keys.LLM.CONTEXT_LENGTH
KEY_EMBEDDING_LENGTH = Keys.LLM.EMBEDDING_LENGTH
KEY_BLOCK_COUNT = Keys.LLM.BLOCK_COUNT
KEY_FEED_FORWARD_LENGTH = Keys.LLM.FEED_FORWARD_LENGTH
KEY_USE_PARALLEL_RESIDUAL = Keys.LLM.USE_PARALLEL_RESIDUAL
KEY_TENSOR_DATA_LAYOUT = Keys.LLM.TENSOR_DATA_LAYOUT

# attention
KEY_ATTENTION_HEAD_COUNT = Keys.Attention.HEAD_COUNT
KEY_ATTENTION_HEAD_COUNT_KV = Keys.Attention.HEAD_COUNT_KV
KEY_ATTENTION_MAX_ALIBI_BIAS = Keys.Attention.MAX_ALIBI_BIAS
KEY_ATTENTION_CLAMP_KQV = Keys.Attention.CLAMP_KQV
KEY_ATTENTION_LAYERNORM_EPS = Keys.Attention.LAYERNORM_EPS
KEY_ATTENTION_LAYERNORM_RMS_EPS = Keys.Attention.LAYERNORM_RMS_EPS

# RoPE
KEY_ROPE_DIMENSION_COUNT = Keys.Rope.DIMENSION_COUNT
KEY_ROPE_FREQ_BASE = Keys.Rope.FREQ_BASE
KEY_ROPE_SCALING_TYPE = Keys.Rope.SCALING_TYPE
KEY_ROPE_SCALING_FACTOR = Keys.Rope.SCALING_FACTOR
KEY_ROPE_SCALING_ORIG_CTX_LEN = Keys.Rope.SCALING_ORIG_CTX_LEN
KEY_ROPE_SCALING_FINETUNED = Keys.Rope.SCALING_FINETUNED

# SSM
KEY_SSM_CONV_KERNEL = Keys.SSM.CONV_KERNEL
KEY_SSM_INNER_SIZE = Keys.SSM.INNER_SIZE
KEY_SSM_STATE_SIZE = Keys.SSM.STATE_SIZE
KEY_SSM_TIME_STEP_RANK = Keys.SSM.TIME_STEP_RANK

# tokenization
KEY_TOKENIZER_MODEL = Keys.Tokenizer.MODEL
KEY_TOKENIZER_PRE = Keys.Tokenizer.PRE
KEY_TOKENIZER_LIST = Keys.Tokenizer.LIST
KEY_TOKENIZER_TOKEN_TYPE = Keys.Tokenizer.TOKEN_TYPE
KEY_TOKENIZER_SCORES = Keys.Tokenizer.SCORES
KEY_TOKENIZER_MERGES = Keys.Tokenizer.MERGES
KEY_TOKENIZER_BOS_ID = Keys.Tokenizer.BOS_ID
KEY_TOKENIZER_EOS_ID = Keys.Tokenizer.EOS_ID
KEY_TOKENIZER_UNK_ID = Keys.Tokenizer.UNK_ID
KEY_TOKENIZER_SEP_ID = Keys.Tokenizer.SEP_ID
KEY_TOKENIZER_PAD_ID = Keys.Tokenizer.PAD_ID
KEY_TOKENIZER_CLS_ID = Keys.Tokenizer.CLS_ID
KEY_TOKENIZER_MASK_ID = Keys.Tokenizer.MASK_ID
KEY_TOKENIZER_HF_JSON = Keys.Tokenizer.HF_JSON
KEY_TOKENIZER_RWKV = Keys.Tokenizer.RWKV
# NOTE: the "PRIFIX" misspelling below is deliberate — it is the published
# alias name, kept unchanged for backward compatibility with existing callers.
KEY_TOKENIZER_PRIFIX_ID = Keys.Tokenizer.PREFIX_ID
KEY_TOKENIZER_SUFFIX_ID = Keys.Tokenizer.SUFFIX_ID
KEY_TOKENIZER_MIDDLE_ID = Keys.Tokenizer.MIDDLE_ID
KEY_TOKENIZER_EOT_ID = Keys.Tokenizer.EOT_ID
KEY_TOKENIZER_EOM_ID = Keys.Tokenizer.EOM_ID
diff --git a/modules_forge/packages/gguf/gguf_reader.py b/modules_forge/packages/gguf/gguf_reader.py
new file mode 100644
index 0000000000000000000000000000000000000000..695503dad2c44a365b5005ea6946ff356acc9892
--- /dev/null
+++ b/modules_forge/packages/gguf/gguf_reader.py
@@ -0,0 +1,333 @@
+#
+# GGUF file reading/modification support. For API usage information,
+# please see the files scripts/ for some fairly simple examples.
+#
+from __future__ import annotations
+
+import logging
+import os
+from collections import OrderedDict
+from typing import Any, Literal, NamedTuple, TypeVar, Union
+
+import numpy as np
+import numpy.typing as npt
+
+from .quants import quant_shape_to_byte_shape
+
+if __name__ == "__main__":
+ import sys
+ from pathlib import Path
+
+ # Allow running file in package as a script.
+ sys.path.insert(0, str(Path(__file__).parent.parent))
+
+from gguf.constants import (
+ GGML_QUANT_SIZES,
+ GGUF_DEFAULT_ALIGNMENT,
+ GGUF_MAGIC,
+ GGUF_VERSION,
+ GGMLQuantizationType,
+ GGUFValueType,
+)
+
logger = logging.getLogger(__name__)

# GGUF container versions this reader can parse: v2 plus the current version.
READER_SUPPORTED_VERSIONS = [2, GGUF_VERSION]
+
+
class ReaderField(NamedTuple):
    """One parsed key/value metadata field (or pseudo-field like the header
    counts) from a GGUF file."""

    # Offset to start of this field.
    offset: int

    # Name of the field (not necessarily from file data).
    name: str

    # Data parts. Some types have multiple components, such as strings
    # that consist of a length followed by the string data.
    # NOTE(review): these class-level list defaults are single shared objects
    # (evaluated once at class creation). They are fine as read-only
    # placeholders, but mutating a defaulted instance's list in place would
    # affect every other defaulted instance — confirm this is intentional.
    parts: list[npt.NDArray[Any]] = []

    # Indexes into parts that we can call the actual data. For example
    # an array of strings will be populated with indexes to the actual
    # string data.
    data: list[int] = [-1]

    # GGUF value type of the field (plus element type for arrays).
    types: list[GGUFValueType] = []
+
+
class ReaderTensor(NamedTuple):
    """One tensor parsed from a GGUF file, with its data exposed as a
    (possibly quantized, byte-typed) numpy view over the memmap."""

    name: str
    tensor_type: GGMLQuantizationType
    shape: npt.NDArray[np.uint32]  # dims as stored in the file (innermost first)
    n_elements: int                # logical element count (prod of dims)
    n_bytes: int                   # on-disk payload size
    data_offset: int               # absolute offset of the data in the file
    data: npt.NDArray[Any]
    field: ReaderField             # the raw tensor-info field this came from
+
+
class GGUFReader:
    """Parses a GGUF file through a numpy memmap.

    Layout walked sequentially: magic, version, tensor/kv counts, the KV
    metadata fields, the tensor-info records, then the aligned tensor data
    region. Open with mode "r+" to modify the file in place, or "c" for
    copy-on-write.
    """

    # I - same as host, S - swapped
    byte_order: Literal["I", "S"] = "I"
    alignment: int = GGUF_DEFAULT_ALIGNMENT
    data_offset: int

    # Note: Internal helper, API may change.
    gguf_scalar_to_np: dict[GGUFValueType, type[np.generic]] = {
        GGUFValueType.UINT8: np.uint8,
        GGUFValueType.INT8: np.int8,
        GGUFValueType.UINT16: np.uint16,
        GGUFValueType.INT16: np.int16,
        GGUFValueType.UINT32: np.uint32,
        GGUFValueType.INT32: np.int32,
        GGUFValueType.FLOAT32: np.float32,
        GGUFValueType.UINT64: np.uint64,
        GGUFValueType.INT64: np.int64,
        GGUFValueType.FLOAT64: np.float64,
        GGUFValueType.BOOL: np.bool_,
    }

    def __init__(self, path: os.PathLike[str] | str, mode: Literal["r", "r+", "c"] = "r"):
        """Memory-map *path* and parse all metadata and tensor info.

        Raises ValueError on a bad magic, unsupported version, or malformed
        field data.
        """
        self.data = np.memmap(path, mode=mode)
        offs = 0

        # Check for GGUF magic
        if self._get(offs, np.uint32, override_order="<")[0] != GGUF_MAGIC:
            raise ValueError("GGUF magic invalid")
        offs += 4

        # Check GGUF version
        temp_version = self._get(offs, np.uint32)
        if temp_version[0] & 65535 == 0:
            # If we get 0 here that means it's (probably) a GGUF file created for
            # the opposite byte order of the machine this script is running on.
            self.byte_order = "S"
            # FIX: ndarray.newbyteorder() was removed in NumPy 2.0; build the
            # byte-swapped view through the dtype instead (works on 1.x too,
            # and matches the pattern already used in _get below).
            temp_version = temp_version.view(temp_version.dtype.newbyteorder(self.byte_order))
        version = temp_version[0]
        if version not in READER_SUPPORTED_VERSIONS:
            raise ValueError(f"Sorry, file appears to be version {version} which we cannot handle")
        self.fields: OrderedDict[str, ReaderField] = OrderedDict()
        self.tensors: list[ReaderTensor] = []
        offs += self._push_field(ReaderField(offs, "GGUF.version", [temp_version], [0], [GGUFValueType.UINT32]))

        # Check tensor count and kv count
        temp_counts = self._get(offs, np.uint64, 2)
        offs += self._push_field(
            ReaderField(
                offs,
                "GGUF.tensor_count",
                [temp_counts[:1]],
                [0],
                [GGUFValueType.UINT64],
            )
        )
        offs += self._push_field(ReaderField(offs, "GGUF.kv_count", [temp_counts[1:]], [0], [GGUFValueType.UINT64]))
        tensor_count, kv_count = temp_counts
        offs = self._build_fields(offs, kv_count)

        # Build Tensor Info Fields
        offs, tensors_fields = self._build_tensor_info(offs, tensor_count)
        new_align = self.fields.get("general.alignment")
        if new_align is not None:
            if new_align.types != [GGUFValueType.UINT32]:
                raise ValueError("Bad type for general.alignment field")
            self.alignment = new_align.parts[-1][0]
        # Tensor data starts at the next alignment boundary after the metadata.
        padding = offs % self.alignment
        if padding != 0:
            offs += self.alignment - padding
        self.data_offset = offs
        self._build_tensors(offs, tensors_fields)

    _DT = TypeVar("_DT", bound=npt.DTypeLike)

    # Fetch a key/value metadata field by key.
    def get_field(self, key: str) -> Union[ReaderField, None]:
        return self.fields.get(key, None)

    # Fetch a tensor from the list by index.
    def get_tensor(self, idx: int) -> ReaderTensor:
        return self.tensors[idx]

    def _get(
        self,
        offset: int,
        dtype: npt.DTypeLike,
        count: int = 1,
        override_order: None | Literal["I", "S", "<"] = None,
    ) -> npt.NDArray[Any]:
        """Return *count* items of *dtype* at *offset* as a view over the memmap."""
        count = int(count)
        itemsize = int(np.empty([], dtype=dtype).itemsize)
        end_offs = offset + itemsize * count
        arr: npt.NDArray[Any] = self.data[offset:end_offs].view(dtype=dtype)[:count]
        return arr.view(arr.dtype.newbyteorder(override_order or self.byte_order))

    def _push_field(self, field: ReaderField, skip_sum: bool = False) -> int:
        """Register a parsed field; returns its byte size (0 when skip_sum)."""
        if field.name in self.fields:
            # TODO: add option to generate error on duplicate keys
            # raise KeyError(f'Duplicate {field.name} already in list at offset {field.offset}')

            logger.warning(f"Duplicate key {field.name} at offset {field.offset}")
            # Store the duplicate under a unique, offset-suffixed key instead.
            self.fields[field.name + "_{}".format(field.offset)] = field
        else:
            self.fields[field.name] = field
        return 0 if skip_sum else sum(int(part.nbytes) for part in field.parts)

    def _get_str(self, offset: int) -> tuple[npt.NDArray[np.uint64], npt.NDArray[np.uint8]]:
        """Read a GGUF string: uint64 length followed by raw bytes."""
        slen = self._get(offset, np.uint64)
        return slen, self._get(offset + 8, np.uint8, slen[0])

    def _get_field_parts(
        self,
        orig_offs: int,
        raw_type: int,
    ) -> tuple[int, list[npt.NDArray[Any]], list[int], list[GGUFValueType]]:
        """Parse one value of *raw_type* at *orig_offs*.

        Returns (byte size, parts, indexes-into-parts of the payload, types).
        Arrays recurse per element; only the first element contributes types.
        """
        offs = orig_offs
        types: list[GGUFValueType] = []
        gtype = GGUFValueType(raw_type)
        types.append(gtype)
        # Handle strings.
        if gtype == GGUFValueType.STRING:
            sparts: list[npt.NDArray[Any]] = list(self._get_str(offs))
            size = sum(int(part.nbytes) for part in sparts)
            return size, sparts, [1], types
        # Check if it's a simple scalar type.
        nptype = self.gguf_scalar_to_np.get(gtype)
        if nptype is not None:
            val = self._get(offs, nptype)
            return int(val.nbytes), [val], [0], types
        # Handle arrays.
        if gtype == GGUFValueType.ARRAY:
            raw_itype = self._get(offs, np.uint32)
            offs += int(raw_itype.nbytes)
            alen = self._get(offs, np.uint64)
            offs += int(alen.nbytes)
            aparts: list[npt.NDArray[Any]] = [raw_itype, alen]
            data_idxs: list[int] = []
            for idx in range(alen[0]):
                curr_size, curr_parts, curr_idxs, curr_types = self._get_field_parts(offs, raw_itype[0])
                if idx == 0:
                    types += curr_types
                idxs_offs = len(aparts)
                aparts += curr_parts
                data_idxs += (idx + idxs_offs for idx in curr_idxs)
                offs += curr_size
            return offs - orig_offs, aparts, data_idxs, types
        # We can't deal with this one.
        # FIX: this message was a plain string, so {gtype} was never interpolated.
        raise ValueError(f"Unknown/unhandled field type {gtype}")

    def _get_tensor_info_field(self, orig_offs: int) -> ReaderField:
        """Parse one tensor-info record: name, dims, dtype id, data offset."""
        offs = orig_offs

        # Get Tensor Name
        name_len, name_data = self._get_str(offs)
        offs += int(name_len.nbytes + name_data.nbytes)

        # Get Tensor Dimensions Count
        n_dims = self._get(offs, np.uint32)
        offs += int(n_dims.nbytes)

        # Get Tensor Dimension Array
        dims = self._get(offs, np.uint64, n_dims[0])
        offs += int(dims.nbytes)

        # Get Tensor Encoding Scheme Type
        raw_dtype = self._get(offs, np.uint32)
        offs += int(raw_dtype.nbytes)

        # Get Tensor Offset
        offset_tensor = self._get(offs, np.uint64)
        offs += int(offset_tensor.nbytes)

        return ReaderField(
            orig_offs,
            str(bytes(name_data), encoding="utf-8"),
            [name_len, name_data, n_dims, dims, raw_dtype, offset_tensor],
            [1, 3, 4, 5],
        )

    def _build_fields(self, offs: int, count: int) -> int:
        """Parse *count* KV fields starting at *offs*; returns the end offset."""
        for _ in range(count):
            orig_offs = offs
            kv_klen, kv_kdata = self._get_str(offs)
            offs += int(kv_klen.nbytes + kv_kdata.nbytes)
            raw_kv_type = self._get(offs, np.uint32)
            offs += int(raw_kv_type.nbytes)
            parts: list[npt.NDArray[Any]] = [kv_klen, kv_kdata, raw_kv_type]
            idxs_offs = len(parts)
            field_size, field_parts, field_idxs, field_types = self._get_field_parts(offs, raw_kv_type[0])
            parts += field_parts
            self._push_field(
                ReaderField(
                    orig_offs,
                    str(bytes(kv_kdata), encoding="utf-8"),
                    parts,
                    [idx + idxs_offs for idx in field_idxs],
                    field_types,
                ),
                skip_sum=True,
            )
            offs += field_size
        return offs

    def _build_tensor_info(self, offs: int, count: int) -> tuple[int, list[ReaderField]]:
        """Parse *count* tensor-info records; returns (end offset, fields)."""
        tensor_fields = []
        for _ in range(count):
            field = self._get_tensor_info_field(offs)
            offs += sum(int(part.nbytes) for part in field.parts)
            tensor_fields.append(field)
        return offs, tensor_fields

    def _build_tensors(self, start_offs: int, fields: list[ReaderField]) -> None:
        """Materialize ReaderTensor views over the data region for each info field."""
        tensors = []
        tensor_names = set()  # keep track of name to prevent duplicated tensors
        for field in fields:
            _name_len, name_data, _n_dims, dims, raw_dtype, offset_tensor = field.parts
            # check if there's any tensor having same name already in the list
            tensor_name = str(bytes(name_data), encoding="utf-8")
            if tensor_name in tensor_names:
                raise ValueError(f"Found duplicated tensor with name {tensor_name}")
            tensor_names.add(tensor_name)
            ggml_type = GGMLQuantizationType(raw_dtype[0])
            n_elems = int(np.prod(dims))
            np_dims = tuple(reversed(dims.tolist()))
            block_size, type_size = GGML_QUANT_SIZES[ggml_type]
            n_bytes = n_elems * type_size // block_size
            data_offs = int(start_offs + offset_tensor[0])
            item_type: npt.DTypeLike
            if ggml_type == GGMLQuantizationType.F16:
                item_count = n_elems
                item_type = np.float16
            elif ggml_type == GGMLQuantizationType.F32:
                item_count = n_elems
                item_type = np.float32
            elif ggml_type == GGMLQuantizationType.F64:
                item_count = n_elems
                item_type = np.float64
            elif ggml_type == GGMLQuantizationType.I8:
                item_count = n_elems
                item_type = np.int8
            elif ggml_type == GGMLQuantizationType.I16:
                item_count = n_elems
                item_type = np.int16
            elif ggml_type == GGMLQuantizationType.I32:
                item_count = n_elems
                item_type = np.int32
            elif ggml_type == GGMLQuantizationType.I64:
                item_count = n_elems
                item_type = np.int64
            else:
                # Quantized types are exposed as raw bytes with a byte-shaped
                # last dimension.
                item_count = n_bytes
                item_type = np.uint8
                np_dims = quant_shape_to_byte_shape(np_dims, ggml_type)
            tensors.append(
                ReaderTensor(
                    name=tensor_name,
                    tensor_type=ggml_type,
                    shape=dims,
                    n_elements=n_elems,
                    n_bytes=n_bytes,
                    data_offset=data_offs,
                    data=self._get(data_offs, item_type, item_count).reshape(np_dims),
                    field=field,
                )
            )
        self.tensors = tensors
diff --git a/modules_forge/packages/gguf/gguf_writer.py b/modules_forge/packages/gguf/gguf_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ba97a64a5eec3e60c769a93f73fcebf73fc3b6b
--- /dev/null
+++ b/modules_forge/packages/gguf/gguf_writer.py
@@ -0,0 +1,975 @@
+from __future__ import annotations
+
+import logging
+import os
+import shutil
+import struct
+import tempfile
+from dataclasses import dataclass
+from enum import Enum, auto
+from io import BufferedWriter
+from math import prod
+from pathlib import Path
+from string import ascii_letters, digits
+from typing import IO, Any, Mapping, Sequence
+
+import numpy as np
+
+from .constants import (
+ GGUF_DEFAULT_ALIGNMENT,
+ GGUF_MAGIC,
+ GGUF_VERSION,
+ GGMLQuantizationType,
+ GGUFEndian,
+ GGUFValueType,
+ Keys,
+ PoolingType,
+ RopeScalingType,
+ TokenType,
+)
+from .quants import quant_shape_from_byte_shape
+
logger = logging.getLogger(__name__)


# Filename pattern for split (sharded) outputs: "<stem>-00001-of-00005.gguf".
SHARD_NAME_FORMAT = "{:s}-{:05d}-of-{:05d}.gguf"
+
+
@dataclass
class TensorInfo:
    """Metadata (and optionally the data) of one tensor queued for writing."""

    shape: Sequence[int]           # logical shape as passed by the caller
    dtype: GGMLQuantizationType    # on-disk encoding
    nbytes: int                    # payload size in bytes
    tensor: np.ndarray[Any, Any] | None = None  # kept only when not spooling to a temp file
+
+
@dataclass
class GGUFValue:
    """A metadata value paired with the GGUF type tag it will be encoded as."""

    value: Any
    type: GGUFValueType
+
+
class WriterState(Enum):
    """Writer progress; the write_* methods enforce this ordering."""

    NO_FILE = auto()
    EMPTY = auto()
    HEADER = auto()
    KV_DATA = auto()
    TI_DATA = auto()
    WEIGHTS = auto()
+
+
class GGUFWriter:
    """Incrementally writes one or more GGUF files: header, KV metadata,
    tensor-info records, then tensor weights — with optional sharding of
    large models across multiple files."""

    fout: list[BufferedWriter] | None               # one handle per shard once opened
    path: Path | None
    temp_file: tempfile.SpooledTemporaryFile[bytes] | None
    tensors: list[dict[str, TensorInfo]]            # one dict per shard
    kv_data: list[dict[str, GGUFValue]]             # one dict per shard
    state: WriterState
    # struct format character for each scalar GGUF value type
    _simple_value_packing = {
        GGUFValueType.UINT8: "B",
        GGUFValueType.INT8: "b",
        GGUFValueType.UINT16: "H",
        GGUFValueType.INT16: "h",
        GGUFValueType.UINT32: "I",
        GGUFValueType.INT32: "i",
        GGUFValueType.FLOAT32: "f",
        GGUFValueType.UINT64: "Q",
        GGUFValueType.INT64: "q",
        GGUFValueType.FLOAT64: "d",
        GGUFValueType.BOOL: "?",
    }
+
+ def __init__(
+ self,
+ path: os.PathLike[str] | str | None,
+ arch: str,
+ use_temp_file: bool = False,
+ endianess: GGUFEndian = GGUFEndian.LITTLE,
+ split_max_tensors: int = 0,
+ split_max_size: int = 0,
+ dry_run: bool = False,
+ small_first_shard: bool = False,
+ ):
+ self.fout = None
+ self.path = Path(path) if path else None
+ self.arch = arch
+ self.endianess = endianess
+ self.data_alignment = GGUF_DEFAULT_ALIGNMENT
+ self.use_temp_file = use_temp_file
+ self.temp_file = None
+ self.tensors = [{}]
+ self.kv_data = [{}]
+ self.split_max_tensors = split_max_tensors
+ self.split_max_size = split_max_size
+ self.dry_run = dry_run
+ self.small_first_shard = small_first_shard
+ logger.info(
+ "gguf: This GGUF file is for {0} Endian only".format(
+ "Big" if self.endianess == GGUFEndian.BIG else "Little",
+ )
+ )
+ self.state = WriterState.NO_FILE
+
+ if self.small_first_shard:
+ self.tensors.append({})
+
+ self.add_architecture()
+
    def get_total_parameter_count(self) -> tuple[int, int, int, int]:
        """Count parameters across all queued tensors.

        Returns (total, shared, expert, expert_count). The total is negated
        when LoRA tensors were seen, to signal the count is likely inexact.
        """
        total_params = 0
        shared_params = 0
        expert_params = 0

        expert_sum = 0
        n_expert_tensors = 0

        # Most recent ".lora_a" tensor, awaiting its matching ".lora_b".
        last_lora_a: tuple[str, TensorInfo] | None = None

        for tensors in self.tensors:
            for name, info in tensors.items():

                shape = info.shape

                if name.endswith(".lora_a"):
                    last_lora_a = (name, info)
                    continue
                elif name.endswith(".lora_b"):
                    if last_lora_a is None or last_lora_a[0] != name[:-1] + "a":
                        # Bail when the LoRA pair can't be found trivially
                        logger.warning(
                            "can't measure LoRA size correctly, tensor order is unusual"
                        )
                        return 0, 0, 0, 0
                    else:
                        # Count the pair once, taking the last dim from lora_a.
                        shape = (*shape[:-1], last_lora_a[1].shape[-1])

                size = prod(shape)

                if "_exps." in name:
                    # Expert-stacked tensors: count a single expert's worth,
                    # accumulating the expert dimension (-3) separately.
                    expert_params += size // shape[-3]
                    expert_sum += shape[-3]
                    n_expert_tensors += 1
                else:
                    shared_params += size

                total_params += size

        # Hopefully this should work even for variable-expert-count models
        expert_count = (expert_sum // n_expert_tensors) if n_expert_tensors > 0 else 0

        # Negate the total to signal it's likely not exact
        if last_lora_a is not None:
            total_params = -total_params

        # NOTE: keep the output in the same order as accepted by 'size_label' in gguf-py/gguf/utility.py
        return total_params, shared_params, expert_params, expert_count
+
+ def format_shard_names(self, path: Path) -> list[Path]:
+ if len(self.tensors) == 1:
+ return [path]
+ return [
+ path.with_name(
+ SHARD_NAME_FORMAT.format(path.stem, i + 1, len(self.tensors))
+ )
+ for i in range(len(self.tensors))
+ ]
+
+ def open_output_file(self, path: Path | None = None) -> None:
+ if (
+ self.state is WriterState.EMPTY
+ and self.fout is not None
+ and (path is None or path == self.path)
+ ):
+ # allow calling this multiple times as long as the path is the same
+ return
+
+ if self.state is not WriterState.NO_FILE:
+ raise ValueError(
+ f"Expected output file to be not yet opened, got {self.state}"
+ )
+
+ if path is not None:
+ self.path = path
+
+ if self.path is not None:
+ filenames = self.print_plan()
+ self.fout = [open(filename, "wb") for filename in filenames]
+ self.state = WriterState.EMPTY
+
    def print_plan(self) -> list[Path]:
        """Log the shard filenames and sizes about to be written.

        NOTE: on a dry run this prints the filenames and exits the process.
        """
        logger.info("Writing the following files:")
        assert self.path is not None
        filenames = self.format_shard_names(self.path)
        assert len(filenames) == len(self.tensors)
        for name, tensors in zip(filenames, self.tensors):
            logger.info(
                f"{name}: n_tensors = {len(tensors)}, total_size = {GGUFWriter.format_n_bytes_to_str(sum(ti.nbytes for ti in tensors.values()))}"
            )

        if self.dry_run:
            logger.info("Dry run, not writing files")
            for name in filenames:
                print(name)  # noqa: NP100
            exit()

        return filenames
+
+ def add_shard_kv_data(self) -> None:
+ if len(self.tensors) == 1:
+ return
+
+ total_tensors = sum(len(t) for t in self.tensors)
+ assert self.fout is not None
+ total_splits = len(self.fout)
+ self.kv_data.extend({} for _ in range(len(self.kv_data), total_splits))
+ for i, kv_data in enumerate(self.kv_data):
+ kv_data[Keys.Split.LLM_KV_SPLIT_NO] = GGUFValue(i, GGUFValueType.UINT16)
+ kv_data[Keys.Split.LLM_KV_SPLIT_COUNT] = GGUFValue(
+ total_splits, GGUFValueType.UINT16
+ )
+ kv_data[Keys.Split.LLM_KV_SPLIT_TENSORS_COUNT] = GGUFValue(
+ total_tensors, GGUFValueType.INT32
+ )
+
+ def write_header_to_file(self, path: Path | None = None) -> None:
+ if len(self.tensors) == 1 and (
+ self.split_max_tensors != 0 or self.split_max_size != 0
+ ):
+ logger.warning("Model fails split requirements, not splitting")
+
+ self.open_output_file(path)
+
+ if self.state is not WriterState.EMPTY:
+ raise ValueError(f"Expected output file to be empty, got {self.state}")
+
+ assert self.fout is not None
+ assert len(self.fout) == len(self.tensors)
+ assert len(self.kv_data) == 1
+
+ self.add_shard_kv_data()
+
+ for fout, tensors, kv_data in zip(self.fout, self.tensors, self.kv_data):
+ fout.write(self._pack(" None:
        # Write each shard's serialized KV section; must run right after the
        # header has been written.
        if self.state is not WriterState.HEADER:
            raise ValueError(
                f"Expected output file to contain the header, got {self.state}"
            )
        assert self.fout is not None

        for fout, kv_data in zip(self.fout, self.kv_data):
            kv_bytes = bytearray()

            for key, val in kv_data.items():
                # Each entry is a plain string key followed by the typed value.
                kv_bytes += self._pack_val(key, GGUFValueType.STRING, add_vtype=False)
                kv_bytes += self._pack_val(val.value, val.type, add_vtype=True)

            fout.write(kv_bytes)

        self.flush()
        self.state = WriterState.KV_DATA
+
    def write_ti_data_to_file(self) -> None:
        """Write tensor-info records (name, dims, dtype, data offset) per shard."""
        if self.state is not WriterState.KV_DATA:
            raise ValueError(
                f"Expected output file to contain KV data, got {self.state}"
            )
        assert self.fout is not None

        for fout, tensors in zip(self.fout, self.tensors):
            ti_data = bytearray()
            offset_tensor = 0

            for name, ti in tensors.items():
                ti_data += self._pack_val(name, GGUFValueType.STRING, add_vtype=False)
                n_dims = len(ti.shape)
                ti_data += self._pack("I", n_dims)
                # Dimensions are stored innermost-first (reverse of the shape).
                for j in range(n_dims):
                    ti_data += self._pack("Q", ti.shape[n_dims - 1 - j])
                ti_data += self._pack("I", ti.dtype)
                # Offsets are relative to the aligned start of the data region.
                ti_data += self._pack("Q", offset_tensor)
                offset_tensor += GGUFWriter.ggml_pad(ti.nbytes, self.data_alignment)

            fout.write(ti_data)
            fout.flush()
        self.state = WriterState.TI_DATA
+
+ def add_key_value(self, key: str, val: Any, vtype: GGUFValueType) -> None:
+ if any(key in kv_data for kv_data in self.kv_data):
+ raise ValueError(f"Duplicated key name {key!r}")
+
+ self.kv_data[0][key] = GGUFValue(value=val, type=vtype)
+
    # Typed convenience wrappers around add_key_value. Note that add_string
    # and add_array silently skip empty values instead of writing them.
    def add_uint8(self, key: str, val: int) -> None:
        self.add_key_value(key, val, GGUFValueType.UINT8)

    def add_int8(self, key: str, val: int) -> None:
        self.add_key_value(key, val, GGUFValueType.INT8)

    def add_uint16(self, key: str, val: int) -> None:
        self.add_key_value(key, val, GGUFValueType.UINT16)

    def add_int16(self, key: str, val: int) -> None:
        self.add_key_value(key, val, GGUFValueType.INT16)

    def add_uint32(self, key: str, val: int) -> None:
        self.add_key_value(key, val, GGUFValueType.UINT32)

    def add_int32(self, key: str, val: int) -> None:
        self.add_key_value(key, val, GGUFValueType.INT32)

    def add_float32(self, key: str, val: float) -> None:
        self.add_key_value(key, val, GGUFValueType.FLOAT32)

    def add_uint64(self, key: str, val: int) -> None:
        self.add_key_value(key, val, GGUFValueType.UINT64)

    def add_int64(self, key: str, val: int) -> None:
        self.add_key_value(key, val, GGUFValueType.INT64)

    def add_float64(self, key: str, val: float) -> None:
        self.add_key_value(key, val, GGUFValueType.FLOAT64)

    def add_bool(self, key: str, val: bool) -> None:
        self.add_key_value(key, val, GGUFValueType.BOOL)

    def add_string(self, key: str, val: str) -> None:
        if not val:
            return
        self.add_key_value(key, val, GGUFValueType.STRING)

    def add_array(self, key: str, val: Sequence[Any]) -> None:
        if len(val) == 0:
            return
        self.add_key_value(key, val, GGUFValueType.ARRAY)
+
+ @staticmethod
+ def ggml_pad(x: int, n: int) -> int:
+ return ((x + n - 1) // n) * n
+
    def add_tensor_info(
        self,
        name: str,
        tensor_shape: Sequence[int],
        tensor_dtype: np.dtype,
        tensor_nbytes: int,
        raw_dtype: GGMLQuantizationType | None = None,
    ) -> None:
        """Queue tensor metadata, starting a new shard when split limits hit.

        raw_dtype overrides the numpy-derived GGML type — used when
        pre-quantized data is passed in as raw uint8 bytes.
        """
        if self.state is not WriterState.NO_FILE:
            raise ValueError(
                f"Expected output file to be not yet opened, got {self.state}"
            )

        if any(name in tensors for tensors in self.tensors):
            raise ValueError(f"Duplicated tensor name {name!r}")

        if raw_dtype is None:
            # Infer the GGML encoding from the numpy dtype.
            if tensor_dtype == np.float16:
                dtype = GGMLQuantizationType.F16
            elif tensor_dtype == np.float32:
                dtype = GGMLQuantizationType.F32
            elif tensor_dtype == np.float64:
                dtype = GGMLQuantizationType.F64
            elif tensor_dtype == np.int8:
                dtype = GGMLQuantizationType.I8
            elif tensor_dtype == np.int16:
                dtype = GGMLQuantizationType.I16
            elif tensor_dtype == np.int32:
                dtype = GGMLQuantizationType.I32
            elif tensor_dtype == np.int64:
                dtype = GGMLQuantizationType.I64
            else:
                raise ValueError(
                    "Only F16, F32, F64, I8, I16, I32, I64 tensors are supported for now"
                )
        else:
            dtype = raw_dtype
            if tensor_dtype == np.uint8:
                # Raw bytes of a quantized type: recover the logical shape.
                tensor_shape = quant_shape_from_byte_shape(tensor_shape, raw_dtype)

        # make sure there is at least one tensor before splitting
        if len(self.tensors[-1]) > 0:
            if (  # split when over tensor limit
                self.split_max_tensors != 0
                and len(self.tensors[-1]) >= self.split_max_tensors
            ) or (  # split when over size limit
                self.split_max_size != 0
                and sum(ti.nbytes for ti in self.tensors[-1].values()) + tensor_nbytes
                > self.split_max_size
            ):
                self.tensors.append({})

        self.tensors[-1][name] = TensorInfo(
            shape=tensor_shape, dtype=dtype, nbytes=tensor_nbytes
        )
+
    def add_tensor(
        self,
        name: str,
        tensor: np.ndarray[Any, Any],
        raw_shape: Sequence[int] | None = None,
        raw_dtype: GGMLQuantizationType | None = None,
    ) -> None:
        """Queue a tensor (metadata plus data) for writing.

        With use_temp_file the data is spooled to a temp file immediately;
        otherwise the array is held referenced until written.
        """
        if self.endianess == GGUFEndian.BIG:
            # NOTE: byteswap mutates the caller's array in place.
            tensor.byteswap(inplace=True)
        if self.use_temp_file and self.temp_file is None:
            # Spills to disk only past 256 MiB; stays in memory below that.
            fp = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256 * 1024 * 1024)
            fp.seek(0)
            self.temp_file = fp

        shape: Sequence[int] = raw_shape if raw_shape is not None else tensor.shape
        self.add_tensor_info(
            name, shape, tensor.dtype, tensor.nbytes, raw_dtype=raw_dtype
        )

        if self.temp_file is None:
            self.tensors[-1][name].tensor = tensor
            return

        tensor.tofile(self.temp_file)
        self.write_padding(self.temp_file, tensor.nbytes)
+
+ def write_padding(self, fp: IO[bytes], n: int, align: int | None = None) -> None:
+ pad = (
+ GGUFWriter.ggml_pad(n, align if align is not None else self.data_alignment)
+ - n
+ )
+ if pad != 0:
+ fp.write(bytes([0] * pad))
+
+ def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None:
+ if (
+ self.state is not WriterState.TI_DATA
+ and self.state is not WriterState.WEIGHTS
+ ):
+ raise ValueError(
+ f"Expected output file to contain tensor info or weights, got {self.state}"
+ )
+ assert self.fout is not None
+
+ if self.endianess == GGUFEndian.BIG:
+ tensor.byteswap(inplace=True)
+
+ file_id = -1
+ for i, tensors in enumerate(self.tensors):
+ if len(tensors) > 0:
+ file_id = i
+ break
+
+ fout = self.fout[file_id]
+
+ # pop the first tensor info
+ # TODO: cleaner way to get the first key
+ first_tensor_name = [
+ name for name, _ in zip(self.tensors[file_id].keys(), range(1))
+ ][0]
+ ti = self.tensors[file_id].pop(first_tensor_name)
+ assert ti.nbytes == tensor.nbytes
+
+ self.write_padding(fout, fout.tell())
+ tensor.tofile(fout)
+ self.write_padding(fout, tensor.nbytes)
+
+ self.state = WriterState.WEIGHTS
+
    def write_tensors_to_file(self, *, progress: bool = False) -> None:
        """Write all staged tensor data to the shard files.

        Tensor-info sections are flushed first. Then either the in-memory
        tensors are streamed out shard by shard (optionally with tqdm
        progress bars), or — when a spooled temp file was used — its whole
        contents are copied into the data shard.

        Args:
            progress: When True, display tqdm progress bars (keyword-only).
        """
        self.write_ti_data_to_file()

        assert self.fout is not None

        # Align every shard before tensor data begins.
        for fout in self.fout:
            self.write_padding(fout, fout.tell())

        if self.temp_file is None:
            shard_bar = None
            bar = None

            if progress:
                # Imported lazily so tqdm is only required when requested.
                from tqdm import tqdm

                total_bytes = sum(ti.nbytes for t in self.tensors for ti in t.values())

                if len(self.fout) > 1:
                    # A per-shard bar only makes sense with multiple shards.
                    shard_bar = tqdm(
                        desc=f"Shard (0/{len(self.fout)})",
                        total=None,
                        unit="byte",
                        unit_scale=True,
                    )
                bar = tqdm(
                    desc="Writing", total=total_bytes, unit="byte", unit_scale=True
                )

            for i, (fout, tensors) in enumerate(zip(self.fout, self.tensors)):
                if shard_bar is not None:
                    shard_bar.set_description(f"Shard ({i + 1}/{len(self.fout)})")
                    total = sum(ti.nbytes for ti in tensors.values())
                    shard_bar.reset(total=(total if total > 0 else None))

                # relying on the fact that Python dicts preserve insertion order (since 3.7)
                for ti in tensors.values():
                    assert (
                        ti.tensor is not None
                    )  # can only iterate once over the tensors
                    assert ti.tensor.nbytes == ti.nbytes
                    ti.tensor.tofile(fout)
                    if shard_bar is not None:
                        shard_bar.update(ti.nbytes)
                    if bar is not None:
                        bar.update(ti.nbytes)
                    self.write_padding(fout, ti.nbytes)
                    # Drop the reference so memory can be reclaimed as we go.
                    ti.tensor = None
        else:
            self.temp_file.seek(0)

            # Copy spooled data into the data shard (index 1 when a small
            # metadata-only first shard is in use, else index 0).
            shutil.copyfileobj(
                self.temp_file, self.fout[0 if not self.small_first_shard else 1]
            )
            self.flush()
            self.temp_file.close()

        self.state = WriterState.WEIGHTS
+
+ def flush(self) -> None:
+ assert self.fout is not None
+ for fout in self.fout:
+ fout.flush()
+
+ def close(self) -> None:
+ if self.fout is not None:
+ for fout in self.fout:
+ fout.close()
+ self.fout = None
+
    # ------------------------------------------------------------------
    # general.* metadata keys: authorship, provenance, licensing, lineage.
    # Each setter delegates to the typed add_* KV helpers.
    # ------------------------------------------------------------------

    def add_type(self, type_name: str) -> None:
        self.add_string(Keys.General.TYPE, type_name)

    def add_architecture(self) -> None:
        self.add_string(Keys.General.ARCHITECTURE, self.arch)

    def add_quantization_version(self, quantization_version: int) -> None:
        self.add_uint32(Keys.General.QUANTIZATION_VERSION, quantization_version)

    def add_custom_alignment(self, alignment: int) -> None:
        # Also updates the writer's own padding alignment, not just the KV.
        self.data_alignment = alignment
        self.add_uint32(Keys.General.ALIGNMENT, alignment)

    def add_file_type(self, ftype: int) -> None:
        self.add_uint32(Keys.General.FILE_TYPE, ftype)

    def add_name(self, name: str) -> None:
        self.add_string(Keys.General.NAME, name)

    def add_author(self, author: str) -> None:
        self.add_string(Keys.General.AUTHOR, author)

    def add_version(self, version: str) -> None:
        self.add_string(Keys.General.VERSION, version)

    def add_organization(self, organization: str) -> None:
        self.add_string(Keys.General.ORGANIZATION, organization)

    def add_finetune(self, finetune: str) -> None:
        self.add_string(Keys.General.FINETUNE, finetune)

    def add_basename(self, basename: str) -> None:
        self.add_string(Keys.General.BASENAME, basename)

    def add_description(self, description: str) -> None:
        self.add_string(Keys.General.DESCRIPTION, description)

    def add_quantized_by(self, quantized: str) -> None:
        self.add_string(Keys.General.QUANTIZED_BY, quantized)

    def add_size_label(self, size_label: str) -> None:
        self.add_string(Keys.General.SIZE_LABEL, size_label)

    # Licensing: separate keys for SPDX id, human-readable name, and link.
    def add_license(self, license: str) -> None:
        self.add_string(Keys.General.LICENSE, license)

    def add_license_name(self, license: str) -> None:
        self.add_string(Keys.General.LICENSE_NAME, license)

    def add_license_link(self, license: str) -> None:
        self.add_string(Keys.General.LICENSE_LINK, license)

    # Identifiers for this model...
    def add_url(self, url: str) -> None:
        self.add_string(Keys.General.URL, url)

    def add_doi(self, doi: str) -> None:
        self.add_string(Keys.General.DOI, doi)

    def add_uuid(self, uuid: str) -> None:
        self.add_string(Keys.General.UUID, uuid)

    def add_repo_url(self, repo_url: str) -> None:
        self.add_string(Keys.General.REPO_URL, repo_url)

    # ...and for the original (pre-conversion) source model.
    def add_source_url(self, url: str) -> None:
        self.add_string(Keys.General.SOURCE_URL, url)

    def add_source_doi(self, doi: str) -> None:
        self.add_string(Keys.General.SOURCE_DOI, doi)

    def add_source_uuid(self, uuid: str) -> None:
        self.add_string(Keys.General.SOURCE_UUID, uuid)

    def add_source_repo_url(self, repo_url: str) -> None:
        self.add_string(Keys.General.SOURCE_REPO_URL, repo_url)

    # Base-model lineage: an indexed set of keys, one group per ancestor.
    def add_base_model_count(self, source_count: int) -> None:
        self.add_uint32(Keys.General.BASE_MODEL_COUNT, source_count)

    def add_base_model_name(self, source_id: int, name: str) -> None:
        self.add_string(Keys.General.BASE_MODEL_NAME.format(id=source_id), name)

    def add_base_model_author(self, source_id: int, author: str) -> None:
        self.add_string(Keys.General.BASE_MODEL_AUTHOR.format(id=source_id), author)

    def add_base_model_version(self, source_id: int, version: str) -> None:
        self.add_string(Keys.General.BASE_MODEL_VERSION.format(id=source_id), version)

    def add_base_model_organization(self, source_id: int, organization: str) -> None:
        self.add_string(
            Keys.General.BASE_MODEL_ORGANIZATION.format(id=source_id), organization
        )

    def add_base_model_url(self, source_id: int, url: str) -> None:
        self.add_string(Keys.General.BASE_MODEL_URL.format(id=source_id), url)

    def add_base_model_doi(self, source_id: int, doi: str) -> None:
        self.add_string(Keys.General.BASE_MODEL_DOI.format(id=source_id), doi)

    def add_base_model_uuid(self, source_id: int, uuid: str) -> None:
        self.add_string(Keys.General.BASE_MODEL_UUID.format(id=source_id), uuid)

    def add_base_model_repo_url(self, source_id: int, repo_url: str) -> None:
        self.add_string(Keys.General.BASE_MODEL_REPO_URL.format(id=source_id), repo_url)

    # Free-form classification arrays.
    def add_tags(self, tags: Sequence[str]) -> None:
        self.add_array(Keys.General.TAGS, tags)

    def add_languages(self, languages: Sequence[str]) -> None:
        self.add_array(Keys.General.LANGUAGES, languages)

    def add_datasets(self, datasets: Sequence[str]) -> None:
        self.add_array(Keys.General.DATASETS, datasets)
+
    # ------------------------------------------------------------------
    # Architecture hyperparameters. Key names are templates parameterized
    # by self.arch (e.g. "llama.context_length").
    # ------------------------------------------------------------------

    def add_tensor_data_layout(self, layout: str) -> None:
        self.add_string(Keys.LLM.TENSOR_DATA_LAYOUT.format(arch=self.arch), layout)

    def add_vocab_size(self, size: int) -> None:
        self.add_uint32(Keys.LLM.VOCAB_SIZE.format(arch=self.arch), size)

    def add_context_length(self, length: int) -> None:
        self.add_uint32(Keys.LLM.CONTEXT_LENGTH.format(arch=self.arch), length)

    def add_embedding_length(self, length: int) -> None:
        self.add_uint32(Keys.LLM.EMBEDDING_LENGTH.format(arch=self.arch), length)

    def add_block_count(self, length: int) -> None:
        self.add_uint32(Keys.LLM.BLOCK_COUNT.format(arch=self.arch), length)

    def add_leading_dense_block_count(self, length: int) -> None:
        self.add_uint32(
            Keys.LLM.LEADING_DENSE_BLOCK_COUNT.format(arch=self.arch), length
        )

    def add_feed_forward_length(self, length: int | Sequence[int]) -> None:
        # Scalar for uniform models; array for per-layer FFN sizes.
        if isinstance(length, int):
            self.add_uint32(Keys.LLM.FEED_FORWARD_LENGTH.format(arch=self.arch), length)
        else:
            self.add_array(Keys.LLM.FEED_FORWARD_LENGTH.format(arch=self.arch), length)

    def add_expert_feed_forward_length(self, length: int) -> None:
        self.add_uint32(
            Keys.LLM.EXPERT_FEED_FORWARD_LENGTH.format(arch=self.arch), length
        )

    def add_expert_shared_feed_forward_length(self, length: int) -> None:
        self.add_uint32(
            Keys.LLM.EXPERT_SHARED_FEED_FORWARD_LENGTH.format(arch=self.arch), length
        )

    def add_parallel_residual(self, use: bool) -> None:
        self.add_bool(Keys.LLM.USE_PARALLEL_RESIDUAL.format(arch=self.arch), use)

    def add_decoder_start_token_id(self, id: int) -> None:
        self.add_uint32(Keys.LLM.DECODER_START_TOKEN_ID.format(arch=self.arch), id)

    # Attention hyperparameters.
    def add_head_count(self, count: int | Sequence[int]) -> None:
        # Scalar for uniform models; array for per-layer head counts.
        if isinstance(count, int):
            self.add_uint32(Keys.Attention.HEAD_COUNT.format(arch=self.arch), count)
        else:
            self.add_array(Keys.Attention.HEAD_COUNT.format(arch=self.arch), count)

    def add_head_count_kv(self, count: int | Sequence[int]) -> None:
        if isinstance(count, int):
            self.add_uint32(Keys.Attention.HEAD_COUNT_KV.format(arch=self.arch), count)
        else:
            self.add_array(Keys.Attention.HEAD_COUNT_KV.format(arch=self.arch), count)

    def add_key_length(self, length: int) -> None:
        self.add_uint32(Keys.Attention.KEY_LENGTH.format(arch=self.arch), length)

    def add_value_length(self, length: int) -> None:
        self.add_uint32(Keys.Attention.VALUE_LENGTH.format(arch=self.arch), length)

    def add_max_alibi_bias(self, bias: float) -> None:
        self.add_float32(Keys.Attention.MAX_ALIBI_BIAS.format(arch=self.arch), bias)

    def add_clamp_kqv(self, value: float) -> None:
        self.add_float32(Keys.Attention.CLAMP_KQV.format(arch=self.arch), value)

    def add_logit_scale(self, value: float) -> None:
        self.add_float32(Keys.LLM.LOGIT_SCALE.format(arch=self.arch), value)

    def add_attn_logit_softcapping(self, value: float) -> None:
        self.add_float32(Keys.LLM.ATTN_LOGIT_SOFTCAPPING.format(arch=self.arch), value)

    def add_final_logit_softcapping(self, value: float) -> None:
        self.add_float32(Keys.LLM.FINAL_LOGIT_SOFTCAPPING.format(arch=self.arch), value)

    # Mixture-of-experts hyperparameters.
    def add_expert_count(self, count: int) -> None:
        self.add_uint32(Keys.LLM.EXPERT_COUNT.format(arch=self.arch), count)

    def add_expert_used_count(self, count: int) -> None:
        self.add_uint32(Keys.LLM.EXPERT_USED_COUNT.format(arch=self.arch), count)

    def add_expert_shared_count(self, count: int) -> None:
        self.add_uint32(Keys.LLM.EXPERT_SHARED_COUNT.format(arch=self.arch), count)

    def add_expert_weights_scale(self, value: float) -> None:
        self.add_float32(Keys.LLM.EXPERT_WEIGHTS_SCALE.format(arch=self.arch), value)

    def add_layer_norm_eps(self, value: float) -> None:
        self.add_float32(Keys.Attention.LAYERNORM_EPS.format(arch=self.arch), value)

    def add_layer_norm_rms_eps(self, value: float) -> None:
        self.add_float32(Keys.Attention.LAYERNORM_RMS_EPS.format(arch=self.arch), value)

    def add_causal_attention(self, value: bool) -> None:
        self.add_bool(Keys.Attention.CAUSAL.format(arch=self.arch), value)

    def add_q_lora_rank(self, length: int) -> None:
        self.add_uint32(Keys.Attention.Q_LORA_RANK.format(arch=self.arch), length)

    def add_kv_lora_rank(self, length: int) -> None:
        self.add_uint32(Keys.Attention.KV_LORA_RANK.format(arch=self.arch), length)

    def add_relative_attn_buckets_count(self, value: int) -> None:
        self.add_uint32(Keys.Attention.REL_BUCKETS_COUNT.format(arch=self.arch), value)

    def add_sliding_window(self, value: int) -> None:
        self.add_uint32(Keys.Attention.SLIDING_WINDOW.format(arch=self.arch), value)

    def add_pooling_type(self, value: PoolingType) -> None:
        # Stored as the enum's integer value.
        self.add_uint32(Keys.LLM.POOLING_TYPE.format(arch=self.arch), value.value)

    # RoPE (rotary position embedding) hyperparameters.
    def add_rope_dimension_count(self, count: int) -> None:
        self.add_uint32(Keys.Rope.DIMENSION_COUNT.format(arch=self.arch), count)

    def add_rope_freq_base(self, value: float) -> None:
        self.add_float32(Keys.Rope.FREQ_BASE.format(arch=self.arch), value)

    def add_rope_scaling_type(self, value: RopeScalingType) -> None:
        # Stored as the enum's string value.
        self.add_string(Keys.Rope.SCALING_TYPE.format(arch=self.arch), value.value)

    def add_rope_scaling_factor(self, value: float) -> None:
        self.add_float32(Keys.Rope.SCALING_FACTOR.format(arch=self.arch), value)

    def add_rope_scaling_attn_factors(self, value: float) -> None:
        self.add_float32(Keys.Rope.SCALING_ATTN_FACTOR.format(arch=self.arch), value)

    def add_rope_scaling_orig_ctx_len(self, value: int) -> None:
        self.add_uint32(Keys.Rope.SCALING_ORIG_CTX_LEN.format(arch=self.arch), value)

    def add_rope_scaling_finetuned(self, value: bool) -> None:
        self.add_bool(Keys.Rope.SCALING_FINETUNED.format(arch=self.arch), value)

    def add_rope_scaling_yarn_log_mul(self, value: float) -> None:
        self.add_float32(Keys.Rope.SCALING_YARN_LOG_MUL.format(arch=self.arch), value)

    # State-space model (e.g. Mamba) hyperparameters.
    def add_ssm_conv_kernel(self, value: int) -> None:
        self.add_uint32(Keys.SSM.CONV_KERNEL.format(arch=self.arch), value)

    def add_ssm_inner_size(self, value: int) -> None:
        self.add_uint32(Keys.SSM.INNER_SIZE.format(arch=self.arch), value)

    def add_ssm_state_size(self, value: int) -> None:
        self.add_uint32(Keys.SSM.STATE_SIZE.format(arch=self.arch), value)

    def add_ssm_time_step_rank(self, value: int) -> None:
        self.add_uint32(Keys.SSM.TIME_STEP_RANK.format(arch=self.arch), value)
+
    # ------------------------------------------------------------------
    # tokenizer.* metadata keys (vocab, merges, special tokens, flags).
    # ------------------------------------------------------------------

    def add_tokenizer_model(self, model: str) -> None:
        self.add_string(Keys.Tokenizer.MODEL, model)

    def add_tokenizer_pre(self, pre: str) -> None:
        self.add_string(Keys.Tokenizer.PRE, pre)

    def add_token_list(
        self, tokens: Sequence[str] | Sequence[bytes] | Sequence[bytearray]
    ) -> None:
        self.add_array(Keys.Tokenizer.LIST, tokens)

    def add_token_merges(
        self, merges: Sequence[str] | Sequence[bytes] | Sequence[bytearray]
    ) -> None:
        self.add_array(Keys.Tokenizer.MERGES, merges)

    def add_token_types(self, types: Sequence[TokenType] | Sequence[int]) -> None:
        self.add_array(Keys.Tokenizer.TOKEN_TYPE, types)

    def add_token_type_count(self, value: int) -> None:
        self.add_uint32(Keys.Tokenizer.TOKEN_TYPE_COUNT, value)

    def add_token_scores(self, scores: Sequence[float]) -> None:
        self.add_array(Keys.Tokenizer.SCORES, scores)

    # Special-token ids.
    def add_bos_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.BOS_ID, id)

    def add_eos_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.EOS_ID, id)

    def add_unk_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.UNK_ID, id)

    def add_sep_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.SEP_ID, id)

    def add_pad_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.PAD_ID, id)

    def add_cls_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.CLS_ID, id)

    def add_mask_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.MASK_ID, id)

    # Tokenizer behavior flags.
    def add_add_bos_token(self, value: bool) -> None:
        self.add_bool(Keys.Tokenizer.ADD_BOS, value)

    def add_add_eos_token(self, value: bool) -> None:
        self.add_bool(Keys.Tokenizer.ADD_EOS, value)

    def add_add_space_prefix(self, value: bool) -> None:
        self.add_bool(Keys.Tokenizer.ADD_PREFIX, value)

    def add_remove_extra_whitespaces(self, value: bool) -> None:
        self.add_bool(Keys.Tokenizer.REMOVE_EXTRA_WS, value)

    def add_precompiled_charsmap(self, charsmap: Sequence[bytes]) -> None:
        self.add_array(Keys.Tokenizer.PRECOMPILED_CHARSMAP, charsmap)
+
+ def add_chat_template(self, value: str | Sequence[Mapping[str, str]]) -> None:
+ if not isinstance(value, str):
+ template_default = None
+ template_names = set()
+
+ for choice in value:
+ name = choice.get("name", "")
+ template = choice.get("template")
+
+ # Allowing non-alphanumerical characters in template name is probably not a good idea, so filter it
+ name = "".join(
+ (c if c in ascii_letters + digits else "_" for c in name)
+ )
+
+ if name and template is not None:
+ if name == "default":
+ template_default = template
+ else:
+ template_names.add(name)
+ self.add_string(
+ Keys.Tokenizer.CHAT_TEMPLATE_N.format(name=name), template
+ )
+
+ if template_names:
+ self.add_array(Keys.Tokenizer.CHAT_TEMPLATES, list(template_names))
+
+ if template_default is None:
+ return
+
+ value = template_default
+
+ self.add_string(Keys.Tokenizer.CHAT_TEMPLATE, value)
+
    # Fill-in-the-middle / end-of-turn special-token ids.
    def add_prefix_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.PREFIX_ID, id)

    def add_suffix_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.SUFFIX_ID, id)

    def add_middle_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.MIDDLE_ID, id)

    def add_eot_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.EOT_ID, id)

    def add_eom_token_id(self, id: int) -> None:
        self.add_uint32(Keys.Tokenizer.EOM_ID, id)
+
+ def _pack(self, fmt: str, value: Any, skip_pack_prefix: bool = False) -> bytes:
+ pack_prefix = ""
+ if not skip_pack_prefix:
+ pack_prefix = "<" if self.endianess == GGUFEndian.LITTLE else ">"
+ return struct.pack(f"{pack_prefix}{fmt}", value)
+
    def _pack_val(self, val: Any, vtype: GGUFValueType, add_vtype: bool) -> bytes:
        """Serialize a single KV value (recursively for arrays) to GGUF bytes.

        Args:
            val: The Python value to encode.
            vtype: The GGUF value type tag describing *val*.
            add_vtype: When True, prefix the encoding with the type tag
                (top-level values); array elements omit it.

        Raises:
            ValueError: For non-sequence/empty/mixed-type arrays, or values
                that do not match any supported GGUF type.
        """
        kv_data = bytearray()

        if add_vtype:
            kv_data += self._pack("I", vtype)

        pack_fmt = self._simple_value_packing.get(vtype)
        if pack_fmt is not None:
            # Fixed-size scalar; bools skip the byte-order prefix since
            # they are single bytes.
            kv_data += self._pack(
                pack_fmt, val, skip_pack_prefix=vtype == GGUFValueType.BOOL
            )
        elif vtype == GGUFValueType.STRING:
            # Strings are length-prefixed UTF-8 with no terminator.
            encoded_val = val.encode("utf-8") if isinstance(val, str) else val
            kv_data += self._pack("Q", len(encoded_val))
            kv_data += encoded_val
        elif vtype == GGUFValueType.ARRAY:

            if not isinstance(val, Sequence):
                raise ValueError("Invalid GGUF metadata array, expecting sequence")

            if len(val) == 0:
                raise ValueError("Invalid GGUF metadata array. Empty array")

            # Arrays are homogeneous: element type tag, count, then the
            # elements themselves (without per-element type tags).
            if isinstance(val, bytes):
                ltype = GGUFValueType.UINT8
            else:
                ltype = GGUFValueType.get_type(val[0])
                if not all(GGUFValueType.get_type(i) is ltype for i in val[1:]):
                    raise ValueError(
                        "All items in a GGUF array should be of the same type"
                    )
            kv_data += self._pack("I", ltype)
            kv_data += self._pack("Q", len(val))
            for item in val:
                kv_data += self._pack_val(item, ltype, add_vtype=False)
        else:
            raise ValueError("Invalid GGUF metadata value type or value")

        return kv_data
+
+ @staticmethod
+ def format_n_bytes_to_str(num: int) -> str:
+ if num == 0:
+ return "negligible - metadata only"
+ fnum = float(num)
+ for unit in ("", "K", "M", "G"):
+ if abs(fnum) < 1000.0:
+ return f"{fnum:3.1f}{unit}"
+ fnum /= 1000.0
+ return f"{fnum:.1f}T - over 1TB, split recommended"
diff --git a/modules_forge/packages/gguf/lazy.py b/modules_forge/packages/gguf/lazy.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3c0ae8da8821ee51a192e97a0126899ca908854
--- /dev/null
+++ b/modules_forge/packages/gguf/lazy.py
@@ -0,0 +1,286 @@
+from __future__ import annotations
+
+import logging
+from abc import ABC, ABCMeta, abstractmethod
+from typing import Any, Callable
+
+import numpy as np
+from numpy.typing import DTypeLike
+
+logger = logging.getLogger(__name__)
+
+
class LazyMeta(ABCMeta):
    """Metaclass that makes lazy tensor classes proxy their backing tensor.

    Attribute access is forwarded to the instance's ``_meta`` tensor via an
    injected ``__getattr__``, and operator dunders (which bypass
    ``__getattr__``) are pre-installed wrappers that defer evaluation
    through ``_wrap_fn``.
    """

    def __new__(
        cls, name: str, bases: tuple[type, ...], namespace: dict[str, Any], **kwargs
    ):
        def __getattr__(self, name: str) -> Any:
            meta_attr = getattr(self._meta, name)
            if callable(meta_attr):
                return type(self)._wrap_fn(
                    (lambda s, *args, **kwargs: getattr(s, name)(*args, **kwargs)),
                    use_self=self,
                )
            elif isinstance(meta_attr, self._tensor_type):
                # e.g. self.T with torch.Tensor should still be wrapped
                return type(self)._wrap_fn(lambda s: getattr(s, name))(self)
            else:
                # no need to wrap non-tensor properties,
                # and they likely don't depend on the actual contents of the tensor
                return meta_attr

        namespace["__getattr__"] = __getattr__

        # need to make a builder for the wrapped wrapper to copy the name,
        # or else it fails with very cryptic error messages,
        # because somehow the same string would end up in every closure
        def mk_wrap(op_name: str, *, meta_noop: bool = False):
            # need to wrap the wrapper to get self
            def wrapped_special_op(self, *args, **kwargs):
                return type(self)._wrap_fn(
                    getattr(type(self)._tensor_type, op_name),
                    meta_noop=meta_noop,
                )(self, *args, **kwargs)

            return wrapped_special_op

        # special methods bypass __getattr__, so they need to be added manually
        # ref: https://docs.python.org/3/reference/datamodel.html#special-lookup
        # NOTE: doing this from a metaclass is very convenient
        # TODO: make this even more comprehensive
        for binary_op in (
            "lt",
            "le",
            "eq",
            "ne",
            "ge",
            "gt",
            # BUG FIX: the original had `"not" "abs"` (missing comma), which
            # string-concatenated into the bogus name "notabs" and silently
            # left __abs__ unwrapped.
            "not",
            "abs",
            "add",
            "and",
            "floordiv",
            "invert",
            "lshift",
            "mod",
            "mul",
            "matmul",
            "neg",
            "or",
            "pos",
            "pow",
            "rshift",
            "sub",
            "truediv",
            "xor",
            "iadd",
            "iand",
            "ifloordiv",
            "ilshift",
            "imod",
            "imul",
            "ior",
            "irshift",
            "isub",
            "ixor",
            "radd",
            "rand",
            "rfloordiv",
            "rmul",
            "ror",
            "rpow",
            "rsub",
            "rtruediv",
            "rxor",
        ):
            attr_name = f"__{binary_op}__"
            # the result of these operators usually has the same shape and dtype as the input,
            # so evaluation on the meta tensor can be skipped.
            namespace[attr_name] = mk_wrap(attr_name, meta_noop=True)

        for special_op in (
            "getitem",
            "setitem",
            "len",
        ):
            attr_name = f"__{special_op}__"
            namespace[attr_name] = mk_wrap(attr_name, meta_noop=False)

        return super().__new__(cls, name, bases, namespace, **kwargs)
+
+
+# Tree of lazy tensors
+class LazyBase(ABC, metaclass=LazyMeta):
+ _tensor_type: type
+ _meta: Any
+ _data: Any | None
+ _args: tuple
+ _kwargs: dict[str, Any]
+ _func: Callable[[Any], Any] | None
+
+ def __init__(
+ self,
+ *,
+ meta: Any,
+ data: Any | None = None,
+ args: tuple = (),
+ kwargs: dict[str, Any] | None = None,
+ func: Callable[[Any], Any] | None = None,
+ ):
+ super().__init__()
+ self._meta = meta
+ self._data = data
+ self._args = args
+ self._kwargs = kwargs if kwargs is not None else {}
+ self._func = func
+ assert self._func is not None or self._data is not None
+
+ def __init_subclass__(cls) -> None:
+ if "_tensor_type" not in cls.__dict__:
+ raise TypeError(f"property '_tensor_type' must be defined for {cls!r}")
+ return super().__init_subclass__()
+
+ @staticmethod
+ def _recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any:
+ # TODO: dict and set
+ if isinstance(o, (list, tuple)):
+ L = []
+ for item in o:
+ L.append(LazyBase._recurse_apply(item, fn))
+ if isinstance(o, tuple):
+ L = tuple(L)
+ return L
+ elif isinstance(o, LazyBase):
+ return fn(o)
+ else:
+ return o
+
+ @classmethod
+ def _wrap_fn(
+ cls,
+ fn: Callable,
+ *,
+ use_self: LazyBase | None = None,
+ meta_noop: (
+ bool
+ | DTypeLike
+ | tuple[DTypeLike, Callable[[tuple[int, ...]], tuple[int, ...]]]
+ ) = False,
+ ) -> Callable[[Any], Any]:
+ def wrapped_fn(*args, **kwargs):
+ if kwargs is None:
+ kwargs = {}
+ args = ((use_self,) if use_self is not None else ()) + args
+
+ meta_args = LazyBase._recurse_apply(args, lambda t: t._meta)
+ # TODO: maybe handle tensors in kwargs too
+
+ if isinstance(meta_noop, bool) and not meta_noop:
+ try:
+ res = fn(*meta_args, **kwargs)
+ except NotImplementedError:
+ # running some operations on PyTorch's Meta tensors can cause this exception
+ res = None
+ else:
+ # some operators don't need to actually run on the meta tensors
+ assert len(args) > 0
+ res = args[0]
+ assert isinstance(res, cls)
+ res = res._meta
+ # allow operations to override the dtype and shape
+ if meta_noop is not True:
+ if isinstance(meta_noop, tuple):
+ dtype, shape = meta_noop
+ assert callable(shape)
+ res = cls.meta_with_dtype_and_shape(dtype, shape(res.shape))
+ else:
+ res = cls.meta_with_dtype_and_shape(meta_noop, res.shape)
+
+ if isinstance(res, cls._tensor_type):
+ return cls(
+ meta=cls.eager_to_meta(res), args=args, kwargs=kwargs, func=fn
+ )
+ else:
+ del res # not needed
+ # non-tensor return likely relies on the contents of the args
+ # (e.g. the result of torch.equal)
+ eager_args = cls.to_eager(args)
+ return fn(*eager_args, **kwargs)
+
+ return wrapped_fn
+
+ @classmethod
+ def to_eager(cls, t: Any) -> Any:
+ def simple_to_eager(_t: LazyBase) -> Any:
+ if _t._data is not None:
+ return _t._data
+
+ # NOTE: there's a recursion limit in Python (usually 1000)
+
+ assert _t._func is not None
+ _t._args = cls._recurse_apply(_t._args, simple_to_eager)
+ _t._data = _t._func(*_t._args, **_t._kwargs)
+ # sanity check
+ assert _t._data is not None
+ assert _t._data.dtype == _t._meta.dtype
+ assert _t._data.shape == _t._meta.shape
+
+ return _t._data
+
+ # recurse into lists and/or tuples, keeping their structure
+ return cls._recurse_apply(t, simple_to_eager)
+
+ @classmethod
+ def eager_to_meta(cls, t: Any) -> Any:
+ return cls.meta_with_dtype_and_shape(t.dtype, t.shape)
+
+ # must be overridden, meta tensor init is backend-specific
+ @classmethod
+ @abstractmethod
+ def meta_with_dtype_and_shape(cls, dtype: Any, shape: Any) -> Any:
+ pass
+
+ @classmethod
+ def from_eager(cls, t: Any) -> Any:
+ if type(t) is cls:
+ # already lazy
+ return t
+ elif isinstance(t, cls._tensor_type):
+ return cls(meta=cls.eager_to_meta(t), data=t)
+ else:
+ return TypeError(f"{type(t)!r} is not compatible with {cls._tensor_type!r}")
+
+
class LazyNumpyTensor(LazyBase):
    """Lazy tensor implementation backed by numpy ndarrays."""

    _tensor_type = np.ndarray

    shape: tuple[int, ...]  # Makes the type checker happy in quants.py

    @classmethod
    def meta_with_dtype_and_shape(
        cls, dtype: DTypeLike, shape: tuple[int, ...]
    ) -> np.ndarray[Any, Any]:
        """Build a zero-cost stand-in array with the given dtype and shape."""
        # The initial idea was to use np.nan as the fill value,
        # but non-float types like np.int16 can't use that.
        # So zero it is.
        cheat = np.zeros(1, dtype)
        # All-zero strides: every element aliases the single stored zero, so
        # the "array" occupies O(1) memory regardless of the claimed shape.
        return np.lib.stride_tricks.as_strided(cheat, shape, (0 for _ in shape))

    def astype(self, dtype, *args, **kwargs):
        # Record the cast lazily; the meta tensor gets the new dtype now so
        # downstream shape/dtype queries stay cheap.
        meta = type(self).meta_with_dtype_and_shape(dtype, self._meta.shape)
        full_args = (
            self,
            dtype,
        ) + args
        return type(self)(
            meta=meta,
            args=full_args,
            kwargs=kwargs,
            func=(lambda a, *args, **kwargs: a.astype(*args, **kwargs)),
        )

    def tofile(self, *args, **kwargs):
        # Writing to a file forces evaluation of the whole lazy graph.
        eager = LazyNumpyTensor.to_eager(self)
        return eager.tofile(*args, **kwargs)

    # TODO: __array_function__
diff --git a/modules_forge/packages/gguf/metadata.py b/modules_forge/packages/gguf/metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f3c7274169decbafd15cc1ece1423cb1004bb2e
--- /dev/null
+++ b/modules_forge/packages/gguf/metadata.py
@@ -0,0 +1,653 @@
+from __future__ import annotations
+
+import json
+import logging
+import re
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Literal, Optional
+
+import yaml
+
+import gguf
+
+from .constants import Keys
+
+logger = logging.getLogger("metadata")
+
+
@dataclass
class Metadata:
    # Authorship Metadata to be written to GGUF KV Store
    # (field names mirror the corresponding `general.*` GGUF keys)
    name: Optional[str] = None
    author: Optional[str] = None
    version: Optional[str] = None
    organization: Optional[str] = None
    finetune: Optional[str] = None
    basename: Optional[str] = None
    description: Optional[str] = None
    quantized_by: Optional[str] = None
    size_label: Optional[str] = None
    # links/identifiers for this model itself
    url: Optional[str] = None
    doi: Optional[str] = None
    uuid: Optional[str] = None
    repo_url: Optional[str] = None
    # links/identifiers for the model this one was converted/derived from
    source_url: Optional[str] = None
    source_doi: Optional[str] = None
    source_uuid: Optional[str] = None
    source_repo_url: Optional[str] = None
    license: Optional[str] = None
    license_name: Optional[str] = None
    license_link: Optional[str] = None
    # each entry is a dict (keys observed in this file: "name", "organization",
    # "version", "repo_url", ...)
    base_models: Optional[list[dict]] = None
    tags: Optional[list[str]] = None
    languages: Optional[list[str]] = None
    datasets: Optional[list[str]] = None
+
+ @staticmethod
+ def load(
+ metadata_override_path: Optional[Path] = None,
+ model_path: Optional[Path] = None,
+ model_name: Optional[str] = None,
+ total_params: int = 0,
+ ) -> Metadata:
+ # This grabs as many contextual authorship metadata as possible from the model repository
+ # making any conversion as required to match the gguf kv store metadata format
+ # as well as giving users the ability to override any authorship metadata that may be incorrect
+
+ # Create a new Metadata instance
+ metadata = Metadata()
+
+ model_card = Metadata.load_model_card(model_path)
+ hf_params = Metadata.load_hf_parameters(model_path)
+ # TODO: load adapter_config.json when possible, it usually contains the base model of the LoRA adapter
+
+ # heuristics
+ metadata = Metadata.apply_metadata_heuristic(
+ metadata, model_card, hf_params, model_path, total_params
+ )
+
+ # Metadata Override File Provided
+ # This is based on LLM_KV_NAMES mapping in llama.cpp
+ metadata_override = Metadata.load_metadata_override(metadata_override_path)
+
+ metadata.name = metadata_override.get(Keys.General.NAME, metadata.name)
+ metadata.author = metadata_override.get(Keys.General.AUTHOR, metadata.author)
+ metadata.version = metadata_override.get(Keys.General.VERSION, metadata.version)
+ metadata.organization = metadata_override.get(
+ Keys.General.ORGANIZATION, metadata.organization
+ )
+
+ metadata.finetune = metadata_override.get(
+ Keys.General.FINETUNE, metadata.finetune
+ )
+ metadata.basename = metadata_override.get(
+ Keys.General.BASENAME, metadata.basename
+ )
+
+ metadata.description = metadata_override.get(
+ Keys.General.DESCRIPTION, metadata.description
+ )
+ metadata.quantized_by = metadata_override.get(
+ Keys.General.QUANTIZED_BY, metadata.quantized_by
+ )
+
+ metadata.size_label = metadata_override.get(
+ Keys.General.SIZE_LABEL, metadata.size_label
+ )
+ metadata.license_name = metadata_override.get(
+ Keys.General.LICENSE_NAME, metadata.license_name
+ )
+ metadata.license_link = metadata_override.get(
+ Keys.General.LICENSE_LINK, metadata.license_link
+ )
+
+ metadata.url = metadata_override.get(Keys.General.URL, metadata.url)
+ metadata.doi = metadata_override.get(Keys.General.DOI, metadata.doi)
+ metadata.uuid = metadata_override.get(Keys.General.UUID, metadata.uuid)
+ metadata.repo_url = metadata_override.get(
+ Keys.General.REPO_URL, metadata.repo_url
+ )
+
+ metadata.source_url = metadata_override.get(
+ Keys.General.SOURCE_URL, metadata.source_url
+ )
+ metadata.source_doi = metadata_override.get(
+ Keys.General.SOURCE_DOI, metadata.source_doi
+ )
+ metadata.source_uuid = metadata_override.get(
+ Keys.General.SOURCE_UUID, metadata.source_uuid
+ )
+ metadata.source_repo_url = metadata_override.get(
+ Keys.General.SOURCE_REPO_URL, metadata.source_repo_url
+ )
+
+ # Base Models is received here as an array of models
+ metadata.base_models = metadata_override.get(
+ "general.base_models", metadata.base_models
+ )
+
+ metadata.tags = metadata_override.get(Keys.General.TAGS, metadata.tags)
+ metadata.languages = metadata_override.get(
+ Keys.General.LANGUAGES, metadata.languages
+ )
+ metadata.datasets = metadata_override.get(
+ Keys.General.DATASETS, metadata.datasets
+ )
+
+ # Direct Metadata Override (via direct cli argument)
+ if model_name is not None:
+ metadata.name = model_name
+
+ return metadata
+
+ @staticmethod
+ def load_metadata_override(
+ metadata_override_path: Optional[Path] = None,
+ ) -> dict[str, Any]:
+ if metadata_override_path is None or not metadata_override_path.is_file():
+ return {}
+
+ with open(metadata_override_path, "r", encoding="utf-8") as f:
+ return json.load(f)
+
+ @staticmethod
+ def load_model_card(model_path: Optional[Path] = None) -> dict[str, Any]:
+ if model_path is None or not model_path.is_dir():
+ return {}
+
+ model_card_path = model_path / "README.md"
+
+ if not model_card_path.is_file():
+ return {}
+
+ # The model card metadata is assumed to always be in YAML
+ # ref: https://github.com/huggingface/transformers/blob/a5c642fe7a1f25d3bdcd76991443ba6ff7ee34b2/src/transformers/modelcard.py#L468-L473
+ with open(model_card_path, "r", encoding="utf-8") as f:
+ if f.readline() == "---\n":
+ raw = f.read().partition("---\n")[0]
+ data = yaml.safe_load(raw)
+ if isinstance(data, dict):
+ return data
+ else:
+ logger.error(
+ f"while reading YAML model card frontmatter, data is {type(data)} instead of dict"
+ )
+ return {}
+ else:
+ return {}
+
+ @staticmethod
+ def load_hf_parameters(model_path: Optional[Path] = None) -> dict[str, Any]:
+ if model_path is None or not model_path.is_dir():
+ return {}
+
+ config_path = model_path / "config.json"
+
+ if not config_path.is_file():
+ return {}
+
+ with open(config_path, "r", encoding="utf-8") as f:
+ return json.load(f)
+
+ @staticmethod
+ def id_to_title(string):
+ # Convert capitalization into title form unless acronym or version number
+ return " ".join(
+ [
+ (
+ w.title()
+ if w.islower() and not re.match(r"^(v\d+(?:\.\d+)*|\d.*)$", w)
+ else w
+ )
+ for w in string.strip().replace("-", " ").split()
+ ]
+ )
+
    @staticmethod
    def get_model_id_components(
        model_id: Optional[str] = None, total_params: int = 0
    ) -> tuple[str | None, str | None, str | None, str | None, str | None, str | None]:
        """Split a model id into naming-convention components.

        Returns a 6-tuple
        ``(model_full_name_component, org_component, basename, finetune,
        version, size_label)``; any element may be None when it cannot be
        determined.  A negative ``total_params`` is treated as the size of a
        LoRA adapter (see the size-label heuristic below).
        """
        # Huggingface often store model id as '/'
        # so let's parse it and apply some heuristics if possible for model name components

        if model_id is None:
            # model ID missing
            return None, None, None, None, None, None

        if " " in model_id:
            # model ID is actually a normal human sentence
            # which means its most likely a normal model name only
            # not part of the hugging face naming standard, but whatever
            return model_id, None, None, None, None, None

        if "/" in model_id:
            # model ID (huggingface style)
            org_component, model_full_name_component = model_id.split("/", 1)
        else:
            # model ID but missing org components
            org_component, model_full_name_component = None, model_id

        # Check if we erroneously matched against './' or '../' etc...
        if (
            org_component is not None
            and len(org_component) > 0
            and org_component[0] == "."
        ):
            org_component = None

        name_parts: list[str] = model_full_name_component.split("-")

        # Remove empty parts
        for i in reversed(range(len(name_parts))):
            if len(name_parts[i]) == 0:
                del name_parts[i]

        # one annotation set per name part; a part may get several labels
        name_types: list[
            set[Literal["basename", "size_label", "finetune", "version", "type"]]
        ] = [set() for _ in name_parts]

        # Annotate the name
        for i, part in enumerate(name_parts):
            # Version
            if re.fullmatch(r"(v|iter)?\d+([.]\d+)*", part, re.IGNORECASE):
                name_types[i].add("version")
            # Quant type (should not be there for base models, but still annotated)
            elif re.fullmatch(r"i?q\d(_\w)*|b?fp?(16|32)", part, re.IGNORECASE):
                name_types[i].add("type")
                name_parts[i] = part.upper()
            # Model size
            elif i > 0 and re.fullmatch(
                r"(([A]|\d+[x])?\d+([._]\d+)?[KMBT][\d]?|small|mini|medium|large|x?xl)",
                part,
                re.IGNORECASE,
            ):
                part = part.replace("_", ".")
                # Handle weird bloom-7b1 notation
                if part[-1].isdecimal():
                    part = part[:-2] + "." + part[-1] + part[-2]
                # Normalize the size suffixes
                if len(part) > 1 and part[-2].isdecimal():
                    if part[-1] in "kmbt":
                        part = part[:-1] + part[-1].upper()
                if total_params != 0:
                    try:
                        label_params = float(part[:-1]) * pow(
                            1000, " KMBT".find(part[-1])
                        )
                        # Only use it as a size label if it's close or bigger than the model size
                        # Note that LoRA adapters don't necessarily include all layers,
                        # so this is why bigger label sizes are accepted.
                        # Do not use the size label when it's smaller than 1/8 of the model size
                        if (
                            total_params < 0 and label_params < abs(total_params) // 8
                        ) or (
                            # Check both directions when the current model isn't a LoRA adapter
                            total_params > 0
                            and abs(label_params - total_params) > 7 * total_params // 8
                        ):
                            # Likely a context length
                            name_types[i].add("finetune")
                            # Lowercase the size when it's a context length
                            part = part[:-1] + part[-1].lower()
                    except ValueError:
                        # Failed to convert the size label to float, use it anyway
                        pass
                if len(name_types[i]) == 0:
                    name_types[i].add("size_label")
                name_parts[i] = part
            # Some easy to recognize finetune names
            elif i > 0 and re.fullmatch(
                r"chat|instruct|vision|lora", part, re.IGNORECASE
            ):
                if total_params < 0 and part.lower() == "lora":
                    # ignore redundant "lora" in the finetune part when the output is a lora adapter
                    name_types[i].add("type")
                else:
                    name_types[i].add("finetune")

        # Ignore word-based size labels when there is at least a number-based one present
        # TODO: should word-based size labels always be removed instead?
        if any(
            c.isdecimal()
            for n, t in zip(name_parts, name_types)
            if "size_label" in t
            for c in n
        ):
            for n, t in zip(name_parts, name_types):
                if "size_label" in t:
                    if all(c.isalpha() for c in n):
                        t.remove("size_label")

        at_start = True
        # Find the basename through the annotated name
        for part, t in zip(name_parts, name_types):
            if at_start and ((len(t) == 0 and part[0].isalpha()) or "version" in t):
                t.add("basename")
            else:
                if at_start:
                    at_start = False
                if len(t) == 0:
                    t.add("finetune")

        # Remove the basename annotation from trailing version
        for part, t in zip(reversed(name_parts), reversed(name_types)):
            if "basename" in t and len(t) > 1:
                t.remove("basename")
            else:
                break

        basename = (
            "-".join(n for n, t in zip(name_parts, name_types) if "basename" in t)
            or None
        )
        # Deduplicate size labels using order-preserving 'dict' ('set' seems to sort the keys)
        size_label = (
            "-".join(
                dict.fromkeys(
                    s for s, t in zip(name_parts, name_types) if "size_label" in t
                ).keys()
            )
            or None
        )
        finetune = (
            "-".join(f for f, t in zip(name_parts, name_types) if "finetune" in t)
            or None
        )
        # TODO: should the basename version always be excluded?
        # NOTE: multiple finetune versions are joined together
        version = (
            "-".join(
                v
                for v, t, in zip(name_parts, name_types)
                if "version" in t and "basename" not in t
            )
            or None
        )

        if size_label is None and finetune is None and version is None:
            # Too ambiguous, output nothing
            basename = None

        return (
            model_full_name_component,
            org_component,
            basename,
            finetune,
            version,
            size_label,
        )
+
    @staticmethod
    def apply_metadata_heuristic(
        metadata: Metadata,
        model_card: Optional[dict] = None,
        hf_params: Optional[dict] = None,
        model_path: Optional[Path] = None,
        total_params: int = 0,
    ) -> Metadata:
        """Fill unset fields of ``metadata`` from (in priority order) the model
        card, the HF ``config.json`` parameters, and finally the model
        directory name.  Mutates and returns the same ``metadata`` instance.
        """
        # Reference Model Card Metadata: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1

        # Model Card Heuristics
        ########################
        if model_card is not None:

            # copy a scalar card value only when the metadata field is still unset
            def use_model_card_metadata(metadata_key: str, model_card_key: str):
                if (
                    model_card_key in model_card
                    and getattr(metadata, metadata_key, None) is None
                ):
                    setattr(metadata, metadata_key, model_card.get(model_card_key))

            def use_array_model_card_metadata(metadata_key: str, model_card_key: str):
                # Note: Will append rather than replace if already exist
                tags_value = model_card.get(model_card_key, None)
                if tags_value is None:
                    return

                current_value = getattr(metadata, metadata_key, None)
                if current_value is None:
                    current_value = []

                if isinstance(tags_value, str):
                    current_value.append(tags_value)
                elif isinstance(tags_value, list):
                    current_value.extend(tags_value)

                setattr(metadata, metadata_key, current_value)

            # LLAMA.cpp's direct internal convention
            # (Definitely not part of hugging face formal/informal standard)
            #########################################
            use_model_card_metadata("name", "name")
            use_model_card_metadata("author", "author")
            use_model_card_metadata("version", "version")
            use_model_card_metadata("organization", "organization")
            use_model_card_metadata("description", "description")
            use_model_card_metadata("finetune", "finetune")
            use_model_card_metadata("basename", "basename")
            use_model_card_metadata("size_label", "size_label")
            use_model_card_metadata("source_url", "url")
            use_model_card_metadata("source_doi", "doi")
            use_model_card_metadata("source_uuid", "uuid")
            use_model_card_metadata("source_repo_url", "repo_url")

            # LLAMA.cpp's huggingface style convention
            # (Definitely not part of hugging face formal/informal standard... but with model_ appended to match their style)
            ###########################################
            use_model_card_metadata("name", "model_name")
            use_model_card_metadata("author", "model_author")
            use_model_card_metadata("version", "model_version")
            use_model_card_metadata("organization", "model_organization")
            use_model_card_metadata("description", "model_description")
            use_model_card_metadata("finetune", "model_finetune")
            use_model_card_metadata("basename", "model_basename")
            use_model_card_metadata("size_label", "model_size_label")
            use_model_card_metadata("source_url", "model_url")
            use_model_card_metadata("source_doi", "model_doi")
            use_model_card_metadata("source_uuid", "model_uuid")
            use_model_card_metadata("source_repo_url", "model_repo_url")

            # Hugging Face Direct Convention
            #################################

            # Not part of huggingface model card standard but notice some model creator using it
            # such as TheBloke in 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF'
            use_model_card_metadata("name", "model_name")
            use_model_card_metadata("author", "model_creator")
            use_model_card_metadata("basename", "model_type")

            if "base_model" in model_card:
                # This represents the parent models that this is based on
                # Example: stabilityai/stable-diffusion-xl-base-1.0. Can also be a list (for merges)
                # Example of merges: https://huggingface.co/EmbeddedLLM/Mistral-7B-Merge-14-v0.1/blob/main/README.md
                metadata_base_models = []
                base_model_value = model_card.get("base_model", None)

                if base_model_value is not None:
                    if isinstance(base_model_value, str):
                        metadata_base_models.append(base_model_value)
                    elif isinstance(base_model_value, list):
                        metadata_base_models.extend(base_model_value)

                if metadata.base_models is None:
                    metadata.base_models = []

                for model_id in metadata_base_models:
                    # NOTE: model size of base model is assumed to be similar to the size of the current model
                    (
                        model_full_name_component,
                        org_component,
                        basename,
                        finetune,
                        version,
                        size_label,
                    ) = Metadata.get_model_id_components(model_id, total_params)
                    base_model = {}
                    if model_full_name_component is not None:
                        base_model["name"] = Metadata.id_to_title(
                            model_full_name_component
                        )
                    if org_component is not None:
                        base_model["organization"] = Metadata.id_to_title(org_component)
                    if version is not None:
                        base_model["version"] = version
                    if (
                        org_component is not None
                        and model_full_name_component is not None
                    ):
                        base_model["repo_url"] = (
                            f"https://huggingface.co/{org_component}/{model_full_name_component}"
                        )
                    metadata.base_models.append(base_model)

            use_model_card_metadata("license", "license")
            use_model_card_metadata("license_name", "license_name")
            use_model_card_metadata("license_link", "license_link")

            use_array_model_card_metadata("tags", "tags")
            use_array_model_card_metadata("tags", "pipeline_tag")

            use_array_model_card_metadata("languages", "languages")
            use_array_model_card_metadata("languages", "language")

            use_array_model_card_metadata("datasets", "datasets")
            use_array_model_card_metadata("datasets", "dataset")

        # Hugging Face Parameter Heuristics
        ####################################

        if hf_params is not None:

            hf_name_or_path = hf_params.get("_name_or_path")
            if hf_name_or_path is not None and hf_name_or_path.count("/") <= 1:
                # Use _name_or_path only if its actually a model name and not some computer path
                # e.g. 'meta-llama/Llama-2-7b-hf'
                model_id = hf_name_or_path
                (
                    model_full_name_component,
                    org_component,
                    basename,
                    finetune,
                    version,
                    size_label,
                ) = Metadata.get_model_id_components(model_id, total_params)
                if metadata.name is None and model_full_name_component is not None:
                    metadata.name = Metadata.id_to_title(model_full_name_component)
                if metadata.organization is None and org_component is not None:
                    metadata.organization = Metadata.id_to_title(org_component)
                if metadata.basename is None and basename is not None:
                    metadata.basename = basename
                if metadata.finetune is None and finetune is not None:
                    metadata.finetune = finetune
                if metadata.version is None and version is not None:
                    metadata.version = version
                if metadata.size_label is None and size_label is not None:
                    metadata.size_label = size_label

        # Directory Folder Name Fallback Heuristics
        ############################################
        if model_path is not None:
            model_id = model_path.name
            (
                model_full_name_component,
                org_component,
                basename,
                finetune,
                version,
                size_label,
            ) = Metadata.get_model_id_components(model_id, total_params)
            if metadata.name is None and model_full_name_component is not None:
                metadata.name = Metadata.id_to_title(model_full_name_component)
            if metadata.organization is None and org_component is not None:
                metadata.organization = Metadata.id_to_title(org_component)
            if metadata.basename is None and basename is not None:
                metadata.basename = basename
            if metadata.finetune is None and finetune is not None:
                metadata.finetune = finetune
            if metadata.version is None and version is not None:
                metadata.version = version
            if metadata.size_label is None and size_label is not None:
                metadata.size_label = size_label

        return metadata
+
+ def set_gguf_meta_model(self, gguf_writer: gguf.GGUFWriter):
+ assert self.name is not None
+ gguf_writer.add_name(self.name)
+
+ if self.author is not None:
+ gguf_writer.add_author(self.author)
+ if self.version is not None:
+ gguf_writer.add_version(self.version)
+ if self.organization is not None:
+ gguf_writer.add_organization(self.organization)
+
+ if self.finetune is not None:
+ gguf_writer.add_finetune(self.finetune)
+ if self.basename is not None:
+ gguf_writer.add_basename(self.basename)
+
+ if self.description is not None:
+ gguf_writer.add_description(self.description)
+ if self.quantized_by is not None:
+ gguf_writer.add_quantized_by(self.quantized_by)
+
+ if self.size_label is not None:
+ gguf_writer.add_size_label(self.size_label)
+
+ if self.license is not None:
+ gguf_writer.add_license(self.license)
+ if self.license_name is not None:
+ gguf_writer.add_license_name(self.license_name)
+ if self.license_link is not None:
+ gguf_writer.add_license_link(self.license_link)
+
+ if self.url is not None:
+ gguf_writer.add_url(self.url)
+ if self.doi is not None:
+ gguf_writer.add_doi(self.doi)
+ if self.uuid is not None:
+ gguf_writer.add_uuid(self.uuid)
+ if self.repo_url is not None:
+ gguf_writer.add_repo_url(self.repo_url)
+
+ if self.source_url is not None:
+ gguf_writer.add_source_url(self.source_url)
+ if self.source_doi is not None:
+ gguf_writer.add_source_doi(self.source_doi)
+ if self.source_uuid is not None:
+ gguf_writer.add_source_uuid(self.source_uuid)
+ if self.source_repo_url is not None:
+ gguf_writer.add_source_repo_url(self.source_repo_url)
+
+ if self.base_models is not None:
+ gguf_writer.add_base_model_count(len(self.base_models))
+ for key, base_model_entry in enumerate(self.base_models):
+ if "name" in base_model_entry:
+ gguf_writer.add_base_model_name(key, base_model_entry["name"])
+ if "author" in base_model_entry:
+ gguf_writer.add_base_model_author(key, base_model_entry["author"])
+ if "version" in base_model_entry:
+ gguf_writer.add_base_model_version(key, base_model_entry["version"])
+ if "organization" in base_model_entry:
+ gguf_writer.add_base_model_organization(
+ key, base_model_entry["organization"]
+ )
+ if "url" in base_model_entry:
+ gguf_writer.add_base_model_url(key, base_model_entry["url"])
+ if "doi" in base_model_entry:
+ gguf_writer.add_base_model_doi(key, base_model_entry["doi"])
+ if "uuid" in base_model_entry:
+ gguf_writer.add_base_model_uuid(key, base_model_entry["uuid"])
+ if "repo_url" in base_model_entry:
+ gguf_writer.add_base_model_repo_url(
+ key, base_model_entry["repo_url"]
+ )
+
+ if self.tags is not None:
+ gguf_writer.add_tags(self.tags)
+ if self.languages is not None:
+ gguf_writer.add_languages(self.languages)
+ if self.datasets is not None:
+ gguf_writer.add_datasets(self.datasets)
diff --git a/modules_forge/packages/gguf/quants.py b/modules_forge/packages/gguf/quants.py
new file mode 100644
index 0000000000000000000000000000000000000000..028feb4744a70825d9c568a37e18f862167d2841
--- /dev/null
+++ b/modules_forge/packages/gguf/quants.py
@@ -0,0 +1,1762 @@
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from math import ceil, log2
+from typing import Any, Callable, Sequence
+
+import numpy as np
+import torch
+from numpy.typing import DTypeLike
+
+from .constants import GGML_QUANT_SIZES, QK_K, GGMLQuantizationType
+from .lazy import LazyNumpyTensor
+from .quick_4bits_ops import (
+ change_4bits_order,
+ quick_unpack_4bits,
+ quick_unpack_4bits_u,
+)
+
+quick_split = lambda x, p: torch.split(x, p + [x.shape[1] - sum(p)], dim=-1)
+
+
def quant_shape_to_byte_shape(
    shape: Sequence[int], quant_type: GGMLQuantizationType
) -> tuple[int, ...]:
    """Convert an element-count shape into the byte shape of its quantized form."""
    block_size, type_size = GGML_QUANT_SIZES[quant_type]
    row_size = shape[-1]
    if row_size % block_size != 0:
        raise ValueError(
            f"Quantized tensor row size ({shape[-1]}) is not a multiple of {quant_type.name} block size ({block_size})"
        )
    return (*shape[:-1], row_size // block_size * type_size)
+
+
def quant_shape_from_byte_shape(
    shape: Sequence[int], quant_type: GGMLQuantizationType
) -> tuple[int, ...]:
    """Convert a byte shape back into the element-count shape it encodes."""
    block_size, type_size = GGML_QUANT_SIZES[quant_type]
    row_bytes = shape[-1]
    if row_bytes % type_size != 0:
        raise ValueError(
            f"Quantized tensor bytes per row ({shape[-1]}) is not a multiple of {quant_type.name} type size ({type_size})"
        )
    return (*shape[:-1], row_bytes // type_size * block_size)
+
+
+# This is faster than np.vectorize and np.apply_along_axis because it works on more than one row at a time
+def _apply_over_grouped_rows(
+ func: Callable[[np.ndarray], np.ndarray],
+ arr: np.ndarray,
+ otype: DTypeLike,
+ oshape: tuple[int, ...],
+) -> np.ndarray:
+ rows = arr.reshape((-1, arr.shape[-1]))
+ osize = 1
+ for dim in oshape:
+ osize *= dim
+ out = np.empty(shape=osize, dtype=otype)
+ # compute over groups of 16 rows (arbitrary, but seems good for performance)
+ n_groups = (rows.shape[0] // 16) or 1
+ np.concatenate(
+ [func(group).ravel() for group in np.array_split(rows, n_groups)],
+ axis=0,
+ out=out,
+ )
+ return out.reshape(oshape)
+
+
# round away from zero
# ref: https://stackoverflow.com/a/59143326/22827863
def np_roundf(n: np.ndarray) -> np.ndarray:
    """Round half away from zero, element-wise (like C's roundf)."""
    magnitude = np.abs(n)
    whole = np.floor(magnitude)
    # adds 1 exactly when the fractional part is >= 0.5
    rounded = whole + np.floor(2 * (magnitude - whole))
    return np.sign(n) * rounded
+
+
+class QuantError(Exception): ...
+
+
+_type_traits: dict[GGMLQuantizationType, type[__Quant]] = {}
+
+
def quantize(data: np.ndarray, qtype: GGMLQuantizationType) -> np.ndarray:
    """Quantize ``data`` to ``qtype`` (F32/F16 are plain dtype casts)."""
    if qtype == GGMLQuantizationType.F32:
        return data.astype(np.float32, copy=False)
    if qtype == GGMLQuantizationType.F16:
        return data.astype(np.float16, copy=False)
    traits = _type_traits.get(qtype)
    if traits is None:
        raise NotImplementedError(
            f"Quantization for {qtype.name} is not yet implemented"
        )
    return traits.quantize(data)
+
+
def dequantize(data: np.ndarray, qtype: GGMLQuantizationType) -> np.ndarray:
    """Dequantize raw ``data`` of ``qtype`` back to float32 (F32/F16 are views/casts)."""
    if qtype == GGMLQuantizationType.F32:
        return data.view(np.float32)
    if qtype == GGMLQuantizationType.F16:
        return data.view(np.float16).astype(np.float32)
    traits = _type_traits.get(qtype)
    if traits is None:
        raise NotImplementedError(
            f"Dequantization for {qtype.name} is not yet implemented"
        )
    return traits.dequantize(data)
+
+
class __Quant(ABC):
    """Shared machinery for GGML block-quantization formats.

    Subclasses register themselves in ``_type_traits`` via
    ``__init_subclass__`` and provide the per-format (de)quantization kernels.
    They are used as namespaces only and are never instantiated.
    """

    qtype: GGMLQuantizationType
    block_size: int
    type_size: int

    # optional codebook (decoded lazily from grid_hex by init_grid)
    grid: np.ndarray[Any, np.dtype[np.float32]] | None = None
    grid_shape: tuple[int, int] = (0, 0)
    grid_map: tuple[int | float, ...] = ()
    grid_hex: bytes | None = None

    def __init__(self):
        # FIX: was `return TypeError(...)`, which constructed the exception and
        # returned it instead of raising — instantiation silently succeeded.
        raise TypeError("Quant conversion classes can't have instances")

    def __init_subclass__(cls, qtype: GGMLQuantizationType) -> None:
        """Register the subclass for its quant type and build the lazy wrappers."""
        cls.qtype = qtype
        cls.block_size, cls.type_size = GGML_QUANT_SIZES[qtype]
        cls.__quantize_lazy = LazyNumpyTensor._wrap_fn(
            cls.__quantize_array, meta_noop=(np.uint8, cls.__shape_to_bytes)
        )
        cls.__dequantize_lazy = LazyNumpyTensor._wrap_fn(
            cls.__dequantize_array, meta_noop=(np.float32, cls.__shape_from_bytes)
        )
        assert qtype not in _type_traits
        _type_traits[qtype] = cls

    @classmethod
    def init_grid(cls):
        """Decode ``grid_hex`` into the float codebook ``grid`` (idempotent)."""
        if cls.grid is not None or cls.grid_hex is None:
            return

        bits_per_elem = ceil(log2(len(cls.grid_map)))
        assert bits_per_elem != 0, cls.qtype.name
        elems_per_byte = 8 // bits_per_elem

        grid = np.frombuffer(cls.grid_hex, dtype=np.uint8)
        # decode hexadecimal chars from grid
        grid = grid.reshape((-1, 2))
        grid = (np.where(grid > 0x40, grid + 9, grid) & 0x0F) << np.array(
            [4, 0], dtype=np.uint8
        ).reshape((1, 2))
        grid = grid[..., 0] | grid[..., 1]
        # unpack the grid values
        grid = grid.reshape((-1, 1)) >> np.array(
            [i for i in range(0, 8, 8 // elems_per_byte)], dtype=np.uint8
        ).reshape((1, elems_per_byte))
        grid = (grid & ((1 << bits_per_elem) - 1)).reshape((-1, 1))
        grid_map = np.array(cls.grid_map, dtype=np.float32).reshape((1, -1))
        grid = np.take_along_axis(grid_map, grid, axis=-1)
        cls.grid = grid.reshape((1, 1, *cls.grid_shape))

    @classmethod
    def quantize_pytorch(cls, data, parent) -> torch.Tensor:
        """Quantize torch ``data`` into ``parent.data``; ``parent`` must be baked."""
        if not parent.baked:
            raise ValueError("GGUF Tensor is not baked!")

        block_size, type_size = GGML_QUANT_SIZES[cls.qtype]
        blocks = data.reshape(-1, block_size)
        parent.data = cls.quantize_blocks_pytorch(
            blocks, block_size, type_size, parent
        ).contiguous()
        return parent

    @classmethod
    def bake(cls, parameter):
        """Reshape ``parameter.data`` into raw uint8 blocks (once), then run the
        subclass-specific ``bake_inner`` preprocessing and mark it baked."""
        if parameter.baked:
            return

        data = parameter.data
        cls.block_size, cls.type_size = GGML_QUANT_SIZES[cls.qtype]
        rows = data.reshape((-1, data.shape[-1])).view(torch.uint8)
        n_blocks = rows.numel() // cls.type_size
        blocks = rows.reshape((n_blocks, cls.type_size))
        parameter.data = blocks.contiguous()
        cls.bake_inner(parameter)
        parameter.baked = True
        return

    @classmethod
    def bake_inner(cls, parameter):
        """Optional per-format hook run at the end of ``bake``; default no-op."""
        pass

    @classmethod
    def dequantize_pytorch(cls, x):
        """Dequantize a baked tensor back to its logical shape (torch path)."""
        if not x.baked:
            raise ValueError("GGUF Tensor is not baked!")

        blocks = cls.dequantize_blocks_pytorch(x.data, cls.block_size, cls.type_size, x)
        return blocks.view(x.shape)

    @classmethod
    @abstractmethod
    def dequantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parameter
    ) -> torch.Tensor:
        """Per-format torch dequantization of raw blocks."""
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def quantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parent
    ) -> torch.Tensor:
        """Per-format torch quantization of raw blocks (user-facing error by default)."""
        raise NotImplementedError(
            'Low bit LoRA for this data type is not implemented yet. Please select "Automatic (fp16 LoRA)" in "Diffusion in Low Bits" (on the top line of this page) to use this LoRA.'
        )

    @classmethod
    @abstractmethod
    def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Per-format numpy quantization of float32 blocks to uint8 bytes."""
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Per-format numpy dequantization of uint8 blocks to float32."""
        raise NotImplementedError

    @classmethod
    def quantize_rows(cls, rows: np.ndarray) -> np.ndarray:
        """Quantize row-major float data; returns uint8 byte rows."""
        rows = rows.astype(np.float32, copy=False)
        shape = rows.shape
        n_blocks = rows.size // cls.block_size
        blocks = rows.reshape((n_blocks, cls.block_size))
        blocks = cls.quantize_blocks(blocks)
        assert blocks.dtype == np.uint8
        assert blocks.shape[-1] == cls.type_size
        return blocks.reshape(cls.__shape_to_bytes(shape))

    @classmethod
    def dequantize_rows(cls, rows: np.ndarray) -> np.ndarray:
        """Dequantize uint8 byte rows back to float32 element rows."""
        rows = rows.view(np.uint8)
        shape = rows.shape
        n_blocks = rows.size // cls.type_size
        blocks = rows.reshape((n_blocks, cls.type_size))
        blocks = cls.dequantize_blocks(blocks)
        assert blocks.dtype == np.float32
        assert blocks.shape[-1] == cls.block_size
        return blocks.reshape(cls.__shape_from_bytes(shape))

    @classmethod
    def __shape_to_bytes(cls, shape: Sequence[int]):
        return quant_shape_to_byte_shape(shape, cls.qtype)

    @classmethod
    def __shape_from_bytes(cls, shape: Sequence[int]):
        return quant_shape_from_byte_shape(shape, cls.qtype)

    @classmethod
    def __quantize_array(cls, array: np.ndarray) -> np.ndarray:
        return _apply_over_grouped_rows(
            cls.quantize_rows,
            arr=array,
            otype=np.uint8,
            oshape=cls.__shape_to_bytes(array.shape),
        )

    @classmethod
    def __dequantize_array(cls, array: np.ndarray) -> np.ndarray:
        cls.init_grid()
        return _apply_over_grouped_rows(
            cls.dequantize_rows,
            arr=array,
            otype=np.float32,
            oshape=cls.__shape_from_bytes(array.shape),
        )

    # Placeholders only: replaced per-subclass in __init_subclass__ with
    # LazyNumpyTensor._wrap_fn wrappers around the array implementations.
    @classmethod
    def __quantize_lazy(cls, lazy_tensor: LazyNumpyTensor, /) -> Any:
        pass

    @classmethod
    def __dequantize_lazy(cls, lazy_tensor: LazyNumpyTensor, /) -> Any:
        pass

    @classmethod
    def can_quantize(cls, tensor: np.ndarray | LazyNumpyTensor) -> bool:
        """True when the last dimension holds a whole number of blocks."""
        return tensor.shape[-1] % cls.block_size == 0

    @classmethod
    def quantize(cls, tensor: np.ndarray | LazyNumpyTensor) -> np.ndarray:
        """Quantize an eager or lazy tensor; raises QuantError on bad shapes."""
        if not cls.can_quantize(tensor):
            raise QuantError(
                f"Can't quantize tensor with shape {tensor.shape} to {cls.qtype.name}"
            )
        if isinstance(tensor, LazyNumpyTensor):
            return cls.__quantize_lazy(tensor)
        else:
            return cls.__quantize_array(tensor)

    @classmethod
    def dequantize(cls, tensor: np.ndarray | LazyNumpyTensor) -> np.ndarray:
        """Dequantize an eager or lazy tensor to float32."""
        if isinstance(tensor, LazyNumpyTensor):
            return cls.__dequantize_lazy(tensor)
        else:
            return cls.__dequantize_array(tensor)
+
+
class BF16(__Quant, qtype=GGMLQuantizationType.BF16):
    """bfloat16: keep the high 16 bits of an IEEE float32."""

    @classmethod
    # same as ggml_compute_fp32_to_bf16 in ggml-impl.h
    def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        bits = blocks.view(np.uint32)
        # force nan to quiet
        is_nan = (bits & 0x7FFFFFFF) > 0x7F800000
        quieted = (bits & np.uint32(0xFFFF0000)) | np.uint32(64 << 16)
        bits = np.where(is_nan, quieted, bits)
        # round to nearest even
        bits = (np.uint64(bits) + (0x7FFF + ((bits >> 16) & 1))) >> 16
        return bits.astype(np.uint16).view(np.uint8)

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        # shift the bf16 bit pattern back into the high half of a float32
        return (blocks.view(np.int16).astype(np.int32) << 16).view(np.float32)

    @classmethod
    def dequantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parameter
    ) -> torch.Tensor:
        # same bit trick as the numpy path, on the torch side
        return (blocks.view(torch.int16).to(torch.int32) << 16).view(torch.float32)
+
+
class Q4_0(__Quant, qtype=GGMLQuantizationType.Q4_0):
    # 4-bit quantization with a single per-block fp16 scale (GGUF Q4_0).

    @classmethod
    def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Pack float32 blocks into Q4_0 bytes: 2-byte fp16 scale + packed nibbles."""
        n_blocks = blocks.shape[0]

        # The scale is derived from the absolute-maximum element of each block.
        imax = abs(blocks).argmax(axis=-1, keepdims=True)
        max = np.take_along_axis(blocks, imax, axis=-1)

        d = max / -8
        with np.errstate(divide="ignore"):
            id = np.where(d == 0, 0, 1 / d)
        # FIXME: Q4_0's reference rounding is cursed and depends on FMA
        qs = (
            np.trunc(
                (np.float64(blocks) * np.float64(id)) + np.float64(8.5),
                dtype=np.float32,
            )
            .astype(np.uint8)
            .clip(0, 15)
        )

        # Low nibbles hold the first half of the block, high nibbles the second.
        qs = qs.reshape((n_blocks, 2, cls.block_size // 2))
        qs = qs[..., 0, :] | (qs[..., 1, :] << np.uint8(4))

        d = d.astype(np.float16).view(np.uint8)

        return np.concatenate([d, qs], axis=-1)

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Reverse of ``quantize_blocks``: fp16 scale followed by packed nibbles."""
        n_blocks = blocks.shape[0]

        d, qs = np.hsplit(blocks, [2])

        d = d.view(np.float16).astype(np.float32)

        # Unpack low/high nibbles, then recentre from [0, 15] to [-8, 7].
        qs = qs.reshape((n_blocks, -1, 1, cls.block_size // 2)) >> np.array(
            [0, 4], dtype=np.uint8
        ).reshape((1, 1, 2, 1))
        qs = (qs & np.uint8(0x0F)).reshape((n_blocks, -1)).astype(np.int8) - np.int8(8)

        return d * qs.astype(np.float32)

    @classmethod
    def bake_inner(cls, parameter):
        # One-time load-time preparation: convert the fp16 scale to the
        # computation dtype and reorder the nibbles for the fast unpack path.
        blocks = parameter.data
        d, x = quick_split(blocks, [2])
        d = d.view(torch.float16).to(parameter.computation_dtype).view(torch.uint8)
        x = change_4bits_order(x).view(torch.uint8)
        parameter.data = torch.cat([d, x], dim=-1).contiguous()
        return

    @classmethod
    def dequantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parameter
    ) -> torch.Tensor:
        # Fast torch dequantize for blocks prepared by ``bake_inner``.
        # NOTE(review): quick_unpack_4bits (the signed variant) is assumed to
        # apply the -8 recentring that dequantize_blocks does explicitly — confirm.
        d, qs = quick_split(blocks, [2])
        d = d.view(parameter.computation_dtype)
        qs = quick_unpack_4bits(qs)
        return d * qs

    @classmethod
    def quantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parent
    ) -> torch.Tensor:
        # Copyright Forge 2024, AGPL V3 + CC-BY SA
        # NOTE(review): this packs adjacent element pairs (2i, 2i+1) into each
        # byte, whereas quantize_blocks pairs element i with i + block_size//2.
        # Verify which nibble order downstream consumers of this path expect.

        n_blocks = blocks.shape[0]

        imax = torch.abs(blocks).argmax(dim=-1, keepdim=True)
        max_vals = torch.gather(blocks, -1, imax)

        d = max_vals / -8
        id = torch.where(d == 0, torch.tensor(0.0, device=d.device), 1.0 / d)

        qs = torch.trunc((blocks * id) + 8.5).clip(0, 15).to(torch.uint8)

        qs = qs.reshape((n_blocks, block_size // 2, 2))
        qs = qs[..., 0] | (qs[..., 1] << 4)

        d = d.to(parent.computation_dtype).view(torch.uint8)

        return torch.cat([d, qs], dim=-1)
+
+
class Q4_1(__Quant, qtype=GGMLQuantizationType.Q4_1):
    """4-bit quantization with a per-block fp16 scale and minimum (GGUF Q4_1)."""

    @classmethod
    def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Pack float32 blocks into Q4_1: fp16 scale, fp16 min, packed nibbles."""
        n_blocks = blocks.shape[0]

        # Per-block range (named to avoid shadowing the max/min builtins).
        vmax = blocks.max(axis=-1, keepdims=True)
        vmin = blocks.min(axis=-1, keepdims=True)

        d = (vmax - vmin) / 15
        with np.errstate(divide="ignore"):
            inv_d = np.where(d == 0, 0, 1 / d)
        quants = np.trunc((blocks - vmin) * inv_d + np.float32(0.5), dtype=np.float32)
        quants = quants.astype(np.uint8).clip(0, 15)

        # Low nibbles come from the first half of the block, high from the second.
        quants = quants.reshape((n_blocks, 2, cls.block_size // 2))
        quants = quants[..., 0, :] | (quants[..., 1, :] << np.uint8(4))

        scale_bytes = d.astype(np.float16).view(np.uint8)
        min_bytes = vmin.astype(np.float16).view(np.uint8)

        return np.concatenate([scale_bytes, min_bytes, quants], axis=-1)

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Reverse of ``quantize_blocks`` (quants are unsigned; min is added back)."""
        n_blocks = blocks.shape[0]

        scale_bytes, rest = np.hsplit(blocks, [2])
        min_bytes, packed = np.hsplit(rest, [2])

        d = scale_bytes.view(np.float16).astype(np.float32)
        m = min_bytes.view(np.float16).astype(np.float32)

        shifts = np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2, 1))
        quants = packed.reshape((n_blocks, -1, 1, cls.block_size // 2)) >> shifts
        quants = (quants & np.uint8(0x0F)).reshape((n_blocks, -1)).astype(np.float32)

        return (d * quants) + m

    @classmethod
    def bake_inner(cls, parameter):
        """One-time load-time prep: convert fp16 scale/min to the computation
        dtype and reorder nibbles for the fast unpack path."""
        d, m, qs = quick_split(parameter.data, [2, 2])
        d = d.view(torch.float16).to(parameter.computation_dtype).view(torch.uint8)
        m = m.view(torch.float16).to(parameter.computation_dtype).view(torch.uint8)
        qs = change_4bits_order(qs).view(torch.uint8)
        parameter.data = torch.cat([d, m, qs], dim=-1).contiguous()

    @classmethod
    def dequantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parameter
    ) -> torch.Tensor:
        """Torch dequantize matching the layout written by ``bake_inner``."""
        d, m, qs = quick_split(blocks, [2, 2])
        scale = d.view(parameter.computation_dtype)
        minimum = m.view(parameter.computation_dtype)
        unpacked = quick_unpack_4bits_u(qs)
        return (scale * unpacked) + minimum
+
+
class Q5_0(__Quant, qtype=GGMLQuantizationType.Q5_0):
    # 5-bit quantization with one fp16 scale per block (GGUF Q5_0): the low
    # 4 bits of each quant are nibble-packed, and the 5th bits are packed
    # separately into a 4-byte little-endian bitfield.

    @classmethod
    def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Pack float32 blocks into Q5_0: fp16 scale + 4 high-bit bytes + nibbles."""
        n_blocks = blocks.shape[0]

        imax = abs(blocks).argmax(axis=-1, keepdims=True)
        max = np.take_along_axis(blocks, imax, axis=-1)

        d = max / -16
        with np.errstate(divide="ignore"):
            id = np.where(d == 0, 0, 1 / d)
        # FIXME: Q5_0's reference rounding is cursed and depends on FMA
        q = (
            np.trunc(
                (np.float64(blocks) * np.float64(id)) + np.float64(16.5),
                dtype=np.float32,
            )
            .astype(np.uint8)
            .clip(0, 31)
        )

        # Low nibbles: first half of the block paired with the second half.
        qs = q.reshape((n_blocks, 2, cls.block_size // 2))
        qs = (qs[..., 0, :] & np.uint8(0x0F)) | (qs[..., 1, :] << np.uint8(4))

        # 5th bit of each of the 32 quants, packed LSB-first into 4 bytes.
        qh = np.packbits(
            q.reshape((n_blocks, 1, 32)) >> np.uint8(4), axis=-1, bitorder="little"
        ).reshape(n_blocks, 4)

        d = d.astype(np.float16).view(np.uint8)

        return np.concatenate([d, qh, qs], axis=-1)

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Reverse of ``quantize_blocks``; recentres quants from [0, 31] to [-16, 15]."""
        n_blocks = blocks.shape[0]

        d, rest = np.hsplit(blocks, [2])
        qh, qs = np.hsplit(rest, [4])

        d = d.view(np.float16).astype(np.float32)
        qh = qh.view(np.uint32)

        # Spread the 32 high bits and the 2x16 low nibbles back out.
        qh = qh.reshape((n_blocks, 1)) >> np.array(
            [i for i in range(32)], dtype=np.uint32
        ).reshape((1, 32))
        ql = qs.reshape((n_blocks, -1, 1, cls.block_size // 2)) >> np.array(
            [0, 4], dtype=np.uint8
        ).reshape((1, 1, 2, 1))
        qh = (qh & np.uint32(0x01)).astype(np.uint8)
        ql = (ql & np.uint8(0x0F)).reshape((n_blocks, -1))

        qs = (ql | (qh << np.uint8(4))).astype(np.int8) - np.int8(16)

        return d * qs.astype(np.float32)

    @classmethod
    def dequantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parameter
    ) -> torch.Tensor:
        """Torch port of ``dequantize_blocks``."""

        def to_uint32(x):
            # pytorch uint32 by City96 - Apache-2.0
            x = x.view(torch.uint8).to(torch.int32)
            return (x[:, 0] | x[:, 1] << 8 | x[:, 2] << 16 | x[:, 3] << 24).unsqueeze(1)

        n_blocks = blocks.shape[0]

        d, qh, qs = quick_split(blocks, [2, 4])
        d = d.view(torch.float16).to(parameter.computation_dtype)
        qh = to_uint32(qh)

        qh = qh.reshape(n_blocks, 1) >> torch.arange(
            32, device=d.device, dtype=torch.int32
        ).reshape(1, 32)
        ql = qs.reshape(n_blocks, -1, 1, block_size // 2) >> torch.tensor(
            [0, 4], device=d.device, dtype=torch.uint8
        ).reshape(1, 1, 2, 1)

        qh = (qh & 1).to(torch.uint8)
        ql = (ql & 0x0F).reshape(n_blocks, -1)

        qs = (ql | (qh << 4)).to(torch.int8) - 16
        return d * qs

    @classmethod
    def quantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parent
    ) -> torch.Tensor:
        # Copyright Forge 2024, AGPL V3 + CC-BY SA
        # Torch mirror of quantize_blocks; the loop below re-implements
        # np.packbits(..., bitorder="little") one output byte at a time.

        n_blocks = blocks.shape[0]

        imax = torch.abs(blocks).argmax(dim=-1, keepdim=True)
        max_val = torch.gather(blocks, dim=-1, index=imax)

        d = max_val / -16
        id = torch.where(d == 0, torch.tensor(0.0, device=d.device), 1.0 / d)

        q = (
            torch.trunc((blocks.float() * id.float()) + 16.5)
            .clamp(0, 31)
            .to(torch.uint8)
        )

        qs = q.view(n_blocks, 2, block_size // 2)
        qs = (qs[..., 0, :] & 0x0F) | (qs[..., 1, :] << 4)

        qh = q.view(n_blocks, 32)
        qh_packed = torch.zeros((n_blocks, 4), dtype=torch.uint8, device=qh.device)

        # Gather bit 4 of each group of 8 consecutive quants into one byte (LSB first).
        for i in range(4):
            qh_packed[:, i] = (
                (qh[:, i * 8 + 0] >> 4)
                | (qh[:, i * 8 + 1] >> 3 & 0x02)
                | (qh[:, i * 8 + 2] >> 2 & 0x04)
                | (qh[:, i * 8 + 3] >> 1 & 0x08)
                | (qh[:, i * 8 + 4] << 0 & 0x10)
                | (qh[:, i * 8 + 5] << 1 & 0x20)
                | (qh[:, i * 8 + 6] << 2 & 0x40)
                | (qh[:, i * 8 + 7] << 3 & 0x80)
            )

        # NOTE(review): unlike Q4_0/Q8_0's torch quantizers this stores the
        # scale as fp16 rather than parent.computation_dtype — confirm intended.
        d = d.to(torch.float16).view(torch.uint8)

        return torch.cat([d, qh_packed, qs], dim=-1)
+
+
class Q5_1(__Quant, qtype=GGMLQuantizationType.Q5_1):
    # 5-bit quantization with per-block fp16 scale and minimum (GGUF Q5_1).

    @classmethod
    def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Pack float32 blocks into Q5_1: fp16 scale, fp16 min, high bits, nibbles."""
        n_blocks = blocks.shape[0]

        max = blocks.max(axis=-1, keepdims=True)
        min = blocks.min(axis=-1, keepdims=True)

        d = (max - min) / 31
        with np.errstate(divide="ignore"):
            id = np.where(d == 0, 0, 1 / d)
        q = (
            np.trunc((blocks - min) * id + np.float32(0.5), dtype=np.float32)
            .astype(np.uint8)
            .clip(0, 31)
        )

        # Low nibbles: first half of the block paired with the second half.
        qs = q.reshape((n_blocks, 2, cls.block_size // 2))
        qs = (qs[..., 0, :] & np.uint8(0x0F)) | (qs[..., 1, :] << np.uint8(4))

        # 5th bits packed LSB-first into 4 bytes.
        qh = np.packbits(
            q.reshape((n_blocks, 1, 32)) >> np.uint8(4), axis=-1, bitorder="little"
        ).reshape(n_blocks, 4)

        d = d.astype(np.float16).view(np.uint8)
        m = min.astype(np.float16).view(np.uint8)

        return np.concatenate([d, m, qh, qs], axis=-1)

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Reverse of ``quantize_blocks`` (quants are unsigned; min is added back)."""
        n_blocks = blocks.shape[0]

        d, rest = np.hsplit(blocks, [2])
        m, rest = np.hsplit(rest, [2])
        qh, qs = np.hsplit(rest, [4])

        d = d.view(np.float16).astype(np.float32)
        m = m.view(np.float16).astype(np.float32)
        qh = qh.view(np.uint32)

        # Spread the 32 high bits and the 2x16 low nibbles back out.
        qh = qh.reshape((n_blocks, 1)) >> np.array(
            [i for i in range(32)], dtype=np.uint32
        ).reshape((1, 32))
        ql = qs.reshape((n_blocks, -1, 1, cls.block_size // 2)) >> np.array(
            [0, 4], dtype=np.uint8
        ).reshape((1, 1, 2, 1))
        qh = (qh & np.uint32(0x01)).astype(np.uint8)
        ql = (ql & np.uint8(0x0F)).reshape((n_blocks, -1))

        qs = (ql | (qh << np.uint8(4))).astype(np.float32)

        return (d * qs) + m

    @classmethod
    def dequantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parameter
    ) -> torch.Tensor:
        """Torch port of ``dequantize_blocks``."""

        def to_uint32(x):
            # pytorch uint32 by City96 - Apache-2.0
            x = x.view(torch.uint8).to(torch.int32)
            return (x[:, 0] | x[:, 1] << 8 | x[:, 2] << 16 | x[:, 3] << 24).unsqueeze(1)

        n_blocks = blocks.shape[0]

        d, m, qh, qs = quick_split(blocks, [2, 2, 4])
        d = d.view(torch.float16).to(parameter.computation_dtype)
        m = m.view(torch.float16).to(parameter.computation_dtype)
        qh = to_uint32(qh)

        qh = qh.reshape((n_blocks, 1)) >> torch.arange(
            32, device=d.device, dtype=torch.int32
        ).reshape(1, 32)
        ql = qs.reshape((n_blocks, -1, 1, block_size // 2)) >> torch.tensor(
            [0, 4], device=d.device, dtype=torch.uint8
        ).reshape(1, 1, 2, 1)
        qh = (qh & 1).to(torch.uint8)
        ql = (ql & 0x0F).reshape((n_blocks, -1))

        qs = ql | (qh << 4)
        return (d * qs) + m
+
+
class Q8_0(__Quant, qtype=GGMLQuantizationType.Q8_0):
    """8-bit quantization: a per-block fp16 scale followed by int8 quants."""

    @classmethod
    # Implementation of Q8_0 with bit-exact same results as reference implementation in ggml-quants.c
    def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Pack float32 blocks into Q8_0 bytes."""
        scale = abs(blocks).max(axis=1, keepdims=True) / 127
        with np.errstate(divide="ignore"):
            inv_scale = np.where(scale == 0, 0, 1 / scale)
        quants = np_roundf(blocks * inv_scale)

        scale_bytes = scale.astype(np.float16).view(np.uint8)  # (n_blocks, 2)
        quant_bytes = quants.astype(np.int8).view(np.uint8)  # (n_blocks, block_size)

        return np.concatenate([scale_bytes, quant_bytes], axis=1)

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Reverse of ``quantize_blocks``."""
        scale_bytes, quant_bytes = np.split(blocks, [2], axis=1)
        scale = scale_bytes.view(np.float16).astype(np.float32)
        quants = quant_bytes.view(np.int8).astype(np.float32)
        return quants * scale

    @classmethod
    def bake_inner(cls, parameter):
        """One-time load-time conversion of the fp16 scale to the computation dtype."""
        d, x = quick_split(parameter.data, [2])
        x = x.view(torch.int8)
        d = d.view(torch.float16).to(parameter.computation_dtype).view(torch.int8)
        parameter.data = torch.cat([d, x], dim=-1).contiguous()

    @classmethod
    def dequantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parameter
    ) -> torch.Tensor:
        """Torch dequantize matching the layout written by ``bake_inner``."""
        d, x = quick_split(blocks, [2])
        scale = d.view(parameter.computation_dtype)
        return x * scale

    @classmethod
    def quantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parent
    ) -> torch.Tensor:
        """Torch-side Q8_0 quantization; stores the scale in *parent*'s computation dtype."""
        # Copyright Forge 2024, AGPL V3 + CC-BY SA
        scale = torch.abs(blocks).max(dim=1, keepdim=True).values / 127
        inv_scale = torch.where(scale == 0, torch.zeros_like(scale), 1 / scale)
        quants = torch.round(blocks * inv_scale)
        scale = scale.to(parent.computation_dtype).view(torch.int8)
        quants = quants.to(torch.int8)
        return torch.cat([scale, quants], dim=1)
+
+
class Q2_K(__Quant, qtype=GGMLQuantizationType.Q2_K):
    # 2-bit K-quant: 16 sub-blocks of 16 elements, each with a 4-bit
    # scale/min pair, plus per-superblock fp16 d and dmin.

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize Q2_K superblocks of QK_K elements to float32."""
        n_blocks = blocks.shape[0]

        scales, rest = np.hsplit(blocks, [QK_K // 16])
        qs, rest = np.hsplit(rest, [QK_K // 4])
        d, dmin = np.hsplit(rest, [2])

        d = d.view(np.float16).astype(np.float32)
        dmin = dmin.view(np.float16).astype(np.float32)

        # (n_blocks, 16, 1)
        dl = (d * (scales & 0xF).astype(np.float32)).reshape((n_blocks, QK_K // 16, 1))
        ml = (dmin * (scales >> 4).astype(np.float32)).reshape(
            (n_blocks, QK_K // 16, 1)
        )

        # Four 2-bit quants per byte.
        shift = np.array([0, 2, 4, 6], dtype=np.uint8).reshape((1, 1, 4, 1))

        qs = (qs.reshape((n_blocks, -1, 1, 32)) >> shift) & np.uint8(3)

        qs = qs.reshape((n_blocks, QK_K // 16, 16)).astype(np.float32)

        qs = dl * qs - ml

        return qs.reshape((n_blocks, -1))

    @classmethod
    def dequantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parameter
    ) -> torch.Tensor:
        """Torch port of ``dequantize_blocks``."""
        # (c) City96 || Apache-2.0 (apache.org/licenses/LICENSE-2.0)
        n_blocks = blocks.shape[0]
        scales, qs, d, dmin = quick_split(blocks, [QK_K // 16, QK_K // 4, 2])
        d = d.view(torch.float16).to(parameter.computation_dtype)
        dmin = dmin.view(torch.float16).to(parameter.computation_dtype)
        # (n_blocks, 16, 1)
        dl = (d * (scales & 0xF)).reshape((n_blocks, QK_K // 16, 1))
        ml = (dmin * (scales >> 4)).reshape((n_blocks, QK_K // 16, 1))
        shift = torch.tensor([0, 2, 4, 6], device=d.device, dtype=torch.uint8).reshape(
            (1, 1, 4, 1)
        )
        qs = (qs.reshape((n_blocks, -1, 1, 32)) >> shift) & 3
        qs = qs.reshape((n_blocks, QK_K // 16, 16))
        qs = dl * qs - ml
        return qs.reshape((n_blocks, -1))
+
+
class Q3_K(__Quant, qtype=GGMLQuantizationType.Q3_K):
    # 3-bit K-quant: 2-bit low quants plus a separate high-bit mask, with
    # sixteen 6-bit signed sub-block scales and one fp16 superblock scale.

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize Q3_K superblocks of QK_K elements to float32."""
        n_blocks = blocks.shape[0]

        hmask, rest = np.hsplit(blocks, [QK_K // 8])
        qs, rest = np.hsplit(rest, [QK_K // 4])
        scales, d = np.hsplit(rest, [12])

        d = d.view(np.float16).astype(np.float32)

        # The scales are packed at 6-bit each in this pattern:
        #  0: IIIIAAAA
        #  1: JJJJBBBB
        #  2: KKKKCCCC
        #  3: LLLLDDDD
        #  4: MMMMEEEE
        #  5: NNNNFFFF
        #  6: OOOOGGGG
        #  7: PPPPHHHH
        #  8: MMIIEEAA
        #  9: NNJJFFBB
        # 10: OOKKGGCC
        # 11: PPLLHHDD
        lscales, hscales = np.hsplit(scales, [8])
        lscales = lscales.reshape((n_blocks, 1, 8)) >> np.array(
            [0, 4], dtype=np.uint8
        ).reshape((1, 2, 1))
        lscales = lscales.reshape((n_blocks, 16))
        hscales = hscales.reshape((n_blocks, 1, 4)) >> np.array(
            [0, 2, 4, 6], dtype=np.uint8
        ).reshape((1, 4, 1))
        hscales = hscales.reshape((n_blocks, 16))
        scales = (lscales & np.uint8(0x0F)) | (
            (hscales & np.uint8(0x03)) << np.uint8(4)
        )
        # Recentre the 6-bit scales from [0, 63] to [-32, 31].
        scales = (scales.astype(np.int8) - np.int8(32)).astype(np.float32)

        dl = (d * scales).reshape((n_blocks, 16, 1))

        # Unpack 2-bit low quants and the 1-bit high mask.
        ql = qs.reshape((n_blocks, -1, 1, 32)) >> np.array(
            [0, 2, 4, 6], dtype=np.uint8
        ).reshape((1, 1, 4, 1))
        qh = hmask.reshape(n_blocks, -1, 1, 32) >> np.array(
            [i for i in range(8)], dtype=np.uint8
        ).reshape((1, 1, 8, 1))
        ql = ql.reshape((n_blocks, 16, QK_K // 16)) & np.uint8(3)
        qh = qh.reshape((n_blocks, 16, QK_K // 16)) & np.uint8(1)
        qh = qh ^ np.uint8(1)  # strangely, the offset is zero when the bitmask is 1
        q = (ql.astype(np.int8) - (qh << np.uint8(2)).astype(np.int8)).astype(
            np.float32
        )

        return (dl * q).reshape((n_blocks, QK_K))

    @classmethod
    def dequantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parameter
    ) -> torch.Tensor:
        """Torch port of ``dequantize_blocks`` (see its scale-layout comment)."""
        # (c) City96 || Apache-2.0 (apache.org/licenses/LICENSE-2.0)
        n_blocks = blocks.shape[0]
        hmask, qs, scales, d = quick_split(blocks, [QK_K // 8, QK_K // 4, 12])
        d = d.view(torch.float16).to(parameter.computation_dtype)
        lscales, hscales = scales[:, :8], scales[:, 8:]
        lscales = lscales.reshape((n_blocks, 1, 8)) >> torch.tensor(
            [0, 4], device=d.device, dtype=torch.uint8
        ).reshape((1, 2, 1))
        lscales = lscales.reshape((n_blocks, 16))
        hscales = hscales.reshape((n_blocks, 1, 4)) >> torch.tensor(
            [0, 2, 4, 6], device=d.device, dtype=torch.uint8
        ).reshape((1, 4, 1))
        hscales = hscales.reshape((n_blocks, 16))
        scales = (lscales & 0x0F) | ((hscales & 0x03) << 4)
        scales = scales.to(torch.int8) - 32
        dl = (d * scales).reshape((n_blocks, 16, 1))
        ql = qs.reshape((n_blocks, -1, 1, 32)) >> torch.tensor(
            [0, 2, 4, 6], device=d.device, dtype=torch.uint8
        ).reshape((1, 1, 4, 1))
        qh = hmask.reshape(n_blocks, -1, 1, 32) >> torch.tensor(
            [i for i in range(8)], device=d.device, dtype=torch.uint8
        ).reshape((1, 1, 8, 1))
        ql = ql.reshape((n_blocks, 16, QK_K // 16)) & 3
        # Offset is zero when the mask bit is set (hence the XOR), as above.
        qh = (qh.reshape((n_blocks, 16, QK_K // 16)) & 1) ^ 1
        q = ql.to(torch.int8) - (qh << 2).to(torch.int8)
        return (dl * q).reshape((n_blocks, QK_K))
+
+
class Q4_K(__Quant, qtype=GGMLQuantizationType.Q4_K):
    # Size in bytes of the packed 6-bit scale/min table per superblock.
    K_SCALE_SIZE = 12

    @staticmethod
    def get_scale_min(scales: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
        """Unpack the 12-byte table into eight 6-bit scales and eight 6-bit mins."""
        n_blocks = scales.shape[0]
        scales = scales.view(np.uint8)
        ### Unpacking the following: ###
        #  0 EEAAAAAA
        #  1 FFBBBBBB
        #  2 GGCCCCCC
        #  3 HHDDDDDD
        #  4 eeaaaaaa
        #  5 ffbbbbbb
        #  6 ggcccccc
        #  7 hhdddddd
        #  8 eeeeEEEE
        #  9 ffffFFFF
        # 10 ggggGGGG
        # 11 hhhhHHHH
        scales = scales.reshape((n_blocks, 3, 4))
        d, m, m_d = np.split(scales, 3, axis=-2)

        sc = np.concatenate([d & 0x3F, (m_d & 0x0F) | ((d >> 2) & 0x30)], axis=-1)
        min = np.concatenate([m & 0x3F, (m_d >> 4) | ((m >> 2) & 0x30)], axis=-1)

        return (sc.reshape((n_blocks, 8)), min.reshape((n_blocks, 8)))

    @staticmethod
    def get_scale_min_pytorch(scales):
        # Torch port of get_scale_min (see its bit-layout comment).
        n_blocks = scales.shape[0]
        scales = scales.view(torch.uint8)
        scales = scales.reshape((n_blocks, 3, 4))
        d, m, m_d = torch.split(scales, scales.shape[-2] // 3, dim=-2)
        sc = torch.cat([d & 0x3F, (m_d & 0x0F) | ((d >> 2) & 0x30)], dim=-1)
        min = torch.cat([m & 0x3F, (m_d >> 4) | ((m >> 2) & 0x30)], dim=-1)
        return (sc.reshape((n_blocks, 8)), min.reshape((n_blocks, 8)))

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize Q4_K superblocks: fp16 d/dmin, 6-bit sub-scales, nibbles."""
        n_blocks = blocks.shape[0]

        d, rest = np.hsplit(blocks, [2])
        dmin, rest = np.hsplit(rest, [2])
        scales, qs = np.hsplit(rest, [cls.K_SCALE_SIZE])

        d = d.view(np.float16).astype(np.float32)
        dmin = dmin.view(np.float16).astype(np.float32)

        sc, m = Q4_K.get_scale_min(scales)

        d = (d * sc.astype(np.float32)).reshape((n_blocks, -1, 1))
        dm = (dmin * m.astype(np.float32)).reshape((n_blocks, -1, 1))

        qs = qs.reshape((n_blocks, -1, 1, 32)) >> np.array(
            [0, 4], dtype=np.uint8
        ).reshape((1, 1, 2, 1))
        qs = (qs & np.uint8(0x0F)).reshape((n_blocks, -1, 32)).astype(np.float32)

        return (d * qs - dm).reshape((n_blocks, QK_K))

    @classmethod
    def bake_inner(cls, parameter):  # Only compute one time when model load
        # Copyright Forge 2024
        # Pre-expands the packed scale/min table into per-sub-block d and dm in
        # the computation dtype, and reorders nibbles for the fast unpack path.

        blocks = parameter.data
        n_blocks = blocks.shape[0]
        d, dmin, scales, qs = quick_split(blocks, [2, 2, cls.K_SCALE_SIZE])
        d = d.view(torch.float16).to(parameter.computation_dtype)
        dmin = dmin.view(torch.float16).to(parameter.computation_dtype)
        sc, m = Q4_K.get_scale_min_pytorch(scales)
        d = (d * sc).reshape((n_blocks, -1, 1))
        dm = (dmin * m).reshape((n_blocks, -1, 1)).to(parameter.computation_dtype)

        qs = qs.reshape((n_blocks, -1, 1, 32))
        qs = change_4bits_order(qs)

        d = d.view(torch.uint8).reshape((n_blocks, -1))
        dm = dm.view(torch.uint8).reshape((n_blocks, -1))
        qs = qs.view(torch.uint8)

        parameter.data = torch.cat([d, dm, qs], dim=-1).contiguous()
        return

    @classmethod
    def dequantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parameter
    ) -> torch.Tensor:
        # Compute in each diffusion iteration
        # NOTE(review): the [16, 16] split assumes 8 sub-scales at 2 bytes each,
        # i.e. a 16-bit computation dtype as written by bake_inner — confirm.

        n_blocks = blocks.shape[0]

        d, dm, qs = quick_split(blocks, [16, 16])
        d = d.view(parameter.computation_dtype).view((n_blocks, -1, 1))
        dm = dm.view(parameter.computation_dtype).view((n_blocks, -1, 1))
        qs = quick_unpack_4bits_u(qs).view((n_blocks, -1, 32))

        return (d * qs - dm).reshape((n_blocks, QK_K))
+
+
class Q5_K(__Quant, qtype=GGMLQuantizationType.Q5_K):
    # 5-bit K-quant: Q4_K's scale/min table plus a separate high-bit plane.

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize Q5_K superblocks of QK_K elements to float32."""
        n_blocks = blocks.shape[0]

        d, rest = np.hsplit(blocks, [2])
        dmin, rest = np.hsplit(rest, [2])
        scales, rest = np.hsplit(rest, [Q4_K.K_SCALE_SIZE])
        qh, qs = np.hsplit(rest, [QK_K // 8])

        d = d.view(np.float16).astype(np.float32)
        dmin = dmin.view(np.float16).astype(np.float32)

        # Reuses Q4_K's 6-bit scale/min unpacking.
        sc, m = Q4_K.get_scale_min(scales)

        d = (d * sc.astype(np.float32)).reshape((n_blocks, -1, 1))
        dm = (dmin * m.astype(np.float32)).reshape((n_blocks, -1, 1))

        ql = qs.reshape((n_blocks, -1, 1, 32)) >> np.array(
            [0, 4], dtype=np.uint8
        ).reshape((1, 1, 2, 1))
        qh = qh.reshape((n_blocks, -1, 1, 32)) >> np.array(
            [i for i in range(8)], dtype=np.uint8
        ).reshape((1, 1, 8, 1))
        ql = (ql & np.uint8(0x0F)).reshape((n_blocks, -1, 32))
        qh = (qh & np.uint8(0x01)).reshape((n_blocks, -1, 32))
        q = (ql | (qh << np.uint8(4))).astype(np.float32)

        return (d * q - dm).reshape((n_blocks, QK_K))

    @classmethod
    def dequantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parameter
    ) -> torch.Tensor:
        """Torch port of ``dequantize_blocks``."""
        # (c) City96 || Apache-2.0 (apache.org/licenses/LICENSE-2.0)
        QK_K = 256
        K_SCALE_SIZE = 12
        n_blocks = blocks.shape[0]
        d, dmin, scales, qh, qs = quick_split(blocks, [2, 2, K_SCALE_SIZE, QK_K // 8])
        d = d.view(torch.float16).to(parameter.computation_dtype)
        dmin = dmin.view(torch.float16).to(parameter.computation_dtype)
        sc, m = Q4_K.get_scale_min_pytorch(scales)
        d = (d * sc).reshape((n_blocks, -1, 1))
        dm = (dmin * m).reshape((n_blocks, -1, 1))
        ql = qs.reshape((n_blocks, -1, 1, 32)) >> torch.tensor(
            [0, 4], device=d.device, dtype=torch.uint8
        ).reshape((1, 1, 2, 1))
        qh = qh.reshape((n_blocks, -1, 1, 32)) >> torch.tensor(
            [i for i in range(8)], device=d.device, dtype=torch.uint8
        ).reshape((1, 1, 8, 1))
        ql = (ql & 0x0F).reshape((n_blocks, -1, 32))
        qh = (qh & 0x01).reshape((n_blocks, -1, 32))
        q = ql | (qh << 4)
        return (d * q - dm).reshape((n_blocks, QK_K))
+
+
class Q6_K(__Quant, qtype=GGMLQuantizationType.Q6_K):
    # 6-bit K-quant: 4-bit low nibbles plus 2-bit high pairs, with sixteen
    # int8 sub-block scales and one fp16 superblock scale.

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize Q6_K superblocks of QK_K elements to float32."""
        n_blocks = blocks.shape[0]

        ql, rest = np.hsplit(blocks, [QK_K // 2])
        qh, rest = np.hsplit(rest, [QK_K // 4])
        scales, d = np.hsplit(rest, [QK_K // 16])

        scales = scales.view(np.int8).astype(np.float32)
        d = d.view(np.float16).astype(np.float32)
        d = (d * scales).reshape((n_blocks, QK_K // 16, 1))

        # Combine 4 low bits and 2 high bits, then recentre from [0, 63] to [-32, 31].
        ql = ql.reshape((n_blocks, -1, 1, 64)) >> np.array(
            [0, 4], dtype=np.uint8
        ).reshape((1, 1, 2, 1))
        ql = (ql & np.uint8(0x0F)).reshape((n_blocks, -1, 32))
        qh = qh.reshape((n_blocks, -1, 1, 32)) >> np.array(
            [0, 2, 4, 6], dtype=np.uint8
        ).reshape((1, 1, 4, 1))
        qh = (qh & np.uint8(0x03)).reshape((n_blocks, -1, 32))
        q = (ql | (qh << np.uint8(4))).astype(np.int8) - np.int8(32)
        q = q.reshape((n_blocks, QK_K // 16, -1)).astype(np.float32)

        return (d * q).reshape((n_blocks, QK_K))

    @classmethod
    def dequantize_blocks_pytorch(
        cls, blocks, block_size, type_size, parameter
    ) -> torch.Tensor:
        """Torch port of ``dequantize_blocks``."""
        # Written by ChatGPT
        n_blocks = blocks.shape[0]
        (
            ql,
            qh,
            scales,
            d,
        ) = quick_split(blocks, [QK_K // 2, QK_K // 4, QK_K // 16])
        scales = scales.view(torch.int8).to(parameter.computation_dtype)
        d = d.view(torch.float16).to(parameter.computation_dtype)
        d = (d * scales).reshape((n_blocks, QK_K // 16, 1))
        ql = ql.reshape((n_blocks, -1, 1, 64)) >> torch.tensor(
            [0, 4], device=d.device, dtype=torch.uint8
        ).reshape((1, 1, 2, 1))
        ql = (ql & 0x0F).reshape((n_blocks, -1, 32))
        qh = qh.reshape((n_blocks, -1, 1, 32)) >> torch.tensor(
            [0, 2, 4, 6], device=d.device, dtype=torch.uint8
        ).reshape((1, 1, 4, 1))
        qh = (qh & 0x03).reshape((n_blocks, -1, 32))
        q = (ql | (qh << 4)).to(torch.int8) - 32
        q = q.reshape((n_blocks, QK_K // 16, -1))
        return (d * q).reshape((n_blocks, QK_K))
+
+
class IQ2_XXS(__Quant, qtype=GGMLQuantizationType.IQ2_XXS):
    # Importance-matrix 2.0625-bpw codec: values come from a shared codebook
    # grid; per-group sign patterns are looked up in the ksigns table below.

    # 128-entry table mapping a 7-bit sign index to an 8-bit sign bitmask
    # (the top bit of each entry makes the total popcount even).
    ksigns: bytes = (
        b"\x00\x81\x82\x03\x84\x05\x06\x87\x88\x09\x0a\x8b\x0c\x8d\x8e\x0f"
        b"\x90\x11\x12\x93\x14\x95\x96\x17\x18\x99\x9a\x1b\x9c\x1d\x1e\x9f"
        b"\xa0\x21\x22\xa3\x24\xa5\xa6\x27\x28\xa9\xaa\x2b\xac\x2d\x2e\xaf"
        b"\x30\xb1\xb2\x33\xb4\x35\x36\xb7\xb8\x39\x3a\xbb\x3c\xbd\xbe\x3f"
        b"\xc0\x41\x42\xc3\x44\xc5\xc6\x47\x48\xc9\xca\x4b\xcc\x4d\x4e\xcf"
        b"\x50\xd1\xd2\x53\xd4\x55\x56\xd7\xd8\x59\x5a\xdb\x5c\xdd\xde\x5f"
        b"\x60\xe1\xe2\x63\xe4\x65\x66\xe7\xe8\x69\x6a\xeb\x6c\xed\xee\x6f"
        b"\xf0\x71\x72\xf3\x74\xf5\xf6\x77\x78\xf9\xfa\x7b\xfc\x7d\x7e\xff"
    )

    # iq2xxs_grid, but with each byte of the original packed in 2 bits,
    # by mapping 0x08 to 0, 0x19 to 1, and 0x2b to 2.
    grid_shape = (256, 8)
    grid_map = (0x08, 0x19, 0x2B)
    grid_hex = (
        b"00000200050008000a00110014002000220028002a0041004400500058006100"
        b"6400800082008a00a20001010401100115014001840198010002020222028202"
        b"010404041004210424044004420448046004810484049004a404000502050805"
        b"200546056905800591050906100640068406a406000805080808140828084108"
        b"440850085208880804094009020a140a01100410101021104010601084109010"
        b"951000110811201150115a118011241245120014081420142514491480141815"
        b"6215001616160118041810184018811800190519a019511a002002200a204420"
        b"6120802082202921482100220222012404241024402456240025412564259026"
        b"082820289428442a014004401040184021402440404048405640604081408440"
        b"9040004120416141804185410142104248425642684200440844204480449944"
        b"124524450046014804481048404845480049584961498249454a904a00500850"
        b"1150195020508050885004514251a4519152905492540a550156545600581158"
        b"195864584059085a046010604060686000615561186260620064056410651265"
        b"84654268008002800a8041808280048118814081118201840484108415844084"
        b"608400854685948509864086608602880489118a0490109024904090a1901691"
        b"8091459200942294449451958198209902a050a085a009a100a218a450a804a9"
    )

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize IQ2_XXS blocks via the codebook grid and sign table."""
        n_blocks = blocks.shape[0]

        d, qs = np.hsplit(blocks, [2])

        d = d.view(np.float16).astype(np.float32)

        # Each pair of uint32 words holds 4 grid indices and 4 sign/scale fields.
        qs = qs.view(np.uint32).reshape(n_blocks, -1, 2)

        # Per-group scale: top 4 bits of the second word.
        db = (
            d
            * (np.float32(0.5) + (qs[..., 1] >> 28).astype(np.float32))
            * np.float32(0.25)
        )
        db = db.reshape((n_blocks, -1, 1, 1))

        # get the sign indices and unpack the bits
        signs = qs[..., 1].reshape((n_blocks, -1, 1)) >> np.array(
            [0, 7, 14, 21], dtype=np.uint32
        ).reshape((1, 1, 4))
        ksigns = np.frombuffer(cls.ksigns, dtype=np.uint8).reshape((1, 1, 1, 128))
        signs = (signs & np.uint32(0x7F)).reshape((n_blocks, -1, 4, 1))
        signs = np.take_along_axis(ksigns, signs, axis=-1)
        signs = signs.reshape((n_blocks, -1, 4, 1)) >> np.array(
            [i for i in range(8)], dtype=np.uint8
        ).reshape((1, 1, 1, 8))
        signs = signs & np.uint8(0x01)
        signs = np.where(signs == 0, np.float32(1), np.float32(-1))
        signs = signs.reshape((n_blocks, -1, 4, 8))

        # Look up 8-element rows of the codebook by the byte indices in word 0.
        assert cls.grid is not None
        grid = np.take_along_axis(
            cls.grid,
            qs[..., 0].copy().view(np.uint8).reshape((n_blocks, -1, 1, 1)),
            axis=-2,
        )
        grid = grid.reshape((n_blocks, -1, 4, 8))

        return (db * grid * signs).reshape((n_blocks, -1))
+
+
class IQ2_XS(__Quant, qtype=GGMLQuantizationType.IQ2_XS):
    # Importance-matrix 2.3125-bpw codec: 9-bit grid index + 7-bit sign index
    # per uint16, with 4-bit per-group scales stored at the end of the block.

    # iq2xs_grid, but with each byte of the original packed in 2 bits,
    # by mapping 0x08 to 0, 0x19 to 1, and 0x2b to 2.
    grid_shape = (512, 8)
    grid_map = (0x08, 0x19, 0x2B)
    grid_hex = (
        b"00000200050008000a0011001400160019002000220025002800410044004600"
        b"49005000520055005800610064008000820085008800910094009900a0000101"
        b"04010601090110011201150118011a0121012401400142014501480151015401"
        b"6001680181018401900100020202050208021102140220024102440250025502"
        b"80028a0201040404060409041004120415041804210424044004420445044804"
        b"5104540456046004810484049004000502050505080511051405200541054405"
        b"500561058005010604061006260640064206840600080208050808080a081108"
        b"14082008250841084408500858088008a008aa08010904091009400981098909"
        b"000a200a280a960aa00a01100410061009101010121015101810211024104010"
        b"4210451048105110541060106a10811084109010001102110511081111111411"
        b"2011411144115011801194119611011204120612101240126012001402140514"
        b"0814111414142014411444144914501464148014011504151015401500161416"
        b"49160118041810181218401854188618001905196619511aa91a002002200520"
        b"08200a201120142020204120442050208020a020012104211021402148216521"
        b"002222228022a82201240424102429244024002541255225992501261a26a626"
        b"002808280a28202855288828a22868299029082a202a822a882a8a2a01400440"
        b"0640094010401240154018402140244040404240454048404a40514054406040"
        b"6540814084409040004102410541084111411441204141414441504180418541"
        b"a241014204421042124229424042004402440544084411441444194420444144"
        b"4444504480449444014504451045244540459a4500460a464446504601480448"
        b"1048404845485448624800491149444950496949044a00500250055008501150"
        b"145020502850415044505050805001510451105115514051425100524452aa52"
        b"0154045410542154405460548154a154005508558055885521566856a1560058"
        b"14584158505899581a5940594259855a0160046010604060546062608660a960"
        b"006124624a62926200641664106540654565a46501686a682569066a546a626a"
        b"00800280058008801180148020802a8041804480508080808280a880aa800181"
        b"0481068110814081518159810082208280828282a082a8820184048410841284"
        b"158440846084898400854485a58518866a860088088825885a8880888288a888"
        b"0689228a808a888a968aa88a0190049010904090569084900091229164915692"
        b"89920094059444945094589429959095929541965198a6984999159a609a00a0"
        b"02a008a00aa020a02aa0a0a051a159a1a6a100a202a208a22aa280a2a0a240a4"
        b"95a465a698a60aa820a822a828a8a0a8a8a804a984a986a928aa2aaa91aaaaaa"
    )

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize IQ2_XS blocks via the codebook grid and IQ2_XXS sign table."""
        n_blocks = blocks.shape[0]

        d, rest = np.hsplit(blocks, [2])
        qs, scales = np.hsplit(rest, [2 * QK_K // 8])

        d = d.view(np.float16).astype(np.float32)
        qs = qs.view(np.uint16)

        # Two 4-bit scales per byte.
        scales = scales.reshape((n_blocks, -1, 1)) >> np.array(
            [0, 4], dtype=np.uint8
        ).reshape((1, 1, 2))
        scales = (scales & 0x0F).reshape((n_blocks, -1))
        db = d * (np.float32(0.5) + scales) * np.float32(0.25)
        db = db.reshape((n_blocks, -1, 1, 1))

        # get the sign indices and unpack the bits
        signs = np.frombuffer(IQ2_XXS.ksigns, dtype=np.uint8).reshape(1, 1, 128)
        signs = np.take_along_axis(signs, (qs >> 9).reshape((n_blocks, -1, 1)), axis=-1)
        signs = signs.reshape((n_blocks, -1, 1)) >> np.array(
            [i for i in range(8)], dtype=np.uint8
        ).reshape((1, 1, 8))
        signs = signs & np.uint8(0x01)
        signs = np.where(signs == 0, np.float32(1), np.float32(-1))
        signs = signs.reshape((n_blocks, -1, 2, 8))

        # Low 9 bits of each uint16 select an 8-element codebook row.
        assert cls.grid is not None
        grid = np.take_along_axis(
            cls.grid, (qs & np.uint16(511)).reshape((n_blocks, -1, 1, 1)), axis=-2
        )
        grid = grid.reshape((n_blocks, -1, 2, 8))

        return (db * grid * signs).reshape((n_blocks, -1))
+
+
+class IQ2_S(__Quant, qtype=GGMLQuantizationType.IQ2_S):
+ # iq2s_grid, but with each byte of the original packed in 2 bits,
+ # by mapping 0x08 to 0, 0x19 to 1, and 0x2b to 2.
+ grid_shape = (1024, 8)
+ grid_map = (0x08, 0x19, 0x2B)
+ grid_hex = (
+ b"00000200050008000a0011001400160019002000220025002800410044004600"
+ b"490050005200550058006100640066006900800082008500880091009400a000"
+ b"a500aa0001010401060109011001120115011801210124014001420145014801"
+ b"510154015601590160016501680181018401900192019501a101a40100020202"
+ b"050208021102140220022a02410244024602490250025502800285028a029402"
+ b"a202010404040604090410041204150418042104240426042904400442044504"
+ b"48044a0451045404560459046004620465048104840486048904900495049804"
+ b"a104a40400050205050508050a05110514051605190520052505280541054405"
+ b"46054905500552055505580561056405800582058505880591059405a0050106"
+ b"0406060609061006150640064506480651065406600681068406900600080208"
+ b"050808081108140816081908200825082a084108440846084908500852085508"
+ b"580861086408800885089408aa08010904091009120915091809210940094509"
+ b"480951095409600981099009000a110a140a220a280a2a0a500a990a01100410"
+ b"0610091010101210151018102110241026104010421045104810511054105610"
+ b"59106010621065106810811084108610901095109810a110a410001102110511"
+ b"08110a1111111411161119112011221125112811411144114611491150115211"
+ b"5511581161116411801182118511881191119411011204120912101215122112"
+ b"2412401245125112541281128412901200140214051408141114141416141914"
+ b"2014251428144114441446144914501452145514581461146414801482148514"
+ b"881491149414a014011504150615091510151215151518152115241540154215"
+ b"4515481551155415601581158415901500160516081611161416201641164416"
+ b"50168016aa160118041806180918101815181818211840184218451848185118"
+ b"541860188118841800190219051908191119141920194119441950196919a219"
+ b"041a101a401a561a00200220052008201120142016201920202025202a204120"
+ b"4420502052205520642080208a209420aa200121042110211221152121214021"
+ b"4221452151215421602181218421902100220a22222228222a22442250228822"
+ b"8a22a82201240424062409241024152418242124242440244224452448245124"
+ b"5424602481248424902400250525082511251425202541254425502566258025"
+ b"0126042610264026592600280528112814284128442850288a28aa2801290429"
+ b"102995290a2a222a642a882a8a2a014004400640094010401240154018401a40"
+ b"21402440264040404240454048404a4051405440564059406040624065408140"
+ b"8440904095409840a140a4400041024105410841114114411641194120412241"
+ b"2541414144414641494150415241554158416141644180418241854188419141"
+ b"9441a04101420442104212421542184224424042454248425142544260428142"
+ b"844200440244054408440a441144144416441944204422442544284441444444"
+ b"46444944504452445544584461446444804482448544884491449444a0440145"
+ b"0445064509451045124515451845214524454045424545454845514554456045"
+ b"6a4581458445904500460246054608461146144620464146444650468046a546"
+ b"0148044809481048124815481848214824484048424845484848514854486048"
+ b"84489048004902490549084911491449204941494449504980499649014a044a"
+ b"104a404a00500250055008501150145016501950205022502550285041504450"
+ b"4650495050505250555058506150645080508250855088509150945001510451"
+ b"0651095110511251155118512151245140514251455148515151545160518151"
+ b"8451905100520552085211521452205241524452505269528052015404540654"
+ b"0954105412541554185421542454405442544554485451545454605481548454"
+ b"9054005502550555085511551455205541554455505580550156045610562656"
+ b"405600580258055808581158145820584158445850585a588058015904591059"
+ b"4059005a195a855aa85a01600460066010601260156018602160246040604560"
+ b"4860516054606060846090600061026105610861116114612061416144615061"
+ b"806199610462106240625662a162006405640864116414642064416444645064"
+ b"806401650465106540654a656865926500669466016804681068656898680069"
+ b"2a69426aa16a0080028005800880118014801980208025804180448050805280"
+ b"5580588061808080858091809480018104810981108112811581188121812481"
+ b"408142814581488151815481818184819081a981008205820a82118214824182"
+ b"4482508201840484068409841084128415841884218440844284458448845184"
+ b"5484608481848484908400850285058508851185148520854185448550858085"
+ b"8a85018604861086298640860088058811881488418844885088a28801890489"
+ b"40896589228a588a5a8a828aa28a019004900990109012901590189024904090"
+ b"4290459048905190549060908190849090900091059111911491419144915091"
+ b"5a910192049210924092a6920094029405940894119414942094419444945094"
+ b"8094969401950495109540959895a19500964696649601980498109826984098"
+ b"a998009949995299909a00a005a00aa014a022a02aa041a044a050a0a2a0aaa0"
+ b"40a165a102a20aa222a228a22aa282a288a28aa2a8a201a404a410a440a489a4"
+ b"a4a400a519a551a60aa828a8a2a854a986a908aa0aaa20aa22aa28aa88aaaaaa"
+ )
+
    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize IQ2_S blocks back to float32.

        Block layout (per row of `blocks`): 2 bytes f16 scale `d`,
        QK_K // 8 bytes of low grid-index bits `qs`, QK_K // 8 bytes of
        packed sign bits, then QK_K // 32 bytes `qh` (high index bits)
        followed by the packed 4-bit sub-block scales.
        """
        n_blocks = blocks.shape[0]

        d, rest = np.hsplit(blocks, [2])
        qs, rest = np.hsplit(rest, [QK_K // 8])
        signs, rest = np.hsplit(rest, [QK_K // 8])
        qh, scales = np.hsplit(rest, [QK_K // 32])

        d = d.view(np.float16).astype(np.float32)

        # Each scale byte packs two 4-bit sub-block scales (low, high nibble).
        scales = scales.reshape((n_blocks, -1, 1)) >> np.array(
            [0, 4], dtype=np.uint8
        ).reshape((1, 1, 2))
        scales = (scales & 0x0F).reshape((n_blocks, -1))
        # Effective sub-block scale: d * (0.5 + scale) * 0.25.
        db = d * (np.float32(0.5) + scales) * np.float32(0.25)
        db = db.reshape((n_blocks, -1, 1, 1))

        # unpack the sign bits: expand each byte into 8 individual bits,
        # then map bit==0 -> +1.0 and bit==1 -> -1.0
        signs = signs.reshape((n_blocks, -1, 1)) >> np.array(
            [i for i in range(8)], dtype=np.uint8
        ).reshape((1, 1, 8))
        signs = signs & np.uint8(0x01)
        signs = np.where(signs == 0, np.float32(1), np.float32(-1))
        signs = signs.reshape((n_blocks, -1, 2, 8))

        # Each qh byte holds four 2-bit fields; combined with a qs byte they
        # form a 10-bit index into the 1024-row grid.
        qh = qh.reshape((n_blocks, -1, 1)) >> np.array(
            [0, 2, 4, 6], dtype=np.uint8
        ).reshape((1, 1, 4))
        qs = qs.astype(np.uint16) | ((qh & 0x03).astype(np.uint16) << 8).reshape(
            (n_blocks, -1)
        )

        assert cls.grid is not None
        grid = np.take_along_axis(cls.grid, qs.reshape((n_blocks, -1, 1, 1)), axis=-2)
        grid = grid.reshape((n_blocks, -1, 2, 8))

        # scale * grid value * sign, flattened back to per-block weights.
        return (db * grid * signs).reshape((n_blocks, -1))
+
+
+class IQ3_XXS(__Quant, qtype=GGMLQuantizationType.IQ3_XXS):
+ grid_shape = (256, 4)
+ grid_map = (0x04, 0x0C, 0x14, 0x1C, 0x24, 0x2C, 0x34, 0x3E)
+ grid_hex = (
+ b"0000020004001100130017002000220031004200730075000101030110011201"
+ b"2101250130013201410154017001000202020402110220022202310233023702"
+ b"5102570275020103070310031203250370031304370444045704730475040105"
+ b"0705320552053506640610071407160743076107011003101010121021102310"
+ b"3010321034104710501000110211111120112211011203121012121221123012"
+ b"7212001302132013311346136613011405145014201524154615711505162217"
+ b"4017002002201120132020202220262031204220012103210521102112212121"
+ b"3021632167217021002202221122172220222222372240225522012310231423"
+ b"7023742335245324032527254125742501270327162745270130103012302130"
+ b"2330503065307230003102312031313144314631013203321032253252327232"
+ b"1133333330344734723400350635223555351436363663363337603704401740"
+ b"3540374053405740744120423742404260426642074345430444514464442545"
+ b"4345704505471047124730471250415070500051065126515551145232527252"
+ b"0253535310542354275472540255315550562457425724604460466064602161"
+ b"6161176264623063366344640565526533660367216703700570077010703270"
+ b"5270267140711272457252720073157333736073217441740075027524753076"
+ )
+
    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize IQ3_XXS blocks back to float32.

        Block layout: 2 bytes f16 scale `d`, QK_K // 4 bytes of grid
        indices `qs`, then uint32 words that pack four 7-bit sign-table
        indices (bits 0..27) and a 4-bit sub-block scale (bits 28..31).
        """
        n_blocks = blocks.shape[0]

        d, rest = np.hsplit(blocks, [2])
        qs, scales = np.hsplit(rest, [QK_K // 4])

        d = d.view(np.float16).astype(np.float32)
        scales = scales.view(np.uint32)

        # Top 4 bits of each word are the sub-block scale: d * (0.5 + s) * 0.5.
        db = d * (np.float32(0.5) + (scales >> 28).astype(np.float32)) * np.float32(0.5)
        db = db.reshape((n_blocks, -1, 1, 1))

        # get the sign indices and unpack the bits: four 7-bit indices per
        # word into the shared IQ2_XXS ksigns table
        signs = scales.reshape((n_blocks, -1, 1)) >> np.array(
            [0, 7, 14, 21], dtype=np.uint32
        ).reshape((1, 1, 4))
        ksigns = np.frombuffer(IQ2_XXS.ksigns, dtype=np.uint8).reshape((1, 1, 1, 128))
        signs = (signs & np.uint32(0x7F)).reshape((n_blocks, -1, 4, 1))
        signs = np.take_along_axis(ksigns, signs, axis=-1)
        # Expand each ksigns byte into 8 bits; bit==0 -> +1.0, bit==1 -> -1.0.
        signs = signs.reshape((n_blocks, -1, 4, 1)) >> np.array(
            [i for i in range(8)], dtype=np.uint8
        ).reshape((1, 1, 1, 8))
        signs = signs & np.uint8(0x01)
        signs = np.where(signs == 0, np.float32(1), np.float32(-1))
        signs = signs.reshape((n_blocks, -1, 4, 8))

        assert cls.grid is not None
        # Each qs byte indexes a row of the 256-entry grid.
        grid = np.take_along_axis(cls.grid, qs.reshape((n_blocks, -1, 1, 1)), axis=-2)
        grid = grid.reshape((n_blocks, -1, 4, 8))

        return (db * grid * signs).reshape((n_blocks, -1))
+
+
+class IQ3_S(__Quant, qtype=GGMLQuantizationType.IQ3_S):
+ grid_shape = (512, 4)
+ grid_map = (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F)
+ grid_hex = (
+ b"0000010002000500070010001100120014001600200021002500330040004200"
+ b"4500470051005300600062007100740077000001010102010401100111011501"
+ b"2001230127013101350144016101650172010002010205020702100213021602"
+ b"2102250230023402420245024702510253027002730203031103150320032203"
+ b"3103330336034403500352036703710375030004130417042104240432044004"
+ b"4304510470040205040520052205260533054105450547056605730506061106"
+ b"1306310652067106000702070407200722072607330750075407001001100210"
+ b"0410101011101310151017102010221031103410361054105610611072100011"
+ b"0111031106111011141121113011331141115011521170117611001212121512"
+ b"1712201224123212401243125512601272120113041307131013131321132713"
+ b"3013341341136213701303140514121414143114331442144614501454140115"
+ b"1015131521153015321551152016241627164416461601170317101712172117"
+ b"3517411762177017002001200320052007201020122014201620212023202720"
+ b"3020322041204320452050205220672070207320752000210221102113211721"
+ b"2221252131213421422151210122042207222122232230223722412253225722"
+ b"7122742200230223052311232223242331233323422350236623012407242024"
+ b"2324322435244124722475240425112522253725402553257025002602260726"
+ b"2126552661260527112726273027432750270230113013301530173022303130"
+ b"3330353042304430473051306330713001310331053114312131233140316031"
+ b"7231763100321232203232323432503201331033143321332333273330334133"
+ b"4333473355337333033411341634223431345234603464340135103512352535"
+ b"3235443556357335163641360137033720372237353700400440124020402440"
+ b"2740324041405040704002410741114113412241304135414341514155410142"
+ b"0342104215422142334240425742624270420443114313432043224331433543"
+ b"0044024424443744404471440545074521456245134634466046104715473047"
+ b"4347514702501050145022504050445047505250665074500151035105511251"
+ b"2151325172510052115223523052365253520253075310532753445351536553"
+ b"7353015404542054325446541255265551555355425602570457225711601360"
+ b"1560316033606060006120612761646112623462426255626262706200631463"
+ b"2163406325644364626400650365346560650566406611671367007004700770"
+ b"2070227036704070547062700271117124714371457101720472107216722172"
+ b"3072517202733273357353730174057413742074507422754275027631760077"
+ )
+
    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize IQ3_S blocks back to float32.

        Block layout: 2 bytes f16 scale `d`, QK_K // 4 bytes of low
        grid-index bits `qs`, QK_K // 32 bytes of high index bits `qh`,
        QK_K // 8 bytes of packed sign bits, then 4-bit sub-block scales.
        """
        n_blocks = blocks.shape[0]

        d, rest = np.hsplit(blocks, [2])
        qs, rest = np.hsplit(rest, [QK_K // 4])
        qh, rest = np.hsplit(rest, [QK_K // 32])
        signs, scales = np.hsplit(rest, [QK_K // 8])

        d = d.view(np.float16).astype(np.float32)

        # Each scale byte packs two 4-bit sub-block scales (low, high nibble).
        scales = scales.reshape((n_blocks, -1, 1)) >> np.array(
            [0, 4], dtype=np.uint8
        ).reshape((1, 1, 2))
        scales = (scales & 0x0F).reshape((n_blocks, -1))
        # Effective sub-block scale: d * (1 + 2 * scale).
        db = d * (1 + 2 * scales)
        db = db.reshape((n_blocks, -1, 1, 1))

        # unpack the sign bits: bit==0 -> +1.0, bit==1 -> -1.0
        signs = signs.reshape((n_blocks, -1, 1)) >> np.array(
            [i for i in range(8)], dtype=np.uint8
        ).reshape((1, 1, 8))
        signs = signs & np.uint8(0x01)
        signs = np.where(signs == 0, np.float32(1), np.float32(-1))
        signs = signs.reshape((n_blocks, -1, 4, 8))

        # Each qh bit supplies the 9th bit of one grid index, extending the
        # qs byte to index the 512-row grid.
        qh = qh.reshape((n_blocks, -1, 1)) >> np.array(
            [i for i in range(8)], dtype=np.uint8
        )
        qh = (qh & 0x01).astype(np.uint16).reshape((n_blocks, -1))
        qs = qs.astype(np.uint16) | (qh << 8)

        assert cls.grid is not None
        grid = np.take_along_axis(cls.grid, qs.reshape((n_blocks, -1, 1, 1)), axis=-2)
        grid = grid.reshape((n_blocks, -1, 4, 8))

        return (db * grid * signs).reshape((n_blocks, -1))
+
+
+class IQ1_S(__Quant, qtype=GGMLQuantizationType.IQ1_S):
+ # iq1s_grid, with each byte packed into 2 bits
+ # -1, 0, 1 <=> 0, 1, 2
+ grid_shape = (2048, 8)
+ grid_map = (-1, 0, 1)
+ grid_hex = (
+ b"00000200050008000a00110015002000220028002a0045005100540056006500"
+ b"8000820088008a009500a000a200a800aa000401050111011401160119011a01"
+ b"2501410146014901520155015a0161016401660168018501910194019601a501"
+ b"0002020208020a0215022002220228022a024502510259026402690280028202"
+ b"88028a02910295029902a002a202a802aa021104140416042504410449045504"
+ b"5a046404650491049904a5040105040505050605150518051a05290540054505"
+ b"4a0550055105540555055605590560056205650568056a058105910595059805"
+ b"9a05a105a405a505a605a9051406190641064406500652065506580660066106"
+ b"6606690685069106940699060008020808080a0815082008220828082a084508"
+ b"5108560865088008820888088a089508a008a208a808aa080509110914091909"
+ b"2409250941095009510955096109640969099109940996099909a509000a020a"
+ b"080a0a0a150a200a220a280a2a0a450a510a590a610a650a800a820a850a880a"
+ b"8a0a950aa00aa20aa80aaa0a1010111014101910241025104110441050105510"
+ b"58106110641065106910911094109610a110a510011104110611091110111211"
+ b"1511181121112411291145114a11501151115211541155115611591160116511"
+ b"841192119511a111a41111121412161225124012461249125212551258125a12"
+ b"641266128512911294129612a512011406140914141415141814191421142614"
+ b"41144514461448144a1451145414551456145914621465146814841489149014"
+ b"94149514981499149a14a114a414a514a914021505150a151115141515151615"
+ b"191520152215251528152a154115441545154615511552155415551556155915"
+ b"5a1561156415651566156915801582158415851588158a159015911594159515"
+ b"961599159a15a015a215a51501160416051606161516161618161a1621162616"
+ b"401642164416451648164a165116551656165816591661166416651668166916"
+ b"6a1686168a1692169516a416a916111816182518411844184618491850185518"
+ b"58185a1860186118641866186918851891189418a5181019121915191a192119"
+ b"25194219441945194819511954195519561959195a19601965196a1989199119"
+ b"921995199819a119a619a919091a161a241a261a441a461a491a501a521a551a"
+ b"581a611a661a691a851a911a961a9a1a0020022008200a201520202022202520"
+ b"28202a20452051205920612065208020822088208a209520a020a220a520a820"
+ b"aa2005211121142119212521422144214921552158215a216121642165216621"
+ b"8521902196219921a521012208220a22112215222022222228222a2245225122"
+ b"562259226522812288228a2291229522a022a222a822aa220524142416241924"
+ b"252444244524462449245224552458245a2466248524912494249924a124a524"
+ b"0925152521252925402545254825512554255525592562256525682589259025"
+ b"9425952598259a25a125a425a625a92505261026122619262526412649265526"
+ b"6026612669268426862690269a260028022808280a2815282028222828282a28"
+ b"45285128542865288028822888288a28a028a228a828aa280929112914291929"
+ b"2529462949295229552961296429662969298529902996299929a429a529002a"
+ b"022a082a0a2a202a222a282a2a2a452a512a562a592a652a802a822a882a8a2a"
+ b"952aa02aa22aa82aaa2a054011401640254049405240554058405a4061406440"
+ b"664094409940a140a6400041014104410641094112411541164118411a412141"
+ b"26412941454148414a41514154415541564159415a41654168416a4181418441"
+ b"8641904192419541a041a141a241054211421442164225424142524255425a42"
+ b"6442694289429442a5420144154419442944454448444a445144544455445644"
+ b"61446244654468446a44814486448944904492449544a044a144a94401450245"
+ b"05450a4511451445154516451945204525452a45414544454545464549455045"
+ b"5145544555455645584559456145644565456645694582458445854588459145"
+ b"94459545964599459a45a545a845aa450146054609461446154618461a462146"
+ b"2446294640464246454648465046514652465546564659466246654668468146"
+ b"85468a4694469546a146a446a6460548114815481a4825484248494850485548"
+ b"5848614864486648694885489148944896489948a5480149054906490a491049"
+ b"144915491849214924492649404945494a495149524954495549564959496049"
+ b"6249654966496a49864989499249954996499849a149a449a649a949164a444a"
+ b"464a494a554a584a5a4a644a694a944aa54a0150045005500650095012501550"
+ b"1a50215024502950405045504850515054505550565059506550685086508950"
+ b"95509850a050a150a650a9500551085109510a51115114511551165118511951"
+ b"20512551265128512a5141514451455146514951505151515251545155515651"
+ b"585159515a51615164516551665169518251855191519451955196519951a051"
+ b"a551aa5101520652125215521a5221522452425245524a525152545255525652"
+ b"595262526552855290529252955299529a52a452045405541154145415541654"
+ b"185419542154255428542a54415444544554465449544a545054515454545554"
+ b"5654585459545a54615462546454655466546954805488548a54915494549554"
+ b"96549954a154a454a554aa540155025504550555065509551055115512551455"
+ b"1555165519551a55215524552555265529554055415542554455455546554855"
+ b"4955505551555255545555555655585559555a55605561556455655566556855"
+ b"69556a5581558455855589558a559055915594559555965598559955a155a455"
+ b"a555a655a9550056015602560456065608560956115614561556185619562056"
+ b"2156225624562556265628562956415645564656485649564a56505651565256"
+ b"545655565656585659565a566156645665566956825685568656885689568a56"
+ b"915695569a56a256a556a656a856a95604580558065809581058155818582158"
+ b"2a58455848584a58515854585558565858585958605862586458655882588958"
+ b"9058925895589858a158a9580159025905590a59115914591559165919592559"
+ b"41594459455946594959505951595259545955595659585959595a5961596459"
+ b"655966596959815985598959915994599559965998599959a559045a085a155a"
+ b"1a5a205a255a265a295a455a485a495a515a555a565a585a595a625a655a685a"
+ b"6a5a815a8a5a925a955a965a985a9a5aa15a0560146016601960256044605060"
+ b"5560566058605a60616064606660696081609660a56001610461066109611261"
+ b"15612161226126612961456149615161556156615961656166616a6184618a61"
+ b"92619561a161a661a96111621662196240624162466255625662586260628562"
+ b"91629662a56211641264156416641a6421642664296440644264456448644a64"
+ b"516454645564566459645a646064626465648464856489649064926494649564"
+ b"966498649a64a164a464a964056508650a651165156516651965446545654665"
+ b"496550655165546555655665596561656465656566656965866589658a659165"
+ b"9565966599659a65a265a565a665a86502660966156620662666286629664066"
+ b"456648664a66516654665566566658665a666066656668668066826685668a66"
+ b"9466966698669966a066a466a666aa661668196825684168526855685a686168"
+ b"6968856891689868a66801690469106915692169246926692969406941694569"
+ b"4669486951695469556956695969606965696a69826984698a699569a169a469"
+ b"a569a969116a166a186a416a446a496a506a556a586a5a6a646a656a696a866a"
+ b"946a986a9a6aa66a0080028008800a802080228028802a804580508051805480"
+ b"5680598065808080828088808a809580a080a280a880aa800581118114811681"
+ b"1981258141814481498150815281558156815881598164816681698185818981"
+ b"948196819981a5810082028208820a8215822082228228822a82518254825982"
+ b"65828082828288828a829582a082a282a882aa82148419844184448451845584"
+ b"5a846184648469849484998401850985128515851a8526852985408541854585"
+ b"4885518554855585568559855a856585668568856a8581858485868589859085"
+ b"928595859885a68511861686198625864186448649864a865086558659865a86"
+ b"618666866a86858691869a86a4860088028808880a8815882088228828882a88"
+ b"41884588518854885988658869888088828888888a889588a088a288a888aa88"
+ b"05890689118914891689258941894489468949895089528955895a8961896489"
+ b"858996899989a589008a028a088a0a8a158a208a228a288a2a8a458a518a548a"
+ b"568a808a828a888a8a8a958aa08aa28aa88aaa8a059011901690189019902590"
+ b"419046904990559058905a9069906a9085909190949096909990a59001910491"
+ b"069109911091159118911a912191249126912991409145915091519154915591"
+ b"569159916291659184918691929195919891a191a491a691a991059211921492"
+ b"19922592449246924992509252925592589266926992859294929692a9920194"
+ b"04940694109415941894269440944a9451945494559456945894599460946194"
+ b"62946594849486949294949495949894a194a9940095059508950a9510951195"
+ b"14951595169519952195259529952a9541954495459546954995509551955295"
+ b"549555955695589559955a956195649565956695699581958595889591959295"
+ b"94959595969599959a95a095a295a595a895aa95019604961096159619962096"
+ b"2696299645964896499651965296559656965996659668968296849689968a96"
+ b"929694969596a496a696a9960598169819982598419846985098529855985698"
+ b"5a98649865988598919896989998a59804990699099910991299159918991a99"
+ b"209921992499269940994299459948994a995199549955995699599962996599"
+ b"66996a99819984999099929995999a99a199a699059a159a259a449a469a499a"
+ b"509a559a589a619a859a919a949a959a969a00a002a008a00aa015a020a022a0"
+ b"28a02aa045a051a054a056a059a080a082a088a08aa095a0a0a0a2a0a8a0aaa0"
+ b"05a109a111a114a116a119a11aa146a149a151a155a158a15aa161a164a185a1"
+ b"90a192a196a199a102a208a20aa210a219a222a228a22aa245a251a256a259a2"
+ b"65a280a282a288a28aa295a2a0a2a2a2a8a2aaa219a425a441a444a450a454a4"
+ b"55a458a45aa461a465a466a468a469a485a406a509a510a512a515a518a526a5"
+ b"29a542a545a551a554a555a556a559a565a56aa581a584a585a586a589a592a5"
+ b"95a598a505a611a616a61aa621a625a644a646a64aa652a655a656a658a660a6"
+ b"62a686a690a695a696a699a6a1a6a4a6a6a600a802a808a80aa820a822a828a8"
+ b"2aa851a854a856a859a880a882a888a88aa895a8a0a8a2a8a8a8aaa805a914a9"
+ b"19a921a925a941a950a955a95aa961a966a969a990a996a900aa02aa08aa0aaa"
+ b"20aa22aa28aa2aaa51aa54aa56aa80aa82aa88aa8aaa95aaa0aaa2aaa8aaaaaa"
+ )
+
    # Fixed magnitude of the offset added to every grid value; the sign of
    # the offset is taken per group from qh bit 15 below.
    delta = np.float32(0.125)

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize IQ1_S blocks back to float32.

        Block layout: 2 bytes f16 scale `d`, QK_K // 8 bytes of low
        grid-index bits `qs`, then uint16 words `qh` packing high index
        bits (0..11), a 3-bit sub-block scale (12..14) and a delta-sign
        bit (15).
        """
        n_blocks = blocks.shape[0]

        d, rest = np.hsplit(blocks, [2])
        qs, qh = np.hsplit(rest, [QK_K // 8])

        d = d.view(np.float16).astype(np.float32)
        qh = qh.view(np.uint16)

        # Bits 12..14 of each qh word: 3-bit sub-block scale, used as 2*s + 1.
        dl = d * (2 * ((qh >> 12) & 7) + 1)
        dl = dl.reshape((n_blocks, -1, 1, 1))
        # Bit 15 selects the sign of the fixed delta offset.
        delta = np.where((qh & np.uint16(0x8000)) == 0, cls.delta, -cls.delta)
        delta = delta.reshape((n_blocks, -1, 1, 1))

        # Bits 0..11 hold four 3-bit fields; each extends a qs byte to an
        # 11-bit index into the 2048-row ternary grid.
        qh = qh.reshape((n_blocks, -1, 1)) >> np.array(
            [0, 3, 6, 9], dtype=np.uint16
        ).reshape((1, 1, 4))
        qs = qs.astype(np.uint16) | ((qh & 7) << 8).reshape((n_blocks, -1))

        assert cls.grid is not None
        grid = np.take_along_axis(cls.grid, qs.reshape((n_blocks, -1, 1, 1)), axis=-2)
        grid = grid.reshape((n_blocks, -1, 4, 8))

        # scale * (grid value +/- delta), flattened back to per-block weights.
        return (dl * (grid + delta)).reshape((n_blocks, -1))
+
+
class IQ1_M(__Quant, qtype=GGMLQuantizationType.IQ1_M):
    # IQ1_M reuses the IQ1_S ternary grid and packing constants.
    grid_shape = IQ1_S.grid_shape
    grid_map = IQ1_S.grid_map
    grid_hex = IQ1_S.grid_hex

    # Fixed offset magnitude; its sign is taken per group from a qh bit.
    delta = IQ1_S.delta

    # Okay *this* type is weird. It's the only one which stores the f16 scales in multiple parts.
    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize IQ1_M blocks back to float32.

        Block layout: QK_K // 8 bytes of low grid-index bits `qs`,
        QK_K // 16 bytes `qh`, then uint16 scale words whose top nibbles
        jointly encode the block-wide f16 scale.
        """
        n_blocks = blocks.shape[0]

        qs, rest = np.hsplit(blocks, [QK_K // 8])
        qh, scales = np.hsplit(rest, [QK_K // 16])

        # The f16 scale is packed across multiple bytes: the top 4 bits of
        # each of the four uint16 scale words are glued back together into
        # one 16-bit pattern, then reinterpreted as float16.
        scales = scales.view(np.uint16)
        d = (scales.reshape((n_blocks, 4)) & np.uint16(0xF000)) >> np.array(
            [12, 8, 4, 0], dtype=np.uint16
        ).reshape((1, 4))
        d = d[..., 0] | d[..., 1] | d[..., 2] | d[..., 3]
        d = d.view(np.float16).astype(np.float32).reshape((n_blocks, 1))

        # The low 12 bits of each scale word hold four 3-bit sub-block
        # scales, used as 2*s + 1.
        scales = scales.reshape(n_blocks, -1, 1) >> np.array(
            [0, 3, 6, 9], dtype=np.uint16
        ).reshape((1, 1, 4))
        scales = (scales & 0x07).reshape((n_blocks, -1))
        dl = d * (2 * scales + 1)
        dl = dl.reshape((n_blocks, -1, 2, 1, 1))

        # Each qh byte packs two 4-bit fields (low, high nibble); the low
        # 3 bits of a field extend a qs byte to an 11-bit grid index.
        qh = qh.reshape((n_blocks, -1, 1)) >> np.array([0, 4], dtype=np.uint8).reshape(
            (1, 1, 2)
        )
        qs = qs.astype(np.uint16) | ((qh & 0x07).astype(np.uint16) << 8).reshape(
            (n_blocks, -1)
        )

        # Bit 3 of each qh field selects the delta sign. NOTE: in Python `&`
        # binds tighter than `==`, so this parses as (qh & 0x08) == 0 —
        # it is correct as written; do not "fix" the precedence.
        delta = np.where(qh & 0x08 == 0, cls.delta, -cls.delta)
        delta = delta.reshape((n_blocks, -1, 2, 2, 1))

        assert cls.grid is not None
        grid = np.take_along_axis(cls.grid, qs.reshape((n_blocks, -1, 1, 1)), axis=-2)
        grid = grid.reshape((n_blocks, -1, 2, 2, 8))

        # scale * (grid value +/- delta), flattened back to per-block weights.
        return (dl * (grid + delta)).reshape((n_blocks, -1))
+
+
class IQ4_NL(__Quant, qtype=GGMLQuantizationType.IQ4_NL):
    # Non-linear 16-entry codebook: each 4-bit value maps to one of these
    # signed byte levels. Also reused by IQ4_XS.
    kvalues = (-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113)

    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize IQ4_NL blocks back to float32.

        Block layout: 2 bytes f16 scale followed by packed nibbles, each
        nibble being an index into the `kvalues` codebook.
        """
        n_blocks = blocks.shape[0]

        scale, packed = np.hsplit(blocks, [2])
        scale = scale.view(np.float16).astype(np.float32)

        # Split every byte into its low and high nibble (low first).
        shifts = np.array([0, 4], dtype=np.uint8).reshape((1, 1, 2, 1))
        nibbles = packed.reshape((n_blocks, -1, 1, cls.block_size // 2)) >> shifts
        nibbles = (nibbles & np.uint8(0x0F)).reshape((n_blocks, -1, 1))

        # Look every nibble up in the non-linear codebook.
        codebook = np.array(cls.kvalues, dtype=np.int8).reshape(1, 1, 16)
        levels = np.take_along_axis(codebook, nibbles, axis=-1)
        levels = levels.astype(np.float32).reshape((n_blocks, -1))

        return scale * levels
+
+
class IQ4_XS(__Quant, qtype=GGMLQuantizationType.IQ4_XS):
    @classmethod
    def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
        """Dequantize IQ4_XS blocks back to float32.

        Block layout: 2 bytes f16 scale `d`, 2 bytes of packed high scale
        bits `scales_h`, QK_K // 64 bytes of low scale nibbles `scales_l`,
        then packed nibbles indexing the shared IQ4_NL codebook.
        """
        n_blocks = blocks.shape[0]

        d, rest = np.hsplit(blocks, [2])
        scales_h, rest = np.hsplit(rest, [2])
        scales_l, qs = np.hsplit(rest, [QK_K // 64])

        d = d.view(np.float16).astype(np.float32)
        scales_h = scales_h.view(np.uint16)

        # Each scales_l byte packs two 4-bit low scale halves.
        scales_l = scales_l.reshape((n_blocks, -1, 1)) >> np.array(
            [0, 4], dtype=np.uint8
        ).reshape((1, 1, 2))
        # scales_h packs one 2-bit high half per sub-block.
        scales_h = scales_h.reshape((n_blocks, 1, -1)) >> np.array(
            [2 * i for i in range(QK_K // 32)], dtype=np.uint16
        ).reshape((1, -1, 1))
        scales_l = scales_l.reshape((n_blocks, -1)) & np.uint8(0x0F)
        scales_h = scales_h.reshape((n_blocks, -1)).astype(np.uint8) & np.uint8(0x03)

        # Recombine into a 6-bit scale, stored with a bias of 32.
        scales = (scales_l | (scales_h << np.uint8(4))).astype(np.int8) - np.int8(32)
        dl = (d * scales.astype(np.float32)).reshape((n_blocks, -1, 1))

        # Split every byte into low and high nibble (low first).
        qs = qs.reshape((n_blocks, -1, 1, 16)) >> np.array(
            [0, 4], dtype=np.uint8
        ).reshape((1, 1, 2, 1))
        qs = qs.reshape((n_blocks, -1, 32, 1)) & np.uint8(0x0F)

        # Each nibble indexes the non-linear IQ4_NL codebook.
        kvalues = np.array(IQ4_NL.kvalues, dtype=np.int8).reshape((1, 1, 1, -1))
        qs = (
            np.take_along_axis(kvalues, qs, axis=-1)
            .astype(np.float32)
            .reshape((n_blocks, -1, 32))
        )

        return (dl * qs).reshape((n_blocks, -1))
diff --git a/modules_forge/packages/gguf/quick_4bits_ops.py b/modules_forge/packages/gguf/quick_4bits_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..e16b77b91c1500400572b3e873f39d2cb0f6a445
--- /dev/null
+++ b/modules_forge/packages/gguf/quick_4bits_ops.py
@@ -0,0 +1,95 @@
+# By Forge
+
+
+import torch
+
+
def native_unpack_4x4bits_in_1x16bits_to_4x8bits_in_1x32bits(x):
    """Expand each 16-bit element of `x` into four signed bytes (one per
    4-bit field, biased by -8), reinterpreted as a single int32.

    Per byte the low nibble comes first, then the high nibble.
    """
    rows = x.size(0)
    as_bytes = x.view(torch.uint8).view(rows, -1)
    low, high = as_bytes & 15, as_bytes >> 4
    nibbles = torch.stack([low, high], dim=-1).view(rows, -1)
    signed = nibbles.view(torch.int8) - 8
    return signed.view(torch.int32)
+
+
def native_unpack_4x4bits_in_1x16bits_to_4x8bits_in_1x32bits_u(x):
    """Unsigned variant: expand each 16-bit element of `x` into four raw
    nibble bytes (0..15, no bias), reinterpreted as a single int32.
    """
    rows = x.size(0)
    as_bytes = x.view(torch.uint8).view(rows, -1)
    low, high = as_bytes & 15, as_bytes >> 4
    nibbles = torch.stack([low, high], dim=-1).view(rows, -1)
    return nibbles.view(torch.int32)
+
+
# Feature gate: the uint16-keyed lookup-table fast path below requires
# torch.uint16, which is only present in newer PyTorch (per the message,
# 2.3+). On older builds we fall back to on-the-fly bit unpacking.
disable_all_optimizations = False

if not hasattr(torch, "uint16"):
    disable_all_optimizations = True

if disable_all_optimizations:
    print(
        "You are using PyTorch below version 2.3. Some optimizations will be disabled."
    )

if not disable_all_optimizations:
    # Precompute, for every possible 16-bit pattern (every pair of packed
    # bytes), the four unpacked bytes as one int32 entry. 65536 entries each.
    # Signed table: nibbles biased by -8.
    native_4bits_lookup_table = (
        native_unpack_4x4bits_in_1x16bits_to_4x8bits_in_1x32bits(
            torch.arange(start=0, end=256 * 256, dtype=torch.long).to(torch.uint16)
        )[:, 0]
    )
    # Unsigned table: raw nibbles 0..15.
    native_4bits_lookup_table_u = (
        native_unpack_4x4bits_in_1x16bits_to_4x8bits_in_1x32bits_u(
            torch.arange(start=0, end=256 * 256, dtype=torch.long).to(torch.uint16)
        )[:, 0]
    )
+
+
def quick_unpack_4bits(x):
    """Unpack packed 4-bit values in `x` into signed bytes (nibble - 8).

    Fast path: reinterpret input as uint16 and gather precomputed results
    from the module-level lookup table (moved lazily to `x`'s device).
    Slow path (old PyTorch): unpack the nibbles directly.
    """
    global native_4bits_lookup_table

    if disable_all_optimizations:
        nibbles = torch.stack([x & 15, x >> 4], dim=-1)
        return nibbles.view(x.size(0), -1).view(torch.int8) - 8

    rows = x.size(0)
    keys = x.view(torch.uint16)

    # Cache the lookup table on whichever device the input lives on.
    if native_4bits_lookup_table.device != keys.device:
        native_4bits_lookup_table = native_4bits_lookup_table.to(device=keys.device)

    gathered = torch.index_select(
        input=native_4bits_lookup_table,
        dim=0,
        index=keys.to(dtype=torch.int32).flatten(),
    )
    return gathered.view(torch.int8).view(rows, -1)
+
+
def quick_unpack_4bits_u(x):
    """Unpack packed 4-bit values in `x` into raw unsigned nibbles (0..15).

    Fast path: reinterpret input as uint16 and gather precomputed results
    from the module-level unsigned lookup table (moved lazily to `x`'s
    device). Slow path (old PyTorch): unpack the nibbles directly.
    """
    global native_4bits_lookup_table_u

    if disable_all_optimizations:
        return torch.stack([x & 15, x >> 4], dim=-1).view(x.size(0), -1)

    rows = x.size(0)
    keys = x.view(torch.uint16)

    # Cache the lookup table on whichever device the input lives on.
    if native_4bits_lookup_table_u.device != keys.device:
        native_4bits_lookup_table_u = native_4bits_lookup_table_u.to(device=keys.device)

    gathered = torch.index_select(
        input=native_4bits_lookup_table_u,
        dim=0,
        index=keys.to(dtype=torch.int32).flatten(),
    )
    return gathered.view(torch.uint8).view(rows, -1)
+
+
def change_4bits_order(x):
    """Re-pack 4-bit values so that all low nibbles of a row come before
    all high nibbles, then pair consecutive nibbles back into bytes.
    """
    # Lay out [lo_0..lo_{n-1}, hi_0..hi_{n-1}] per row, then fuse adjacent
    # entries into bytes (even index -> low nibble, odd index -> high).
    flat = torch.stack([x & 15, x >> 4], dim=-2).view(x.size(0), -1)
    return flat[:, ::2] | (flat[:, 1::2] << 4)
diff --git a/modules_forge/packages/gguf/tensor_mapping.py b/modules_forge/packages/gguf/tensor_mapping.py
new file mode 100644
index 0000000000000000000000000000000000000000..5389d5fbf14e94eadb75319783188363e11afd53
--- /dev/null
+++ b/modules_forge/packages/gguf/tensor_mapping.py
@@ -0,0 +1,540 @@
+from __future__ import annotations
+
+from typing import Sequence
+
+from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES
+
+
class TensorNameMap:
    """Maps framework-specific tensor names to canonical GGUF tensor names.

    ``mappings_cfg`` covers global tensors, ``block_mappings_cfg`` covers
    per-block tensors (with a ``{bid}`` placeholder for the block index), and
    ``arch_block_mappings_cfg`` holds per-architecture overrides.  An instance
    resolves any known alias to ``(MODEL_TENSOR, canonical_name)``.
    """

    mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
        # Token embeddings
        MODEL_TENSOR.TOKEN_EMBD: (
            "gpt_neox.embed_in",  # gptneox
            "transformer.wte",  # gpt2 gpt-j mpt refact qwen dbrx jais
            "transformer.word_embeddings",  # falcon
            "word_embeddings",  # bloom
            "model.embed_tokens",  # llama-hf
            "tok_embeddings",  # llama-pth
            "embeddings.word_embeddings",  # bert nomic-bert
            "language_model.embedding.word_embeddings",  # persimmon
            "wte",  # gpt2
            "transformer.embd.wte",  # phi2
            "model.tok_embeddings",  # internlm2
            "model.embedding",  # mamba-qbert
            "backbone.embedding",  # mamba
            "backbone.embeddings",  # mamba-hf
            "transformer.in_out_embed",  # Grok
            "embedding.word_embeddings",  # chatglm
            "transformer.token_embeddings",  # openelm
            "shared",  # t5
        ),
        # Token type embeddings
        MODEL_TENSOR.TOKEN_TYPES: (
            "embeddings.token_type_embeddings",  # bert nomic-bert
        ),
        # Normalization of token embeddings
        MODEL_TENSOR.TOKEN_EMBD_NORM: (
            "word_embeddings_layernorm",  # bloom
            "embeddings.LayerNorm",  # bert
            "emb_ln",  # nomic-bert
            "transformer.norm",  # openelm
        ),
        # Position embeddings
        MODEL_TENSOR.POS_EMBD: (
            "transformer.wpe",  # gpt2
            "embeddings.position_embeddings",  # bert
            "wpe",  # gpt2
        ),
        # Output
        MODEL_TENSOR.OUTPUT: (
            "embed_out",  # gptneox
            "lm_head",  # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais
            "output",  # llama-pth bloom internlm2
            "word_embeddings_for_head",  # persimmon
            "lm_head.linear",  # phi2
            "output_layer",  # chatglm
        ),
        # Output norm
        MODEL_TENSOR.OUTPUT_NORM: (
            "gpt_neox.final_layer_norm",  # gptneox
            "transformer.ln_f",  # gpt2 gpt-j falcon jais
            "model.norm",  # llama-hf baichuan internlm2
            "norm",  # llama-pth
            "transformer.norm_f",  # mpt dbrx
            "ln_f",  # refact bloom qwen gpt2
            "language_model.encoder.final_layernorm",  # persimmon
            "model.final_layernorm",  # persimmon
            "lm_head.ln",  # phi2
            "model.norm_f",  # mamba-qbert
            "backbone.norm_f",  # mamba
            "transformer.rms_norm",  # Grok
            "encoder.final_layernorm",  # chatglm
            "transformer.norm",  # openelm
        ),
        # Rope frequencies
        MODEL_TENSOR.ROPE_FREQS: (
            "rope.freqs",  # llama-pth
            "rotary_pos_emb.inv_freq",  # chatglm
        ),
    }

    block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
        # Attention norm
        MODEL_TENSOR.ATTN_NORM: (
            "gpt_neox.layers.{bid}.input_layernorm",  # gptneox
            "transformer.h.{bid}.ln_1",  # gpt2 gpt-j refact qwen jais
            "transformer.blocks.{bid}.norm_1",  # mpt
            "transformer.h.{bid}.input_layernorm",  # falcon7b
            "h.{bid}.input_layernorm",  # bloom
            "transformer.h.{bid}.ln_mlp",  # falcon40b
            "model.layers.{bid}.input_layernorm",  # llama-hf
            "layers.{bid}.attention_norm",  # llama-pth
            "language_model.encoder.layers.{bid}.input_layernorm",  # persimmon
            "model.layers.{bid}.ln1",  # yi
            "h.{bid}.ln_1",  # gpt2
            "transformer.h.{bid}.ln",  # phi2
            "model.layers.layers.{bid}.norm",  # plamo
            "model.layers.{bid}.attention_norm",  # internlm2
            "model.layers.{bid}.norm",  # mamba-qbert
            "backbone.layers.{bid}.norm",  # mamba
            "transformer.decoder_layer.{bid}.rms_norm",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.norm_1",  # dbrx
            "encoder.layers.{bid}.input_layernorm",  # chatglm
            "transformer.layers.{bid}.attn_norm",  # openelm
        ),
        # Attention norm 2
        MODEL_TENSOR.ATTN_NORM_2: (
            "transformer.h.{bid}.ln_attn",  # falcon40b
            "encoder.layer.{bid}.layer_norm_1",  # jina-v2-code
        ),
        # Attention query-key-value
        MODEL_TENSOR.ATTN_QKV: (
            "gpt_neox.layers.{bid}.attention.query_key_value",  # gptneox
            "transformer.h.{bid}.attn.c_attn",  # gpt2 qwen jais
            "transformer.blocks.{bid}.attn.Wqkv",  # mpt
            "transformer.blocks.{bid}.norm_attn_norm.attn.Wqkv",  # dbrx
            "transformer.h.{bid}.self_attention.query_key_value",  # falcon
            "h.{bid}.self_attention.query_key_value",  # bloom
            "language_model.encoder.layers.{bid}.self_attention.query_key_value",  # persimmon
            "model.layers.{bid}.self_attn.query_key_value",  # persimmon
            "h.{bid}.attn.c_attn",  # gpt2
            "transformer.h.{bid}.mixer.Wqkv",  # phi2
            "encoder.layers.{bid}.attn.Wqkv",  # nomic-bert
            "model.layers.{bid}.self_attn.qkv_proj",  # phi3
            "encoder.layers.{bid}.self_attention.query_key_value",  # chatglm
            "transformer.layers.{bid}.attn.qkv_proj",  # openelm
        ),
        # Attention query
        MODEL_TENSOR.ATTN_Q: (
            "model.layers.{bid}.self_attn.q_proj",  # llama-hf
            "layers.{bid}.attention.wq",  # llama-pth
            "encoder.layer.{bid}.attention.self.query",  # bert
            "transformer.h.{bid}.attn.q_proj",  # gpt-j
            "model.layers.layers.{bid}.self_attn.q_proj",  # plamo
            "model.layers.{bid}.attention.wq",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.query",  # Grok
        ),
        # Attention key
        MODEL_TENSOR.ATTN_K: (
            "model.layers.{bid}.self_attn.k_proj",  # llama-hf
            "layers.{bid}.attention.wk",  # llama-pth
            "encoder.layer.{bid}.attention.self.key",  # bert
            "transformer.h.{bid}.attn.k_proj",  # gpt-j
            "transformer.h.{bid}.attn.k",  # refact
            "model.layers.layers.{bid}.self_attn.k_proj",  # plamo
            "model.layers.{bid}.attention.wk",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.key",  # Grok
        ),
        # Attention value
        MODEL_TENSOR.ATTN_V: (
            "model.layers.{bid}.self_attn.v_proj",  # llama-hf
            "layers.{bid}.attention.wv",  # llama-pth
            "encoder.layer.{bid}.attention.self.value",  # bert
            "transformer.h.{bid}.attn.v_proj",  # gpt-j
            "transformer.h.{bid}.attn.v",  # refact
            "model.layers.layers.{bid}.self_attn.v_proj",  # plamo
            "model.layers.{bid}.attention.wv",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.value",  # Grok
        ),
        # Attention output
        MODEL_TENSOR.ATTN_OUT: (
            "gpt_neox.layers.{bid}.attention.dense",  # gptneox
            "transformer.h.{bid}.attn.c_proj",  # gpt2 refact qwen jais
            "transformer.blocks.{bid}.attn.out_proj",  # mpt
            "transformer.h.{bid}.self_attention.dense",  # falcon
            "h.{bid}.self_attention.dense",  # bloom
            "model.layers.{bid}.self_attn.o_proj",  # llama-hf
            "layers.{bid}.attention.wo",  # llama-pth
            "encoder.layer.{bid}.attention.output.dense",  # bert
            "transformer.h.{bid}.attn.out_proj",  # gpt-j
            "language_model.encoder.layers.{bid}.self_attention.dense",  # persimmon
            "model.layers.{bid}.self_attn.dense",  # persimmon
            "h.{bid}.attn.c_proj",  # gpt2
            "transformer.h.{bid}.mixer.out_proj",  # phi2
            "model.layers.layers.{bid}.self_attn.o_proj",  # plamo
            "model.layers.{bid}.attention.wo",  # internlm2
            "encoder.layers.{bid}.attn.out_proj",  # nomic-bert
            "transformer.decoder_layer.{bid}.multi_head_attention.linear",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj",  # dbrx
            "encoder.layers.{bid}.self_attention.dense",  # chatglm
            "transformer.layers.{bid}.attn.out_proj",  # openelm
        ),
        # Attention output norm
        MODEL_TENSOR.ATTN_OUT_NORM: (
            "encoder.layer.{bid}.attention.output.LayerNorm",  # bert
            "encoder.layers.{bid}.norm1",  # nomic-bert
            "transformer.decoder_layer.{bid}.rms_norm_1",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.norm_2",  # dbrx
        ),
        MODEL_TENSOR.ATTN_POST_NORM: (
            "model.layers.{bid}.post_attention_layernorm",  # gemma2
        ),
        # Rotary embeddings
        MODEL_TENSOR.ATTN_ROT_EMBD: (
            "model.layers.{bid}.self_attn.rotary_emb.inv_freq",  # llama-hf
            "layers.{bid}.attention.inner_attention.rope.freqs",  # llama-pth
            "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq",  # plamo
            "transformer.h.{bid}.attn.rotary_emb.inv_freq",  # codeshell
        ),
        # Feed-forward norm
        MODEL_TENSOR.FFN_NORM: (
            "gpt_neox.layers.{bid}.post_attention_layernorm",  # gptneox
            "transformer.h.{bid}.ln_2",  # gpt2 refact qwen jais
            "h.{bid}.post_attention_layernorm",  # bloom
            "transformer.blocks.{bid}.norm_2",  # mpt
            "model.layers.{bid}.post_attention_layernorm",  # llama-hf
            "layers.{bid}.ffn_norm",  # llama-pth
            "language_model.encoder.layers.{bid}.post_attention_layernorm",  # persimmon
            "model.layers.{bid}.ln2",  # yi
            "h.{bid}.ln_2",  # gpt2
            "model.layers.{bid}.ffn_norm",  # internlm2
            "transformer.decoder_layer.{bid}.rms_norm_2",  # Grok
            "encoder.layers.{bid}.post_attention_layernorm",  # chatglm
            "transformer.layers.{bid}.ffn_norm",  # openelm
        ),
        # Pre feed-forward norm
        MODEL_TENSOR.FFN_PRE_NORM: (
            "model.layers.{bid}.pre_feedforward_layernorm",  # gemma2
        ),
        # Post feed-forward norm
        MODEL_TENSOR.FFN_POST_NORM: (
            "model.layers.{bid}.post_feedforward_layernorm",  # gemma2
        ),
        MODEL_TENSOR.FFN_GATE_INP: (
            "layers.{bid}.feed_forward.gate",  # mixtral
            "model.layers.{bid}.block_sparse_moe.gate",  # mixtral
            "model.layers.{bid}.mlp.gate",  # qwen2moe
            "transformer.decoder_layer.{bid}.router",  # Grok
            "transformer.blocks.{bid}.ffn.router.layer",  # dbrx
        ),
        MODEL_TENSOR.FFN_GATE_INP_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert_gate",  # qwen2moe
        ),
        # Feed-forward up
        MODEL_TENSOR.FFN_UP: (
            "gpt_neox.layers.{bid}.mlp.dense_h_to_4h",  # gptneox
            "transformer.h.{bid}.mlp.c_fc",  # gpt2 jais
            "transformer.blocks.{bid}.ffn.up_proj",  # mpt
            "transformer.h.{bid}.mlp.dense_h_to_4h",  # falcon
            "h.{bid}.mlp.dense_h_to_4h",  # bloom
            "model.layers.{bid}.mlp.up_proj",  # llama-hf refact
            "layers.{bid}.feed_forward.w3",  # llama-pth
            "encoder.layer.{bid}.intermediate.dense",  # bert
            "transformer.h.{bid}.mlp.fc_in",  # gpt-j
            "transformer.h.{bid}.mlp.linear_3",  # refact
            "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h",  # persimmon
            "model.layers.{bid}.mlp.dense_h_to_4h",  # persimmon
            "transformer.h.{bid}.mlp.w1",  # qwen
            "h.{bid}.mlp.c_fc",  # gpt2
            "transformer.h.{bid}.mlp.fc1",  # phi2
            "model.layers.{bid}.mlp.fc1",  # phi2
            "model.layers.{bid}.mlp.gate_up_proj",  # phi3
            "model.layers.layers.{bid}.mlp.up_proj",  # plamo
            "model.layers.{bid}.feed_forward.w3",  # internlm2
            "encoder.layers.{bid}.mlp.fc11",  # nomic-bert
            "model.layers.{bid}.mlp.c_fc",  # starcoder2
            "encoder.layer.{bid}.mlp.gated_layers_v",  # jina-bert-v2
            "model.layers.{bid}.residual_mlp.w3",  # arctic
            "encoder.layers.{bid}.mlp.dense_h_to_4h",  # chatglm
        ),
        MODEL_TENSOR.FFN_UP_EXP: (
            "layers.{bid}.feed_forward.experts.w3",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear_v",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.v1",  # dbrx
            "model.layers.{bid}.mlp.experts.up_proj",  # qwen2moe (merged)
        ),
        MODEL_TENSOR.FFN_UP_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.up_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.up_proj",  # deepseek2
        ),
        # AWQ-activation gate
        MODEL_TENSOR.FFN_ACT: ("transformer.blocks.{bid}.ffn.act",),  # mpt
        # Feed-forward gate
        MODEL_TENSOR.FFN_GATE: (
            "model.layers.{bid}.mlp.gate_proj",  # llama-hf refact
            "layers.{bid}.feed_forward.w1",  # llama-pth
            "transformer.h.{bid}.mlp.w2",  # qwen
            "transformer.h.{bid}.mlp.c_fc2",  # jais
            "model.layers.layers.{bid}.mlp.gate_proj",  # plamo
            "model.layers.{bid}.feed_forward.w1",  # internlm2
            "encoder.layers.{bid}.mlp.fc12",  # nomic-bert
            "encoder.layer.{bid}.mlp.gated_layers_w",  # jina-bert-v2
            "transformer.h.{bid}.mlp.linear_1",  # refact
            "model.layers.{bid}.residual_mlp.w1",  # arctic
        ),
        MODEL_TENSOR.FFN_GATE_EXP: (
            "layers.{bid}.feed_forward.experts.w1",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.w1",  # dbrx
            "model.layers.{bid}.mlp.experts.gate_proj",  # qwen2moe (merged)
        ),
        MODEL_TENSOR.FFN_GATE_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.gate_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.gate_proj",  # deepseek2
        ),
        # Feed-forward down
        MODEL_TENSOR.FFN_DOWN: (
            "gpt_neox.layers.{bid}.mlp.dense_4h_to_h",  # gptneox
            "transformer.h.{bid}.mlp.c_proj",  # gpt2 refact qwen jais
            "transformer.blocks.{bid}.ffn.down_proj",  # mpt
            "transformer.h.{bid}.mlp.dense_4h_to_h",  # falcon
            "h.{bid}.mlp.dense_4h_to_h",  # bloom
            "model.layers.{bid}.mlp.down_proj",  # llama-hf
            "layers.{bid}.feed_forward.w2",  # llama-pth
            "encoder.layer.{bid}.output.dense",  # bert
            "transformer.h.{bid}.mlp.fc_out",  # gpt-j
            "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h",  # persimmon
            "model.layers.{bid}.mlp.dense_4h_to_h",  # persimmon
            "h.{bid}.mlp.c_proj",  # gpt2
            "transformer.h.{bid}.mlp.fc2",  # phi2
            "model.layers.{bid}.mlp.fc2",  # phi2
            "model.layers.layers.{bid}.mlp.down_proj",  # plamo
            "model.layers.{bid}.feed_forward.w2",  # internlm2
            "encoder.layers.{bid}.mlp.fc2",  # nomic-bert
            "model.layers.{bid}.mlp.c_proj",  # starcoder2
            "encoder.layer.{bid}.mlp.wo",  # jina-bert-v2
            "transformer.layers.{bid}.ffn.proj_2",  # openelm
            "model.layers.{bid}.residual_mlp.w2",  # arctic
            "encoder.layer.{bid}.mlp.down_layer",  # jina-bert-v2
            "encoder.layers.{bid}.mlp.dense_4h_to_h",  # chatglm
        ),
        MODEL_TENSOR.FFN_DOWN_EXP: (
            "layers.{bid}.feed_forward.experts.w2",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear_1",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.w2",  # dbrx
            "model.layers.{bid}.mlp.experts.down_proj",  # qwen2moe (merged)
        ),
        MODEL_TENSOR.FFN_DOWN_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.down_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.down_proj",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_Q_NORM: (
            "language_model.encoder.layers.{bid}.self_attention.q_layernorm",
            "model.layers.{bid}.self_attn.q_layernorm",  # persimmon
            "model.layers.{bid}.self_attn.q_norm",  # cohere
            "transformer.blocks.{bid}.attn.q_ln",  # sea-lion
            "encoder.layer.{bid}.attention.self.layer_norm_q",  # jina-bert-v2
            "transformer.layers.{bid}.attn.q_norm",  # openelm
        ),
        MODEL_TENSOR.ATTN_K_NORM: (
            "language_model.encoder.layers.{bid}.self_attention.k_layernorm",
            "model.layers.{bid}.self_attn.k_layernorm",  # persimmon
            "model.layers.{bid}.self_attn.k_norm",  # cohere
            "transformer.blocks.{bid}.attn.k_ln",  # sea-lion
            "encoder.layer.{bid}.attention.self.layer_norm_k",  # jina-bert-v2
            "transformer.layers.{bid}.attn.k_norm",  # openelm
        ),
        MODEL_TENSOR.ROPE_FREQS: (
            "language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq",  # persimmon
        ),
        MODEL_TENSOR.LAYER_OUT_NORM: (
            "encoder.layer.{bid}.output.LayerNorm",  # bert
            "encoder.layers.{bid}.norm2",  # nomic-bert
            "transformer.decoder_layer.{bid}.rms_norm_3",  # Grok
            "encoder.layer.{bid}.mlp.layernorm",  # jina-bert-v2
            "encoder.layer.{bid}.layer_norm_2",  # jina-v2-code
        ),
        MODEL_TENSOR.SSM_IN: (
            "model.layers.{bid}.in_proj",
            "backbone.layers.{bid}.mixer.in_proj",
        ),
        MODEL_TENSOR.SSM_CONV1D: (
            "model.layers.{bid}.conv1d",
            "backbone.layers.{bid}.mixer.conv1d",
        ),
        MODEL_TENSOR.SSM_X: (
            "model.layers.{bid}.x_proj",
            "backbone.layers.{bid}.mixer.x_proj",
        ),
        MODEL_TENSOR.SSM_DT: (
            "model.layers.{bid}.dt_proj",
            "backbone.layers.{bid}.mixer.dt_proj",
        ),
        MODEL_TENSOR.SSM_A: (
            "model.layers.{bid}.A_log",
            "backbone.layers.{bid}.mixer.A_log",
        ),
        MODEL_TENSOR.SSM_D: (
            "model.layers.{bid}.D",
            "backbone.layers.{bid}.mixer.D",
        ),
        MODEL_TENSOR.SSM_OUT: (
            "model.layers.{bid}.out_proj",
            "backbone.layers.{bid}.mixer.out_proj",
        ),
        MODEL_TENSOR.ATTN_Q_A: ("model.layers.{bid}.self_attn.q_a_proj",),  # deepseek2
        MODEL_TENSOR.ATTN_Q_B: ("model.layers.{bid}.self_attn.q_b_proj",),  # deepseek2
        MODEL_TENSOR.ATTN_KV_A_MQA: (
            "model.layers.{bid}.self_attn.kv_a_proj_with_mqa",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_KV_B: (
            "model.layers.{bid}.self_attn.kv_b_proj",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_Q_A_NORM: (
            "model.layers.{bid}.self_attn.q_a_layernorm",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_KV_A_NORM: (
            "model.layers.{bid}.self_attn.kv_a_layernorm",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_SUB_NORM: (
            "model.layers.{bid}.self_attn.inner_attn_ln",  # bitnet
        ),
        MODEL_TENSOR.FFN_SUB_NORM: ("model.layers.{bid}.mlp.ffn_layernorm",),  # bitnet
        MODEL_TENSOR.DEC_ATTN_NORM: ("decoder.block.{bid}.layer.0.layer_norm",),  # t5
        MODEL_TENSOR.DEC_ATTN_Q: ("decoder.block.{bid}.layer.0.SelfAttention.q",),  # t5
        MODEL_TENSOR.DEC_ATTN_K: ("decoder.block.{bid}.layer.0.SelfAttention.k",),  # t5
        MODEL_TENSOR.DEC_ATTN_V: ("decoder.block.{bid}.layer.0.SelfAttention.v",),  # t5
        MODEL_TENSOR.DEC_ATTN_OUT: (
            "decoder.block.{bid}.layer.0.SelfAttention.o",  # t5
        ),
        MODEL_TENSOR.DEC_ATTN_REL_B: (
            "decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_NORM: (
            "decoder.block.{bid}.layer.1.layer_norm",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_Q: (
            "decoder.block.{bid}.layer.1.EncDecAttention.q",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_K: (
            "decoder.block.{bid}.layer.1.EncDecAttention.k",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_V: (
            "decoder.block.{bid}.layer.1.EncDecAttention.v",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_OUT: (
            "decoder.block.{bid}.layer.1.EncDecAttention.o",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: (
            "decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias",  # t5
        ),
        MODEL_TENSOR.DEC_FFN_NORM: ("decoder.block.{bid}.layer.2.layer_norm",),  # t5
        MODEL_TENSOR.DEC_FFN_GATE: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wi_0",  # flan-t5
        ),
        MODEL_TENSOR.DEC_FFN_UP: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wi",  # t5
            "decoder.block.{bid}.layer.2.DenseReluDense.wi_1",  # flan-t5
        ),
        MODEL_TENSOR.DEC_FFN_DOWN: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wo",  # t5
        ),
        MODEL_TENSOR.DEC_OUTPUT_NORM: ("decoder.final_layer_norm",),  # t5
        MODEL_TENSOR.ENC_ATTN_NORM: ("encoder.block.{bid}.layer.0.layer_norm",),  # t5
        MODEL_TENSOR.ENC_ATTN_Q: ("encoder.block.{bid}.layer.0.SelfAttention.q",),  # t5
        MODEL_TENSOR.ENC_ATTN_K: ("encoder.block.{bid}.layer.0.SelfAttention.k",),  # t5
        MODEL_TENSOR.ENC_ATTN_V: ("encoder.block.{bid}.layer.0.SelfAttention.v",),  # t5
        MODEL_TENSOR.ENC_ATTN_OUT: (
            "encoder.block.{bid}.layer.0.SelfAttention.o",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_REL_B: (
            "encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias",  # t5
        ),
        MODEL_TENSOR.ENC_FFN_NORM: ("encoder.block.{bid}.layer.1.layer_norm",),  # t5
        MODEL_TENSOR.ENC_FFN_GATE: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wi_0",  # flan-t5
        ),
        MODEL_TENSOR.ENC_FFN_UP: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wi",  # t5
            "encoder.block.{bid}.layer.1.DenseReluDense.wi_1",  # flan-t5
        ),
        MODEL_TENSOR.ENC_FFN_DOWN: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wo",  # t5
        ),
        MODEL_TENSOR.ENC_OUTPUT_NORM: ("encoder.final_layer_norm",),  # t5
    }

    # architecture-specific block mappings
    arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = {
        MODEL_ARCH.ARCTIC: {
            MODEL_TENSOR.FFN_NORM: ("model.layers.{bid}.residual_layernorm",),
            MODEL_TENSOR.FFN_NORM_EXP: ("model.layers.{bid}.post_attention_layernorm",),
        },
    }

    # Per-instance resolved table: alias -> (tensor enum, canonical name).
    mapping: dict[str, tuple[MODEL_TENSOR, str]]

    def __init__(self, arch: MODEL_ARCH, n_blocks: int):
        """Build the alias table for *arch* with *n_blocks* transformer blocks."""
        self.mapping = {}
        for tensor, keys in self.mappings_cfg.items():
            if tensor not in MODEL_TENSORS[arch]:
                continue
            tensor_name = TENSOR_NAMES[tensor]
            self.mapping[tensor_name] = (tensor, tensor_name)
            for key in keys:
                self.mapping[key] = (tensor, tensor_name)
        # BUGFIX: merge the per-architecture overrides into a *local* dict.
        # The previous code mutated the class-level block_mappings_cfg via
        # .update(), which leaked one architecture's overrides into every
        # TensorNameMap constructed afterwards for a different architecture.
        block_mappings = {
            **self.block_mappings_cfg,
            **self.arch_block_mappings_cfg.get(arch, {}),
        }
        for bid in range(n_blocks):
            for tensor, keys in block_mappings.items():
                if tensor not in MODEL_TENSORS[arch]:
                    continue

                tensor_name = TENSOR_NAMES[tensor].format(bid=bid)
                self.mapping[tensor_name] = (tensor, tensor_name)
                for key in keys:
                    key = key.format(bid=bid)
                    self.mapping[key] = (tensor, tensor_name)

    def get_type_and_name(
        self, key: str, try_suffixes: Sequence[str] = ()
    ) -> tuple[MODEL_TENSOR, str] | None:
        """Resolve *key* (optionally stripping a suffix such as ".weight")
        to (tensor enum, canonical name + suffix), or None if unknown."""
        result = self.mapping.get(key)
        if result is not None:
            return result
        for suffix in try_suffixes:
            if key.endswith(suffix):
                result = self.mapping.get(key[: -len(suffix)])
                if result is not None:
                    return result[0], result[1] + suffix
        return None

    def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None:
        """Return only the canonical name for *key*, or None if unknown."""
        result = self.get_type_and_name(key, try_suffixes=try_suffixes)
        if result is None:
            return None
        return result[1]

    def get_type(
        self, key: str, try_suffixes: Sequence[str] = ()
    ) -> MODEL_TENSOR | None:
        """Return only the tensor enum for *key*, or None if unknown."""
        result = self.get_type_and_name(key, try_suffixes=try_suffixes)
        if result is None:
            return None
        return result[0]

    def __getitem__(self, key: str) -> str:
        # A miss on self.mapping[key] already raises KeyError(key), which is
        # exactly what the previous try/except re-raised.
        return self.mapping[key][1]

    def __contains__(self, key: str) -> bool:
        return key in self.mapping

    def __repr__(self) -> str:
        return repr(self.mapping)
+
+
def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap:
    """Convenience factory: build a TensorNameMap for *arch* with *n_blocks* blocks."""
    name_map = TensorNameMap(arch, n_blocks)
    return name_map
diff --git a/modules_forge/packages/gguf/utility.py b/modules_forge/packages/gguf/utility.py
new file mode 100644
index 0000000000000000000000000000000000000000..e72b904bdee292b1fd6b6cd5cee26f955bb1a6fa
--- /dev/null
+++ b/modules_forge/packages/gguf/utility.py
@@ -0,0 +1,101 @@
+from __future__ import annotations
+
+from typing import Literal
+
+
def fill_templated_filename(filename: str, output_type: str | None) -> str:
    """Expand output-type templates in *filename*.

    Supports '{ftype}'/'{outtype}' (lower-case), '{FTYPE}'/'{OUTTYPE}'
    (upper-case) and a bare positional '{}' placeholder; a None output
    type expands every placeholder to the empty string.
    """
    lower = "" if output_type is None else output_type.lower()
    upper = "" if output_type is None else output_type.upper()
    return filename.format(
        lower,
        outtype=lower,
        ftype=lower,
        OUTTYPE=upper,
        FTYPE=upper,
    )
+
+
def model_weight_count_rounded_notation(
    model_params_count: int, min_digits: int = 2
) -> str:
    """Format a raw parameter count as a short suffixed string, e.g. '7.2B'.

    The decimal precision grows so the integral part plus fraction shows at
    least *min_digits* significant digits.
    """
    # (threshold, scale factor, suffix) — the multiplication constants match
    # the original code exactly to keep identical float rounding.
    scaled_model_params = model_params_count * 1e-3
    scale_suffix = "K"
    for threshold, factor, suffix in (
        (1e12, 1e-12, "T"),  # Trillions Of Parameters
        (1e9, 1e-9, "B"),    # Billions Of Parameters
        (1e6, 1e-6, "M"),    # Millions Of Parameters
    ):
        if model_params_count > threshold:
            scaled_model_params = model_params_count * factor
            scale_suffix = suffix
            break

    integral_digits = str(round(scaled_model_params)).lstrip("0")
    precision = max(min_digits - len(integral_digits), 0)
    return f"{scaled_model_params:.{precision}f}{scale_suffix}"
+
+
def size_label(
    total_params: int, shared_params: int, expert_params: int, expert_count: int
) -> str:
    """Return a human-readable size label for a model.

    Mixture-of-experts models (expert_count > 0) are labelled
    '<experts>x<per-expert size>' using shared + expert parameters;
    dense models use the total parameter count.
    """
    if expert_count <= 0:
        return model_weight_count_rounded_notation(abs(total_params), min_digits=2)

    per_expert_total = abs(shared_params) + abs(expert_params)
    pretty = model_weight_count_rounded_notation(per_expert_total, min_digits=2)
    return f"{expert_count}x{pretty}"
+
+
def naming_convention(
    model_name: str | None,
    base_name: str | None,
    finetune_string: str | None,
    version_string: str | None,
    size_label: str | None,
    output_type: str | None,
    model_type: Literal["vocab", "LoRA"] | None = None,
) -> str:
    """Assemble a GGUF file name from its optional components.

    Reference: https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#gguf-naming-convention
    Produces '<name>[-<size>][-<finetune>][-<version>][-<ENCODING>][-<kind>]';
    every None component is simply omitted.
    """

    def dashed(text: str) -> str:
        # Trim whitespace and replace inner spaces with dashes.
        return text.strip().replace(" ", "-")

    if base_name is not None:
        name = dashed(base_name).replace("/", "-")
    elif model_name is not None:
        name = dashed(model_name).replace("/", "-")
    else:
        name = "ggml-model"

    pieces = [name]
    if size_label is not None:
        pieces.append(size_label)
    if finetune_string is not None:
        pieces.append(dashed(finetune_string))
    if version_string is not None:
        pieces.append(dashed(version_string))
    if output_type is not None:
        pieces.append(dashed(output_type).upper())
    if model_type is not None:
        pieces.append(dashed(model_type))

    return "-".join(pieces)
diff --git a/modules_forge/packages/huggingface_guess/LICENSE b/modules_forge/packages/huggingface_guess/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..810fce6e9bf2aa10265b85614db5ac65941ecf81
--- /dev/null
+++ b/modules_forge/packages/huggingface_guess/LICENSE
@@ -0,0 +1,621 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
diff --git a/modules_forge/packages/huggingface_guess/README.md b/modules_forge/packages/huggingface_guess/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d337b73dc6aabc09cd2626276720b016cbcaa855
--- /dev/null
+++ b/modules_forge/packages/huggingface_guess/README.md
@@ -0,0 +1,28 @@
+# HuggingFace Guess
+A simple tool to guess a HuggingFace repo URL from a state dict.
+
+> The main model detection logics are extracted from **Diffusers** and stolen from **ComfyUI**.
+
+
+
+- This repo does almost the same thing as the following code, but a bit stronger and more robust.
+
+```py
+from diffusers.loaders.single_file_utils import fetch_diffusers_config
+```
+
+- The following code will print `runwayml/stable-diffusion-v1-5`
+
+```py
+import safetensors.torch as sf
+import huggingface_guess
+
+
+state_dict = sf.load_file("./realisticVisionV51_v51VAE.safetensors")
+repo_name = huggingface_guess.guess_repo_name(state_dict)
+print(repo_name)
+```
+
+
+
+Then you can download (or prefetch configs) from HuggingFace to instantiate models and load weights.
diff --git a/modules_forge/packages/huggingface_guess/__init__.py b/modules_forge/packages/huggingface_guess/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8facf059e25e95acace2e3fc58ad965f490fe760
--- /dev/null
+++ b/modules_forge/packages/huggingface_guess/__init__.py
@@ -0,0 +1,38 @@
+"""
+Copyright (C) 2024 lllyasviel
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see https://www.gnu.org/licenses/
+"""
+
+from .detection import model_config_from_unet, unet_prefix_from_state_dict
+
+
def guess(state_dict):
    """Detect the model family of *state_dict* and return its config object.

    The UNet key prefix is located first, then the weights under it are
    matched against the registered model list.  The detection bookkeeping
    keys ("image_model"/"audio_model") are stripped from the resulting
    unet_config before returning.
    """
    prefix = unet_prefix_from_state_dict(state_dict)
    config = model_config_from_unet(state_dict, prefix, use_base_if_no_match=False)
    config.unet_key_prefix = [prefix]
    for marker in ("image_model", "audio_model"):
        config.unet_config.pop(marker, None)
    return config
+
+
def guess_repo_name(state_dict):
    """Return the HuggingFace repo id guessed from *state_dict*.

    Asserts that detection succeeded before reading the repo attribute.
    """
    detected = guess(state_dict)
    assert detected is not None
    repo_id = detected.huggingface_repo
    return repo_id
diff --git a/modules_forge/packages/huggingface_guess/detection.py b/modules_forge/packages/huggingface_guess/detection.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bea770271a4c9256f05d37a8f350b61107d9d93
--- /dev/null
+++ b/modules_forge/packages/huggingface_guess/detection.py
@@ -0,0 +1,560 @@
+# reference: https://github.com/comfyanonymous/ComfyUI/blob/v0.3.52/comfy/model_detection.py
+
+import logging
+
+from . import model_list
+
+
def count_blocks(state_dict_keys, prefix_string):
    """Count consecutive numbered blocks present in *state_dict_keys*.

    *prefix_string* is a format template with a single ``{}`` placeholder
    for the block index, e.g. ``"model.blocks.{}."``.  Indices are probed
    from 0 upwards and counting stops at the first index with no matching
    key, so gaps in the numbering terminate the count early.

    Returns the number of consecutive indices (starting at 0) for which at
    least one key begins with the formatted prefix.
    """
    count = 0
    # Probe index 0, 1, 2, ... until an index has no matching key.
    # (Replaces the original manual found-flag loop with `if c == False`.)
    while any(k.startswith(prefix_string.format(count)) for k in state_dict_keys):
        count += 1
    return count
+
+
def calculate_transformer_depth(prefix, state_dict_keys, state_dict):
    """Inspect one UNet block and report its spatial-transformer layout.

    Returns ``(depth, context_dim, use_linear_in_transformer, time_stack,
    time_stack_cross)`` when the block at *prefix* contains transformer
    blocks, otherwise ``None``.
    """
    transformer_prefix = prefix + "1.transformer_blocks."
    if not any(k.startswith(transformer_prefix) for k in state_dict_keys):
        return None

    depth = count_blocks(state_dict_keys, transformer_prefix + "{}")
    # Cross-attention width read from the first block's to_k projection.
    context_dim = state_dict[transformer_prefix + "0.attn2.to_k.weight"].shape[1]
    # proj_in is a Linear (2-D weight) in newer models, a 1x1 conv otherwise.
    use_linear = len(state_dict[prefix + "1.proj_in.weight"].shape) == 2
    # Temporal (video) layers may live under either naming scheme.
    self_attn_keys = (
        prefix + "1.time_stack.0.attn1.to_q.weight",
        prefix + "1.time_mix_blocks.0.attn1.to_q.weight",
    )
    cross_attn_keys = (
        prefix + "1.time_stack.0.attn2.to_q.weight",
        prefix + "1.time_mix_blocks.0.attn2.to_q.weight",
    )
    time_stack = any(k in state_dict for k in self_attn_keys)
    time_stack_cross = any(k in state_dict for k in cross_attn_keys)
    return depth, context_dim, use_linear, time_stack, time_stack_cross
+
+
def detect_unet_config(state_dict: dict, key_prefix: str):
    """Infer a model config dict from the weights found under *key_prefix*.

    DiT-style architectures (Lumina 2 / Z-Image, Wan 2.1, SVDQ/Flux/Chroma,
    Qwen Image) are recognized by characteristic key names and return a
    ``dit_config`` dict; classic ldm/sgm UNets are probed block-by-block and
    return a ``unet_config`` dict.  Returns ``None`` when no known layout is
    found.
    """
    state_dict_keys = list(state_dict.keys())

    if "{}cap_embedder.1.weight".format(key_prefix) in state_dict_keys: # Lumina 2
        dit_config = {}
        dit_config["image_model"] = "lumina2"
        dit_config["patch_size"] = 2
        dit_config["in_channels"] = 16
        w = state_dict["{}cap_embedder.1.weight".format(key_prefix)]
        dit_config["dim"] = int(w.shape[0])
        dit_config["cap_feat_dim"] = int(w.shape[1])
        dit_config["n_layers"] = count_blocks(state_dict_keys, "{}layers.".format(key_prefix) + "{}.")
        dit_config["qk_norm"] = True

        # The hidden dim disambiguates the two architectures sharing this
        # key layout.
        if dit_config["dim"] == 2304: # Lumina 2
            dit_config["n_heads"] = 24
            dit_config["n_kv_heads"] = 8
            dit_config["axes_dims"] = [32, 32, 32]
            dit_config["axes_lens"] = [300, 512, 512]
            dit_config["rope_theta"] = 10000.0
            dit_config["ffn_dim_multiplier"] = 4.0
        elif dit_config["dim"] == 3840: # Z-Image
            # qweight keys indicate a nunchaku (SVD-quantized) checkpoint.
            dit_config["nunchaku"] = "{}layers.0.attention.to_out.0.qweight".format(key_prefix) in state_dict_keys
            dit_config["n_heads"] = 30
            dit_config["n_kv_heads"] = 30
            dit_config["axes_dims"] = [32, 48, 48]
            dit_config["axes_lens"] = [1536, 512, 512]
            dit_config["rope_theta"] = 256.0
            dit_config["ffn_dim_multiplier"] = 8.0 / 3.0
            dit_config["z_image_modulation"] = True
            dit_config["time_scale"] = 1000.0
            if "{}cap_pad_token".format(key_prefix) in state_dict_keys:
                dit_config["pad_tokens_multiple"] = 32

        return dit_config

    if "{}head.modulation".format(key_prefix) in state_dict_keys: # Wan 2.1
        dit_config = {}
        dit_config["image_model"] = "wan2.1"
        dim = state_dict["{}head.modulation".format(key_prefix)].shape[-1]
        # head.head projects to out_dim * patch volume (1*2*2 = 4).
        out_dim = state_dict["{}head.head.weight".format(key_prefix)].shape[0] // 4
        dit_config["dim"] = int(dim)
        dit_config["out_dim"] = int(out_dim)
        dit_config["num_heads"] = int(dim // 128)
        dit_config["ffn_dim"] = int(state_dict["{}blocks.0.ffn.0.weight".format(key_prefix)].shape[0])
        dit_config["num_layers"] = count_blocks(state_dict_keys, "{}blocks.".format(key_prefix) + "{}.")
        dit_config["patch_size"] = (1, 2, 2)
        dit_config["freq_dim"] = 256
        dit_config["window_size"] = (-1, -1)
        dit_config["qk_norm"] = True
        dit_config["cross_attn_norm"] = True
        dit_config["eps"] = 1e-6
        dit_config["in_dim"] = int(state_dict["{}patch_embedding.weight".format(key_prefix)].shape[1])
        # The CLIP image embedder is only present in image-to-video variants.
        if "{}img_emb.proj.0.bias".format(key_prefix) in state_dict_keys:
            dit_config["model_type"] = "i2v"
        else:
            dit_config["model_type"] = "t2v"
        # First/last-frame positional embedding (FLF2V variants).
        flf_weight = state_dict.get("{}img_emb.emb_pos".format(key_prefix))
        if flf_weight is not None:
            dit_config["flf_pos_embed_token_number"] = flf_weight.shape[1]
        return dit_config

    if "{}single_transformer_blocks.0.mlp_fc1.qweight".format(key_prefix) in state_dict_keys: # SVDQ
        # Fixed dev-size Flux config for nunchaku SVD-quantized checkpoints;
        # nothing is probed because the quantized keys hide the real shapes.
        dit_config = {"nunchaku": True}
        dit_config["axes_dim"] = [16, 56, 56]
        dit_config["context_in_dim"] = 4096
        dit_config["depth"] = 19
        dit_config["depth_single_blocks"] = 38
        dit_config["disable_unet_model_creation"] = True
        dit_config["guidance_embed"] = True
        dit_config["hidden_size"] = 3072
        dit_config["image_model"] = "flux"
        dit_config["in_channels"] = 16
        dit_config["mlp_ratio"] = 4.0
        dit_config["num_heads"] = 24
        dit_config["out_channels"] = 16
        dit_config["patch_size"] = 2
        dit_config["qkv_bias"] = True
        dit_config["theta"] = 10000
        dit_config["vec_in_dim"] = 768
        return dit_config

    if "{}double_blocks.0.img_attn.norm.key_norm.scale".format(key_prefix) in state_dict_keys and "{}img_in.weight".format(key_prefix) in state_dict_keys: # Flux
        dit_config = {}
        dit_config["image_model"] = "flux"
        dit_config["in_channels"] = 16
        patch_size = 2
        dit_config["patch_size"] = patch_size
        in_key = "{}img_in.weight".format(key_prefix)
        if in_key in state_dict_keys:
            # img_in folds the patch into channels; undo to get latent channels.
            dit_config["in_channels"] = state_dict[in_key].shape[1] // (patch_size * patch_size)
        dit_config["out_channels"] = 16
        vec_in_key = "{}vector_in.in_layer.weight".format(key_prefix)
        if vec_in_key in state_dict_keys:
            dit_config["vec_in_dim"] = state_dict[vec_in_key].shape[1]
        dit_config["context_in_dim"] = 4096
        dit_config["hidden_size"] = 3072
        dit_config["mlp_ratio"] = 4.0
        dit_config["num_heads"] = 24
        dit_config["depth"] = count_blocks(state_dict_keys, "{}double_blocks.".format(key_prefix) + "{}.")
        dit_config["depth_single_blocks"] = count_blocks(state_dict_keys, "{}single_blocks.".format(key_prefix) + "{}.")
        dit_config["axes_dim"] = [16, 56, 56]
        dit_config["theta"] = 10000
        dit_config["qkv_bias"] = True
        # The distilled guidance layer distinguishes Chroma from plain Flux.
        if "{}distilled_guidance_layer.0.norms.0.scale".format(key_prefix) in state_dict_keys or "{}distilled_guidance_layer.norms.0.scale".format(key_prefix) in state_dict_keys: # Chroma
            dit_config["image_model"] = "chroma"
            dit_config["in_channels"] = 64
            dit_config["out_channels"] = 64
            dit_config["in_dim"] = 64
            dit_config["out_dim"] = 3072
            dit_config["hidden_dim"] = 5120
            dit_config["n_layers"] = 5
        else:
            dit_config["guidance_embed"] = "{}guidance_in.in_layer.weight".format(key_prefix) in state_dict_keys
        return dit_config

    if "{}txt_norm.weight".format(key_prefix) in state_dict_keys: # Qwen Image
        _qweight: bool = "{}transformer_blocks.0.attn.to_qkv.qweight".format(key_prefix) in state_dict_keys
        dit_config = {"nunchaku": _qweight}
        dit_config["image_model"] = "qwen_image"
        dit_config["in_channels"] = state_dict["{}img_in.weight".format(key_prefix)].shape[1]
        dit_config["num_layers"] = count_blocks(state_dict_keys, "{}transformer_blocks.".format(key_prefix) + "{}.")
        return dit_config

    # No DiT matched; fall through to the classic ldm/sgm UNet probe.
    if "{}input_blocks.0.0.weight".format(key_prefix) not in state_dict_keys:
        return None

    unet_config = {
        "use_checkpoint": False,
        "image_size": 32,
        "use_spatial_transformer": True,
        "legacy": False,
    }

    # label_emb presence means an ADM-conditioned model (SDXL-style).
    y_input = "{}label_emb.0.0.weight".format(key_prefix)
    if y_input in state_dict_keys:
        unet_config["num_classes"] = "sequential"
        unet_config["adm_in_channels"] = state_dict[y_input].shape[1]
    else:
        unet_config["adm_in_channels"] = None

    model_channels = state_dict["{}input_blocks.0.0.weight".format(key_prefix)].shape[0]
    in_channels = state_dict["{}input_blocks.0.0.weight".format(key_prefix)].shape[1]

    out_key = "{}out.2.weight".format(key_prefix)
    if out_key in state_dict:
        out_channels = state_dict[out_key].shape[0]
    else:
        out_channels = 4

    num_res_blocks = []
    channel_mult = []
    transformer_depth = []
    transformer_depth_output = []
    context_dim = None
    use_linear_in_transformer = False

    video_model = False

    # NOTE(review): current_res and the count initializer below are never
    # read after the loop; kept for parity with the upstream detector.
    current_res = 1
    count = 0

    last_res_blocks = 0
    last_channel_mult = 0

    # Walk the flattened input_blocks; downsample ops mark level boundaries.
    input_block_count = count_blocks(state_dict_keys, "{}input_blocks".format(key_prefix) + ".{}.")
    for count in range(input_block_count):
        prefix = "{}input_blocks.{}.".format(key_prefix, count)
        # output_blocks mirror input_blocks in reverse order.
        prefix_output = "{}output_blocks.{}.".format(key_prefix, input_block_count - count - 1)

        block_keys = sorted(list(filter(lambda a: a.startswith(prefix), state_dict_keys)))
        if len(block_keys) == 0:
            break

        block_keys_output = sorted(list(filter(lambda a: a.startswith(prefix_output), state_dict_keys)))

        if "{}0.op.weight".format(prefix) in block_keys: # new layer
            # Downsample op: close out the previous resolution level.
            num_res_blocks.append(last_res_blocks)
            channel_mult.append(last_channel_mult)

            current_res *= 2
            last_res_blocks = 0
            last_channel_mult = 0
            out = calculate_transformer_depth(prefix_output, state_dict_keys, state_dict)
            if out is not None:
                transformer_depth_output.append(out[0])
            else:
                transformer_depth_output.append(0)
        else:
            res_block_prefix = "{}0.in_layers.0.weight".format(prefix)
            if res_block_prefix in block_keys:
                last_res_blocks += 1
                last_channel_mult = state_dict["{}0.out_layers.3.weight".format(prefix)].shape[0] // model_channels

                out = calculate_transformer_depth(prefix, state_dict_keys, state_dict)
                if out is not None:
                    transformer_depth.append(out[0])
                    # Record transformer globals from the first block that has one.
                    if context_dim is None:
                        context_dim = out[1]
                        use_linear_in_transformer = out[2]
                        video_model = out[3]
                else:
                    transformer_depth.append(0)

            res_block_prefix = "{}0.in_layers.0.weight".format(prefix_output)
            if res_block_prefix in block_keys_output:
                out = calculate_transformer_depth(prefix_output, state_dict_keys, state_dict)
                if out is not None:
                    transformer_depth_output.append(out[0])
                else:
                    transformer_depth_output.append(0)

    # Close out the deepest resolution level.
    num_res_blocks.append(last_res_blocks)
    channel_mult.append(last_channel_mult)
    if "{}middle_block.1.proj_in.weight".format(key_prefix) in state_dict_keys:
        transformer_depth_middle = count_blocks(state_dict_keys, "{}middle_block.1.transformer_blocks.".format(key_prefix) + "{}")
    elif "{}middle_block.0.in_layers.0.weight".format(key_prefix) in state_dict_keys:
        transformer_depth_middle = -1   # resnet-only middle block
    else:
        transformer_depth_middle = -2   # no middle block at all

    unet_config["in_channels"] = in_channels
    unet_config["out_channels"] = out_channels
    unet_config["model_channels"] = model_channels
    unet_config["num_res_blocks"] = num_res_blocks
    unet_config["transformer_depth"] = transformer_depth
    unet_config["transformer_depth_output"] = transformer_depth_output
    unet_config["channel_mult"] = channel_mult
    unet_config["transformer_depth_middle"] = transformer_depth_middle
    unet_config["use_linear_in_transformer"] = use_linear_in_transformer
    unet_config["context_dim"] = context_dim

    # Temporal (video) UNets are not supported by this detector.
    assert not video_model
    unet_config["use_temporal_resblock"] = False
    unet_config["use_temporal_attention"] = False

    return unet_config
+
+
def model_config_from_unet_config(unet_config, state_dict=None):
    """Return the first registered model config matching *unet_config*.

    Logs an error and returns ``None`` when nothing matches.
    """
    matched = next(
        (cfg for cfg in model_list.models if cfg.matches(unet_config, state_dict)),
        None,
    )
    if matched is not None:
        return matched(unet_config)

    logging.error("no match {}".format(unet_config))
    return None
+
+
def model_config_from_unet(state_dict, unet_key_prefix, use_base_if_no_match=False):
    """Detect the UNet config under *unet_key_prefix* and match it to a model.

    Returns ``None`` when no UNet is detected.  When detection succeeds but
    no registered model matches, the generic ``BASE`` config is returned if
    *use_base_if_no_match* is set, otherwise ``None``.
    """
    detected = detect_unet_config(state_dict, unet_key_prefix)
    if detected is None:
        return None

    matched = model_config_from_unet_config(detected, state_dict)
    if matched is not None:
        return matched
    return model_list.BASE(detected) if use_base_if_no_match else None
+
+
def top_candidate(state_dict, candidates):
    """Score candidate key prefixes against *state_dict*.

    Each key is credited to the first candidate it starts with.  Returns
    ``(candidate, hits)`` for the best-scoring candidate; ties keep the
    earliest entry of *candidates*.
    """
    counts = dict.fromkeys(candidates, 0)
    for key in state_dict:
        winner = next((c for c in candidates if key.startswith(c)), None)
        if winner is not None:
            counts[winner] += 1
    best = max(counts, key=counts.get)
    return best, counts[best]
+
+
def unet_prefix_from_state_dict(state_dict):
    """Guess the key prefix under which the diffusion model weights live.

    The known prefixes below are scored by how many keys they match; a
    handful of accidental hits (<= 5) is treated as noise and the generic
    ``"model."`` prefix is returned instead.

    Returns the winning prefix string, always ending with a dot.
    """
    candidates = [
        "model.diffusion_model.",  # ldm/sgm models
        "model.model.",  # audio models
        "net.",  # cosmos
    ]
    # Reuse the shared scoring helper instead of re-counting inline
    # (previously this duplicated top_candidate's loop verbatim).
    top, hits = top_candidate(state_dict, candidates)
    if hits > 5:
        return top
    return "model."  # etc.
+
+
def convert_config(unet_config):
    """Normalize a legacy ldm-style UNet config into the per-block layout.

    Scalar ``num_res_blocks`` / ``transformer_depth`` entries are expanded
    to one entry per channel-multiplier level, and a legacy
    ``attention_resolutions`` list is translated into explicit
    ``transformer_depth`` / ``transformer_depth_output`` /
    ``transformer_depth_middle`` values.  The input dict is not modified;
    a converted copy is returned.
    """
    converted = dict(unet_config)
    res_blocks = converted.get("num_res_blocks", None)
    mult = converted.get("channel_mult", None)

    if isinstance(res_blocks, int):
        res_blocks = [res_blocks] * len(mult)

    if "attention_resolutions" in converted:
        attn_res = converted.pop("attention_resolutions")
        depth = converted.get("transformer_depth", None)
        depth_middle = converted.get("transformer_depth_middle", None)

        if isinstance(depth, int):
            depth = [depth] * len(mult)
        if depth_middle is None:
            depth_middle = depth[-1]

        depth_in = []
        depth_out = []
        resolution = 1
        for level, blocks in enumerate(res_blocks):
            # Levels whose resolution is listed get transformers, others 0.
            d = depth[level] if resolution in attn_res else 0
            depth_in.extend([d] * blocks)
            # Output side has one extra block per level (the skip merge).
            depth_out.extend([d] * (blocks + 1))
            resolution *= 2

        converted["transformer_depth"] = depth_in
        converted["transformer_depth_output"] = depth_out
        converted["transformer_depth_middle"] = depth_middle

    converted["num_res_blocks"] = res_blocks
    return converted
+
+
def unet_config_from_diffusers_unet(state_dict, dtype=None):
    """Identify a diffusers-format UNet and return its ldm-style config.

    Probes the down_blocks layout of *state_dict* to build a fingerprint
    (``match``), compares it against the hardcoded known configurations
    below, and returns the matching config run through ``convert_config``.
    Returns ``None`` if nothing matches.
    """
    match = {}
    transformer_depth = []

    attn_res = 1
    down_blocks = count_blocks(state_dict, "down_blocks.{}")
    for i in range(down_blocks):
        attn_blocks = count_blocks(state_dict, "down_blocks.{}.attentions.".format(i) + "{}")
        res_blocks = count_blocks(state_dict, "down_blocks.{}.resnets.".format(i) + "{}")
        for ab in range(attn_blocks):
            transformer_count = count_blocks(
                state_dict,
                "down_blocks.{}.attentions.{}.transformer_blocks.".format(i, ab) + "{}",
            )
            transformer_depth.append(transformer_count)
            if transformer_count > 0:
                match["context_dim"] = state_dict["down_blocks.{}.attentions.{}.transformer_blocks.0.attn2.to_k.weight".format(i, ab)].shape[1]

        # NOTE(review): attn_res is never read after this loop.
        attn_res *= 2
        if attn_blocks == 0:
            # Attention-free level contributes zero-depth entries.
            # (The inner loop shadows the outer `i`; harmless in Python
            # since the outer `for` rebinds `i` each iteration.)
            for i in range(res_blocks):
                transformer_depth.append(0)

    match["transformer_depth"] = transformer_depth

    match["model_channels"] = state_dict["conv_in.weight"].shape[0]
    match["in_channels"] = state_dict["conv_in.weight"].shape[1]
    match["adm_in_channels"] = None
    if "class_embedding.linear_1.weight" in state_dict:
        match["adm_in_channels"] = state_dict["class_embedding.linear_1.weight"].shape[1]
    elif "add_embedding.linear_1.weight" in state_dict:
        match["adm_in_channels"] = state_dict["add_embedding.linear_1.weight"].shape[1]

    # ---- Known configurations the fingerprint is compared against ----

    SDXL = {
        "use_checkpoint": False,
        "image_size": 32,
        "out_channels": 4,
        "use_spatial_transformer": True,
        "legacy": False,
        "num_classes": "sequential",
        "adm_in_channels": 2816,
        "dtype": dtype,
        "in_channels": 4,
        "model_channels": 320,
        "num_res_blocks": [2, 2, 2],
        "transformer_depth": [0, 0, 2, 2, 10, 10],
        "channel_mult": [1, 2, 4],
        "transformer_depth_middle": 10,
        "use_linear_in_transformer": True,
        "context_dim": 2048,
        "num_head_channels": 64,
        "transformer_depth_output": [0, 0, 0, 2, 2, 2, 10, 10, 10],
        "use_temporal_attention": False,
        "use_temporal_resblock": False,
    }

    SDXL_refiner = {
        "use_checkpoint": False,
        "image_size": 32,
        "out_channels": 4,
        "use_spatial_transformer": True,
        "legacy": False,
        "num_classes": "sequential",
        "adm_in_channels": 2560,
        "dtype": dtype,
        "in_channels": 4,
        "model_channels": 384,
        "num_res_blocks": [2, 2, 2, 2],
        "transformer_depth": [0, 0, 4, 4, 4, 4, 0, 0],
        "channel_mult": [1, 2, 4, 4],
        "transformer_depth_middle": 4,
        "use_linear_in_transformer": True,
        "context_dim": 1280,
        "num_head_channels": 64,
        "transformer_depth_output": [0, 0, 0, 4, 4, 4, 4, 4, 4, 0, 0, 0],
        "use_temporal_attention": False,
        "use_temporal_resblock": False,
    }

    SD15 = {
        "use_checkpoint": False,
        "image_size": 32,
        "out_channels": 4,
        "use_spatial_transformer": True,
        "legacy": False,
        "adm_in_channels": None,
        "dtype": dtype,
        "in_channels": 4,
        "model_channels": 320,
        "num_res_blocks": [2, 2, 2, 2],
        "transformer_depth": [1, 1, 1, 1, 1, 1, 0, 0],
        "channel_mult": [1, 2, 4, 4],
        "transformer_depth_middle": 1,
        "use_linear_in_transformer": False,
        "context_dim": 768,
        "num_heads": 8,
        "transformer_depth_output": [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
        "use_temporal_attention": False,
        "use_temporal_resblock": False,
    }

    SDXL_mid_cnet = {
        "use_checkpoint": False,
        "image_size": 32,
        "out_channels": 4,
        "use_spatial_transformer": True,
        "legacy": False,
        "num_classes": "sequential",
        "adm_in_channels": 2816,
        "dtype": dtype,
        "in_channels": 4,
        "model_channels": 320,
        "num_res_blocks": [2, 2, 2],
        "transformer_depth": [0, 0, 0, 0, 1, 1],
        "channel_mult": [1, 2, 4],
        "transformer_depth_middle": 1,
        "use_linear_in_transformer": True,
        "context_dim": 2048,
        "num_head_channels": 64,
        "transformer_depth_output": [0, 0, 0, 0, 0, 0, 1, 1, 1],
        "use_temporal_attention": False,
        "use_temporal_resblock": False,
    }

    SDXL_small_cnet = {
        "use_checkpoint": False,
        "image_size": 32,
        "out_channels": 4,
        "use_spatial_transformer": True,
        "legacy": False,
        "num_classes": "sequential",
        "adm_in_channels": 2816,
        "dtype": dtype,
        "in_channels": 4,
        "model_channels": 320,
        "num_res_blocks": [2, 2, 2],
        "transformer_depth": [0, 0, 0, 0, 0, 0],
        "channel_mult": [1, 2, 4],
        "transformer_depth_middle": 0,
        "use_linear_in_transformer": True,
        "num_head_channels": 64,
        "context_dim": 1,
        "transformer_depth_output": [0, 0, 0, 0, 0, 0, 0, 0, 0],
        "use_temporal_attention": False,
        "use_temporal_resblock": False,
    }

    SDXL_diffusers_inpaint = {
        "use_checkpoint": False,
        "image_size": 32,
        "out_channels": 4,
        "use_spatial_transformer": True,
        "legacy": False,
        "num_classes": "sequential",
        "adm_in_channels": 2816,
        "dtype": dtype,
        "in_channels": 9,
        "model_channels": 320,
        "num_res_blocks": [2, 2, 2],
        "transformer_depth": [0, 0, 2, 2, 10, 10],
        "channel_mult": [1, 2, 4],
        "transformer_depth_middle": 10,
        "use_linear_in_transformer": True,
        "context_dim": 2048,
        "num_head_channels": 64,
        "transformer_depth_output": [0, 0, 0, 2, 2, 2, 10, 10, 10],
        "use_temporal_attention": False,
        "use_temporal_resblock": False,
    }

    supported_models = [
        SD15,
        SDXL,
        SDXL_refiner,
        SDXL_mid_cnet,
        SDXL_small_cnet,
        SDXL_diffusers_inpaint,
    ]

    # A config matches when every fingerprinted field agrees exactly.
    for unet_config in supported_models:
        matches = True
        for k in match:
            if match[k] != unet_config[k]:
                matches = False
                break
        if matches:
            return convert_config(unet_config)
    return None
+
+
def model_config_from_diffusers_unet(state_dict):
    """Match a diffusers-format UNet state dict to a registered model config.

    Returns ``None`` when the layout is not recognized.
    """
    converted = unet_config_from_diffusers_unet(state_dict)
    if converted is None:
        return None
    return model_config_from_unet_config(converted)
diff --git a/modules_forge/packages/huggingface_guess/diffusers_convert.py b/modules_forge/packages/huggingface_guess/diffusers_convert.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd3f6f7ce502952c23a3233c6efd7fa1c663a743
--- /dev/null
+++ b/modules_forge/packages/huggingface_guess/diffusers_convert.py
@@ -0,0 +1,280 @@
+import logging
+import re
+
+import torch
+
+# conversion code from https://github.com/huggingface/diffusers/blob/main/scripts/convert_diffusers_to_original_stable_diffusion.py
+
+# =============== #
+# UNet Conversion #
+# =============== #
+
# Static whole-key renames between the two layouts.
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

# Substring renames applied only inside resnet blocks.
unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    # SD flattens each level into input_blocks: 2 resnets (+ attentions)
    # plus one downsample per level, hence the 3*i+j+1 indexing below.

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3 * i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3 * i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3 * i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3 * i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3 * (i + 1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3 * i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2 * j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
+
+
def convert_unet_state_dict(unet_state_dict):
    """Rename diffusers UNet keys to the stable-diffusion (ldm) layout.

    Builds an hf-key -> sd-key mapping by applying the module-level
    conversion tables in sequence (direct whole-key renames, then
    resnet-internal renames, then block-prefix renames) and returns a new
    dict with the renamed keys.  NOTE: the passes are order-sensitive;
    keep them in this exact sequence.
    """
    mapping = {key: key for key in unet_state_dict}
    # Pass 1: exact whole-key renames.
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    # Pass 2: substring renames inside resnet blocks only.
    for hf_key in mapping:
        if "resnets" in hf_key:
            renamed = mapping[hf_key]
            for sd_part, hf_part in unet_conversion_map_resnet:
                renamed = renamed.replace(hf_part, sd_part)
            mapping[hf_key] = renamed
    # Pass 3: block-prefix renames everywhere.
    for hf_key in mapping:
        renamed = mapping[hf_key]
        for sd_part, hf_part in unet_conversion_map_layer:
            renamed = renamed.replace(hf_part, sd_part)
        mapping[hf_key] = renamed
    return {sd_key: unet_state_dict[hf_key] for hf_key, sd_key in mapping.items()}
+
+
+# ============== #
+# VAE Conversion #
+# ============== #
+
# Prefix renames between diffusers and SD VAE layouts.
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3 - i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3 - i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i + 1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

# Attention-layer renames; both the old ("query.") and new ("to_q.")
# diffusers attention names map to the same SD names, hence two entries
# per projection.
vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("q.", "to_q."),
    ("k.", "to_k."),
    ("v.", "to_v."),
    ("proj_out.", "to_out.0."),
    ("proj_out.", "proj_attn."),
]
+
+
def reshape_weight_for_sd(w):
    """Append two singleton spatial dims so an HF linear weight matches the
    shape of the corresponding SD 1x1-conv weight."""
    conv_shape = tuple(w.shape) + (1, 1)
    return w.reshape(conv_shape)
+
+
def convert_vae_state_dict(vae_state_dict):
    """Rename diffusers VAE keys to the stable-diffusion layout.

    Applies the general prefix table to every key, then the attention
    table to keys inside attention modules, and finally reshapes the
    mid-block attention q/k/v/proj_out weights from linear to 1x1-conv
    form.  Returns a new dict; the input is not modified.
    """
    mapping = {key: key for key in vae_state_dict}
    # Pass 1: general prefix renames on every key.
    for hf_key in mapping:
        renamed = mapping[hf_key]
        for sd_part, hf_part in vae_conversion_map:
            renamed = renamed.replace(hf_part, sd_part)
        mapping[hf_key] = renamed
    # Pass 2: attention-specific renames.
    for hf_key in mapping:
        if "attentions" not in hf_key:
            continue
        renamed = mapping[hf_key]
        for sd_part, hf_part in vae_conversion_map_attn:
            renamed = renamed.replace(hf_part, sd_part)
        mapping[hf_key] = renamed
    converted = {sd_key: vae_state_dict[hf_key] for hf_key, sd_key in mapping.items()}
    # SD stores the mid-attention projections as 1x1 convs; HF stores them
    # as linear layers, so those weights gain two trailing singleton dims.
    for sd_key, value in converted.items():
        for weight_name in ("q", "k", "v", "proj_out"):
            if f"mid.attn_1.{weight_name}.weight" in sd_key:
                logging.debug(f"Reshaping {sd_key} for SD format")
                converted[sd_key] = reshape_weight_for_sd(value)
    return converted
+
+
+# =========================#
+# Text Encoder Conversion #
+# =========================#
+
+
# Name-fragment renames between HF CLIP and the original SD text encoder.
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    (
        "token_embedding.weight",
        "transformer.text_model.embeddings.token_embedding.weight",
    ),
    (
        "positional_embedding",
        "transformer.text_model.embeddings.position_embedding.weight",
    ),
]
# Escaped-HF-fragment -> SD-fragment lookup, plus a single alternation
# regex that matches any of the HF fragments for one-pass renaming.
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
+
+
+# This function exists because at the time of writing torch.cat can't do fp8 with cuda
def cat_tensors(tensors):
    """Concatenate *tensors* along dim 0 without using ``torch.cat``.

    Exists because, at the time of writing, ``torch.cat`` cannot handle
    fp8 tensors on CUDA; instead an output buffer is allocated up front
    and each tensor is copied into its slice.
    """
    total_rows = sum(t.shape[0] for t in tensors)

    first = tensors[0]
    out = torch.empty([total_rows] + list(first.shape)[1:], device=first.device, dtype=first.dtype)

    offset = 0
    for t in tensors:
        rows = t.shape[0]
        out[offset : offset + rows] = t
        offset += rows

    return out
+
+
def convert_text_enc_state_dict_v20(text_enc_dict, prefix=""):
    """Convert an HF CLIP v2-style text-encoder state dict to SD layout.

    Separate q/k/v projection weights and biases are gathered per attention
    layer and fused into single ``in_proj_weight`` / ``in_proj_bias``
    tensors; all other keys are renamed through ``textenc_pattern``.  Keys
    not starting with *prefix* are dropped.  Raises ``Exception`` when a
    layer is missing one of its q/k/v tensors.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if not k.startswith(prefix):
            continue
        if k.endswith(".self_attn.q_proj.weight") or k.endswith(".self_attn.k_proj.weight") or k.endswith(".self_attn.v_proj.weight"):
            # Layer key without the projection suffix (same length for q/k/v).
            k_pre = k[: -len(".q_proj.weight")]
            # The single character 'q'/'k'/'v' at a fixed offset in the key.
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if k.endswith(".self_attn.q_proj.bias") or k.endswith(".self_attn.k_proj.bias") or k.endswith(".self_attn.v_proj.bias"):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        text_proj = "transformer.text_projection.weight"
        if k.endswith(text_proj):
            # SD stores the projection transposed relative to HF.
            new_state_dict[k.replace(text_proj, "text_projection")] = v.transpose(0, 1).contiguous()
        else:
            relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
            new_state_dict[relabelled_key] = v

    # Fuse captured q/k/v weights into one in_proj_weight per layer.
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = cat_tensors(tensors)

    # Same fusion for the biases.
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = cat_tensors(tensors)

    return new_state_dict
+
+
def convert_text_enc_state_dict(text_enc_dict):
    # v1-style CLIP text encoders need no key renames; the dict passes
    # through unchanged (only v2.0 needs convert_text_enc_state_dict_v20).
    return text_enc_dict
diff --git a/modules_forge/packages/huggingface_guess/latent.py b/modules_forge/packages/huggingface_guess/latent.py
new file mode 100644
index 0000000000000000000000000000000000000000..07c0caf98f03b721b134ea2a9252962a0dc06be4
--- /dev/null
+++ b/modules_forge/packages/huggingface_guess/latent.py
@@ -0,0 +1,121 @@
+# reference: https://github.com/comfyanonymous/ComfyUI/blob/v0.3.52/comfy/latent_formats.py
+
+import torch
+
+
+class LatentFormat:
+ scale_factor = 1.0
+ latent_channels = 4
+ latent_rgb_factors = None
+ latent_rgb_factors_bias = None
+ taesd_decoder_name = None
+
+ def process_in(self, latent):
+ return latent * self.scale_factor
+
+ def process_out(self, latent):
+ return latent / self.scale_factor
+
+
+class SD15(LatentFormat):
+ def __init__(self, scale_factor=0.18215):
+ self.scale_factor = scale_factor
+ self.latent_rgb_factors = [
+ # R G B
+ [ 0.3512, 0.2297, 0.3227],
+ [ 0.3250, 0.4974, 0.2350],
+ [-0.2829, 0.1762, 0.2721],
+ [-0.2120, -0.2616, -0.7177],
+ ]
+ self.taesd_decoder_name = "taesd_decoder"
+
+
+class SDXL(LatentFormat):
+ scale_factor = 0.13025
+
+ def __init__(self):
+ self.latent_rgb_factors = [
+ # R G B
+ [ 0.3651, 0.4232, 0.4341],
+ [-0.2533, -0.0042, 0.1068],
+ [ 0.1076, 0.1111, -0.0362],
+ [-0.3165, -0.2492, -0.2188],
+ ]
+ self.latent_rgb_factors_bias = [0.1084, -0.0175, -0.0011]
+ self.taesd_decoder_name = "taesdxl_decoder"
+
+
+class Flux(LatentFormat):
+ latent_channels = 16
+
+ def __init__(self):
+ self.scale_factor = 0.3611
+ self.shift_factor = 0.1159
+ self.latent_rgb_factors = [
+ [-0.0346, 0.0244, 0.0681],
+ [ 0.0034, 0.0210, 0.0687],
+ [ 0.0275, -0.0668, -0.0433],
+ [-0.0174, 0.0160, 0.0617],
+ [ 0.0859, 0.0721, 0.0329],
+ [ 0.0004, 0.0383, 0.0115],
+ [ 0.0405, 0.0861, 0.0915],
+ [-0.0236, -0.0185, -0.0259],
+ [-0.0245, 0.0250, 0.1180],
+ [ 0.1008, 0.0755, -0.0421],
+ [-0.0515, 0.0201, 0.0011],
+ [ 0.0428, -0.0012, -0.0036],
+ [ 0.0817, 0.0765, 0.0749],
+ [-0.1264, -0.0522, -0.1103],
+ [-0.0280, -0.0881, -0.0499],
+ [-0.1262, -0.0982, -0.0778],
+ ]
+ self.latent_rgb_factors_bias = [-0.0329, -0.0718, -0.0851]
+ self.taesd_decoder_name = "taef1_decoder"
+
+ def process_in(self, latent):
+ return (latent - self.shift_factor) * self.scale_factor
+
+ def process_out(self, latent):
+ return (latent / self.scale_factor) + self.shift_factor
+
+
+class Wan21(LatentFormat):
+ latent_channels = 16
+ latent_dimensions = 3
+
+ latent_rgb_factors = [
+ [-0.1299, -0.1692, 0.2932],
+ [ 0.0671, 0.0406, 0.0442],
+ [ 0.3568, 0.2548, 0.1747],
+ [ 0.0372, 0.2344, 0.1420],
+ [ 0.0313, 0.0189, -0.0328],
+ [ 0.0296, -0.0956, -0.0665],
+ [-0.3477, -0.4059, -0.2925],
+ [ 0.0166, 0.1902, 0.1975],
+ [-0.0412, 0.0267, -0.1364],
+ [-0.1293, 0.0740, 0.1636],
+ [ 0.0680, 0.3019, 0.1128],
+ [ 0.0032, 0.0581, 0.0639],
+ [-0.1251, 0.0927, 0.1699],
+ [ 0.0060, -0.0633, 0.0005],
+ [ 0.3477, 0.2275, 0.2950],
+ [ 0.1984, 0.0913, 0.1861],
+ ]
+ latent_rgb_factors_bias = [-0.1835, -0.0868, -0.3360]
+
+ def __init__(self):
+ self.scale_factor = 1.0
+ self.latents_mean = torch.tensor([-0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508, 0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, -0.2921]).view(1, self.latent_channels, 1, 1, 1)
+ self.latents_std = torch.tensor([2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743, 3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.9160]).view(1, self.latent_channels, 1, 1, 1)
+
+ self.taesd_decoder_name = None
+
+ def process_in(self, latent):
+ latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+ latents_std = self.latents_std.to(latent.device, latent.dtype)
+ return (latent - latents_mean) * self.scale_factor / latents_std
+
+ def process_out(self, latent):
+ latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+ latents_std = self.latents_std.to(latent.device, latent.dtype)
+ return latent * latents_std / self.scale_factor + latents_mean
diff --git a/modules_forge/packages/huggingface_guess/model_list.py b/modules_forge/packages/huggingface_guess/model_list.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a4a0d92fc51a836699146be9cb5feabfb0c001f
--- /dev/null
+++ b/modules_forge/packages/huggingface_guess/model_list.py
@@ -0,0 +1,466 @@
+# reference: https://github.com/comfyanonymous/ComfyUI/blob/v0.3.77/comfy/supported_models.py
+
+from enum import Enum
+
+import torch
+
+from . import diffusers_convert, latent, utils
+
+
+class ModelType(Enum):
+ EPS = 1
+ V_PREDICTION = 2
+ FLUX = 3
+ FLOW = 4
+
+
+class BASE:
+ huggingface_repo = None
+ unet_config = {}
+ unet_extra_config = {
+ "num_heads": -1,
+ "num_head_channels": 64,
+ }
+
+ required_keys = {}
+
+ clip_prefix = []
+ clip_vision_prefix = None
+ noise_aug_config = None
+ sampling_settings = {}
+ latent_format = latent.LatentFormat
+ vae_key_prefix = ["first_stage_model."]
+ text_encoder_key_prefix = ["cond_stage_model."]
+ supported_inference_dtypes = [torch.float16, torch.bfloat16, torch.float32]
+
+ memory_usage_factor = 2.0
+
+ manual_cast_dtype = None
+ unet_target = "unet"
+ vae_target = "vae"
+
+ @classmethod
+ def matches(cls, unet_config, state_dict=None):
+ for k in cls.unet_config:
+ if k not in unet_config or cls.unet_config[k] != unet_config[k]:
+ return False
+ if state_dict is not None:
+ for k in cls.required_keys:
+ if k not in state_dict:
+ return False
+ return True
+
+ def model_type(self, state_dict):
+ return ModelType.EPS
+
+ def clip_target(self, state_dict: dict):
+ return {}
+
+ def inpaint_model(self):
+ return self.unet_config.get("in_channels", -1) > 4
+
+ def __init__(self, unet_config):
+ self.unet_config = unet_config.copy()
+ self.nunchaku: bool = self.unet_config.pop("nunchaku", False)
+ self.sampling_settings = self.sampling_settings.copy()
+ self.latent_format = self.latent_format()
+ for x in self.unet_extra_config:
+ self.unet_config[x] = self.unet_extra_config[x]
+
+ def process_clip_state_dict(self, state_dict):
+ replace_prefix = {k: "" for k in self.text_encoder_key_prefix}
+ return utils.state_dict_prefix_replace(state_dict, replace_prefix, filter_keys=True)
+
+ def process_unet_state_dict(self, state_dict):
+ return state_dict
+
+ def process_vae_state_dict(self, state_dict):
+ return state_dict
+
+ def process_clip_state_dict_for_saving(self, state_dict):
+ replace_prefix = {"": self.text_encoder_key_prefix[0]}
+ return utils.state_dict_prefix_replace(state_dict, replace_prefix)
+
+ def process_clip_vision_state_dict_for_saving(self, state_dict):
+ replace_prefix = {}
+ if self.clip_vision_prefix is not None:
+ replace_prefix[""] = self.clip_vision_prefix
+ return utils.state_dict_prefix_replace(state_dict, replace_prefix)
+
+ def process_unet_state_dict_for_saving(self, state_dict):
+ replace_prefix = {"": "model.diffusion_model."}
+ return utils.state_dict_prefix_replace(state_dict, replace_prefix)
+
+ def process_vae_state_dict_for_saving(self, state_dict):
+ replace_prefix = {"": self.vae_key_prefix[0]}
+ return utils.state_dict_prefix_replace(state_dict, replace_prefix)
+
+
+class SD15(BASE):
+ huggingface_repo = "runwayml/stable-diffusion-v1-5"
+
+ unet_config = {
+ "context_dim": 768,
+ "model_channels": 320,
+ "use_linear_in_transformer": False,
+ "adm_in_channels": None,
+ "use_temporal_attention": False,
+ }
+
+ unet_extra_config = {
+ "num_heads": 8,
+ "num_head_channels": -1,
+ }
+
+ latent_format = latent.SD15
+ memory_usage_factor = 1.0
+
+ def process_clip_state_dict(self, state_dict):
+ k = list(state_dict.keys())
+ for x in k:
+ if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
+ y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
+ state_dict[y] = state_dict.pop(x)
+
+ if "cond_stage_model.transformer.text_model.embeddings.position_ids" in state_dict:
+ ids = state_dict["cond_stage_model.transformer.text_model.embeddings.position_ids"]
+ if ids.dtype == torch.float32:
+ state_dict["cond_stage_model.transformer.text_model.embeddings.position_ids"] = ids.round()
+
+ replace_prefix = {"cond_stage_model.": "clip_l."}
+ return utils.state_dict_prefix_replace(state_dict, replace_prefix, filter_keys=True)
+
+ def process_clip_state_dict_for_saving(self, state_dict):
+ pop_keys = ["clip_l.transformer.text_projection.weight", "clip_l.logit_scale"]
+ for p in pop_keys:
+ if p in state_dict:
+ state_dict.pop(p)
+
+ replace_prefix = {"clip_l.": "cond_stage_model."}
+ return utils.state_dict_prefix_replace(state_dict, replace_prefix)
+
+ def clip_target(self, state_dict: dict):
+ return {"clip_l": "text_encoder"}
+
+
+class SDXLRefiner(BASE):
+ huggingface_repo = "stabilityai/stable-diffusion-xl-refiner-1.0"
+
+ unet_config = {
+ "model_channels": 384,
+ "use_linear_in_transformer": True,
+ "context_dim": 1280,
+ "adm_in_channels": 2560,
+ "transformer_depth": [0, 0, 4, 4, 4, 4, 0, 0],
+ "use_temporal_attention": False,
+ }
+
+ latent_format = latent.SDXL
+ memory_usage_factor = 1.0
+
+ def process_clip_state_dict(self, state_dict):
+ replace_prefix = {"conditioner.embedders.0.model.": "clip_g."}
+ state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix, filter_keys=True)
+ return utils.clip_text_transformers_convert(state_dict, "clip_g.", "clip_g.transformer.")
+
+ def process_clip_state_dict_for_saving(self, state_dict):
+ state_dict_g = diffusers_convert.convert_text_enc_state_dict_v20(state_dict, "clip_g")
+ if "clip_g.transformer.text_model.embeddings.position_ids" in state_dict_g:
+ state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids")
+ replace_prefix = {"clip_g": "conditioner.embedders.0.model"}
+ return utils.state_dict_prefix_replace(state_dict_g, replace_prefix)
+
+ def clip_target(self, state_dict: dict):
+ return {"clip_g": "text_encoder"}
+
+
+class SDXL(BASE):
+ huggingface_repo = "stabilityai/stable-diffusion-xl-base-1.0"
+
+ unet_config = {
+ "model_channels": 320,
+ "use_linear_in_transformer": True,
+ "transformer_depth": [0, 0, 2, 2, 10, 10],
+ "context_dim": 2048,
+ "adm_in_channels": 2816,
+ "use_temporal_attention": False,
+ }
+
+ latent_format = latent.SDXL
+ memory_usage_factor = 0.8
+
+ def model_type(self, state_dict: dict):
+ if "v_pred" in state_dict:
+ return ModelType.V_PREDICTION
+ else:
+ return ModelType.EPS
+
+ def process_clip_state_dict(self, state_dict):
+ replace_prefix = {
+ "conditioner.embedders.0.transformer.text_model": "clip_l.transformer.text_model",
+ "conditioner.embedders.1.model.": "clip_g.",
+ }
+ state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix, filter_keys=True)
+ return utils.clip_text_transformers_convert(state_dict, "clip_g.", "clip_g.transformer.")
+
+ def process_clip_state_dict_for_saving(self, state_dict):
+ state_dict_g = diffusers_convert.convert_text_enc_state_dict_v20(state_dict, "clip_g")
+ for k in state_dict:
+ if k.startswith("clip_l"):
+ state_dict_g[k] = state_dict[k]
+
+ state_dict_g["clip_l.transformer.text_model.embeddings.position_ids"] = torch.arange(77).expand((1, -1))
+ pop_keys = ["clip_l.transformer.text_projection.weight", "clip_l.logit_scale"]
+ for p in pop_keys:
+ if p in state_dict_g:
+ state_dict_g.pop(p)
+
+ replace_prefix = {
+ "clip_g": "conditioner.embedders.1.model",
+ "clip_l": "conditioner.embedders.0",
+ }
+ return utils.state_dict_prefix_replace(state_dict_g, replace_prefix)
+
+ def clip_target(self, state_dict: dict):
+ return {"clip_l": "text_encoder", "clip_g": "text_encoder_2"}
+
+
+class Flux(BASE):
+ huggingface_repo = "black-forest-labs/FLUX.1-dev"
+
+ unet_config = {
+ "image_model": "flux",
+ "guidance_embed": True,
+ }
+
+ sampling_settings = {}
+
+ unet_extra_config = {}
+ latent_format = latent.Flux
+
+ memory_usage_factor = 2.8
+
+ supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32]
+
+ vae_key_prefix = ["vae."]
+ text_encoder_key_prefix = ["text_encoders."]
+
+ unet_target = "transformer"
+
+ def model_type(self, state_dict):
+ return ModelType.FLUX
+
+ def clip_target(self, state_dict: dict):
+ result = {}
+ pref = self.text_encoder_key_prefix[0]
+
+ if "{}clip_l.transformer.text_model.final_layer_norm.weight".format(pref) in state_dict:
+ result["clip_l"] = "text_encoder"
+
+ if "{}t5xxl.transformer.encoder.final_layer_norm.weight".format(pref) in state_dict:
+ result["t5xxl"] = "text_encoder_2"
+
+ elif "{}t5xxl.transformer.encoder.final_layer_norm.qweight".format(pref) in state_dict:
+ result["t5xxl"] = "text_encoder_2"
+
+ return result
+
+
+class FluxSchnell(Flux):
+ huggingface_repo = "black-forest-labs/FLUX.1-schnell"
+
+ unet_config = {
+ "image_model": "flux",
+ "guidance_embed": False,
+ }
+
+ sampling_settings = {
+ "multiplier": 1.0,
+ "shift": 1.0,
+ }
+
+ supported_inference_dtypes = [torch.bfloat16, torch.float32]
+
+
+class Chroma(FluxSchnell):
+ huggingface_repo = "Chroma"
+
+ unet_config = {
+ "image_model": "chroma",
+ }
+
+ sampling_settings = {
+ "multiplier": 1.0,
+ }
+
+ memory_usage_factor = 3.2
+
+ supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32]
+
+ text_encoder_key_prefix = ["text_encoders.", "cond_stage_model."]
+
+ def clip_target(self, state_dict: dict):
+ for pref in self.text_encoder_key_prefix:
+ if "{}t5xxl.transformer.encoder.final_layer_norm.weight".format(pref) in state_dict:
+ return {"t5xxl": "text_encoder"}
+ elif "{}t5xxl.transformer.encoder.final_layer_norm.qweight".format(pref) in state_dict:
+ return {"t5xxl": "text_encoder"}
+
+ def process_vae_state_dict(self, state_dict):
+ replace_prefix = {"first_stage_model.": "vae."}
+ return utils.state_dict_prefix_replace(state_dict, replace_prefix)
+
+
+class Lumina2(BASE):
+ huggingface_repo = "neta-art/Neta-Lumina"
+
+ unet_config = {
+ "image_model": "lumina2",
+ "dim": 2304,
+ }
+
+ sampling_settings = {
+ "multiplier": 1.0,
+ "shift": 6.0,
+ }
+
+ memory_usage_factor = 1.4
+
+ unet_extra_config = {}
+ latent_format = latent.Flux
+
+ supported_inference_dtypes = [torch.bfloat16, torch.float32]
+
+ vae_key_prefix = ["vae."]
+ text_encoder_key_prefix = ["text_encoders."]
+
+ unet_target = "transformer"
+
+ def model_type(self, state_dict):
+ return ModelType.FLOW
+
+ def clip_target(self, state_dict: dict):
+ pref = self.text_encoder_key_prefix[0]
+ if "{}gemma2_2b.transformer.model.embed_tokens.weight".format(pref) in state_dict:
+ state_dict.pop("{}gemma2_2b.logit_scale".format(pref), None)
+ state_dict.pop("{}spiece_model".format(pref), None)
+ return {"gemma2_2b.transformer": "text_encoder"}
+ else:
+ return {"gemma2_2b": "text_encoder"}
+
+
+class ZImage(Lumina2):
+ huggingface_repo = "Tongyi-MAI/Z-Image-Turbo"
+
+ unet_config = {
+ "image_model": "lumina2",
+ "dim": 3840,
+ }
+
+ sampling_settings = {
+ "multiplier": 1.0,
+ "shift": 3.0,
+ }
+
+ memory_usage_factor = 2.0
+
+ supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32]
+
+ def clip_target(self, state_dict=None):
+ return {"qwen3_4b.transformer": "text_encoder"}
+
+
+class WAN21_T2V(BASE):
+ huggingface_repo = "Wan-AI/Wan2.1-T2V-14B"
+
+ unet_config = {
+ "image_model": "wan2.1",
+ "model_type": "t2v",
+ }
+
+ sampling_settings = {
+ "shift": 8.0,
+ }
+
+ unet_extra_config = {}
+ latent_format = latent.Wan21
+
+ memory_usage_factor = 1.0
+
+ supported_inference_dtypes = [torch.float16, torch.bfloat16, torch.float32]
+
+ vae_key_prefix = ["vae."]
+ text_encoder_key_prefix = ["text_encoders."]
+
+ unet_target = "transformer"
+
+ def __init__(self, unet_config):
+ super().__init__(unet_config)
+ self.memory_usage_factor = self.unet_config.get("dim", 2000) / 2000
+
+ def model_type(self, state_dict):
+ return ModelType.FLOW
+
+ def clip_target(self, state_dict: dict):
+ return {"umt5xxl": "text_encoder"}
+
+
+class WAN21_I2V(WAN21_T2V):
+ huggingface_repo = "Wan-AI/Wan2.1-I2V-14B"
+
+ unet_config = {
+ "image_model": "wan2.1",
+ "model_type": "i2v",
+ "in_dim": 36,
+ }
+
+
+class QwenImage(BASE):
+ huggingface_repo = "Qwen/Qwen-Image"
+
+ unet_config = {
+ "image_model": "qwen_image",
+ }
+
+ sampling_settings = {
+ "multiplier": 1.0,
+ "shift": 1.15,
+ }
+
+ memory_usage_factor = 1.8
+
+ unet_extra_config = {}
+ latent_format = latent.Wan21
+
+ supported_inference_dtypes = [torch.bfloat16, torch.float32]
+
+ vae_key_prefix = ["vae."]
+ text_encoder_key_prefix = ["text_encoders."]
+
+ unet_target = "transformer"
+
+ def model_type(self, state_dict):
+ return ModelType.FLOW
+
+ def clip_target(self, state_dict: dict):
+ pref = self.text_encoder_key_prefix[0]
+ if "{}qwen25_7b.transformer.model.embed_tokens.weight".format(pref) in state_dict:
+ state_dict.pop("{}qwen25_7b.logit_scale".format(pref), None)
+ return {"qwen25_7b.transformer": "text_encoder"}
+ else:
+ return {"qwen25_7b": "text_encoder"}
+
+
+models = [
+ SD15,
+ SDXL,
+ SDXLRefiner,
+ Flux,
+ FluxSchnell,
+ Chroma,
+ Lumina2,
+ ZImage,
+ WAN21_T2V,
+ WAN21_I2V,
+ QwenImage,
+]
diff --git a/modules_forge/packages/huggingface_guess/utils.py b/modules_forge/packages/huggingface_guess/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0e27ecf00ddb14cb4231994254bf7547914a7df
--- /dev/null
+++ b/modules_forge/packages/huggingface_guess/utils.py
@@ -0,0 +1,482 @@
+# reference: https://github.com/comfyanonymous/ComfyUI/blob/v0.3.52/comfy/utils.py
+
+import math
+import struct
+
+import torch
+
+
+def calculate_parameters(sd, prefix=""):
+ params = 0
+ for k in sd.keys():
+ if k.startswith(prefix):
+ w = sd[k]
+ params += w.nelement()
+ return params
+
+
+def weight_dtype(sd, prefix=""):
+ dtypes = {}
+ for k in sd.keys():
+ if k.startswith(prefix):
+ w = sd[k]
+ dtypes[w.dtype] = dtypes.get(w.dtype, 0) + w.numel()
+
+ if len(dtypes) == 0:
+ return None
+
+ return max(dtypes, key=dtypes.get)
+
+
+def state_dict_key_replace(state_dict, keys_to_replace):
+ for x in keys_to_replace:
+ if x in state_dict:
+ state_dict[keys_to_replace[x]] = state_dict.pop(x)
+ return state_dict
+
+
+def state_dict_prefix_replace(state_dict, replace_prefix, filter_keys=False):
+ if filter_keys:
+ out = {}
+ else:
+ out = state_dict
+ for rp in replace_prefix:
+ replace = list(map(lambda a: (a, "{}{}".format(replace_prefix[rp], a[len(rp) :])), filter(lambda a: a.startswith(rp), state_dict.keys())))
+ for x in replace:
+ w = state_dict.pop(x[0])
+ out[x[1]] = w
+ return out
+
+
+def transformers_convert(sd, prefix_from, prefix_to, number):
+ keys_to_replace = {
+ "{}positional_embedding": "{}embeddings.position_embedding.weight",
+ "{}token_embedding.weight": "{}embeddings.token_embedding.weight",
+ "{}ln_final.weight": "{}final_layer_norm.weight",
+ "{}ln_final.bias": "{}final_layer_norm.bias",
+ }
+
+ for k in keys_to_replace:
+ x = k.format(prefix_from)
+ if x in sd:
+ sd[keys_to_replace[k].format(prefix_to)] = sd.pop(x)
+
+ resblock_to_replace = {
+ "ln_1": "layer_norm1",
+ "ln_2": "layer_norm2",
+ "mlp.c_fc": "mlp.fc1",
+ "mlp.c_proj": "mlp.fc2",
+ "attn.out_proj": "self_attn.out_proj",
+ }
+
+ for resblock in range(number):
+ for x in resblock_to_replace:
+ for y in ["weight", "bias"]:
+ k = "{}transformer.resblocks.{}.{}.{}".format(prefix_from, resblock, x, y)
+ k_to = "{}encoder.layers.{}.{}.{}".format(prefix_to, resblock, resblock_to_replace[x], y)
+ if k in sd:
+ sd[k_to] = sd.pop(k)
+
+ for y in ["weight", "bias"]:
+ k_from = "{}transformer.resblocks.{}.attn.in_proj_{}".format(prefix_from, resblock, y)
+ if k_from in sd:
+ weights = sd.pop(k_from)
+ shape_from = weights.shape[0] // 3
+ for x in range(3):
+ p = ["self_attn.q_proj", "self_attn.k_proj", "self_attn.v_proj"]
+ k_to = "{}encoder.layers.{}.{}.{}".format(prefix_to, resblock, p[x], y)
+ sd[k_to] = weights[shape_from * x : shape_from * (x + 1)]
+
+ return sd
+
+
+def clip_text_transformers_convert(sd, prefix_from, prefix_to):
+ sd = transformers_convert(sd, prefix_from, "{}text_model.".format(prefix_to), 32)
+
+ tp = "{}text_projection.weight".format(prefix_from)
+ if tp in sd:
+ sd["{}text_projection.weight".format(prefix_to)] = sd.pop(tp)
+
+ tp = "{}text_projection".format(prefix_from)
+ if tp in sd:
+ sd["{}text_projection.weight".format(prefix_to)] = sd.pop(tp).transpose(0, 1).contiguous()
+ return sd
+
+
+UNET_MAP_ATTENTIONS = {
+ "proj_in.weight",
+ "proj_in.bias",
+ "proj_out.weight",
+ "proj_out.bias",
+ "norm.weight",
+ "norm.bias",
+}
+
+TRANSFORMER_BLOCKS = {
+ "norm1.weight",
+ "norm1.bias",
+ "norm2.weight",
+ "norm2.bias",
+ "norm3.weight",
+ "norm3.bias",
+ "attn1.to_q.weight",
+ "attn1.to_k.weight",
+ "attn1.to_v.weight",
+ "attn1.to_out.0.weight",
+ "attn1.to_out.0.bias",
+ "attn2.to_q.weight",
+ "attn2.to_k.weight",
+ "attn2.to_v.weight",
+ "attn2.to_out.0.weight",
+ "attn2.to_out.0.bias",
+ "ff.net.0.proj.weight",
+ "ff.net.0.proj.bias",
+ "ff.net.2.weight",
+ "ff.net.2.bias",
+}
+
+UNET_MAP_RESNET = {
+ "in_layers.2.weight": "conv1.weight",
+ "in_layers.2.bias": "conv1.bias",
+ "emb_layers.1.weight": "time_emb_proj.weight",
+ "emb_layers.1.bias": "time_emb_proj.bias",
+ "out_layers.3.weight": "conv2.weight",
+ "out_layers.3.bias": "conv2.bias",
+ "skip_connection.weight": "conv_shortcut.weight",
+ "skip_connection.bias": "conv_shortcut.bias",
+ "in_layers.0.weight": "norm1.weight",
+ "in_layers.0.bias": "norm1.bias",
+ "out_layers.0.weight": "norm2.weight",
+ "out_layers.0.bias": "norm2.bias",
+}
+
+UNET_MAP_BASIC = {
+ ("label_emb.0.0.weight", "class_embedding.linear_1.weight"),
+ ("label_emb.0.0.bias", "class_embedding.linear_1.bias"),
+ ("label_emb.0.2.weight", "class_embedding.linear_2.weight"),
+ ("label_emb.0.2.bias", "class_embedding.linear_2.bias"),
+ ("label_emb.0.0.weight", "add_embedding.linear_1.weight"),
+ ("label_emb.0.0.bias", "add_embedding.linear_1.bias"),
+ ("label_emb.0.2.weight", "add_embedding.linear_2.weight"),
+ ("label_emb.0.2.bias", "add_embedding.linear_2.bias"),
+ ("input_blocks.0.0.weight", "conv_in.weight"),
+ ("input_blocks.0.0.bias", "conv_in.bias"),
+ ("out.0.weight", "conv_norm_out.weight"),
+ ("out.0.bias", "conv_norm_out.bias"),
+ ("out.2.weight", "conv_out.weight"),
+ ("out.2.bias", "conv_out.bias"),
+ ("time_embed.0.weight", "time_embedding.linear_1.weight"),
+ ("time_embed.0.bias", "time_embedding.linear_1.bias"),
+ ("time_embed.2.weight", "time_embedding.linear_2.weight"),
+ ("time_embed.2.bias", "time_embedding.linear_2.bias"),
+}
+
+
+def unet_to_diffusers(unet_config):
+ if "num_res_blocks" not in unet_config:
+ return {}
+ num_res_blocks = unet_config["num_res_blocks"]
+ channel_mult = unet_config["channel_mult"]
+ transformer_depth = unet_config["transformer_depth"][:]
+ transformer_depth_output = unet_config["transformer_depth_output"][:]
+ num_blocks = len(channel_mult)
+
+ transformers_mid = unet_config.get("transformer_depth_middle", None)
+
+ diffusers_unet_map = {}
+ for x in range(num_blocks):
+ n = 1 + (num_res_blocks[x] + 1) * x
+ for i in range(num_res_blocks[x]):
+ for b in UNET_MAP_RESNET:
+ diffusers_unet_map["down_blocks.{}.resnets.{}.{}".format(x, i, UNET_MAP_RESNET[b])] = "input_blocks.{}.0.{}".format(n, b)
+ num_transformers = transformer_depth.pop(0)
+ if num_transformers > 0:
+ for b in UNET_MAP_ATTENTIONS:
+ diffusers_unet_map["down_blocks.{}.attentions.{}.{}".format(x, i, b)] = "input_blocks.{}.1.{}".format(n, b)
+ for t in range(num_transformers):
+ for b in TRANSFORMER_BLOCKS:
+ diffusers_unet_map["down_blocks.{}.attentions.{}.transformer_blocks.{}.{}".format(x, i, t, b)] = "input_blocks.{}.1.transformer_blocks.{}.{}".format(n, t, b)
+ n += 1
+ for k in ["weight", "bias"]:
+ diffusers_unet_map["down_blocks.{}.downsamplers.0.conv.{}".format(x, k)] = "input_blocks.{}.0.op.{}".format(n, k)
+
+ i = 0
+ for b in UNET_MAP_ATTENTIONS:
+ diffusers_unet_map["mid_block.attentions.{}.{}".format(i, b)] = "middle_block.1.{}".format(b)
+ for t in range(transformers_mid):
+ for b in TRANSFORMER_BLOCKS:
+ diffusers_unet_map["mid_block.attentions.{}.transformer_blocks.{}.{}".format(i, t, b)] = "middle_block.1.transformer_blocks.{}.{}".format(t, b)
+
+ for i, n in enumerate([0, 2]):
+ for b in UNET_MAP_RESNET:
+ diffusers_unet_map["mid_block.resnets.{}.{}".format(i, UNET_MAP_RESNET[b])] = "middle_block.{}.{}".format(n, b)
+
+ num_res_blocks = list(reversed(num_res_blocks))
+ for x in range(num_blocks):
+ n = (num_res_blocks[x] + 1) * x
+ l = num_res_blocks[x] + 1
+ for i in range(l):
+ c = 0
+ for b in UNET_MAP_RESNET:
+ diffusers_unet_map["up_blocks.{}.resnets.{}.{}".format(x, i, UNET_MAP_RESNET[b])] = "output_blocks.{}.0.{}".format(n, b)
+ c += 1
+ num_transformers = transformer_depth_output.pop()
+ if num_transformers > 0:
+ c += 1
+ for b in UNET_MAP_ATTENTIONS:
+ diffusers_unet_map["up_blocks.{}.attentions.{}.{}".format(x, i, b)] = "output_blocks.{}.1.{}".format(n, b)
+ for t in range(num_transformers):
+ for b in TRANSFORMER_BLOCKS:
+ diffusers_unet_map["up_blocks.{}.attentions.{}.transformer_blocks.{}.{}".format(x, i, t, b)] = "output_blocks.{}.1.transformer_blocks.{}.{}".format(n, t, b)
+ if i == l - 1:
+ for k in ["weight", "bias"]:
+ diffusers_unet_map["up_blocks.{}.upsamplers.0.conv.{}".format(x, k)] = "output_blocks.{}.{}.conv.{}".format(n, c, k)
+ n += 1
+
+ for k in UNET_MAP_BASIC:
+ diffusers_unet_map[k[1]] = k[0]
+
+ return diffusers_unet_map
+
+
+def swap_scale_shift(weight):
+ shift, scale = weight.chunk(2, dim=0)
+ new_weight = torch.cat([scale, shift], dim=0)
+ return new_weight
+
+
+MMDIT_MAP_BASIC = {
+ ("context_embedder.bias", "context_embedder.bias"),
+ ("context_embedder.weight", "context_embedder.weight"),
+ ("t_embedder.mlp.0.bias", "time_text_embed.timestep_embedder.linear_1.bias"),
+ ("t_embedder.mlp.0.weight", "time_text_embed.timestep_embedder.linear_1.weight"),
+ ("t_embedder.mlp.2.bias", "time_text_embed.timestep_embedder.linear_2.bias"),
+ ("t_embedder.mlp.2.weight", "time_text_embed.timestep_embedder.linear_2.weight"),
+ ("x_embedder.proj.bias", "pos_embed.proj.bias"),
+ ("x_embedder.proj.weight", "pos_embed.proj.weight"),
+ ("y_embedder.mlp.0.bias", "time_text_embed.text_embedder.linear_1.bias"),
+ ("y_embedder.mlp.0.weight", "time_text_embed.text_embedder.linear_1.weight"),
+ ("y_embedder.mlp.2.bias", "time_text_embed.text_embedder.linear_2.bias"),
+ ("y_embedder.mlp.2.weight", "time_text_embed.text_embedder.linear_2.weight"),
+ ("pos_embed", "pos_embed.pos_embed"),
+ ("final_layer.adaLN_modulation.1.bias", "norm_out.linear.bias", swap_scale_shift),
+ ("final_layer.adaLN_modulation.1.weight", "norm_out.linear.weight", swap_scale_shift),
+ ("final_layer.linear.bias", "proj_out.bias"),
+ ("final_layer.linear.weight", "proj_out.weight"),
+}
+
+MMDIT_MAP_BLOCK = {
+ ("context_block.adaLN_modulation.1.bias", "norm1_context.linear.bias"),
+ ("context_block.adaLN_modulation.1.weight", "norm1_context.linear.weight"),
+ ("context_block.attn.proj.bias", "attn.to_add_out.bias"),
+ ("context_block.attn.proj.weight", "attn.to_add_out.weight"),
+ ("context_block.mlp.fc1.bias", "ff_context.net.0.proj.bias"),
+ ("context_block.mlp.fc1.weight", "ff_context.net.0.proj.weight"),
+ ("context_block.mlp.fc2.bias", "ff_context.net.2.bias"),
+ ("context_block.mlp.fc2.weight", "ff_context.net.2.weight"),
+ ("context_block.attn.ln_q.weight", "attn.norm_added_q.weight"),
+ ("context_block.attn.ln_k.weight", "attn.norm_added_k.weight"),
+ ("x_block.adaLN_modulation.1.bias", "norm1.linear.bias"),
+ ("x_block.adaLN_modulation.1.weight", "norm1.linear.weight"),
+ ("x_block.attn.proj.bias", "attn.to_out.0.bias"),
+ ("x_block.attn.proj.weight", "attn.to_out.0.weight"),
+ ("x_block.attn.ln_q.weight", "attn.norm_q.weight"),
+ ("x_block.attn.ln_k.weight", "attn.norm_k.weight"),
+ ("x_block.attn2.proj.bias", "attn2.to_out.0.bias"),
+ ("x_block.attn2.proj.weight", "attn2.to_out.0.weight"),
+ ("x_block.attn2.ln_q.weight", "attn2.norm_q.weight"),
+ ("x_block.attn2.ln_k.weight", "attn2.norm_k.weight"),
+ ("x_block.mlp.fc1.bias", "ff.net.0.proj.bias"),
+ ("x_block.mlp.fc1.weight", "ff.net.0.proj.weight"),
+ ("x_block.mlp.fc2.bias", "ff.net.2.bias"),
+ ("x_block.mlp.fc2.weight", "ff.net.2.weight"),
+}
+
+
+def mmdit_to_diffusers(mmdit_config, output_prefix=""):
+ key_map = {}
+
+ depth = mmdit_config.get("depth", 0)
+ num_blocks = mmdit_config.get("num_blocks", depth)
+ for i in range(num_blocks):
+ block_from = "transformer_blocks.{}".format(i)
+ block_to = "{}joint_blocks.{}".format(output_prefix, i)
+
+ offset = depth * 64
+
+ for end in ("weight", "bias"):
+ k = "{}.attn.".format(block_from)
+ qkv = "{}.x_block.attn.qkv.{}".format(block_to, end)
+ key_map["{}to_q.{}".format(k, end)] = (qkv, (0, 0, offset))
+ key_map["{}to_k.{}".format(k, end)] = (qkv, (0, offset, offset))
+ key_map["{}to_v.{}".format(k, end)] = (qkv, (0, offset * 2, offset))
+
+ qkv = "{}.context_block.attn.qkv.{}".format(block_to, end)
+ key_map["{}add_q_proj.{}".format(k, end)] = (qkv, (0, 0, offset))
+ key_map["{}add_k_proj.{}".format(k, end)] = (qkv, (0, offset, offset))
+ key_map["{}add_v_proj.{}".format(k, end)] = (qkv, (0, offset * 2, offset))
+
+ k = "{}.attn2.".format(block_from)
+ qkv = "{}.x_block.attn2.qkv.{}".format(block_to, end)
+ key_map["{}to_q.{}".format(k, end)] = (qkv, (0, 0, offset))
+ key_map["{}to_k.{}".format(k, end)] = (qkv, (0, offset, offset))
+ key_map["{}to_v.{}".format(k, end)] = (qkv, (0, offset * 2, offset))
+
+ for k in MMDIT_MAP_BLOCK:
+ key_map["{}.{}".format(block_from, k[1])] = "{}.{}".format(block_to, k[0])
+
+ map_basic = MMDIT_MAP_BASIC.copy()
+ map_basic.add(("joint_blocks.{}.context_block.adaLN_modulation.1.bias".format(depth - 1), "transformer_blocks.{}.norm1_context.linear.bias".format(depth - 1), swap_scale_shift))
+ map_basic.add(("joint_blocks.{}.context_block.adaLN_modulation.1.weight".format(depth - 1), "transformer_blocks.{}.norm1_context.linear.weight".format(depth - 1), swap_scale_shift))
+
+ for k in map_basic:
+ if len(k) > 2:
+ key_map[k[1]] = ("{}{}".format(output_prefix, k[0]), None, k[2])
+ else:
+ key_map[k[1]] = "{}{}".format(output_prefix, k[0])
+
+ return key_map
+
+
+def auraflow_to_diffusers(mmdit_config, output_prefix=""):
+ n_double_layers = mmdit_config.get("n_double_layers", 0)
+ n_layers = mmdit_config.get("n_layers", 0)
+
+ key_map = {}
+ for i in range(n_layers):
+ if i < n_double_layers:
+ index = i
+ prefix_from = "joint_transformer_blocks"
+ prefix_to = "{}double_layers".format(output_prefix)
+ block_map = {
+ "attn.to_q.weight": "attn.w2q.weight",
+ "attn.to_k.weight": "attn.w2k.weight",
+ "attn.to_v.weight": "attn.w2v.weight",
+ "attn.to_out.0.weight": "attn.w2o.weight",
+ "attn.add_q_proj.weight": "attn.w1q.weight",
+ "attn.add_k_proj.weight": "attn.w1k.weight",
+ "attn.add_v_proj.weight": "attn.w1v.weight",
+ "attn.to_add_out.weight": "attn.w1o.weight",
+ "ff.linear_1.weight": "mlpX.c_fc1.weight",
+ "ff.linear_2.weight": "mlpX.c_fc2.weight",
+ "ff.out_projection.weight": "mlpX.c_proj.weight",
+ "ff_context.linear_1.weight": "mlpC.c_fc1.weight",
+ "ff_context.linear_2.weight": "mlpC.c_fc2.weight",
+ "ff_context.out_projection.weight": "mlpC.c_proj.weight",
+ "norm1.linear.weight": "modX.1.weight",
+ "norm1_context.linear.weight": "modC.1.weight",
+ }
+ else:
+ index = i - n_double_layers
+ prefix_from = "single_transformer_blocks"
+ prefix_to = "{}single_layers".format(output_prefix)
+
+ block_map = {
+ "attn.to_q.weight": "attn.w1q.weight",
+ "attn.to_k.weight": "attn.w1k.weight",
+ "attn.to_v.weight": "attn.w1v.weight",
+ "attn.to_out.0.weight": "attn.w1o.weight",
+ "norm1.linear.weight": "modCX.1.weight",
+ "ff.linear_1.weight": "mlp.c_fc1.weight",
+ "ff.linear_2.weight": "mlp.c_fc2.weight",
+ "ff.out_projection.weight": "mlp.c_proj.weight",
+ }
+
+ for k in block_map:
+ key_map["{}.{}.{}".format(prefix_from, index, k)] = "{}.{}.{}".format(prefix_to, index, block_map[k])
+
+ MAP_BASIC = {
+ ("positional_encoding", "pos_embed.pos_embed"),
+ ("register_tokens", "register_tokens"),
+ ("t_embedder.mlp.0.weight", "time_step_proj.linear_1.weight"),
+ ("t_embedder.mlp.0.bias", "time_step_proj.linear_1.bias"),
+ ("t_embedder.mlp.2.weight", "time_step_proj.linear_2.weight"),
+ ("t_embedder.mlp.2.bias", "time_step_proj.linear_2.bias"),
+ ("cond_seq_linear.weight", "context_embedder.weight"),
+ ("init_x_linear.weight", "pos_embed.proj.weight"),
+ ("init_x_linear.bias", "pos_embed.proj.bias"),
+ ("final_linear.weight", "proj_out.weight"),
+ ("modF.1.weight", "norm_out.linear.weight", swap_scale_shift),
+ }
+
+ for k in MAP_BASIC:
+ if len(k) > 2:
+ key_map[k[1]] = ("{}{}".format(output_prefix, k[0]), None, k[2])
+ else:
+ key_map[k[1]] = "{}{}".format(output_prefix, k[0])
+
+ return key_map
+
+
+def repeat_to_batch_size(tensor, batch_size, dim=0):
+ if tensor.shape[dim] > batch_size:
+ return tensor.narrow(dim, 0, batch_size)
+ elif tensor.shape[dim] < batch_size:
+ return tensor.repeat(dim * [1] + [math.ceil(batch_size / tensor.shape[dim])] + [1] * (len(tensor.shape) - 1 - dim)).narrow(dim, 0, batch_size)
+ return tensor
+
+
+def resize_to_batch_size(tensor, batch_size):
+ in_batch_size = tensor.shape[0]
+ if in_batch_size == batch_size:
+ return tensor
+
+ if batch_size <= 1:
+ return tensor[:batch_size]
+
+ output = torch.empty([batch_size] + list(tensor.shape)[1:], dtype=tensor.dtype, device=tensor.device)
+ if batch_size < in_batch_size:
+ scale = (in_batch_size - 1) / (batch_size - 1)
+ for i in range(batch_size):
+ output[i] = tensor[min(round(i * scale), in_batch_size - 1)]
+ else:
+ scale = in_batch_size / batch_size
+ for i in range(batch_size):
+ output[i] = tensor[min(math.floor((i + 0.5) * scale), in_batch_size - 1)]
+
+ return output
+
+
+def convert_sd_to(state_dict, dtype):
+ keys = list(state_dict.keys())
+ for k in keys:
+ state_dict[k] = state_dict[k].to(dtype)
+ return state_dict
+
+
+def safetensors_header(safetensors_path, max_size=100 * 1024 * 1024):
+ with open(safetensors_path, "rb") as f:
+ header = f.read(8)
+ length_of_header = struct.unpack("<Q", header)[0]
+ if length_of_header > max_size:
+ return None
+ return f.read(length_of_header)
+
+
+def set_attr(obj, attr, value):
+ attrs = attr.split(".")
+ for name in attrs[:-1]:
+ obj = getattr(obj, name)
+ prev = getattr(obj, attrs[-1])
+ setattr(obj, attrs[-1], value)
+ return prev
+
+
+def set_attr_param(obj, attr, value):
+ return set_attr(obj, attr, torch.nn.Parameter(value, requires_grad=False))
+
+
+def copy_to_param(obj, attr, value):
+ # inplace update tensor instead of replacing it
+ attrs = attr.split(".")
+ for name in attrs[:-1]:
+ obj = getattr(obj, name)
+ prev = getattr(obj, attrs[-1])
+ prev.data.copy_(value)
+
+
+def get_attr(obj, attr: str):
+ attrs = attr.split(".")
+ for name in attrs:
+ obj = getattr(obj, name)
+ return obj
diff --git a/modules_forge/packages/k_diffusion/deis.py b/modules_forge/packages/k_diffusion/deis.py
new file mode 100644
index 0000000000000000000000000000000000000000..da9d2ffc69f0208735ec0cbf4a24cff8693a46eb
--- /dev/null
+++ b/modules_forge/packages/k_diffusion/deis.py
@@ -0,0 +1,127 @@
+"""
+Reimplementation of DEIS (https://github.com/qsh-zh/deis)
+Credit: https://github.com/zju-pi/diff-sampler/blob/main/gits-main/solver_utils.py
+License: Apache-2.0
+"""
+
+import numpy as np
+import torch
+
+# ----------------------------------------------------------------------------
+
+
def edm2t(edm_steps, epsilon_s=1e-3, sigma_min=0.002, sigma_max=80):
    """Map an EDM sigma schedule onto VP-SDE timesteps.

    Fits a linear VP beta schedule (beta_d, beta_min) so that
    sigma(epsilon_s) = sigma_min and sigma(1) = sigma_max, then inverts
    sigma(t) for each entry of *edm_steps*.

    Returns:
        (t_steps, beta_0, beta_1) where beta_0 = beta_min and
        beta_1 = beta_d + beta_min.
    """
    # sigma(t) for the VP schedule and its inverse t(sigma).
    # NOTE(review): vp_sigma is defined but never used in this function.
    vp_sigma = lambda beta_d, beta_min: lambda t: (np.e ** (0.5 * beta_d * (t**2) + beta_min * t) - 1) ** 0.5
    vp_sigma_inv = lambda beta_d, beta_min: lambda sigma: ((beta_min**2 + 2 * beta_d * (sigma**2 + 1).log()).sqrt() - beta_min) / beta_d
    # Solve the two boundary conditions for beta_d and beta_min.
    vp_beta_d = 2 * (np.log(torch.tensor(sigma_min).cpu() ** 2 + 1) / epsilon_s - np.log(torch.tensor(sigma_max).cpu() ** 2 + 1)) / (epsilon_s - 1)
    vp_beta_min = np.log(torch.tensor(sigma_max).cpu() ** 2 + 1) - 0.5 * vp_beta_d
    t_steps = vp_sigma_inv(vp_beta_d.clone().detach().cpu(), vp_beta_min.clone().detach().cpu())(edm_steps.clone().detach().cpu())
    return t_steps, vp_beta_min, vp_beta_d + vp_beta_min
+
+
+# ----------------------------------------------------------------------------
+
+
def cal_poly(prev_t, j, taus):
    """Evaluate the j-th Lagrange basis polynomial over nodes *prev_t* at *taus*."""
    result = 1
    for node in range(prev_t.shape[0]):
        if node == j:
            continue
        result = result * ((taus - prev_t[node]) / (prev_t[j] - prev_t[node]))
    return result
+
+
+# ----------------------------------------------------------------------------
+
+
def t2alpha_fn(beta_0, beta_1, t):
    """alpha(t) for a linear VP beta schedule: exp(-0.5 t^2 (beta_1 - beta_0) - t beta_0)."""
    exponent = -0.5 * (t**2) * (beta_1 - beta_0) - t * beta_0
    return torch.exp(exponent)
+
+
+# ----------------------------------------------------------------------------
+
+
def cal_intergrand(beta_0, beta_1, taus):
    """Integrand for the DEIS 'tab' coefficients:
    -0.5 * d(log alpha)/dtau / sqrt(alpha * (1 - alpha)),
    where d(log alpha)/dtau is obtained via autograd on t2alpha_fn.
    """
    # Tensors created under inference mode cannot participate in autograd;
    # clone them inside inference_mode(False) to get grad-capable copies.
    with torch.inference_mode(mode=False):
        taus = taus.clone()
        beta_0 = beta_0.clone()
        beta_1 = beta_1.clone()
        with torch.enable_grad():
            taus.requires_grad_(True)
            alpha = t2alpha_fn(beta_0, beta_1, taus)
            log_alpha = alpha.log()
            log_alpha.sum().backward()
            d_log_alpha_dtau = taus.grad
        integrand = -0.5 * d_log_alpha_dtau / torch.sqrt(alpha * (1 - alpha))
    return integrand
+
+
+# ----------------------------------------------------------------------------
+
+
def get_deis_coeff_list(t_steps, max_order, N=10000, deis_mode="tab"):
    """
    Get the coefficient list for DEIS sampling.

    Args:
        t_steps: A pytorch tensor. The time steps for sampling.
        max_order: A `int`. Maximum order of the solver. 1 <= max_order <= 4
        N: A `int`. Use how many points to perform the numerical integration when deis_mode=='tab'.
        deis_mode: A `str`. Select between 'tab' and 'rhoab'. Type of DEIS.
    Returns:
        A pytorch tensor. A batch of generated samples or sampling trajectories if return_inters=True.
    """
    if deis_mode == "tab":
        # Numerical ("tab") variant: integrate the VP integrand against each
        # Lagrange basis polynomial with an N-point Riemann sum.
        t_steps, beta_0, beta_1 = edm2t(t_steps)
        C = []
        for i, (t_cur, t_next) in enumerate(zip(t_steps[:-1], t_steps[1:])):
            order = min(i + 1, max_order)  # warm up: order grows with step index
            if order == 1:
                C.append([])
            else:
                taus = torch.linspace(t_cur, t_next, N)  # split the interval for integral appximation
                dtau = (t_next - t_cur) / N
                prev_t = t_steps[[i - k for k in range(order)]]
                coeff_temp = []
                integrand = cal_intergrand(beta_0, beta_1, taus)
                for j in range(order):
                    poly = cal_poly(prev_t, j, taus)
                    coeff_temp.append(torch.sum(integrand * poly) * dtau)
                C.append(coeff_temp)

    elif deis_mode == "rhoab":
        # Closed-form ("rhoab") variant: polynomial integrals solved analytically.
        # Analytical solution, second order
        def get_def_intergral_2(a, b, start, end, c):
            coeff = (end**3 - start**3) / 3 - (end**2 - start**2) * (a + b) / 2 + (end - start) * a * b
            return coeff / ((c - a) * (c - b))

        # Analytical solution, third order
        def get_def_intergral_3(a, b, c, start, end, d):
            coeff = (end**4 - start**4) / 4 - (end**3 - start**3) * (a + b + c) / 3 + (end**2 - start**2) * (a * b + a * c + b * c) / 2 - (end - start) * a * b * c
            return coeff / ((d - a) * (d - b) * (d - c))

        C = []
        for i, (t_cur, t_next) in enumerate(zip(t_steps[:-1], t_steps[1:])):
            order = min(i, max_order)
            if order == 0:
                C.append([])
            else:
                prev_t = t_steps[[i - k for k in range(order + 1)]]
                if order == 1:
                    coeff_cur = ((t_next - prev_t[1]) ** 2 - (t_cur - prev_t[1]) ** 2) / (2 * (t_cur - prev_t[1]))
                    coeff_prev1 = (t_next - t_cur) ** 2 / (2 * (prev_t[1] - t_cur))
                    coeff_temp = [coeff_cur, coeff_prev1]
                elif order == 2:
                    coeff_cur = get_def_intergral_2(prev_t[1], prev_t[2], t_cur, t_next, t_cur)
                    coeff_prev1 = get_def_intergral_2(t_cur, prev_t[2], t_cur, t_next, prev_t[1])
                    coeff_prev2 = get_def_intergral_2(t_cur, prev_t[1], t_cur, t_next, prev_t[2])
                    coeff_temp = [coeff_cur, coeff_prev1, coeff_prev2]
                elif order == 3:
                    coeff_cur = get_def_intergral_3(prev_t[1], prev_t[2], prev_t[3], t_cur, t_next, t_cur)
                    coeff_prev1 = get_def_intergral_3(t_cur, prev_t[2], prev_t[3], t_cur, t_next, prev_t[1])
                    coeff_prev2 = get_def_intergral_3(t_cur, prev_t[1], prev_t[3], t_cur, t_next, prev_t[2])
                    coeff_prev3 = get_def_intergral_3(t_cur, prev_t[1], prev_t[2], t_cur, t_next, prev_t[3])
                    coeff_temp = [coeff_cur, coeff_prev1, coeff_prev2, coeff_prev3]
                C.append(coeff_temp)

    return C
diff --git a/modules_forge/packages/k_diffusion/external.py b/modules_forge/packages/k_diffusion/external.py
new file mode 100644
index 0000000000000000000000000000000000000000..22c02a4e11a296498cd9093bc8d16a8a433ec5b4
--- /dev/null
+++ b/modules_forge/packages/k_diffusion/external.py
@@ -0,0 +1,39 @@
+import torch
+from torch import nn
+
+from . import sampling
+
+
class ForgeScheduleLinker(nn.Module):
    """Adapt a Forge/backend 'predictor' object to the k-diffusion schedule API.

    Exposes sigmas / sigma_min / sigma_max and the sigma <-> t conversions
    that k-diffusion wrappers expect, delegating everything to the predictor.
    """

    def __init__(self, predictor):
        super().__init__()
        self.predictor = predictor

    @property
    def sigmas(self):
        # Full discrete sigma table of the underlying predictor.
        return self.predictor.sigmas

    @property
    def log_sigmas(self):
        return self.predictor.sigmas.log()

    @property
    def sigma_min(self):
        # NOTE(review): sigma_min/sigma_max are called here while `sigmas`
        # is read as an attribute — confirm against the predictor class.
        return self.predictor.sigma_min()

    @property
    def sigma_max(self):
        return self.predictor.sigma_max()

    def get_sigmas(self, n=None):
        """Return n sigmas spanning the schedule (all of them when n is None),
        with a trailing zero appended."""
        if n is None:
            return sampling.append_zero(self.sigmas.flip(0))
        t_max = len(self.sigmas) - 1
        # Evenly spaced timesteps from t_max down to 0, mapped back to sigma.
        t = torch.linspace(t_max, 0, n, device=self.sigmas.device)
        return sampling.append_zero(self.t_to_sigma(t))

    def sigma_to_t(self, sigma, quantize=None):
        # `quantize` is accepted for k-diffusion API compatibility but ignored.
        return self.predictor.timestep(sigma)

    def t_to_sigma(self, t):
        return self.predictor.sigma(t)
diff --git a/modules_forge/packages/k_diffusion/sampling.py b/modules_forge/packages/k_diffusion/sampling.py
new file mode 100644
index 0000000000000000000000000000000000000000..927753b33da9aa715dbfb3f3112a4ebb7c43b2d3
--- /dev/null
+++ b/modules_forge/packages/k_diffusion/sampling.py
@@ -0,0 +1,845 @@
+# https://github.com/comfyanonymous/ComfyUI/blob/v0.3.75/comfy/k_diffusion/sampling.py
+
+import math
+from functools import partial
+
+import torch
+import torchsde
+from scipy import integrate
+from tqdm.auto import trange
+
+from backend.patcher.base import set_model_options_post_cfg_function
+
+from . import utils
+
+
+def _is_const(sampling) -> bool:
+ return sampling.prediction_type == "const"
+
+
def append_zero(x):
    """Return *x* with a single zero appended along dim 0."""
    zero_tail = x.new_zeros([1])
    return torch.cat((x, zero_tail))
+
+
def get_sigmas_karras(n, sigma_min, sigma_max, rho=7.0, device="cpu"):
    """Constructs the noise schedule of Karras et al. (2022)"""
    ramp = torch.linspace(0, 1, n, device=device)
    low = sigma_min ** (1 / rho)
    high = sigma_max ** (1 / rho)
    # Interpolate in sigma^(1/rho) space, then map back.
    sigmas = (high + ramp * (low - high)) ** rho
    return torch.cat([sigmas, sigmas.new_zeros([1])]).to(device)
+
+
def get_sigmas_exponential(n, sigma_min, sigma_max, device="cpu"):
    """Constructs an exponential noise schedule"""
    log_sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), n, device=device)
    sigmas = log_sigmas.exp()
    return torch.cat([sigmas, sigmas.new_zeros([1])])
+
+
def get_sigmas_polyexponential(n, sigma_min, sigma_max, rho=1.0, device="cpu"):
    """Constructs an polynomial in log sigma noise schedule"""
    ramp = torch.linspace(1, 0, n, device=device) ** rho
    log_min, log_max = math.log(sigma_min), math.log(sigma_max)
    sigmas = torch.exp(ramp * (log_max - log_min) + log_min)
    return torch.cat([sigmas, sigmas.new_zeros([1])])
+
+
def get_sigmas_vp(n, beta_d=19.9, beta_min=0.1, eps_s=1e-3, device="cpu"):
    """Constructs a continuous VP noise schedule"""
    t = torch.linspace(1, eps_s, n, device=device)
    # sigma(t)^2 = exp(beta_d t^2 / 2 + beta_min t) - 1
    variance = torch.special.expm1(beta_d * t**2 / 2 + beta_min * t)
    sigmas = variance.sqrt()
    return torch.cat([sigmas, sigmas.new_zeros([1])])
+
+
def to_d(x, sigma, denoised):
    """Converts a denoiser output to a Karras ODE derivative"""
    residual = x - denoised
    return residual / utils.append_dims(sigma, x.ndim)
+
+
def get_ancestral_step(sigma_from, sigma_to, eta=1.0):
    """Calculates the noise level (sigma_down) to step down to and the amount
    of noise to add (sigma_up) when doing an ancestral sampling step"""
    if not eta:
        # Fully deterministic step: no re-noising.
        return sigma_to, 0.0
    variance_ratio = (sigma_from**2 - sigma_to**2) / sigma_from**2
    sigma_up = min(sigma_to, eta * (sigma_to**2 * variance_ratio) ** 0.5)
    sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
    return sigma_down, sigma_up
+
+
def default_noise_sampler(x):
    """Return a sampler that ignores the sigma arguments and draws fresh
    Gaussian noise shaped like *x* on every call."""
    def sampler(sigma, sigma_next):
        return torch.randn_like(x)
    return sampler
+
+
class BatchedBrownianTree:
    """A wrapper around torchsde.BrownianTree that enables batches of entropy"""

    def __init__(self, x, t0, t1, seed=None, **kwargs):
        # When cpu=True (default) the tree lives on CPU regardless of x's device.
        self.cpu_tree = kwargs.pop("cpu", True)
        t0, t1, self.sign = self.sort(t0, t1)
        w0 = kwargs.pop("w0", None)
        if w0 is None:
            w0 = torch.zeros_like(x)
        self.batched = False
        if seed is None:
            seed = (torch.randint(0, 2**63 - 1, ()).item(),)
        elif isinstance(seed, (tuple, list)):
            # One tree per batch item, each with its own entropy.
            if len(seed) != x.shape[0]:
                raise ValueError("Passing a list or tuple of seeds to BatchedBrownianTree requires a length matching the batch size.")
            self.batched = True
            w0 = w0[0]
        else:
            seed = (seed,)
        if self.cpu_tree:
            t0, w0, t1 = t0.detach().cpu(), w0.detach().cpu(), t1.detach().cpu()
        self.trees = tuple(torchsde.BrownianTree(t0, w0, t1, entropy=s, **kwargs) for s in seed)

    @staticmethod
    def sort(a, b):
        # torchsde requires t0 < t1; remember the flip via the sign.
        return (a, b, 1) if a < b else (b, a, -1)

    def __call__(self, t0, t1):
        t0, t1, sign = self.sort(t0, t1)
        device, dtype = t0.device, t0.dtype
        if self.cpu_tree:
            t0, t1 = t0.detach().cpu().float(), t1.detach().cpu().float()
        # Stack per-seed increments; restore orientation with the two signs.
        w = torch.stack([tree(t0, t1) for tree in self.trees]).to(device=device, dtype=dtype) * (self.sign * sign)
        return w if self.batched else w[0]
+
+
class BrownianTreeNoiseSampler:
    """A noise sampler backed by a torchsde.BrownianTree.

    Args:
        x (Tensor): The tensor whose shape, device and dtype to use to generate
            random samples.
        sigma_min (float): The low end of the valid interval.
        sigma_max (float): The high end of the valid interval.
        seed (int or List[int]): The random seed. If a list of seeds is
            supplied instead of a single integer, then the noise sampler will
            use one BrownianTree per batch item, each with its own seed.
        transform (callable): A function that maps sigma to the sampler's
            internal timestep.
    """

    def __init__(self, x, sigma_min, sigma_max, seed=None, transform=lambda x: x, cpu=False):
        self.transform = transform
        t0, t1 = self.transform(torch.as_tensor(sigma_min)), self.transform(torch.as_tensor(sigma_max))
        self.tree = BatchedBrownianTree(x, t0, t1, seed, cpu=cpu)

    def __call__(self, sigma, sigma_next):
        t0, t1 = self.transform(torch.as_tensor(sigma)), self.transform(torch.as_tensor(sigma_next))
        # Normalize the Brownian increment to unit variance per step.
        return self.tree(t0, t1) / (t1 - t0).abs().sqrt()
+
+
def sigma_to_half_log_snr(sigma, model_sampling):
    """Convert sigma to half-logSNR log(alpha_t / sigma_t)"""
    if _is_const(model_sampling):
        # const/RF parameterization: alpha = 1 - sigma, so the half-logSNR
        # is log((1 - sigma) / sigma) = -logit(sigma).
        return -sigma.logit()
    # Otherwise alpha = 1: log(1 / sigma) = -log(sigma).
    return -sigma.log()
+
+
def half_log_snr_to_sigma(half_log_snr, model_sampling):
    """Convert half-logSNR log(alpha_t / sigma_t) to sigma"""
    negated = -half_log_snr
    if _is_const(model_sampling):
        # sigma = 1 / (1 + exp(half_log_snr)) = sigmoid(-half_log_snr)
        return negated.sigmoid()
    return negated.exp()
+
+
def offset_first_sigma_for_snr(sigmas, model_sampling, percent_offset=1e-4):
    """Adjust the first sigma to avoid invalid logSNR"""
    if len(sigmas) <= 1:
        return sigmas
    if _is_const(model_sampling):
        # For const/RF models sigma == 1 gives logit(1) = +inf; nudge the
        # first sigma just below 1 via the model's percent-to-sigma mapping.
        if sigmas[0] >= 1:
            sigmas = sigmas.clone()  # don't mutate the caller's schedule
            sigmas[0] = model_sampling.percent_to_sigma(percent_offset)
    return sigmas
+
+
@torch.no_grad()
def sample_euler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0.0, s_tmin=0.0, s_tmax=float("inf"), s_noise=1.0):
    """Implements Algorithm 2 (Euler steps) from Karras et al. (2022)

    model is a denoiser callable model(x, sigma_batch, **extra_args); sigmas is
    a decreasing schedule (last entry may be 0). s_churn/s_tmin/s_tmax/s_noise
    control optional stochastic "churn" (temporary sigma increase plus noise).
    """
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])  # broadcasts the scalar sigma to the batch
    for i in trange(len(sigmas) - 1, disable=disable):
        if s_churn > 0:
            # Churn only inside [s_tmin, s_tmax]; gamma bounds per Karras Alg. 2.
            gamma = min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
            sigma_hat = sigmas[i] * (gamma + 1)
        else:
            gamma = 0
            sigma_hat = sigmas[i]

        if gamma > 0:
            # Add exactly enough noise to raise the level from sigma_i to sigma_hat.
            eps = torch.randn_like(x) * s_noise
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = model(x, sigma_hat * s_in, **extra_args)
        d = to_d(x, sigma_hat, denoised)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
        dt = sigmas[i + 1] - sigma_hat
        # Euler method
        x = x + d * dt
    return x
+
+
@torch.no_grad()
def sample_euler_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None):
    # Ancestral (noise re-injecting) Euler sampler; eta scales the ancestral noise.
    if _is_const(model.inner_model.predictor):
        # Rectified-flow ("const") models need different down-step algebra.
        return sample_euler_ancestral_RF(model, x, sigmas, extra_args, callback, disable, eta, s_noise, noise_sampler)
    """Ancestral sampling with Euler method steps"""
    extra_args = {} if extra_args is None else extra_args

    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        # Split the step into a deterministic part (to sigma_down) and noise (sigma_up).
        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})

        if sigma_down == 0:
            x = denoised
        else:
            d = to_d(x, sigmas[i], denoised)
            # Euler method
            dt = sigma_down - sigmas[i]
            x = x + d * dt + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
    return x
+
+
@torch.no_grad()
def sample_euler_ancestral_RF(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None):
    """Ancestral sampling with Euler method steps"""
    # Rectified-flow variant: alpha_t = 1 - sigma_t, so the down-step and the
    # re-noising coefficient are derived from that parameterization.
    extra_args = {} if extra_args is None else extra_args

    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        # sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})

        if sigmas[i + 1] == 0:
            x = denoised
        else:
            # eta interpolates between fully deterministic (0) and ancestral (1).
            downstep_ratio = 1 + (sigmas[i + 1] / sigmas[i] - 1) * eta
            sigma_down = sigmas[i + 1] * downstep_ratio
            alpha_ip1 = 1 - sigmas[i + 1]
            alpha_down = 1 - sigma_down
            # Noise needed so the total variance matches sigma_{i+1} after rescaling.
            renoise_coeff = (sigmas[i + 1] ** 2 - sigma_down**2 * alpha_ip1**2 / alpha_down**2) ** 0.5
            # Euler method
            sigma_down_i_ratio = sigma_down / sigmas[i]
            x = sigma_down_i_ratio * x + (1 - sigma_down_i_ratio) * denoised
            if eta > 0:
                x = (alpha_ip1 / alpha_down) * x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * renoise_coeff
    return x
+
+
@torch.no_grad()
def sample_heun(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0.0, s_tmin=0.0, s_tmax=float("inf"), s_noise=1.0):
    """Implements Algorithm 2 (Heun steps) from Karras et al. (2022)

    Second-order variant of sample_euler: after the Euler step it evaluates
    the model again at the target sigma and averages the two derivatives.
    The churn parameters behave exactly as in sample_euler.
    """
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])  # broadcasts the scalar sigma to the batch
    for i in trange(len(sigmas) - 1, disable=disable):
        if s_churn > 0:
            gamma = min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
            sigma_hat = sigmas[i] * (gamma + 1)
        else:
            gamma = 0
            sigma_hat = sigmas[i]

        # Fix: removed a stray duplicate `sigma_hat = sigmas[i] * (gamma + 1)`
        # that unconditionally recomputed the value both branches just set.
        if gamma > 0:
            eps = torch.randn_like(x) * s_noise
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = model(x, sigma_hat * s_in, **extra_args)
        d = to_d(x, sigma_hat, denoised)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
        dt = sigmas[i + 1] - sigma_hat
        if sigmas[i + 1] == 0:
            # Euler method
            x = x + d * dt
        else:
            # Heun's method
            x_2 = x + d * dt
            denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
            d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
            d_prime = (d + d_2) / 2
            x = x + d_prime * dt
    return x
+
+
@torch.no_grad()
def sample_dpm_2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0.0, s_tmin=0.0, s_tmax=float("inf"), s_noise=1.0):
    """A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)

    Second-order: takes a half step to a log-space midpoint sigma, re-evaluates
    the model there, and uses that derivative for the full step.
    """
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        if s_churn > 0:
            gamma = min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
            sigma_hat = sigmas[i] * (gamma + 1)
        else:
            gamma = 0
            sigma_hat = sigmas[i]

        if gamma > 0:
            eps = torch.randn_like(x) * s_noise
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = model(x, sigma_hat * s_in, **extra_args)
        d = to_d(x, sigma_hat, denoised)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
        if sigmas[i + 1] == 0:
            # Euler method
            dt = sigmas[i + 1] - sigma_hat
            x = x + d * dt
        else:
            # DPM-Solver-2
            # Midpoint in log-sigma space between sigma_hat and sigma_{i+1}.
            sigma_mid = sigma_hat.log().lerp(sigmas[i + 1].log(), 0.5).exp()
            dt_1 = sigma_mid - sigma_hat
            dt_2 = sigmas[i + 1] - sigma_hat
            x_2 = x + d * dt_1
            denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
            d_2 = to_d(x_2, sigma_mid, denoised_2)
            x = x + d_2 * dt_2
    return x
+
+
def linear_multistep_coeff(order, t, i, j):
    """Integrate the j-th Lagrange basis polynomial over [t[i], t[i+1]] —
    the coefficient of the j-th stored derivative in an LMS step."""
    if order - 1 > i:
        raise ValueError(f"Order {order} too high for step {i}")

    def basis(tau):
        value = 1.0
        for m in range(order):
            if m == j:
                continue
            value *= (tau - t[i - m]) / (t[i - j] - t[i - m])
        return value

    return integrate.quad(basis, t[i], t[i + 1], epsrel=1e-4)[0]
+
+
@torch.no_grad()
def sample_lms(model, x, sigmas, extra_args=None, callback=None, disable=None, order=4):
    """Linear multistep sampler: combines up to *order* stored derivatives
    with Adams-Bashforth-style coefficients from linear_multistep_coeff."""
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    sigmas_cpu = sigmas.detach().cpu().numpy()  # quad integration runs on CPU floats
    ds = []  # history of derivatives, newest last
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        d = to_d(x, sigmas[i], denoised)
        ds.append(d)
        if len(ds) > order:
            ds.pop(0)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
        if sigmas[i + 1] == 0:
            # Denoising step
            x = denoised
        else:
            # Warm up: effective order is limited by the history collected so far.
            cur_order = min(i + 1, order)
            coeffs = [linear_multistep_coeff(cur_order, sigmas_cpu, i, j) for j in range(cur_order)]
            x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
    return x
+
+
@torch.no_grad()
def sample_dpmpp_2s_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None):
    # Ancestral DPM-Solver++(2S); rectified-flow models go to the RF variant.
    if _is_const(model.inner_model.predictor):
        return sample_dpmpp_2s_ancestral_RF(model, x, sigmas, extra_args, callback, disable, eta, s_noise, noise_sampler)

    """Ancestral sampling with DPM-Solver++(2S) second-order steps"""
    extra_args = {} if extra_args is None else extra_args

    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
    s_in = x.new_ones([x.shape[0]])
    # t = -log(sigma) parameterization and its inverse.
    sigma_fn = lambda t: t.neg().exp()
    t_fn = lambda sigma: sigma.log().neg()

    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
        if sigma_down == 0:
            # Euler method
            d = to_d(x, sigmas[i], denoised)
            dt = sigma_down - sigmas[i]
            x = x + d * dt
        else:
            # DPM-Solver++(2S)
            t, t_next = t_fn(sigmas[i]), t_fn(sigma_down)
            r = 1 / 2  # midpoint rule
            h = t_next - t
            s = t + r * h
            x_2 = (sigma_fn(s) / sigma_fn(t)) * x - (-h * r).expm1() * denoised
            denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)
            x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_2
        # Noise addition
        if sigmas[i + 1] > 0:
            x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
    return x
+
+
@torch.no_grad()
def sample_dpmpp_2s_ancestral_RF(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None):
    """Ancestral sampling with DPM-Solver++(2S) second-order steps"""
    # Rectified-flow variant: alpha = 1 - sigma, lambda = log((1 - sigma) / sigma).
    extra_args = {} if extra_args is None else extra_args

    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
    s_in = x.new_ones([x.shape[0]])
    sigma_fn = lambda lbda: (lbda.exp() + 1) ** -1
    lambda_fn = lambda sigma: ((1 - sigma) / sigma).log()

    # logged_x = x.unsqueeze(0)

    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        downstep_ratio = 1 + (sigmas[i + 1] / sigmas[i] - 1) * eta
        sigma_down = sigmas[i + 1] * downstep_ratio
        alpha_ip1 = 1 - sigmas[i + 1]
        alpha_down = 1 - sigma_down
        renoise_coeff = (sigmas[i + 1] ** 2 - sigma_down**2 * alpha_ip1**2 / alpha_down**2) ** 0.5
        # sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
        if sigmas[i + 1] == 0:
            # Euler method
            d = to_d(x, sigmas[i], denoised)
            dt = sigma_down - sigmas[i]
            x = x + d * dt
        else:
            # DPM-Solver++(2S)
            if sigmas[i] == 1.0:
                # lambda is undefined at sigma == 1 (log of 0); clamp just below.
                sigma_s = 0.9999
            else:
                t_i, t_down = lambda_fn(sigmas[i]), lambda_fn(sigma_down)
                r = 1 / 2
                h = t_down - t_i
                s = t_i + r * h
                sigma_s = sigma_fn(s)
            # sigma_s = sigmas[i+1]
            sigma_s_i_ratio = sigma_s / sigmas[i]
            u = sigma_s_i_ratio * x + (1 - sigma_s_i_ratio) * denoised
            D_i = model(u, sigma_s * s_in, **extra_args)
            sigma_down_i_ratio = sigma_down / sigmas[i]
            x = sigma_down_i_ratio * x + (1 - sigma_down_i_ratio) * D_i
            # print("sigma_i", sigmas[i], "sigma_ip1", sigmas[i+1],"sigma_down", sigma_down, "sigma_down_i_ratio", sigma_down_i_ratio, "sigma_s_i_ratio", sigma_s_i_ratio, "renoise_coeff", renoise_coeff)
        # Noise addition
        if sigmas[i + 1] > 0 and eta > 0:
            x = (alpha_ip1 / alpha_down) * x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * renoise_coeff
    # logged_x = torch.cat((logged_x, x.unsqueeze(0)), dim=0)
    return x
+
+
@torch.no_grad()
def sample_dpmpp_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None, r=1 / 2):
    """DPM-Solver++ (stochastic)

    Two model evaluations per step: an intermediate point at logSNR fraction
    *r*, then the full step, with ancestral-style noise controlled by eta.
    """
    if len(sigmas) <= 1:
        return x

    extra_args = {} if extra_args is None else extra_args
    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
    seed = extra_args.get("seed", None)
    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
    s_in = x.new_ones([x.shape[0]])

    model_sampling = model.inner_model.predictor
    # lambda here is the half-logSNR log(alpha / sigma); sigma_fn inverts it.
    sigma_fn = partial(half_log_snr_to_sigma, model_sampling=model_sampling)
    lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling)
    sigmas = offset_first_sigma_for_snr(sigmas, model_sampling)

    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
        if sigmas[i + 1] == 0:
            # Denoising step
            x = denoised
        else:
            # DPM-Solver++
            lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1])
            h = lambda_t - lambda_s
            lambda_s_1 = lambda_s + r * h
            fac = 1 / (2 * r)

            sigma_s_1 = sigma_fn(lambda_s_1)

            # alpha = sigma * exp(lambda) by definition of the half-logSNR.
            alpha_s = sigmas[i] * lambda_s.exp()
            alpha_s_1 = sigma_s_1 * lambda_s_1.exp()
            alpha_t = sigmas[i + 1] * lambda_t.exp()

            # Step 1
            sd, su = get_ancestral_step(lambda_s.neg().exp(), lambda_s_1.neg().exp(), eta)
            lambda_s_1_ = sd.log().neg()
            h_ = lambda_s_1_ - lambda_s
            x_2 = (alpha_s_1 / alpha_s) * (-h_).exp() * x - alpha_s_1 * (-h_).expm1() * denoised
            if eta > 0 and s_noise > 0:
                x_2 = x_2 + alpha_s_1 * noise_sampler(sigmas[i], sigma_s_1) * s_noise * su
            denoised_2 = model(x_2, sigma_s_1 * s_in, **extra_args)

            # Step 2
            sd, su = get_ancestral_step(lambda_s.neg().exp(), lambda_t.neg().exp(), eta)
            lambda_t_ = sd.log().neg()
            h_ = lambda_t_ - lambda_s
            denoised_d = (1 - fac) * denoised + fac * denoised_2
            x = (alpha_t / alpha_s) * (-h_).exp() * x - alpha_t * (-h_).expm1() * denoised_d
            if eta > 0 and s_noise > 0:
                x = x + alpha_t * noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * su
    return x
+
+
@torch.no_grad()
def sample_dpmpp_2m(model, x, sigmas, extra_args=None, callback=None, disable=None):
    """DPM-Solver++(2M)

    Deterministic multistep solver: one model evaluation per step, using the
    previous step's denoised output for a second-order correction.
    """
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    # t = -log(sigma) parameterization and its inverse.
    sigma_fn = lambda t: t.neg().exp()
    t_fn = lambda sigma: sigma.log().neg()
    old_denoised = None

    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
        t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
        h = t_next - t
        if old_denoised is None or sigmas[i + 1] == 0:
            # First step (no history) or final denoise: first-order update.
            x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised
        else:
            h_last = t - t_fn(sigmas[i - 1])
            r = h_last / h
            # Extrapolate using the previous denoised output (2M correction).
            denoised_d = (1 + 1 / (2 * r)) * denoised - (1 / (2 * r)) * old_denoised
            x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_d
        old_denoised = denoised
    return x
+
+
@torch.no_grad()
def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None, solver_type="midpoint"):
    """DPM-Solver++(2M) SDE

    Stochastic multistep solver; `solver_type` selects the second-order
    correction form ('heun' or 'midpoint').
    """
    if len(sigmas) <= 1:
        return x

    if solver_type not in {"heun", "midpoint"}:
        raise ValueError("solver_type must be 'heun' or 'midpoint'")

    extra_args = {} if extra_args is None else extra_args
    seed = extra_args.get("seed", None)
    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
    s_in = x.new_ones([x.shape[0]])

    model_sampling = model.inner_model.predictor
    lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling)
    sigmas = offset_first_sigma_for_snr(sigmas, model_sampling)

    old_denoised = None
    h, h_last = None, None

    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
        if sigmas[i + 1] == 0:
            # Denoising step
            x = denoised
        else:
            # DPM-Solver++(2M) SDE
            lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1])
            h = lambda_t - lambda_s
            h_eta = h * (eta + 1)

            # alpha = sigma * exp(lambda) from the half-logSNR definition.
            alpha_t = sigmas[i + 1] * lambda_t.exp()

            x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x + alpha_t * (-h_eta).expm1().neg() * denoised

            if old_denoised is not None:
                # Second-order correction from the previous denoised output.
                r = h_last / h
                if solver_type == "heun":
                    x = x + alpha_t * ((-h_eta).expm1().neg() / (-h_eta) + 1) * (1 / r) * (denoised - old_denoised)
                elif solver_type == "midpoint":
                    x = x + 0.5 * alpha_t * (-h_eta).expm1().neg() * (1 / r) * (denoised - old_denoised)

            if eta > 0 and s_noise > 0:
                x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise

        old_denoised = denoised
        h_last = h
    return x
+
+
@torch.no_grad()
def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None):
    """DPM-Solver++(3M) SDE

    Stochastic multistep solver using up to two previous denoised outputs
    (third order once enough history exists; second order for the step before).
    """

    if len(sigmas) <= 1:
        return x

    extra_args = {} if extra_args is None else extra_args
    seed = extra_args.get("seed", None)
    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
    s_in = x.new_ones([x.shape[0]])

    model_sampling = model.inner_model.predictor
    lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling)
    sigmas = offset_first_sigma_for_snr(sigmas, model_sampling)

    # History: _1 is the previous step, _2 the one before that.
    denoised_1, denoised_2 = None, None
    h, h_1, h_2 = None, None, None

    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
        if sigmas[i + 1] == 0:
            # Denoising step
            x = denoised
        else:
            lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1])
            h = lambda_t - lambda_s
            h_eta = h * (eta + 1)

            alpha_t = sigmas[i + 1] * lambda_t.exp()

            x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x + alpha_t * (-h_eta).expm1().neg() * denoised

            if h_2 is not None:
                # DPM-Solver++(3M) SDE
                r0 = h_1 / h
                r1 = h_2 / h
                d1_0 = (denoised - denoised_1) / r0
                d1_1 = (denoised_1 - denoised_2) / r1
                d1 = d1_0 + (d1_0 - d1_1) * r0 / (r0 + r1)
                d2 = (d1_0 - d1_1) / (r0 + r1)
                phi_2 = h_eta.neg().expm1() / h_eta + 1
                phi_3 = phi_2 / h_eta - 0.5
                x = x + (alpha_t * phi_2) * d1 - (alpha_t * phi_3) * d2
            elif h_1 is not None:
                # DPM-Solver++(2M) SDE
                r = h_1 / h
                d = (denoised - denoised_1) / r
                phi_2 = h_eta.neg().expm1() / h_eta + 1
                x = x + (alpha_t * phi_2) * d

            if eta > 0 and s_noise > 0:
                x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise

        denoised_1, denoised_2 = denoised, denoised_1
        h_1, h_2 = h, h_1
    return x
+
+
@torch.no_grad()
def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
    """LCM sampler: take the model's denoised prediction directly each step,
    then re-noise it to the next sigma via the predictor's noise scaling."""
    extra_args = {} if extra_args is None else extra_args

    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})

        x = denoised
        if sigmas[i + 1] > 0:
            # Re-noise the clean prediction to the next noise level.
            x = model.inner_model.predictor.noise_scaling(sigmas[i + 1], noise_sampler(sigmas[i], sigmas[i + 1]), x)
    return x
+
+
@torch.no_grad()
def sample_euler_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None):
    """Ancestral sampling with Euler method steps (CFG++)

    CFG++: the ODE derivative is built from the *unconditional* denoised
    output (captured via a post-CFG hook) while the update target uses the
    CFG-combined denoised output.
    """
    extra_args = {} if extra_args is None else extra_args

    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler

    model_sampling = model.inner_model.predictor
    lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling)

    uncond_denoised = None

    def post_cfg_function(args):
        # Capture the unconditional prediction from the CFG combine step.
        nonlocal uncond_denoised
        uncond_denoised = args["uncond_denoised"]
        return args["denoised"]

    # Copy model_options so the hook doesn't leak into the caller's dict.
    model_options = extra_args.get("model_options", {}).copy()
    extra_args["model_options"] = set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)

    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
        if sigmas[i + 1] == 0:
            # Denoising step
            x = denoised
        else:
            # alpha = sigma * exp(lambda) from the half-logSNR definition.
            alpha_s = sigmas[i] * lambda_fn(sigmas[i]).exp()
            alpha_t = sigmas[i + 1] * lambda_fn(sigmas[i + 1]).exp()
            d = to_d(x, sigmas[i], alpha_s * uncond_denoised)  # to noise

            # DDIM stochastic sampling
            sigma_down, sigma_up = get_ancestral_step(sigmas[i] / alpha_s, sigmas[i + 1] / alpha_t, eta=eta)
            sigma_down = alpha_t * sigma_down

            # Euler method
            x = alpha_t * denoised + sigma_down * d
            if eta > 0 and s_noise > 0:
                x = x + alpha_t * noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
    return x
+
+
@torch.no_grad()
def sample_euler_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None):
    """Deterministic Euler sampling with CFG++ guidance.

    Implemented as the ancestral CFG++ sampler with all stochasticity switched
    off (eta=0, s_noise=0), which reduces it to plain Euler steps.
    """
    return sample_euler_ancestral_cfg_pp(
        model,
        x,
        sigmas,
        extra_args=extra_args,
        callback=callback,
        disable=disable,
        eta=0.0,
        s_noise=0.0,
        noise_sampler=None,
    )
+
+
@torch.no_grad()
def sample_dpmpp_2m_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None):
    """DPM-Solver++(2M) with CFG++ guidance: the exponential-integrator update
    uses the unconditional prediction in its correction terms."""
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    # t = -log(sigma) parameterization.
    t_fn = lambda sigma: sigma.log().neg()

    old_uncond_denoised = None
    uncond_denoised = None

    def post_cfg_function(args):
        # Capture the unconditional prediction from each model call.
        nonlocal uncond_denoised
        uncond_denoised = args["uncond_denoised"]
        return args["denoised"]

    # Copy so the caller's model_options dict is not mutated.
    model_options = extra_args.get("model_options", {}).copy()
    extra_args["model_options"] = set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)

    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
        t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
        h = t_next - t
        if old_uncond_denoised is None or sigmas[i + 1] == 0:
            # First step (no history yet) or final step: first-order update.
            denoised_mix = -torch.exp(-h) * uncond_denoised
        else:
            # Second-order multistep correction weighted by the step-size ratio r.
            h_last = t - t_fn(sigmas[i - 1])
            r = h_last / h
            denoised_mix = -torch.exp(-h) * uncond_denoised - torch.expm1(-h) * (1 / (2 * r)) * (denoised - old_uncond_denoised)
        x = denoised + denoised_mix + torch.exp(-h) * x
        old_uncond_denoised = uncond_denoised
    return x
+
+
@torch.no_grad()
def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1.0, noise_sampler=None, eta=1.0, cfg_pp=False):
    """Second-order RES multistep sampler (https://arxiv.org/pdf/2308.02157),
    optionally ancestral (eta > 0) and/or with CFG++ guidance (cfg_pp=True)."""
    extra_args = {} if extra_args is None else extra_args

    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
    s_in = x.new_ones([x.shape[0]])
    # Exponential-integrator helpers in t = -log(sigma) space.
    sigma_fn = lambda t: t.neg().exp()
    t_fn = lambda sigma: sigma.log().neg()
    phi1_fn = lambda t: torch.expm1(t) / t
    phi2_fn = lambda t: (phi1_fn(t) - 1.0) / t

    old_sigma_down = None
    old_denoised = None
    uncond_denoised = None

    def post_cfg_function(args):
        nonlocal uncond_denoised
        uncond_denoised = args["uncond_denoised"]
        return args["denoised"]

    if cfg_pp:
        # Copy so the caller's model_options dict is not mutated.
        model_options = extra_args.get("model_options", {}).copy()
        extra_args["model_options"] = set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)

    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
        if sigma_down == 0 or old_denoised is None:
            # Euler method (first step, or last step when sigma_down == 0)
            if cfg_pp:
                d = to_d(x, sigmas[i], uncond_denoised)
                x = denoised + d * sigma_down
            else:
                d = to_d(x, sigmas[i], denoised)
                dt = sigma_down - sigmas[i]
                x = x + d * dt
        else:
            # Second order multistep method in https://arxiv.org/pdf/2308.02157
            t, t_old, t_next, t_prev = t_fn(sigmas[i]), t_fn(old_sigma_down), t_fn(sigma_down), t_fn(sigmas[i - 1])
            h = t_next - t
            c2 = (t_prev - t_old) / h

            phi1_val, phi2_val = phi1_fn(-h), phi2_fn(-h)
            # nan_to_num guards the degenerate c2 -> 0 case.
            b1 = torch.nan_to_num(phi1_val - phi2_val / c2, nan=0.0)
            b2 = torch.nan_to_num(phi2_val / c2, nan=0.0)

            if cfg_pp:
                # Shift x by the guidance delta so the update runs on the uncond trajectory.
                x = x + (denoised - uncond_denoised)
                x = sigma_fn(h) * x + h * (b1 * uncond_denoised + b2 * old_denoised)
            else:
                x = sigma_fn(h) * x + h * (b1 * denoised + b2 * old_denoised)

        # Noise addition (ancestral step)
        if sigmas[i + 1] > 0:
            x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up

        if cfg_pp:
            old_denoised = uncond_denoised
        else:
            old_denoised = denoised
        old_sigma_down = sigma_down
    return x
+
+
@torch.no_grad()
def sample_res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1.0, noise_sampler=None):
    """Deterministic RES multistep sampling: res_multistep with ancestral noise
    disabled (eta=0) and no CFG++ rewiring."""
    return res_multistep(
        model,
        x,
        sigmas,
        extra_args=extra_args,
        callback=callback,
        disable=disable,
        s_noise=s_noise,
        noise_sampler=noise_sampler,
        eta=0.0,
        cfg_pp=False,
    )
+
+
@torch.no_grad()
def sample_Kohaku_LoNyu_Yog(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=None, s_tmin=None, s_tmax=float("inf"), s_noise=None, noise_sampler=None, eta=None):
    """Experimental ancestral Euler variant: in the first half of the schedule
    it averages the step direction with one re-derived from probes at -x and a
    lookahead point (two extra model evaluations per step)."""
    # None defaults let callers pass explicit None and still get the standard values.
    s_churn = 0.0 if s_churn is None else s_churn
    s_tmin = 0.0 if s_tmin is None else s_tmin
    s_noise = 1.0 if s_noise is None else s_noise
    eta = 1.0 if eta is None else eta

    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
    for i in trange(len(sigmas) - 1, disable=disable):
        # Karras-style churn: temporarily raise sigma before stepping.
        gamma = min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
        eps = torch.randn_like(x) * s_noise
        sigma_hat = sigmas[i] * (gamma + 1)
        if gamma > 0:
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = model(x, sigma_hat * s_in, **extra_args)
        d = to_d(x, sigma_hat, denoised)
        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
        dt = sigma_down - sigmas[i]
        if i <= (len(sigmas) - 1) / 2:
            # First half of the schedule: probe the model at -x, take a trial
            # Euler step with the averaged direction, re-evaluate there, and use
            # the mean of original and lookahead directions, plus ancestral noise.
            x2 = -x
            denoised2 = model(x2, sigma_hat * s_in, **extra_args)
            d2 = to_d(x2, sigma_hat, denoised2)
            x3 = x + ((d + d2) / 2) * dt
            denoised3 = model(x3, sigma_hat * s_in, **extra_args)
            d3 = to_d(x3, sigma_hat, denoised3)
            real_d = (d + d3) / 2
            x = x + real_d * dt
            x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
        else:
            # Second half: plain Euler step toward sigma_down, no extra noise.
            x = x + d * dt
    return x
diff --git a/modules_forge/packages/k_diffusion/utils.py b/modules_forge/packages/k_diffusion/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..addb8875d9512a54156cb8248bee09a5960fb734
--- /dev/null
+++ b/modules_forge/packages/k_diffusion/utils.py
@@ -0,0 +1,447 @@
+import hashlib
+import math
+import shutil
+import threading
+import urllib
+import warnings
+from contextlib import contextmanager
+from pathlib import Path
+
+import safetensors
+import torch
+from PIL import Image
+from torch import nn, optim
+from torch.utils import data
+from torchvision.transforms import functional as TF
+
+
def from_pil_image(x):
    """Converts from a PIL image to a tensor scaled to [-1, 1]"""
    x = TF.to_tensor(x)
    if x.ndim == 2:
        # NOTE(review): to_tensor normally yields CxHxW (3 dims); this branch
        # appends a trailing dim for 2D inputs — confirm intended layout if it fires.
        x = x[..., None]
    return x * 2 - 1
+
+
def to_pil_image(x):
    """Converts from a tensor in [-1, 1] to a PIL image"""
    if x.ndim == 4:
        # Batched input: only a batch of one can become a single image.
        assert x.shape[0] == 1
        x = x[0]
    if x.shape[0] == 1:
        # Drop a singleton channel dim so the output is single-channel.
        x = x[0]
    return TF.to_pil_image((x.clamp(-1, 1) + 1) / 2)
+
+
def hf_datasets_augs_helper(examples, transform, image_key, mode="RGB"):
    """Apply *transform* to the image column of a HuggingFace Datasets batch.

    Each image is first converted to *mode*; the result is returned as a dict
    with the same column key.
    """
    converted = (image.convert(mode) for image in examples[image_key])
    return {image_key: [transform(image) for image in converted]}
+
+
def append_dims(x, target_dims):
    """Append trailing singleton dimensions until *x* has *target_dims* dims."""
    if target_dims < x.ndim:
        raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
    # Indexing with None adds one size-1 axis per None, after all existing axes.
    trailing = (None,) * (target_dims - x.ndim)
    return x[(...,) + trailing]
+
+
def n_params(module):
    """Return the total number of parameter elements in *module*."""
    total = 0
    for param in module.parameters():
        total += param.numel()
    return total
+
+
def download_file(path, url, digest=None):
    """Download *url* to *path* if the file does not already exist, optionally
    verifying its SHA-256 hash.

    Args:
        path: Destination file path; parent directories are created as needed.
        url: Source URL, fetched only when *path* is missing.
        digest: Optional expected SHA-256 hex digest. When given, the file on
            disk is verified (even if it already existed and nothing was
            downloaded).

    Returns:
        The destination path as a ``pathlib.Path``.

    Raises:
        OSError: If *digest* is given and the file's hash does not match.
    """
    # A bare `import urllib` does NOT import the `request` submodule, so
    # relying on it at module level only works if some other library happened
    # to import urllib.request first. Import it explicitly here.
    import urllib.request

    path = Path(path)
    path.parent.mkdir(parents=True, exist_ok=True)
    if not path.exists():
        with urllib.request.urlopen(url) as response, open(path, "wb") as f:
            shutil.copyfileobj(response, f)
    if digest is not None:
        # Context manager so the file handle is closed deterministically.
        with open(path, "rb") as f:
            file_digest = hashlib.sha256(f.read()).hexdigest()
        if digest != file_digest:
            raise OSError(f"hash of {path} (url: {url}) failed to validate")
    return path
+
+
@contextmanager
def train_mode(model, mode=True):
    """Context manager that puts *model* into the given training mode and
    restores every submodule's previous mode on exit."""
    saved = [submodule.training for submodule in model.modules()]
    try:
        yield model.train(mode)
    finally:
        # model.modules() yields submodules in a stable order, so zip with the
        # snapshot restores each flag to its pre-context value.
        for submodule, was_training in zip(model.modules(), saved):
            submodule.training = was_training
+
+
def eval_mode(model):
    """A context manager that places a model into evaluation mode and restores
    the previous mode on exit"""
    # Delegates to train_mode with mode=False.
    return train_mode(model, False)
+
+
@torch.no_grad()
def ema_update(model, averaged_model, decay):
    """Fold the current parameters of *model* into *averaged_model* as an
    exponential moving average; buffers are copied verbatim. Intended to be
    called after each optimizer step."""
    params = dict(model.named_parameters())
    avg_params = dict(averaged_model.named_parameters())
    assert params.keys() == avg_params.keys()

    # avg <- decay * avg + (1 - decay) * param, in place.
    for name, param in params.items():
        avg_params[name].lerp_(param, 1 - decay)

    buffers = dict(model.named_buffers())
    avg_buffers = dict(averaged_model.named_buffers())
    assert buffers.keys() == avg_buffers.keys()

    # Buffers (e.g. running stats) are not averaged, just mirrored.
    for name, buf in buffers.items():
        avg_buffers[name].copy_(buf)
+
+
class EMAWarmup:
    """Inverse-decay warmup schedule for an EMA decay rate.

    With inv_gamma=1 and power=1 this implements a simple running average.
    Suggested settings: inv_gamma=1, power=2/3 for runs of a million or more
    steps (decay reaches 0.999 at 31.6K steps, 0.9999 at 1M steps);
    inv_gamma=1, power=3/4 for shorter runs (0.999 at 10K steps, 0.9999 at
    215.4K steps).

    Args:
        inv_gamma (float): Inverse multiplicative factor of EMA warmup. Default: 1.
        power (float): Exponential factor of EMA warmup. Default: 1.
        min_value (float): The minimum EMA decay rate. Default: 0.
        max_value (float): The maximum EMA decay rate. Default: 1.
        start_at (int): The epoch to start averaging at. Default: 0.
        last_epoch (int): The index of last epoch. Default: 0.
    """

    def __init__(self, inv_gamma=1.0, power=1.0, min_value=0.0, max_value=1.0, start_at=0, last_epoch=0):
        self.inv_gamma = inv_gamma
        self.power = power
        self.min_value = min_value
        self.max_value = max_value
        self.start_at = start_at
        self.last_epoch = last_epoch

    def state_dict(self):
        """Return the schedule state as a plain dict."""
        return {**self.__dict__}

    def load_state_dict(self, state_dict):
        """Restore state previously produced by :meth:`state_dict`.

        Args:
            state_dict (dict): scaler state, as returned by :meth:`state_dict`.
        """
        self.__dict__.update(state_dict)

    def get_value(self):
        """Return the EMA decay rate for the current epoch."""
        epoch = max(0, self.last_epoch - self.start_at)
        value = 1 - (1 + epoch / self.inv_gamma) ** -self.power
        # epoch is clamped to >= 0 above, so the raw value is always used;
        # clamp it into [min_value, max_value].
        return min(self.max_value, max(self.min_value, value))

    def step(self):
        """Advance the epoch counter by one."""
        self.last_epoch += 1
+
+
class InverseLR(optim.lr_scheduler._LRScheduler):
    """Implements an inverse decay learning rate schedule with an optional exponential
    warmup. When last_epoch=-1, sets initial lr as lr.
    inv_gamma is the number of steps/epochs required for the learning rate to decay to
    (1 / 2)**power of its original value.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        inv_gamma (float): Inverse multiplicative factor of learning rate decay. Default: 1.
        power (float): Exponential factor of learning rate decay. Default: 1.
        warmup (float): Exponential warmup factor (0 <= warmup < 1, 0 to disable)
            Default: 0.
        min_lr (float): The minimum learning rate. Default: 0.
        last_epoch (int): The index of last epoch. Default: -1.
        verbose (bool): If ``True``, prints a message to stdout for
            each update. Default: ``False``.
    """

    def __init__(self, optimizer, inv_gamma=1.0, power=1.0, warmup=0.0, min_lr=0.0, last_epoch=-1, verbose=False):
        self.inv_gamma = inv_gamma
        self.power = power
        if not 0.0 <= warmup < 1:
            raise ValueError("Invalid value for warmup")
        self.warmup = warmup
        self.min_lr = min_lr
        # NOTE(review): _LRScheduler is torch's legacy base class name — confirm
        # the installed torch version still accepts the (optimizer, last_epoch,
        # verbose) positional form.
        super().__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        # _get_lr_called_within_step is torch's internal guard against calling
        # get_lr() directly instead of get_last_lr().
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.")

        return self._get_closed_form_lr()

    def _get_closed_form_lr(self):
        # Warmup factor rises exponentially toward 1; warmup=0 disables it.
        warmup = 1 - self.warmup ** (self.last_epoch + 1)
        lr_mult = (1 + self.last_epoch / self.inv_gamma) ** -self.power
        return [warmup * max(self.min_lr, base_lr * lr_mult) for base_lr in self.base_lrs]
+
+
class ExponentialLR(optim.lr_scheduler._LRScheduler):
    """Implements an exponential learning rate schedule with an optional exponential
    warmup. When last_epoch=-1, sets initial lr as lr. Decays the learning rate
    continuously by decay (default 0.5) every num_steps steps.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        num_steps (float): The number of steps to decay the learning rate by decay in.
        decay (float): The factor by which to decay the learning rate every num_steps
            steps. Default: 0.5.
        warmup (float): Exponential warmup factor (0 <= warmup < 1, 0 to disable)
            Default: 0.
        min_lr (float): The minimum learning rate. Default: 0.
        last_epoch (int): The index of last epoch. Default: -1.
        verbose (bool): If ``True``, prints a message to stdout for
            each update. Default: ``False``.
    """

    def __init__(self, optimizer, num_steps, decay=0.5, warmup=0.0, min_lr=0.0, last_epoch=-1, verbose=False):
        self.num_steps = num_steps
        self.decay = decay
        if not 0.0 <= warmup < 1:
            raise ValueError("Invalid value for warmup")
        self.warmup = warmup
        self.min_lr = min_lr
        super().__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        # Guard against direct get_lr() calls; torch expects get_last_lr().
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.")

        return self._get_closed_form_lr()

    def _get_closed_form_lr(self):
        warmup = 1 - self.warmup ** (self.last_epoch + 1)
        # decay**(1/num_steps) per epoch compounds to `decay` every num_steps epochs.
        lr_mult = (self.decay ** (1 / self.num_steps)) ** self.last_epoch
        return [warmup * max(self.min_lr, base_lr * lr_mult) for base_lr in self.base_lrs]
+
+
class ConstantLRWithWarmup(optim.lr_scheduler._LRScheduler):
    """Implements a constant learning rate schedule with an optional exponential
    warmup. When last_epoch=-1, sets initial lr as lr.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        warmup (float): Exponential warmup factor (0 <= warmup < 1, 0 to disable)
            Default: 0.
        last_epoch (int): The index of last epoch. Default: -1.
        verbose (bool): If ``True``, prints a message to stdout for
            each update. Default: ``False``.
    """

    def __init__(self, optimizer, warmup=0.0, last_epoch=-1, verbose=False):
        if not 0.0 <= warmup < 1:
            raise ValueError("Invalid value for warmup")
        self.warmup = warmup
        super().__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        # Guard against direct get_lr() calls; torch expects get_last_lr().
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.")

        return self._get_closed_form_lr()

    def _get_closed_form_lr(self):
        # Only the warmup factor varies; the base lr itself is constant.
        warmup = 1 - self.warmup ** (self.last_epoch + 1)
        return [warmup * base_lr for base_lr in self.base_lrs]
+
+
def stratified_uniform(shape, group=0, groups=1, dtype=None, device=None):
    """Draw stratified samples from U(0, 1).

    The unit interval is split into ``shape[-1] * groups`` equal strata; this
    group's strata each contribute one jittered sample, covering the interval
    more evenly than plain ``torch.rand``.
    """
    if groups <= 0:
        raise ValueError(f"groups must be positive, got {groups}")
    if group < 0 or group >= groups:
        raise ValueError(f"group must be in [0, {groups})")
    n = shape[-1] * groups
    # This group's stratum starts: group, group + groups, group + 2*groups, ...
    offsets = torch.arange(group, n, groups, dtype=dtype, device=device)
    jitter = torch.rand(shape, dtype=dtype, device=device)
    return (offsets + jitter) / n
+
+
# Thread-local holder for the active stratified-sampling settings
# (set by enable_stratified, read by stratified_with_settings).
stratified_settings = threading.local()
+
+
@contextmanager
def enable_stratified(group=0, groups=1, disable=False):
    """A context manager that enables stratified sampling via the thread-local
    stratified_settings"""
    try:
        stratified_settings.disable = disable
        stratified_settings.group = group
        stratified_settings.groups = groups
        yield
    finally:
        # Delete the attributes entirely so stratified_with_settings falls back
        # to plain torch.rand outside the context.
        del stratified_settings.disable
        del stratified_settings.group
        del stratified_settings.groups
+
+
@contextmanager
def enable_stratified_accelerate(accelerator, disable=False):
    """A context manager that enables stratified sampling, distributing the strata across
    all processes and gradient accumulation steps using settings from Hugging Face Accelerate"""
    # The try/finally wrapper has an empty finally; cleanup is handled by the
    # inner enable_stratified context.
    try:
        rank = accelerator.process_index
        world_size = accelerator.num_processes
        acc_steps = accelerator.gradient_state.num_steps
        acc_step = accelerator.step % acc_steps
        # Each (process, accumulation step) pair gets a distinct stratum group.
        group = rank * acc_steps + acc_step
        groups = world_size * acc_steps
        with enable_stratified(group, groups, disable=disable):
            yield
    finally:
        pass
+
+
def stratified_with_settings(shape, dtype=None, device=None):
    """Draws stratified samples from a uniform distribution, using settings from a context
    manager"""
    # Outside an enable_stratified context (or when disabled), fall back to
    # plain uniform sampling.
    if not hasattr(stratified_settings, "disable") or stratified_settings.disable:
        return torch.rand(shape, dtype=dtype, device=device)
    return stratified_uniform(shape, stratified_settings.group, stratified_settings.groups, dtype=dtype, device=device)
+
+
def rand_log_normal(shape, loc=0.0, scale=1.0, device="cpu", dtype=torch.float32):
    """Draws samples from a lognormal distribution"""
    # Keep u strictly inside (0, 1): the normal icdf is infinite at 0 and 1.
    u = stratified_with_settings(shape, device=device, dtype=dtype) * (1 - 2e-7) + 1e-7
    return torch.distributions.Normal(loc, scale).icdf(u).exp()
+
+
def rand_log_logistic(shape, loc=0.0, scale=1.0, min_value=0.0, max_value=float("inf"), device="cpu", dtype=torch.float32):
    """Draws samples from an optionally truncated log-logistic distribution"""
    # CDF bounds are computed in float64 for precision, then the result is cast back.
    min_value = torch.as_tensor(min_value, device=device, dtype=torch.float64)
    max_value = torch.as_tensor(max_value, device=device, dtype=torch.float64)
    min_cdf = min_value.log().sub(loc).div(scale).sigmoid()
    max_cdf = max_value.log().sub(loc).div(scale).sigmoid()
    # Inverse-CDF sampling restricted to [min_cdf, max_cdf].
    u = stratified_with_settings(shape, device=device, dtype=torch.float64) * (max_cdf - min_cdf) + min_cdf
    return u.logit().mul(scale).add(loc).exp().to(dtype)
+
+
def rand_log_uniform(shape, min_value, max_value, device="cpu", dtype=torch.float32):
    """Draws samples from a log-uniform distribution"""
    # Sample uniformly in log space, then exponentiate.
    min_value = math.log(min_value)
    max_value = math.log(max_value)
    return (stratified_with_settings(shape, device=device, dtype=dtype) * (max_value - min_value) + min_value).exp()
+
+
def rand_v_diffusion(shape, sigma_data=1.0, min_value=0.0, max_value=float("inf"), device="cpu", dtype=torch.float32):
    """Draws samples from a truncated v-diffusion training timestep distribution"""
    # The CDF here is atan(sigma / sigma_data) * 2 / pi; sample its inverse on
    # the truncated range.
    min_cdf = math.atan(min_value / sigma_data) * 2 / math.pi
    max_cdf = math.atan(max_value / sigma_data) * 2 / math.pi
    u = stratified_with_settings(shape, device=device, dtype=dtype) * (max_cdf - min_cdf) + min_cdf
    return torch.tan(u * math.pi / 2) * sigma_data
+
+
def rand_cosine_interpolated(shape, image_d, noise_d_low, noise_d_high, sigma_data=1.0, min_value=1e-3, max_value=1e3, device="cpu", dtype=torch.float32):
    """Draws samples from an interpolated cosine timestep distribution (from simple diffusion)"""

    def logsnr_schedule_cosine(t, logsnr_min, logsnr_max):
        # Standard cosine log-SNR schedule over t in [0, 1].
        t_min = math.atan(math.exp(-0.5 * logsnr_max))
        t_max = math.atan(math.exp(-0.5 * logsnr_min))
        return -2 * torch.log(torch.tan(t_min + t * (t_max - t_min)))

    def logsnr_schedule_cosine_shifted(t, image_d, noise_d, logsnr_min, logsnr_max):
        # Shift the schedule by the resolution ratio (simple diffusion trick).
        shift = 2 * math.log(noise_d / image_d)
        return logsnr_schedule_cosine(t, logsnr_min - shift, logsnr_max - shift) + shift

    def logsnr_schedule_cosine_interpolated(t, image_d, noise_d_low, noise_d_high, logsnr_min, logsnr_max):
        # Linearly interpolate between the low- and high-noise shifted schedules.
        logsnr_low = logsnr_schedule_cosine_shifted(t, image_d, noise_d_low, logsnr_min, logsnr_max)
        logsnr_high = logsnr_schedule_cosine_shifted(t, image_d, noise_d_high, logsnr_min, logsnr_max)
        return torch.lerp(logsnr_low, logsnr_high, t)

    # Convert the sigma range to a log-SNR range, sample u, map to sigma.
    logsnr_min = -2 * math.log(min_value / sigma_data)
    logsnr_max = -2 * math.log(max_value / sigma_data)
    u = stratified_with_settings(shape, device=device, dtype=dtype)
    logsnr = logsnr_schedule_cosine_interpolated(u, image_d, noise_d_low, noise_d_high, logsnr_min, logsnr_max)
    return torch.exp(-logsnr / 2) * sigma_data
+
+
def rand_split_log_normal(shape, loc, scale_1, scale_2, device="cpu", dtype=torch.float32):
    """Draw samples from a split lognormal distribution.

    In log space the density uses *scale_1* below *loc* and *scale_2* above it;
    the two halves are mixed in proportion to their scales.
    """
    magnitude = torch.randn(shape, device=device, dtype=dtype).abs()
    side = torch.rand(shape, device=device, dtype=dtype)
    left = magnitude * -scale_1 + loc
    right = magnitude * scale_2 + loc
    # Probability mass of the left half is scale_1 / (scale_1 + scale_2).
    ratio = scale_1 / (scale_1 + scale_2)
    return torch.where(side < ratio, left, right).exp()
+
+
class FolderOfImages(data.Dataset):
    """Recursively finds all images in a directory. It does not support
    classes/targets"""

    # Compared case-insensitively against Path.suffix in __init__.
    IMG_EXTENSIONS = {".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp"}

    def __init__(self, root, transform=None):
        super().__init__()
        self.root = Path(root)
        # Identity transform by default, so __getitem__ is uniform either way.
        self.transform = nn.Identity() if transform is None else transform
        # Sorted for a deterministic index -> file mapping.
        self.paths = sorted(path for path in self.root.rglob("*") if path.suffix.lower() in self.IMG_EXTENSIONS)

    def __repr__(self):
        return f'FolderOfImages(root="{self.root}", len: {len(self)})'

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, key):
        # Returns a 1-tuple to mimic (input, target)-style datasets without a target.
        path = self.paths[key]
        with open(path, "rb") as f:
            image = Image.open(f).convert("RGB")
        image = self.transform(image)
        return (image,)
+
+
class CSVLogger:
    """Minimal append-only CSV logger.

    Opens *filename* for appending when it already exists; otherwise creates it
    and writes *columns* as the header row. Each `write` call emits one
    comma-joined, immediately flushed line.
    """

    def __init__(self, filename, columns):
        self.filename = Path(filename)
        self.columns = columns
        is_new = not self.filename.exists()
        # "w" creates the file; "a" preserves previously logged rows.
        self.file = open(self.filename, "w" if is_new else "a")
        if is_new:
            self.write(*self.columns)

    def write(self, *args):
        print(*args, sep=",", file=self.file, flush=True)
+
+
@contextmanager
def tf32_mode(cudnn=None, matmul=None):
    """Temporarily set whether TF32 is allowed for cuDNN and/or matmul.

    Passing ``None`` for either argument leaves that backend's setting
    untouched; previous values are restored on exit.
    """
    saved_cudnn = torch.backends.cudnn.allow_tf32
    saved_matmul = torch.backends.cuda.matmul.allow_tf32
    try:
        if cudnn is not None:
            torch.backends.cudnn.allow_tf32 = cudnn
        if matmul is not None:
            torch.backends.cuda.matmul.allow_tf32 = matmul
        yield
    finally:
        # Only restore the settings this context actually changed.
        if cudnn is not None:
            torch.backends.cudnn.allow_tf32 = saved_cudnn
        if matmul is not None:
            torch.backends.cuda.matmul.allow_tf32 = saved_matmul
+
+
def get_safetensors_metadata(path):
    """Retrieves the metadata from a safetensors file"""
    # NOTE(review): the safe_open handle is not explicitly closed here; this
    # relies on garbage collection to release the file.
    return safetensors.safe_open(path, "pt").metadata()
+
+
def ema_update_dict(values, updates, decay):
    """Update *values* in place with an EMA of *updates* and return it.

    Keys not yet present are initialized to the incoming value; existing
    entries are blended as ``decay * old + (1 - decay) * new`` using in-place
    augmented assignment (so tensor values are mutated, not replaced).
    """
    for key, new in updates.items():
        if key not in values:
            values[key] = new
        else:
            values[key] *= decay
            values[key] += (1 - decay) * new
    return values
diff --git a/modules_forge/patch_basic.py b/modules_forge/patch_basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..b123287523bfa122fea2efcbe6436d680634902e
--- /dev/null
+++ b/modules_forge/patch_basic.py
@@ -0,0 +1,96 @@
+import os
+import time
+import warnings
+from functools import wraps
+from pathlib import Path
+
+import gradio.networking
+import httpx
+import safetensors.torch
+import torch
+from tqdm import tqdm
+
+from modules.errors import display
+
+
def gradio_url_ok_fix(url: str) -> bool:
    """Replacement for gradio.networking.url_ok: poll *url* with HEAD up to 5
    times, 0.5 s apart, and report whether it answered 200, 401, or 302.

    TLS verification is disabled and the timeout is effectively unlimited;
    connection failures yield False.
    """
    try:
        for _ in range(5):
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")  # silence the verify=False warning
                r = httpx.head(url, timeout=999, verify=False)
            if r.status_code in (200, 401, 302):
                return True
            time.sleep(0.500)
    except (ConnectionError, httpx.ConnectError):
        return False
    return False
+
+
def build_loaded(module, loader_name):
    """Wrap ``module.<loader_name>`` with corrupted-file handling.

    The original loader is stashed once as ``<loader_name>_origin`` so that
    calling this twice does not stack wrappers. When the wrapped loader raises,
    every file-path argument is renamed to ``<path>.corrupted`` and a
    ValueError is raised, prompting the user to re-download the model.
    """
    original_loader_name = f"{loader_name}_origin"

    if not hasattr(module, original_loader_name):
        setattr(module, original_loader_name, getattr(module, loader_name))

    original_loader = getattr(module, original_loader_name)

    @wraps(original_loader)
    def loader(*args, **kwargs):
        try:
            with warnings.catch_warnings():
                warnings.simplefilter(action="ignore", category=FutureWarning)
                return original_loader(*args, **kwargs)
        except Exception as e:
            display(e, f"{module.__name__}.{loader_name}")

            exc = "\n"
            # Treat any string argument that names an existing file as the
            # (possibly corrupted) model file.
            for path in list(args) + list(kwargs.values()):
                if isinstance(path, str) and os.path.isfile(path):
                    exc += f'Failed to read file "{path}"\n'
                    backup_file = f"{path}.corrupted"
                    if os.path.exists(backup_file):
                        os.remove(backup_file)
                    os.replace(path, backup_file)
                    exc += f'Forge has moved the corrupted file to "{backup_file}"\n'
                    exc += "Please try downloading the model again\n"
            print(exc)
            # `from None` hides the chained traceback; the original error was
            # already shown via display().
            raise ValueError from None

    setattr(module, loader_name, loader)
+
+
def always_show_tqdm(*args, **kwargs):
    """tqdm factory that forces progress bars on (used to patch huggingface_hub).

    Drops the hf-specific ``name`` kwarg, which plain tqdm does not accept.
    """
    kwargs["disable"] = False
    if "name" in kwargs:
        del kwargs["name"]
    return tqdm(*args, **kwargs)
+
+
def long_path_prefix(path: Path) -> Path:
    """On Windows, prefix a not-yet-existing path with ``\\\\?\\`` so it may
    exceed the legacy MAX_PATH limit; otherwise return the path unchanged."""
    if os.name != "nt":
        return path
    text = str(path)
    # Already-prefixed or already-existing paths are left alone.
    if text.startswith("\\\\?\\") or path.exists():
        return path
    return Path("\\\\?\\" + text)
+
+
def patch_all_basics():
    """Apply Forge's process-wide monkeypatches: always-visible and quieter
    huggingface_hub downloads, Windows long-path support for downloads, a more
    tolerant gradio URL check, and corruption-safe checkpoint loaders."""
    import logging

    from huggingface_hub import file_download

    file_download.tqdm = always_show_tqdm
    file_download.logger.setLevel(logging.ERROR)

    from huggingface_hub.file_download import _download_to_tmp_and_move as original_download_to_tmp_and_move

    @wraps(original_download_to_tmp_and_move)
    def patched_download_to_tmp_and_move(incomplete_path: Path, destination_path: Path, *args, **kwargs):
        # Prefix both paths with \\?\ on Windows so downloads can exceed MAX_PATH.
        incomplete_path = long_path_prefix(incomplete_path)
        destination_path = long_path_prefix(destination_path)
        return original_download_to_tmp_and_move(incomplete_path, destination_path, *args, **kwargs)

    # NOTE(review): patches a private huggingface_hub function; may break on
    # library upgrades.
    file_download._download_to_tmp_and_move = patched_download_to_tmp_and_move

    gradio.networking.url_ok = gradio_url_ok_fix
    build_loaded(safetensors.torch, "load_file")
    build_loaded(torch, "load")
diff --git a/modules_forge/presets.py b/modules_forge/presets.py
new file mode 100644
index 0000000000000000000000000000000000000000..c814b86732dfa2d4575c7e87987bb74a144684d3
--- /dev/null
+++ b/modules_forge/presets.py
@@ -0,0 +1,148 @@
+from typing import TYPE_CHECKING, Callable
+
+if TYPE_CHECKING:
+ from modules.options import OptionInfo
+
+from enum import Enum
+
+import gradio as gr
+
+from backend.memory_management import total_vram
+from modules.shared_items import list_samplers, list_schedulers
+
+
class PresetArch(Enum):
    """Model architecture families that have UI presets; values are stable
    integers in declaration order (used implicitly by iteration order)."""

    sd = 1
    xl = 2
    flux = 3
    qwen = 4
    lumina = 5
    wan = 6
+
+
# Default sampler name per architecture preset.
SAMPLERS = {
    PresetArch.sd: "Euler a",
    PresetArch.xl: "DPM++ 2M SDE",
    PresetArch.flux: "Euler",
    PresetArch.qwen: "LCM",
    PresetArch.lumina: "Res Multistep",
    PresetArch.wan: "Euler",
}

# Default scheduler name per architecture preset.
SCHEDULERS = {
    PresetArch.sd: "Automatic",
    PresetArch.xl: "Karras",
    PresetArch.flux: "Beta",
    PresetArch.qwen: "Normal",
    PresetArch.lumina: "Linear Quadratic",
    PresetArch.wan: "Simple",
}

# Default generation width in pixels per architecture preset.
WIDTH = {
    PresetArch.sd: 512,
    PresetArch.xl: 896,
    PresetArch.flux: 896,
    PresetArch.qwen: 896,
    PresetArch.lumina: 1024,
    PresetArch.wan: 1152,
}

# Default generation height in pixels per architecture preset.
HEIGHT = {
    PresetArch.sd: 512,
    PresetArch.xl: 1152,
    PresetArch.flux: 1152,
    PresetArch.qwen: 1152,
    PresetArch.lumina: 1024,
    PresetArch.wan: 896,
}

# Default CFG scale per architecture preset (1.0 for distilled/guidance-free models).
CFG = {
    PresetArch.sd: 6.0,
    PresetArch.xl: 4.0,
    PresetArch.flux: 1.0,
    PresetArch.qwen: 1.0,
    PresetArch.lumina: 4.5,
    PresetArch.wan: 1.0,
}
+
+
def register(options_templates: dict, options_section: Callable, OptionInfo: "OptionInfo"):
    """Register per-architecture preset options (hidden checkpoint state,
    sampler/scheduler, resolution, and CFG sliders) on the WebUI options
    registry, plus architecture-specific extras for FLUX, Lumina, and WAN.

    Args:
        options_templates: Global options dict, updated in place.
        options_section: Factory that tags each option dict with its section.
        OptionInfo: Option descriptor class from modules.options.
    """
    # Reserve roughly 1 GB (or 2 GB on cards above ~8 GB) of VRAM for activations.
    inference_vram = int(total_vram - (1024 if total_vram < 8200 else 2048))

    for arch in PresetArch:
        name = arch.name

        # Hidden per-arch state: last used checkpoint and additional modules.
        options_templates.update(
            options_section(
                (None, "Forge Hidden Options"),
                {
                    f"forge_checkpoint_{name}": OptionInfo(None),
                    f"forge_additional_modules_{name}": OptionInfo([]),
                },
            )
        )

        sampler, scheduler = SAMPLERS[arch], SCHEDULERS[arch]

        # Dropdown choices are lambdas so newly registered samplers/schedulers
        # appear without re-running this registration.
        options_templates.update(
            options_section(
                (f"ui_{name}", name.upper(), "presets"),
                {
                    f"{name}_t2i_sampler": OptionInfo(sampler, "txt2img sampler", gr.Dropdown, lambda: {"choices": [x.name for x in list_samplers()]}),
                    f"{name}_t2i_scheduler": OptionInfo(scheduler, "txt2img scheduler", gr.Dropdown, lambda: {"choices": list_schedulers()}),
                    f"{name}_i2i_sampler": OptionInfo(sampler, "img2img sampler", gr.Dropdown, lambda: {"choices": [x.name for x in list_samplers()]}),
                    f"{name}_i2i_scheduler": OptionInfo(scheduler, "img2img scheduler", gr.Dropdown, lambda: {"choices": list_schedulers()}),
                },
            )
        )

        w, h, cfg = WIDTH[arch], HEIGHT[arch], CFG[arch]

        options_templates.update(
            options_section(
                (f"ui_{name}", name.upper(), "presets"),
                {
                    f"{name}_t2i_width": OptionInfo(w, "txt2img Width", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
                    f"{name}_t2i_height": OptionInfo(h, "txt2img Height", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
                    f"{name}_t2i_cfg": OptionInfo(cfg, "txt2img CFG", gr.Slider, {"minimum": 1, "maximum": 30, "step": 0.1}),
                    f"{name}_t2i_hr_cfg": OptionInfo(cfg, "txt2img Hires. CFG", gr.Slider, {"minimum": 1, "maximum": 30, "step": 0.1}),
                    f"{name}_i2i_width": OptionInfo(w, "img2img Width", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
                    f"{name}_i2i_height": OptionInfo(h, "img2img Height", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
                    f"{name}_i2i_cfg": OptionInfo(cfg, "img2img CFG", gr.Slider, {"minimum": 1, "maximum": 30, "step": 0.1}),
                    # The GPU-weights slider is hidden for the sd preset.
                    f"{name}_gpu_mb": OptionInfo(inference_vram, "GPU Weights (MB)", gr.Slider, {"visible": (arch is not PresetArch.sd), "minimum": 0, "maximum": total_vram, "step": 1}),
                },
            )
        )

    # FLUX-only: distilled CFG sliders.
    options_templates.update(
        options_section(
            ("ui_flux", "FLUX", "presets"),
            {
                "flux_t2i_d_cfg": OptionInfo(3.0, "txt2img Distilled CFG", gr.Slider, {"minimum": 1, "maximum": 10, "step": 0.1}),
                "flux_t2i_hr_d_cfg": OptionInfo(3.0, "txt2img Distilled Hires. CFG", gr.Slider, {"minimum": 1, "maximum": 10, "step": 0.1}),
                "flux_i2i_d_cfg": OptionInfo(3.0, "img2img Distilled CFG", gr.Slider, {"minimum": 1, "maximum": 10, "step": 0.1}),
            },
        )
    )

    # Lumina-only: shift sliders (stored under the *_d_cfg option keys).
    options_templates.update(
        options_section(
            ("ui_lumina", "LUMINA", "presets"),
            {
                "lumina_t2i_d_cfg": OptionInfo(6.0, "txt2img Shift", gr.Slider, {"minimum": 1, "maximum": 10, "step": 0.1}),
                "lumina_t2i_hr_d_cfg": OptionInfo(6.0, "txt2img Hires. Shift", gr.Slider, {"minimum": 1, "maximum": 10, "step": 0.1}),
                "lumina_i2i_d_cfg": OptionInfo(6.0, "img2img Shift", gr.Slider, {"minimum": 1, "maximum": 10, "step": 0.1}),
            },
        )
    )

    # WAN-only: shift sliders (stored under the *_d_cfg option keys).
    options_templates.update(
        options_section(
            ("ui_wan", "WAN", "presets"),
            {
                "wan_t2i_d_cfg": OptionInfo(8.0, "txt2img Shift", gr.Slider, {"minimum": 1, "maximum": 10, "step": 0.1}),
                "wan_t2i_hr_d_cfg": OptionInfo(8.0, "txt2img Hires. Shift", gr.Slider, {"minimum": 1, "maximum": 10, "step": 0.1}),
                "wan_i2i_d_cfg": OptionInfo(8.0, "img2img Shift", gr.Slider, {"minimum": 1, "maximum": 10, "step": 0.1}),
            },
        )
    )
diff --git a/modules_forge/shared.py b/modules_forge/shared.py
new file mode 100644
index 0000000000000000000000000000000000000000..5211ef64f01b3b1e567cbf0404e62c62e1da7ccb
--- /dev/null
+++ b/modules_forge/shared.py
@@ -0,0 +1,50 @@
+import os
+
+from backend import utils
+from modules.paths_internal import models_path, normalized_filepath, parser
+
# Forge-specific command-line flags, registered on the shared webui argument
# parser (imported from modules.paths_internal) so they are parsed together
# with the stock options.
parser.add_argument(
    "--controlnet-dir",
    type=normalized_filepath,
    help="Path to directory with ControlNet models",
    default=os.path.join(models_path, "ControlNet"),
)
parser.add_argument(
    "--controlnet-preprocessor-models-dir",
    type=normalized_filepath,
    help="Path to directory with Annotator models",
    default=os.path.join(models_path, "ControlNetPreprocessor"),
)

# parse_known_args: flags owned by other modules are ignored instead of erroring.
cmd_opts, _ = parser.parse_known_args()

# Model directories are created eagerly at import time so later code can
# assume they exist.
controlnet_dir: str = cmd_opts.controlnet_dir
os.makedirs(controlnet_dir, exist_ok=True)

preprocessor_dir: str = cmd_opts.controlnet_preprocessor_models_dir
os.makedirs(preprocessor_dir, exist_ok=True)

diffusers_dir: str = os.path.join(models_path, "diffusers")
os.makedirs(diffusers_dir, exist_ok=True)

# Global registries filled by add_supported_preprocessor / add_supported_control_model.
supported_preprocessors = {}  # preprocessor.name -> preprocessor instance
supported_control_models = []  # control-model patcher classes, tried in order
+
+
def add_supported_preprocessor(preprocessor):
    """Register a preprocessor instance in the global registry, keyed by its name."""
    supported_preprocessors.update({preprocessor.name: preprocessor})
+
+
def add_supported_control_model(control_model):
    """Append a control-model patcher class to the global list of known types."""
    supported_control_models.insert(len(supported_control_models), control_model)
+
+
def try_load_supported_control_model(ckpt_path):
    """Load a checkpoint and return the first control-model patcher that accepts it.

    Returns None when no registered type recognizes the state dict.
    """
    state_dict = utils.load_torch_file(ckpt_path, safe_load=True)
    for candidate in supported_control_models:
        # Hand each candidate its own shallow copy: builders may pop keys.
        built = candidate.try_build_from_state_dict(dict(state_dict), ckpt_path)
        if built is not None:
            return built
    return None
diff --git a/modules_forge/shared_options.py b/modules_forge/shared_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd920bd6aa6d6f630185f82dd2452961c6201a01
--- /dev/null
+++ b/modules_forge/shared_options.py
@@ -0,0 +1,28 @@
def register(options_templates, options_section, OptionInfo):
    """Register Forge's hidden backend options plus the Forge Canvas UI options."""
    from modules.ui_components import FormColorPicker

    # Hidden options: persisted settings with no visible section in the UI.
    hidden_options = {
        "forge_unet_storage_dtype": OptionInfo("Automatic"),
        "forge_inference_memory": OptionInfo(1024),
        "forge_async_loading": OptionInfo("Queue"),
        "forge_pin_shared_memory": OptionInfo("CPU"),
        "forge_preset": OptionInfo("sd"),
        "forge_additional_modules": OptionInfo([]),
    }
    options_templates.update(options_section((None, "Forge Hidden Options"), hidden_options))

    # Forge Canvas options, shown under the "ui" settings tab.
    canvas_options = {
        "forge_canvas_height": OptionInfo(512, "Canvas Height").info("in pixels").needs_reload_ui(),
        "forge_canvas_toolbar_always": OptionInfo(False, "Always Visible Toolbar").info("disabled: toolbar only appears when hovering the canvas").needs_reload_ui(),
        "forge_canvas_consistent_brush": OptionInfo(False, "Fixed Brush Size").info("disabled: the brush size is pixel-space, the brush stays small when zoomed out ; enabled: the brush size is canvas-space, the brush stays big when zoomed in").needs_reload_ui(),
        "forge_canvas_plain": OptionInfo(False, "Plain Background").info("disabled: checkerboard pattern ; enabled: solid color").needs_reload_ui(),
        "forge_canvas_plain_color": OptionInfo("#808080", "Solid Color for Plain Background", FormColorPicker, {}).needs_reload_ui(),
    }
    options_templates.update(options_section(("ui_forgecanvas", "Forge Canvas", "ui"), canvas_options))
diff --git a/modules_forge/supported_controlnet.py b/modules_forge/supported_controlnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ca7b768af194d763117ccd9a7a3de25970e5310
--- /dev/null
+++ b/modules_forge/supported_controlnet.py
@@ -0,0 +1,169 @@
+import os
+import torch
+
+from huggingface_guess.detection import unet_config_from_diffusers_unet, model_config_from_unet
+from huggingface_guess.utils import unet_to_diffusers
+from backend import memory_management
+from backend.operations import using_forge_operations
+from backend.nn.cnets import cldm
+from backend.patcher.controlnet import ControlLora, ControlNet, load_t2i_adapter, apply_controlnet_advanced
+from modules_forge.shared import add_supported_control_model
+
+
class ControlModelPatcher:
    """Base interface for control models plugged into the sampling pipeline.

    Subclasses recognize their checkpoint format in try_build_from_state_dict
    and hook into the generation lifecycle via the process_* callbacks.
    """

    @staticmethod
    def try_build_from_state_dict(state_dict, ckpt_path):
        # The base class recognizes nothing; subclasses return an instance on match.
        return None

    def __init__(self, model_patcher=None):
        self.model_patcher = model_patcher
        # Default schedule: full strength across the entire sampling range.
        self.strength = 1.0
        self.start_percent = 0.0
        self.end_percent = 1.0
        # Optional advanced weighting hooks, all unset by default.
        for attribute in (
            "positive_advanced_weighting",
            "negative_advanced_weighting",
            "advanced_frame_weighting",
            "advanced_sigma_weighting",
            "advanced_mask_weighting",
        ):
            setattr(self, attribute, None)

    def process_after_running_preprocessors(self, process, params, *args, **kwargs):
        return

    def process_before_every_sampling(self, process, cond, mask, *args, **kwargs):
        return

    def process_after_every_sampling(self, process, params, *args, **kwargs):
        return
+
+
class ControlNetPatcher(ControlModelPatcher):
    """Wraps a ControlNet (or ControlLora / T2I-Adapter) model for sampling."""

    @staticmethod
    def try_build_from_state_dict(controlnet_data, ckpt_path):
        """Build a patcher from a raw state dict, or return None if unrecognized.

        Handles, in order: ControlLora checkpoints, diffusers-format ControlNets
        (keys are remapped to ldm-style names first), ldm-format ControlNets
        (.pth prefixed or plain), and finally T2I-Adapters as a fallback.
        """
        if "lora_controlnet" in controlnet_data:
            return ControlNetPatcher(ControlLora(controlnet_data))

        controlnet_config = None
        if "controlnet_cond_embedding.conv_in.weight" in controlnet_data:  # diffusers format
            unet_dtype = memory_management.unet_dtype()
            controlnet_config = unet_config_from_diffusers_unet(controlnet_data, unet_dtype)
            diffusers_keys = unet_to_diffusers(controlnet_config)
            diffusers_keys["controlnet_mid_block.weight"] = "middle_block_out.0.weight"
            diffusers_keys["controlnet_mid_block.bias"] = "middle_block_out.0.bias"

            # Map controlnet_down_blocks.N -> zero_convs.N.0 until the keys run out.
            count = 0
            loop = True
            while loop:
                suffix = [".weight", ".bias"]
                for s in suffix:
                    k_in = "controlnet_down_blocks.{}{}".format(count, s)
                    k_out = "zero_convs.{}.0{}".format(count, s)
                    if k_in not in controlnet_data:
                        loop = False
                        break
                    diffusers_keys[k_in] = k_out
                count += 1

            # Map the cond-embedding convs onto input_hint_block; only even target
            # indices (count * 2) are assigned here. When a block key is missing,
            # the final conv_out is mapped instead and the loop ends.
            count = 0
            loop = True
            while loop:
                suffix = [".weight", ".bias"]
                for s in suffix:
                    if count == 0:
                        k_in = "controlnet_cond_embedding.conv_in{}".format(s)
                    else:
                        k_in = "controlnet_cond_embedding.blocks.{}{}".format(count - 1, s)
                    k_out = "input_hint_block.{}{}".format(count * 2, s)
                    if k_in not in controlnet_data:
                        k_in = "controlnet_cond_embedding.conv_out{}".format(s)
                        loop = False
                    diffusers_keys[k_in] = k_out
                count += 1

            # Rebuild the state dict under the remapped (ldm-style) names.
            new_sd = {}
            for k in diffusers_keys:
                if k in controlnet_data:
                    new_sd[diffusers_keys[k]] = controlnet_data.pop(k)

            leftover_keys = controlnet_data.keys()
            if len(leftover_keys) > 0:
                print("leftover keys:", leftover_keys)
            controlnet_data = new_sd

        # Detect ldm-format checkpoints; .pth files carry a "control_model." prefix.
        pth_key = 'control_model.zero_convs.0.0.weight'
        pth = False
        key = 'zero_convs.0.0.weight'
        if pth_key in controlnet_data:
            pth = True
            key = pth_key
            prefix = "control_model."
        elif key in controlnet_data:
            prefix = ""
        else:
            # Not a ControlNet at all -- try loading it as a T2I-Adapter.
            net = load_t2i_adapter(controlnet_data)
            if net is None:
                return None
            return ControlNetPatcher(net)

        if controlnet_config is None:
            unet_dtype = memory_management.unet_dtype()
            controlnet_config = model_config_from_unet(controlnet_data, prefix, True).unet_config
            controlnet_config['dtype'] = unet_dtype

        load_device = memory_management.get_torch_device()
        computation_dtype = memory_management.get_computation_dtype(load_device)

        # cldm.ControlNet takes hint_channels instead of out_channels; the hint
        # channel count comes from the first input-hint conv's input dimension.
        controlnet_config.pop("out_channels")
        controlnet_config["hint_channels"] = controlnet_data["{}input_hint_block.0.weight".format(prefix)].shape[1]

        with using_forge_operations(dtype=unet_dtype, manual_cast_enabled=computation_dtype != unet_dtype):
            control_model = cldm.ControlNet(**controlnet_config).to(dtype=unet_dtype)

        if pth:
            if 'difference' in controlnet_data:
                print("WARNING: Your controlnet model is diff version rather than official float16 model. "
                      "Please use an official float16/float32 model for robust performance.")

            # Wrapper module so the "control_model." prefix in the keys lines up
            # with an attribute path during load_state_dict.
            class WeightsLoader(torch.nn.Module):
                pass

            w = WeightsLoader()
            w.control_model = control_model
            missing, unexpected = w.load_state_dict(controlnet_data, strict=False)
        else:
            missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
        print(missing, unexpected)

        global_average_pooling = False
        filename = os.path.splitext(ckpt_path)[0]
        if filename.endswith("_shuffle") or filename.endswith("_shuffle_fp16"):
            # TODO: smarter way of enabling global_average_pooling
            global_average_pooling = True

        control = ControlNet(control_model, global_average_pooling=global_average_pooling, load_device=load_device, manual_cast_dtype=computation_dtype)
        return ControlNetPatcher(control)

    def __init__(self, model_patcher):
        super().__init__(model_patcher)

    def process_before_every_sampling(self, process, cond, mask, *args, **kwargs):
        """Attach this ControlNet to the UNet patcher with the configured schedule/weights."""
        unet = process.sd_model.forge_objects.unet

        unet = apply_controlnet_advanced(
            unet=unet,
            controlnet=self.model_patcher,
            image_bchw=cond,
            strength=self.strength,
            start_percent=self.start_percent,
            end_percent=self.end_percent,
            positive_advanced_weighting=self.positive_advanced_weighting,
            negative_advanced_weighting=self.negative_advanced_weighting,
            advanced_frame_weighting=self.advanced_frame_weighting,
            advanced_sigma_weighting=self.advanced_sigma_weighting,
            advanced_mask_weighting=self.advanced_mask_weighting
        )

        process.sd_model.forge_objects.unet = unet
        return


# Register so try_load_supported_control_model can recognize ControlNet checkpoints.
add_supported_control_model(ControlNetPatcher)
diff --git a/modules_forge/supported_preprocessor.py b/modules_forge/supported_preprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..185db56ce17fc20b4371bfe240c5a24830337cce
--- /dev/null
+++ b/modules_forge/supported_preprocessor.py
@@ -0,0 +1,138 @@
+import cv2
+import torch
+
+from modules_forge.shared import add_supported_preprocessor, preprocessor_dir
+from backend import memory_management
+from backend.patcher.base import ModelPatcher
+from backend.patcher import clipvision
+from modules_forge.utils import resize_image_with_pad
+from modules.modelloader import load_file_from_url
+from modules_forge.utils import numpy_to_pytorch
+
+
class PreprocessorParameter:
    """Bundle of gr.update() keyword arguments describing one preprocessor slider."""

    def __init__(self, minimum=0.0, maximum=1.0, step=0.01, label='Parameter 1', value=0.5, visible=False, **kwargs):
        # Stored as a plain dict so the UI can splat it into a Gradio update call.
        self.gradio_update_kwargs = {
            'minimum': minimum,
            'maximum': maximum,
            'step': step,
            'label': label,
            'value': value,
            'visible': visible,
            **kwargs,
        }
+
+
class Preprocessor:
    """Base class for ControlNet preprocessors (annotators).

    Subclasses override __call__ and adjust the slider/flag attributes to
    describe how the preprocessor is presented and scheduled in the UI.
    """

    def __init__(self):
        self.name = 'PreprocessorBase'
        self.tags = []
        self.model_filename_filters = []
        # Only the resolution slider is visible by default.
        self.slider_resolution = PreprocessorParameter(label='Resolution', minimum=128, maximum=2048, value=512, step=8, visible=True)
        self.slider_1 = PreprocessorParameter()
        self.slider_2 = PreprocessorParameter()
        self.slider_3 = PreprocessorParameter()
        self.model_patcher: ModelPatcher = None
        self.show_control_mode = True
        self.do_not_need_model = False
        self.sorting_priority = 0  # higher goes to top in the list
        self.corp_image_with_a1111_mask_when_in_img2img_inpaint_tab = True
        self.fill_mask_with_one_when_resize_and_fill = False
        self.use_soft_projection_in_hr_fix = False
        self.expand_mask_when_resize_and_fill = False

    def setup_model_patcher(self, model, load_device=None, offload_device=None, dtype=torch.float32, **kwargs):
        """Wrap `model` in a ModelPatcher stored on this preprocessor and return it."""
        load_device = memory_management.get_torch_device() if load_device is None else load_device
        offload_device = torch.device('cpu') if offload_device is None else offload_device

        # Fall back to fp32 when the target device should not run fp16.
        if not memory_management.should_use_fp16(load_device):
            dtype = torch.float32

        model.eval()
        model = model.to(device=offload_device, dtype=dtype)

        patcher = ModelPatcher(model=model, load_device=load_device, offload_device=offload_device, **kwargs)
        patcher.dtype = dtype
        self.model_patcher = patcher
        return patcher

    def move_all_model_patchers_to_gpu(self):
        memory_management.load_models_gpu([self.model_patcher])

    def send_tensor_to_model_device(self, x):
        return x.to(device=self.model_patcher.current_device, dtype=self.model_patcher.dtype)

    def process_after_running_preprocessors(self, process, params, *args, **kwargs):
        return

    def process_before_every_sampling(self, process, cond, mask, *args, **kwargs):
        return cond, mask

    def process_after_every_sampling(self, process, params, *args, **kwargs):
        return

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, input_mask=None, **kwargs):
        # Base implementation is a pass-through.
        return input_image
+
+
class PreprocessorNone(Preprocessor):
    """Pass-through entry shown as 'None'; inherits the identity __call__ from Preprocessor."""

    def __init__(self):
        super().__init__()
        # High priority pushes this entry near the top of the sorted list.
        self.name, self.sorting_priority = 'None', 10
+
+
class PreprocessorCanny(Preprocessor):
    """Canny edge detector (OpenCV), producing a 3-channel edge map."""

    def __init__(self):
        super().__init__()
        self.name = 'canny'
        self.tags = ['Canny']
        self.model_filename_filters = ['canny']
        # slider_1 / slider_2 are the hysteresis thresholds passed to cv2.Canny.
        self.slider_1 = PreprocessorParameter(minimum=0, maximum=256, step=1, value=100, label='Low Threshold', visible=True)
        self.slider_2 = PreprocessorParameter(minimum=0, maximum=256, step=1, value=200, label='High Threshold', visible=True)
        self.sorting_priority = 100
        self.use_soft_projection_in_hr_fix = True

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        padded, remove_pad = resize_image_with_pad(input_image, resolution)
        edges = cv2.Canny(padded, int(slider_1), int(slider_2))
        return remove_pad(cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB))
+
+
+add_supported_preprocessor(PreprocessorNone())
+add_supported_preprocessor(PreprocessorCanny())
+
+
class PreprocessorClipVision(Preprocessor):
    """Preprocessor that encodes the input image with a CLIP Vision model."""

    # Shared across instances so each checkpoint file is loaded only once.
    global_cache = {}

    def __init__(self, name, url, filename):
        super().__init__()
        self.name = name
        self.url = url
        self.filename = filename
        # No resolution slider: the CLIP encoder handles sizing itself.
        self.slider_resolution = PreprocessorParameter(visible=False)
        self.corp_image_with_a1111_mask_when_in_img2img_inpaint_tab = False
        self.show_control_mode = False
        self.sorting_priority = 1
        self.clipvision = None

    def load_clipvision(self):
        """Download (if needed) and cache the CLIP Vision model; return the loaded instance."""
        if self.clipvision is None:
            ckpt_path = load_file_from_url(
                url=self.url,
                model_dir=preprocessor_dir,
                file_name=self.filename
            )
            cache = PreprocessorClipVision.global_cache
            if ckpt_path not in cache:
                cache[ckpt_path] = clipvision.load(ckpt_path)
            self.clipvision = cache[ckpt_path]
        return self.clipvision

    @torch.no_grad()
    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        return self.load_clipvision().encode_image(numpy_to_pytorch(input_image))
diff --git a/modules_forge/utils.py b/modules_forge/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9da2e51127f2c742439199da6a2d38fb6b14bcf
--- /dev/null
+++ b/modules_forge/utils.py
@@ -0,0 +1,101 @@
+import random
+import string
+import time
+
+import cv2
+import numpy as np
+import torch
+
+from backend import memory_management
+
+
def prepare_free_memory(aggressive=False):
    """Free model memory before a job; aggressive mode unloads every model."""
    if aggressive:
        memory_management.unload_all_models()
        print("Cleanup all memory...")
    else:
        memory_management.free_memory(memory_required=memory_management.minimum_inference_memory(), device=memory_management.get_torch_device())
        print("Cleanup minimal inference memory...")
+
+
def apply_circular_forge(model, tiling_enabled=False):
    """Toggle seamless tiling by flipping Conv2d padding between 'circular' and 'zeros'."""
    if not model.is_webui_legacy_model():
        return
    if model.tiling_enabled == tiling_enabled:
        return  # already in the requested state

    model.tiling_enabled = tiling_enabled
    mode = "circular" if tiling_enabled else "zeros"

    unet: torch.nn.Module = model.forge_objects.unet.model.diffusion_model
    for module in unet.modules():
        if isinstance(module, torch.nn.Conv2d):
            module.padding_mode = mode

    print(f"Tiling: {tiling_enabled}")
+
+
def HWC3(x):
    """Normalize a uint8 image array to 3-channel HWC.

    Grayscale input is replicated across channels; RGBA is alpha-blended
    over a white background. A 3-channel input is returned unchanged.
    """
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    assert x.ndim == 3
    H, W, C = x.shape
    assert C in (1, 3, 4)
    if C == 3:
        return x
    if C == 1:
        return np.repeat(x, 3, axis=2)
    # C == 4: composite onto white using the alpha channel.
    rgb = x[:, :, :3].astype(np.float32)
    alpha = x[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * alpha + 255.0 * (1.0 - alpha)
    return blended.clip(0, 255).astype(np.uint8)
+
+
def generate_random_filename(extension=".txt"):
    """Return 'YYYYmmdd-HHMMSS-xxxxx<extension>' with a 5-char lowercase/digit suffix."""
    stamp = time.strftime("%Y%m%d-%H%M%S")
    alphabet = string.ascii_lowercase + string.digits
    suffix = "".join(random.choice(alphabet) for _ in range(5))
    return f"{stamp}-{suffix}{extension}"
+
+
@torch.no_grad()
@torch.inference_mode()
def pytorch_to_numpy(x):
    """Convert a batch of 0-1 float tensors to a list of uint8 numpy arrays."""
    return [(y.cpu().numpy() * 255.0).clip(0, 255).astype(np.uint8) for y in x]
+
+
@torch.no_grad()
@torch.inference_mode()
def numpy_to_pytorch(x):
    """Convert a uint8 HWC image to a float32 tensor in [0, 1] with a leading batch dim."""
    batched = np.ascontiguousarray(x[None].astype(np.float32) / 255.0)
    return torch.from_numpy(batched).float()
+
+
def pad64(x):
    """Pixels needed to round x up to the next multiple of 64 (0 if already aligned)."""
    return int(np.ceil(x / 64.0)) * 64 - x
+
+
def safer_memory(x):
    """Return a freshly allocated C-contiguous copy of the array.

    Works around memory issues seen on some Mac/AMD setups.
    """
    return np.ascontiguousarray(x).copy()
+
+
def resize_image_with_pad(img, resolution):
    """Scale img so its short side equals resolution, then edge-pad H/W to multiples of 64.

    Returns (padded_image, remove_pad) where remove_pad crops a same-shaped
    array back to the unpadded target size.
    """
    h0, w0, _ = img.shape
    scale = float(resolution) / float(min(h0, w0))
    h1 = int(np.round(float(h0) * scale))
    w1 = int(np.round(float(w0) * scale))
    # Bicubic when enlarging, area averaging when shrinking.
    interp = cv2.INTER_CUBIC if scale > 1 else cv2.INTER_AREA
    resized = cv2.resize(img, (w1, h1), interpolation=interp)
    padded = np.pad(resized, [[0, pad64(h1)], [0, pad64(w1)], [0, 0]], mode="edge")

    def remove_pad(x):
        return safer_memory(x[:h1, :w1])

    return safer_memory(padded), remove_pad
diff --git a/modules_forge/uv_hook.py b/modules_forge/uv_hook.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a6b65cfad53e170bf71c829da88e8efa46a0ce0
--- /dev/null
+++ b/modules_forge/uv_hook.py
@@ -0,0 +1,49 @@
+import shlex
+import subprocess
+from copy import copy
+from functools import wraps
+
+
def patch(symlink: bool):
    """Monkey-patch subprocess.run so 'pip' invocations are rerouted through 'uv pip'.

    symlink: when True, append '--link-mode symlink' to every rerouted command.
    Idempotent: a second call is a no-op (guarded by subprocess.__original_run).
    """
    if hasattr(subprocess, "__original_run"):
        return  # already installed

    subprocess.__original_run = subprocess.run
    BAD_FLAGS = ("--prefer-binary", "--ignore-installed", "-I")

    @wraps(subprocess.__original_run)
    def patched_run(*args, **kwargs):
        # Keep untouched copies so non-pip commands pass through unchanged.
        saved_args, saved_kwargs = copy(args), copy(kwargs)

        if args:
            command, *extra = args
        else:
            command, extra = kwargs.pop("args", ""), ()

        command = shlex.split(command) if isinstance(command, str) else [token.strip() for token in command]

        assert isinstance(command, list)

        if "pip" not in command:
            return subprocess.__original_run(*saved_args, **saved_kwargs)

        # Everything after the 'pip' token, minus flags uv does not accept.
        tail = [token for token in command[command.index("pip") + 1 :] if token not in BAD_FLAGS]

        rerouted: list[str] = ["uv", "pip", *tail]
        if symlink:
            rerouted += ["--link-mode", "symlink"]

        command = [*rerouted, *extra]
        if kwargs.get("shell", False):
            # shell=True expects a string; normalize quoting to double quotes.
            command = shlex.join(command).replace("'", '"')

        return subprocess.__original_run(command, **kwargs)

    subprocess.run = patched_run