Columns:
- Module: string, 3 distinct values
- Name: string, 3 to 15 characters
- MLX Python: string, 1.77k to 25.6k characters
- MLX Swift: string, 3.79k to 44k characters
- Notes: string, 1 distinct value
- Transformers / Diffusers .py (TODO): float64
Module: StableDiffusion
Name: StableDiffusion
# Copyright © 2023-2024 Apple Inc. import time from typing import Optional, Tuple import mlx.core as mx from .model_io import ( _DEFAULT_MODEL, load_autoencoder, load_diffusion_config, load_text_encoder, load_tokenizer, load_unet, ) from .sampler import SimpleEulerAncestralSampler, SimpleEulerSampler class StableDiffusion: def __init__(self, model: str = _DEFAULT_MODEL, float16: bool = False): self.dtype = mx.float16 if float16 else mx.float32 self.diffusion_config = load_diffusion_config(model) self.unet = load_unet(model, float16) self.text_encoder = load_text_encoder(model, float16) self.autoencoder = load_autoencoder(model, False) self.sampler = SimpleEulerSampler(self.diffusion_config) self.tokenizer = load_tokenizer(model) def ensure_models_are_loaded(self): mx.eval(self.unet.parameters()) mx.eval(self.text_encoder.parameters()) mx.eval(self.autoencoder.parameters()) def _tokenize(self, tokenizer, text: str, negative_text: Optional[str] = None): # Tokenize the text tokens = [tokenizer.tokenize(text)] if negative_text is not None: tokens += [tokenizer.tokenize(negative_text)] lengths = [len(t) for t in tokens] N = max(lengths) tokens = [t + [0] * (N - len(t)) for t in tokens] tokens = mx.array(tokens) return tokens def _get_text_conditioning( self, text: str, n_images: int = 1, cfg_weight: float = 7.5, negative_text: str = "", ): # Tokenize the text tokens = self._tokenize( self.tokenizer, text, (negative_text if cfg_weight > 1 else None) ) # Compute the features conditioning = self.text_encoder(tokens).last_hidden_state # Repeat the conditioning for each of the generated images if n_images > 1: conditioning = mx.repeat(conditioning, n_images, axis=0) return conditioning def _denoising_step( self, x_t, t, t_prev, conditioning, cfg_weight: float = 7.5, text_time=None ): x_t_unet = mx.concatenate([x_t] * 2, axis=0) if cfg_weight > 1 else x_t t_unet = mx.broadcast_to(t, [len(x_t_unet)]) eps_pred = self.unet( x_t_unet, t_unet, encoder_x=conditioning, text_time=text_time ) if cfg_weight > 1: eps_text, eps_neg = eps_pred.split(2) eps_pred = eps_neg + cfg_weight * (eps_text - eps_neg) x_t_prev = self.sampler.step(eps_pred, x_t, t, t_prev) return x_t_prev def _denoising_loop( self, x_T, T, conditioning, num_steps: int = 50, cfg_weight: float = 7.5, text_time=None, ): x_t = x_T for t, t_prev in self.sampler.timesteps( num_steps, start_time=T, dtype=self.dtype ): x_t = self._denoising_step( x_t, t, t_prev, conditioning, cfg_weight, text_time ) yield x_t def generate_latents( self, text: str, n_images: int = 1, num_steps: int = 50, cfg_weight: float = 7.5, negative_text: str = "", latent_size: Tuple[int] = (64, 64), seed=None, ): # Set the PRNG state seed = int(time.time()) if seed is None else seed mx.random.seed(seed) # Get the text conditioning conditioning = self._get_text_conditioning( text, n_images, cfg_weight, negative_text ) # Create the latent variables x_T = self.sampler.sample_prior( (n_images, *latent_size, self.autoencoder.latent_channels), dtype=self.dtype ) # Perform the denoising loop yield from self._denoising_loop( x_T, self.sampler.max_time, conditioning, num_steps, cfg_weight ) def generate_latents_from_image( self, image, text: str, n_images: int = 1, strength: float = 0.8, num_steps: int = 50, cfg_weight: float = 7.5, negative_text: str = "", seed=None, ): # Set the PRNG state seed = int(time.time()) if seed is None else seed mx.random.seed(seed) # Define the num steps and start step start_step = self.sampler.max_time * strength num_steps = int(num_steps * strength) # Get 
the text conditioning conditioning = self._get_text_conditioning( text, n_images, cfg_weight, negative_text ) # Get the latents from the input image and add noise according to the # start time. x_0, _ = self.autoencoder.encode(image[None]) x_0 = mx.broadcast_to(x_0, (n_images,) + x_0.shape[1:]) x_T = self.sampler.add_noise(x_0, mx.array(start_step)) # Perform the denoising loop yield from self._denoising_loop( x_T, start_step, conditioning, num_steps, cfg_weight ) def decode(self, x_t): x = self.autoencoder.decode(x_t) x = mx.clip(x / 2 + 0.5, 0, 1) return x class StableDiffusionXL(StableDiffusion): def __init__(self, model: str = _DEFAULT_MODEL, float16: bool = False): super().__init__(model, float16) self.sampler = SimpleEulerAncestralSampler(self.diffusion_config) self.text_encoder_1 = self.text_encoder self.tokenizer_1 = self.tokenizer del self.tokenizer, self.text_encoder self.text_encoder_2 = load_text_encoder( model, float16, model_key="text_encoder_2", ) self.tokenizer_2 = load_tokenizer( model, merges_key="tokenizer_2_merges", vocab_key="tokenizer_2_vocab", ) def ensure_models_are_loaded(self): mx.eval(self.unet.parameters()) mx.eval(self.text_encoder_1.parameters()) mx.eval(self.text_encoder_2.parameters()) mx.eval(self.autoencoder.parameters()) def _get_text_conditioning( self, text: str, n_images: int = 1, cfg_weight: float = 7.5, negative_text: str = "", ): tokens_1 = self._tokenize( self.tokenizer_1, text, (negative_text if cfg_weight > 1 else None), ) tokens_2 = self._tokenize( self.tokenizer_2, text, (negative_text if cfg_weight > 1 else None), ) conditioning_1 = self.text_encoder_1(tokens_1) conditioning_2 = self.text_encoder_2(tokens_2) conditioning = mx.concatenate( [conditioning_1.hidden_states[-2], conditioning_2.hidden_states[-2]], axis=-1, ) pooled_conditioning = conditioning_2.pooled_output if n_images > 1: conditioning = mx.repeat(conditioning, n_images, axis=0) pooled_conditioning = mx.repeat(pooled_conditioning, n_images, axis=0) return conditioning, pooled_conditioning def generate_latents( self, text: str, n_images: int = 1, num_steps: int = 2, cfg_weight: float = 0.0, negative_text: str = "", latent_size: Tuple[int] = (64, 64), seed=None, ): # Set the PRNG state seed = int(time.time()) if seed is None else seed mx.random.seed(seed) # Get the text conditioning conditioning, pooled_conditioning = self._get_text_conditioning( text, n_images, cfg_weight, negative_text ) text_time = ( pooled_conditioning, mx.array([[512, 512, 0, 0, 512, 512.0]] * len(pooled_conditioning)), ) # Create the latent variables x_T = self.sampler.sample_prior( (n_images, *latent_size, self.autoencoder.latent_channels), dtype=self.dtype ) # Perform the denoising loop yield from self._denoising_loop( x_T, self.sampler.max_time, conditioning, num_steps, cfg_weight, text_time=text_time, ) def generate_latents_from_image( self, image, text: str, n_images: int = 1, strength: float = 0.8, num_steps: int = 2, cfg_weight: float = 0.0, negative_text: str = "", seed=None, ): # Set the PRNG state seed = seed or int(time.time()) mx.random.seed(seed) # Define the num steps and start step start_step = self.sampler.max_time * strength num_steps = int(num_steps * strength) # Get the text conditioning conditioning, pooled_conditioning = self._get_text_conditioning( text, n_images, cfg_weight, negative_text ) text_time = ( pooled_conditioning, mx.array([[512, 512, 0, 0, 512, 512.0]] * len(pooled_conditioning)), ) # Get the latents from the input image and add noise according to the # start time. 
x_0, _ = self.autoencoder.encode(image[None]) x_0 = mx.broadcast_to(x_0, (n_images,) + x_0.shape[1:]) x_T = self.sampler.add_noise(x_0, mx.array(start_step)) # Perform the denoising loop yield from self._denoising_loop( x_T, start_step, conditioning, num_steps, cfg_weight, text_time=text_time )
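For orientation, a minimal usage sketch of the Python StableDiffusion class above. This is a hedged example: it assumes the package is importable as stable_diffusion, that model_io can load the default weights, and the prompt and seed are illustrative.

# Hedged usage sketch for the StableDiffusion class above; the import path,
# prompt, and seed are illustrative.
import mlx.core as mx

from stable_diffusion import StableDiffusion

sd = StableDiffusion(float16=True)
sd.ensure_models_are_loaded()

# generate_latents is a generator; evaluate each latent as it is produced.
x_t = None
for x_t in sd.generate_latents(
    "a photo of an astronaut riding a horse",
    n_images=1,
    num_steps=50,
    cfg_weight=7.5,
    seed=42,
):
    mx.eval(x_t)

# Decode the final latent into an image with values in [0, 1].
image = sd.decode(x_t)
mx.eval(image)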
// Copyright © 2024 Apple Inc. import Foundation import Hub import MLX import MLXNN // port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/__init__.py /// Iterator that produces latent images. /// /// Created by: /// /// - ``TextToImageGenerator/generateLatents(parameters:)`` /// - ``ImageToImageGenerator/generateLatents(image:parameters:strength:)`` public struct DenoiseIterator: Sequence, IteratorProtocol { let sd: StableDiffusion var xt: MLXArray let conditioning: MLXArray let cfgWeight: Float let textTime: (MLXArray, MLXArray)? var i: Int let steps: [(MLXArray, MLXArray)] init( sd: StableDiffusion, xt: MLXArray, t: Int, conditioning: MLXArray, steps: Int, cfgWeight: Float, textTime: (MLXArray, MLXArray)? = nil ) { self.sd = sd self.steps = sd.sampler.timeSteps(steps: steps, start: t, dType: sd.dType) self.i = 0 self.xt = xt self.conditioning = conditioning self.cfgWeight = cfgWeight self.textTime = textTime } public var underestimatedCount: Int { steps.count } mutating public func next() -> MLXArray? { guard i < steps.count else { return nil } let (t, tPrev) = steps[i] i += 1 xt = sd.step( xt: xt, t: t, tPrev: tPrev, conditioning: conditioning, cfgWeight: cfgWeight, textTime: textTime) return xt } } /// Type for the _decoder_ step. public typealias ImageDecoder = (MLXArray) -> MLXArray public protocol ImageGenerator { func ensureLoaded() /// Return a detached decoder -- this is useful if trying to conserve memory. /// /// The decoder can be used independently of the ImageGenerator to transform /// latents into raster images. func detachedDecoder() -> ImageDecoder /// the equivalent to the ``detachedDecoder()`` but without the detatching func decode(xt: MLXArray) -> MLXArray } /// Public interface for transforming a text prompt into an image. /// /// Steps: /// /// - ``generateLatents(parameters:)`` /// - evaluate each of the latents from the iterator /// - ``ImageGenerator/decode(xt:)`` or ``ImageGenerator/detachedDecoder()`` to convert the final latent into an image /// - use ``Image`` to save the image public protocol TextToImageGenerator: ImageGenerator { func generateLatents(parameters: EvaluateParameters) -> DenoiseIterator } /// Public interface for transforming a text prompt into an image. /// /// Steps: /// /// - ``generateLatents(image:parameters:strength:)`` /// - evaluate each of the latents from the iterator /// - ``ImageGenerator/decode(xt:)`` or ``ImageGenerator/detachedDecoder()`` to convert the final latent into an image /// - use ``Image`` to save the image public protocol ImageToImageGenerator: ImageGenerator { func generateLatents(image: MLXArray, parameters: EvaluateParameters, strength: Float) -> DenoiseIterator } enum ModelContainerError: LocalizedError { /// Unable to create the particular type of model, e.g. it doesn't support image to image case unableToCreate(String, String) /// When operating in conserveMemory mode, it tried to use a model that had been discarded case modelDiscarded var errorDescription: String? { switch self { case .unableToCreate(let modelId, let generatorType): return String( localized: "Unable to create a \(generatorType) with model ID '\(modelId)'. The model may not support this operation type." ) case .modelDiscarded: return String( localized: "The model has been discarded to conserve memory and is no longer available. Please recreate the model container." ) } } } /// Container for models that guarantees single threaded access. 
public actor ModelContainer<M> { enum State { case discarded case loaded(M) } var state: State /// if true this will discard the model in ``performTwoStage(first:second:)`` var conserveMemory = false private init(model: M) { self.state = .loaded(model) } /// create a ``ModelContainer`` that supports ``TextToImageGenerator`` static public func createTextToImageGenerator( configuration: StableDiffusionConfiguration, loadConfiguration: LoadConfiguration = .init() ) throws -> ModelContainer<TextToImageGenerator> { if let model = try configuration.textToImageGenerator(configuration: loadConfiguration) { return .init(model: model) } else { throw ModelContainerError.unableToCreate(configuration.id, "TextToImageGenerator") } } /// create a ``ModelContainer`` that supports ``ImageToImageGenerator`` static public func createImageToImageGenerator( configuration: StableDiffusionConfiguration, loadConfiguration: LoadConfiguration = .init() ) throws -> ModelContainer<ImageToImageGenerator> { if let model = try configuration.imageToImageGenerator(configuration: loadConfiguration) { return .init(model: model) } else { throw ModelContainerError.unableToCreate(configuration.id, "ImageToImageGenerator") } } public func setConserveMemory(_ conserveMemory: Bool) { self.conserveMemory = conserveMemory } /// Perform an action on the model and/or tokenizer. Callers _must_ eval any `MLXArray` before returning as /// `MLXArray` is not `Sendable`. public func perform<R>(_ action: @Sendable (M) throws -> R) throws -> R { switch state { case .discarded: throw ModelContainerError.modelDiscarded case .loaded(let m): try action(m) } } /// Perform a two stage action where the first stage returns values passed to the second stage. /// /// If ``setConservativeMemory(_:)`` is `true` this will discard the model in between /// the `first` and `second` blocks. The container will have to be recreated if a caller /// wants to use it again. /// /// If `false` this will just run them in sequence and the container can be reused. /// /// Callers _must_ eval any `MLXArray` before returning as `MLXArray` is not `Sendable`. public func performTwoStage<R1, R2>( first: @Sendable (M) throws -> R1, second: @Sendable (R1) throws -> R2 ) throws -> R2 { let r1 = switch state { case .discarded: throw ModelContainerError.modelDiscarded case .loaded(let m): try first(m) } if conserveMemory { self.state = .discarded } return try second(r1) } } /// Base class for Stable Diffusion. open class StableDiffusion { let dType: DType let diffusionConfiguration: DiffusionConfiguration let unet: UNetModel let textEncoder: CLIPTextModel let autoencoder: Autoencoder let sampler: SimpleEulerSampler let tokenizer: CLIPTokenizer internal init( hub: HubApi, configuration: StableDiffusionConfiguration, dType: DType, diffusionConfiguration: DiffusionConfiguration? = nil, unet: UNetModel? = nil, textEncoder: CLIPTextModel? = nil, autoencoder: Autoencoder? = nil, sampler: SimpleEulerSampler? = nil, tokenizer: CLIPTokenizer? = nil ) throws { self.dType = dType self.diffusionConfiguration = try diffusionConfiguration ?? loadDiffusionConfiguration(hub: hub, configuration: configuration) self.unet = try unet ?? loadUnet(hub: hub, configuration: configuration, dType: dType) self.textEncoder = try textEncoder ?? loadTextEncoder(hub: hub, configuration: configuration, dType: dType) // note: autoencoder uses float32 weights self.autoencoder = try autoencoder ?? 
loadAutoEncoder(hub: hub, configuration: configuration, dType: .float32) if let sampler { self.sampler = sampler } else { self.sampler = SimpleEulerSampler(configuration: self.diffusionConfiguration) } self.tokenizer = try tokenizer ?? loadTokenizer(hub: hub, configuration: configuration) } open func ensureLoaded() { eval(unet, textEncoder, autoencoder) } func tokenize(tokenizer: CLIPTokenizer, text: String, negativeText: String?) -> MLXArray { var tokens = [tokenizer.tokenize(text: text)] if let negativeText { tokens.append(tokenizer.tokenize(text: negativeText)) } let c = tokens.count let max = tokens.map { $0.count }.max() ?? 0 let mlxTokens = MLXArray( tokens .map { ($0 + Array(repeating: 0, count: max - $0.count)) } .flatMap { $0 } ) .reshaped(c, max) return mlxTokens } open func step( xt: MLXArray, t: MLXArray, tPrev: MLXArray, conditioning: MLXArray, cfgWeight: Float, textTime: (MLXArray, MLXArray)? ) -> MLXArray { let xtUnet = cfgWeight > 1 ? concatenated([xt, xt], axis: 0) : xt let tUnet = broadcast(t, to: [xtUnet.count]) var epsPred = unet(xtUnet, timestep: tUnet, encoderX: conditioning, textTime: textTime) if cfgWeight > 1 { let (epsText, epsNeg) = epsPred.split() epsPred = epsNeg + cfgWeight * (epsText - epsNeg) } return sampler.step(epsPred: epsPred, xt: xt, t: t, tPrev: tPrev) } public func detachedDecoder() -> ImageDecoder { let autoencoder = self.autoencoder func decode(xt: MLXArray) -> MLXArray { var x = autoencoder.decode(xt) x = clip(x / 2 + 0.5, min: 0, max: 1) return x } return decode(xt:) } public func decode(xt: MLXArray) -> MLXArray { detachedDecoder()(xt) } } /// Implementation of ``StableDiffusion`` for the `stabilityai/stable-diffusion-2-1-base` model. open class StableDiffusionBase: StableDiffusion, TextToImageGenerator { public init(hub: HubApi, configuration: StableDiffusionConfiguration, dType: DType) throws { try super.init(hub: hub, configuration: configuration, dType: dType) } func conditionText(text: String, imageCount: Int, cfgWeight: Float, negativeText: String?) -> MLXArray { // tokenize the text let tokens = tokenize( tokenizer: tokenizer, text: text, negativeText: cfgWeight > 1 ? negativeText : nil) // compute the features var conditioning = textEncoder(tokens).lastHiddenState // repeat the conditioning for each of the generated images if imageCount > 1 { conditioning = repeated(conditioning, count: imageCount, axis: 0) } return conditioning } public func generateLatents(parameters: EvaluateParameters) -> DenoiseIterator { MLXRandom.seed(parameters.seed) let conditioning = conditionText( text: parameters.prompt, imageCount: parameters.imageCount, cfgWeight: parameters.cfgWeight, negativeText: parameters.negativePrompt) let xt = sampler.samplePrior( shape: [parameters.imageCount] + parameters.latentSize + [autoencoder.latentChannels], dType: dType) return DenoiseIterator( sd: self, xt: xt, t: sampler.maxTime, conditioning: conditioning, steps: parameters.steps, cfgWeight: parameters.cfgWeight) } } /// Implementation of ``StableDiffusion`` for the `stabilityai/sdxl-turbo` model. 
open class StableDiffusionXL: StableDiffusion, TextToImageGenerator, ImageToImageGenerator { let textEncoder2: CLIPTextModel let tokenizer2: CLIPTokenizer public init(hub: HubApi, configuration: StableDiffusionConfiguration, dType: DType) throws { let diffusionConfiguration = try loadConfiguration( hub: hub, configuration: configuration, key: .diffusionConfig, type: DiffusionConfiguration.self) let sampler = SimpleEulerAncestralSampler(configuration: diffusionConfiguration) self.textEncoder2 = try loadTextEncoder( hub: hub, configuration: configuration, configKey: .textEncoderConfig2, weightsKey: .textEncoderWeights2, dType: dType) self.tokenizer2 = try loadTokenizer( hub: hub, configuration: configuration, vocabulary: .tokenizerVocabulary2, merges: .tokenizerMerges2) try super.init( hub: hub, configuration: configuration, dType: dType, diffusionConfiguration: diffusionConfiguration, sampler: sampler) } open override func ensureLoaded() { super.ensureLoaded() eval(textEncoder2) } func conditionText(text: String, imageCount: Int, cfgWeight: Float, negativeText: String?) -> ( MLXArray, MLXArray ) { let tokens1 = tokenize( tokenizer: tokenizer, text: text, negativeText: cfgWeight > 1 ? negativeText : nil) let tokens2 = tokenize( tokenizer: tokenizer2, text: text, negativeText: cfgWeight > 1 ? negativeText : nil) let conditioning1 = textEncoder(tokens1) let conditioning2 = textEncoder2(tokens2) var conditioning = concatenated( [ conditioning1.hiddenStates.dropLast().last!, conditioning2.hiddenStates.dropLast().last!, ], axis: -1) var pooledConditionng = conditioning2.pooledOutput if imageCount > 1 { conditioning = repeated(conditioning, count: imageCount, axis: 0) pooledConditionng = repeated(pooledConditionng, count: imageCount, axis: 0) } return (conditioning, pooledConditionng) } public func generateLatents(parameters: EvaluateParameters) -> DenoiseIterator { MLXRandom.seed(parameters.seed) let (conditioning, pooledConditioning) = conditionText( text: parameters.prompt, imageCount: parameters.imageCount, cfgWeight: parameters.cfgWeight, negativeText: parameters.negativePrompt) let textTime = ( pooledConditioning, repeated( MLXArray(converting: [512.0, 512, 0, 0, 512, 512]).reshaped(1, -1), count: pooledConditioning.count, axis: 0) ) let xt = sampler.samplePrior( shape: [parameters.imageCount] + parameters.latentSize + [autoencoder.latentChannels], dType: dType) return DenoiseIterator( sd: self, xt: xt, t: sampler.maxTime, conditioning: conditioning, steps: parameters.steps, cfgWeight: parameters.cfgWeight, textTime: textTime) } public func generateLatents(image: MLXArray, parameters: EvaluateParameters, strength: Float) -> DenoiseIterator { MLXRandom.seed(parameters.seed) // Define the num steps and start step let startStep = Float(sampler.maxTime) * strength let numSteps = Int(Float(parameters.steps) * strength) let (conditioning, pooledConditioning) = conditionText( text: parameters.prompt, imageCount: parameters.imageCount, cfgWeight: parameters.cfgWeight, negativeText: parameters.negativePrompt) let textTime = ( pooledConditioning, repeated( MLXArray(converting: [512.0, 512, 0, 0, 512, 512]).reshaped(1, -1), count: pooledConditioning.count, axis: 0) ) // Get the latents from the input image and add noise according to the // start time. 
var (x0, _) = autoencoder.encode(image[.newAxis]) x0 = broadcast(x0, to: [parameters.imageCount] + x0.shape.dropFirst()) let xt = sampler.addNoise(x: x0, t: MLXArray(startStep)) return DenoiseIterator( sd: self, xt: xt, t: sampler.maxTime, conditioning: conditioning, steps: numSteps, cfgWeight: parameters.cfgWeight, textTime: textTime) } }
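The Swift StableDiffusionXL above targets the stabilityai/sdxl-turbo model. For comparison, a hedged Python sketch of the same path using the StableDiffusionXL class from the Python column; the model identifier and prompt are illustrative, and the turbo-style defaults (very few steps, no classifier-free guidance) follow the Python signature above.

# Hedged sketch of the SDXL-Turbo path; import path, model id, and prompt
# are illustrative.
import mlx.core as mx

from stable_diffusion import StableDiffusionXL

sdxl = StableDiffusionXL("stabilityai/sdxl-turbo", float16=True)
sdxl.ensure_models_are_loaded()

x_t = None
# Turbo-style sampling: very few steps and cfg_weight of 0 (no guidance).
for x_t in sdxl.generate_latents(
    "a watercolor painting of a fox",
    num_steps=2,
    cfg_weight=0.0,
    seed=0,
):
    mx.eval(x_t)

image = sdxl.decode(x_t)
mx.eval(image)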
Module: StableDiffusion
Name: Tokenizer
# Copyright © 2023 Apple Inc. import regex class Tokenizer: """A simple port of CLIPTokenizer from https://github.com/huggingface/transformers/ .""" def __init__(self, bpe_ranks, vocab): self.bpe_ranks = bpe_ranks self.vocab = vocab self.pat = regex.compile( r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", regex.IGNORECASE, ) self._cache = {self.bos: self.bos, self.eos: self.eos} @property def bos(self): return "<|startoftext|>" @property def bos_token(self): return self.vocab[self.bos] @property def eos(self): return "<|endoftext|>" @property def eos_token(self): return self.vocab[self.eos] def bpe(self, text): if text in self._cache: return self._cache[text] unigrams = list(text[:-1]) + [text[-1] + "</w>"] unique_bigrams = set(zip(unigrams, unigrams[1:])) if not unique_bigrams: return unigrams # In every iteration try to merge the two most likely bigrams. If none # was merged we are done. # # Ported from https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/tokenization_clip.py while unique_bigrams: bigram = min( unique_bigrams, key=lambda pair: self.bpe_ranks.get(pair, float("inf")) ) if bigram not in self.bpe_ranks: break new_unigrams = [] skip = False for a, b in zip(unigrams, unigrams[1:]): if skip: skip = False continue if (a, b) == bigram: new_unigrams.append(a + b) skip = True else: new_unigrams.append(a) if not skip: new_unigrams.append(b) unigrams = new_unigrams unique_bigrams = set(zip(unigrams, unigrams[1:])) self._cache[text] = unigrams return unigrams def tokenize(self, text, prepend_bos=True, append_eos=True): if isinstance(text, list): return [self.tokenize(t, prepend_bos, append_eos) for t in text] # Lower case cleanup and split according to self.pat. Hugging Face does # a much more thorough job here but this should suffice for 95% of # cases. clean_text = regex.sub(r"\s+", " ", text.lower()) tokens = regex.findall(self.pat, clean_text) # Split the tokens according to the byte-pair merge file bpe_tokens = [ti for t in tokens for ti in self.bpe(t)] # Map to token ids and return tokens = [self.vocab[t] for t in bpe_tokens] if prepend_bos: tokens = [self.bos_token] + tokens if append_eos: tokens.append(self.eos_token) return tokens
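A hedged sketch exercising the Tokenizer above with a toy vocabulary, to show how the byte-pair merges and the special tokens combine. The import path and the toy vocab/merges are illustrative; the real model ships a full CLIP vocabulary and merge table.

# Hedged sketch: toy vocabulary and merge table for the Tokenizer above.
from stable_diffusion.tokenizer import Tokenizer  # illustrative import path

vocab = {
    "<|startoftext|>": 0,
    "<|endoftext|>": 1,
    "h": 2,
    "i</w>": 3,
    "hi</w>": 4,
}
# bpe_ranks maps a bigram to its merge priority (lower rank merges first).
bpe_ranks = {("h", "i</w>"): 0}

tokenizer = Tokenizer(bpe_ranks, vocab)
print(tokenizer.bpe("hi"))       # ['hi</w>'] after the single merge
print(tokenizer.tokenize("hi"))  # [0, 4, 1] -> bos, 'hi</w>', eos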
// Copyright © 2024 Apple Inc. import Foundation struct Bigram: Hashable { let a: String let b: String init(_ s: String) { let pieces = s.split(separator: " ") precondition(pieces.count == 2, "BPEPair expected two pieces for '\(s)'") self.a = String(pieces[0]) self.b = String(pieces[1]) } init(_ a: String, _ b: String) { self.a = a self.b = b } init(_ v: (String, String)) { self.a = v.0 self.b = v.1 } } /// A CLIP tokenizer. /// /// Ported from: /// /// - https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/tokenizer.py /// - https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/tokenization_clip.py /// /// Ideally this would be a tokenizer from `swift-transformers` but this is too special purpose to be representable in /// what exists there (at time of writing). class CLIPTokenizer { let pattern = #/<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+/# let bpeRanks: [Bigram: Int] let vocabulary: [String: Int] let bos = "<|startoftext|>" let eos = "<|endoftext|>" let bosToken: Int let eosToken: Int var cache = [String: [String]]() init(merges: [String], vocabulary: [String: Int]) { self.bpeRanks = Dictionary( uniqueKeysWithValues: merges .map { Bigram($0) } .enumerated() .map { ($0.element, $0.offset) }) self.vocabulary = vocabulary self.cache[bos] = [bos] self.cache[eos] = [eos] self.bosToken = vocabulary[bos]! self.eosToken = vocabulary[eos]! } func bpe(text: String) -> [String] { if let result = cache[text] { return result } precondition(!text.isEmpty) var unigrams = text.dropLast().map { String($0) } + ["\(text.last!)</w>"] var uniqueBigrams = Set(zip(unigrams, unigrams.dropFirst()).map { Bigram($0) }) // In every iteration try to merge the two most likely bigrams. If none // was merged we are done while !uniqueBigrams.isEmpty { let (bigram, _) = uniqueBigrams .map { ($0, bpeRanks[$0] ?? Int.max) } .min { $0.1 < $1.1 }! if bpeRanks[bigram] == nil { break } var newUnigrams = [String]() var skip = false for (a, b) in zip(unigrams, unigrams.dropFirst()) { if skip { skip = false continue } if Bigram(a, b) == bigram { newUnigrams.append(a + b) skip = true } else { newUnigrams.append(a) } } if !skip, let last = unigrams.last { newUnigrams.append(last) } unigrams = newUnigrams uniqueBigrams = Set(zip(unigrams, unigrams.dropFirst()).map { Bigram($0) }) } cache[text] = unigrams return unigrams } public func tokenize(text: String) -> [Int32] { // Lower case cleanup and split according to self.pat. Hugging Face does // a much more thorough job here but this should suffice for 95% of // cases. let clean = text.lowercased().replacing(#/\s+/#, with: " ") let tokens = clean.matches(of: pattern).map { $0.description } // Split the tokens according to the byte-pair merge file let bpeTokens = tokens.flatMap { bpe(text: String($0)) } // Map to token ids and return let result = [bosToken] + bpeTokens.compactMap { vocabulary[$0] } + [eosToken] return result.map { Int32($0) } } }
Module: StableDiffusion
Name: Unet
# Copyright © 2023 Apple Inc. import math from typing import Optional import mlx.core as mx import mlx.nn as nn from .config import UNetConfig def upsample_nearest(x, scale: int = 2): B, H, W, C = x.shape x = mx.broadcast_to(x[:, :, None, :, None, :], (B, H, scale, W, scale, C)) x = x.reshape(B, H * scale, W * scale, C) return x class TimestepEmbedding(nn.Module): def __init__(self, in_channels: int, time_embed_dim: int): super().__init__() self.linear_1 = nn.Linear(in_channels, time_embed_dim) self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim) def __call__(self, x): x = self.linear_1(x) x = nn.silu(x) x = self.linear_2(x) return x class TransformerBlock(nn.Module): def __init__( self, model_dims: int, num_heads: int, hidden_dims: Optional[int] = None, memory_dims: Optional[int] = None, ): super().__init__() self.norm1 = nn.LayerNorm(model_dims) self.attn1 = nn.MultiHeadAttention(model_dims, num_heads) self.attn1.out_proj.bias = mx.zeros(model_dims) memory_dims = memory_dims or model_dims self.norm2 = nn.LayerNorm(model_dims) self.attn2 = nn.MultiHeadAttention( model_dims, num_heads, key_input_dims=memory_dims ) self.attn2.out_proj.bias = mx.zeros(model_dims) hidden_dims = hidden_dims or 4 * model_dims self.norm3 = nn.LayerNorm(model_dims) self.linear1 = nn.Linear(model_dims, hidden_dims) self.linear2 = nn.Linear(model_dims, hidden_dims) self.linear3 = nn.Linear(hidden_dims, model_dims) def __call__(self, x, memory, attn_mask, memory_mask): # Self attention y = self.norm1(x) y = self.attn1(y, y, y, attn_mask) x = x + y # Cross attention y = self.norm2(x) y = self.attn2(y, memory, memory, memory_mask) x = x + y # FFN y = self.norm3(x) y_a = self.linear1(y) y_b = self.linear2(y) y = y_a * nn.gelu(y_b) y = self.linear3(y) x = x + y return x class Transformer2D(nn.Module): """A transformer model for inputs with 2 spatial dimensions.""" def __init__( self, in_channels: int, model_dims: int, encoder_dims: int, num_heads: int, num_layers: int = 1, norm_num_groups: int = 32, ): super().__init__() self.norm = nn.GroupNorm(norm_num_groups, in_channels, pytorch_compatible=True) self.proj_in = nn.Linear(in_channels, model_dims) self.transformer_blocks = [ TransformerBlock(model_dims, num_heads, memory_dims=encoder_dims) for i in range(num_layers) ] self.proj_out = nn.Linear(model_dims, in_channels) def __call__(self, x, encoder_x, attn_mask, encoder_attn_mask): # Save the input to add to the output input_x = x dtype = x.dtype # Perform the input norm and projection B, H, W, C = x.shape x = self.norm(x).reshape(B, -1, C) x = self.proj_in(x) # Apply the transformer for block in self.transformer_blocks: x = block(x, encoder_x, attn_mask, encoder_attn_mask) # Apply the output projection and reshape x = self.proj_out(x) x = x.reshape(B, H, W, C) return x + input_x class ResnetBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: Optional[int] = None, groups: int = 32, temb_channels: Optional[int] = None, ): super().__init__() out_channels = out_channels or in_channels self.norm1 = nn.GroupNorm(groups, in_channels, pytorch_compatible=True) self.conv1 = nn.Conv2d( in_channels, out_channels, kernel_size=3, stride=1, padding=1 ) if temb_channels is not None: self.time_emb_proj = nn.Linear(temb_channels, out_channels) self.norm2 = nn.GroupNorm(groups, out_channels, pytorch_compatible=True) self.conv2 = nn.Conv2d( out_channels, out_channels, kernel_size=3, stride=1, padding=1 ) if in_channels != out_channels: self.conv_shortcut = nn.Linear(in_channels, out_channels) def __call__(self, x, 
temb=None): dtype = x.dtype if temb is not None: temb = self.time_emb_proj(nn.silu(temb)) y = self.norm1(x) y = nn.silu(y) y = self.conv1(y) if temb is not None: y = y + temb[:, None, None, :] y = self.norm2(y) y = nn.silu(y) y = self.conv2(y) x = y + (x if "conv_shortcut" not in self else self.conv_shortcut(x)) return x class UNetBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, prev_out_channels: Optional[int] = None, num_layers: int = 1, transformer_layers_per_block: int = 1, num_attention_heads: int = 8, cross_attention_dim=1280, resnet_groups: int = 32, add_downsample=True, add_upsample=True, add_cross_attention=True, ): super().__init__() # Prepare the in channels list for the resnets if prev_out_channels is None: in_channels_list = [in_channels] + [out_channels] * (num_layers - 1) else: in_channels_list = [prev_out_channels] + [out_channels] * (num_layers - 1) res_channels_list = [out_channels] * (num_layers - 1) + [in_channels] in_channels_list = [ a + b for a, b in zip(in_channels_list, res_channels_list) ] # Add resnet blocks that also process the time embedding self.resnets = [ ResnetBlock2D( in_channels=ic, out_channels=out_channels, temb_channels=temb_channels, groups=resnet_groups, ) for ic in in_channels_list ] # Add optional cross attention layers if add_cross_attention: self.attentions = [ Transformer2D( in_channels=out_channels, model_dims=out_channels, num_heads=num_attention_heads, num_layers=transformer_layers_per_block, encoder_dims=cross_attention_dim, ) for i in range(num_layers) ] # Add an optional downsampling layer if add_downsample: self.downsample = nn.Conv2d( out_channels, out_channels, kernel_size=3, stride=2, padding=1 ) # or upsampling layer if add_upsample: self.upsample = nn.Conv2d( out_channels, out_channels, kernel_size=3, stride=1, padding=1 ) def __call__( self, x, encoder_x=None, temb=None, attn_mask=None, encoder_attn_mask=None, residual_hidden_states=None, ): output_states = [] for i in range(len(self.resnets)): if residual_hidden_states is not None: x = mx.concatenate([x, residual_hidden_states.pop()], axis=-1) x = self.resnets[i](x, temb) if "attentions" in self: x = self.attentions[i](x, encoder_x, attn_mask, encoder_attn_mask) output_states.append(x) if "downsample" in self: x = self.downsample(x) output_states.append(x) if "upsample" in self: x = self.upsample(upsample_nearest(x)) output_states.append(x) return x, output_states class UNetModel(nn.Module): """The conditional 2D UNet model that actually performs the denoising.""" def __init__(self, config: UNetConfig): super().__init__() self.conv_in = nn.Conv2d( config.in_channels, config.block_out_channels[0], config.conv_in_kernel, padding=(config.conv_in_kernel - 1) // 2, ) self.timesteps = nn.SinusoidalPositionalEncoding( config.block_out_channels[0], max_freq=1, min_freq=math.exp( -math.log(10000) + 2 * math.log(10000) / config.block_out_channels[0] ), scale=1.0, cos_first=True, full_turns=False, ) self.time_embedding = TimestepEmbedding( config.block_out_channels[0], config.block_out_channels[0] * 4, ) if config.addition_embed_type == "text_time": self.add_time_proj = nn.SinusoidalPositionalEncoding( config.addition_time_embed_dim, max_freq=1, min_freq=math.exp( -math.log(10000) + 2 * math.log(10000) / config.addition_time_embed_dim ), scale=1.0, cos_first=True, full_turns=False, ) self.add_embedding = TimestepEmbedding( config.projection_class_embeddings_input_dim, config.block_out_channels[0] * 4, ) # Make the downsampling blocks 
block_channels = [config.block_out_channels[0]] + list( config.block_out_channels ) self.down_blocks = [ UNetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=config.block_out_channels[0] * 4, num_layers=config.layers_per_block[i], transformer_layers_per_block=config.transformer_layers_per_block[i], num_attention_heads=config.num_attention_heads[i], cross_attention_dim=config.cross_attention_dim[i], resnet_groups=config.norm_num_groups, add_downsample=(i < len(config.block_out_channels) - 1), add_upsample=False, add_cross_attention="CrossAttn" in config.down_block_types[i], ) for i, (in_channels, out_channels) in enumerate( zip(block_channels, block_channels[1:]) ) ] # Make the middle block self.mid_blocks = [ ResnetBlock2D( in_channels=config.block_out_channels[-1], out_channels=config.block_out_channels[-1], temb_channels=config.block_out_channels[0] * 4, groups=config.norm_num_groups, ), Transformer2D( in_channels=config.block_out_channels[-1], model_dims=config.block_out_channels[-1], num_heads=config.num_attention_heads[-1], num_layers=config.transformer_layers_per_block[-1], encoder_dims=config.cross_attention_dim[-1], ), ResnetBlock2D( in_channels=config.block_out_channels[-1], out_channels=config.block_out_channels[-1], temb_channels=config.block_out_channels[0] * 4, groups=config.norm_num_groups, ), ] # Make the upsampling blocks block_channels = ( [config.block_out_channels[0]] + list(config.block_out_channels) + [config.block_out_channels[-1]] ) self.up_blocks = [ UNetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=config.block_out_channels[0] * 4, prev_out_channels=prev_out_channels, num_layers=config.layers_per_block[i] + 1, transformer_layers_per_block=config.transformer_layers_per_block[i], num_attention_heads=config.num_attention_heads[i], cross_attention_dim=config.cross_attention_dim[i], resnet_groups=config.norm_num_groups, add_downsample=False, add_upsample=(i > 0), add_cross_attention="CrossAttn" in config.up_block_types[i], ) for i, (in_channels, out_channels, prev_out_channels) in reversed( list( enumerate( zip(block_channels, block_channels[1:], block_channels[2:]) ) ) ) ] self.conv_norm_out = nn.GroupNorm( config.norm_num_groups, config.block_out_channels[0], pytorch_compatible=True, ) self.conv_out = nn.Conv2d( config.block_out_channels[0], config.out_channels, config.conv_out_kernel, padding=(config.conv_out_kernel - 1) // 2, ) def __call__( self, x, timestep, encoder_x, attn_mask=None, encoder_attn_mask=None, text_time=None, ): # Compute the time embeddings temb = self.timesteps(timestep).astype(x.dtype) temb = self.time_embedding(temb) # Add the extra text_time conditioning if text_time is not None: text_emb, time_ids = text_time emb = self.add_time_proj(time_ids).flatten(1).astype(x.dtype) emb = mx.concatenate([text_emb, emb], axis=-1) emb = self.add_embedding(emb) temb = temb + emb # Preprocess the input x = self.conv_in(x) # Run the downsampling part of the unet residuals = [x] for block in self.down_blocks: x, res = block( x, encoder_x=encoder_x, temb=temb, attn_mask=attn_mask, encoder_attn_mask=encoder_attn_mask, ) residuals.extend(res) # Run the middle part of the unet x = self.mid_blocks[0](x, temb) x = self.mid_blocks[1](x, encoder_x, attn_mask, encoder_attn_mask) x = self.mid_blocks[2](x, temb) # Run the upsampling part of the unet for block in self.up_blocks: x, _ = block( x, encoder_x=encoder_x, temb=temb, attn_mask=attn_mask, encoder_attn_mask=encoder_attn_mask, residual_hidden_states=residuals, ) 
# Postprocess the output x = self.conv_norm_out(x) x = nn.silu(x) x = self.conv_out(x) return x
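A small, hedged sketch of the nearest-neighbour upsampling helper defined at the top of the UNet file above, showing how it doubles the spatial dimensions of an NHWC tensor; the import path is illustrative.

# Hedged sketch: upsample_nearest repeats each pixel into a scale x scale block.
import mlx.core as mx

from stable_diffusion.unet import upsample_nearest  # illustrative import path

x = mx.arange(4).reshape(1, 2, 2, 1)  # (B, H, W, C)
y = upsample_nearest(x, scale=2)
print(x.shape, "->", y.shape)  # (1, 2, 2, 1) -> (1, 4, 4, 1)
print(y[0, :, :, 0])
# Each input pixel becomes a 2x2 block:
# [[0, 0, 1, 1],
#  [0, 0, 1, 1],
#  [2, 2, 3, 3],
#  [2, 2, 3, 3]]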
// Copyright © 2024 Apple Inc. import Foundation import MLX import MLXNN // port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/unet.py func upsampleNearest(_ x: MLXArray, scale: Int = 2) -> MLXArray { precondition(x.ndim == 4) let (B, H, W, C) = x.shape4 var x = broadcast( x[0..., 0..., .newAxis, 0..., .newAxis, 0...], to: [B, H, scale, W, scale, C]) x = x.reshaped(B, H * scale, W * scale, C) return x } class TimestepEmbedding: Module, UnaryLayer { @ModuleInfo(key: "linear_1") var linear1: Linear @ModuleInfo(key: "linear_2") var linear2: Linear init(inputChannels: Int, timeEmbedDimensions: Int) { self._linear1.wrappedValue = Linear(inputChannels, timeEmbedDimensions) self._linear2.wrappedValue = Linear(timeEmbedDimensions, timeEmbedDimensions) } func callAsFunction(_ x: MLXArray) -> MLXArray { var x = linear1(x) x = silu(x) x = linear2(x) return x } } class TransformerBlock: Module { let norm1: LayerNorm let attn1: MultiHeadAttention let norm2: LayerNorm let attn2: MultiHeadAttention let norm3: LayerNorm @ModuleInfo var linear1: Linear @ModuleInfo var linear2: Linear @ModuleInfo var linear3: Linear init( modelDimensions: Int, numHeads: Int, hiddenDimensions: Int? = nil, memoryDimensions: Int? = nil ) { norm1 = LayerNorm(dimensions: modelDimensions) attn1 = MultiHeadAttention(dimensions: modelDimensions, numHeads: numHeads) // we want to self.attn1.out_proj.bias = mx.zeros(model_dims) turn enable the // bias in one of the four Linears attached to attn1. Since bias is nil we can't // update it so just replace the layer. attn1.update( modules: ModuleChildren( values: ["out_proj": .value(Linear(modelDimensions, modelDimensions, bias: true))])) let memoryDimensions = memoryDimensions ?? modelDimensions self.norm2 = LayerNorm(dimensions: modelDimensions) self.attn2 = MultiHeadAttention( dimensions: modelDimensions, numHeads: numHeads, keyInputDimensions: memoryDimensions) attn2.update( modules: ModuleChildren( values: ["out_proj": .value(Linear(modelDimensions, modelDimensions, bias: true))])) let hiddenDimensions = hiddenDimensions ?? (4 * modelDimensions) self.norm3 = LayerNorm(dimensions: modelDimensions) self.linear1 = Linear(modelDimensions, hiddenDimensions) self.linear2 = Linear(modelDimensions, hiddenDimensions) self.linear3 = Linear(hiddenDimensions, modelDimensions) } func callAsFunction( _ x: MLXArray, memory: MLXArray, attentionMask: MLXArray?, memoryMask: MLXArray? 
) -> MLXArray { var x = x // self attention var y = norm1(x) y = attn1(y, keys: y, values: y, mask: attentionMask) x = x + y // cross attention y = norm2(x) y = attn2(y, keys: memory, values: memory, mask: memoryMask) x = x + y // FFN y = norm3(x) let ya = linear1(y) let yb = linear2(y) y = ya * gelu(yb) y = linear3(y) x = x + y return x } } /// A transformer model for inputs with 2 spatial dimensions class Transformer2D: Module { let norm: GroupNorm @ModuleInfo(key: "proj_in") var projectIn: Linear @ModuleInfo(key: "transformer_blocks") var transformerBlocks: [TransformerBlock] @ModuleInfo(key: "proj_out") var projectOut: Linear init( inputChannels: Int, modelDimensions: Int, encoderDimensions: Int, numHeads: Int, numLayers: Int, groupCount: Int = 32 ) { self.norm = GroupNorm( groupCount: groupCount, dimensions: inputChannels, pytorchCompatible: true) self._projectIn.wrappedValue = Linear(inputChannels, modelDimensions) self._transformerBlocks.wrappedValue = (0 ..< numLayers) .map { _ in TransformerBlock( modelDimensions: modelDimensions, numHeads: numHeads, memoryDimensions: encoderDimensions) } self._projectOut.wrappedValue = Linear(modelDimensions, inputChannels) } func callAsFunction( _ x: MLXArray, encoderX: MLXArray, attentionMask: MLXArray?, encoderAttentionMask: MLXArray? ) -> MLXArray { let inputX = x let dtype = x.dtype var x = x // Perform the input norm and projection let (B, H, W, C) = x.shape4 x = norm(x).reshaped(B, -1, C) x = projectIn(x) // apply the transformer for block in transformerBlocks { x = block( x, memory: encoderX, attentionMask: attentionMask, memoryMask: encoderAttentionMask) } // apply the output projection and reshape x = projectOut(x) x = x.reshaped(B, H, W, C) return x + inputX } } class ResnetBlock2D: Module { let norm1: GroupNorm let conv1: Conv2d @ModuleInfo(key: "time_emb_proj") var timeEmbedProjection: Linear? let norm2: GroupNorm let conv2: Conv2d @ModuleInfo(key: "conv_shortcut") var convolutionShortcut: Linear? init( inputChannels: Int, outputChannels: Int? = nil, groupCount: Int = 32, timeEmbedChannels: Int? = nil ) { let outputChannels = outputChannels ?? inputChannels self.norm1 = GroupNorm( groupCount: groupCount, dimensions: inputChannels, pytorchCompatible: true) self.conv1 = Conv2d( inputChannels: inputChannels, outputChannels: outputChannels, kernelSize: 3, stride: 1, padding: 1) if let timeEmbedChannels { self._timeEmbedProjection.wrappedValue = Linear(timeEmbedChannels, outputChannels) } self.norm2 = GroupNorm( groupCount: groupCount, dimensions: outputChannels, pytorchCompatible: true) self.conv2 = Conv2d( inputChannels: outputChannels, outputChannels: outputChannels, kernelSize: 3, stride: 1, padding: 1) if inputChannels != outputChannels { self._convolutionShortcut.wrappedValue = Linear(inputChannels, outputChannels) } } func callAsFunction(_ x: MLXArray, timeEmbedding: MLXArray? = nil) -> MLXArray { let dtype = x.dtype var y = norm1(x) y = silu(y) y = conv1(y) if var timeEmbedding, let timeEmbedProjection { timeEmbedding = timeEmbedProjection(silu(timeEmbedding)) y = y + timeEmbedding[0..., .newAxis, .newAxis, 0...] } y = norm2(y) y = silu(y) y = conv2(y) if let convolutionShortcut { return y + convolutionShortcut(x) } else { return y + x } } } class UNetBlock2D: Module { let resnets: [ResnetBlock2D] let attentions: [Transformer2D]? let downsample: Conv2d? let upsample: Conv2d? init( inputChannels: Int, outputChannels: Int, timeEmbedChannels: Int, previousOutChannels: Int? 
= nil, numLayers: Int = 1, transformerLayersPerBlock: Int = 1, numHeads: Int = 8, crossAttentionDimension: Int = 1280, resnetGroups: Int = 32, addDownSample: Bool = true, addUpSample: Bool = true, addCrossAttention: Bool = true ) { // Prepare the inputChannelsArray for the resnets let inputChannelsArray: [Int] if let previousOutChannels { let inputChannelsBuild = [previousOutChannels] + Array(repeating: outputChannels, count: numLayers - 1) let resChannelsArray = Array(repeating: outputChannels, count: numLayers - 1) + [inputChannels] inputChannelsArray = zip(inputChannelsBuild, resChannelsArray).map { $0.0 + $0.1 } } else { inputChannelsArray = [inputChannels] + Array(repeating: outputChannels, count: numLayers - 1) } // Add resnet blocks that also process the time embedding self.resnets = inputChannelsArray .map { ic in ResnetBlock2D( inputChannels: ic, outputChannels: outputChannels, groupCount: resnetGroups, timeEmbedChannels: timeEmbedChannels) } // Add optional cross attention layers if addCrossAttention { self.attentions = (0 ..< numLayers) .map { _ in Transformer2D( inputChannels: outputChannels, modelDimensions: outputChannels, encoderDimensions: crossAttentionDimension, numHeads: numHeads, numLayers: transformerLayersPerBlock) } } else { self.attentions = nil } // Add an optional downsampling layer if addDownSample { self.downsample = Conv2d( inputChannels: outputChannels, outputChannels: outputChannels, kernelSize: 3, stride: 2, padding: 1) } else { self.downsample = nil } // or upsampling layer if addUpSample { self.upsample = Conv2d( inputChannels: outputChannels, outputChannels: outputChannels, kernelSize: 3, stride: 1, padding: 1) } else { self.upsample = nil } } func callAsFunction( _ x: MLXArray, encoderX: MLXArray, timeEmbedding: MLXArray? = nil, attentionMask: MLXArray? = nil, encoderAttentionMask: MLXArray? = nil, residualHiddenStates: [MLXArray]? = nil ) -> (MLXArray, [MLXArray], [MLXArray]) { var x = x var outputStates = [MLXArray]() var residualHiddenStates = residualHiddenStates for i in 0 ..< resnets.count { if residualHiddenStates != nil { x = concatenated([x, residualHiddenStates!.removeLast()], axis: -1) } x = resnets[i](x, timeEmbedding: timeEmbedding) if let attentions { x = attentions[i]( x, encoderX: encoderX, attentionMask: attentionMask, encoderAttentionMask: encoderAttentionMask) } outputStates.append(x) } if let downsample { x = downsample(x) outputStates.append(x) } if let upsample { x = upsample(upsampleNearest(x)) outputStates.append(x) } if let residualHiddenStates { return (x, outputStates, residualHiddenStates) } else { return (x, outputStates, []) } } } class UNetModel: Module { @ModuleInfo(key: "conv_in") var convIn: Conv2d let timesteps: SinusoidalPositionalEncoding @ModuleInfo(key: "time_embedding") var timeEmbedding: TimestepEmbedding @ModuleInfo(key: "addition_embed_type") var addTimeProj: SinusoidalPositionalEncoding? @ModuleInfo(key: "add_embedding") var addEmbedding: TimestepEmbedding? 
@ModuleInfo(key: "down_blocks") var downBlocks: [UNetBlock2D] @ModuleInfo(key: "mid_blocks") var midBlocks: (ResnetBlock2D, Transformer2D, ResnetBlock2D) @ModuleInfo(key: "up_blocks") var upBlocks: [UNetBlock2D] @ModuleInfo(key: "conv_norm_out") var convNormOut: GroupNorm @ModuleInfo(key: "conv_out") var convOut: Conv2d init(configuration: UNetConfiguration) { let channels0 = configuration.blockOutChannels[0] self._convIn.wrappedValue = Conv2d( inputChannels: configuration.inputChannels, outputChannels: channels0, kernelSize: .init(configuration.convolutionInKernel), padding: .init((configuration.convolutionInKernel - 1) / 2)) self.timesteps = SinusoidalPositionalEncoding( dimensions: channels0, minFrequency: exp(-log(10_000) + 2 * log(10_000) / Float(channels0)), maxFrequency: 1, scale: 1, cosineFirst: true, fullTurns: false) self._timeEmbedding.wrappedValue = TimestepEmbedding( inputChannels: channels0, timeEmbedDimensions: channels0 * 4) if configuration.additionEmbedType == "text_time", let additionTimeEmbedDimension = configuration.additionTimeEmbedDimension, let projectionClassEmbeddingsInputDimension = configuration .projectionClassEmbeddingsInputDimension { self._addTimeProj.wrappedValue = SinusoidalPositionalEncoding( dimensions: additionTimeEmbedDimension, minFrequency: exp( -log(10_000) + 2 * log(10_000) / Float(additionTimeEmbedDimension)), maxFrequency: 1, scale: 1, cosineFirst: true, fullTurns: false) self._addEmbedding.wrappedValue = TimestepEmbedding( inputChannels: projectionClassEmbeddingsInputDimension, timeEmbedDimensions: channels0 * 4) } // make the downsampling blocks let downblockChannels = [channels0] + configuration.blockOutChannels self._downBlocks.wrappedValue = zip(downblockChannels, downblockChannels.dropFirst()) .enumerated() .map { (i, pair) in let (inChannels, outChannels) = pair return UNetBlock2D( inputChannels: inChannels, outputChannels: outChannels, timeEmbedChannels: channels0 * 4, numLayers: configuration.layersPerBlock[i], transformerLayersPerBlock: configuration.transformerLayersPerBlock[i], numHeads: configuration.numHeads[i], crossAttentionDimension: configuration.crossAttentionDimension[i], resnetGroups: configuration.normNumGroups, addDownSample: i < configuration.blockOutChannels.count - 1, addUpSample: false, addCrossAttention: configuration.downBlockTypes[i].contains("CrossAttn") ) } // make the middle block let channelsLast = configuration.blockOutChannels.last! self._midBlocks.wrappedValue = ( ResnetBlock2D( inputChannels: channelsLast, outputChannels: channelsLast, groupCount: configuration.normNumGroups, timeEmbedChannels: channels0 * 4 ), Transformer2D( inputChannels: channelsLast, modelDimensions: channelsLast, encoderDimensions: configuration.crossAttentionDimension.last!, numHeads: configuration.numHeads.last!, numLayers: configuration.transformerLayersPerBlock.last! ), ResnetBlock2D( inputChannels: channelsLast, outputChannels: channelsLast, groupCount: configuration.normNumGroups, timeEmbedChannels: channels0 * 4 ) ) // make the upsampling blocks let upblockChannels = [channels0] + configuration.blockOutChannels + [configuration.blockOutChannels.last!] 
self._upBlocks.wrappedValue = zip(upblockChannels, zip(upblockChannels.dropFirst(), upblockChannels.dropFirst(2))) .enumerated() .reversed() .map { (i, triple) in let (inChannels, (outChannels, prevOutChannels)) = triple return UNetBlock2D( inputChannels: inChannels, outputChannels: outChannels, timeEmbedChannels: channels0 * 4, previousOutChannels: prevOutChannels, numLayers: configuration.layersPerBlock[i] + 1, transformerLayersPerBlock: configuration.transformerLayersPerBlock[i], numHeads: configuration.numHeads[i], crossAttentionDimension: configuration.crossAttentionDimension[i], resnetGroups: configuration.normNumGroups, addDownSample: false, addUpSample: i > 0, addCrossAttention: configuration.upBlockTypes[i].contains("CrossAttn") ) } self._convNormOut.wrappedValue = GroupNorm( groupCount: configuration.normNumGroups, dimensions: channels0, pytorchCompatible: true) self._convOut.wrappedValue = Conv2d( inputChannels: channels0, outputChannels: configuration.outputChannels, kernelSize: .init(configuration.convolutionOutKernel), padding: .init((configuration.convolutionOutKernel - 1) / 2)) } func callAsFunction( _ x: MLXArray, timestep: MLXArray, encoderX: MLXArray, attentionMask: MLXArray? = nil, encoderAttentionMask: MLXArray? = nil, textTime: (MLXArray, MLXArray)? = nil ) -> MLXArray { // compute the time embeddings var temb = timesteps(timestep).asType(x.dtype) temb = timeEmbedding(temb) // add the extra textTime conditioning if let (textEmbedding, timeIds) = textTime, let addTimeProj, let addEmbedding { var emb = addTimeProj(timeIds).flattened(start: 1).asType(x.dtype) emb = concatenated([textEmbedding, emb], axis: -1) emb = addEmbedding(emb) temb = temb + emb } // preprocess the input var x = convIn(x) // run the downsampling part of the unet var residuals = [x] for block in self.downBlocks { let res: [MLXArray] (x, res, _) = block( x, encoderX: encoderX, timeEmbedding: temb, attentionMask: attentionMask, encoderAttentionMask: encoderAttentionMask) residuals.append(contentsOf: res) } // run the middle part of the unet x = midBlocks.0(x, timeEmbedding: temb) x = midBlocks.1( x, encoderX: encoderX, attentionMask: attentionMask, encoderAttentionMask: encoderAttentionMask) x = midBlocks.2(x, timeEmbedding: temb) // run the upsampling part of the unet for block in self.upBlocks { (x, _, residuals) = block( x, encoderX: encoderX, timeEmbedding: temb, attentionMask: attentionMask, encoderAttentionMask: encoderAttentionMask, residualHiddenStates: residuals) } // postprocess the output let dtype = x.dtype x = convNormOut(x) x = silu(x) x = convOut(x) return x } }
Module: StableDiffusion
Name: VAE
# Copyright © 2023 Apple Inc. import math from typing import List import mlx.core as mx import mlx.nn as nn from .config import AutoencoderConfig from .unet import ResnetBlock2D, upsample_nearest class Attention(nn.Module): """A single head unmasked attention for use with the VAE.""" def __init__(self, dims: int, norm_groups: int = 32): super().__init__() self.group_norm = nn.GroupNorm(norm_groups, dims, pytorch_compatible=True) self.query_proj = nn.Linear(dims, dims) self.key_proj = nn.Linear(dims, dims) self.value_proj = nn.Linear(dims, dims) self.out_proj = nn.Linear(dims, dims) def __call__(self, x): B, H, W, C = x.shape y = self.group_norm(x) queries = self.query_proj(y).reshape(B, H * W, C) keys = self.key_proj(y).reshape(B, H * W, C) values = self.value_proj(y).reshape(B, H * W, C) scale = 1 / math.sqrt(queries.shape[-1]) scores = (queries * scale) @ keys.transpose(0, 2, 1) attn = mx.softmax(scores, axis=-1) y = (attn @ values).reshape(B, H, W, C) y = self.out_proj(y) x = x + y return x class EncoderDecoderBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, num_layers: int = 1, resnet_groups: int = 32, add_downsample=True, add_upsample=True, ): super().__init__() # Add the resnet blocks self.resnets = [ ResnetBlock2D( in_channels=in_channels if i == 0 else out_channels, out_channels=out_channels, groups=resnet_groups, ) for i in range(num_layers) ] # Add an optional downsampling layer if add_downsample: self.downsample = nn.Conv2d( out_channels, out_channels, kernel_size=3, stride=2, padding=0 ) # or upsampling layer if add_upsample: self.upsample = nn.Conv2d( out_channels, out_channels, kernel_size=3, stride=1, padding=1 ) def __call__(self, x): for resnet in self.resnets: x = resnet(x) if "downsample" in self: x = mx.pad(x, [(0, 0), (0, 1), (0, 1), (0, 0)]) x = self.downsample(x) if "upsample" in self: x = self.upsample(upsample_nearest(x)) return x class Encoder(nn.Module): """Implements the encoder side of the Autoencoder.""" def __init__( self, in_channels: int, out_channels: int, block_out_channels: List[int] = [64], layers_per_block: int = 2, resnet_groups: int = 32, ): super().__init__() self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1 ) channels = [block_out_channels[0]] + list(block_out_channels) self.down_blocks = [ EncoderDecoderBlock2D( in_channels, out_channels, num_layers=layers_per_block, resnet_groups=resnet_groups, add_downsample=i < len(block_out_channels) - 1, add_upsample=False, ) for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:])) ] self.mid_blocks = [ ResnetBlock2D( in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], groups=resnet_groups, ), Attention(block_out_channels[-1], resnet_groups), ResnetBlock2D( in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], groups=resnet_groups, ), ] self.conv_norm_out = nn.GroupNorm( resnet_groups, block_out_channels[-1], pytorch_compatible=True ) self.conv_out = nn.Conv2d(block_out_channels[-1], out_channels, 3, padding=1) def __call__(self, x): x = self.conv_in(x) for l in self.down_blocks: x = l(x) x = self.mid_blocks[0](x) x = self.mid_blocks[1](x) x = self.mid_blocks[2](x) x = self.conv_norm_out(x) x = nn.silu(x) x = self.conv_out(x) return x class Decoder(nn.Module): """Implements the decoder side of the Autoencoder.""" def __init__( self, in_channels: int, out_channels: int, block_out_channels: List[int] = [64], layers_per_block: int = 2, resnet_groups: int = 32, ): 
super().__init__() self.conv_in = nn.Conv2d( in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1 ) self.mid_blocks = [ ResnetBlock2D( in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], groups=resnet_groups, ), Attention(block_out_channels[-1], resnet_groups), ResnetBlock2D( in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], groups=resnet_groups, ), ] channels = list(reversed(block_out_channels)) channels = [channels[0]] + channels self.up_blocks = [ EncoderDecoderBlock2D( in_channels, out_channels, num_layers=layers_per_block, resnet_groups=resnet_groups, add_downsample=False, add_upsample=i < len(block_out_channels) - 1, ) for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:])) ] self.conv_norm_out = nn.GroupNorm( resnet_groups, block_out_channels[0], pytorch_compatible=True ) self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1) def __call__(self, x): x = self.conv_in(x) x = self.mid_blocks[0](x) x = self.mid_blocks[1](x) x = self.mid_blocks[2](x) for l in self.up_blocks: x = l(x) x = self.conv_norm_out(x) x = nn.silu(x) x = self.conv_out(x) return x class Autoencoder(nn.Module): """The autoencoder that allows us to perform diffusion in the latent space.""" def __init__(self, config: AutoencoderConfig): super().__init__() self.latent_channels = config.latent_channels_in self.scaling_factor = config.scaling_factor self.encoder = Encoder( config.in_channels, config.latent_channels_out, config.block_out_channels, config.layers_per_block, resnet_groups=config.norm_num_groups, ) self.decoder = Decoder( config.latent_channels_in, config.out_channels, config.block_out_channels, config.layers_per_block + 1, resnet_groups=config.norm_num_groups, ) self.quant_proj = nn.Linear( config.latent_channels_out, config.latent_channels_out ) self.post_quant_proj = nn.Linear( config.latent_channels_in, config.latent_channels_in ) def decode(self, z): z = z / self.scaling_factor return self.decoder(self.post_quant_proj(z)) def encode(self, x): x = self.encoder(x) x = self.quant_proj(x) mean, logvar = x.split(2, axis=-1) mean = mean * self.scaling_factor logvar = logvar + 2 * math.log(self.scaling_factor) return mean, logvar def __call__(self, x, key=None): mean, logvar = self.encode(x) z = mx.random.normal(mean.shape, key=key) * mx.exp(0.5 * logvar) + mean x_hat = self.decode(z) return dict(x_hat=x_hat, z=z, mean=mean, logvar=logvar)
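The Autoencoder's __call__ above samples a latent with the usual reparameterization trick. A hedged, self-contained sketch of just that step on stand-in tensors (the shapes are illustrative):

# Hedged sketch: reparameterization z = mean + exp(0.5 * logvar) * eps,
# matching the sampling step in Autoencoder.__call__ above.
import mlx.core as mx

mean = mx.zeros((1, 8, 8, 4))    # stand-in for the encoder's latent mean
logvar = mx.zeros((1, 8, 8, 4))  # stand-in for the encoder's latent log-variance

eps = mx.random.normal(mean.shape)
z = mean + mx.exp(0.5 * logvar) * eps
print(z.shape)  # (1, 8, 8, 4)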
// Copyright © 2024 Apple Inc. import Foundation import MLX import MLXNN // port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/vae.py class Attention: Module, UnaryLayer { @ModuleInfo(key: "group_norm") public var groupNorm: GroupNorm @ModuleInfo(key: "query_proj") public var queryProjection: Linear @ModuleInfo(key: "key_proj") public var keyProjection: Linear @ModuleInfo(key: "value_proj") public var valueProjection: Linear @ModuleInfo(key: "out_proj") public var outProjection: Linear init(dimensions: Int, groupCount: Int = 32) { self._groupNorm.wrappedValue = GroupNorm( groupCount: groupCount, dimensions: dimensions, pytorchCompatible: true) self._queryProjection.wrappedValue = Linear(dimensions, dimensions) self._keyProjection.wrappedValue = Linear(dimensions, dimensions) self._valueProjection.wrappedValue = Linear(dimensions, dimensions) self._outProjection.wrappedValue = Linear(dimensions, dimensions) } func callAsFunction(_ x: MLXArray) -> MLXArray { let (B, H, W, C) = x.shape4 var y = groupNorm(x) let queries = queryProjection(y).reshaped(B, H * W, C) let keys = keyProjection(y).reshaped(B, H * W, C) let values = valueProjection(y).reshaped(B, H * W, C) let scale = 1 / sqrt(Float(queries.dim(-1))) let scores = (queries * scale).matmul(keys.transposed(0, 2, 1)) let attention = softmax(scores, axis: -1) y = matmul(attention, values).reshaped(B, H, W, C) y = outProjection(y) return x + y } } class EncoderDecoderBlock2D: Module, UnaryLayer { let resnets: [ResnetBlock2D] let downsample: Conv2d? let upsample: Conv2d? init( inputChannels: Int, outputChannels: Int, numLayers: Int = 1, resnetGroups: Int = 32, addDownSample: Bool = true, addUpSample: Bool = true ) { // Add the resnet blocks self.resnets = (0 ..< numLayers) .map { i in ResnetBlock2D( inputChannels: i == 0 ? 
inputChannels : outputChannels, outputChannels: outputChannels, groupCount: resnetGroups) } // Add an optional downsampling layer if addDownSample { self.downsample = Conv2d( inputChannels: outputChannels, outputChannels: outputChannels, kernelSize: 3, stride: 2, padding: 0) } else { self.downsample = nil } // or upsampling layer if addUpSample { self.upsample = Conv2d( inputChannels: outputChannels, outputChannels: outputChannels, kernelSize: 3, stride: 1, padding: 1) } else { self.upsample = nil } } func callAsFunction(_ x: MLXArray) -> MLXArray { var x = x for resnet in resnets { x = resnet(x) } if let downsample { x = padded(x, widths: [[0, 0], [0, 1], [0, 1], [0, 0]]) x = downsample(x) } if let upsample { x = upsample(upsampleNearest(x)) } return x } } /// Implements the encoder side of the Autoencoder class VAEncoder: Module, UnaryLayer { @ModuleInfo(key: "conv_in") var convIn: Conv2d @ModuleInfo(key: "down_blocks") var downBlocks: [EncoderDecoderBlock2D] @ModuleInfo(key: "mid_blocks") var midBlocks: (ResnetBlock2D, Attention, ResnetBlock2D) @ModuleInfo(key: "conv_norm_out") var convNormOut: GroupNorm @ModuleInfo(key: "conv_out") var convOut: Conv2d init( inputChannels: Int, outputChannels: Int, blockOutChannels: [Int] = [64], layersPerBlock: Int = 2, resnetGroups: Int = 32 ) { let channels0 = blockOutChannels[0] self._convIn.wrappedValue = Conv2d( inputChannels: inputChannels, outputChannels: channels0, kernelSize: 3, stride: 1, padding: 1) let downblockChannels = [channels0] + blockOutChannels self._downBlocks.wrappedValue = zip(downblockChannels, downblockChannels.dropFirst()) .enumerated() .map { (i, pair) in let (inChannels, outChannels) = pair return EncoderDecoderBlock2D( inputChannels: inChannels, outputChannels: outChannels, numLayers: layersPerBlock, resnetGroups: resnetGroups, addDownSample: i < blockOutChannels.count - 1, addUpSample: false ) } let channelsLast = blockOutChannels.last! self._midBlocks.wrappedValue = ( ResnetBlock2D( inputChannels: channelsLast, outputChannels: channelsLast, groupCount: resnetGroups ), Attention(dimensions: channelsLast, groupCount: resnetGroups), ResnetBlock2D( inputChannels: channelsLast, outputChannels: channelsLast, groupCount: resnetGroups ) ) self._convNormOut.wrappedValue = GroupNorm( groupCount: resnetGroups, dimensions: channelsLast, pytorchCompatible: true) self._convOut.wrappedValue = Conv2d( inputChannels: channelsLast, outputChannels: outputChannels, kernelSize: 3, padding: 1) } func callAsFunction(_ x: MLXArray) -> MLXArray { var x = convIn(x) for l in downBlocks { x = l(x) } x = midBlocks.0(x) x = midBlocks.1(x) x = midBlocks.2(x) x = convNormOut(x) x = silu(x) x = convOut(x) return x } } /// Implements the decoder side of the Autoencoder class VADecoder: Module, UnaryLayer { @ModuleInfo(key: "conv_in") var convIn: Conv2d @ModuleInfo(key: "mid_blocks") var midBlocks: (ResnetBlock2D, Attention, ResnetBlock2D) @ModuleInfo(key: "up_blocks") var upBlocks: [EncoderDecoderBlock2D] @ModuleInfo(key: "conv_norm_out") var convNormOut: GroupNorm @ModuleInfo(key: "conv_out") var convOut: Conv2d init( inputChannels: Int, outputChannels: Int, blockOutChannels: [Int] = [64], layersPerBlock: Int = 2, resnetGroups: Int = 32 ) { let channels0 = blockOutChannels[0] let channelsLast = blockOutChannels.last! 
self._convIn.wrappedValue = Conv2d( inputChannels: inputChannels, outputChannels: channelsLast, kernelSize: 3, stride: 1, padding: 1) self._midBlocks.wrappedValue = ( ResnetBlock2D( inputChannels: channelsLast, outputChannels: channelsLast, groupCount: resnetGroups ), Attention(dimensions: channelsLast, groupCount: resnetGroups), ResnetBlock2D( inputChannels: channelsLast, outputChannels: channelsLast, groupCount: resnetGroups ) ) let channels = [channelsLast] + blockOutChannels.reversed() self._upBlocks.wrappedValue = zip(channels, channels.dropFirst()) .enumerated() .map { (i, pair) in let (inChannels, outChannels) = pair return EncoderDecoderBlock2D( inputChannels: inChannels, outputChannels: outChannels, numLayers: layersPerBlock, resnetGroups: resnetGroups, addDownSample: false, addUpSample: i < blockOutChannels.count - 1 ) } self._convNormOut.wrappedValue = GroupNorm( groupCount: resnetGroups, dimensions: channels0, pytorchCompatible: true) self._convOut.wrappedValue = Conv2d( inputChannels: channels0, outputChannels: outputChannels, kernelSize: 3, padding: 1) } func callAsFunction(_ x: MLXArray) -> MLXArray { var x = convIn(x) x = midBlocks.0(x) x = midBlocks.1(x) x = midBlocks.2(x) for l in upBlocks { x = l(x) } x = convNormOut(x) x = silu(x) x = convOut(x) return x } } /// The autoencoder that allows us to perform diffusion in the latent space class Autoencoder: Module { let latentChannels: Int let scalingFactor: Float let encoder: VAEncoder let decoder: VADecoder @ModuleInfo(key: "quant_proj") public var quantProjection: Linear @ModuleInfo(key: "post_quant_proj") public var postQuantProjection: Linear init(configuration: AutoencoderConfiguration) { self.latentChannels = configuration.latentChannelsIn self.scalingFactor = configuration.scalingFactor self.encoder = VAEncoder( inputChannels: configuration.inputChannels, outputChannels: configuration.latentChannelsOut, blockOutChannels: configuration.blockOutChannels, layersPerBlock: configuration.layersPerBlock, resnetGroups: configuration.normNumGroups) self.decoder = VADecoder( inputChannels: configuration.latentChannelsIn, outputChannels: configuration.outputChannels, blockOutChannels: configuration.blockOutChannels, layersPerBlock: configuration.layersPerBlock + 1, resnetGroups: configuration.normNumGroups) self._quantProjection.wrappedValue = Linear( configuration.latentChannelsIn, configuration.latentChannelsOut) self._postQuantProjection.wrappedValue = Linear( configuration.latentChannelsIn, configuration.latentChannelsIn) } func decode(_ z: MLXArray) -> MLXArray { let z = z / scalingFactor return decoder(postQuantProjection(z)) } func encode(_ x: MLXArray) -> (MLXArray, MLXArray) { var x = encoder(x) x = quantProjection(x) var (mean, logvar) = x.split(axis: -1) mean = mean * scalingFactor logvar = logvar + 2 * log(scalingFactor) return (mean, logvar) } struct Result { let xHat: MLXArray let z: MLXArray let mean: MLXArray let logvar: MLXArray } func callAsFunction(_ x: MLXArray, key: MLXArray? = nil) -> Result { let (mean, logvar) = encode(x) let z = MLXRandom.normal(mean.shape, key: key) * exp(0.5 * logvar) + mean let xHat = decode(z) return Result(xHat: xHat, z: z, mean: mean, logvar: logvar) } }
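A minimal usage sketch of the autoencoder round trip (not part of either port above), assuming the mlx-examples `stable_diffusion` package is importable and that images are channels-last float arrays in [-1, 1]; shapes assume the SD 2.1 VAE config (4 latent channels, 8x spatial downsampling). `Autoencoder.__call__` wraps the same encode / reparameterize / decode sequence and returns a dict.

import mlx.core as mx
from stable_diffusion.model_io import load_autoencoder

vae = load_autoencoder("stabilityai/stable-diffusion-2-1-base")

# Fake image batch, NHWC, values in [-1, 1]
x = mx.random.uniform(shape=(1, 512, 512, 3)) * 2 - 1
mean, logvar = vae.encode(x)                                      # each (1, 64, 64, 4), already scaled
z = mean + mx.exp(0.5 * logvar) * mx.random.normal(mean.shape)    # reparameterization trick
x_hat = vae.decode(z)                                             # back to (1, 512, 512, 3)
mx.eval(x_hat)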
StableDiffusion
CLIP
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import List, Optional import mlx.core as mx import mlx.nn as nn from .config import CLIPTextModelConfig _ACTIVATIONS = {"quick_gelu": nn.gelu_fast_approx, "gelu": nn.gelu} @dataclass class CLIPOutput: # The last_hidden_state indexed at the EOS token and possibly projected if # the model has a projection layer pooled_output: Optional[mx.array] = None # The full sequence output of the transformer after the final layernorm last_hidden_state: Optional[mx.array] = None # A list of hidden states corresponding to the outputs of the transformer layers hidden_states: Optional[List[mx.array]] = None class CLIPEncoderLayer(nn.Module): """The transformer encoder layer from CLIP.""" def __init__(self, model_dims: int, num_heads: int, activation: str): super().__init__() self.layer_norm1 = nn.LayerNorm(model_dims) self.layer_norm2 = nn.LayerNorm(model_dims) self.attention = nn.MultiHeadAttention(model_dims, num_heads) # Add biases to the attention projections to match CLIP self.attention.query_proj.bias = mx.zeros(model_dims) self.attention.key_proj.bias = mx.zeros(model_dims) self.attention.value_proj.bias = mx.zeros(model_dims) self.attention.out_proj.bias = mx.zeros(model_dims) self.linear1 = nn.Linear(model_dims, 4 * model_dims) self.linear2 = nn.Linear(4 * model_dims, model_dims) self.act = _ACTIVATIONS[activation] def __call__(self, x, attn_mask=None): y = self.layer_norm1(x) y = self.attention(y, y, y, attn_mask) x = y + x y = self.layer_norm2(x) y = self.linear1(y) y = self.act(y) y = self.linear2(y) x = y + x return x class CLIPTextModel(nn.Module): """Implements the text encoder transformer from CLIP.""" def __init__(self, config: CLIPTextModelConfig): super().__init__() self.token_embedding = nn.Embedding(config.vocab_size, config.model_dims) self.position_embedding = nn.Embedding(config.max_length, config.model_dims) self.layers = [ CLIPEncoderLayer(config.model_dims, config.num_heads, config.hidden_act) for i in range(config.num_layers) ] self.final_layer_norm = nn.LayerNorm(config.model_dims) if config.projection_dim is not None: self.text_projection = nn.Linear( config.model_dims, config.projection_dim, bias=False ) def _get_mask(self, N, dtype): indices = mx.arange(N) mask = indices[:, None] < indices[None] mask = mask.astype(dtype) * (-6e4 if dtype == mx.float16 else -1e9) return mask def __call__(self, x): # Extract some shapes B, N = x.shape eos_tokens = x.argmax(-1) # Compute the embeddings x = self.token_embedding(x) x = x + self.position_embedding.weight[:N] # Compute the features from the transformer mask = self._get_mask(N, x.dtype) hidden_states = [] for l in self.layers: x = l(x, mask) hidden_states.append(x) # Apply the final layernorm and return x = self.final_layer_norm(x) last_hidden_state = x # Select the EOS token pooled_output = x[mx.arange(len(x)), eos_tokens] if "text_projection" in self: pooled_output = self.text_projection(pooled_output) return CLIPOutput( pooled_output=pooled_output, last_hidden_state=last_hidden_state, hidden_states=hidden_states, )
// Copyright © 2024 Apple Inc. import Foundation import MLX import MLXNN // port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/clip.py struct CLIPOutput { /// The lastHiddenState indexed at the EOS token and possibly projected if /// the model has a projection layer public var pooledOutput: MLXArray /// The full sequence output of the transformer after the final layernorm public var lastHiddenState: MLXArray /// A list of hidden states corresponding to the outputs of the transformer layers public var hiddenStates: [MLXArray] } /// The transformer encoder layer from CLIP class CLIPEncoderLayer: Module { @ModuleInfo(key: "layer_norm1") var layerNorm1: LayerNorm @ModuleInfo(key: "layer_norm2") var layerNorm2: LayerNorm let attention: MultiHeadAttention @ModuleInfo var linear1: Linear @ModuleInfo var linear2: Linear let activation: (MLXArray) -> MLXArray init(modelDimensions: Int, numHeads: Int, activation: @escaping (MLXArray) -> MLXArray) { self._layerNorm1.wrappedValue = LayerNorm(dimensions: modelDimensions) self._layerNorm2.wrappedValue = LayerNorm(dimensions: modelDimensions) self.attention = MultiHeadAttention( dimensions: modelDimensions, numHeads: numHeads, bias: true) self.linear1 = Linear(modelDimensions, 4 * modelDimensions) self.linear2 = Linear(4 * modelDimensions, modelDimensions) self.activation = activation } func callAsFunction(_ x: MLXArray, attentionMask: MLXArray? = nil) -> MLXArray { var y = layerNorm1(x) y = attention(y, keys: y, values: y, mask: attentionMask) var x = y + x y = layerNorm2(x) y = linear1(y) y = activation(y) y = linear2(y) x = y + x return x } } /// Implements the text encoder transformer from CLIP class CLIPTextModel: Module { @ModuleInfo(key: "token_embedding") var tokenEmbedding: Embedding @ModuleInfo(key: "position_embedding") var positionEmbedding: Embedding let layers: [CLIPEncoderLayer] @ModuleInfo(key: "final_layer_norm") var finalLayerNorm: LayerNorm @ModuleInfo(key: "text_projection") var textProjection: Linear? init(configuration: CLIPTextModelConfiguration) { self._tokenEmbedding.wrappedValue = Embedding( embeddingCount: configuration.vocabularySize, dimensions: configuration.modelDimensions) self._positionEmbedding.wrappedValue = Embedding( embeddingCount: configuration.maxLength, dimensions: configuration.modelDimensions) self.layers = (0 ..< configuration.numLayers) .map { _ in CLIPEncoderLayer( modelDimensions: configuration.modelDimensions, numHeads: configuration.numHeads, activation: configuration.hiddenActivation.activation) } self._finalLayerNorm.wrappedValue = LayerNorm(dimensions: configuration.modelDimensions) if let projectionDimensions = configuration.projectionDimensions { self._textProjection.wrappedValue = Linear( configuration.modelDimensions, projectionDimensions, bias: false) } else { self._textProjection.wrappedValue = nil } } func mask(_ N: Int, _ dType: DType) -> MLXArray { let indices = MLXArray(0 ..< Int32(N)) var mask = indices[0..., .newAxis] .< indices[.newAxis] mask = mask.asType(dType) * (dType == .float16 ? 
-6e4 : -1e9) return mask } func callAsFunction(_ x: MLXArray) -> CLIPOutput { var x = x let (_, N) = x.shape2 let eosTokens = x.argMax(axis: -1) // compute the embeddings x = tokenEmbedding(x) x = x + positionEmbedding.weight[..<N] // compute the features from the transformer let mask = mask(N, x.dtype) var hiddenStates = [MLXArray]() for l in layers { x = l(x, attentionMask: mask) hiddenStates.append(x) } // apply the final layernorm x = finalLayerNorm(x) let lastHiddenState = x // select the EOS token var pooledOutput = x[MLXArray(0 ..< x.count), eosTokens] if let textProjection { pooledOutput = textProjection(pooledOutput) } return CLIPOutput( pooledOutput: pooledOutput, lastHiddenState: lastHiddenState, hiddenStates: hiddenStates ) } }
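A small sketch (not from the ported sources) of two details that are easy to miss in the text encoder: the additive causal mask built by `_get_mask` / `mask(_:_:)`, and pooling at the EOS position, which works because the end-of-text id is the largest id the CLIP tokenizer emits, so `argmax` over the token ids locates it. The token ids below are illustrative.

import mlx.core as mx

N = 4
indices = mx.arange(N)
mask = (indices[:, None] < indices[None]).astype(mx.float32) * -1e9
# mask[i, j] is a large negative bias for j > i, so position i only attends to positions <= i

tokens = mx.array([[49406, 320, 1125, 49407]])   # <|startoftext|>, "a", "photo", <|endoftext|>
eos_positions = tokens.argmax(-1)                # EOS has the largest id, so argmax finds its index
print(eos_positions.item())                      # 3 -> pooled_output = last_hidden_state[:, 3]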
StableDiffusion
Config
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import Optional, Tuple @dataclass class AutoencoderConfig: in_channels: int = 3 out_channels: int = 3 latent_channels_out: int = 8 latent_channels_in: int = 4 block_out_channels: Tuple[int] = (128, 256, 512, 512) layers_per_block: int = 2 norm_num_groups: int = 32 scaling_factor: float = 0.18215 @dataclass class CLIPTextModelConfig: num_layers: int = 23 model_dims: int = 1024 num_heads: int = 16 max_length: int = 77 vocab_size: int = 49408 projection_dim: Optional[int] = None hidden_act: str = "quick_gelu" @dataclass class UNetConfig: in_channels: int = 4 out_channels: int = 4 conv_in_kernel: int = 3 conv_out_kernel: int = 3 block_out_channels: Tuple[int] = (320, 640, 1280, 1280) layers_per_block: Tuple[int] = (2, 2, 2, 2) mid_block_layers: int = 2 transformer_layers_per_block: Tuple[int] = (1, 1, 1, 1) num_attention_heads: Tuple[int] = (5, 10, 20, 20) cross_attention_dim: Tuple[int] = (1024,) * 4 norm_num_groups: int = 32 down_block_types: Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) up_block_types: Tuple[str] = ( "UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", ) addition_embed_type: Optional[str] = None addition_time_embed_dim: Optional[int] = None projection_class_embeddings_input_dim: Optional[int] = None @dataclass class DiffusionConfig: beta_schedule: str = "scaled_linear" beta_start: float = 0.00085 beta_end: float = 0.012 num_train_steps: int = 1000
// Copyright © 2024 Apple Inc. import Foundation import MLX import MLXNN // port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/config.py /// Configuration for ``Autoencoder`` struct AutoencoderConfiguration: Codable { public var inputChannels = 3 public var outputChannels = 3 public var latentChannelsOut: Int { latentChannelsIn * 2 } public var latentChannelsIn = 4 public var blockOutChannels = [128, 256, 512, 512] public var layersPerBlock = 2 public var normNumGroups = 32 public var scalingFactor: Float = 0.18215 enum CodingKeys: String, CodingKey { case inputChannels = "in_channels" case outputChannels = "out_channels" case latentChannelsIn = "latent_channels" case blockOutChannels = "block_out_channels" case layersPerBlock = "layers_per_block" case normNumGroups = "norm_num_groups" case scalingFactor = "scaling_factor" } public init(from decoder: any Decoder) throws { let container: KeyedDecodingContainer<AutoencoderConfiguration.CodingKeys> = try decoder.container(keyedBy: AutoencoderConfiguration.CodingKeys.self) // load_autoencoder() self.scalingFactor = try container.decodeIfPresent(Float.self, forKey: .scalingFactor) ?? 0.18215 self.inputChannels = try container.decode(Int.self, forKey: .inputChannels) self.outputChannels = try container.decode(Int.self, forKey: .outputChannels) self.latentChannelsIn = try container.decode(Int.self, forKey: .latentChannelsIn) self.blockOutChannels = try container.decode([Int].self, forKey: .blockOutChannels) self.layersPerBlock = try container.decode(Int.self, forKey: .layersPerBlock) self.normNumGroups = try container.decode(Int.self, forKey: .normNumGroups) } public func encode(to encoder: any Encoder) throws { var container: KeyedEncodingContainer<AutoencoderConfiguration.CodingKeys> = encoder.container(keyedBy: AutoencoderConfiguration.CodingKeys.self) try container.encode(self.inputChannels, forKey: .inputChannels) try container.encode(self.outputChannels, forKey: .outputChannels) try container.encode(self.latentChannelsIn, forKey: .latentChannelsIn) try container.encode(self.blockOutChannels, forKey: .blockOutChannels) try container.encode(self.layersPerBlock, forKey: .layersPerBlock) try container.encode(self.normNumGroups, forKey: .normNumGroups) try container.encode(self.scalingFactor, forKey: .scalingFactor) } } /// Configuration for ``CLIPTextModel`` struct CLIPTextModelConfiguration: Codable { public enum ClipActivation: String, Codable { case fast = "quick_gelu" case gelu = "gelu" var activation: (MLXArray) -> MLXArray { switch self { case .fast: MLXNN.geluFastApproximate case .gelu: MLXNN.gelu } } } public var numLayers = 23 public var modelDimensions = 1024 public var numHeads = 16 public var maxLength = 77 public var vocabularySize = 49408 public var projectionDimensions: Int? 
= nil public var hiddenActivation: ClipActivation = .fast enum CodingKeys: String, CodingKey { case numLayers = "num_hidden_layers" case modelDimensions = "hidden_size" case numHeads = "num_attention_heads" case maxLength = "max_position_embeddings" case vocabularySize = "vocab_size" case projectionDimensions = "projection_dim" case hiddenActivation = "hidden_act" case architectures = "architectures" } public init(from decoder: any Decoder) throws { let container: KeyedDecodingContainer<CLIPTextModelConfiguration.CodingKeys> = try decoder.container(keyedBy: CLIPTextModelConfiguration.CodingKeys.self) // see load_text_encoder let architectures = try container.decode([String].self, forKey: .architectures) let withProjection = architectures[0].contains("WithProjection") self.projectionDimensions = withProjection ? try container.decodeIfPresent(Int.self, forKey: .projectionDimensions) : nil self.hiddenActivation = try container.decodeIfPresent( CLIPTextModelConfiguration.ClipActivation.self, forKey: .hiddenActivation) ?? .fast self.numLayers = try container.decode(Int.self, forKey: .numLayers) self.modelDimensions = try container.decode(Int.self, forKey: .modelDimensions) self.numHeads = try container.decode(Int.self, forKey: .numHeads) self.maxLength = try container.decode(Int.self, forKey: .maxLength) self.vocabularySize = try container.decode(Int.self, forKey: .vocabularySize) } public func encode(to encoder: any Encoder) throws { var container: KeyedEncodingContainer<CLIPTextModelConfiguration.CodingKeys> = encoder.container(keyedBy: CLIPTextModelConfiguration.CodingKeys.self) if projectionDimensions != nil { try container.encode(["WithProjection"], forKey: .architectures) } else { try container.encode(["Other"], forKey: .architectures) } try container.encode(self.numLayers, forKey: .numLayers) try container.encode(self.modelDimensions, forKey: .modelDimensions) try container.encode(self.numHeads, forKey: .numHeads) try container.encode(self.maxLength, forKey: .maxLength) try container.encode(self.vocabularySize, forKey: .vocabularySize) try container.encodeIfPresent(self.projectionDimensions, forKey: .projectionDimensions) try container.encode(self.hiddenActivation, forKey: .hiddenActivation) } } /// Configuration for ``UNetModel`` struct UNetConfiguration: Codable { public var inputChannels = 4 public var outputChannels = 4 public var convolutionInKernel = 3 public var convolutionOutKernel = 3 public var blockOutChannels = [320, 640, 1280, 1280] public var layersPerBlock = [2, 2, 2, 2] public var midBlockLayers = 2 public var transformerLayersPerBlock = [2, 2, 2, 2] public var numHeads = [5, 10, 20, 20] public var crossAttentionDimension = [1024, 1024, 1024, 1024] public var normNumGroups = 32 public var downBlockTypes: [String] = [] public var upBlockTypes: [String] = [] public var additionEmbedType: String? = nil public var additionTimeEmbedDimension: Int? = nil public var projectionClassEmbeddingsInputDimension: Int? 
= nil enum CodingKeys: String, CodingKey { case inputChannels = "in_channels" case outputChannels = "out_channels" case convolutionInKernel = "conv_in_kernel" case convolutionOutKernel = "conv_out_kernel" case blockOutChannels = "block_out_channels" case layersPerBlock = "layers_per_block" case midBlockLayers = "mid_block_layers" case transformerLayersPerBlock = "transformer_layers_per_block" case numHeads = "attention_head_dim" case crossAttentionDimension = "cross_attention_dim" case normNumGroups = "norm_num_groups" case downBlockTypes = "down_block_types" case upBlockTypes = "up_block_types" case additionEmbedType = "addition_embed_type" case additionTimeEmbedDimension = "addition_time_embed_dim" case projectionClassEmbeddingsInputDimension = "projection_class_embeddings_input_dim" } public init() { } public init(from decoder: Decoder) throws { let container: KeyedDecodingContainer<UNetConfiguration.CodingKeys> = try decoder.container( keyedBy: UNetConfiguration.CodingKeys.self) // customizations based on def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False): // // Note: the encode() writes out the internal format (and this can load it back in) self.blockOutChannels = try container.decode([Int].self, forKey: .blockOutChannels) let nBlocks = blockOutChannels.count self.layersPerBlock = try (try? container.decode([Int].self, forKey: .layersPerBlock)) ?? Array(repeating: container.decode(Int.self, forKey: .layersPerBlock), count: nBlocks) self.transformerLayersPerBlock = (try? container.decode([Int].self, forKey: .transformerLayersPerBlock)) ?? [1, 1, 1, 1] self.numHeads = try (try? container.decodeIfPresent([Int].self, forKey: .numHeads)) ?? Array(repeating: container.decode(Int.self, forKey: .numHeads), count: nBlocks) self.crossAttentionDimension = try (try? container.decode([Int].self, forKey: .crossAttentionDimension)) ?? Array( repeating: container.decode(Int.self, forKey: .crossAttentionDimension), count: nBlocks) self.upBlockTypes = try container.decode([String].self, forKey: .upBlockTypes).reversed() self.convolutionInKernel = try container.decodeIfPresent(Int.self, forKey: .convolutionInKernel) ?? 3 self.convolutionOutKernel = try container.decodeIfPresent(Int.self, forKey: .convolutionOutKernel) ?? 3 self.midBlockLayers = try container.decodeIfPresent(Int.self, forKey: .midBlockLayers) ?? 
2 self.inputChannels = try container.decode(Int.self, forKey: .inputChannels) self.outputChannels = try container.decode(Int.self, forKey: .outputChannels) self.normNumGroups = try container.decode(Int.self, forKey: .normNumGroups) self.downBlockTypes = try container.decode([String].self, forKey: .downBlockTypes) self.additionEmbedType = try container.decodeIfPresent( String.self, forKey: .additionEmbedType) self.additionTimeEmbedDimension = try container.decodeIfPresent( Int.self, forKey: .additionTimeEmbedDimension) self.projectionClassEmbeddingsInputDimension = try container.decodeIfPresent( Int.self, forKey: .projectionClassEmbeddingsInputDimension) } public func encode(to encoder: Encoder) throws { var container: KeyedEncodingContainer<UNetConfiguration.CodingKeys> = encoder.container( keyedBy: UNetConfiguration.CodingKeys.self) try container.encode(self.upBlockTypes.reversed(), forKey: .upBlockTypes) try container.encode(self.inputChannels, forKey: .inputChannels) try container.encode(self.outputChannels, forKey: .outputChannels) try container.encode(self.convolutionInKernel, forKey: .convolutionInKernel) try container.encode(self.convolutionOutKernel, forKey: .convolutionOutKernel) try container.encode(self.blockOutChannels, forKey: .blockOutChannels) try container.encode(self.layersPerBlock, forKey: .layersPerBlock) try container.encode(self.midBlockLayers, forKey: .midBlockLayers) try container.encode(self.transformerLayersPerBlock, forKey: .transformerLayersPerBlock) try container.encode(self.numHeads, forKey: .numHeads) try container.encode(self.crossAttentionDimension, forKey: .crossAttentionDimension) try container.encode(self.normNumGroups, forKey: .normNumGroups) try container.encode(self.downBlockTypes, forKey: .downBlockTypes) try container.encodeIfPresent(self.additionEmbedType, forKey: .additionEmbedType) try container.encodeIfPresent( self.additionTimeEmbedDimension, forKey: .additionTimeEmbedDimension) try container.encodeIfPresent( self.projectionClassEmbeddingsInputDimension, forKey: .projectionClassEmbeddingsInputDimension) } } /// Configuration for ``StableDiffusion`` public struct DiffusionConfiguration: Codable { public enum BetaSchedule: String, Codable { case linear = "linear" case scaledLinear = "scaled_linear" } public var betaSchedule = BetaSchedule.scaledLinear public var betaStart: Float = 0.00085 public var betaEnd: Float = 0.012 public var trainSteps = 3 enum CodingKeys: String, CodingKey { case betaSchedule = "beta_schedule" case betaStart = "beta_start" case betaEnd = "beta_end" case trainSteps = "num_train_timesteps" } }
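A short sketch (not from the ported sources) of how a diffusers `scheduler_config.json` maps onto `DiffusionConfig`; both the Python loader and the Swift `CodingKeys` rename `num_train_timesteps` to the internal field. The JSON values below are the standard SD 2.1 scheduler settings, included only for illustration.

from stable_diffusion.config import DiffusionConfig

scheduler_json = {
    "beta_schedule": "scaled_linear",
    "beta_start": 0.00085,
    "beta_end": 0.012,
    "num_train_timesteps": 1000,
}
config = DiffusionConfig(
    beta_schedule=scheduler_json["beta_schedule"],
    beta_start=scheduler_json["beta_start"],
    beta_end=scheduler_json["beta_end"],
    num_train_steps=scheduler_json["num_train_timesteps"],  # note the key rename
)
print(config)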
StableDiffusion
Load
# Copyright © 2023-2024 Apple Inc. import json from typing import Optional import mlx.core as mx from huggingface_hub import hf_hub_download from mlx.utils import tree_unflatten from .clip import CLIPTextModel from .config import AutoencoderConfig, CLIPTextModelConfig, DiffusionConfig, UNetConfig from .tokenizer import Tokenizer from .unet import UNetModel from .vae import Autoencoder _DEFAULT_MODEL = "stabilityai/stable-diffusion-2-1-base" _MODELS = { # See https://huggingface.co/stabilityai/sdxl-turbo for the model details and license "stabilityai/sdxl-turbo": { "unet_config": "unet/config.json", "unet": "unet/diffusion_pytorch_model.safetensors", "text_encoder_config": "text_encoder/config.json", "text_encoder": "text_encoder/model.safetensors", "text_encoder_2_config": "text_encoder_2/config.json", "text_encoder_2": "text_encoder_2/model.safetensors", "vae_config": "vae/config.json", "vae": "vae/diffusion_pytorch_model.safetensors", "diffusion_config": "scheduler/scheduler_config.json", "tokenizer_vocab": "tokenizer/vocab.json", "tokenizer_merges": "tokenizer/merges.txt", "tokenizer_2_vocab": "tokenizer_2/vocab.json", "tokenizer_2_merges": "tokenizer_2/merges.txt", }, # See https://huggingface.co/stabilityai/stable-diffusion-2-1-base for the model details and license "stabilityai/stable-diffusion-2-1-base": { "unet_config": "unet/config.json", "unet": "unet/diffusion_pytorch_model.safetensors", "text_encoder_config": "text_encoder/config.json", "text_encoder": "text_encoder/model.safetensors", "vae_config": "vae/config.json", "vae": "vae/diffusion_pytorch_model.safetensors", "diffusion_config": "scheduler/scheduler_config.json", "tokenizer_vocab": "tokenizer/vocab.json", "tokenizer_merges": "tokenizer/merges.txt", }, } def map_unet_weights(key, value): # Map up/downsampling if "downsamplers" in key: key = key.replace("downsamplers.0.conv", "downsample") if "upsamplers" in key: key = key.replace("upsamplers.0.conv", "upsample") # Map the mid block if "mid_block.resnets.0" in key: key = key.replace("mid_block.resnets.0", "mid_blocks.0") if "mid_block.attentions.0" in key: key = key.replace("mid_block.attentions.0", "mid_blocks.1") if "mid_block.resnets.1" in key: key = key.replace("mid_block.resnets.1", "mid_blocks.2") # Map attention layers if "to_k" in key: key = key.replace("to_k", "key_proj") if "to_out.0" in key: key = key.replace("to_out.0", "out_proj") if "to_q" in key: key = key.replace("to_q", "query_proj") if "to_v" in key: key = key.replace("to_v", "value_proj") # Map transformer ffn if "ff.net.2" in key: key = key.replace("ff.net.2", "linear3") if "ff.net.0" in key: k1 = key.replace("ff.net.0.proj", "linear1") k2 = key.replace("ff.net.0.proj", "linear2") v1, v2 = mx.split(value, 2) return [(k1, v1), (k2, v2)] if "conv_shortcut.weight" in key: value = value.squeeze() # Transform the weights from 1x1 convs to linear if len(value.shape) == 4 and ("proj_in" in key or "proj_out" in key): value = value.squeeze() if len(value.shape) == 4: value = value.transpose(0, 2, 3, 1) value = value.reshape(-1).reshape(value.shape) return [(key, value)] def map_clip_text_encoder_weights(key, value): # Remove prefixes if key.startswith("text_model."): key = key[11:] if key.startswith("embeddings."): key = key[11:] if key.startswith("encoder."): key = key[8:] # Map attention layers if "self_attn." in key: key = key.replace("self_attn.", "attention.") if "q_proj." in key: key = key.replace("q_proj.", "query_proj.") if "k_proj." in key: key = key.replace("k_proj.", "key_proj.") if "v_proj." 
in key: key = key.replace("v_proj.", "value_proj.") # Map ffn layers if "mlp.fc1" in key: key = key.replace("mlp.fc1", "linear1") if "mlp.fc2" in key: key = key.replace("mlp.fc2", "linear2") return [(key, value)] def map_vae_weights(key, value): # Map up/downsampling if "downsamplers" in key: key = key.replace("downsamplers.0.conv", "downsample") if "upsamplers" in key: key = key.replace("upsamplers.0.conv", "upsample") # Map attention layers if "to_k" in key: key = key.replace("to_k", "key_proj") if "to_out.0" in key: key = key.replace("to_out.0", "out_proj") if "to_q" in key: key = key.replace("to_q", "query_proj") if "to_v" in key: key = key.replace("to_v", "value_proj") # Map the mid block if "mid_block.resnets.0" in key: key = key.replace("mid_block.resnets.0", "mid_blocks.0") if "mid_block.attentions.0" in key: key = key.replace("mid_block.attentions.0", "mid_blocks.1") if "mid_block.resnets.1" in key: key = key.replace("mid_block.resnets.1", "mid_blocks.2") # Map the quant/post_quant layers if "quant_conv" in key: key = key.replace("quant_conv", "quant_proj") value = value.squeeze() # Map the conv_shortcut to linear if "conv_shortcut.weight" in key: value = value.squeeze() if len(value.shape) == 4: value = value.transpose(0, 2, 3, 1) value = value.reshape(-1).reshape(value.shape) return [(key, value)] def _flatten(params): return [(k, v) for p in params for (k, v) in p] def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False): dtype = mx.float16 if float16 else mx.float32 weights = mx.load(weight_file) weights = _flatten([mapper(k, v.astype(dtype)) for k, v in weights.items()]) model.update(tree_unflatten(weights)) def _check_key(key: str, part: str): if key not in _MODELS: raise ValueError( f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}" ) def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False): """Load the stable diffusion UNet from Hugging Face Hub.""" _check_key(key, "load_unet") # Download the config and create the model unet_config = _MODELS[key]["unet_config"] with open(hf_hub_download(key, unet_config)) as f: config = json.load(f) n_blocks = len(config["block_out_channels"]) model = UNetModel( UNetConfig( in_channels=config["in_channels"], out_channels=config["out_channels"], block_out_channels=config["block_out_channels"], layers_per_block=[config["layers_per_block"]] * n_blocks, transformer_layers_per_block=config.get( "transformer_layers_per_block", (1,) * 4 ), num_attention_heads=( [config["attention_head_dim"]] * n_blocks if isinstance(config["attention_head_dim"], int) else config["attention_head_dim"] ), cross_attention_dim=[config["cross_attention_dim"]] * n_blocks, norm_num_groups=config["norm_num_groups"], down_block_types=config["down_block_types"], up_block_types=config["up_block_types"][::-1], addition_embed_type=config.get("addition_embed_type", None), addition_time_embed_dim=config.get("addition_time_embed_dim", None), projection_class_embeddings_input_dim=config.get( "projection_class_embeddings_input_dim", None ), ) ) # Download the weights and map them into the model unet_weights = _MODELS[key]["unet"] weight_file = hf_hub_download(key, unet_weights) _load_safetensor_weights(map_unet_weights, model, weight_file, float16) return model def load_text_encoder( key: str = _DEFAULT_MODEL, float16: bool = False, model_key: str = "text_encoder", config_key: Optional[str] = None, ): """Load the stable diffusion text encoder from Hugging Face Hub.""" _check_key(key, "load_text_encoder") config_key = 
config_key or (model_key + "_config") # Download the config and create the model text_encoder_config = _MODELS[key][config_key] with open(hf_hub_download(key, text_encoder_config)) as f: config = json.load(f) with_projection = "WithProjection" in config["architectures"][0] model = CLIPTextModel( CLIPTextModelConfig( num_layers=config["num_hidden_layers"], model_dims=config["hidden_size"], num_heads=config["num_attention_heads"], max_length=config["max_position_embeddings"], vocab_size=config["vocab_size"], projection_dim=config["projection_dim"] if with_projection else None, hidden_act=config.get("hidden_act", "quick_gelu"), ) ) # Download the weights and map them into the model text_encoder_weights = _MODELS[key][model_key] weight_file = hf_hub_download(key, text_encoder_weights) _load_safetensor_weights(map_clip_text_encoder_weights, model, weight_file, float16) return model def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False): """Load the stable diffusion autoencoder from Hugging Face Hub.""" _check_key(key, "load_autoencoder") # Download the config and create the model vae_config = _MODELS[key]["vae_config"] with open(hf_hub_download(key, vae_config)) as f: config = json.load(f) model = Autoencoder( AutoencoderConfig( in_channels=config["in_channels"], out_channels=config["out_channels"], latent_channels_out=2 * config["latent_channels"], latent_channels_in=config["latent_channels"], block_out_channels=config["block_out_channels"], layers_per_block=config["layers_per_block"], norm_num_groups=config["norm_num_groups"], scaling_factor=config.get("scaling_factor", 0.18215), ) ) # Download the weights and map them into the model vae_weights = _MODELS[key]["vae"] weight_file = hf_hub_download(key, vae_weights) _load_safetensor_weights(map_vae_weights, model, weight_file, float16) return model def load_diffusion_config(key: str = _DEFAULT_MODEL): """Load the stable diffusion config from Hugging Face Hub.""" _check_key(key, "load_diffusion_config") diffusion_config = _MODELS[key]["diffusion_config"] with open(hf_hub_download(key, diffusion_config)) as f: config = json.load(f) return DiffusionConfig( beta_start=config["beta_start"], beta_end=config["beta_end"], beta_schedule=config["beta_schedule"], num_train_steps=config["num_train_timesteps"], ) def load_tokenizer( key: str = _DEFAULT_MODEL, vocab_key: str = "tokenizer_vocab", merges_key: str = "tokenizer_merges", ): _check_key(key, "load_tokenizer") vocab_file = hf_hub_download(key, _MODELS[key][vocab_key]) with open(vocab_file, encoding="utf-8") as f: vocab = json.load(f) merges_file = hf_hub_download(key, _MODELS[key][merges_key]) with open(merges_file, encoding="utf-8") as f: bpe_merges = f.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1] bpe_merges = [tuple(m.split()) for m in bpe_merges] bpe_ranks = dict(map(reversed, enumerate(bpe_merges))) return Tokenizer(bpe_ranks, vocab)
// Copyright © 2024 Apple Inc. import Foundation import Hub import MLX import MLXNN // port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/model_io.py /// Configuration for loading stable diffusion weights. /// /// These options can be tuned to conserve memory. public struct LoadConfiguration: Sendable { /// convert weights to float16 public var float16 = true /// quantize weights public var quantize = false public var dType: DType { float16 ? .float16 : .float32 } public init(float16: Bool = true, quantize: Bool = false) { self.float16 = float16 self.quantize = quantize } } /// Parameters for evaluating a stable diffusion prompt and generating latents public struct EvaluateParameters: Sendable { /// `cfg` value from the preset public var cfgWeight: Float /// number of steps -- default is from the preset public var steps: Int /// number of images to generate at a time public var imageCount = 1 public var decodingBatchSize = 1 /// size of the latent tensor -- the result image is a factor of 8 larger than this public var latentSize = [64, 64] public var seed: UInt64 public var prompt = "" public var negativePrompt = "" public init( cfgWeight: Float, steps: Int, imageCount: Int = 1, decodingBatchSize: Int = 1, latentSize: [Int] = [64, 64], seed: UInt64? = nil, prompt: String = "", negativePrompt: String = "" ) { self.cfgWeight = cfgWeight self.steps = steps self.imageCount = imageCount self.decodingBatchSize = decodingBatchSize self.latentSize = latentSize self.seed = seed ?? UInt64(Date.timeIntervalSinceReferenceDate * 1000) self.prompt = prompt self.negativePrompt = negativePrompt } } /// File types for ``StableDiffusionConfiguration/files``. Used by the presets to provide /// relative file paths for different types of files. enum FileKey { case unetConfig case unetWeights case textEncoderConfig case textEncoderWeights case textEncoderConfig2 case textEncoderWeights2 case vaeConfig case vaeWeights case diffusionConfig case tokenizerVocabulary case tokenizerMerges case tokenizerVocabulary2 case tokenizerMerges2 } /// Stable diffusion configuration -- this selects the model to load. /// /// Use the preset values: /// - ``presetSDXLTurbo`` /// - ``presetStableDiffusion21Base`` /// /// or use the enum (convenient for command line tools): /// /// - ``Preset/sdxlTurbo`` /// - ``Preset/sdxlTurbo`` /// /// Call ``download(hub:progressHandler:)`` to download the weights, then /// ``textToImageGenerator(hub:configuration:)`` or /// ``imageToImageGenerator(hub:configuration:)`` to produce the ``ImageGenerator``. /// /// The ``ImageGenerator`` has a method to generate the latents: /// - ``TextToImageGenerator/generateLatents(parameters:)`` /// - ``ImageToImageGenerator/generateLatents(image:parameters:strength:)`` /// /// Evaluate each of the latents from that iterator and use the decoder to turn the last latent /// into an image: /// /// - ``ImageGenerator/decode(xt:)`` /// /// Finally use ``Image`` to save it to a file or convert to a CGImage for display. 
public struct StableDiffusionConfiguration: Sendable { public let id: String let files: [FileKey: String] public let defaultParameters: @Sendable () -> EvaluateParameters let factory: @Sendable (HubApi, StableDiffusionConfiguration, LoadConfiguration) throws -> StableDiffusion public func download( hub: HubApi = HubApi(), progressHandler: @escaping (Progress) -> Void = { _ in } ) async throws { let repo = Hub.Repo(id: self.id) try await hub.snapshot( from: repo, matching: Array(files.values), progressHandler: progressHandler) } public func textToImageGenerator(hub: HubApi = HubApi(), configuration: LoadConfiguration) throws -> TextToImageGenerator? { try factory(hub, self, configuration) as? TextToImageGenerator } public func imageToImageGenerator(hub: HubApi = HubApi(), configuration: LoadConfiguration) throws -> ImageToImageGenerator? { try factory(hub, self, configuration) as? ImageToImageGenerator } public enum Preset: String, Codable, CaseIterable, Sendable { case base case sdxlTurbo = "sdxl-turbo" public var configuration: StableDiffusionConfiguration { switch self { case .base: presetStableDiffusion21Base case .sdxlTurbo: presetSDXLTurbo } } } /// See https://huggingface.co/stabilityai/sdxl-turbo for the model details and license public static let presetSDXLTurbo = StableDiffusionConfiguration( id: "stabilityai/sdxl-turbo", files: [ .unetConfig: "unet/config.json", .unetWeights: "unet/diffusion_pytorch_model.safetensors", .textEncoderConfig: "text_encoder/config.json", .textEncoderWeights: "text_encoder/model.safetensors", .textEncoderConfig2: "text_encoder_2/config.json", .textEncoderWeights2: "text_encoder_2/model.safetensors", .vaeConfig: "vae/config.json", .vaeWeights: "vae/diffusion_pytorch_model.safetensors", .diffusionConfig: "scheduler/scheduler_config.json", .tokenizerVocabulary: "tokenizer/vocab.json", .tokenizerMerges: "tokenizer/merges.txt", .tokenizerVocabulary2: "tokenizer_2/vocab.json", .tokenizerMerges2: "tokenizer_2/merges.txt", ], defaultParameters: { EvaluateParameters(cfgWeight: 0, steps: 2) }, factory: { hub, sdConfiguration, loadConfiguration in let sd = try StableDiffusionXL( hub: hub, configuration: sdConfiguration, dType: loadConfiguration.dType) if loadConfiguration.quantize { quantize(model: sd.textEncoder, filter: { k, m in m is Linear }) quantize(model: sd.textEncoder2, filter: { k, m in m is Linear }) quantize(model: sd.unet, groupSize: 32, bits: 8) } return sd } ) /// See https://huggingface.co/stabilityai/stable-diffusion-2-1-base for the model details and license public static let presetStableDiffusion21Base = StableDiffusionConfiguration( id: "stabilityai/stable-diffusion-2-1-base", files: [ .unetConfig: "unet/config.json", .unetWeights: "unet/diffusion_pytorch_model.safetensors", .textEncoderConfig: "text_encoder/config.json", .textEncoderWeights: "text_encoder/model.safetensors", .vaeConfig: "vae/config.json", .vaeWeights: "vae/diffusion_pytorch_model.safetensors", .diffusionConfig: "scheduler/scheduler_config.json", .tokenizerVocabulary: "tokenizer/vocab.json", .tokenizerMerges: "tokenizer/merges.txt", ], defaultParameters: { EvaluateParameters(cfgWeight: 7.5, steps: 50) }, factory: { hub, sdConfiguration, loadConfiguration in let sd = try StableDiffusionBase( hub: hub, configuration: sdConfiguration, dType: loadConfiguration.dType) if loadConfiguration.quantize { quantize(model: sd.textEncoder, filter: { k, m in m is Linear }) quantize(model: sd.unet, groupSize: 32, bits: 8) } return sd } ) } // MARK: - Key Mapping func keyReplace(_ replace: 
String, _ with: String) -> @Sendable (String) -> String? { return { [replace, with] key in if key.contains(replace) { return key.replacingOccurrences(of: replace, with: with) } return nil } } func dropPrefix(_ prefix: String) -> @Sendable (String) -> String? { return { [prefix] key in if key.hasPrefix(prefix) { return String(key.dropFirst(prefix.count)) } return nil } } // see map_unet_weights() let unetRules: [@Sendable (String) -> String?] = [ // Map up/downsampling keyReplace("downsamplers.0.conv", "downsample"), keyReplace("upsamplers.0.conv", "upsample"), // Map the mid block keyReplace("mid_block.resnets.0", "mid_blocks.0"), keyReplace("mid_block.attentions.0", "mid_blocks.1"), keyReplace("mid_block.resnets.1", "mid_blocks.2"), // Map attention layers keyReplace("to_k", "key_proj"), keyReplace("to_out.0", "out_proj"), keyReplace("to_q", "query_proj"), keyReplace("to_v", "value_proj"), // Map transformer ffn keyReplace("ff.net.2", "linear3"), ] func unetRemap(key: String, value: MLXArray) -> [(String, MLXArray)] { var key = key var value = value for rule in unetRules { key = rule(key) ?? key } // Map transformer ffn if key.contains("ff.net.0") { let k1 = key.replacingOccurrences(of: "ff.net.0.proj", with: "linear1") let k2 = key.replacingOccurrences(of: "ff.net.0.proj", with: "linear2") let (v1, v2) = value.split() return [(k1, v1), (k2, v2)] } if key.contains("conv_shortcut.weight") { value = value.squeezed() } // Transform the weights from 1x1 convs to linear if value.ndim == 4 && (key.contains("proj_in") || key.contains("proj_out")) { value = value.squeezed() } if value.ndim == 4 { value = value.transposed(0, 2, 3, 1) value = value.reshaped(-1).reshaped(value.shape) } return [(key, value)] } let clipRules: [@Sendable (String) -> String?] = [ dropPrefix("text_model."), dropPrefix("embeddings."), dropPrefix("encoder."), // Map attention layers keyReplace("self_attn.", "attention."), keyReplace("q_proj.", "query_proj."), keyReplace("k_proj.", "key_proj."), keyReplace("v_proj.", "value_proj."), // Map ffn layers keyReplace("mlp.fc1", "linear1"), keyReplace("mlp.fc2", "linear2"), ] func clipRemap(key: String, value: MLXArray) -> [(String, MLXArray)] { var key = key for rule in clipRules { key = rule(key) ?? key } // not used if key == "position_ids" { return [] } return [(key, value)] } let vaeRules: [@Sendable (String) -> String?] = [ // Map up/downsampling keyReplace("downsamplers.0.conv", "downsample"), keyReplace("upsamplers.0.conv", "upsample"), // Map attention layers keyReplace("to_k", "key_proj"), keyReplace("to_out.0", "out_proj"), keyReplace("to_q", "query_proj"), keyReplace("to_v", "value_proj"), // Map the mid block keyReplace("mid_block.resnets.0", "mid_blocks.0"), keyReplace("mid_block.attentions.0", "mid_blocks.1"), keyReplace("mid_block.resnets.1", "mid_blocks.2"), keyReplace("mid_blocks.1.key.", "mid_blocks.1.key_proj."), keyReplace("mid_blocks.1.query.", "mid_blocks.1.query_proj."), keyReplace("mid_blocks.1.value.", "mid_blocks.1.value_proj."), keyReplace("mid_blocks.1.proj_attn.", "mid_blocks.1.out_proj."), ] func vaeRemap(key: String, value: MLXArray) -> [(String, MLXArray)] { var key = key var value = value for rule in vaeRules { key = rule(key) ?? 
key } // Map the quant/post_quant layers if key.contains("quant_conv") { key = key.replacingOccurrences(of: "quant_conv", with: "quant_proj") value = value.squeezed() } // Map the conv_shortcut to linear if key.contains("conv_shortcut.weight") { value = value.squeezed() } if value.ndim == 4 { value = value.transposed(0, 2, 3, 1) value = value.reshaped(-1).reshaped(value.shape) } return [(key, value)] } func loadWeights( url: URL, model: Module, mapper: (String, MLXArray) -> [(String, MLXArray)], dType: DType ) throws { let weights = try loadArrays(url: url).flatMap { mapper($0.key, $0.value.asType(dType)) } // Note: not using verifier because some shapes change upon load try model.update(parameters: ModuleParameters.unflattened(weights), verify: .none) } // MARK: - Loading func resolve(hub: HubApi, configuration: StableDiffusionConfiguration, key: FileKey) -> URL { precondition( configuration.files[key] != nil, "configuration \(configuration.id) missing key: \(key)") let repo = Hub.Repo(id: configuration.id) let directory = hub.localRepoLocation(repo) return directory.appending(component: configuration.files[key]!) } func loadConfiguration<T: Decodable>( hub: HubApi, configuration: StableDiffusionConfiguration, key: FileKey, type: T.Type ) throws -> T { let url = resolve(hub: hub, configuration: configuration, key: key) return try JSONDecoder().decode(T.self, from: Data(contentsOf: url)) } func loadUnet(hub: HubApi, configuration: StableDiffusionConfiguration, dType: DType) throws -> UNetModel { let unetConfiguration = try loadConfiguration( hub: hub, configuration: configuration, key: .unetConfig, type: UNetConfiguration.self) let model = UNetModel(configuration: unetConfiguration) let weightsURL = resolve(hub: hub, configuration: configuration, key: .unetWeights) try loadWeights(url: weightsURL, model: model, mapper: unetRemap, dType: dType) return model } func loadTextEncoder( hub: HubApi, configuration: StableDiffusionConfiguration, configKey: FileKey = .textEncoderConfig, weightsKey: FileKey = .textEncoderWeights, dType: DType ) throws -> CLIPTextModel { let clipConfiguration = try loadConfiguration( hub: hub, configuration: configuration, key: configKey, type: CLIPTextModelConfiguration.self) let model = CLIPTextModel(configuration: clipConfiguration) let weightsURL = resolve(hub: hub, configuration: configuration, key: weightsKey) try loadWeights(url: weightsURL, model: model, mapper: clipRemap, dType: dType) return model } func loadAutoEncoder(hub: HubApi, configuration: StableDiffusionConfiguration, dType: DType) throws -> Autoencoder { let autoEncoderConfiguration = try loadConfiguration( hub: hub, configuration: configuration, key: .vaeConfig, type: AutoencoderConfiguration.self ) let model = Autoencoder(configuration: autoEncoderConfiguration) let weightsURL = resolve(hub: hub, configuration: configuration, key: .vaeWeights) try loadWeights(url: weightsURL, model: model, mapper: vaeRemap, dType: dType) return model } func loadDiffusionConfiguration(hub: HubApi, configuration: StableDiffusionConfiguration) throws -> DiffusionConfiguration { try loadConfiguration( hub: hub, configuration: configuration, key: .diffusionConfig, type: DiffusionConfiguration.self) } // MARK: - Tokenizer func loadTokenizer( hub: HubApi, configuration: StableDiffusionConfiguration, vocabulary: FileKey = .tokenizerVocabulary, merges: FileKey = .tokenizerMerges ) throws -> CLIPTokenizer { let vocabularyURL = resolve(hub: hub, configuration: configuration, key: vocabulary) let mergesURL = resolve(hub: 
hub, configuration: configuration, key: merges) let vocabulary = try JSONDecoder().decode( [String: Int].self, from: Data(contentsOf: vocabularyURL)) let merges = try String(contentsOf: mergesURL) .components(separatedBy: .newlines) // first line is a comment .dropFirst() .filter { !$0.isEmpty } return CLIPTokenizer(merges: merges, vocabulary: vocabulary) }
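A minimal sketch (not from the ported sources) of what the weight mappers do, running `map_vae_weights` on two representative diffusers-style keys with random stand-in tensors: conv kernels move from PyTorch's (out, in, kH, kW) layout to MLX's channels-last (out, kH, kW, in) layout, and mid-block / attention keys are renamed to match the module tree above. The exact keys and shapes in a real checkpoint may differ.

import mlx.core as mx
from stable_diffusion.model_io import map_vae_weights

conv_w = mx.random.normal((512, 512, 3, 3))   # PyTorch conv layout: (out, in, kH, kW)
[(k, v)] = map_vae_weights("decoder.mid_block.resnets.0.conv1.weight", conv_w)
print(k, v.shape)   # decoder.mid_blocks.0.conv1.weight (512, 3, 3, 512)

lin_w = mx.random.normal((512, 512))          # attention projections are plain Linear weights
[(k, v)] = map_vae_weights("decoder.mid_block.attentions.0.to_q.weight", lin_w)
print(k, v.shape)   # decoder.mid_blocks.1.query_proj.weight (512, 512)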
StableDiffusion
Sampler
# Copyright © 2023 Apple Inc. import mlx.core as mx from .config import DiffusionConfig def _linspace(a, b, num): x = mx.arange(0, num) / (num - 1) return (b - a) * x + a def _interp(y, x_new): """Interpolate the function defined by (arange(0, len(y)), y) at positions x_new.""" x_low = x_new.astype(mx.int32) x_high = mx.minimum(x_low + 1, len(y) - 1) y_low = y[x_low] y_high = y[x_high] delta_x = x_new - x_low y_new = y_low * (1 - delta_x) + delta_x * y_high return y_new class SimpleEulerSampler: """A simple Euler integrator that can be used to sample from our diffusion models. The method ``step()`` performs one Euler step from x_t to x_t_prev. """ def __init__(self, config: DiffusionConfig): # Compute the noise schedule if config.beta_schedule == "linear": betas = _linspace( config.beta_start, config.beta_end, config.num_train_steps ) elif config.beta_schedule == "scaled_linear": betas = _linspace( config.beta_start**0.5, config.beta_end**0.5, config.num_train_steps ).square() else: raise NotImplementedError(f"{config.beta_schedule} is not implemented.") alphas = 1 - betas alphas_cumprod = mx.cumprod(alphas) self._sigmas = mx.concatenate( [mx.zeros(1), ((1 - alphas_cumprod) / alphas_cumprod).sqrt()] ) @property def max_time(self): return len(self._sigmas) - 1 def sample_prior(self, shape, dtype=mx.float32, key=None): noise = mx.random.normal(shape, key=key) return ( noise * self._sigmas[-1] * (self._sigmas[-1].square() + 1).rsqrt() ).astype(dtype) def add_noise(self, x, t, key=None): noise = mx.random.normal(x.shape, key=key) s = self.sigmas(t) return (x + noise * s) * (s.square() + 1).rsqrt() def sigmas(self, t): return _interp(self._sigmas, t) def timesteps(self, num_steps: int, start_time=None, dtype=mx.float32): start_time = start_time or (len(self._sigmas) - 1) assert 0 < start_time <= (len(self._sigmas) - 1) steps = _linspace(start_time, 0, num_steps + 1).astype(dtype) return list(zip(steps, steps[1:])) def step(self, eps_pred, x_t, t, t_prev): sigma = self.sigmas(t).astype(eps_pred.dtype) sigma_prev = self.sigmas(t_prev).astype(eps_pred.dtype) dt = sigma_prev - sigma x_t_prev = (sigma.square() + 1).sqrt() * x_t + eps_pred * dt x_t_prev = x_t_prev * (sigma_prev.square() + 1).rsqrt() return x_t_prev class SimpleEulerAncestralSampler(SimpleEulerSampler): def step(self, eps_pred, x_t, t, t_prev): sigma = self.sigmas(t).astype(eps_pred.dtype) sigma_prev = self.sigmas(t_prev).astype(eps_pred.dtype) sigma2 = sigma.square() sigma_prev2 = sigma_prev.square() sigma_up = (sigma_prev2 * (sigma2 - sigma_prev2) / sigma2).sqrt() sigma_down = (sigma_prev2 - sigma_up**2).sqrt() dt = sigma_down - sigma x_t_prev = (sigma2 + 1).sqrt() * x_t + eps_pred * dt noise = mx.random.normal(x_t_prev.shape).astype(x_t_prev.dtype) x_t_prev = x_t_prev + noise * sigma_up x_t_prev = x_t_prev * (sigma_prev2 + 1).rsqrt() return x_t_prev
// Copyright © 2024 Apple Inc. import Foundation import MLX // port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/sampler.py /// Interpolate the function defined by `(0 ..< y.count) y)` at positions `xNew`. func interpolate(y: MLXArray, xNew: MLXArray) -> MLXArray { let xLow = xNew.asType(.int32) let xHigh = minimum(xLow + 1, y.count - 1) let yLow = y[xLow] let yHigh = y[xHigh] let deltaX = xNew - xLow let yNew = yLow * (1 - deltaX) + deltaX * yHigh return yNew } /// A simple Euler integrator that can be used to sample from our diffusion models. /// /// The method ``step()`` performs one Euler step from `x_t` to `x_t_prev`. class SimpleEulerSampler { let sigmas: MLXArray public init(configuration: DiffusionConfiguration) { let betas: MLXArray // compute the noise schedule switch configuration.betaSchedule { case .linear: betas = MLXArray.linspace( configuration.betaStart, configuration.betaEnd, count: configuration.trainSteps) case .scaledLinear: betas = MLXArray.linspace( sqrt(configuration.betaStart), sqrt(configuration.betaEnd), count: configuration.trainSteps ).square() } let alphas = 1 - betas let alphasCumprod = cumprod(alphas) self.sigmas = concatenated([ MLXArray.zeros([1]), ((1 - alphasCumprod) / alphasCumprod).sqrt(), ]) } public var maxTime: Int { sigmas.count - 1 } public func samplePrior(shape: [Int], dType: DType = .float32, key: MLXArray? = nil) -> MLXArray { let noise = MLXRandom.normal(shape, key: key) return (noise * sigmas[-1] * (sigmas[-1].square() + 1).rsqrt()).asType(dType) } public func addNoise(x: MLXArray, t: MLXArray, key: MLXArray? = nil) -> MLXArray { let noise = MLXRandom.normal(x.shape, key: key) let s = sigmas(t) return (x + noise * s) * (s.square() + 1).rsqrt() } public func sigmas(_ t: MLXArray) -> MLXArray { interpolate(y: sigmas, xNew: t) } public func timeSteps(steps: Int, start: Int? = nil, dType: DType = .float32) -> [( MLXArray, MLXArray )] { let start = start ?? (sigmas.count - 1) precondition(0 < start) precondition(start <= sigmas.count - 1) let steps = MLX.linspace(start, 0, count: steps + 1).asType(dType) return Array(zip(steps, steps[1...])) } open func step(epsPred: MLXArray, xt: MLXArray, t: MLXArray, tPrev: MLXArray) -> MLXArray { let dtype = epsPred.dtype let sigma = sigmas(t).asType(dtype) let sigmaPrev = sigmas(tPrev).asType(dtype) let dt = sigmaPrev - sigma var xtPrev = (sigma.square() + 1).sqrt() * xt + epsPred * dt xtPrev = xtPrev * (sigmaPrev.square() + 1).rsqrt() return xtPrev } } class SimpleEulerAncestralSampler: SimpleEulerSampler { open override func step(epsPred: MLXArray, xt: MLXArray, t: MLXArray, tPrev: MLXArray) -> MLXArray { let dtype = epsPred.dtype let sigma = sigmas(t).asType(dtype) let sigmaPrev = sigmas(tPrev).asType(dtype) let sigma2 = sigma.square() let sigmaPrev2 = sigmaPrev.square() let sigmaUp = (sigmaPrev2 * (sigma2 - sigmaPrev2) / sigma2).sqrt() let sigmaDown = (sigmaPrev2 - sigmaUp ** 2).sqrt() let dt = sigmaDown - sigma var xtPrev = (sigma2 + 1).sqrt() * xt + epsPred * dt let noise = MLXRandom.normal(xtPrev.shape).asType(xtPrev.dtype) xtPrev = xtPrev + noise * sigmaUp xtPrev = xtPrev * (sigmaPrev2 + 1).rsqrt() return xtPrev } }
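A minimal sketch (not from the ported sources) of driving `SimpleEulerSampler` by hand, with a dummy epsilon prediction standing in for the UNet, just to show the prior-sampling / timestep-pairing / step call pattern.

import mlx.core as mx
from stable_diffusion.config import DiffusionConfig
from stable_diffusion.sampler import SimpleEulerSampler

sampler = SimpleEulerSampler(DiffusionConfig())       # scaled_linear schedule, 1000 train steps
x_t = sampler.sample_prior((1, 64, 64, 4))            # scaled Gaussian prior at t = max_time

for t, t_prev in sampler.timesteps(num_steps=4):
    eps_pred = mx.zeros_like(x_t)                     # a real run calls the UNet here
    x_t = sampler.step(eps_pred, x_t, t, t_prev)

mx.eval(x_t)
print(x_t.shape)                                      # (1, 64, 64, 4)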
LM
Bitnet
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from functools import partial from typing import Any, Dict, Optional, Union import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention from .bitlinear_layers import BitLinear from .rope_utils import initialize_rope @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int num_key_value_heads: int rms_norm_eps: float vocab_size: int head_dim: Optional[int] = None max_position_embeddings: Optional[int] = None attention_bias: bool = False mlp_bias: bool = False rope_theta: float = 10000 rope_traditional: bool = False rope_scaling: Optional[Dict[str, Union[float, str]]] = None tie_word_embeddings: bool = True class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads self.n_kv_heads = n_kv_heads = args.num_key_value_heads self.head_dim = head_dim = args.head_dim or args.hidden_size // n_heads self.scale = head_dim**-0.5 attention_bias = args.attention_bias self.q_proj = BitLinear(dim, n_heads * head_dim, bias=attention_bias) self.k_proj = BitLinear(dim, n_kv_heads * head_dim, bias=attention_bias) self.v_proj = BitLinear(dim, n_kv_heads * head_dim, bias=attention_bias) self.o_proj = BitLinear(n_heads * head_dim, dim, bias=attention_bias) self.rope = initialize_rope( self.head_dim, args.rope_theta, args.rope_traditional, args.rope_scaling, args.max_position_embeddings, ) self.attn_sub_norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) # Prepare the queries, keys and values for the attention computation queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) output = self.attn_sub_norm(output) output = self.o_proj(output) return output @partial(mx.compile, shapeless=True) def relu2(x): return mx.square(nn.relu(x)) class MLP(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size hidden_dim = args.intermediate_size if hasattr(args, "mlp_bias"): mlp_bias = args.mlp_bias else: mlp_bias = False self.gate_proj = BitLinear(dim, hidden_dim, bias=mlp_bias) self.down_proj = BitLinear(hidden_dim, dim, bias=mlp_bias) self.up_proj = BitLinear(dim, hidden_dim, bias=mlp_bias) self.ffn_sub_norm = nn.RMSNorm(args.intermediate_size, eps=args.rms_norm_eps) def __call__(self, x) -> mx.array: x = relu2(self.gate_proj(x)) * self.up_proj(x) x = self.ffn_sub_norm(x) x = self.down_proj(x) return x class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.num_attention_heads = args.num_attention_heads self.hidden_size = args.hidden_size self.self_attn = Attention(args) self.mlp = MLP(args) 
self.input_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.post_attention_layernorm = nn.RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.self_attn(self.input_layernorm(x), mask, cache) h = x + r r = self.mlp(self.post_attention_layernorm(h)) out = h + r return out class LlamaModel(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = args.num_hidden_layers self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ TransformerBlock(args=args) for _ in range(args.num_hidden_layers) ] self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): h = self.embed_tokens(inputs) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, cache=c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.model_type = args.model_type self.model = LlamaModel(args) if not args.tie_word_embeddings: self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.model(inputs, mask, cache) if self.args.tie_word_embeddings: out = self.model.embed_tokens.as_linear(out) else: out = self.lm_head(out) return out def sanitize(self, weights): # Remove unused precomputed rotary freqs weights = { k: v for k, v in weights.items() if "self_attn.rotary_emb.inv_freq" not in k } if self.args.tie_word_embeddings: weights.pop("lm_head.weight", None) return weights @property def layers(self): return self.model.layers
// // Bitnet.swift // mlx-swift-examples // // Created by John Mai on 2025/6/12. // import Foundation import MLX import MLXFast import MLXLMCommon import MLXNN import Tokenizers // port of https://github.com/ml-explore/mlx-lm/blob/main/mlx_lm/models/bitnet.py private func makeBitLinearKernel() -> MLXFast.MLXFastKernel { let source = """ constexpr int M = 4; constexpr int BLOCK = 32; uint tid = thread_position_in_grid.y; uint in_offset = thread_position_in_grid.x; uint batch_idx = tid / (out_features / 4); uint row_idx = tid % (out_features / 4); float sum[4] = {0.0}; for (uint i = in_offset * M; i < in_features; i += BLOCK * M) { float v[M]; for (int j=0; j<M; j++) { v[j] = x[batch_idx * in_features + i + j]; } for (int j=0; j<M; j++) { uint8_t w = packed_weights[row_idx * in_features + i + j]; sum[0] += v[j] * ((w & 3) - 1); sum[1] += v[j] * (((w >> 2) & 3) - 1); sum[2] += v[j] * (((w >> 4) & 3) - 1); sum[3] += v[j] * (((w >> 6) & 3) - 1); } } for (int j=0; j<4; j++) { sum[j] = simd_sum(sum[j]); } // Apply weight scaling by diving them or multiplying them if (in_offset == 0) { float scale = invert_weight_scales ? 1 / weight_scale[0] : weight_scale[0]; for (int i=0; i<4; i++) { out[batch_idx * out_features + row_idx + i * (out_features/4)] = static_cast<T>(sum[i] * scale); } } """ return metalKernel( name: "bitlinear_matmul", inputNames: ["x", "packed_weights", "weight_scale"], outputNames: ["out"], source: source ) } private final class BitLinearKernelManager: @unchecked Sendable { static let shared = BitLinearKernelManager() let bitlinearKernel: MLXFast.MLXFastKernel private init() { bitlinearKernel = makeBitLinearKernel() } } private class BitLinear: Module { let inFeatures: Int let outFeatures: Int let invertWeightScales: Bool let weight: MLXArray let bias: MLXArray? 
@ModuleInfo(key: "weight_scale") var weightScale: MLXArray init( _ inFeatures: Int, _ outFeatures: Int, bias: Bool = true, invertWeightScales: Bool = false ) { self.inFeatures = inFeatures self.outFeatures = outFeatures let packedOutFeatures = Int(floor(Double(outFeatures + 3) / 4.0)) self.weight = MLXArray.zeros([packedOutFeatures, inFeatures], dtype: .uint8) self.invertWeightScales = invertWeightScales self._weightScale.wrappedValue = MLXArray([1.0]) if bias { self.bias = MLXArray.zeros([outFeatures]) } else { self.bias = nil } super.init() } private func executeMatmulKernel(_ x: MLXArray, _ packedWeights: MLXArray) -> MLXArray { let originalShape = x.shape var x = x if originalShape.count > 2 { x = x.reshaped(-1, originalShape[originalShape.count - 1]) } let totalBatchElements = x.dim(0) let inFeatures = x.dim(1) let outFeatures = self.outFeatures let dtype = self.weightScale.dtype assert(x.dtype == dtype, "Wrong type for input.") var outputs = BitLinearKernelManager.shared.bitlinearKernel( [x, packedWeights, weightScale], template: [ ("T", dtype), ("invert_weight_scales", invertWeightScales), ("in_features", inFeatures), ("out_features", outFeatures), ], grid: (32, Int(floor(Double(totalBatchElements * outFeatures / 4))), 1), threadGroup: (32, 1, 1), outputShapes: [[totalBatchElements, outFeatures]], outputDTypes: [dtype] )[0] if originalShape.count > 2 { outputs = outputs.reshaped(Array(originalShape.dropLast()) + [outFeatures]) } return outputs } func callAsFunction(_ x: MLXArray) -> MLXArray { var y = executeMatmulKernel(x, weight) if let bias { y = y + bias } return y } } // MARK: - Model Configuration public struct BitnetConfiguration: Codable, Sendable { var modelType: String var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var rmsNormEps: Float var vocabularySize: Int var headDimensions: Int? var maxPositionEmbeddings: Int? var kvHeads: Int? var attentionBias: Bool var mlpBias: Bool var ropeTheta: Float var ropeTraditional: Bool var ropeScaling: [String: StringOrNumber]? var tieWordEmbeddings: Bool public init( modelType: String = "bitnet", hiddenSize: Int, hiddenLayers: Int, intermediateSize: Int, attentionHeads: Int, rmsNormEps: Float, vocabularySize: Int, headDimensions: Int? = nil, maxPositionEmbeddings: Int? = nil, kvHeads: Int? = nil, attentionBias: Bool = false, mlpBias: Bool = false, ropeTheta: Float = 10000, ropeTraditional: Bool = false, ropeScaling: [String: StringOrNumber]? = nil, tieWordEmbeddings: Bool = true ) { self.modelType = modelType self.hiddenSize = hiddenSize self.hiddenLayers = hiddenLayers self.intermediateSize = intermediateSize self.attentionHeads = attentionHeads self.rmsNormEps = rmsNormEps self.vocabularySize = vocabularySize self.headDimensions = headDimensions self.maxPositionEmbeddings = maxPositionEmbeddings self.kvHeads = kvHeads ?? attentionHeads self.attentionBias = attentionBias self.mlpBias = mlpBias self.ropeTheta = ropeTheta self.ropeTraditional = ropeTraditional self.ropeScaling = ropeScaling self.tieWordEmbeddings = tieWordEmbeddings } var resolvedKvHeads: Int { kvHeads ?? attentionHeads } var resolvedHeadDimensions: Int { headDimensions ?? 
(hiddenSize / attentionHeads) } enum CodingKeys: String, CodingKey { case modelType = "model_type" case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case headDimensions = "head_dim" case maxPositionEmbeddings = "max_position_embeddings" case kvHeads = "num_key_value_heads" case attentionBias = "attention_bias" case mlpBias = "mlp_bias" case ropeTheta = "rope_theta" case ropeTraditional = "rope_traditional" case ropeScaling = "rope_scaling" case tieWordEmbeddings = "tie_word_embeddings" } public init(from decoder: Swift.Decoder) throws { let container = try decoder.container(keyedBy: CodingKeys.self) modelType = try container.decodeIfPresent(String.self, forKey: .modelType) ?? "bitnet" hiddenSize = try container.decode(Int.self, forKey: .hiddenSize) hiddenLayers = try container.decode(Int.self, forKey: .hiddenLayers) intermediateSize = try container.decode(Int.self, forKey: .intermediateSize) attentionHeads = try container.decode(Int.self, forKey: .attentionHeads) rmsNormEps = try container.decode(Float.self, forKey: .rmsNormEps) vocabularySize = try container.decode(Int.self, forKey: .vocabularySize) headDimensions = try container.decodeIfPresent(Int.self, forKey: .headDimensions) maxPositionEmbeddings = try container.decodeIfPresent( Int.self, forKey: .maxPositionEmbeddings ) kvHeads = try container.decodeIfPresent(Int.self, forKey: .kvHeads) ?? attentionHeads attentionBias = try container.decodeIfPresent(Bool.self, forKey: .attentionBias) ?? false mlpBias = try container.decodeIfPresent(Bool.self, forKey: .mlpBias) ?? false ropeTheta = try container.decodeIfPresent(Float.self, forKey: .ropeTheta) ?? 10000 ropeTraditional = try container.decodeIfPresent(Bool.self, forKey: .ropeTraditional) ?? false ropeScaling = try container.decodeIfPresent( [String: StringOrNumber].self, forKey: .ropeScaling ) tieWordEmbeddings = try container.decodeIfPresent(Bool.self, forKey: .tieWordEmbeddings) ?? 
true } } // MARK: - Attention private class Attention: Module { let args: BitnetConfiguration let scale: Float @ModuleInfo(key: "q_proj") var qProj: BitLinear @ModuleInfo(key: "k_proj") var kProj: BitLinear @ModuleInfo(key: "v_proj") var vProj: BitLinear @ModuleInfo(key: "o_proj") var oProj: BitLinear @ModuleInfo(key: "attn_sub_norm") var attnSubNorm: RMSNorm let rope: RoPE init(_ args: BitnetConfiguration) { self.args = args let dim = args.hiddenSize let headDim = args.resolvedHeadDimensions let nHeads = args.attentionHeads let nKvHeads = args.resolvedKvHeads scale = pow(Float(headDim), -0.5) _qProj.wrappedValue = BitLinear(dim, nHeads * headDim, bias: args.attentionBias) _kProj.wrappedValue = BitLinear(dim, nKvHeads * headDim, bias: args.attentionBias) _vProj.wrappedValue = BitLinear(dim, nKvHeads * headDim, bias: args.attentionBias) _oProj.wrappedValue = BitLinear(nHeads * headDim, dim, bias: args.attentionBias) _attnSubNorm.wrappedValue = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) let ropeScale: Float if let ropeScaling = args.ropeScaling, ropeScaling["type"] == .string("linear"), let factor = ropeScaling["factor"] { if let v = factor.asFloat() { ropeScale = 1 / v } else { fatalError("ropeScaling.factor must be a float") } } else { ropeScale = 1 } rope = RoPE( dimensions: headDim, traditional: args.ropeTraditional, base: args.ropeTheta, scale: ropeScale ) } func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = qProj(x) var keys = kProj(x) var values = vProj(x) queries = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, args.resolvedKvHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.resolvedKvHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) (keys, values) = cache.update(keys: keys, values: values) } else { queries = rope(queries) keys = rope(keys) } let output = MLXFast.scaledDotProductAttention( queries: queries, keys: keys, values: values, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) let normedOutput = attnSubNorm(output) return oProj(normedOutput) } } // MARK: - MLP private class MLP: Module { @ModuleInfo(key: "gate_proj") var gateProj: BitLinear @ModuleInfo(key: "down_proj") var downProj: BitLinear @ModuleInfo(key: "up_proj") var upProj: BitLinear @ModuleInfo(key: "ffn_sub_norm") var ffnSubNorm: RMSNorm init(_ args: BitnetConfiguration) { let dim = args.hiddenSize let hiddenDim = args.intermediateSize _gateProj.wrappedValue = BitLinear(dim, hiddenDim, bias: args.mlpBias) _downProj.wrappedValue = BitLinear(hiddenDim, dim, bias: args.mlpBias) _upProj.wrappedValue = BitLinear(dim, hiddenDim, bias: args.mlpBias) _ffnSubNorm.wrappedValue = RMSNorm(dimensions: args.intermediateSize, eps: args.rmsNormEps) } func callAsFunction(_ x: MLXArray) -> MLXArray { let gated = reluSquared(gateProj(x)) * upProj(x) let normed = ffnSubNorm(gated) return downProj(normed) } } // MARK: - Transformer Block private class TransformerBlock: Module { @ModuleInfo(key: "self_attn") var attention: Attention var mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: RMSNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: RMSNorm init(_ args: BitnetConfiguration) { _attention.wrappedValue = Attention(args) mlp = MLP(args) _inputLayerNorm.wrappedValue = RMSNorm( dimensions: 
args.hiddenSize, eps: args.rmsNormEps ) _postAttentionLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps ) } func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { var r = attention(inputLayerNorm(x), mask: mask, cache: cache) let h = x + r r = mlp(postAttentionLayerNorm(h)) let out = h + r return out } } // MARK: - Bitnet Model Inner private class BitnetModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding fileprivate let layers: [TransformerBlock] var norm: RMSNorm init(_ args: BitnetConfiguration) { precondition(args.vocabularySize > 0) _embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize ) layers = (0 ..< args.hiddenLayers).map { _ in TransformerBlock(args) } norm = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) } func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } // MARK: - Bitnet Model public class BitnetModel: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] fileprivate let model: BitnetModelInner let configuration: BitnetConfiguration @ModuleInfo(key: "lm_head") var lmHead: Linear? public init(_ args: BitnetConfiguration) { configuration = args vocabularySize = args.vocabularySize kvHeads = (0 ..< args.hiddenLayers).map { _ in args.resolvedKvHeads } model = BitnetModelInner(args) if !args.tieWordEmbeddings { _lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: false) } } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray { let out = model(inputs, cache: cache) if let lmHead { return lmHead(out) } else { return model.embedTokens.asLinear(out) } } public func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] { var weights = weights weights = weights.filter { !$0.key.contains("self_attn.rotary_emb.inv_freq") } if configuration.tieWordEmbeddings { weights["lm_head.weight"] = nil } return weights } } extension BitnetModel: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["q_proj", "v_proj"]) } } }
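The Metal kernel in the BitLinear port above decodes four 2-bit ternary weights from every byte of packed_weights (((w >> 2*k) & 3) - 1). As a reading aid, a small MLX Python sketch of that unpacking convention with a plain-matmul reference follows; the packing layout (one quarter of the output rows per bit pair) is inferred from the kernel's indexing, and the shapes and random test bytes are illustrative only.

import mlx.core as mx

def unpack_ternary(packed, out_features):
    # packed: (out_features // 4, in_features) uint8; bit pair k of each byte
    # encodes the weight (codes 0/1/2 -> -1/0/+1) for output row
    # k * (out_features // 4) + row, matching the kernel's output indexing.
    p = packed.astype(mx.int32)
    parts = [((p // (4**k)) % 4) - 1 for k in range(4)]
    return mx.concatenate(parts, axis=0)  # (out_features, in_features)

def bitlinear_reference(x, packed, weight_scale, out_features):
    w = unpack_ternary(packed, out_features).astype(x.dtype)
    return (x @ w.T) * weight_scale

out_features, in_features = 8, 4
# real weights use only codes 0..2; random bytes here are for shape-checking only
packed = mx.random.randint(0, 256, (out_features // 4, in_features)).astype(mx.uint8)
x = mx.random.normal((2, in_features))
print(bitlinear_reference(x, packed, 0.5, out_features).shape)  # (2, 8)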
LM
Cohere
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import Any, Optional import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int = 8192 num_hidden_layers: int = 40 intermediate_size: int = 22528 num_attention_heads: int = 64 num_key_value_heads: int = 64 rope_theta: float = 8000000.0 vocab_size: int = 256000 layer_norm_eps: float = 1e-05 logit_scale: float = 0.0625 attention_bias: bool = False layer_norm_bias: bool = False use_qk_norm: bool = False class LayerNorm2D(nn.Module): def __init__(self, d1, d2, eps): super().__init__() self.weight = mx.zeros((d1, d2)) self.eps = eps def __call__(self, x): return self.weight * mx.fast.layer_norm(x, None, None, self.eps) class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads self.n_kv_heads = n_kv_heads = args.num_key_value_heads head_dim = args.hidden_size // args.num_attention_heads self.scale = head_dim**-0.5 attetion_bias = args.attention_bias self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=attetion_bias) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=attetion_bias) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=attetion_bias) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=attetion_bias) self.use_qk_norm = args.use_qk_norm if self.use_qk_norm: self.q_norm = LayerNorm2D(self.n_heads, head_dim, eps=args.layer_norm_eps) self.k_norm = LayerNorm2D( self.n_kv_heads, head_dim, eps=args.layer_norm_eps ) self.rope = nn.RoPE(head_dim, traditional=True, base=args.rope_theta) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) queries = queries.reshape(B, L, self.n_heads, -1) keys = keys.reshape(B, L, self.n_kv_heads, -1) if self.use_qk_norm: queries = self.q_norm(queries) keys = self.k_norm(keys) queries = queries.transpose(0, 2, 1, 3) keys = keys.transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class MLP(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.gate_proj = nn.Linear(dim, hidden_dim, bias=False) self.up_proj = nn.Linear(dim, hidden_dim, bias=False) self.down_proj = nn.Linear(hidden_dim, dim, bias=False) def __call__(self, x): return self.down_proj(nn.silu(self.gate_proj(x)) * self.up_proj(x)) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.hidden_size = args.hidden_size self.n_heads = args.num_attention_heads self.self_attn = Attention(args) self.mlp = MLP(args.hidden_size, args.intermediate_size) self.input_layernorm = nn.LayerNorm( args.hidden_size, eps=args.layer_norm_eps, bias=args.layer_norm_bias ) self.args = args def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: h = self.input_layernorm(x) 
attn_h = self.self_attn(h, mask, cache) ff_h = self.mlp(h) return attn_h + ff_h + x class CohereModel(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = args.num_hidden_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ TransformerBlock(args=args) for _ in range(args.num_hidden_layers) ] self.norm = nn.LayerNorm( args.hidden_size, eps=args.layer_norm_eps, bias=args.layer_norm_bias ) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): h = self.embed_tokens(inputs) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.model_type = args.model_type self.model = CohereModel(args) self.args = args def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.model(inputs, mask, cache) out = self.model.embed_tokens.as_linear(out) out = out * self.model.args.logit_scale return out @property def layers(self): return self.model.layers
import Foundation import MLX import MLXLMCommon import MLXNN // port of https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/models/cohere.py private class Attention: Module { let args: CohereConfiguration let scale: Float @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "o_proj") var wo: Linear let rope: RoPE public init(_ args: CohereConfiguration) { self.args = args let dim = args.hiddenSize let heads = args.attentionHeads let kvHeads = args.kvHeads let headDim = args.hiddenSize / heads self.scale = pow(Float(headDim), -0.5) self._wq.wrappedValue = Linear(dim, heads * headDim, bias: false) self._wk.wrappedValue = Linear(dim, kvHeads * headDim, bias: false) self._wv.wrappedValue = Linear(dim, kvHeads * headDim, bias: false) self._wo.wrappedValue = Linear(heads * headDim, dim, bias: false) self.rope = RoPE( dimensions: headDim, traditional: args.ropeTraditional, base: args.ropeTheta) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = wq(x) var keys = wk(x) var values = wv(x) // prepare the queries, keys and values for the attention computation queries = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "gate_proj") var gate: Linear @ModuleInfo(key: "down_proj") var down: Linear @ModuleInfo(key: "up_proj") var up: Linear public init(dimensions: Int, hiddenDimensions: Int) { self._gate.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) self._up.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) self._down.wrappedValue = Linear(hiddenDimensions, dimensions, bias: false) } public func callAsFunction(_ x: MLXArray) -> MLXArray { down(silu(gate(x)) * up(x)) } } private class TransformerBlock: Module { @ModuleInfo(key: "self_attn") var attention: Attention let mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: LayerNorm public init(_ args: CohereConfiguration) { self._attention.wrappedValue = Attention(args) self.mlp = MLP(dimensions: args.hiddenSize, hiddenDimensions: args.intermediateSize) self._inputLayerNorm.wrappedValue = LayerNorm( dimensions: args.hiddenSize, eps: args.layerNormEps) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? 
) -> MLXArray { let h = inputLayerNorm(x) let attnH = attention(h, mask: mask, cache: cache) let ffH = mlp(h) return attnH + ffH + x } } public class CohereModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding fileprivate let layers: [TransformerBlock] let norm: LayerNorm public init(_ args: CohereConfiguration) { precondition(args.vocabularySize > 0) self._embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers) .map { _ in TransformerBlock(args) } self.norm = LayerNorm(dimensions: args.hiddenSize, eps: args.layerNormEps) } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class CohereModel: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] let model: CohereModelInner let logitScale: Float public init(_ args: CohereConfiguration) { self.vocabularySize = args.vocabularySize self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads } self.model = CohereModelInner(args) self.logitScale = args.logitScale } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray { var out = model(inputs, cache: cache) out = model.embedTokens.asLinear(out) out = out * self.logitScale return out } } public struct CohereConfiguration: Codable, Sendable { var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var layerNormEps: Float var vocabularySize: Int var kvHeads: Int var ropeTheta: Float = 8000000.0 var ropeTraditional: Bool = true var ropeScaling: [String: StringOrNumber]? = nil var logitScale: Float enum CodingKeys: String, CodingKey { case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case kvHeads = "num_key_value_heads" case ropeTheta = "rope_theta" case vocabularySize = "vocab_size" case layerNormEps = "layer_norm_eps" case logitScale = "logit_scale" case ropeTraditional = "rope_traditional" case ropeScaling = "rope_scaling" } public init(from decoder: Decoder) throws { // custom implementation to handle optional keys with required values let container: KeyedDecodingContainer<CohereConfiguration.CodingKeys> = try decoder.container( keyedBy: CohereConfiguration.CodingKeys.self) self.hiddenSize = try container.decode( Int.self, forKey: CohereConfiguration.CodingKeys.hiddenSize) self.hiddenLayers = try container.decode( Int.self, forKey: CohereConfiguration.CodingKeys.hiddenLayers) self.intermediateSize = try container.decode( Int.self, forKey: CohereConfiguration.CodingKeys.intermediateSize) self.attentionHeads = try container.decode( Int.self, forKey: CohereConfiguration.CodingKeys.attentionHeads) self.layerNormEps = try container.decode( Float.self, forKey: CohereConfiguration.CodingKeys.layerNormEps) self.vocabularySize = try container.decode( Int.self, forKey: CohereConfiguration.CodingKeys.vocabularySize) self.kvHeads = try container.decode( Int.self, forKey: CohereConfiguration.CodingKeys.kvHeads) self.ropeTheta = try container.decodeIfPresent( Float.self, forKey: CohereConfiguration.CodingKeys.ropeTheta) ?? 
8000000.0 self.ropeScaling = try container.decodeIfPresent( [String: StringOrNumber].self, forKey: CohereConfiguration.CodingKeys.ropeScaling) self.logitScale = try container.decode( Float.self, forKey: CohereConfiguration.CodingKeys.logitScale) } } // MARK: - LoRA extension CohereModel: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["q_proj", "v_proj"]) } } }
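One structural difference from the other decoder blocks in this document is visible above: Cohere's TransformerBlock feeds the same pre-normed activations to both attention and the MLP and adds both outputs to the residual in a single step. Below is a tiny MLX Python sketch of that parallel-residual pattern, with plain Linear layers standing in for the real attention and gated-MLP sub-modules (purely illustrative).

import mlx.core as mx
import mlx.nn as nn

def parallel_block(x, norm, attn, mlp):
    # h_out = x + attn(norm(x)) + mlp(norm(x)), as in TransformerBlock.__call__ above
    h = norm(x)
    return x + attn(h) + mlp(h)

dim = 16
norm = nn.LayerNorm(dim)
attn = nn.Linear(dim, dim)  # stand-in for Attention
mlp = nn.Linear(dim, dim)   # stand-in for the gated MLP
x = mx.random.normal((1, 4, dim))
print(parallel_block(x, norm, attn, mlp).shape)  # (1, 4, 16)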
LM
GLM
# Copyright © 2025 Apple Inc. from dataclasses import dataclass from typing import Any, Optional import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int attention_bias: bool head_dim: int rms_norm_eps: float vocab_size: int num_key_value_heads: int partial_rotary_factor: float rope_theta: float rope_traditional: bool = True max_position_embeddings: int = 32768 class Glm4MLP(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.gate_up_proj = nn.Linear( args.hidden_size, 2 * args.intermediate_size, bias=False ) self.down_proj = nn.Linear(args.intermediate_size, args.hidden_size, bias=False) def __call__(self, x) -> mx.array: x = self.gate_up_proj(x) gate, up_states = mx.split(x, 2, axis=-1) return self.down_proj(nn.silu(gate) * up_states) class Glm4Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.head_dim = getattr( args, "head_dim", args.hidden_size // args.num_attention_heads ) self.n_heads = args.num_attention_heads self.n_kv_heads = args.num_key_value_heads self.scale = self.head_dim**-0.5 self.q_proj = nn.Linear( args.hidden_size, args.num_attention_heads * self.head_dim, bias=args.attention_bias, ) self.k_proj = nn.Linear( args.hidden_size, args.num_key_value_heads * self.head_dim, bias=args.attention_bias, ) self.v_proj = nn.Linear( args.hidden_size, args.num_key_value_heads * self.head_dim, bias=args.attention_bias, ) self.o_proj = nn.Linear( args.num_attention_heads * self.head_dim, args.hidden_size, bias=False ) self.rope = nn.RoPE( dims=int(self.head_dim * args.partial_rotary_factor), base=args.rope_theta, traditional=args.rope_traditional, ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class Glm4DecoderLayer(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.self_attn = Glm4Attention(args=args) self.mlp = Glm4MLP(args) self.input_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.post_attention_layernorm = nn.RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) self.post_self_attn_layernorm = nn.RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) self.post_mlp_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None ) -> mx.array: x = x + self.post_self_attn_layernorm( self.self_attn(self.input_layernorm(x), mask, cache) ) residual = x x = ( self.post_mlp_layernorm(self.mlp(self.post_attention_layernorm(x))) + residual ) return x class Glm4Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() 
self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ Glm4DecoderLayer(args=args) for _ in range(args.num_hidden_layers) ] self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ): h = self.embed_tokens(inputs) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, cache=c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.model_type = args.model_type self.model = Glm4Model(args) self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False) def __call__( self, inputs: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ): out = self.model(inputs, mask, cache) return self.lm_head(out) @property def layers(self): return self.model.layers
// // GLM4.swift // LLM // // Created by John Mai on 2025/5/1. // import Foundation import MLX import MLXLMCommon import MLXNN // port of https://github.com/ml-explore/mlx-lm/blob/main/mlx_lm/models/glm4.py private class Attention: Module { let args: GLM4Configuration let scale: Float @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "o_proj") var wo: Linear let rope: RoPE public init(_ args: GLM4Configuration) { self.args = args let headDim = args.headDim > 0 ? args.headDim : args.hiddenSize / args.attentionHeads self.scale = pow(Float(headDim), -0.5) _wq.wrappedValue = Linear( args.hiddenSize, args.attentionHeads * headDim, bias: args.attentionBias) _wk.wrappedValue = Linear(args.hiddenSize, args.kvHeads * headDim, bias: args.attentionBias) _wv.wrappedValue = Linear(args.hiddenSize, args.kvHeads * headDim, bias: args.attentionBias) _wo.wrappedValue = Linear(args.attentionHeads * headDim, args.hiddenSize, bias: false) self.rope = RoPE( dimensions: Int(Float(headDim) * args.partialRotaryFactor), traditional: args.ropeTraditional, base: args.ropeTheta) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = wq(x) var keys = wk(x) var values = wv(x) queries = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "gate_up_proj") var gateUp: Linear @ModuleInfo(key: "down_proj") var down: Linear public init(_ args: GLM4Configuration) { _gateUp.wrappedValue = Linear(args.hiddenSize, 2 * args.intermediateSize, bias: false) _down.wrappedValue = Linear(args.intermediateSize, args.hiddenSize, bias: false) } public func callAsFunction(_ x: MLXArray) -> MLXArray { let x = gateUp(x) let chunks = split(x, parts: 2, axis: -1) return down(silu(chunks[0]) * chunks[1]) } } private class GLM4DecoderLayer: Module { @ModuleInfo(key: "self_attn") var attention: Attention let mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: RMSNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: RMSNorm @ModuleInfo(key: "post_self_attn_layernorm") var postSelfAttnLayerNorm: RMSNorm @ModuleInfo(key: "post_mlp_layernorm") var postMlpLayerNorm: RMSNorm public init(_ args: GLM4Configuration) { _attention.wrappedValue = Attention(args) self.mlp = MLP(args) _inputLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) _postAttentionLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) _postSelfAttnLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) _postMlpLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? 
) -> MLXArray { var x = x + postSelfAttnLayerNorm( attention(inputLayerNorm(x), mask: mask, cache: cache) ) let residual = x x = postMlpLayerNorm(mlp(postAttentionLayerNorm(x))) + residual return x } } private class GLM4ModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding fileprivate let layers: [GLM4DecoderLayer] let norm: RMSNorm public init(_ args: GLM4Configuration) { precondition(args.vocabularySize > 0) _embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers) .map { _ in GLM4DecoderLayer(args) } self.norm = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class GLM4Model: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] private let model: GLM4ModelInner let configuration: GLM4Configuration let modelType: String @ModuleInfo(key: "lm_head") var lmHead: Linear public init(_ args: GLM4Configuration) { self.configuration = args self.vocabularySize = args.vocabularySize self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads } self.modelType = args.modelType self.model = GLM4ModelInner(args) _lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: false) } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray { let out = model(inputs, cache: cache) return lmHead(out) } public func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] { var weights = weights if configuration.tieWordEmbeddings { weights["lm_head.weight"] = nil } return weights } } public struct GLM4Configuration: Codable, Sendable { var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var attentionBias: Bool var headDim: Int var rmsNormEps: Float var vocabularySize: Int var kvHeads: Int var partialRotaryFactor: Float var ropeTheta: Float = 10000.0 var ropeTraditional: Bool = true var tieWordEmbeddings = false var maxPositionEmbeddings: Int = 32768 var modelType: String enum CodingKeys: String, CodingKey { case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case attentionBias = "attention_bias" case headDim = "head_dim" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case kvHeads = "num_key_value_heads" case partialRotaryFactor = "partial_rotary_factor" case ropeTheta = "rope_theta" case ropeTraditional = "rope_traditional" case tieWordEmbeddings = "tie_word_embeddings" case maxPositionEmbeddings = "max_position_embeddings" case modelType = "model_type" } public init(from decoder: Decoder) throws { let container: KeyedDecodingContainer<GLM4Configuration.CodingKeys> = try decoder.container( keyedBy: GLM4Configuration.CodingKeys.self) self.modelType = try container.decode( String.self, forKey: GLM4Configuration.CodingKeys.modelType) self.hiddenSize = try container.decode( Int.self, forKey: GLM4Configuration.CodingKeys.hiddenSize) self.hiddenLayers = try container.decode( Int.self, forKey: GLM4Configuration.CodingKeys.hiddenLayers) self.intermediateSize = try container.decode( Int.self, forKey: GLM4Configuration.CodingKeys.intermediateSize) self.attentionHeads = 
try container.decode( Int.self, forKey: GLM4Configuration.CodingKeys.attentionHeads) self.attentionBias = try container.decode( Bool.self, forKey: GLM4Configuration.CodingKeys.attentionBias) self.headDim = try container.decode( Int.self, forKey: GLM4Configuration.CodingKeys.headDim) self.rmsNormEps = try container.decode( Float.self, forKey: GLM4Configuration.CodingKeys.rmsNormEps) self.vocabularySize = try container.decode( Int.self, forKey: GLM4Configuration.CodingKeys.vocabularySize) self.kvHeads = try container.decode(Int.self, forKey: GLM4Configuration.CodingKeys.kvHeads) self.partialRotaryFactor = try container.decode( Float.self, forKey: GLM4Configuration.CodingKeys.partialRotaryFactor) self.ropeTheta = try container.decodeIfPresent( Float.self, forKey: GLM4Configuration.CodingKeys.ropeTheta) ?? 10000.0 self.ropeTraditional = try container.decodeIfPresent( Bool.self, forKey: GLM4Configuration.CodingKeys.ropeTraditional) ?? true self.tieWordEmbeddings = try container.decodeIfPresent(Bool.self, forKey: .tieWordEmbeddings) ?? false self.maxPositionEmbeddings = try container.decodeIfPresent(Int.self, forKey: .maxPositionEmbeddings) ?? 32768 } } // MARK: - LoRA extension GLM4Model: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["q_proj", "v_proj"]) } } }
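Glm4MLP above differs from the usual three-projection MLP by fusing the gate and up projections into a single gate_up_proj and splitting its output in half. A minimal MLX Python sketch of that fused split follows (dimensions are illustrative assumptions).

import mlx.core as mx
import mlx.nn as nn

dim, hidden = 8, 32
gate_up_proj = nn.Linear(dim, 2 * hidden, bias=False)
down_proj = nn.Linear(hidden, dim, bias=False)

x = mx.random.normal((1, 3, dim))
gate, up = mx.split(gate_up_proj(x), 2, axis=-1)  # halves of the fused projection
y = down_proj(nn.silu(gate) * up)
print(y.shape)  # (1, 3, 8)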
LM
Gemma
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import Any, Optional import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int head_dim: int rms_norm_eps: float vocab_size: int num_key_value_heads: int rope_theta: float = 10000 rope_traditional: bool = False class RMSNorm(nn.Module): def __init__(self, dims: int, eps: float = 1e-5): super().__init__() self.weight = mx.ones((dims,)) self.eps = eps def __call__(self, x): return mx.fast.rms_norm(x, 1.0 + self.weight, self.eps) class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads self.n_kv_heads = n_kv_heads = args.num_key_value_heads self.head_dim = head_dim = args.head_dim self.scale = head_dim**-0.5 self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=False) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=False) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=False) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=False) self.rope = nn.RoPE( head_dim, traditional=args.rope_traditional, base=args.rope_theta, ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) # Prepare the queries, keys and values for the attention computation queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class MLP(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.gate_proj = nn.Linear(dim, hidden_dim, bias=False) self.down_proj = nn.Linear(hidden_dim, dim, bias=False) self.up_proj = nn.Linear(dim, hidden_dim, bias=False) def __call__(self, x) -> mx.array: return self.down_proj(nn.gelu(self.gate_proj(x)) * self.up_proj(x)) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.num_attention_heads = args.num_attention_heads self.hidden_size = args.hidden_size self.self_attn = Attention(args) self.mlp = MLP(args.hidden_size, args.intermediate_size) self.input_layernorm = RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.post_attention_layernorm = RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.args = args def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.self_attn(self.input_layernorm(x), mask, cache) h = x + r r = self.mlp(self.post_attention_layernorm(h)) out = h + r return out class GemmaModel(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = args.num_hidden_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(args.vocab_size, 
args.hidden_size) self.layers = [ TransformerBlock(args=args) for _ in range(args.num_hidden_layers) ] self.norm = RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): h = self.embed_tokens(inputs) h = h * (self.args.hidden_size**0.5) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.model_type = args.model_type self.model = GemmaModel(args) self.args = args def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.model(inputs, mask, cache) out = self.model.embed_tokens.as_linear(out) return out @property def layers(self): return self.model.layers
// Copyright © 2024 Apple Inc. import Foundation import MLX import MLXLMCommon import MLXNN import Tokenizers // Port of https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/models/gemma.py // Specialized norm for Gemma private class RMSNorm: Module, UnaryLayer { let weight: MLXArray let eps: Float public init(dimensions: Int, eps: Float = 1e-5) { self.weight = MLXArray.ones([dimensions]) self.eps = eps } public func callAsFunction(_ x: MLXArray) -> MLXArray { return MLXFast.rmsNorm(x, weight: 1.0 + self.weight, eps: self.eps) } } private class Attention: Module { let args: GemmaConfiguration let nHeads: Int let nKVHeads: Int let headDim: Int let scale: Float @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "o_proj") var wo: Linear let rope: RoPE public init(_ args: GemmaConfiguration) { self.args = args let dim = args.hiddenSize self.nHeads = args.attentionHeads self.nKVHeads = args.kvHeads self.headDim = args.headDimensions self.scale = pow(Float(headDim), -0.5) self._wq.wrappedValue = Linear(dim, nHeads * headDim, bias: false) self._wk.wrappedValue = Linear(dim, nKVHeads * headDim, bias: false) self._wv.wrappedValue = Linear(dim, nKVHeads * headDim, bias: false) self._wo.wrappedValue = Linear(nHeads * headDim, dim, bias: false) self.rope = RoPE( dimensions: headDim, traditional: args.ropeTraditional, base: args.ropeTheta) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = wq(x) var keys = wk(x) var values = wv(x) // Prepare the queries, keys and values for the attention computation queries = queries.reshaped(B, L, nHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, nKVHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, nKVHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "gate_proj") var gate: Linear @ModuleInfo(key: "down_proj") var down: Linear @ModuleInfo(key: "up_proj") var up: Linear public init(dimensions: Int, hiddenDimensions: Int) { self._gate.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) self._down.wrappedValue = Linear(hiddenDimensions, dimensions, bias: false) self._up.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) } public func callAsFunction(_ x: MLXArray) -> MLXArray { down(gelu(gate(x)) * up(x)) } } private class TransformerBlock: Module { @ModuleInfo(key: "self_attn") var attention: Attention let mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: Gemma.RMSNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: Gemma.RMSNorm public init(_ args: GemmaConfiguration) { self._attention.wrappedValue = Attention(args) self.mlp = MLP(dimensions: args.hiddenSize, hiddenDimensions: args.intermediateSize) self._inputLayerNorm.wrappedValue = Gemma.RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) self._postAttentionLayerNorm.wrappedValue = Gemma.RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction( _ x: MLXArray, mask: 
MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { var r = attention(inputLayerNorm(x), mask: mask, cache: cache) let h = x + r r = mlp(postAttentionLayerNorm(h)) return h + r } } private class GemmaModelInner: Module { let args: GemmaConfiguration let vocabularySize: Int let numHiddenLayers: Int @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding fileprivate let layers: [TransformerBlock] fileprivate let norm: Gemma.RMSNorm public init(_ args: GemmaConfiguration) { precondition(args.vocabularySize > 0) self.args = args self.vocabularySize = args.vocabularySize self.numHiddenLayers = args.hiddenLayers self._embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers) .map { _ in TransformerBlock(args) } self.norm = Gemma.RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) h = h * pow(Float(args.hiddenSize), 0.5) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class GemmaModel: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] let modelType: String private let model: GemmaModelInner public init(_ args: GemmaConfiguration) { self.modelType = args.modelType self.vocabularySize = args.vocabularySize self.kvHeads = Array(repeating: args.kvHeads, count: args.hiddenLayers) self.model = GemmaModelInner(args) } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray { let out = model(inputs, cache: cache) return model.embedTokens.asLinear(out) } public func messageGenerator(tokenizer: any Tokenizer) -> any MessageGenerator { NoSystemMessageGenerator() } } public struct GemmaConfiguration: Codable, Sendable { var modelType: String var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var headDimensions: Int var rmsNormEps: Float var vocabularySize: Int var kvHeads: Int private let _ropeTheta: Float? public var ropeTheta: Float { _ropeTheta ?? 10_000 } private let _ropeTraditional: Bool? public var ropeTraditional: Bool { _ropeTraditional ?? false } enum CodingKeys: String, CodingKey { case modelType = "model_type" case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case headDimensions = "head_dim" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case kvHeads = "num_key_value_heads" case _ropeTheta = "rope_theta" case _ropeTraditional = "rope_traditional" } } // MARK: - LoRA extension GemmaModel: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["q_proj", "v_proj"]) } } }
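Two Gemma-specific details appear in the pair above: the RMSNorm weight is applied as (1 + weight), and the token embeddings are scaled by sqrt(hidden_size) before the first block. A short MLX Python sketch of both, with illustrative dimensions:

import mlx.core as mx

def gemma_rms_norm(x, weight, eps=1e-5):
    # applied as (1 + weight), matching the specialized RMSNorm above
    return mx.fast.rms_norm(x, 1.0 + weight, eps)

hidden_size = 8
weight = mx.ones((hidden_size,))
h = mx.random.normal((2, 5, hidden_size))
h = h * (hidden_size**0.5)  # embedding scaling from GemmaModel.__call__
print(gemma_rms_norm(h, weight).shape)  # (2, 5, 8)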
LM
Gemma2
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import Any, Optional import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int head_dim: int rms_norm_eps: float vocab_size: int num_key_value_heads: int rope_theta: float = 10000 rope_traditional: bool = False attn_logit_softcapping: float = 50.0 final_logit_softcapping: float = 30.0 query_pre_attn_scalar: float = 144.0 class RMSNorm(nn.Module): def __init__(self, dims: int, eps: float = 1e-5): super().__init__() self.weight = mx.ones((dims,)) self.eps = eps def __call__(self, x): return mx.fast.rms_norm(x, 1.0 + self.weight, self.eps) class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads self.n_kv_heads = n_kv_heads = args.num_key_value_heads self.repeats = n_heads // n_kv_heads self.head_dim = head_dim = args.head_dim self.scale = 1.0 / (args.query_pre_attn_scalar**0.5) self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=False) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=False) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=False) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=False) self.attn_logit_softcapping = args.attn_logit_softcapping self.rope = nn.RoPE( head_dim, traditional=args.rope_traditional, base=args.rope_theta, ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) queries = queries * self.scale if self.repeats > 1: queries = queries.reshape( B, self.n_kv_heads, self.repeats, L, self.head_dim ) keys = mx.expand_dims(keys, 2) values = mx.expand_dims(values, 2) scores = queries @ keys.swapaxes(-1, -2) scores = mx.tanh(scores / self.attn_logit_softcapping) scores *= self.attn_logit_softcapping if mask is not None: if mask.dtype == mx.bool_: scores = mx.where( mask, scores, mx.array(mx.finfo(scores.dtype).min, scores.dtype) ) else: scores = scores + mask scores = mx.softmax(scores, precise=True, axis=-1) output = scores @ values if self.repeats > 1: output = output.reshape(B, self.n_heads, L, self.head_dim) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class MLP(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.gate_proj = nn.Linear(dim, hidden_dim, bias=False) self.down_proj = nn.Linear(hidden_dim, dim, bias=False) self.up_proj = nn.Linear(dim, hidden_dim, bias=False) def __call__(self, x) -> mx.array: return self.down_proj(nn.gelu_approx(self.gate_proj(x)) * self.up_proj(x)) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.num_attention_heads = args.num_attention_heads self.hidden_size = args.hidden_size self.self_attn = Attention(args) self.mlp = MLP(args.hidden_size, args.intermediate_size) 
self.input_layernorm = RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.pre_feedforward_layernorm = RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) self.post_feedforward_layernorm = RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) self.post_attention_layernorm = RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.args = args def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.self_attn(self.input_layernorm(x), mask, cache) h = x + self.post_attention_layernorm(r) r = self.mlp(self.pre_feedforward_layernorm(h)) out = h + self.post_feedforward_layernorm(r) return out class GemmaModel(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = args.num_hidden_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ TransformerBlock(args=args) for _ in range(args.num_hidden_layers) ] self.norm = RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): h = self.embed_tokens(inputs) h = h * (self.args.hidden_size**0.5) if mask is None: mask = create_attention_mask(h, cache, return_array=True) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.model_type = args.model_type self.final_logit_softcapping = args.final_logit_softcapping self.model = GemmaModel(args) self.args = args def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.model(inputs, mask, cache) out = self.model.embed_tokens.as_linear(out) out = mx.tanh(out / self.final_logit_softcapping) out = out * self.final_logit_softcapping return out @property def layers(self): return self.model.layers
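A minimal usage sketch for the Python implementation above. The import path assumes an mlx-lm style layout (mlx_lm.models.gemma2), and the tiny configuration values are hypothetical, chosen only to keep the forward pass cheap; they do not correspond to any released Gemma 2 checkpoint.

import mlx.core as mx

# Assumed import path; adjust to wherever the module above lives.
from mlx_lm.models.gemma2 import Model, ModelArgs

# Hypothetical tiny configuration for illustration only.
args = ModelArgs(
    model_type="gemma2",
    hidden_size=128,
    num_hidden_layers=2,
    intermediate_size=256,
    num_attention_heads=4,
    head_dim=32,
    rms_norm_eps=1e-6,
    vocab_size=1000,
    num_key_value_heads=2,
)
model = Model(args)

tokens = mx.array([[1, 2, 3, 4]])  # (batch=1, sequence=4) dummy token ids
logits = model(tokens)             # single forward pass, no KV cache
print(logits.shape)                # (1, 4, 1000); values are soft-capped to +/- final_logit_softcapping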
// Copyright © 2024 Apple Inc. import Foundation import MLX import MLXLMCommon import MLXNN import Tokenizers // Port of https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/models/gemma2.py private class Attention: Module { let args: Gemma2Configuration let scale: Float let logitSoftCap: Float let headDim: Int let nHeads: Int let nKVHeads: Int let repeats: Int @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "o_proj") var wo: Linear let rope: RoPE public init(_ args: Gemma2Configuration) { self.args = args let dim = args.hiddenSize self.nHeads = args.attentionHeads self.nKVHeads = args.kvHeads self.repeats = args.attentionHeads / args.kvHeads self.headDim = args.headDimensions self.scale = 1.0 / pow(Float(args.queryPreAttnScalar), 0.5) self._wq.wrappedValue = Linear(dim, nHeads * headDim, bias: false) self._wk.wrappedValue = Linear(dim, nKVHeads * headDim, bias: false) self._wv.wrappedValue = Linear(dim, nKVHeads * headDim, bias: false) self._wo.wrappedValue = Linear(nHeads * headDim, dim, bias: false) self.logitSoftCap = args.attnLogitSoftcapping self.rope = RoPE( dimensions: headDim, traditional: args.ropeTraditional, base: args.ropeTheta) } public func callAsFunction( _ x: MLXArray, mask: MLXArray?, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = wq(x) var keys = wk(x) var values = wv(x) queries = queries.reshaped(B, L, nHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, nKVHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, nKVHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) (keys, values) = cache.update(keys: keys, values: values) } else { queries = rope(queries) keys = rope(keys) } queries = queries * self.scale if repeats > 1 { queries = queries.reshaped([B, nKVHeads, repeats, L, headDim]) keys = expandedDimensions(keys, axes: [2]) values = expandedDimensions(values, axes: [2]) } var scores = matmul(queries, keys.swappedAxes(-1, -2)) scores = tanh(scores / logitSoftCap) * logitSoftCap if let mask { scores = scores + mask } scores = softmax(scores, axis: -1, precise: true) var output = matmul(scores, values) if repeats > 1 { output = output.reshaped([B, nHeads, L, headDim]) } output = output.transposed(0, 2, 1, 3).reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "gate_proj") var gate: Linear @ModuleInfo(key: "down_proj") var down: Linear @ModuleInfo(key: "up_proj") var up: Linear public init(dimensions: Int, hiddenDimensions: Int) { self._gate.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) self._down.wrappedValue = Linear(hiddenDimensions, dimensions, bias: false) self._up.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) } public func callAsFunction(_ x: MLXArray) -> MLXArray { down(gelu(gate(x)) * up(x)) } } // Minimal changes from Gemma TransformerBlock private class TransformerBlock: Module { @ModuleInfo(key: "self_attn") var attention: Attention let mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: Gemma.RMSNorm @ModuleInfo(key: "pre_feedforward_layernorm") var preFeedforwardLayerNorm: Gemma.RMSNorm @ModuleInfo(key: "post_feedforward_layernorm") var postFeedforwardLayerNorm: Gemma.RMSNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: Gemma.RMSNorm public init(_ args: Gemma2Configuration) { 
self._attention.wrappedValue = Attention(args) self.mlp = MLP(dimensions: args.hiddenSize, hiddenDimensions: args.intermediateSize) self._inputLayerNorm.wrappedValue = Gemma.RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) self._preFeedforwardLayerNorm.wrappedValue = Gemma.RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) self._postFeedforwardLayerNorm.wrappedValue = Gemma.RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) self._postAttentionLayerNorm.wrappedValue = Gemma.RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction( _ x: MLXArray, mask: MLXArray?, cache: KVCache? ) -> MLXArray { var r = attention(inputLayerNorm(x), mask: mask, cache: cache) let h = x + postAttentionLayerNorm(r) r = mlp(preFeedforwardLayerNorm(h)) let out = h + postFeedforwardLayerNorm(r) return out } } // Uses Gemma2TransformerBlock, otherwise same as GemmaModelInner private class ModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding fileprivate let layers: [TransformerBlock] fileprivate let norm: Gemma.RMSNorm let hiddenScale: Float public init(_ args: Gemma2Configuration) { precondition(args.vocabularySize > 0) self._embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.hiddenScale = pow(Float(args.hiddenSize), 0.5) self.layers = (0 ..< args.hiddenLayers) .map { _ in TransformerBlock(args) } self.norm = Gemma.RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) h = h * hiddenScale let mask: MLXArray? = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } // Uses Gemma2ModelInner, otherwise same as GemmaModel public class Gemma2Model: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] private let model: ModelInner let logitSoftCap: Float public init(_ args: Gemma2Configuration) { self.vocabularySize = args.vocabularySize self.kvHeads = Array(repeating: args.kvHeads, count: args.hiddenLayers) self.model = ModelInner(args) self.logitSoftCap = args.finalLogitSoftcapping } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) 
-> MLXArray { var out = model(inputs, cache: cache) out = model.embedTokens.asLinear(out) out = tanh(out / logitSoftCap) * logitSoftCap return out } public func messageGenerator(tokenizer: any Tokenizer) -> any MessageGenerator { NoSystemMessageGenerator() } } public struct Gemma2Configuration: Codable { var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var headDimensions: Int var rmsNormEps: Float var vocabularySize: Int var kvHeads: Int var ropeTheta: Float = 10_000 var ropeTraditional: Bool = false var attnLogitSoftcapping: Float = 50.0 var finalLogitSoftcapping: Float = 30.0 var queryPreAttnScalar: Float = 144.0 enum CodingKeys: String, CodingKey { case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case headDimensions = "head_dim" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case kvHeads = "num_key_value_heads" case ropeTheta = "rope_theta" case ropeTraditional = "rope_traditional" case attnLogitSoftcapping = "attn_logit_softcapping" case finalLogitSoftcapping = "final_logit_softcapping" case queryPreAttnScalar = "query_pre_attn_scalar" } public init(from decoder: Swift.Decoder) throws { // Custom implementation to handle optional keys with required values let container: KeyedDecodingContainer<CodingKeys> = try decoder.container( keyedBy: CodingKeys.self) self.hiddenSize = try container.decode( Int.self, forKey: CodingKeys.hiddenSize) self.hiddenLayers = try container.decode( Int.self, forKey: CodingKeys.hiddenLayers) self.intermediateSize = try container.decode( Int.self, forKey: CodingKeys.intermediateSize) self.attentionHeads = try container.decode( Int.self, forKey: CodingKeys.attentionHeads) self.headDimensions = try container.decode( Int.self, forKey: CodingKeys.headDimensions) self.rmsNormEps = try container.decode( Float.self, forKey: CodingKeys.rmsNormEps) self.vocabularySize = try container.decode( Int.self, forKey: CodingKeys.vocabularySize) self.kvHeads = try container.decode(Int.self, forKey: CodingKeys.kvHeads) self.ropeTheta = try container.decodeIfPresent(Float.self, forKey: CodingKeys.ropeTheta) ?? 10_000 self.ropeTraditional = try container.decodeIfPresent( Bool.self, forKey: CodingKeys.ropeTraditional) ?? false self.attnLogitSoftcapping = try container.decode( Float.self, forKey: CodingKeys.attnLogitSoftcapping) self.finalLogitSoftcapping = try container.decode( Float.self, forKey: CodingKeys.finalLogitSoftcapping) self.queryPreAttnScalar = try container.decode( Float.self, forKey: CodingKeys.queryPreAttnScalar) } } // MARK: - LoRA extension Gemma2Model: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["q_proj", "v_proj"]) } } }
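Both the Python and Swift ports bound attention scores and final logits with the same tanh soft-capping. A standalone sketch of that formula, with arbitrary example values:

import mlx.core as mx

def soft_cap(x: mx.array, cap: float) -> mx.array:
    # tanh keeps the result strictly inside (-cap, cap) while staying
    # close to the identity for |x| much smaller than cap.
    return mx.tanh(x / cap) * cap

scores = mx.array([-500.0, -10.0, 0.0, 10.0, 500.0])
print(soft_cap(scores, 50.0))  # ~[-50.0, -9.87, 0.0, 9.87, 50.0] with attn_logit_softcapping = 50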
LM
Gemma3Text
# Copyright © 2025 Apple Inc. from dataclasses import dataclass from functools import partial from typing import Any, Optional import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention from .cache import KVCache, RotatingKVCache @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int = 1152 num_hidden_layers: int = 26 intermediate_size: int = 6912 num_attention_heads: int = 4 head_dim: int = 256 rms_norm_eps: float = 1.0e-6 vocab_size: int = 262144 num_key_value_heads: int = 1 rope_global_base_freq: float = 1_000_000.0 rope_local_base_freq: float = 10_000.0 rope_traditional: bool = False query_pre_attn_scalar: float = 256 sliding_window: int = 512 sliding_window_pattern: int = 6 class Attention(nn.Module): def __init__(self, args: ModelArgs, layer_idx: int): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads self.n_kv_heads = n_kv_heads = args.num_key_value_heads self.repeats = n_heads // n_kv_heads self.head_dim = head_dim = args.head_dim self.layer_idx = layer_idx self.scale = args.query_pre_attn_scalar**-0.5 self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=False) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=False) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=False) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=False) self.q_norm = RMSNorm(dims=head_dim, eps=args.rms_norm_eps) self.k_norm = RMSNorm(dims=head_dim, eps=args.rms_norm_eps) self.is_sliding = (layer_idx + 1) % args.sliding_window_pattern != 0 self.rope = nn.RoPE( head_dim, traditional=args.rope_traditional, base=( args.rope_local_base_freq if self.is_sliding else args.rope_global_base_freq ), ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, _ = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) queries = self.q_norm(queries) keys = self.k_norm(keys) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) # Sliding window if isinstance(mask, mx.array) and mask.shape[-1] != keys.shape[-2]: mask = mask[..., -keys.shape[-2] :] output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class RMSNorm(nn.Module): def __init__(self, dims: int, eps: float = 1e-5): super().__init__() self.weight = mx.ones((dims,)) self.eps = eps def __call__(self, x): return mx.fast.rms_norm(x, 1.0 + self.weight, self.eps) class MLP(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.gate_proj = nn.Linear(dim, hidden_dim, bias=False) self.down_proj = nn.Linear(hidden_dim, dim, bias=False) self.up_proj = nn.Linear(dim, hidden_dim, bias=False) def __call__(self, x) -> mx.array: return self.down_proj(nn.gelu_approx(self.gate_proj(x)) * self.up_proj(x)) @partial(mx.compile, shapeless=True) def clip_residual(x, y): if x.dtype != mx.float16: return x + y bound = mx.finfo(mx.float16).max return mx.clip(x.astype(mx.float32) + y.astype(mx.float32), -bound, bound).astype( 
mx.float16 ) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs, layer_idx: int): super().__init__() self.num_attention_heads = args.num_attention_heads self.hidden_size = args.hidden_size self.self_attn = Attention(args, layer_idx) self.mlp = MLP(args.hidden_size, args.intermediate_size) self.input_layernorm = RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.post_attention_layernorm = RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.pre_feedforward_layernorm = RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) self.post_feedforward_layernorm = RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.self_attn(self.input_layernorm(x), mask, cache) h = clip_residual(x, self.post_attention_layernorm(r)) r = self.mlp(self.pre_feedforward_layernorm(h)) out = clip_residual(h, self.post_feedforward_layernorm(r)) return out class Gemma3Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = args.num_hidden_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ TransformerBlock(args=args, layer_idx=layer_idx) for layer_idx in range(args.num_hidden_layers) ] self.norm = RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, input_embeddings: Optional[mx.array] = None, ): if input_embeddings is not None: h = input_embeddings else: h = self.embed_tokens(inputs) h *= mx.array(self.args.hidden_size**0.5, mx.bfloat16).astype(h.dtype) if cache is None: cache = [None] * len(self.layers) if mask is None: j = self.args.sliding_window_pattern full_mask = create_attention_mask(h, cache[j - 1 : j]) sliding_window_mask = create_attention_mask(h, cache) for i, (layer, c) in enumerate(zip(self.layers, cache)): is_global = ( i % self.args.sliding_window_pattern == self.args.sliding_window_pattern - 1 ) local_mask = mask if mask is None and is_global: local_mask = full_mask elif mask is None: local_mask = sliding_window_mask h = layer(h, local_mask, c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.model_type = args.model_type self.model = Gemma3Model(args) self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False) def __call__( self, inputs: mx.array, cache=None, mask: Optional[mx.array] = None, input_embeddings: Optional[mx.array] = None, ): out = self.model(inputs, mask, cache, input_embeddings) out = self.lm_head(out) return out def sanitize(self, weights): weights = dict(weights) if "lm_head.weight" not in weights: weights["lm_head.weight"] = weights["model.embed_tokens.weight"] return weights @property def layers(self): return self.model.layers def make_cache(self): caches = [] for i in range(self.args.num_hidden_layers): if ( i % self.args.sliding_window_pattern == self.args.sliding_window_pattern - 1 ): caches.append(KVCache()) else: caches.append( RotatingKVCache(max_size=self.args.sliding_window, keep=0) ) return caches
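A minimal sketch of how make_cache() above assigns a full KVCache to every sliding_window_pattern-th layer and a RotatingKVCache to the rest. The import path and the tiny configuration are assumptions for illustration only.

import mlx.core as mx

# Assumed import path; adjust to wherever the module above lives.
from mlx_lm.models.gemma3_text import Model, ModelArgs

# Hypothetical tiny configuration overriding the defaults to keep this cheap.
args = ModelArgs(
    model_type="gemma3_text",
    hidden_size=64,
    num_hidden_layers=6,
    intermediate_size=128,
    num_attention_heads=4,
    head_dim=16,
    vocab_size=1000,
    num_key_value_heads=1,
    sliding_window=8,
    sliding_window_pattern=3,
)
model = Model(args)

caches = model.make_cache()
print([type(c).__name__ for c in caches])
# ['RotatingKVCache', 'RotatingKVCache', 'KVCache',
#  'RotatingKVCache', 'RotatingKVCache', 'KVCache'] -- layers 2 and 5 attend globally

logits = model(mx.array([[1, 2, 3, 4]]), cache=caches)
print(logits.shape)  # (1, 4, 1000)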
// // Gemma3Text.swift // mlx-swift-examples // // Created by Anthony DePasquale on 14.03.2025. // // Based on https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/models/gemma3_text.py import Foundation import MLX import MLXFast import MLXLLM import MLXLMCommon import MLXNN public struct Gemma3TextConfiguration: Codable { let modelType: String let hiddenSize: Int let hiddenLayers: Int let intermediateSize: Int let attentionHeads: Int let headDim: Int let rmsNormEps: Float let vocabularySize: Int let kvHeads: Int let ropeGlobalBaseFreq: Float let ropeLocalBaseFreq: Float let ropeTraditional: Bool let queryPreAttnScalar: Float let slidingWindow: Int let slidingWindowPattern: Int enum CodingKeys: String, CodingKey { case modelType = "model_type" case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case headDim = "head_dim" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case kvHeads = "num_key_value_heads" case ropeGlobalBaseFreq = "rope_global_base_freq" case ropeLocalBaseFreq = "rope_local_base_freq" case ropeTraditional = "rope_traditional" case queryPreAttnScalar = "query_pre_attn_scalar" case slidingWindow = "sliding_window" case slidingWindowPattern = "sliding_window_pattern" } enum VLMCodingKeys: String, CodingKey { case textConfig = "text_config" } public init(from decoder: Decoder) throws { let nestedContainer = try decoder.container(keyedBy: VLMCodingKeys.self) // in the case of VLM models convertered using mlx_lm.convert // the configuration will still match the VLMs and be under text_config let container = if nestedContainer.contains(.textConfig) { try nestedContainer.nestedContainer(keyedBy: CodingKeys.self, forKey: .textConfig) } else { try decoder.container(keyedBy: CodingKeys.self) } modelType = try container.decode(String.self, forKey: .modelType) hiddenSize = try container.decode(Int.self, forKey: .hiddenSize) hiddenLayers = try container.decode(Int.self, forKey: .hiddenLayers) intermediateSize = try container.decode(Int.self, forKey: .intermediateSize) attentionHeads = try container.decodeIfPresent(Int.self, forKey: .attentionHeads) ?? 4 headDim = try container.decodeIfPresent(Int.self, forKey: .headDim) ?? 256 rmsNormEps = try container.decodeIfPresent(Float.self, forKey: .rmsNormEps) ?? 1.0e-6 vocabularySize = try container.decodeIfPresent(Int.self, forKey: .vocabularySize) ?? 262144 kvHeads = try container.decodeIfPresent(Int.self, forKey: .kvHeads) ?? 1 ropeGlobalBaseFreq = try container.decodeIfPresent(Float.self, forKey: .ropeGlobalBaseFreq) ?? 1_000_000.0 ropeLocalBaseFreq = try container.decodeIfPresent(Float.self, forKey: .ropeLocalBaseFreq) ?? 10_000.0 ropeTraditional = try container.decodeIfPresent(Bool.self, forKey: .ropeTraditional) ?? false queryPreAttnScalar = try container.decodeIfPresent(Float.self, forKey: .queryPreAttnScalar) ?? 256 slidingWindow = try container.decodeIfPresent(Int.self, forKey: .slidingWindow) ?? 512 slidingWindowPattern = try container.decodeIfPresent(Int.self, forKey: .slidingWindowPattern) ?? 
6 } } private class Attention: Module { let nHeads: Int let nKVHeads: Int let repeats: Int let headDim: Int let layerIdx: Int let scale: Float let isSliding: Bool let slidingWindow: Int let slidingWindowPattern: Int @ModuleInfo(key: "q_proj") var queryProj: Linear @ModuleInfo(key: "k_proj") var keyProj: Linear @ModuleInfo(key: "v_proj") var valueProj: Linear @ModuleInfo(key: "o_proj") var outputProj: Linear @ModuleInfo(key: "q_norm") var queryNorm: Gemma.RMSNorm @ModuleInfo(key: "k_norm") var keyNorm: Gemma.RMSNorm @ModuleInfo var rope: RoPE init(_ config: Gemma3TextConfiguration, layerIdx: Int) { let dim = config.hiddenSize self.nHeads = config.attentionHeads self.nKVHeads = config.kvHeads self.repeats = nHeads / nKVHeads self.headDim = config.headDim self.layerIdx = layerIdx self.slidingWindow = config.slidingWindow self.slidingWindowPattern = config.slidingWindowPattern self.scale = pow(config.queryPreAttnScalar, -0.5) self._queryProj.wrappedValue = Linear(dim, nHeads * headDim, bias: false) self._keyProj.wrappedValue = Linear(dim, nKVHeads * headDim, bias: false) self._valueProj.wrappedValue = Linear(dim, nKVHeads * headDim, bias: false) self._outputProj.wrappedValue = Linear(nHeads * headDim, dim, bias: false) self._queryNorm.wrappedValue = Gemma.RMSNorm( dimensions: headDim, eps: config.rmsNormEps) self._keyNorm.wrappedValue = Gemma.RMSNorm(dimensions: headDim, eps: config.rmsNormEps) self.isSliding = (layerIdx + 1) % config.slidingWindowPattern != 0 let baseFreq = isSliding ? config.ropeLocalBaseFreq : config.ropeGlobalBaseFreq self._rope.wrappedValue = RoPE( dimensions: headDim, traditional: config.ropeTraditional, base: baseFreq ) super.init() } func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? = nil ) -> MLXArray { let (B, L, _) = (x.dim(0), x.dim(1), x.dim(2)) var queries = queryProj(x) var keys = keyProj(x) var values = valueProj(x) queries = queries.reshaped(B, L, nHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, nKVHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, nKVHeads, -1).transposed(0, 2, 1, 3) queries = queryNorm(queries) keys = keyNorm(keys) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } // Sliding window masking var finalMask = mask if case .array(let maskArray) = mask { let keySeqLen = keys.shape[2] if maskArray.shape.last! != keySeqLen { let slicedMask = maskArray[.ellipsis, (-keySeqLen)...] 
finalMask = .array(slicedMask) } } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: finalMask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return outputProj(output) } } private class MLP: Module { @ModuleInfo(key: "gate_proj") var gateProj: Linear @ModuleInfo(key: "down_proj") var downProj: Linear @ModuleInfo(key: "up_proj") var upProj: Linear init(dimensions: Int, hiddenDimensions: Int) { self._gateProj.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) self._downProj.wrappedValue = Linear(hiddenDimensions, dimensions, bias: false) self._upProj.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) super.init() } func callAsFunction(_ x: MLXArray) -> MLXArray { return downProj(geluApproximate(gateProj(x)) * upProj(x)) } } private class TransformerBlock: Module { @ModuleInfo(key: "self_attn") var selfAttention: Attention @ModuleInfo var mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: Gemma.RMSNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: Gemma.RMSNorm @ModuleInfo(key: "pre_feedforward_layernorm") var preFeedforwardLayerNorm: Gemma.RMSNorm @ModuleInfo(key: "post_feedforward_layernorm") var postFeedforwardLayerNorm: Gemma.RMSNorm let numAttentionHeads: Int let hiddenSize: Int let layerIdx: Int init(_ config: Gemma3TextConfiguration, layerIdx: Int) { self.numAttentionHeads = config.attentionHeads self.hiddenSize = config.hiddenSize self.layerIdx = layerIdx self._selfAttention.wrappedValue = Attention(config, layerIdx: layerIdx) self.mlp = MLP(dimensions: config.hiddenSize, hiddenDimensions: config.intermediateSize) self._inputLayerNorm.wrappedValue = Gemma.RMSNorm( dimensions: config.hiddenSize, eps: config.rmsNormEps) self._postAttentionLayerNorm.wrappedValue = Gemma.RMSNorm( dimensions: config.hiddenSize, eps: config.rmsNormEps) self._preFeedforwardLayerNorm.wrappedValue = Gemma.RMSNorm( dimensions: config.hiddenSize, eps: config.rmsNormEps) self._postFeedforwardLayerNorm.wrappedValue = Gemma.RMSNorm( dimensions: config.hiddenSize, eps: config.rmsNormEps) super.init() } func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? = nil ) -> MLXArray { let inputNorm = inputLayerNorm(x) let r = selfAttention(inputNorm, mask: mask, cache: cache) let attnNorm = postAttentionLayerNorm(r) let h = Gemma.clipResidual(x, attnNorm) let preMLPNorm = preFeedforwardLayerNorm(h) let r2 = mlp(preMLPNorm) let postMLPNorm = postFeedforwardLayerNorm(r2) let out = Gemma.clipResidual(h, postMLPNorm) return out } } private class Gemma3Model: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding @ModuleInfo var layers: [TransformerBlock] @ModuleInfo var norm: Gemma.RMSNorm let config: Gemma3TextConfiguration init(_ config: Gemma3TextConfiguration) { self.config = config self._embedTokens.wrappedValue = Embedding( embeddingCount: config.vocabularySize, dimensions: config.hiddenSize ) self._layers.wrappedValue = (0 ..< config.hiddenLayers).map { layerIdx in TransformerBlock(config, layerIdx: layerIdx) } self.norm = Gemma.RMSNorm(dimensions: config.hiddenSize, eps: config.rmsNormEps) super.init() } func callAsFunction( _ inputs: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode? = nil, cache: [KVCache?]? 
= nil ) -> MLXArray { var h: MLXArray h = embedTokens(inputs) let scale = MLXArray(sqrt(Float(config.hiddenSize)), dtype: .bfloat16) h = h * scale.asType(h.dtype) var layerCache = cache if layerCache == nil { layerCache = Array(repeating: nil as KVCache?, count: layers.count) } // Create attention masks var fullMask: MLXFast.ScaledDotProductAttentionMaskMode = .none var slidingWindowMask: MLXFast.ScaledDotProductAttentionMaskMode = .none if mask == nil { let j = config.slidingWindowPattern let globalLayerCache: [KVCache] if j > 0 && j <= (layerCache?.count ?? 0), let globalCache = layerCache?[j - 1] { globalLayerCache = [globalCache] } else { globalLayerCache = [] } fullMask = createAttentionMask(h: h, cache: globalLayerCache) let allCaches = layerCache?.compactMap { $0 } ?? [] slidingWindowMask = createAttentionMask(h: h, cache: allCaches) } for (i, layer) in layers.enumerated() { let isGlobal = (i % config.slidingWindowPattern == config.slidingWindowPattern - 1) let localMask: MLXFast.ScaledDotProductAttentionMaskMode if let mask { localMask = mask } else if isGlobal { localMask = fullMask } else { localMask = slidingWindowMask } h = layer(h, mask: localMask, cache: layerCache?[i]) } return norm(h) } } public class Gemma3TextModel: Module, LLMModel { @ModuleInfo private var model: Gemma3Model @ModuleInfo(key: "lm_head") var lmHead: Linear public let config: Gemma3TextConfiguration public var vocabularySize: Int { config.vocabularySize } public init(_ config: Gemma3TextConfiguration) { self.config = config self.model = Gemma3Model(config) self._lmHead.wrappedValue = Linear(config.hiddenSize, config.vocabularySize, bias: false) super.init() } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var out = model(inputs, mask: nil, cache: cache) out = lmHead(out) return out } public func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] { var processedWeights = weights // VLM models converted using mlx_vlm.convert will still have // the weights are under a language_model key let unflattened = ModuleParameters.unflattened(weights) if let lm = unflattened["language_model"] { processedWeights = Dictionary(uniqueKeysWithValues: lm.flattened()) } if processedWeights["lm_head.weight"] == nil { if let embedWeight = processedWeights["model.embed_tokens.weight"] { processedWeights["lm_head.weight"] = embedWeight } } return processedWeights } public func newCache(parameters: GenerateParameters? = nil) -> [KVCache] { var caches = [KVCache]() let slidingWindow = config.slidingWindow let slidingWindowPattern = config.slidingWindowPattern for i in 0 ..< config.hiddenLayers { let isGlobalLayer = (i % slidingWindowPattern == slidingWindowPattern - 1) if isGlobalLayer { // For global layers, use standard cache but with reasonable step size for long sequences let cache = StandardKVCache() cache.step = 1024 // Larger step size for efficiency with long sequences caches.append(cache) } else { // For sliding window layers, use rotating cache caches.append( RotatingKVCache(maxSize: slidingWindow, keep: 0) ) } } return caches } /// Handles prompt processing for sequences public func prepare( _ input: LMInput, cache: [KVCache], windowSize: Int? 
= nil ) throws -> PrepareResult { let promptTokens = input.text.tokens let promptCount = promptTokens.shape[0] guard promptCount > 0 else { print("Warning: Preparing with empty prompt tokens.") let emptyToken = MLXArray(Int32(0))[0 ..< 0] return .tokens(.init(tokens: emptyToken)) } return .tokens(input.text) } } extension Gemma3TextModel: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.selfAttention, ["q_proj", "v_proj"]) } } }
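The clip_residual helper in the Python port (Gemma.clipResidual in the Swift port) guards the residual additions against float16 overflow by accumulating in float32 and clipping back into the float16 range. A small numeric sketch of the same idea:

import mlx.core as mx

def clip_residual(x: mx.array, y: mx.array) -> mx.array:
    # Add in float32, clip into the representable float16 range, cast back.
    if x.dtype != mx.float16:
        return x + y
    bound = mx.finfo(mx.float16).max  # 65504.0
    return mx.clip(
        x.astype(mx.float32) + y.astype(mx.float32), -bound, bound
    ).astype(mx.float16)

x = mx.array([60000.0, 1.0], dtype=mx.float16)
y = mx.array([60000.0, 2.0], dtype=mx.float16)
print(x + y)                # [inf, 3.0] -- naive float16 addition overflows
print(clip_residual(x, y))  # [65504.0, 3.0] -- clipped instead of overflowing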
LM
Granite
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import Any, Dict, Optional, Union import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention from .rope_utils import initialize_rope @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int rms_norm_eps: float vocab_size: int logits_scaling: float attention_multiplier: float embedding_multiplier: float residual_multiplier: float max_position_embeddings: int num_key_value_heads: int attention_bias: bool mlp_bias: bool rope_theta: float rope_scaling: Optional[Dict[str, Union[float, str]]] = None tie_word_embeddings: bool = True class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads self.n_kv_heads = n_kv_heads = args.num_key_value_heads self.head_dim = head_dim = args.hidden_size // n_heads self.scale = args.attention_multiplier attention_bias = args.attention_bias self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=attention_bias) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=attention_bias) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=attention_bias) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=attention_bias) self.rope = initialize_rope( self.head_dim, args.rope_theta, False, args.rope_scaling, args.max_position_embeddings, ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) # Prepare the queries, keys and values for the attention computation queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class MLP(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size hidden_dim = args.intermediate_size if hasattr(args, "mlp_bias"): mlp_bias = args.mlp_bias else: mlp_bias = False self.gate_proj = nn.Linear(dim, hidden_dim, bias=mlp_bias) self.down_proj = nn.Linear(hidden_dim, dim, bias=mlp_bias) self.up_proj = nn.Linear(dim, hidden_dim, bias=mlp_bias) def __call__(self, x) -> mx.array: return self.down_proj(nn.silu(self.gate_proj(x)) * self.up_proj(x)) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.num_attention_heads = args.num_attention_heads self.hidden_size = args.hidden_size self.self_attn = Attention(args) self.mlp = MLP(args) self.input_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.post_attention_layernorm = nn.RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) self.residual_multiplier = args.residual_multiplier def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.self_attn(self.input_layernorm(x), mask, cache) h = x + r * 
self.residual_multiplier r = self.mlp(self.post_attention_layernorm(h)) out = h + r * self.residual_multiplier return out class GraniteModel(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = args.num_hidden_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ TransformerBlock(args=args) for _ in range(args.num_hidden_layers) ] self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.embedding_multiplier = args.embedding_multiplier def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): h = self.embed_tokens(inputs) * self.embedding_multiplier if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, cache=c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.model_type = args.model_type self.model = GraniteModel(args) if not args.tie_word_embeddings: self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False) self.logits_scaling = args.logits_scaling def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.model(inputs, mask, cache) if self.args.tie_word_embeddings: out = self.model.embed_tokens.as_linear(out) else: out = self.lm_head(out) return out / self.logits_scaling @property def layers(self): return self.model.layers
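A minimal sketch of the Granite-specific scaling knobs above (embedding, residual, and attention multipliers plus logits scaling). The import path and every numeric value below are assumptions for illustration, not the configuration of any released Granite checkpoint.

import mlx.core as mx

# Assumed import path; adjust to wherever the module above lives.
from mlx_lm.models.granite import Model, ModelArgs

args = ModelArgs(
    model_type="granite",
    hidden_size=128,
    num_hidden_layers=2,
    intermediate_size=256,
    num_attention_heads=4,
    rms_norm_eps=1e-6,
    vocab_size=1000,
    logits_scaling=8.0,             # final logits are divided by this
    attention_multiplier=0.0078125, # used directly as the attention scale
    embedding_multiplier=12.0,      # token embeddings are multiplied by this
    residual_multiplier=0.22,       # every residual branch is scaled by this
    max_position_embeddings=2048,
    num_key_value_heads=2,
    attention_bias=False,
    mlp_bias=False,
    rope_theta=10000.0,
)
model = Model(args)

logits = model(mx.array([[1, 2, 3, 4]]))
print(logits.shape)  # (1, 4, 1000); tie_word_embeddings defaults to True, so no separate lm_head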
// // Granite.swift // mlx-swift-examples // // Created by Sachin Desai on 4/25/25. // // Port of https://github.com/ml-explore/mlx-lm/blob/main/mlx_lm/models/granite.py import Foundation import MLX import MLXLMCommon import MLXNN private class Attention: Module { let args: GraniteConfiguration let scale: Float @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "o_proj") var wo: Linear let rope: RoPE public init(_ args: GraniteConfiguration) { self.args = args let dim = args.hiddenSize let nHeads = args.attentionHeads let nKvHeads = args.kvHeads let headDim = dim / nHeads self.scale = args.attentionMultiplier let attentionBias = args.attentionBias self._wq.wrappedValue = Linear(dim, nHeads * headDim, bias: attentionBias) self._wk.wrappedValue = Linear(dim, nKvHeads * headDim, bias: attentionBias) self._wv.wrappedValue = Linear(dim, nKvHeads * headDim, bias: attentionBias) self._wo.wrappedValue = Linear(nHeads * headDim, dim, bias: attentionBias) let ropeScale: Float if let ropeScaling = args.ropeScaling, ropeScaling["type"] == .string("linear"), let factor = ropeScaling["factor"] { if let v = factor.asFloat() { ropeScale = 1 / v } else { fatalError("ropeScaling.factor must be a float") } } else { ropeScale = 1 } rope = RoPE(dimensions: headDim, traditional: false, base: args.ropeTheta, scale: ropeScale) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = wq(x) var keys = wk(x) var values = wv(x) // prepare the queries, keys and values for the attention computation queries = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "gate_proj") var gate: Linear @ModuleInfo(key: "down_proj") var down: Linear @ModuleInfo(key: "up_proj") var up: Linear public init(_ args: GraniteConfiguration) { let dim = args.hiddenSize let hiddenDim = args.intermediateSize let mlpBias = args.mlpBias self._gate.wrappedValue = Linear(dim, hiddenDim, bias: mlpBias) self._down.wrappedValue = Linear(hiddenDim, dim, bias: mlpBias) self._up.wrappedValue = Linear(dim, hiddenDim, bias: mlpBias) } public func callAsFunction(_ x: MLXArray) -> MLXArray { down(silu(gate(x)) * up(x)) } } private class TransformerBlock: Module { @ModuleInfo(key: "self_attn") var attention: Attention let mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: RMSNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: RMSNorm let residualMultiplier: Float public init(_ args: GraniteConfiguration) { let attentionHeads = args.attentionHeads let hiddenSize = args.hiddenSize self._attention.wrappedValue = Attention(args) self.mlp = MLP(args) self._inputLayerNorm.wrappedValue = RMSNorm( dimensions: hiddenSize, eps: args.rmsNormEps) self._postAttentionLayerNorm.wrappedValue = RMSNorm( dimensions: hiddenSize, eps: args.rmsNormEps) self.residualMultiplier = 
args.residualMultiplier } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { var r = attention(inputLayerNorm(x), mask: mask, cache: cache) let h = x + r * residualMultiplier r = mlp(postAttentionLayerNorm(h)) let out = h + r * residualMultiplier return out } } private class GraniteModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding fileprivate let layers: [TransformerBlock] let norm: RMSNorm let embeddingMultiplier: Float public init(_ args: GraniteConfiguration) { precondition(args.vocabularySize > 0) self._embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers) .map { _ in TransformerBlock(args) } self.norm = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) self.embeddingMultiplier = args.embeddingMultiplier } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) * embeddingMultiplier let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class GraniteModel: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] let logitsScaling: Float private let model: GraniteModelInner let configuration: GraniteConfiguration @ModuleInfo(key: "lm_head") var lmHead: Linear? public init(_ args: GraniteConfiguration) { self.configuration = args self.vocabularySize = args.vocabularySize self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads } self.model = GraniteModelInner(args) if !args.tieWordEmbeddings { self._lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: false) } self.logitsScaling = args.logitsScaling } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray { var out = model(inputs, cache: cache) if let lmHead { out = lmHead(out) } else { out = model.embedTokens.asLinear(out) } return out / logitsScaling } } public struct GraniteConfiguration: Codable, Sendable { var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var rmsNormEps: Float var vocabularySize: Int var logitsScaling: Float var attentionMultiplier: Float var embeddingMultiplier: Float var residualMultiplier: Float var maxPositionEmbeddings: Int var kvHeads: Int var attentionBias: Bool var mlpBias: Bool var ropeTheta: Float var ropeTraditional: Bool = false var ropeScaling: [String: StringOrNumber]? 
= nil var tieWordEmbeddings: Bool = true enum CodingKeys: String, CodingKey { case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case logitsScaling = "logits_scaling" case attentionMultiplier = "attention_multiplier" case embeddingMultiplier = "embedding_multiplier" case residualMultiplier = "residual_multiplier" case maxPositionEmbeddings = "max_position_embeddings" case kvHeads = "num_key_value_heads" case attentionBias = "attention_bias" case mlpBias = "mlp_bias" case ropeTheta = "rope_theta" case ropeScaling = "rope_scaling" case tieWordEmbeddings = "tie_word_embeddings" } public init(from decoder: Decoder) throws { let container: KeyedDecodingContainer<GraniteConfiguration.CodingKeys> = try decoder.container(keyedBy: GraniteConfiguration.CodingKeys.self) self.hiddenSize = try container.decode(Int.self, forKey: .hiddenSize) self.hiddenLayers = try container.decode(Int.self, forKey: .hiddenLayers) self.intermediateSize = try container.decode(Int.self, forKey: .intermediateSize) self.attentionHeads = try container.decode(Int.self, forKey: .attentionHeads) self.rmsNormEps = try container.decode(Float.self, forKey: .rmsNormEps) self.vocabularySize = try container.decode(Int.self, forKey: .vocabularySize) self.logitsScaling = try container.decode(Float.self, forKey: .logitsScaling) self.attentionMultiplier = try container.decode(Float.self, forKey: .attentionMultiplier) self.embeddingMultiplier = try container.decode(Float.self, forKey: .embeddingMultiplier) self.residualMultiplier = try container.decode(Float.self, forKey: .residualMultiplier) self.maxPositionEmbeddings = try container.decode(Int.self, forKey: .maxPositionEmbeddings) self.kvHeads = try container.decode(Int.self, forKey: .kvHeads) self.attentionBias = try container.decode(Bool.self, forKey: .attentionBias) self.mlpBias = try container.decode(Bool.self, forKey: .mlpBias) ?? false self.ropeTheta = try container.decodeIfPresent(Float.self, forKey: .ropeTheta) ?? 10000000.0 self.ropeScaling = try container.decodeIfPresent( [String: StringOrNumber].self, forKey: .ropeScaling) self.tieWordEmbeddings = try container.decode(Bool.self, forKey: .tieWordEmbeddings) } } // MARK: - LoRA extension GraniteModel: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["q_proj", "v_proj"]) } } }
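The Swift port above derives the RoPE scale from an optional rope_scaling config entry: a "linear" type with a factor f maps to a scale of 1/f, otherwise the scale stays at 1. A tiny sketch with a hypothetical config value:

rope_scaling = {"type": "linear", "factor": 2.0}

rope_scale = (
    1.0 / rope_scaling["factor"]
    if rope_scaling is not None and rope_scaling.get("type") == "linear"
    else 1.0
)
print(rope_scale)  # 0.5 -- positions are effectively compressed by the factor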
LM
InternLM2
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import Any, Dict, Optional, Union import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int rms_norm_eps: float vocab_size: int bias: bool = True max_position_embeddings: int = 32768 num_key_value_heads: int = None rope_theta: float = 10000 rope_traditional: bool = False rope_scaling: Optional[Dict[str, Union[float, str]]] = None tie_word_embeddings: bool = False def __post_init__(self): if self.num_key_value_heads is None: self.num_key_value_heads = self.num_attention_heads if self.rope_scaling: required_keys = {"factor", "type"} if not all(key in self.rope_scaling for key in required_keys): raise ValueError(f"rope_scaling must contain keys {required_keys}") if self.rope_scaling["type"] not in ["linear", "dynamic"]: raise ValueError( "rope_scaling 'type' currently only supports 'linear' or 'dynamic" ) class DynamicNTKScalingRoPE(nn.Module): """Implements the rotary positional encoding with Dynamic NTK scaling.""" def __init__( self, dims: int, max_position_embeddings: int = 2048, traditional: bool = False, base: float = 10000, scale: float = 1.0, ): super().__init__() self.max_position_embeddings = max_position_embeddings self.original_base = base self.dims = dims self.traditional = traditional self.scale = scale def extra_repr(self): return f"{self.dims}, traditional={self.traditional}, max_position_embeddings={self.max_position_embeddings}, scaling_factor={self.scaling_factor}" def __call__(self, x, offset: int = 0): seq_len = x.shape[1] + offset if seq_len > self.max_position_embeddings: base = self.original_base * ( (self.scale * seq_len / self.max_position_embeddings) - (self.scale - 1) ) ** (self.dims / (self.dims - 2)) else: base = self.original_base return mx.fast.rope( x, self.dims, traditional=self.traditional, base=base, scale=self.scale, offset=offset, ) class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads self.n_kv_heads = n_kv_heads = args.num_key_value_heads self.n_kv_groups = n_heads // args.num_key_value_heads self.head_dim = head_dim = args.hidden_size // n_heads self.scale = head_dim**-0.5 self.wqkv = nn.Linear( dim, (n_heads + 2 * n_kv_heads) * head_dim, bias=args.bias ) self.wo = nn.Linear(n_heads * head_dim, dim, bias=args.bias) rope_scale = ( 1 / args.rope_scaling["factor"] if args.rope_scaling is not None and args.rope_scaling["type"] == "linear" else 2.0 ) self.rope = DynamicNTKScalingRoPE( head_dim, max_position_embeddings=args.max_position_embeddings, traditional=args.rope_traditional, base=args.rope_theta, scale=rope_scale, ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape qkv_states = self.wqkv(x) qkv_states = qkv_states.reshape(B, L, -1, 2 + self.n_kv_groups, self.head_dim) queries = qkv_states[..., : self.n_kv_groups, :] queries = queries.reshape(B, L, -1, self.head_dim) keys = qkv_states[..., -2, :] values = qkv_states[..., -1, :] # Prepare the queries, keys and values for the attention computation queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, 
-1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.wo(output) class MLP(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.w1 = nn.Linear(dim, hidden_dim, bias=False) self.w2 = nn.Linear(hidden_dim, dim, bias=False) self.w3 = nn.Linear(dim, hidden_dim, bias=False) def __call__(self, x) -> mx.array: return self.w2(nn.silu(self.w1(x)) * self.w3(x)) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.attention = Attention(args) self.feed_forward = MLP(args.hidden_size, args.intermediate_size) self.attention_norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.ffn_norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.attention(self.attention_norm(x), mask, cache) h = x + r r = self.feed_forward(self.ffn_norm(h)) out = h + r return out class InternLM2Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() assert args.vocab_size > 0 self.tok_embeddings = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ TransformerBlock(args=args) for _ in range(args.num_hidden_layers) ] self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): h = self.tok_embeddings(inputs) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, cache=c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.model_type = args.model_type self.model = InternLM2Model(args) if not args.tie_word_embeddings: self.output = nn.Linear(args.hidden_size, args.vocab_size, bias=False) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.model(inputs, mask, cache) if self.args.tie_word_embeddings: out = self.model.tok_embeddings.as_linear(out) else: out = self.output(out) return out def sanitize(self, weights): # Remove unused precomputed rotary freqs return {k: v for k, v in weights.items() if "attention.rope.inv_freq" not in k} @property def layers(self): return self.model.layers
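The fused wqkv projection above packs query, key, and value heads into a single tensor; the reshape and slicing that separate them can be hard to follow, so here is a shape-only sketch with hypothetical head counts:

import mlx.core as mx

B, L = 1, 3
n_heads, n_kv_heads, head_dim = 8, 2, 4
n_kv_groups = n_heads // n_kv_heads  # 4 query heads share each kv head

qkv = mx.zeros((B, L, (n_heads + 2 * n_kv_heads) * head_dim))
qkv = qkv.reshape(B, L, -1, 2 + n_kv_groups, head_dim)  # the -1 resolves to n_kv_heads

queries = qkv[..., :n_kv_groups, :].reshape(B, L, -1, head_dim)
keys = qkv[..., -2, :]
values = qkv[..., -1, :]

print(queries.shape)  # (1, 3, 8, 4) -- one slice per attention head
print(keys.shape)     # (1, 3, 2, 4) -- one slice per kv head
print(values.shape)   # (1, 3, 2, 4)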
// Copyright © 2024 Apple Inc. import Foundation import MLX import MLXLMCommon import MLXNN // Port of https://github.com/maiqingqiang/mlx-examples/blob/main/llms/mlx_lm/models/internlm2.py private class DynamicNTKScalingRoPE: Module { let dims: Int let maxPositionEmbeddings: Int let traditional: Bool let originalBase: Float var scale: Float init( dims: Int, maxPositionEmbeddings: Int = 2048, traditional: Bool = false, base: Float = 10000, scale: Float = 1.0 ) { self.dims = dims self.maxPositionEmbeddings = maxPositionEmbeddings self.traditional = traditional self.originalBase = base self.scale = scale } func callAsFunction(_ x: MLXArray, offset: Int = 0) -> MLXArray { let seqLen = x.dim(1) + offset var base = originalBase if seqLen > maxPositionEmbeddings { base *= pow( (scale * Float(seqLen) / Float(maxPositionEmbeddings)) - (scale - 1), Float(dims) / Float(dims - 2)) } return MLXFast.RoPE( x, dimensions: dims, traditional: traditional, base: base, scale: scale, offset: offset) } } private class Attention: Module { let args: InternLM2Configuration let scale: Float let heads: Int let kvHeads: Int let kvGroups: Int let headDim: Int @ModuleInfo(key: "wqkv") var wqkv: Linear @ModuleInfo(key: "wo") var wo: Linear let rope: DynamicNTKScalingRoPE init(_ args: InternLM2Configuration) { self.args = args let dim = args.hiddenSize self.heads = args.attentionHeads self.kvHeads = args.kvHeads self.kvGroups = args.kvGroups self.headDim = args.hiddenSize / self.heads self.scale = pow(Float(headDim), -0.5) self._wqkv.wrappedValue = Linear( dim, (self.heads + 2 * self.kvHeads) * self.headDim, bias: args.bias) self._wo.wrappedValue = Linear(self.heads * self.headDim, dim, bias: args.bias) let ropeScale: Float if let ropeScaling = args.ropeScaling, ropeScaling["type"] == .string("linear"), let factor = ropeScaling["factor"] { if let v = factor.asFloat() { ropeScale = 1 / v } else { fatalError("ropeScaling.factor must be a float") } } else { ropeScale = 1 } self.rope = DynamicNTKScalingRoPE( dims: self.headDim, maxPositionEmbeddings: args.maxPositionEmbeddings, traditional: args.ropeTraditional, base: args.ropeTheta, scale: ropeScale ) } func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var qkvStates = wqkv(x) qkvStates = qkvStates.reshaped(B, L, -1, 2 + self.kvGroups, self.headDim) var queries = qkvStates[.ellipsis, ..<self.kvGroups, 0...] queries = queries.reshaped(B, L, -1, self.headDim) var keys = qkvStates[.ellipsis, -2, 0...] var values = qkvStates[.ellipsis, -1, 0...] 
// prepare the queries, keys and values for the attention computation queries = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "w1") var w1: Linear @ModuleInfo(key: "w2") var w2: Linear @ModuleInfo(key: "w3") var w3: Linear init(dim: Int, hiddenDim: Int) { self._w1.wrappedValue = Linear(dim, hiddenDim, bias: false) self._w2.wrappedValue = Linear(hiddenDim, dim, bias: false) self._w3.wrappedValue = Linear(dim, hiddenDim, bias: false) } func callAsFunction(_ x: MLXArray) -> MLXArray { return w2(silu(w1(x)) * w3(x)) } } private class TransformerBlock: Module { @ModuleInfo(key: "attention") var attention: Attention @ModuleInfo(key: "feed_forward") var feedForward: MLP @ModuleInfo(key: "attention_norm") var attentionNorm: RMSNorm @ModuleInfo(key: "ffn_norm") var ffnNorm: RMSNorm init(_ args: InternLM2Configuration) { self._attention.wrappedValue = Attention(args) self._feedForward.wrappedValue = MLP(dim: args.hiddenSize, hiddenDim: args.intermediateSize) self._attentionNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) self._ffnNorm.wrappedValue = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) } func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { var r = attention(attentionNorm(x), mask: mask, cache: cache) let h = x + r r = feedForward(ffnNorm(h)) let out = h + r return out } } private class InternLM2ModelInner: Module { @ModuleInfo(key: "tok_embeddings") var tokEmbeddings: Embedding let layers: [TransformerBlock] let norm: RMSNorm init(_ args: InternLM2Configuration) { precondition(args.vocabularySize > 0) self._tokEmbeddings.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers).map { _ in TransformerBlock(args) } self.norm = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) } func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = tokEmbeddings(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class InternLM2Model: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] fileprivate let model: InternLM2ModelInner @ModuleInfo(key: "output") var output: Linear? public init(_ args: InternLM2Configuration) { self.vocabularySize = args.vocabularySize self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads } self.model = InternLM2ModelInner(args) if !args.tieWordEmbeddings { self._output.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: false) } } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) 
-> MLXArray { let out = model(inputs, cache: cache) if let output { return output(out) } else { return model.tokEmbeddings.asLinear(out) } } public func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] { // Remove unused precomputed rotary frequencies weights.filter { !$0.key.contains("attention.rope.inv_freq") } } } extension InternLM2Model: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["q_proj", "v_proj"]) } } } public struct InternLM2Configuration: Codable, Sendable { var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var rmsNormEps: Float var vocabularySize: Int var kvHeads: Int var maxPositionEmbeddings: Int = 32768 var ropeTheta: Float = 10000 var ropeTraditional: Bool = false var ropeScaling: [String: StringOrNumber]? var tieWordEmbeddings: Bool = false var bias: Bool = true var kvGroups: Int { attentionHeads / kvHeads } enum CodingKeys: String, CodingKey { case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case kvHeads = "num_key_value_heads" case maxPositionEmbeddings = "max_position_embeddings" case ropeTheta = "rope_theta" case ropeTraditional = "rope_traditional" case ropeScaling = "rope_scaling" case tieWordEmbeddings = "tie_word_embeddings" case bias = "bias" } public init(from decoder: Decoder) throws { let container = try decoder.container(keyedBy: CodingKeys.self) hiddenSize = try container.decode(Int.self, forKey: .hiddenSize) hiddenLayers = try container.decode(Int.self, forKey: .hiddenLayers) intermediateSize = try container.decode(Int.self, forKey: .intermediateSize) attentionHeads = try container.decode(Int.self, forKey: .attentionHeads) rmsNormEps = try container.decode(Float.self, forKey: .rmsNormEps) vocabularySize = try container.decode(Int.self, forKey: .vocabularySize) kvHeads = try container.decodeIfPresent(Int.self, forKey: .kvHeads) ?? attentionHeads maxPositionEmbeddings = try container.decode(Int.self, forKey: .maxPositionEmbeddings) if let ropeTheta = try container.decodeIfPresent(Float.self, forKey: .ropeTheta) { self.ropeTheta = ropeTheta } if let ropeTraditional = try container.decodeIfPresent(Bool.self, forKey: .ropeTraditional) { self.ropeTraditional = ropeTraditional } ropeScaling = try container.decodeIfPresent( [String: StringOrNumber].self, forKey: .ropeScaling) if let tieWordEmbeddings = try container.decodeIfPresent( Bool.self, forKey: .tieWordEmbeddings) { self.tieWordEmbeddings = tieWordEmbeddings } if let bias = try container.decodeIfPresent(Bool.self, forKey: .bias) { self.bias = bias } if let ropeScaling { let requiredKeys: Set<String> = ["factor", "type"] let keys = Set(ropeScaling.keys) if !requiredKeys.isSubset(of: keys) { throw DecodingError.dataCorruptedError( forKey: .ropeScaling, in: container, debugDescription: "rope_scaling must contain keys \(requiredKeys)" ) } if let type = ropeScaling["type"], type != .string("linear") && type != .string("dynamic") { throw DecodingError.dataCorruptedError( forKey: .ropeScaling, in: container, debugDescription: "rope_scaling 'type' currently only supports 'linear' or 'dynamic'" ) } } } }
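Both ports recompute the RoPE base with the same Dynamic NTK rule once the sequence length exceeds max_position_embeddings. A standalone sketch of that formula; all numbers are hypothetical:

def ntk_base(seq_len: int, dims: int = 64, max_pos: int = 2048,
             base: float = 10000.0, scale: float = 1.0) -> float:
    # Below the trained context length the base is unchanged; beyond it, the
    # base grows so the rotary frequencies stretch to cover the longer context.
    if seq_len <= max_pos:
        return base
    return base * ((scale * seq_len / max_pos) - (scale - 1)) ** (dims / (dims - 2))

print(ntk_base(1024))  # 10000.0
print(ntk_base(4096))  # ~20452 -- doubling the context roughly doubles the base here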
LM
Llama
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import Any, Dict, Optional, Union import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention from .rope_utils import initialize_rope @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int rms_norm_eps: float vocab_size: int head_dim: Optional[int] = None max_position_embeddings: Optional[int] = None num_key_value_heads: Optional[int] = None attention_bias: bool = False mlp_bias: bool = False rope_theta: float = 10000 rope_traditional: bool = False rope_scaling: Optional[Dict[str, Union[float, str]]] = None tie_word_embeddings: bool = True def __post_init__(self): if self.num_key_value_heads is None: self.num_key_value_heads = self.num_attention_heads class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads self.n_kv_heads = n_kv_heads = args.num_key_value_heads self.head_dim = head_dim = args.head_dim or args.hidden_size // n_heads self.scale = head_dim**-0.5 if hasattr(args, "attention_bias"): attention_bias = args.attention_bias else: attention_bias = False self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=attention_bias) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=attention_bias) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=attention_bias) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=attention_bias) self.rope = initialize_rope( self.head_dim, args.rope_theta, args.rope_traditional, args.rope_scaling, args.max_position_embeddings, ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) # Prepare the queries, keys and values for the attention computation queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class MLP(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size hidden_dim = args.intermediate_size if hasattr(args, "mlp_bias"): mlp_bias = args.mlp_bias else: mlp_bias = False self.gate_proj = nn.Linear(dim, hidden_dim, bias=mlp_bias) self.down_proj = nn.Linear(hidden_dim, dim, bias=mlp_bias) self.up_proj = nn.Linear(dim, hidden_dim, bias=mlp_bias) def __call__(self, x) -> mx.array: return self.down_proj(nn.silu(self.gate_proj(x)) * self.up_proj(x)) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.num_attention_heads = args.num_attention_heads self.hidden_size = args.hidden_size self.self_attn = Attention(args) self.mlp = MLP(args) self.input_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.post_attention_layernorm = nn.RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) self.args = args def __call__( 
self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.self_attn(self.input_layernorm(x), mask, cache) h = x + r r = self.mlp(self.post_attention_layernorm(h)) out = h + r return out class LlamaModel(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = args.num_hidden_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ TransformerBlock(args=args) for _ in range(args.num_hidden_layers) ] self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, input_embeddings: Optional[mx.array] = None, ): if input_embeddings is not None: h = input_embeddings else: h = self.embed_tokens(inputs) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, cache=c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.model_type = args.model_type self.model = LlamaModel(args) if not args.tie_word_embeddings: self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, input_embeddings: Optional[mx.array] = None, ): out = self.model(inputs, mask, cache, input_embeddings) if self.args.tie_word_embeddings: out = self.model.embed_tokens.as_linear(out) else: out = self.lm_head(out) return out def sanitize(self, weights): # Remove unused precomputed rotary freqs weights = { k: v for k, v in weights.items() if "self_attn.rotary_emb.inv_freq" not in k } if self.args.tie_word_embeddings: weights.pop("lm_head.weight", None) return weights @property def layers(self): return self.model.layers
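A quick shape check for the Llama port above, assuming `Model` and `ModelArgs` are importable (for example from `mlx_lm.models.llama`); the configuration below uses tiny made-up values, not a real checkpoint:

import mlx.core as mx

from mlx_lm.models.llama import Model, ModelArgs  # hypothetical import path

args = ModelArgs(
    model_type="llama",
    hidden_size=64,
    num_hidden_layers=2,
    intermediate_size=128,
    num_attention_heads=4,
    rms_norm_eps=1e-5,
    vocab_size=100,
)
model = Model(args)

tokens = mx.array([[1, 2, 3, 4, 5, 6, 7, 8]])  # one batch of eight token ids
logits = model(tokens)
assert logits.shape == (1, 8, 100)  # next-token logits over the vocabulary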
// Copyright © 2024 Apple Inc. import Foundation import MLX import MLXLMCommon import MLXNN import Tokenizers // port of https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/models/llama.py func computeBaseFrequency( base: Float, dims: Int, ropeType: String, ropeScaling: [String: StringOrNumber]? ) -> Float { if ropeType != "llama3" { return base } guard let ropeScaling = ropeScaling else { return base } guard case .float(let factor) = ropeScaling["factor"], case .float(let lowFreqFactor) = ropeScaling["low_freq_factor"] ?? .float(1.0), case .float(let highFreqFactor) = ropeScaling["high_freq_factor"] ?? .float(4.0), case .float(let oldContextLen) = ropeScaling["original_max_position_embeddings"] ?? .float(8192) else { return base } let lowFreqWavelen = oldContextLen / lowFreqFactor let highFreqWavelen = oldContextLen / highFreqFactor let freqs = (0 ..< dims).compactMap { index -> Float? in if index % 2 == 0 { return pow(base, Float(index) / Float(dims)) } return nil } let newBaseFreqs = freqs.map { freq -> Float in let wavelen = 2 * .pi / freq let smooth = max( 0, min(1, (wavelen - highFreqWavelen) / (lowFreqWavelen - highFreqWavelen))) return freq * ((1 - smooth) * factor + smooth) } return newBaseFreqs.reduce(0, +) / Float(newBaseFreqs.count) } private class DynamicNTKScalingRoPE: Module { let dims: Int let maxPositionEmbeddings: Int let traditional: Bool var base: Float? let scale: Float let ropeType: String let ropeScaling: [String: StringOrNumber]? var freqs: MLXArray? init( dims: Int, maxPositionEmbeddings: Int?, traditional: Bool = false, base: Float = 10000, scale: Float = 1.0, ropeType: String = "default", ropeScaling: [String: StringOrNumber]? = nil ) { self.dims = dims self.maxPositionEmbeddings = maxPositionEmbeddings ?? 2048 self.traditional = traditional self.base = base self.scale = scale self.ropeType = ropeType self.ropeScaling = ropeScaling super.init() computeFreqs() } private func computeFreqs() { if ropeType != "llama3" { freqs = nil return } guard let ropeScaling = ropeScaling, case .float(let factor) = ropeScaling["factor"], case .float(let lowFreqFactor) = ropeScaling["low_freq_factor"] ?? .float(1.0), case .float(let highFreqFactor) = ropeScaling["high_freq_factor"] ?? .float(4.0), case .float(let oldContextLen) = ropeScaling["original_max_position_embeddings"] ?? 
.float(8192), let base else { freqs = nil return } let lowFreqWavelen = oldContextLen / lowFreqFactor let highFreqWavelen = oldContextLen / highFreqFactor let indices = MLXArray(stride(from: 0, to: dims, by: 2)) var frequencies = MLX.pow(base, indices / Float(dims)) let wavelens = 2 * Float.pi * frequencies frequencies = MLX.where( wavelens .> MLXArray(lowFreqWavelen), frequencies * factor, frequencies) let isMediumFreq = MLX.logicalAnd( wavelens .> MLXArray(highFreqWavelen), wavelens .< MLXArray(lowFreqWavelen) ) let smoothFactors = (oldContextLen / wavelens - lowFreqFactor) / (highFreqFactor - lowFreqFactor) let smoothFreqs = frequencies / ((1 - smoothFactors) / factor + smoothFactors) freqs = MLX.where(isMediumFreq, smoothFreqs, frequencies) self.base = nil } func callAsFunction(_ x: MLXArray, offset: Int = 0) -> MLXArray { MLXFast.RoPE( x, dimensions: dims, traditional: traditional, base: base, scale: scale, offset: offset, freqs: freqs ) } } private class Attention: Module { let args: LlamaConfiguration let scale: Float @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "o_proj") var wo: Linear let rope: DynamicNTKScalingRoPE init(_ args: LlamaConfiguration) { self.args = args let dim = args.hiddenSize let heads = args.attentionHeads let kvHeads = args.kvHeads let headDim = args.resolvedHeadDimensions self.scale = pow(Float(headDim), -0.5) self._wq.wrappedValue = Linear(dim, heads * headDim, bias: args.attentionBias) self._wk.wrappedValue = Linear(dim, kvHeads * headDim, bias: args.attentionBias) self._wv.wrappedValue = Linear(dim, kvHeads * headDim, bias: args.attentionBias) self._wo.wrappedValue = Linear(heads * headDim, dim, bias: args.attentionBias) self.rope = DynamicNTKScalingRoPE( dims: headDim, maxPositionEmbeddings: args.maxPositionEmbeddings, traditional: args.ropeTraditional, base: args.ropeTheta, scale: 1.0, ropeType: { if case .string(let value) = args.ropeScaling?["type"] { return value } else { return "default" } }(), ropeScaling: args.ropeScaling) } func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? 
) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = wq(x) var keys = wk(x) var values = wv(x) // Prepare the queries, keys and values for the attention computation queries = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "gate_proj") var gate: Linear @ModuleInfo(key: "down_proj") var down: Linear @ModuleInfo(key: "up_proj") var up: Linear init(_ args: LlamaConfiguration) { self._gate.wrappedValue = Linear(args.hiddenSize, args.intermediateSize, bias: args.mlpBias) self._down.wrappedValue = Linear(args.intermediateSize, args.hiddenSize, bias: args.mlpBias) self._up.wrappedValue = Linear(args.hiddenSize, args.intermediateSize, bias: args.mlpBias) } func callAsFunction(_ x: MLXArray) -> MLXArray { let activation = silu(gate(x)) return down(activation * up(x)) } } private class TransformerBlock: Module { @ModuleInfo(key: "self_attn") var attention: Attention @ModuleInfo(key: "mlp") var mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: RMSNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: RMSNorm init(_ args: LlamaConfiguration) { self._attention.wrappedValue = Attention(args) self._mlp.wrappedValue = MLP(args) self._inputLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) self._postAttentionLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) } func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { var r = attention(inputLayerNorm(x), mask: mask, cache: cache) let h = x + r r = mlp(postAttentionLayerNorm(h)) let out = h + r return out } } private class LlamaModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding let layers: [TransformerBlock] let norm: RMSNorm init(_ args: LlamaConfiguration) { precondition(args.vocabularySize > 0) self._embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers).map { _ in TransformerBlock(args) } self.norm = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) } func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } /// Model for Llama and Mistral model types. public class LlamaModel: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] fileprivate let model: LlamaModelInner @ModuleInfo(key: "lm_head") var lmHead: Linear? 
public init(_ args: LlamaConfiguration) { self.vocabularySize = args.vocabularySize self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads } self.model = LlamaModelInner(args) if !args.tieWordEmbeddings { self._lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: false) } } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray { let out = model(inputs, cache: cache) if let lmHead { return lmHead(out) } else { return model.embedTokens.asLinear(out) } } public func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] { // Remove unused precomputed rotary frequencies weights.filter { !$0.key.contains("self_attn.rotary_emb.inv_freq") } } public func messageGenerator(tokenizer: any Tokenizer) -> any MessageGenerator { // some models allow the system role and some do not -- this is enforced // by the chat template (code). do { let probe = [ [ "role": "system", "content": "test", ] ] _ = try tokenizer.applyChatTemplate(messages: probe) return DefaultMessageGenerator() } catch { return NoSystemMessageGenerator() } } } public struct LlamaConfiguration: Codable, Sendable { var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var headDimensions: Int? var rmsNormEps: Float var vocabularySize: Int var kvHeads: Int var maxPositionEmbeddings: Int? var ropeTheta: Float = 10_000 var ropeTraditional: Bool = false var ropeScaling: [String: StringOrNumber]? var tieWordEmbeddings: Bool = true var attentionBias: Bool = false var mlpBias: Bool = false public init( hiddenSize: Int, hiddenLayers: Int, intermediateSize: Int, attentionHeads: Int, headDimensions: Int? = nil, rmsNormEps: Float, vocabularySize: Int, kvHeads: Int, maxPositionEmbeddings: Int? = nil, ropeTheta: Float = 10_000, ropeTraditional: Bool = false, ropeScaling: [String: StringOrNumber]? = nil, tieWordEmbeddings: Bool = true, attentionBias: Bool = false, mlpBias: Bool = false ) { self.hiddenSize = hiddenSize self.hiddenLayers = hiddenLayers self.intermediateSize = intermediateSize self.attentionHeads = attentionHeads self.headDimensions = headDimensions self.rmsNormEps = rmsNormEps self.vocabularySize = vocabularySize self.kvHeads = kvHeads self.maxPositionEmbeddings = maxPositionEmbeddings self.ropeTheta = ropeTheta self.ropeTraditional = ropeTraditional self.ropeScaling = ropeScaling self.tieWordEmbeddings = tieWordEmbeddings self.attentionBias = attentionBias self.mlpBias = mlpBias } var resolvedHeadDimensions: Int { headDimensions ?? 
(hiddenSize / attentionHeads) } enum CodingKeys: String, CodingKey { case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case headDimensions = "head_dim" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case kvHeads = "num_key_value_heads" case maxPositionEmbeddings = "max_position_embeddings" case ropeTheta = "rope_theta" case ropeTraditional = "rope_traditional" case ropeScaling = "rope_scaling" case tieWordEmbeddings = "tie_word_embeddings" case attentionBias = "attention_bias" case mlpBias = "mlp_bias" } public init(from decoder: Swift.Decoder) throws { let container = try decoder.container(keyedBy: CodingKeys.self) hiddenSize = try container.decode(Int.self, forKey: .hiddenSize) hiddenLayers = try container.decode(Int.self, forKey: .hiddenLayers) intermediateSize = try container.decode(Int.self, forKey: .intermediateSize) attentionHeads = try container.decode(Int.self, forKey: .attentionHeads) headDimensions = try container.decodeIfPresent(Int.self, forKey: .headDimensions) rmsNormEps = try container.decode(Float.self, forKey: .rmsNormEps) vocabularySize = try container.decode(Int.self, forKey: .vocabularySize) kvHeads = try container.decodeIfPresent(Int.self, forKey: .kvHeads) ?? attentionHeads maxPositionEmbeddings = try container.decodeIfPresent( Int.self, forKey: .maxPositionEmbeddings) if let ropeTheta = try container.decodeIfPresent(Float.self, forKey: .ropeTheta) { self.ropeTheta = ropeTheta } if let ropeTraditional = try container.decodeIfPresent(Bool.self, forKey: .ropeTraditional) { self.ropeTraditional = ropeTraditional } ropeScaling = try container.decodeIfPresent( [String: StringOrNumber].self, forKey: .ropeScaling) if let tieWordEmbeddings = try container.decodeIfPresent( Bool.self, forKey: .tieWordEmbeddings) { self.tieWordEmbeddings = tieWordEmbeddings } if let attentionBias = try container.decodeIfPresent(Bool.self, forKey: .attentionBias) { self.attentionBias = attentionBias } if let mlpBias = try container.decodeIfPresent(Bool.self, forKey: .mlpBias) { self.mlpBias = mlpBias } if let ropeScaling { if ropeScaling["factor"] == nil { throw DecodingError.dataCorruptedError( forKey: .ropeScaling, in: container, debugDescription: "rope_scaling must contain 'factor'") } if let ropeType = ropeScaling["type"] ?? ropeScaling["rope_type"] { if case .string = ropeType { let options = [ StringOrNumber.string("linear"), StringOrNumber.string("dynamic"), StringOrNumber.string("llama3"), ] if !options.contains(ropeType) { throw DecodingError.dataCorruptedError( forKey: .ropeScaling, in: container, debugDescription: "rope_scaling 'type' currently only supports 'linear', 'dynamic', or 'llama3'" ) } } } else { throw DecodingError.dataCorruptedError( forKey: .ropeScaling, in: container, debugDescription: "rope_scaling must contain either 'type' or 'rope_type'") } } } } // MARK: - LoRA extension LlamaModel: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["q_proj", "v_proj"]) } } }
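For reference, the llama3-style frequency correction implemented in `computeFreqs` above (and on the Python side via `rope_utils`) can be restated per frequency as below. This is a scalar sketch of the same rule, not the vectorized implementation; the defaults mirror the fallbacks in the Swift code (`low_freq_factor = 1`, `high_freq_factor = 4`, `original_max_position_embeddings = 8192`):

import math

def llama3_scale_freq(freq, factor, low_freq_factor=1.0, high_freq_factor=4.0,
                      old_context_len=8192.0):
    # Rescale one rotary (angular) frequency according to its wavelength.
    wavelen = 2 * math.pi / freq
    low_freq_wavelen = old_context_len / low_freq_factor
    high_freq_wavelen = old_context_len / high_freq_factor
    if wavelen > low_freq_wavelen:
        return freq / factor        # long wavelengths are slowed down by `factor`
    if wavelen < high_freq_wavelen:
        return freq                 # short wavelengths are left untouched
    # In between, interpolate smoothly between the two regimes.
    smooth = (old_context_len / wavelen - low_freq_factor) / (
        high_freq_factor - low_freq_factor)
    return freq * ((1 - smooth) / factor + smooth)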
LM
MiMo
# Copyright © 2023-2025 Apple Inc. from dataclasses import dataclass from typing import Any, Dict, Optional, Union import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention from .rope_utils import initialize_rope @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int rms_norm_eps: float vocab_size: int num_key_value_heads: int max_position_embeddings: int = 32768 rope_theta: float = 10000.0 rope_traditional: bool = False rope_scaling: Optional[Dict[str, Union[float, str]]] = None tie_word_embeddings: bool = False num_nextn_predict_layers: int = 2 class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads assert args.num_key_value_heads is not None self.n_kv_heads = n_kv_heads = args.num_key_value_heads head_dim = args.hidden_size // n_heads self.scale = head_dim**-0.5 self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=True) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=True) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=True) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=False) self.rope = initialize_rope( head_dim, base=args.rope_theta, traditional=args.rope_traditional, scaling_config=args.rope_scaling, max_position_embeddings=args.max_position_embeddings, ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class MLP(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.gate_proj = nn.Linear(dim, hidden_dim, bias=False) self.down_proj = nn.Linear(hidden_dim, dim, bias=False) self.up_proj = nn.Linear(dim, hidden_dim, bias=False) def __call__(self, x) -> mx.array: return self.down_proj(nn.silu(self.gate_proj(x)) * self.up_proj(x)) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.num_attention_heads = args.num_attention_heads self.hidden_size = args.hidden_size self.self_attn = Attention(args) self.mlp = MLP(args.hidden_size, args.intermediate_size) self.input_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.post_attention_layernorm = nn.RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) self.args = args def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.self_attn(self.input_layernorm(x), mask, cache) h = x + r r = self.mlp(self.post_attention_layernorm(h)) out = h + r return out class MiMoModel(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = args.num_hidden_layers self.num_nextn_predict_layers = 
args.num_nextn_predict_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ TransformerBlock(args=args) for _ in range(args.num_hidden_layers) ] self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): h = self.embed_tokens(inputs) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, c) h = self.norm(h) return h class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.model_type = args.model_type self.model = MiMoModel(args) if not args.tie_word_embeddings: self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.model(inputs, mask, cache) if self.args.tie_word_embeddings: out = self.model.embed_tokens.as_linear(out) else: out = self.lm_head(out) return out def sanitize(self, weights): if self.args.tie_word_embeddings: weights.pop("lm_head.weight", None) return { k: v for k, v in weights.items() if "self_attn.rotary_emb.inv_freq" not in k and not k.startswith("model.mtp_layers.") } @property def layers(self): return self.model.layers
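MiMo checkpoints also carry extra prediction layers (the `model.mtp_layers.*` weights tied to `num_nextn_predict_layers`) that this port does not implement, so `sanitize` above drops them along with the precomputed rotary frequencies, and drops `lm_head.weight` when embeddings are tied. A small illustration of that filter with made-up weight names:

weights = {  # made-up keys, for illustration only
    "model.embed_tokens.weight": "...",
    "model.layers.0.self_attn.q_proj.weight": "...",
    "model.layers.0.self_attn.rotary_emb.inv_freq": "...",
    "model.mtp_layers.0.proj.weight": "...",
}

kept = {
    k: v
    for k, v in weights.items()
    if "self_attn.rotary_emb.inv_freq" not in k
    and not k.startswith("model.mtp_layers.")
}
assert sorted(kept) == [
    "model.embed_tokens.weight",
    "model.layers.0.self_attn.q_proj.weight",
]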
// // MiMo.swift // LLM // // Created by John Mai on 2025/5/3. // import Foundation import MLX import MLXLMCommon import MLXNN private class Attention: Module { let args: MiMoConfiguration let scale: Float @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "o_proj") var wo: Linear let rope: RoPE public init(_ args: MiMoConfiguration) { self.args = args let dim = args.hiddenSize let heads = args.attentionHeads let kvHeads = args.kvHeads let headDim = args.hiddenSize / heads self.scale = pow(Float(headDim), -0.5) _wq.wrappedValue = Linear(dim, heads * headDim, bias: true) _wk.wrappedValue = Linear(dim, kvHeads * headDim, bias: true) _wv.wrappedValue = Linear(dim, kvHeads * headDim, bias: true) _wo.wrappedValue = Linear(heads * headDim, dim, bias: false) let ropeScale: Float if let ropeScaling = args.ropeScaling, ropeScaling["type"] == .string("linear"), let factor = ropeScaling["factor"] { if let v = factor.asFloat() { ropeScale = 1 / v } else { fatalError("ropeScaling.factor must be a float") } } else { ropeScale = 1 } self.rope = RoPE( dimensions: headDim, traditional: args.ropeTraditional, base: args.ropeTheta, scale: ropeScale) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = wq(x) var keys = wk(x) var values = wv(x) // prepare the queries, keys and values for the attention computation queries = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "gate_proj") var gate: Linear @ModuleInfo(key: "down_proj") var down: Linear @ModuleInfo(key: "up_proj") var up: Linear public init(dimensions: Int, hiddenDimensions: Int) { _gate.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) _down.wrappedValue = Linear(hiddenDimensions, dimensions, bias: false) _up.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) } public func callAsFunction(_ x: MLXArray) -> MLXArray { down(silu(gate(x)) * up(x)) } } private class TransformerBlock: Module { @ModuleInfo(key: "self_attn") var attention: Attention let mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: RMSNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: RMSNorm public init(_ args: MiMoConfiguration) { _attention.wrappedValue = Attention(args) self.mlp = MLP(dimensions: args.hiddenSize, hiddenDimensions: args.intermediateSize) _inputLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) _postAttentionLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? 
) -> MLXArray { var r = attention(inputLayerNorm(x), mask: mask, cache: cache) let h = x + r r = mlp(postAttentionLayerNorm(h)) let out = h + r return out } } private class MiMoModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding fileprivate let layers: [TransformerBlock] let norm: RMSNorm let numNextnPredictLayers: Int public init(_ args: MiMoConfiguration) { precondition(args.vocabularySize > 0) _embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers).map { _ in TransformerBlock(args) } self.norm = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) self.numNextnPredictLayers = args.numNextnPredictLayers } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class MiMoModel: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] private let model: MiMoModelInner let configuration: MiMoConfiguration @ModuleInfo(key: "lm_head") var lmHead: Linear? public init(_ args: MiMoConfiguration) { self.configuration = args self.vocabularySize = args.vocabularySize self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads } self.model = MiMoModelInner(args) if !args.tieWordEmbeddings { _lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: false) } } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { let out = model(inputs, cache: cache) if let lmHead = lmHead { return lmHead(out) } else { return model.embedTokens.asLinear(out) } } public func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] { var weights = weights if configuration.tieWordEmbeddings { weights.removeValue(forKey: "lm_head.weight") } // Remove unused precomputed rotary freqs and mtp_layers return weights.filter { key, _ in !key.contains("self_attn.rotary_emb.inv_freq") && !key.hasPrefix("model.mtp_layers.") } } } public struct MiMoConfiguration: Codable, Sendable { var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var rmsNormEps: Float var vocabularySize: Int var kvHeads: Int var maxPositionEmbeddings: Int var ropeTheta: Float var ropeTraditional: Bool var ropeScaling: [String: StringOrNumber]? 
var tieWordEmbeddings: Bool var numNextnPredictLayers: Int enum CodingKeys: String, CodingKey { case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case kvHeads = "num_key_value_heads" case maxPositionEmbeddings = "max_position_embeddings" case ropeTheta = "rope_theta" case ropeTraditional = "rope_traditional" case ropeScaling = "rope_scaling" case tieWordEmbeddings = "tie_word_embeddings" case numNextnPredictLayers = "num_nextn_predict_layers" } public init(from decoder: Decoder) throws { let container = try decoder.container(keyedBy: CodingKeys.self) self.hiddenSize = try container.decode(Int.self, forKey: .hiddenSize) self.hiddenLayers = try container.decode(Int.self, forKey: .hiddenLayers) self.intermediateSize = try container.decode(Int.self, forKey: .intermediateSize) self.attentionHeads = try container.decode(Int.self, forKey: .attentionHeads) self.rmsNormEps = try container.decode(Float.self, forKey: .rmsNormEps) self.vocabularySize = try container.decode(Int.self, forKey: .vocabularySize) self.kvHeads = try container.decode(Int.self, forKey: .kvHeads) self.maxPositionEmbeddings = try container.decodeIfPresent(Int.self, forKey: .maxPositionEmbeddings) ?? 32768 self.ropeTheta = try container.decodeIfPresent(Float.self, forKey: .ropeTheta) ?? 10000.0 self.ropeTraditional = try container.decodeIfPresent(Bool.self, forKey: .ropeTraditional) ?? false self.ropeScaling = try container.decodeIfPresent( [String: StringOrNumber].self, forKey: .ropeScaling) self.tieWordEmbeddings = try container.decodeIfPresent(Bool.self, forKey: .tieWordEmbeddings) ?? false self.numNextnPredictLayers = try container.decodeIfPresent(Int.self, forKey: .numNextnPredictLayers) ?? 2 } } // MARK: - LoRA extension MiMoModel: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["q_proj", "v_proj"]) } } }
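Both MiMo ports map a linear rope_scaling entry to a RoPE scale of 1 / factor, which compresses positions so the original rotary range covers a longer context. A tiny numeric example with a hypothetical factor:

rope_scaling = {"type": "linear", "factor": 2.0}   # hypothetical config entry
rope_scale = 1.0 / rope_scaling["factor"]          # 0.5, as in Attention above

# Position 4096 is rotated as if it were position 2048, so the model can
# address roughly factor-times the original context length.
assert 4096 * rope_scale == 2048.0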
LM
OpenELM
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import Any, Dict, List, Optional, Union import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention @dataclass class ModelArgs(BaseModelArgs): model_type: str head_dim: int num_transformer_layers: int model_dim: int vocab_size: int ffn_dim_divisor: int num_query_heads: List num_kv_heads: List ffn_multipliers: List ffn_with_glu: bool = True normalize_qk_projections: bool = True share_input_output_layers: bool = True rms_norm_eps: float = 1e-6 rope_freq_constant: float = 10000 def make_divisible( v: Union[float, int], divisor: Optional[int] = 8, min_value: Optional[Union[float, int]] = None, ) -> Union[float, int]: """ This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by the divisor It can be seen at: https://github.com/tensorflow/models/blob/2cfc99eff5e5eb729c6793d2f3d03aa1c9be2b15/research/slim/nets/mobilenet/mobilenet.py#L62 Args: v: input value divisor: default to 8 min_value: minimum divisor value Returns: new_v: new divisible value """ if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < 0.9 * v: new_v += divisor return new_v class Attention(nn.Module): def __init__(self, args: ModelArgs, layer_id: int): super().__init__() self.head_dim = head_dim = args.head_dim self.layer_id = layer_id self.model_dim = model_dim = args.model_dim self.n_heads = n_heads = args.num_query_heads[layer_id] self.n_kv_heads = n_kv_heads = args.num_kv_heads[layer_id] self.scale = head_dim**-0.5 op_size = (n_heads + (n_kv_heads * 2)) * head_dim self.qkv_proj = nn.Linear(model_dim, op_size, bias=False) self.out_proj = nn.Linear(n_heads * head_dim, model_dim, bias=False) self.normalize_qk_projections = args.normalize_qk_projections if self.normalize_qk_projections: self.q_norm = nn.RMSNorm(head_dim, eps=args.rms_norm_eps) self.k_norm = nn.RMSNorm(head_dim, eps=args.rms_norm_eps) self.rope = nn.RoPE(head_dim, traditional=False, base=args.rope_freq_constant) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape qkv = self.qkv_proj(x) qkv = qkv.reshape( B, L, self.n_heads + (self.n_kv_heads * 2), self.head_dim ).transpose(0, 2, 1, 3) queries, keys, values = mx.split( qkv, [self.n_heads, self.n_heads + self.n_kv_heads], axis=1 ) # Prepare the queries, keys and values for the attention computation if self.normalize_qk_projections: queries = self.q_norm(queries) keys = self.k_norm(keys) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.out_proj(output) class MLP(nn.Module): def __init__(self, args: ModelArgs, layer_id: int): super().__init__() self.args = args dim = args.model_dim ffn_multiplier = args.ffn_multipliers[layer_id] intermediate_dim = int( make_divisible( ffn_multiplier * args.model_dim, divisor=args.ffn_dim_divisor, ) ) self.proj_1 = nn.Linear(dim, 2 * intermediate_dim, bias=False) self.proj_2 = nn.Linear(intermediate_dim, dim, bias=False) def 
__call__(self, x) -> mx.array: x = self.proj_1(x) gate, x = mx.split(x, 2, axis=-1) return self.proj_2(nn.silu(gate) * x) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs, layer_id: int): super().__init__() dim = args.model_dim self.attn = Attention(args, layer_id=layer_id) self.ffn = MLP(args, layer_id=layer_id) self.ffn_norm = nn.RMSNorm(dim, eps=args.rms_norm_eps) self.attn_norm = nn.RMSNorm(dim, eps=args.rms_norm_eps) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.attn(self.attn_norm(x), mask, cache) h = x + r r = self.ffn(self.ffn_norm(h)) out = h + r return out class OpenELMModel(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_transformer_layers = args.num_transformer_layers assert self.vocab_size > 0 self.token_embeddings = nn.Embedding(args.vocab_size, args.model_dim) self.layers = [ TransformerBlock(args, layer_id=layer_id) for layer_id in range(self.num_transformer_layers) ] self.norm = nn.RMSNorm(args.model_dim, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): h = self.token_embeddings(inputs) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, cache=c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.model_type = args.model_type self.transformer = OpenELMModel(args) if not args.share_input_output_layers: self.lm_head = nn.Linear(args.model_dim, args.vocab_size, bias=False) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.transformer(inputs, mask, cache) if self.args.share_input_output_layers: out = self.transformer.token_embeddings.as_linear(out) else: out = self.lm_head(out) return out @property def layers(self): return self.transformer.layers
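`make_divisible` above rounds the FFN width implied by `ffn_multiplier * model_dim` to a multiple of `ffn_dim_divisor`, bumping up whenever rounding would fall more than 10% below the requested value. Two worked examples with arbitrary numbers (the last lines use hypothetical OpenELM-like dimensions):

assert make_divisible(101, divisor=8) == 104  # round to the nearest multiple of 8
assert make_divisible(90, divisor=8) == 88    # 88 >= 0.9 * 90, so rounding down is fine

ffn_multiplier, model_dim, ffn_dim_divisor = 1.0, 1280, 256  # hypothetical values
intermediate_dim = int(make_divisible(ffn_multiplier * model_dim, divisor=ffn_dim_divisor))
assert intermediate_dim == 1280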
// // OpenELM.swift // LLM // // Created by Sachin Desai on 2024/4/27. // import Foundation import MLX import MLXLMCommon import MLXNN func computeHeads(modelDim: Int, headDim: Int) -> Int { assert(modelDim % headDim == 0, "modelDim must be divisible by headDim") return modelDim / headDim } func makeDivisible(_ v: Float, divisor: Int = 8, minValue: Float? = nil) -> Int { let minVal = minValue ?? Float(divisor) var roundDown = max(minVal, Float(Int((v + Float(divisor) / 2) / Float(divisor)) * divisor)) if roundDown < 0.9 * v { roundDown += Float(divisor) } return Int(roundDown) } private class MultiHeadCausalAttention: Module { let scale: Float let heads: Int let headDim: Int let kvHeads: Int @ModuleInfo(key: "qkv_proj") var qkvProj: Linear @ModuleInfo(key: "out_proj") var outProj: Linear @ModuleInfo(key: "q_norm") var qNorm: RMSNorm? @ModuleInfo(key: "k_norm") var kNorm: RMSNorm? let rope: RoPE public init(_ args: OpenElmConfiguration, layerId: Int) { self.headDim = args.headDimensions let modelDim = args.modelDim self.heads = args.numQueryHeads[layerId] self.kvHeads = args.kvHeads[layerId] self.scale = pow(Float(headDim), -0.5) let opSize = (heads + (kvHeads * 2)) * headDim self._qkvProj.wrappedValue = Linear(modelDim, opSize, bias: false) self._outProj.wrappedValue = Linear(heads * headDim, modelDim, bias: false) if args.normalizeQkProjections { self._qNorm.wrappedValue = RMSNorm(dimensions: headDim, eps: args.rmsNormEps) self._kNorm.wrappedValue = RMSNorm(dimensions: headDim, eps: args.rmsNormEps) } self.rope = RoPE( dimensions: headDim, traditional: args.ropeTraditional, base: args.ropeTheta) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) let qkv = qkvProj(x).reshaped(B, L, heads + (kvHeads * 2), headDim).transposed(0, 2, 1, 3) let qkvSplit = split(qkv, indices: [heads, heads + kvHeads], axis: 1) var queries = qkvSplit[0] var keys = qkvSplit[1] var values = qkvSplit[2] if let qNorm, let kNorm { queries = qNorm(queries) keys = kNorm(keys) } if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, heads * headDim) return outProj(output) } } private class FeedForwardNetwork: Module, UnaryLayer { @ModuleInfo var proj_1: Linear @ModuleInfo var proj_2: Linear public init(_ args: OpenElmConfiguration, layedId: Int) { let dim = args.modelDim let ffnMultiplier = args.ffnMultipliers[layedId] let intermediateDim = Int( makeDivisible(Float(ffnMultiplier) * Float(dim), divisor: args.ffnDimDivisor)) self.proj_1 = Linear(dim, 2 * intermediateDim, bias: false) self.proj_2 = Linear(intermediateDim, dim, bias: false) } public func callAsFunction(_ x: MLXArray) -> MLXArray { let a = proj_1(x) let b = split(a, parts: 2, axis: -1) let gate = b[0] let x = b[1] return proj_2(silu(gate) * x) } } private class TransformerDecoderLayer: Module { @ModuleInfo(key: "attn") var attn: MultiHeadCausalAttention let ffn: FeedForwardNetwork @ModuleInfo(key: "ffn_norm") var ffnNorm: RMSNorm @ModuleInfo(key: "attn_norm") var attnNorm: RMSNorm public init(_ args: OpenElmConfiguration, layerId: Int) { let dim = args.modelDim self._attn.wrappedValue = MultiHeadCausalAttention(args, layerId: layerId) self.ffn = FeedForwardNetwork(args, layedId: layerId) 
self._ffnNorm.wrappedValue = RMSNorm(dimensions: dim, eps: args.rmsNormEps) self._attnNorm.wrappedValue = RMSNorm(dimensions: dim, eps: args.rmsNormEps) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { var r = attn(attnNorm(x), mask: mask, cache: cache) let h = x + r r = ffn(ffnNorm(h)) let out = h + r return out } } class OpenELMModelInner: Module { @ModuleInfo(key: "token_embeddings") var embedTokens: Embedding fileprivate let layers: [TransformerDecoderLayer] fileprivate let norm: RMSNorm public init(_ args: OpenElmConfiguration) { precondition(args.vocabularySize > 0) self._embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.modelDim) self.layers = (0 ..< args.numTransformerLayers) .map { layerId in TransformerDecoderLayer(args, layerId: layerId) } self.norm = RMSNorm(dimensions: args.modelDim, eps: args.rmsNormEps) } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class OpenELMModel: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] let transformer: OpenELMModelInner @ModuleInfo(key: "lm_head") var lmHead: Linear? public init(_ args: OpenElmConfiguration) { self.vocabularySize = args.vocabularySize self.kvHeads = args.kvHeads self.transformer = OpenELMModelInner(args) if !args.shareInputOutputLayers { self._lmHead.wrappedValue = Linear( args.modelDim, args.vocabularySize, bias: false) } } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?)
-> MLXArray { var out = transformer(inputs, cache: cache) if let lmHead { out = lmHead(out) } else { out = transformer.embedTokens.asLinear(out) } return out } } public struct OpenElmConfiguration: Codable, Sendable { var modelType: String var headDimensions: Int var numTransformerLayers: Int var modelDim: Int var vocabularySize: Int var ffnDimDivisor: Int var numQueryHeads: [Int] = [] var kvHeads: [Int] = [] var ffnWithGlu: Bool = true var normalizeQkProjections: Bool = true var shareInputOutputLayers: Bool = true var rmsNormEps: Float = 1e-6 var ropeTheta: Float = 10_000 var ropeTraditional: Bool = false var numGqaGroups: Int = 4 var ffnMultipliers: [Float] = [0.5, 4.0] var qkvMultiplier: [Float] = [0.5, 1.0] enum CodingKeys: String, CodingKey { case modelType = "model_type" case headDimensions = "head_dim" case numTransformerLayers = "num_transformer_layers" case modelDim = "model_dim" case vocabularySize = "vocab_size" case ffnDimDivisor = "ffn_dim_divisor" case ffnMultipliers = "ffn_multipliers" case ffnWithGlu = "ffn_with_glu" case normalizeQkProjections = "normalize_qk_projections" case shareInputOutputLayers = "share_input_output_layers" } public init(from decoder: Decoder) throws { // custom implementation to handle optional keys with required values let container: KeyedDecodingContainer<OpenElmConfiguration.CodingKeys> = try decoder.container( keyedBy: OpenElmConfiguration.CodingKeys.self) self.modelType = try container.decode( String.self, forKey: OpenElmConfiguration.CodingKeys.modelType) self.headDimensions = try container.decode( Int.self, forKey: OpenElmConfiguration.CodingKeys.headDimensions) self.numTransformerLayers = try container.decode( Int.self, forKey: OpenElmConfiguration.CodingKeys.numTransformerLayers) self.modelDim = try container.decode( Int.self, forKey: OpenElmConfiguration.CodingKeys.modelDim) self.vocabularySize = try container.decode( Int.self, forKey: OpenElmConfiguration.CodingKeys.vocabularySize) self.ffnDimDivisor = try container.decode( Int.self, forKey: OpenElmConfiguration.CodingKeys.ffnDimDivisor) let qkvMultipliers = stride( from: qkvMultiplier[0], through: qkvMultiplier[1], by: (qkvMultiplier[1] - qkvMultiplier[0]) / Float(numTransformerLayers - 1) ) .map { round($0 * 100) / 100 } let headMultipleOf = numGqaGroups let queryDims = qkvMultipliers.map { a in makeDivisible(Float(self.modelDim) * a, divisor: self.headDimensions * headMultipleOf) } self.numQueryHeads = queryDims.map { qDim in Int(computeHeads(modelDim: qDim, headDim: self.headDimensions)) } self.kvHeads = self.numQueryHeads.map { qHeads in qHeads / numGqaGroups } self.ffnMultipliers = stride( from: ffnMultipliers[0], through: ffnMultipliers[1], by: (ffnMultipliers[1] - ffnMultipliers[0]) / Float(numTransformerLayers - 1) ) .map { round($0 * 100) / 100 } self.ffnWithGlu = try container.decodeIfPresent( Bool.self, forKey: OpenElmConfiguration.CodingKeys.ffnWithGlu) ?? true self.normalizeQkProjections = try container.decodeIfPresent( Bool.self, forKey: OpenElmConfiguration.CodingKeys.normalizeQkProjections) ?? true self.shareInputOutputLayers = try container.decodeIfPresent( Bool.self, forKey: OpenElmConfiguration.CodingKeys.shareInputOutputLayers) ?? true } } // MARK: - LoRA extension OpenELMModel: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { transformer.layers.map { ($0.attn, ["qkv_proj"]) } } }
LM
Phi
# Copyright © 2023-2024 Apple Inc. import math from dataclasses import dataclass import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention @dataclass class ModelArgs(BaseModelArgs): model_type: str = "phi" max_position_embeddings: int = 2048 vocab_size: int = 51200 hidden_size: int = 2560 num_attention_heads: int = 32 num_hidden_layers: int = 32 num_key_value_heads: int = 32 partial_rotary_factor: float = 0.4 intermediate_size: int = 10240 layer_norm_eps: float = 1e-5 rope_theta: float = 10000.0 def __post_init__(self): if self.num_key_value_heads is None: self.num_key_value_heads = self.num_attention_heads class PhiAttention(nn.Module): def __init__(self, config: ModelArgs): super().__init__() self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.repeats = self.num_heads // self.num_key_value_heads self.rope_theta = config.rope_theta self.partial_rotary_factor = config.partial_rotary_factor if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." ) self.q_proj = nn.Linear( self.hidden_size, self.num_heads * self.head_dim, bias=True ) self.k_proj = nn.Linear( self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True ) self.v_proj = nn.Linear( self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True ) self.dense = nn.Linear( self.num_heads * self.head_dim, self.hidden_size, bias=True ) self.rope = nn.RoPE( int(self.partial_rotary_factor * self.head_dim), traditional=False, base=self.rope_theta, ) def __call__(self, x, mask=None, cache=None): queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) # Extract some shapes B, L, D = queries.shape n_heads, n_kv_heads = self.num_heads, self.num_key_value_heads # Prepare the queries, keys and values for the attention computation queries = queries.reshape( B, L, n_heads, -1, ).moveaxis(1, 2) keys = keys.reshape(B, L, n_kv_heads, -1).moveaxis(1, 2) values = values.reshape(B, L, n_kv_heads, -1).moveaxis(1, 2) # Add RoPE to the queries and keys and combine them with the cache if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) scale = math.sqrt(1 / queries.shape[-1]) output = scaled_dot_product_attention( queries.astype(mx.float32), keys, values, cache=cache, scale=scale, mask=mask, ).astype(values.dtype) output = output.moveaxis(2, 1).reshape(B, L, -1) return self.dense(output) class PhiMLP(nn.Module): def __init__(self, config: ModelArgs): super().__init__() self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def __call__(self, x) -> mx.array: return self.fc2(nn.gelu_approx(self.fc1(x))) class PhiDecoderLayer(nn.Module): def __init__(self, config: ModelArgs): super().__init__() self.self_attn = PhiAttention(config=config) self.input_layernorm = nn.LayerNorm( config.hidden_size, eps=config.layer_norm_eps ) self.mlp = PhiMLP(config) def __call__(self, x, mask, cache): h = self.input_layernorm(x) attn_h = self.self_attn(h, mask, cache) ff_h = self.mlp(h) return attn_h + ff_h + x class PhiModel(nn.Module): def 
__init__(self, config: ModelArgs): super().__init__() self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size) self.layers = [PhiDecoderLayer(config) for i in range(config.num_hidden_layers)] self.final_layernorm = nn.LayerNorm( config.hidden_size, eps=config.layer_norm_eps ) def __call__(self, x, mask, cache): x = self.embed_tokens(x) if mask is None: mask = create_attention_mask(x, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): x = layer(x, mask, c) return self.final_layernorm(x) class Model(nn.Module): def __init__(self, config: ModelArgs): super().__init__() self.model_type = config.model_type self.model = PhiModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=True) self.args = config def __call__( self, x: mx.array, mask: mx.array = None, cache=None, ) -> mx.array: y = self.model(x, mask, cache) return self.lm_head(y) @property def layers(self): return self.model.layers
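Phi applies rotary position embeddings to only a fraction of each head (`partial_rotary_factor`), leaving the remaining dimensions unrotated. With the defaults in `ModelArgs` above the split works out as follows:

hidden_size, num_heads, partial_rotary_factor = 2560, 32, 0.4  # defaults above
head_dim = hidden_size // num_heads              # 80 dims per head
rotary_dims = int(partial_rotary_factor * head_dim)
assert (head_dim, rotary_dims) == (80, 32)       # RoPE rotates 32 of the 80 dims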
// Copyright © 2024 Apple Inc. import Foundation import MLX import MLXLMCommon import MLXNN // https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/models/phi.py private class PhiAttention: Module { let args: PhiConfiguration let heads: Int let headDim: Int @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "dense") var dense: Linear let rope: RoPE public init(_ args: PhiConfiguration) { self.args = args let hiddenSize = args.hiddenSize self.heads = args.attentionHeads self.headDim = args.hiddenSize / heads let kvHeads = args.kvHeads if headDim * heads != hiddenSize { fatalError("hidden_size must be divisible by num_heads") } self._wq.wrappedValue = Linear(hiddenSize, heads * headDim, bias: true) self._wk.wrappedValue = Linear(hiddenSize, kvHeads * headDim, bias: true) self._wv.wrappedValue = Linear(hiddenSize, kvHeads * headDim, bias: true) self._dense.wrappedValue = Linear(heads * headDim, hiddenSize, bias: true) self.rope = RoPE( dimensions: Int(args.partialRotaryFactor * Float(headDim)), traditional: false, base: args.ropeTheta) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = wq(x) var keys = wk(x) var values = wv(x) // prepare the queries, keys and values for the attention computation queries = queries.reshaped(B, L, heads, headDim).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, args.kvHeads, headDim).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.kvHeads, headDim).transposed(0, 2, 1, 3) // Add RoPE to the queries and keys and combine them with the cache if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } // Finally perform the attention computation let scale = sqrt(1 / Float(queries.dim(-1))) let output = attentionWithCacheUpdate( queries: queries.asType(.float32), keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .asType(values.dtype) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return dense(output) } } private class PhiMLP: Module, UnaryLayer { @ModuleInfo var fc1: Linear @ModuleInfo var fc2: Linear @ModuleInfo var act: GELU public init(_ config: PhiConfiguration) { self.fc1 = Linear(config.hiddenSize, config.intermediateSize) self.fc2 = Linear(config.intermediateSize, config.hiddenSize) self.act = GELU(approximation: .precise) } public func callAsFunction(_ x: MLXArray) -> MLXArray { fc2(act(fc1(x))) } } private class PhiDecoderLayer: Module { @ModuleInfo(key: "self_attn") var selfAttention: PhiAttention @ModuleInfo(key: "input_layernorm") var inputLayerNorm: LayerNorm var mlp: PhiMLP public init(_ config: PhiConfiguration) { self._selfAttention.wrappedValue = PhiAttention(config) self._inputLayerNorm.wrappedValue = LayerNorm( dimensions: config.hiddenSize, eps: config.layerNormEps) self.mlp = PhiMLP(config) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? 
) -> MLXArray { let h = inputLayerNorm(x) let attentionH = selfAttention(h, mask: mask, cache: cache) let ffH = mlp(h) return attentionH + ffH + x } } private class PhiModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding @ModuleInfo var layers: [PhiDecoderLayer] @ModuleInfo(key: "final_layernorm") var finalLayerNorm: LayerNorm public init(_ args: PhiConfiguration) { self._embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers) .map { _ in PhiDecoderLayer(args) } self._finalLayerNorm.wrappedValue = LayerNorm( dimensions: args.hiddenSize, eps: args.layerNormEps) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: [KVCache]? = nil ) -> MLXArray { var x = embedTokens(x) for (i, layer) in layers.enumerated() { x = layer(x, mask: mask, cache: cache?[i]) } return finalLayerNorm(x) } } public class PhiModel: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] fileprivate let model: PhiModelInner @ModuleInfo(key: "lm_head") var lmHead: Linear public init(_ args: PhiConfiguration) { self.vocabularySize = args.vocabularySize self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads } self.model = PhiModelInner(args) self._lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: true) } public func callAsFunction(_ x: MLXArray, cache: [KVCache]?) -> MLXArray { let mask = createAttentionMask(h: x, cache: cache) let y = model(x, mask: mask, cache: cache) return lmHead(y) } } public struct PhiConfiguration: Codable, Sendable { var maxPositionalEmbeddings = 2048 var vocabularySize = 51200 var hiddenSize = 2560 var attentionHeads = 32 var hiddenLayers = 32 var kvHeads = 32 var partialRotaryFactor: Float = 0.4 var intermediateSize = 10240 var layerNormEps: Float = 1e-5 var ropeTheta: Float = 10_000 enum CodingKeys: String, CodingKey { case maxPositionalEmbeddings = "max_position_embeddings" case vocabularySize = "vocab_size" case hiddenSize = "hidden_size" case attentionHeads = "num_attention_heads" case hiddenLayers = "num_hidden_layers" case kvHeads = "num_key_value_heads" case partialRotaryFactor = "partial_rotary_factor" case intermediateSize = "intermediate_size" case layerNormEps = "layer_norm_eps" case ropeTheta = "rope_theta" } public init(from decoder: Decoder) throws { let container: KeyedDecodingContainer<PhiConfiguration.CodingKeys> = try decoder.container( keyedBy: PhiConfiguration.CodingKeys.self) self.maxPositionalEmbeddings = try container.decode( Int.self, forKey: PhiConfiguration.CodingKeys.maxPositionalEmbeddings) self.vocabularySize = try container.decode( Int.self, forKey: PhiConfiguration.CodingKeys.vocabularySize) self.hiddenSize = try container.decode( Int.self, forKey: PhiConfiguration.CodingKeys.hiddenSize) self.attentionHeads = try container.decode( Int.self, forKey: PhiConfiguration.CodingKeys.attentionHeads) self.hiddenLayers = try container.decode( Int.self, forKey: PhiConfiguration.CodingKeys.hiddenLayers) self.kvHeads = try container.decodeIfPresent(Int.self, forKey: PhiConfiguration.CodingKeys.kvHeads) ?? 
attentionHeads self.partialRotaryFactor = try container.decode( Float.self, forKey: PhiConfiguration.CodingKeys.partialRotaryFactor) self.intermediateSize = try container.decode( Int.self, forKey: PhiConfiguration.CodingKeys.intermediateSize) self.layerNormEps = try container.decode( Float.self, forKey: PhiConfiguration.CodingKeys.layerNormEps) self.ropeTheta = try container.decodeIfPresent(Float.self, forKey: PhiConfiguration.CodingKeys.ropeTheta) ?? 10_000 } } // MARK: - LoRA extension PhiModel: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.selfAttention, ["q_proj", "v_proj"]) } } }
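Note on the block structure above: the Phi decoder layer uses a parallel residual, where attention and the MLP both read the same layer-normalized input and their outputs are added to the raw residual, rather than the sequential pre-norm pattern used by the Llama/Qwen-family rows below. A minimal MLX Python sketch of the two patterns (the Linear stand-ins for attention and MLP are purely illustrative):

import mlx.core as mx
import mlx.nn as nn

dim = 8
norm1, norm2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
attn, mlp = nn.Linear(dim, dim), nn.Linear(dim, dim)  # stand-ins for the real attention / MLP modules
x = mx.random.normal((1, 4, dim))

# Phi-style parallel residual: one shared norm, attention and MLP outputs summed with x
h = norm1(x)
y_parallel = attn(h) + mlp(h) + x

# Llama/Qwen-style sequential pre-norm: chained residuals with two norms
h = x + attn(norm1(x))
y_sequential = h + mlp(norm2(h))
print(y_parallel.shape, y_sequential.shape)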
LM
Phi3
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention from .rope_utils import SuScaledRoPE @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int rms_norm_eps: float vocab_size: int num_key_value_heads: Optional[int] = None rope_theta: float = 10000 rope_traditional: bool = False rope_scaling: Optional[Dict[str, Union[float, List[float]]]] = None partial_rotary_factor: float = 1.0 max_position_embeddings: int = 131072 original_max_position_embeddings: int = 4096 tie_word_embeddings: bool = False def __post_init__(self): if self.num_key_value_heads is None: self.num_key_value_heads = self.num_attention_heads if self.rope_scaling: required_keys = {"long_factor", "type"} if not all(key in self.rope_scaling for key in required_keys): raise ValueError(f"rope_scaling must contain keys {required_keys}") if self.rope_scaling["type"] not in ["longrope", "su", "linear"]: print( "[WARNING] rope_scaling 'type' currently only supports 'linear', 'su', and 'longrope'; setting rope scaling to false." ) self.rope_scaling = None class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads assert args.num_key_value_heads is not None self.n_kv_heads = n_kv_heads = args.num_key_value_heads self.num_hidden_layers = args.num_hidden_layers self.head_dim = head_dim = args.hidden_size // n_heads self.scale = head_dim**-0.5 op_size = n_heads * head_dim + 2 * (n_kv_heads * head_dim) self.qkv_proj = nn.Linear(dim, op_size, bias=False) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=False) rope_dim = int(head_dim * args.partial_rotary_factor) if args.rope_scaling and args.rope_scaling["type"] in ["longrope", "su"]: self.rope = SuScaledRoPE( rope_dim, base=args.rope_theta, max_position_embeddings=args.max_position_embeddings, original_max_position_embeddings=args.original_max_position_embeddings, short_factor=args.rope_scaling["short_factor"], long_factor=args.rope_scaling["long_factor"], ) else: rope_scale = 1.0 if args.rope_scaling and args.rope_scaling["type"] == "linear": assert isinstance(args.rope_scaling["factor"], float) rope_scale = 1 / args.rope_scaling["factor"] self.rope = nn.RoPE( rope_dim, traditional=args.rope_traditional, base=args.rope_theta, scale=rope_scale, ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape qkv = self.qkv_proj(x) query_pos = self.n_heads * self.head_dim queries, keys, values = mx.split( qkv, [query_pos, query_pos + self.n_kv_heads * self.head_dim], axis=-1 ) # Prepare the queries, keys and values for the attention computation queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) 
return self.o_proj(output) class MLP(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.gate_up_proj = nn.Linear(dim, 2 * hidden_dim, bias=False) self.down_proj = nn.Linear(hidden_dim, dim, bias=False) def __call__(self, x) -> mx.array: x = self.gate_up_proj(x) gate, x = mx.split(x, 2, axis=-1) return self.down_proj(nn.silu(gate) * x) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.num_attention_heads = args.num_attention_heads self.hidden_size = args.hidden_size self.self_attn = Attention(args) self.mlp = MLP(args.hidden_size, args.intermediate_size) self.input_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.post_attention_layernorm = nn.RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) self.args = args def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.self_attn(self.input_layernorm(x), mask, cache) h = x + r r = self.mlp(self.post_attention_layernorm(h)) out = h + r return out class Phi3Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = args.num_hidden_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ TransformerBlock(args=args) for _ in range(args.num_hidden_layers) ] self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): h = self.embed_tokens(inputs) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.model_type = args.model_type self.model = Phi3Model(args) if not args.tie_word_embeddings: self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False) self.args = args def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.model(inputs, mask, cache) if self.args.tie_word_embeddings: out = self.model.embed_tokens.as_linear(out) else: out = self.lm_head(out) return out @property def layers(self): return self.model.layers
// Copyright © 2024 Apple Inc. import Foundation import MLX import MLXLMCommon import MLXNN private class Attention: Module { let args: Phi3Configuration let scale: Float let heads: Int let kvHeads: Int let headDim: Int let ropeDim: Int @ModuleInfo(key: "qkv_proj") var wqkv: Linear @ModuleInfo(key: "o_proj") var wo: Linear enum PositionalEncoding { case rope(RoPE) case suScaledRotaryEmbedding(SuScaledRotaryEmbedding) func applyEncoding(_ x: MLXArray, offset: Int = 0) -> MLXArray { switch self { case .rope(let rope): return rope.callAsFunction(x, offset: offset) case .suScaledRotaryEmbedding(let suScaledRotaryEmbedding): return suScaledRotaryEmbedding.callAsFunction(x, offset: offset) } } } let rope: PositionalEncoding public init(_ args: Phi3Configuration) { self.args = args let dim = args.hiddenSize self.heads = args.attentionHeads self.kvHeads = args.kvHeads self.headDim = args.hiddenSize / heads self.ropeDim = Int(Float(headDim) * args.partialRotaryFactor) self.scale = pow(Float(headDim), -0.5) self._wqkv.wrappedValue = Linear(dim, (heads + 2 * kvHeads) * headDim, bias: false) self._wo.wrappedValue = Linear(heads * headDim, dim, bias: false) let ropeScale: Float if let ropeScaling = args.ropeScaling, ropeScaling.type == "linear", let factor = ropeScaling.factor { ropeScale = 1 / factor } else { ropeScale = 1 } if let ropeScaling = args.ropeScaling, ropeScaling.type == "su" || ropeScaling.type == "longrope", let shortFactor = ropeScaling.shortFactor, let longFactor = ropeScaling.longFactor { self.rope = .suScaledRotaryEmbedding( SuScaledRotaryEmbedding( dimensions: ropeDim, base: args.ropeTheta, maxPositionEmbeddings: args.maxPositionEmbeddings, originalMaxPositionEmbeddings: args.originalMaxPositionEmbeddings, longFactor: longFactor)) } else { self.rope = .rope( RoPE( dimensions: ropeDim, traditional: args.ropeTraditional, base: args.ropeTheta, scale: ropeScale)) } } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? 
) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) let queryPos = heads * headDim let qkv = split(wqkv(x), indices: [queryPos, queryPos + kvHeads * headDim], axis: -1) var queries = qkv[0] var keys = qkv[1] var values = qkv[2] // prepare the queries, keys and values for the attention computation queries = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope.applyEncoding(queries, offset: cache.offset) keys = rope.applyEncoding(keys, offset: cache.offset) } else { queries = rope.applyEncoding(queries) keys = rope.applyEncoding(keys) } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "gate_up_proj") var gate_up: Linear @ModuleInfo(key: "down_proj") var down: Linear public init(dimensions: Int, hiddenDimensions: Int) { self._gate_up.wrappedValue = Linear(dimensions, 2 * hiddenDimensions, bias: false) self._down.wrappedValue = Linear(hiddenDimensions, dimensions, bias: false) } public func callAsFunction(_ x: MLXArray) -> MLXArray { let gu = split(gate_up(x), parts: 2, axis: -1) return down(silu(gu[0]) * gu[1]) } } private class TransformerBlock: Module { @ModuleInfo(key: "self_attn") var attention: Attention let mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: RMSNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: RMSNorm public init(_ args: Phi3Configuration) { self._attention.wrappedValue = Attention(args) self.mlp = MLP(dimensions: args.hiddenSize, hiddenDimensions: args.intermediateSize) self._inputLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) self._postAttentionLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { var r = attention(inputLayerNorm(x), mask: mask, cache: cache) let h = x + r r = mlp(postAttentionLayerNorm(h)) let out = h + r return out } } private class Phi3ModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding fileprivate let layers: [TransformerBlock] let norm: RMSNorm let args: Phi3Configuration public init(_ args: Phi3Configuration) { precondition(args.vocabularySize > 0) self.args = args self._embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers) .map { _ in TransformerBlock(args) } self.norm = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray { var h = embedTokens(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class Phi3Model: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] private let model: Phi3ModelInner private let args: Phi3Configuration @ModuleInfo(key: "lm_head") var lmHead: Linear? 
public init(_ args: Phi3Configuration) { self.vocabularySize = args.vocabularySize self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads } self.model = Phi3ModelInner(args) self.args = args if !args.tieWordEmbeddings { self._lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: false) } } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray { let out = model(inputs, cache: cache) if args.tieWordEmbeddings { return model.embedTokens.asLinear(out) } else if let lmHead { return lmHead(out) } else { fatalError( "Model configuration error: Neither tied embeddings nor lm_head is available") } } } struct RopeScalingWithFactorArrays: Codable { let longFactor: [Float]? let shortFactor: [Float]? let factor: Float? let type: String? let longMScale: Float? let shortMScale: Float? enum CodingKeys: String, CodingKey { case type case factor case longFactor = "long_factor" case shortFactor = "short_factor" case longMScale = "long_mscale" case shortMScale = "short_mscale" } } public struct Phi3Configuration: Codable, Sendable { var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var rmsNormEps: Float var vocabularySize: Int var kvHeads: Int var ropeTheta: Float = 10_000 var ropeTraditional: Bool = false var ropeScaling: RopeScalingWithFactorArrays? var partialRotaryFactor: Float = 1.0 var maxPositionEmbeddings: Int var originalMaxPositionEmbeddings: Int var tieWordEmbeddings: Bool = false enum CodingKeys: String, CodingKey { case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case kvHeads = "num_key_value_heads" case ropeTheta = "rope_theta" case ropeTraditional = "rope_traditional" case ropeScaling = "rope_scaling" case partialRotaryFactor = "partial_rotary_factor" case maxPositionEmbeddings = "max_position_embeddings" case originalMaxPositionEmbeddings = "original_max_position_embeddings" case tieWordEmbeddings = "tie_word_embeddings" } public init(from decoder: Decoder) throws { // custom implementation to handle optional keys with required values let container: KeyedDecodingContainer<Phi3Configuration.CodingKeys> = try decoder.container( keyedBy: Phi3Configuration.CodingKeys.self) hiddenSize = try container.decode(Int.self, forKey: Phi3Configuration.CodingKeys.hiddenSize) hiddenLayers = try container.decode( Int.self, forKey: Phi3Configuration.CodingKeys.hiddenLayers) intermediateSize = try container.decode( Int.self, forKey: Phi3Configuration.CodingKeys.intermediateSize) attentionHeads = try container.decode( Int.self, forKey: Phi3Configuration.CodingKeys.attentionHeads) rmsNormEps = try container.decode( Float.self, forKey: Phi3Configuration.CodingKeys.rmsNormEps) vocabularySize = try container.decode( Int.self, forKey: Phi3Configuration.CodingKeys.vocabularySize) kvHeads = try container.decode(Int.self, forKey: Phi3Configuration.CodingKeys.kvHeads) ropeTheta = try container.decodeIfPresent( Float.self, forKey: Phi3Configuration.CodingKeys.ropeTheta) ?? 10_000 ropeTraditional = try container.decodeIfPresent( Bool.self, forKey: Phi3Configuration.CodingKeys.ropeTraditional) ?? false ropeScaling = try container.decodeIfPresent( RopeScalingWithFactorArrays.self, forKey: .ropeScaling) partialRotaryFactor = try container.decodeIfPresent( Float.self, forKey: .partialRotaryFactor) ?? 
1.0 maxPositionEmbeddings = try container.decode(Int.self, forKey: .maxPositionEmbeddings) originalMaxPositionEmbeddings = try container.decode( Int.self, forKey: .originalMaxPositionEmbeddings) tieWordEmbeddings = try container.decodeIfPresent( Bool.self, forKey: .tieWordEmbeddings) ?? false } } // MARK: - LoRA extension Phi3Model: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["qkv_proj"]) } } }
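Unlike the Phi attention above, Phi3 fuses the query/key/value projections into a single qkv_proj and recovers q/k/v with cumulative split indices (query_pos and query_pos + n_kv_heads * head_dim). A minimal MLX Python sketch of just that split, with made-up head counts:

import mlx.core as mx

B, L = 1, 4
n_heads, n_kv_heads, head_dim = 8, 2, 16

# fused projection output laid out as [queries | keys | values] on the last axis
qkv = mx.random.normal((B, L, (n_heads + 2 * n_kv_heads) * head_dim))

query_pos = n_heads * head_dim
queries, keys, values = mx.split(
    qkv, [query_pos, query_pos + n_kv_heads * head_dim], axis=-1
)
print(queries.shape, keys.shape, values.shape)  # (1, 4, 128) (1, 4, 32) (1, 4, 32)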
LM
PhiMoE
# Copyright © 2024 Apple Inc. import math from dataclasses import dataclass from typing import Dict, List, Optional, Union import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention from .rope_utils import SuScaledRoPE from .switch_layers import SwitchGLU @dataclass class ModelArgs(BaseModelArgs): model_type: str = "phimoe" vocab_size: int = 32064 hidden_size: int = 4096 intermediate_size: int = 6400 num_hidden_layers: int = 32 num_attention_heads: int = 32 num_key_value_heads: int = 8 max_position_embeddings: int = 131072 original_max_position_embeddings: int = 4096 rms_norm_eps: float = 1e-6 rope_scaling: Dict[str, Union[float, List[float]]] = None num_local_experts: int = 16 num_experts_per_tok: int = 2 rope_theta: float = 10000.0 class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads self.n_kv_heads = n_kv_heads = args.num_key_value_heads head_dim = args.hidden_size // n_heads self.scale = head_dim**-0.5 self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=True) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=True) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=True) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=True) self.rope = SuScaledRoPE( head_dim, base=args.rope_theta, max_position_embeddings=args.max_position_embeddings, original_max_position_embeddings=args.original_max_position_embeddings, short_factor=args.rope_scaling["short_factor"], long_factor=args.rope_scaling["long_factor"], short_mscale=args.rope_scaling["short_mscale"], long_mscale=args.rope_scaling["long_mscale"], ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache=None, ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) # Prepare the queries, keys and values for the attention computation queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class PhiMoESparseMoeBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.hidden_dim = args.hidden_size self.ffn_dim = args.intermediate_size self.num_experts = args.num_local_experts self.top_k = args.num_experts_per_tok self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False) self.switch_mlp = SwitchGLU(self.hidden_dim, self.ffn_dim, self.num_experts) def __call__(self, x: mx.array) -> mx.array: gates = self.gate(x) k = self.top_k inds = mx.stop_gradient(mx.argpartition(-gates, kth=k - 1, axis=-1)[..., :k]) scores = mx.take_along_axis(gates, inds, axis=-1) scores = mx.softmax(scores, axis=-1, precise=True) y = self.switch_mlp(x, inds) y = (y * scores[..., None]).sum(axis=-2) return y class PhiMoEDecoderLayer(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.hidden_size = args.hidden_size self.self_attn = Attention(args) self.block_sparse_moe = PhiMoESparseMoeBlock(args) 
self.input_layernorm = nn.LayerNorm(args.hidden_size, eps=args.rms_norm_eps) self.post_attention_layernorm = nn.LayerNorm( args.hidden_size, eps=args.rms_norm_eps ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache=None, ) -> mx.array: residual = x hidden_states = self.input_layernorm(x) hidden_states = self.self_attn(hidden_states, mask=mask, cache=cache) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.block_sparse_moe(hidden_states) hidden_states = residual + hidden_states return hidden_states class PhiMoEModel(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [PhiMoEDecoderLayer(args) for _ in range(args.num_hidden_layers)] self.norm = nn.LayerNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ) -> mx.array: h = self.embed_tokens(inputs) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.model_type = args.model_type self.args = args self.model = PhiMoEModel(args) self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=True) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.model(inputs, mask, cache) return self.lm_head(out) def sanitize(self, weights): if "model.layers.0.block_sparse_moe.experts.0.w1.weight" not in weights: return weights for l in range(self.args.num_hidden_layers): prefix = f"model.layers.{l}" for n, m in [("w1", "gate_proj"), ("w2", "down_proj"), ("w3", "up_proj")]: for k in ["weight", "scales", "biases"]: if f"{prefix}.block_sparse_moe.experts.0.{n}.{k}" in weights: to_join = [ weights.pop( f"{prefix}.block_sparse_moe.experts.{e}.{n}.{k}" ) for e in range(self.args.num_local_experts) ] weights[f"{prefix}.block_sparse_moe.switch_mlp.{m}.{k}"] = ( mx.stack(to_join) ) return weights @property def layers(self): return self.model.layers
import Foundation import MLX import MLXLMCommon import MLXNN // Port of https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/models/phimoe.py public struct PhiMoEConfiguration: Codable, Sendable { var modelType: String = "phimoe" var vocabularySize: Int = 32064 var hiddenSize: Int = 4096 var intermediateSize: Int = 6400 var hiddenLayers: Int = 32 var attentionHeads: Int = 32 var kvHeads: Int = 8 var maxPositionEmbeddings: Int = 131072 var originalMaxPositionEmbeddings: Int = 4096 var rmsNormEps: Float = 1e-6 var ropeScaling: RopeScalingWithFactorArrays? var numLocalExperts: Int = 16 var numExpertsPerToken: Int = 2 var ropeTheta: Float = 10000.0 enum CodingKeys: String, CodingKey { case modelType = "model_type" case vocabularySize = "vocab_size" case hiddenSize = "hidden_size" case intermediateSize = "intermediate_size" case hiddenLayers = "num_hidden_layers" case attentionHeads = "num_attention_heads" case kvHeads = "num_key_value_heads" case maxPositionEmbeddings = "max_position_embeddings" case originalMaxPositionEmbeddings = "original_max_position_embeddings" case rmsNormEps = "rms_norm_eps" case ropeScaling = "rope_scaling" case numLocalExperts = "num_local_experts" case numExpertsPerToken = "num_experts_per_tok" case ropeTheta = "rope_theta" } } private class Attention: Module { let args: PhiMoEConfiguration let scale: Float @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "o_proj") var wo: Linear let rope: SuScaledRotaryEmbedding init(_ args: PhiMoEConfiguration) { self.args = args let dim = args.hiddenSize let heads = args.attentionHeads let kvHeads = args.kvHeads let headDim = args.hiddenSize / heads self.scale = pow(Float(headDim), -0.5) self._wq.wrappedValue = Linear(dim, heads * headDim, bias: true) self._wk.wrappedValue = Linear(dim, kvHeads * headDim, bias: true) self._wv.wrappedValue = Linear(dim, kvHeads * headDim, bias: true) self._wo.wrappedValue = Linear(heads * headDim, dim, bias: true) self.rope = SuScaledRotaryEmbedding( dimensions: headDim, base: args.ropeTheta, maxPositionEmbeddings: args.maxPositionEmbeddings, originalMaxPositionEmbeddings: args.originalMaxPositionEmbeddings, longFactor: args.ropeScaling?.longFactor as? [Float] ?? [1.0], longMScale: args.ropeScaling?.longMScale as? Float ) } func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? 
) -> MLXArray { let (B, L, _) = (x.dim(0), x.dim(1), x.dim(2)) let queries = wq(x) let keys = wk(x) let values = wv(x) // Prepare the queries, keys and values for the attention computation var q = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3) var k = keys.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) var v = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) if let cache { q = rope(q, offset: cache.offset) k = rope(k, offset: cache.offset) } else { q = rope(q) k = rope(k) } let output = attentionWithCacheUpdate( queries: q, keys: k, values: v, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class PhiMoESparseMoeBlock: Module { let hiddenDim: Int let ffnDim: Int let numExperts: Int let topK: Int @ModuleInfo(key: "gate") var gate: Linear @ModuleInfo(key: "switch_mlp") var switchMLP: SwitchGLU init(_ args: PhiMoEConfiguration) { self.hiddenDim = args.hiddenSize self.ffnDim = args.intermediateSize self.numExperts = args.numLocalExperts self.topK = args.numExpertsPerToken self._gate.wrappedValue = Linear(hiddenDim, numExperts, bias: false) self._switchMLP.wrappedValue = SwitchGLU( inputDims: hiddenDim, hiddenDims: ffnDim, numExperts: numExperts) } func callAsFunction(_ x: MLXArray) -> MLXArray { let gates = gate(x) let k = self.topK let inds = MLX.stopGradient( MLX.argPartition( -gates, kth: k - 1, axis: -1 )[.ellipsis, ..<k]) let scores = MLX.softmax(MLX.takeAlong(gates, inds, axis: -1), axis: -1, precise: true) let y = switchMLP(x, inds) return (y * scores[.ellipsis, .newAxis]).sum(axis: -2) } } private class PhiMoEDecoderLayer: Module { let hiddenSize: Int @ModuleInfo(key: "self_attn") var selfAttn: Attention @ModuleInfo(key: "block_sparse_moe") var blockSparseMoe: PhiMoESparseMoeBlock @ModuleInfo(key: "input_layernorm") var inputLayerNorm: LayerNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: LayerNorm init(_ args: PhiMoEConfiguration) { self.hiddenSize = args.hiddenSize self._selfAttn.wrappedValue = Attention(args) self._blockSparseMoe.wrappedValue = PhiMoESparseMoeBlock(args) self._inputLayerNorm.wrappedValue = LayerNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) self._postAttentionLayerNorm.wrappedValue = LayerNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) } func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { var residual = x var hiddenStates = inputLayerNorm(x) hiddenStates = selfAttn(hiddenStates, mask: mask, cache: cache) hiddenStates = residual + hiddenStates residual = hiddenStates hiddenStates = postAttentionLayerNorm(hiddenStates) hiddenStates = blockSparseMoe(hiddenStates) hiddenStates = residual + hiddenStates return hiddenStates } } private class PhiMoEModelInner: Module { let args: PhiMoEConfiguration @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding let layers: [PhiMoEDecoderLayer] @ModuleInfo(key: "norm") var norm: LayerNorm init(_ args: PhiMoEConfiguration) { self.args = args self._embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers).map { _ in PhiMoEDecoderLayer(args) } self._norm.wrappedValue = LayerNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) } func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) 
-> MLXArray { var h = embedTokens(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class PhiMoEModel: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] fileprivate let model: PhiMoEModelInner @ModuleInfo(key: "lm_head") var lmHead: Linear public init(_ args: PhiMoEConfiguration) { self.vocabularySize = args.vocabularySize self.kvHeads = Array(repeating: args.kvHeads, count: args.hiddenLayers) self.model = PhiMoEModelInner(args) self._lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: true) } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray { let out = model(inputs, cache: cache) return lmHead(out) } public func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] { var sanitizedWeights = weights if sanitizedWeights["model.layers.0.block_sparse_moe.experts.0.w1.weight"] == nil { return sanitizedWeights } for l in 0 ..< model.args.hiddenLayers { let prefix = "model.layers.\(l)" for (n, m) in [("w1", "gate_proj"), ("w2", "down_proj"), ("w3", "up_proj")] { for k in ["weight", "scales", "biases"] { if sanitizedWeights["\(prefix).block_sparse_moe.experts.0.\(n).\(k)"] != nil { let toJoin = (0 ..< model.args.numLocalExperts).map { e in sanitizedWeights.removeValue( forKey: "\(prefix).block_sparse_moe.experts.\(e).\(n).\(k)")! } sanitizedWeights["\(prefix).block_sparse_moe.switch_mlp.\(m).\(k)"] = MLX.stacked(toJoin) } } } } return sanitizedWeights } } // MARK: - LoRA extension PhiMoEModel: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.selfAttn, ["q_proj", "v_proj"]) } } }
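The sparse MoE block above selects each token's top-k experts with argpartition on the negated gate logits and gathers the matching scores with take_along_axis before the softmax. A minimal MLX Python sketch of that routing step alone (random logits, toy sizes; the SwitchGLU expert dispatch itself is omitted):

import mlx.core as mx

num_experts, k = 16, 2
gates = mx.random.normal((1, 4, num_experts))  # (batch, seq, experts) gate logits

# indices of the k largest logits per token (order within the k is unspecified)
inds = mx.stop_gradient(mx.argpartition(-gates, kth=k - 1, axis=-1)[..., :k])

# gather the selected logits and renormalize them over the chosen experts
scores = mx.softmax(mx.take_along_axis(gates, inds, axis=-1), axis=-1, precise=True)
print(inds.shape, scores.shape)  # (1, 4, 2) (1, 4, 2)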
LM
Qwen2
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import Any, Dict, Optional, Union import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention from .rope_utils import initialize_rope @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int rms_norm_eps: float vocab_size: int num_key_value_heads: int max_position_embeddings: int = 32768 rope_theta: float = 1000000 rope_traditional: bool = False rope_scaling: Optional[Dict[str, Union[float, str]]] = None tie_word_embeddings: bool = True class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads assert args.num_key_value_heads is not None self.n_kv_heads = n_kv_heads = args.num_key_value_heads head_dim = args.hidden_size // n_heads self.scale = head_dim**-0.5 self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=True) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=True) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=True) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=False) self.rope = initialize_rope( head_dim, base=args.rope_theta, traditional=args.rope_traditional, scaling_config=args.rope_scaling, max_position_embeddings=args.max_position_embeddings, ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) # Prepare the queries, keys and values for the attention computation queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class MLP(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.gate_proj = nn.Linear(dim, hidden_dim, bias=False) self.down_proj = nn.Linear(hidden_dim, dim, bias=False) self.up_proj = nn.Linear(dim, hidden_dim, bias=False) def __call__(self, x) -> mx.array: return self.down_proj(nn.silu(self.gate_proj(x)) * self.up_proj(x)) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.num_attention_heads = args.num_attention_heads self.hidden_size = args.hidden_size self.self_attn = Attention(args) self.mlp = MLP(args.hidden_size, args.intermediate_size) self.input_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.post_attention_layernorm = nn.RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) self.args = args def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.self_attn(self.input_layernorm(x), mask, cache) h = x + r r = self.mlp(self.post_attention_layernorm(h)) out = h + r return out class Qwen2Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = 
args.num_hidden_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ TransformerBlock(args=args) for _ in range(args.num_hidden_layers) ] self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, input_embeddings: Optional[mx.array] = None, ): if input_embeddings is not None: h = input_embeddings else: h = self.embed_tokens(inputs) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.model_type = args.model_type self.model = Qwen2Model(args) if not args.tie_word_embeddings: self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, input_embeddings: Optional[mx.array] = None, ): out = self.model(inputs, mask, cache, input_embeddings) if self.args.tie_word_embeddings: out = self.model.embed_tokens.as_linear(out) else: out = self.lm_head(out) return out def sanitize(self, weights): if self.args.tie_word_embeddings: weights.pop("lm_head.weight", None) # Remove unused precomputed rotary freqs return { k: v for k, v in weights.items() if "self_attn.rotary_emb.inv_freq" not in k } @property def layers(self): return self.model.layers
// // Qwen2.swift // LLM // // Created by John Mai on 2024/3/3. // import Foundation import MLX import MLXLMCommon import MLXNN // port of https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/models/qwen2.py private class Attention: Module { let args: Qwen2Configuration let scale: Float @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "o_proj") var wo: Linear let rope: RoPE public init(_ args: Qwen2Configuration) { self.args = args let dim = args.hiddenSize let heads = args.attentionHeads let kvHeads = args.kvHeads let headDim = args.hiddenSize / heads self.scale = pow(Float(headDim), -0.5) _wq.wrappedValue = Linear(dim, heads * headDim, bias: true) _wk.wrappedValue = Linear(dim, kvHeads * headDim, bias: true) _wv.wrappedValue = Linear(dim, kvHeads * headDim, bias: true) _wo.wrappedValue = Linear(heads * headDim, dim, bias: false) let ropeScale: Float if let ropeScaling = args.ropeScaling, ropeScaling["type"] == .string("linear"), let factor = ropeScaling["factor"] { if let v = factor.asFloat() { ropeScale = 1 / v } else { fatalError("ropeScaling.factor must be a float") } } else { ropeScale = 1 } self.rope = RoPE( dimensions: headDim, traditional: args.ropeTraditional, base: args.ropeTheta, scale: ropeScale) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = wq(x) var keys = wk(x) var values = wv(x) // prepare the queries, keys and values for the attention computation queries = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "gate_proj") var gate: Linear @ModuleInfo(key: "down_proj") var down: Linear @ModuleInfo(key: "up_proj") var up: Linear public init(dimensions: Int, hiddenDimensions: Int) { _gate.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) _down.wrappedValue = Linear(hiddenDimensions, dimensions, bias: false) _up.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) } public func callAsFunction(_ x: MLXArray) -> MLXArray { down(silu(gate(x)) * up(x)) } } private class TransformerBlock: Module { @ModuleInfo(key: "self_attn") var attention: Attention let mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: RMSNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: RMSNorm public init(_ args: Qwen2Configuration) { _attention.wrappedValue = Attention(args) self.mlp = MLP(dimensions: args.hiddenSize, hiddenDimensions: args.intermediateSize) _inputLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) _postAttentionLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? 
) -> MLXArray { var r = attention(inputLayerNorm(x), mask: mask, cache: cache) let h = x + r r = mlp(postAttentionLayerNorm(h)) let out = h + r return out } } private class Qwen2ModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding fileprivate let layers: [TransformerBlock] let norm: RMSNorm public init(_ args: Qwen2Configuration) { precondition(args.vocabularySize > 0) _embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers) .map { _ in TransformerBlock(args) } self.norm = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class Qwen2Model: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] private let model: Qwen2ModelInner let configuration: Qwen2Configuration @ModuleInfo(key: "lm_head") var lmHead: Linear? public init(_ args: Qwen2Configuration) { self.configuration = args self.vocabularySize = args.vocabularySize self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads } self.model = Qwen2ModelInner(args) if !args.tieWordEmbeddings { _lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: false) } } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray { var out = model(inputs, cache: cache) if let lmHead { out = lmHead(out) } else { out = model.embedTokens.asLinear(out) } return out } public func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] { var weights = weights if configuration.tieWordEmbeddings { weights["lm_head.weight"] = nil } // Remove unused precomputed rotary freqs return weights.filter { !$0.key.contains("self_attn.rotary_emb.inv_freq") } } } public struct Qwen2Configuration: Codable, Sendable { var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var rmsNormEps: Float var vocabularySize: Int var kvHeads: Int var ropeTheta: Float = 1_000_000 var ropeTraditional: Bool = false var ropeScaling: [String: StringOrNumber]? 
= nil var tieWordEmbeddings = false enum CodingKeys: String, CodingKey { case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case kvHeads = "num_key_value_heads" case ropeTheta = "rope_theta" case ropeTraditional = "rope_traditional" case ropeScaling = "rope_scaling" case tieWordEmbeddings = "tie_word_embeddings" } public init(from decoder: Decoder) throws { // custom implementation to handle optional keys with required values let container: KeyedDecodingContainer<Qwen2Configuration.CodingKeys> = try decoder.container( keyedBy: Qwen2Configuration.CodingKeys.self) self.hiddenSize = try container.decode( Int.self, forKey: Qwen2Configuration.CodingKeys.hiddenSize) self.hiddenLayers = try container.decode( Int.self, forKey: Qwen2Configuration.CodingKeys.hiddenLayers) self.intermediateSize = try container.decode( Int.self, forKey: Qwen2Configuration.CodingKeys.intermediateSize) self.attentionHeads = try container.decode( Int.self, forKey: Qwen2Configuration.CodingKeys.attentionHeads) self.rmsNormEps = try container.decode( Float.self, forKey: Qwen2Configuration.CodingKeys.rmsNormEps) self.vocabularySize = try container.decode( Int.self, forKey: Qwen2Configuration.CodingKeys.vocabularySize) self.kvHeads = try container.decode(Int.self, forKey: Qwen2Configuration.CodingKeys.kvHeads) self.ropeTheta = try container.decodeIfPresent( Float.self, forKey: Qwen2Configuration.CodingKeys.ropeTheta) ?? 1_000_000 self.ropeTraditional = try container.decodeIfPresent( Bool.self, forKey: Qwen2Configuration.CodingKeys.ropeTraditional) ?? false self.ropeScaling = try container.decodeIfPresent( [String: StringOrNumber].self, forKey: Qwen2Configuration.CodingKeys.ropeScaling) self.tieWordEmbeddings = try container.decodeIfPresent(Bool.self, forKey: .tieWordEmbeddings) ?? false } } // MARK: - LoRA extension Qwen2Model: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["q_proj", "v_proj"]) } } }
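When tie_word_embeddings is true, both the Python and Swift Qwen2 models reuse the token embedding matrix as the output projection (embed_tokens.as_linear / embedTokens.asLinear) and sanitize drops any lm_head.weight left in the checkpoint. A minimal MLX Python sketch of the tied projection, with toy sizes:

import mlx.core as mx
import mlx.nn as nn

vocab_size, hidden_size = 100, 32
embed_tokens = nn.Embedding(vocab_size, hidden_size)

tokens = mx.array([[1, 2, 3]])
h = embed_tokens(tokens)            # (1, 3, hidden_size)
logits = embed_tokens.as_linear(h)  # (1, 3, vocab_size), same weights as the embedding
print(logits.shape)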
LM
Qwen3
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import Any, Dict, Optional, Union import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention from .rope_utils import initialize_rope @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int rms_norm_eps: float vocab_size: int num_key_value_heads: int max_position_embeddings: int rope_theta: float head_dim: int tie_word_embeddings: bool rope_scaling: Optional[Dict[str, Union[float, str]]] = None class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads assert args.num_key_value_heads is not None self.n_kv_heads = n_kv_heads = args.num_key_value_heads head_dim = args.head_dim self.scale = head_dim**-0.5 self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=False) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=False) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=False) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=False) self.q_norm = nn.RMSNorm(head_dim, eps=args.rms_norm_eps) self.k_norm = nn.RMSNorm(head_dim, eps=args.rms_norm_eps) self.rope = initialize_rope( head_dim, base=args.rope_theta, traditional=False, scaling_config=args.rope_scaling, max_position_embeddings=args.max_position_embeddings, ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) queries = self.q_norm(queries.reshape(B, L, self.n_heads, -1)).transpose( 0, 2, 1, 3 ) keys = self.k_norm(keys.reshape(B, L, self.n_kv_heads, -1)).transpose( 0, 2, 1, 3 ) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class MLP(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.gate_proj = nn.Linear(dim, hidden_dim, bias=False) self.down_proj = nn.Linear(hidden_dim, dim, bias=False) self.up_proj = nn.Linear(dim, hidden_dim, bias=False) def __call__(self, x) -> mx.array: return self.down_proj(nn.silu(self.gate_proj(x)) * self.up_proj(x)) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.num_attention_heads = args.num_attention_heads self.hidden_size = args.hidden_size self.self_attn = Attention(args) self.mlp = MLP(args.hidden_size, args.intermediate_size) self.input_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.post_attention_layernorm = nn.RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) self.args = args def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.self_attn(self.input_layernorm(x), mask, cache) h = x + r r = self.mlp(self.post_attention_layernorm(h)) out = h + r return out class Qwen3Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = 
args.num_hidden_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ TransformerBlock(args=args) for _ in range(args.num_hidden_layers) ] self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): h = self.embed_tokens(inputs) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.model_type = args.model_type self.model = Qwen3Model(args) if not args.tie_word_embeddings: self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.model(inputs, mask, cache) if self.args.tie_word_embeddings: out = self.model.embed_tokens.as_linear(out) else: out = self.lm_head(out) return out def sanitize(self, weights): if self.args.tie_word_embeddings: weights.pop("lm_head.weight", None) return weights @property def layers(self): return self.model.layers
// // Qwen3.swift // LLM // // Created by John Mai on 2025/4/28. // import Foundation import MLX import MLXLMCommon import MLXNN // port of https://github.com/ml-explore/mlx-lm/blob/main/mlx_lm/models/qwen3.py private class Attention: Module { let args: Qwen3Configuration let scale: Float @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "o_proj") var wo: Linear @ModuleInfo(key: "q_norm") var qNorm: RMSNorm @ModuleInfo(key: "k_norm") var kNorm: RMSNorm let rope: RoPE public init(_ args: Qwen3Configuration) { self.args = args let dim = args.hiddenSize let heads = args.attentionHeads let kvHeads = args.kvHeads let headDim = args.headDim self.scale = pow(Float(headDim), -0.5) _wq.wrappedValue = Linear(dim, heads * headDim, bias: false) _wk.wrappedValue = Linear(dim, kvHeads * headDim, bias: false) _wv.wrappedValue = Linear(dim, kvHeads * headDim, bias: false) _wo.wrappedValue = Linear(heads * headDim, dim, bias: false) _qNorm.wrappedValue = RMSNorm(dimensions: headDim, eps: args.rmsNormEps) _kNorm.wrappedValue = RMSNorm(dimensions: headDim, eps: args.rmsNormEps) let ropeScale: Float if let ropeScaling = args.ropeScaling, ropeScaling["type"] == .string("linear"), let factor = ropeScaling["factor"] { if let v = factor.asFloat() { ropeScale = 1 / v } else { fatalError("ropeScaling.factor must be a float") } } else { ropeScale = 1 } self.rope = RoPE( dimensions: headDim, traditional: false, base: args.ropeTheta, scale: ropeScale) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = wq(x) var keys = wk(x) var values = wv(x) // prepare the queries, keys and values for the attention computation queries = qNorm(queries.reshaped(B, L, args.attentionHeads, -1)).transposed(0, 2, 1, 3) keys = kNorm(keys.reshaped(B, L, args.kvHeads, -1)).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) // Apply RoPE positioning if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } // Use the automatic attention router that handles both quantized and regular caches let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "gate_proj") var gate: Linear @ModuleInfo(key: "down_proj") var down: Linear @ModuleInfo(key: "up_proj") var up: Linear public init(dimensions: Int, hiddenDimensions: Int) { _gate.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) _down.wrappedValue = Linear(hiddenDimensions, dimensions, bias: false) _up.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) } public func callAsFunction(_ x: MLXArray) -> MLXArray { down(silu(gate(x)) * up(x)) } } private class TransformerBlock: Module { @ModuleInfo(key: "self_attn") var attention: Attention let mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: RMSNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: RMSNorm public init(_ args: Qwen3Configuration) { _attention.wrappedValue = Attention(args) self.mlp = MLP(dimensions: args.hiddenSize, hiddenDimensions: args.intermediateSize) _inputLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: 
args.rmsNormEps) _postAttentionLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { var r = attention(inputLayerNorm(x), mask: mask, cache: cache) let h = x + r r = mlp(postAttentionLayerNorm(h)) let out = h + r return out } } private class Qwen3ModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding fileprivate let layers: [TransformerBlock] let norm: RMSNorm public init(_ args: Qwen3Configuration) { precondition(args.vocabularySize > 0) _embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers) .map { _ in TransformerBlock(args) } self.norm = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class Qwen3Model: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] private let model: Qwen3ModelInner let configuration: Qwen3Configuration @ModuleInfo(key: "lm_head") var lmHead: Linear? public init(_ args: Qwen3Configuration) { self.configuration = args self.vocabularySize = args.vocabularySize self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads } self.model = Qwen3ModelInner(args) if !args.tieWordEmbeddings { _lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: false) } } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray { var out = model(inputs, cache: cache) if let lmHead { out = lmHead(out) } else { out = model.embedTokens.asLinear(out) } return out } public func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] { var weights = weights if configuration.tieWordEmbeddings { weights["lm_head.weight"] = nil } return weights } } public struct Qwen3Configuration: Codable, Sendable { var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var rmsNormEps: Float var vocabularySize: Int var kvHeads: Int var ropeTheta: Float = 1_000_000 var headDim: Int var ropeScaling: [String: StringOrNumber]? 
= nil var tieWordEmbeddings = false var maxPositionEmbeddings: Int = 32768 enum CodingKeys: String, CodingKey { case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case kvHeads = "num_key_value_heads" case ropeTheta = "rope_theta" case headDim = "head_dim" case ropeScaling = "rope_scaling" case tieWordEmbeddings = "tie_word_embeddings" case maxPositionEmbeddings = "max_position_embeddings" } public init(from decoder: Decoder) throws { // custom implementation to handle optional keys with required values let container: KeyedDecodingContainer<Qwen3Configuration.CodingKeys> = try decoder.container( keyedBy: Qwen3Configuration.CodingKeys.self) self.hiddenSize = try container.decode( Int.self, forKey: Qwen3Configuration.CodingKeys.hiddenSize) self.hiddenLayers = try container.decode( Int.self, forKey: Qwen3Configuration.CodingKeys.hiddenLayers) self.intermediateSize = try container.decode( Int.self, forKey: Qwen3Configuration.CodingKeys.intermediateSize) self.attentionHeads = try container.decode( Int.self, forKey: Qwen3Configuration.CodingKeys.attentionHeads) self.rmsNormEps = try container.decode( Float.self, forKey: Qwen3Configuration.CodingKeys.rmsNormEps) self.vocabularySize = try container.decode( Int.self, forKey: Qwen3Configuration.CodingKeys.vocabularySize) self.kvHeads = try container.decode(Int.self, forKey: Qwen3Configuration.CodingKeys.kvHeads) self.ropeTheta = try container.decodeIfPresent( Float.self, forKey: Qwen3Configuration.CodingKeys.ropeTheta) ?? 1_000_000 self.headDim = try container.decode( Int.self, forKey: Qwen3Configuration.CodingKeys.headDim) self.ropeScaling = try container.decodeIfPresent( [String: StringOrNumber].self, forKey: Qwen3Configuration.CodingKeys.ropeScaling) self.tieWordEmbeddings = try container.decodeIfPresent(Bool.self, forKey: .tieWordEmbeddings) ?? false self.maxPositionEmbeddings = try container.decodeIfPresent(Int.self, forKey: .maxPositionEmbeddings) ?? 32768 } } // MARK: - LoRA extension Qwen3Model: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["q_proj", "v_proj"]) } } }
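The main Qwen3 change relative to Qwen2 above is the per-head RMSNorm (q_norm / k_norm) applied to the reshaped queries and keys before RoPE. A minimal MLX Python sketch of that step, with toy sizes:

import mlx.core as mx
import mlx.nn as nn

B, L, n_heads, head_dim = 1, 4, 8, 16
q_norm = nn.RMSNorm(head_dim, eps=1e-6)

queries = mx.random.normal((B, L, n_heads * head_dim))
# normalize each head's head_dim vector, then move heads to axis 1 for attention
queries = q_norm(queries.reshape(B, L, n_heads, -1)).transpose(0, 2, 1, 3)
print(queries.shape)  # (1, 8, 4, 16)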
LM
Qwen3MoE
# Copyright © 2025 Apple Inc. from dataclasses import dataclass from typing import Any, Dict, List, Optional, Union import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention from .switch_layers import SwitchGLU @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int num_experts: int num_experts_per_tok: int decoder_sparse_step: int mlp_only_layers: List[int] moe_intermediate_size: int rms_norm_eps: float vocab_size: int num_key_value_heads: int head_dim: int rope_theta: float tie_word_embeddings: bool max_position_embeddings: int norm_topk_prob: bool rope_scaling: Optional[Dict[str, Union[float, str]]] = None class Attention(nn.Module): def __init__(self, args: ModelArgs, layer_idx: int): super().__init__() dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads assert args.num_key_value_heads is not None self.n_kv_heads = n_kv_heads = args.num_key_value_heads head_dim = getattr( args, "head_dim", args.hidden_size // args.num_attention_heads ) self.scale = head_dim**-0.5 self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=False) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=False) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=False) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=False) self.q_norm = nn.RMSNorm(head_dim, eps=args.rms_norm_eps) self.k_norm = nn.RMSNorm(head_dim, eps=args.rms_norm_eps) self.rope = nn.RoPE( head_dim, traditional=False, base=args.rope_theta, ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) # Prepare the queries, keys and values for the attention computation queries = self.q_norm(queries.reshape(B, L, self.n_heads, -1)).transpose( 0, 2, 1, 3 ) keys = self.k_norm(keys.reshape(B, L, self.n_kv_heads, -1)).transpose( 0, 2, 1, 3 ) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class MLP(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.gate_proj = nn.Linear(dim, hidden_dim, bias=False) self.down_proj = nn.Linear(hidden_dim, dim, bias=False) self.up_proj = nn.Linear(dim, hidden_dim, bias=False) def __call__(self, x) -> mx.array: return self.down_proj(nn.silu(self.gate_proj(x)) * self.up_proj(x)) class Qwen3MoeSparseMoeBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() dim = args.hidden_size intermediate_size = args.moe_intermediate_size self.num_experts = num_experts = args.num_experts self.top_k = args.num_experts_per_tok self.norm_topk_prob = args.norm_topk_prob self.gate = nn.Linear(dim, num_experts, bias=False) self.switch_mlp = SwitchGLU(dim, intermediate_size, num_experts) def __call__( self, x: mx.array, ): gates = self.gate(x) gates = mx.softmax(gates, axis=-1, precise=True) k = self.top_k inds = mx.stop_gradient(mx.argpartition(-gates, kth=k - 1, axis=-1)[..., :k]) scores = mx.take_along_axis(gates, inds, axis=-1) if 
self.norm_topk_prob: scores /= mx.sum(scores, axis=-1, keepdims=True) y = self.switch_mlp(x, inds) y = (y * scores[..., None]).sum(axis=-2) return y class Qwen3MoeDecoderLayer(nn.Module): def __init__(self, args: ModelArgs, layer_idx: int): super().__init__() self.hidden_size = args.hidden_size self.self_attn = Attention(args, layer_idx) self.input_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) self.post_attention_layernorm = nn.RMSNorm( args.hidden_size, eps=args.rms_norm_eps ) self.args = args if (layer_idx not in args.mlp_only_layers) and ( args.num_experts > 0 and (layer_idx + 1) % args.decoder_sparse_step == 0 ): self.mlp = Qwen3MoeSparseMoeBlock(args) else: self.mlp = MLP(args.hidden_size, args.intermediate_size) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.self_attn(self.input_layernorm(x), mask, cache) h = x + r r = self.mlp(self.post_attention_layernorm(h)) out = h + r return out class Qwen3MoeModel(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = args.num_hidden_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ Qwen3MoeDecoderLayer(args=args, layer_idx=i) for i in range(args.num_hidden_layers) ] self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): h = self.embed_tokens(inputs) if mask is None: mask = create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.model_type = args.model_type self.model = Qwen3MoeModel(args) self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.model(inputs, mask, cache) return self.lm_head(out) def sanitize(self, weights): if "model.layers.0.mlp.experts.0.up_proj.weight" not in weights: return weights for l in range(self.args.num_hidden_layers): prefix = f"model.layers.{l}" for n in ["up_proj", "down_proj", "gate_proj"]: if f"{prefix}.mlp.experts.0.{n}.weight" in weights: to_join = [ weights.pop(f"{prefix}.mlp.experts.{e}.{n}.weight") for e in range(self.args.num_experts) ] weights[f"{prefix}.mlp.switch_mlp.{n}.weight"] = mx.stack(to_join) return weights @property def layers(self): return self.model.layers import inspect from dataclasses import dataclass from functools import partial from typing import Any, Dict, List, Optional, Union import mlx.core as mx import mlx.nn as nn from ..base import ( LanguageModelOutput, create_attention_mask, scaled_dot_product_attention, ) from ..cache import KVCache, RotatingKVCache @dataclass class TextConfig: model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int = 8 head_dim: int = 256 rms_norm_eps: float = 1.0e-6 vocab_size: int = 262208 num_key_value_heads: int = 4 rope_global_base_freq: float = 1_000_000.0 rope_local_base_freq: float = 10_000.0 rope_traditional: bool = False query_pre_attn_scalar: float = 256 sliding_window: int = 1024 rope_scaling: Optional[Dict[str, Union[float, List[float]]]] = None mm_tokens_per_image: int = 256 sliding_window_pattern: int = 6 max_position_embeddings: int = 4096 @classmethod def 
from_dict(cls, params): return cls( **{ k: v for k, v in params.items() if k in inspect.signature(cls).parameters } ) class RMSNorm(nn.Module): def __init__(self, dims: int, eps: float = 1e-5): super().__init__() self.weight = mx.ones((dims,)) self.eps = eps def __call__(self, x): return mx.fast.rms_norm(x, 1.0 + self.weight, self.eps) class Attention(nn.Module): def __init__(self, config: TextConfig, layer_idx: int): super().__init__() dim = config.hidden_size self.n_heads = n_heads = config.num_attention_heads self.n_kv_heads = n_kv_heads = config.num_key_value_heads self.repeats = n_heads // n_kv_heads self.head_dim = head_dim = config.head_dim self.layer_idx = layer_idx self.scale = config.query_pre_attn_scalar**-0.5 self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=False) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=False) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=False) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=False) self.q_norm = RMSNorm(dims=head_dim, eps=config.rms_norm_eps) self.k_norm = RMSNorm(dims=head_dim, eps=config.rms_norm_eps) self.is_sliding = (layer_idx + 1) % config.sliding_window_pattern != 0 self.rope = nn.RoPE( head_dim, traditional=config.rope_traditional, base=( config.rope_local_base_freq if self.is_sliding else config.rope_global_base_freq ), ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, _ = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) queries = self.q_norm(queries) keys = self.k_norm(keys) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) # Sliding window if mask is not None and isinstance(mask, mx.array): if mask.shape[-1] != keys.shape[-2]: mask = mask[..., -keys.shape[-2] :] output = scaled_dot_product_attention( queries, keys, values, cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class MLP(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.gate_proj = nn.Linear(dim, hidden_dim, bias=False) self.down_proj = nn.Linear(hidden_dim, dim, bias=False) self.up_proj = nn.Linear(dim, hidden_dim, bias=False) def __call__(self, x) -> mx.array: # This should not be GELU approx, jax.nn.gelu return self.down_proj(nn.gelu_approx(self.gate_proj(x)) * self.up_proj(x)) @partial(mx.compile, shapeless=True) def clip_residual(x, y=None): bound = mx.finfo(mx.float16).max if y is None: if x.dtype == mx.float16: return mx.clip(x.astype(mx.float32), -bound, bound).astype(mx.float16) else: return x if x.dtype != mx.float16: return x + y return mx.clip(x.astype(mx.float32) + y.astype(mx.float32), -bound, bound).astype( mx.float16 ) class TransformerBlock(nn.Module): def __init__(self, config: TextConfig, layer_idx: int): super().__init__() self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.self_attn = Attention(config, layer_idx) self.mlp = MLP(config.hidden_size, config.intermediate_size) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm( 
config.hidden_size, eps=config.rms_norm_eps ) self.pre_feedforward_layernorm = RMSNorm( config.hidden_size, eps=config.rms_norm_eps ) self.post_feedforward_layernorm = RMSNorm( config.hidden_size, eps=config.rms_norm_eps ) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: # Clip the input to avoid overflow in float16 # Float16 has a max value of 65504. When values exceed this limit, they become inf. # Example: If x contains 70000.0 in float16, it becomes inf, causing gradient issues. # We upcast to float32 for operations that might exceed the limit, then clip and # convert back to float16 to maintain numerical stability. # Clip input to avoid overflow in float16 x = clip_residual(x) # Self-attention block r = self.self_attn(self.input_layernorm(x), mask, cache) h = self.post_attention_layernorm(r) # Add residual connection with overflow protection for float16 h = clip_residual(x + h) # MLP block r = self.mlp(self.pre_feedforward_layernorm(h)) out = self.post_feedforward_layernorm(r) # Add residual connection with overflow protection for float16 out = clip_residual(h + out) return out class Gemma3Model(nn.Module): def __init__(self, config: TextConfig): super().__init__() self.config = config self.vocab_size = config.vocab_size self.num_hidden_layers = config.num_hidden_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size) self.layers = [ TransformerBlock(config=config, layer_idx=layer_idx) for layer_idx in range(config.num_hidden_layers) ] self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) def __call__( self, inputs: mx.array, inputs_embeds: mx.array = None, mask: mx.array = None, cache=None, ): if inputs_embeds is None: h = self.embed_tokens(inputs) else: h = inputs_embeds h *= mx.array(self.config.hidden_size**0.5, mx.bfloat16).astype(h.dtype) if cache is None: cache = [None] * len(self.layers) if mask is None: j = self.config.sliding_window_pattern full_mask = create_attention_mask(h, cache[j - 1 : j]) sliding_window_mask = create_attention_mask(h, cache) for i, (layer, c) in enumerate(zip(self.layers, cache)): is_global = ( i % self.config.sliding_window_pattern == self.config.sliding_window_pattern - 1 ) local_mask = mask if mask is None and is_global: local_mask = full_mask elif mask is None: local_mask = sliding_window_mask h = layer(h, local_mask, c) return self.norm(h) class LanguageModel(nn.Module): def __init__(self, config: TextConfig): super().__init__() self.config = config self.model_type = config.model_type self.model = Gemma3Model(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) def __call__( self, inputs: mx.array, inputs_embeds: Optional[mx.array] = None, mask: Optional[mx.array] = None, cache=None, ): out = self.model(inputs, inputs_embeds=inputs_embeds, mask=mask, cache=cache) out = self.lm_head(out) return LanguageModelOutput(logits=out) def sanitize(self, weights): if "lm_head.weight" not in weights: weights["language_model.lm_head.weight"] = weights[ "language_model.model.embed_tokens.weight" ] return { k: v for k, v in weights.items() if "self_attn.rotary_emb.inv_freq" not in k } @property def layers(self): return self.model.layers @property def head_dim(self): return self.config.head_dim @property def n_kv_heads(self): return self.config.num_key_value_heads def make_cache(self): caches = [] for i in range(self.config.num_hidden_layers): if ( i % self.config.sliding_window_pattern == 
self.config.sliding_window_pattern - 1 ): caches.append(KVCache()) else: caches.append( RotatingKVCache( max_size=self.config.sliding_window, keep=0, ) ) return caches import inspect from dataclasses import dataclass from typing import Optional import mlx.core as mx import mlx.nn as nn import numpy as np @dataclass class VisionConfig: model_type: str num_hidden_layers: int hidden_size: int intermediate_size: int num_attention_heads: int patch_size: int image_size: int = 224 num_channels: int = 3 layer_norm_eps: float = 1e-6 @classmethod def from_dict(cls, params): return cls( **{ k: v for k, v in params.items() if k in inspect.signature(cls).parameters } ) def check_array_shape(arr): shape = arr.shape # Check if the shape has 4 dimensions if len(shape) != 4: return False out_channels, kH, KW, _ = shape # Check if out_channels is the largest, and kH and KW are the same if (out_channels >= kH) and (out_channels >= KW) and (kH == KW): return True else: return False class Attention(nn.Module): def __init__( self, dims: int, num_heads: int, query_input_dims: Optional[int] = None, key_input_dims: Optional[int] = None, value_input_dims: Optional[int] = None, value_dims: Optional[int] = None, value_output_dims: Optional[int] = None, bias: bool = True, ): super().__init__() if (dims % num_heads) != 0: raise ValueError( "The input feature dimensions should be divisible by the " f"number of heads ({dims} % {num_heads}) != 0" ) query_input_dims = query_input_dims or dims key_input_dims = key_input_dims or dims value_input_dims = value_input_dims or key_input_dims value_dims = value_dims or dims value_output_dims = value_output_dims or dims self.num_heads = num_heads head_dim = dims // num_heads self.scale = head_dim**-0.5 self.q_proj = nn.Linear(query_input_dims, dims, bias=bias) self.k_proj = nn.Linear(key_input_dims, dims, bias=bias) self.v_proj = nn.Linear(value_input_dims, value_dims, bias=bias) self.out_proj = nn.Linear(value_dims, value_output_dims, bias=bias) def __call__(self, x, mask=None): queries = self.q_proj(x) keys = self.k_proj(x) values = self.v_proj(x) num_heads = self.num_heads B, L, D = queries.shape _, S, _ = keys.shape queries = queries.reshape(B, L, num_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, S, num_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, S, num_heads, -1).transpose(0, 2, 1, 3) output = mx.fast.scaled_dot_product_attention( queries, keys, values, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.out_proj(output) class MLP(nn.Module): def __init__(self, config: VisionConfig): super().__init__() self.activation_fn = nn.GELU(approx="precise") self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=True) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=True) def __call__(self, x: mx.array) -> mx.array: x = self.fc1(x) x = self.activation_fn(x) x = self.fc2(x) return x class EncoderLayer(nn.Module): def __init__(self, config: VisionConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = Attention( config.hidden_size, config.num_attention_heads, bias=True ) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = MLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def __call__(self, x: mx.array, mask: Optional[mx.array] = None) -> mx.array: r = self.self_attn(self.layer_norm1(x), mask) h = x + r r = self.mlp(self.layer_norm2(h)) return h + r class Encoder(nn.Module): def 
__init__(self, config: VisionConfig): super().__init__() self.layers = [EncoderLayer(config) for _ in range(config.num_hidden_layers)] def __call__( self, x: mx.array, output_hidden_states: Optional[bool] = None, mask: Optional[mx.array] = None, ) -> mx.array: encoder_states = (x,) if output_hidden_states else None h = x for l in self.layers: x = l(x, mask=mask) if output_hidden_states: encoder_states = encoder_states + (x,) h = x return (h, encoder_states) class VisionEmbeddings(nn.Module): def __init__(self, config: VisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) def __call__(self, x: mx.array) -> mx.array: patch_embeddings = self.patch_embedding(x) patch_embeddings = mx.flatten(patch_embeddings, start_axis=1, end_axis=2) position_ids = mx.array(np.arange(self.num_positions)[None, :]) embeddings = patch_embeddings embeddings += self.position_embedding(position_ids) return embeddings class SigLipVisionModel(nn.Module): def __init__(self, config: VisionConfig): super().__init__() self.embeddings = VisionEmbeddings(config) self.encoder = Encoder(config) self.post_layernorm = nn.LayerNorm(config.hidden_size) def __call__( self, x: mx.array, output_hidden_states: Optional[bool] = None, ) -> mx.array: x = self.embeddings(x) encoder_outputs = self.encoder( x=x, output_hidden_states=output_hidden_states, mask=None ) pooler_output = self.post_layernorm(encoder_outputs[0]) return pooler_output, x, encoder_outputs[-1] class VisionModel(nn.Module): def __init__(self, config: VisionConfig): super().__init__() self.model_type = config.model_type if self.model_type not in ["siglip_vision_model", "gemma3", "gemma3_vision"]: raise ValueError(f"Unsupported model type: {self.model_type}") self.vision_model = SigLipVisionModel(config) def __call__( self, x: mx.array, output_hidden_states: Optional[bool] = None ) -> mx.array: return self.vision_model(x, output_hidden_states) def sanitize(self, weights): sanitized_weights = {} for k, v in weights.items(): if "patch_embedding.weight" in k: # PyTorch conv2d weight tensors have shape: # [out_channels, in_channels, kH, KW] # MLX conv2d expects the weight be of shape: # [out_channels, kH, KW, in_channels] if check_array_shape(v): sanitized_weights[k] = v else: sanitized_weights[k] = v.transpose(0, 2, 3, 1) else: sanitized_weights[k] = v return sanitized_weights
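The sanitize step at the end of the Python cell above converts patch-embedding weights from the PyTorch Conv2d layout [out_channels, in_channels, kH, kW] to the [out_channels, kH, kW, in_channels] layout that MLX's Conv2d expects. A minimal sketch of that conversion, mirroring the check_array_shape heuristic (to_mlx_conv_weight is an illustrative name, not part of the library):

import mlx.core as mx

def to_mlx_conv_weight(w: mx.array) -> mx.array:
    # In the MLX layout, dims 1 and 2 are the (equal) kernel sizes and
    # dim 0 (out_channels) is at least as large as either of them.
    out_channels, d1, d2, _ = w.shape
    already_mlx = out_channels >= d1 and out_channels >= d2 and d1 == d2
    return w if already_mlx else w.transpose(0, 2, 3, 1)

w_torch = mx.zeros((16, 3, 14, 14))       # PyTorch layout: [out, in, kH, kW]
print(to_mlx_conv_weight(w_torch).shape)  # (16, 14, 14, 3)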
// // Qwen3MoE.swift // LLM // // Created by John Mai on 2025/4/30. // import Foundation import MLX import MLXLMCommon import MLXNN // port of https://github.com/ml-explore/mlx-lm/blob/main/mlx_lm/models/qwen3_moe.py private class Attention: Module { let args: Qwen3MoEConfiguration let scale: Float @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "o_proj") var wo: Linear @ModuleInfo(key: "q_norm") var qNorm: RMSNorm @ModuleInfo(key: "k_norm") var kNorm: RMSNorm let rope: RoPE public init(_ args: Qwen3MoEConfiguration, layerIdx: Int) { self.args = args let dim = args.hiddenSize let heads = args.attentionHeads let kvHeads = args.kvHeads let headDim = args.headDim self.scale = pow(Float(headDim), -0.5) _wq.wrappedValue = Linear(dim, heads * headDim, bias: false) _wk.wrappedValue = Linear(dim, kvHeads * headDim, bias: false) _wv.wrappedValue = Linear(dim, kvHeads * headDim, bias: false) _wo.wrappedValue = Linear(heads * headDim, dim, bias: false) _qNorm.wrappedValue = RMSNorm(dimensions: headDim, eps: args.rmsNormEps) _kNorm.wrappedValue = RMSNorm(dimensions: headDim, eps: args.rmsNormEps) let ropeScale: Float if let ropeScaling = args.ropeScaling, ropeScaling["type"] == .string("linear"), let factor = ropeScaling["factor"] { if let v = factor.asFloat() { ropeScale = 1 / v } else { fatalError("ropeScaling.factor must be a float") } } else { ropeScale = 1 } self.rope = RoPE( dimensions: headDim, traditional: false, base: args.ropeTheta, scale: ropeScale) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = wq(x) var keys = wk(x) var values = wv(x) // prepare the queries, keys and values for the attention computation queries = qNorm(queries.reshaped(B, L, args.attentionHeads, -1)).transposed(0, 2, 1, 3) keys = kNorm(keys.reshaped(B, L, args.kvHeads, -1)).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "gate_proj") var gate: Linear @ModuleInfo(key: "down_proj") var down: Linear @ModuleInfo(key: "up_proj") var up: Linear public init(dimensions: Int, hiddenDimensions: Int) { _gate.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) _down.wrappedValue = Linear(hiddenDimensions, dimensions, bias: false) _up.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false) } public func callAsFunction(_ x: MLXArray) -> MLXArray { down(silu(gate(x)) * up(x)) } } private class Qwen3MoESparseMoeBlock: Module, UnaryLayer { let numExperts: Int let topK: Int let normTopkProb: Bool @ModuleInfo(key: "gate") var gate: Linear @ModuleInfo(key: "switch_mlp") var switchMLP: SwitchGLU init(_ args: Qwen3MoEConfiguration) { self.numExperts = args.numExperts self.topK = args.numExpertsPerToken self.normTopkProb = args.normTopkProb _gate.wrappedValue = Linear(args.hiddenSize, numExperts, bias: false) _switchMLP.wrappedValue = SwitchGLU( inputDims: args.hiddenSize, hiddenDims: args.moeIntermediateSize, numExperts: numExperts ) } func callAsFunction(_ x: 
MLXArray) -> MLXArray { let gates = gate(x) let softGates = MLX.softmax(gates, axis: -1, precise: true) let k = topK let inds = MLX.argPartition(-gates, kth: k - 1, axis: -1)[.ellipsis, ..<k] var scores = MLX.takeAlong(softGates, inds, axis: -1) if normTopkProb { scores = scores / MLX.sum(scores, axis: -1, keepDims: true) } let y = switchMLP(x, inds) return (y * scores[.ellipsis, .newAxis]).sum(axis: -2) } } private class Qwen3MoeDecoderLayer: Module { let args: Qwen3MoEConfiguration let layerIdx: Int @ModuleInfo(key: "self_attn") var selfAttn: Attention @ModuleInfo(key: "input_layernorm") var inputLayerNorm: RMSNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: RMSNorm fileprivate let mlp: UnaryLayer init(_ args: Qwen3MoEConfiguration, layerIdx: Int) { self.args = args self.layerIdx = layerIdx _selfAttn.wrappedValue = Attention(args, layerIdx: layerIdx) _inputLayerNorm.wrappedValue = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) _postAttentionLayerNorm.wrappedValue = RMSNorm( dimensions: args.hiddenSize, eps: args.rmsNormEps) if !args.mlpOnlyLayers.contains(layerIdx), args.numExperts > 0, (layerIdx + 1) % args.decoderSparseStep == 0 { self.mlp = Qwen3MoESparseMoeBlock(args) } else { self.mlp = MLP(dimensions: args.hiddenSize, hiddenDimensions: args.intermediateSize) } } func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { var r = selfAttn(inputLayerNorm(x), mask: mask, cache: cache) let h = x + r r = mlp(postAttentionLayerNorm(h)) let out = h + r return out } } private class Qwen3MoEModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding fileprivate let layers: [Qwen3MoeDecoderLayer] let norm: RMSNorm let args: Qwen3MoEConfiguration init(_ args: Qwen3MoEConfiguration) { self.args = args precondition(args.vocabularySize > 0) _embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers) .map { i in Qwen3MoeDecoderLayer(args, layerIdx: i) } self.norm = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps) } func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class Qwen3MoEModel: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] fileprivate let model: Qwen3MoEModelInner let configuration: Qwen3MoEConfiguration @ModuleInfo(key: "lm_head") var lmHead: Linear? public init(_ args: Qwen3MoEConfiguration) { self.configuration = args self.vocabularySize = args.vocabularySize self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads } self.model = Qwen3MoEModelInner(args) if !args.tieWordEmbeddings { _lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: false) } } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) 
-> MLXArray { var out = model(inputs, cache: cache) if let lmHead { out = lmHead(out) } else { out = model.embedTokens.asLinear(out) } return out } public func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] { var sanitizedWeights = weights if configuration.tieWordEmbeddings { sanitizedWeights["lm_head.weight"] = nil } if sanitizedWeights["model.layers.0.mlp.experts.0.up_proj.weight"] == nil { return sanitizedWeights } for l in 0 ..< configuration.hiddenLayers { let prefix = "model.layers.\(l)" for n in ["up_proj", "down_proj", "gate_proj"] { if sanitizedWeights["\(prefix).mlp.experts.0.\(n).weight"] != nil { let toJoin = (0 ..< configuration.numExperts).map { e in sanitizedWeights.removeValue( forKey: "\(prefix).mlp.experts.\(e).\(n).weight")! } sanitizedWeights["\(prefix).mlp.switch_mlp.\(n).weight"] = MLX.stacked(toJoin) } } } return sanitizedWeights } } public struct Qwen3MoEConfiguration: Codable, Sendable { var modelType: String = "qwen3_moe" var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var numExperts: Int var numExpertsPerToken: Int var decoderSparseStep: Int var mlpOnlyLayers: [Int] var moeIntermediateSize: Int var rmsNormEps: Float var vocabularySize: Int var kvHeads: Int var headDim: Int var ropeTheta: Float = 1_000_000 var tieWordEmbeddings: Bool = false var maxPositionEmbeddings: Int = 32768 var normTopkProb: Bool = false var ropeScaling: [String: StringOrNumber]? = nil enum CodingKeys: String, CodingKey { case modelType = "model_type" case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case numExperts = "num_experts" case numExpertsPerToken = "num_experts_per_tok" case decoderSparseStep = "decoder_sparse_step" case mlpOnlyLayers = "mlp_only_layers" case moeIntermediateSize = "moe_intermediate_size" case rmsNormEps = "rms_norm_eps" case vocabularySize = "vocab_size" case kvHeads = "num_key_value_heads" case headDim = "head_dim" case ropeTheta = "rope_theta" case tieWordEmbeddings = "tie_word_embeddings" case maxPositionEmbeddings = "max_position_embeddings" case normTopkProb = "norm_topk_prob" case ropeScaling = "rope_scaling" } public init(from decoder: Decoder) throws { let container = try decoder.container(keyedBy: CodingKeys.self) self.modelType = try container.decodeIfPresent(String.self, forKey: .modelType) ?? "qwen3_moe" self.hiddenSize = try container.decode(Int.self, forKey: .hiddenSize) self.hiddenLayers = try container.decode(Int.self, forKey: .hiddenLayers) self.intermediateSize = try container.decode(Int.self, forKey: .intermediateSize) self.attentionHeads = try container.decode(Int.self, forKey: .attentionHeads) self.numExperts = try container.decode(Int.self, forKey: .numExperts) self.numExpertsPerToken = try container.decode(Int.self, forKey: .numExpertsPerToken) self.decoderSparseStep = try container.decode(Int.self, forKey: .decoderSparseStep) self.mlpOnlyLayers = try container.decode([Int].self, forKey: .mlpOnlyLayers) self.moeIntermediateSize = try container.decode(Int.self, forKey: .moeIntermediateSize) self.rmsNormEps = try container.decode(Float.self, forKey: .rmsNormEps) self.vocabularySize = try container.decode(Int.self, forKey: .vocabularySize) self.kvHeads = try container.decode(Int.self, forKey: .kvHeads) self.headDim = try container.decode(Int.self, forKey: .headDim) self.ropeTheta = try container.decodeIfPresent(Float.self, forKey: .ropeTheta) ?? 
1_000_000 self.tieWordEmbeddings = try container.decodeIfPresent(Bool.self, forKey: .tieWordEmbeddings) ?? false self.maxPositionEmbeddings = try container.decodeIfPresent(Int.self, forKey: .maxPositionEmbeddings) ?? 32768 self.normTopkProb = try container.decodeIfPresent(Bool.self, forKey: .normTopkProb) ?? false self.ropeScaling = try container.decodeIfPresent( [String: StringOrNumber].self, forKey: .ropeScaling) } } // MARK: - LoRA extension Qwen3MoEModel: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.selfAttn, ["q_proj", "v_proj"]) } } }
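Both the Python Qwen3MoeSparseMoeBlock and its Swift port route each token through the same top-k gating: softmax the gate logits, pick the k largest entries with argpartition, gather their scores with take_along_axis, and optionally renormalize them. A standalone sketch of that routing step (route_top_k is an illustrative helper, not part of mlx-lm):

import mlx.core as mx

def route_top_k(gate_logits: mx.array, k: int, norm_topk_prob: bool):
    # gate_logits: (..., num_experts) raw scores from the gating Linear.
    gates = mx.softmax(gate_logits, axis=-1)
    # Indices of the k largest gates per token (order within the top-k is unspecified).
    inds = mx.stop_gradient(mx.argpartition(-gates, kth=k - 1, axis=-1)[..., :k])
    scores = mx.take_along_axis(gates, inds, axis=-1)
    if norm_topk_prob:
        scores = scores / mx.sum(scores, axis=-1, keepdims=True)
    return inds, scores

# Toy example: one token, four experts, top-2 routing.
logits = mx.array([[0.1, 2.0, -1.0, 1.5]])
inds, scores = route_top_k(logits, k=2, norm_topk_prob=True)
print(inds, scores)  # the two strongest experts and their renormalized weights

The selected indices are then passed to SwitchGLU, which evaluates only those experts, and the per-expert outputs are combined with the routing weights.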
LM
Starcoder2
# Copyright © 2023-2024 Apple Inc. from dataclasses import dataclass from typing import Any, Optional import mlx.core as mx import mlx.nn as nn from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention @dataclass class ModelArgs(BaseModelArgs): model_type: str hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int num_key_value_heads: int norm_epsilon: float = 1e-5 vocab_size: int = 49152 rope_theta: float = 100000 tie_word_embeddings: bool = True class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args dim = args.hidden_size self.n_heads = n_heads = args.num_attention_heads self.n_kv_heads = n_kv_heads = args.num_key_value_heads head_dim = args.hidden_size // args.num_attention_heads self.scale = head_dim**-0.5 self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=True) self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=True) self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=True) self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=True) self.rope = nn.RoPE(head_dim, traditional=False, base=args.rope_theta) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: B, L, D = x.shape queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x) # Prepare the queries, keys and values for the attention computation queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) if cache is not None: queries = self.rope(queries, offset=cache.offset) keys = self.rope(keys, offset=cache.offset) keys, values = cache.update_and_fetch(keys, values) else: queries = self.rope(queries) keys = self.rope(keys) output = scaled_dot_product_attention( queries, keys, values, cache=cache, scale=self.scale, mask=mask ) output = output.transpose(0, 2, 1, 3).reshape(B, L, -1) return self.o_proj(output) class MLP(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.c_fc = nn.Linear(dim, hidden_dim, bias=True) self.c_proj = nn.Linear(hidden_dim, dim, bias=True) def __call__(self, x): return self.c_proj(nn.gelu(self.c_fc(x))) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.hidden_size = args.hidden_size self.n_heads = args.num_attention_heads self.self_attn = Attention(args) self.mlp = MLP(args.hidden_size, args.intermediate_size) self.input_layernorm = nn.LayerNorm(args.hidden_size, eps=args.norm_epsilon) self.post_attention_layernorm = nn.LayerNorm( args.hidden_size, eps=args.norm_epsilon ) self.args = args def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None, ) -> mx.array: r = self.self_attn(self.input_layernorm(x), mask, cache) h = x + r r = self.mlp(self.post_attention_layernorm(h)) out = h + r return out class Starcoder2Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.num_hidden_layers = args.num_hidden_layers assert self.vocab_size > 0 self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size) self.layers = [ TransformerBlock(args=args) for _ in range(args.num_hidden_layers) ] self.norm = nn.LayerNorm(args.hidden_size, eps=args.norm_epsilon) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): h = self.embed_tokens(inputs) if mask is None: mask = 
create_attention_mask(h, cache) if cache is None: cache = [None] * len(self.layers) for layer, c in zip(self.layers, cache): h = layer(h, mask, c) return self.norm(h) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.args = args self.model_type = args.model_type self.model = Starcoder2Model(args) if not args.tie_word_embeddings: self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False) def __call__( self, inputs: mx.array, mask: mx.array = None, cache=None, ): out = self.model(inputs, mask, cache) if self.args.tie_word_embeddings: out = self.model.embed_tokens.as_linear(out) else: out = self.lm_head(out) return out @property def layers(self): return self.model.layers
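Starcoder2 ties its input and output embeddings by default (tie_word_embeddings), so there is no separate lm_head: logits come from projecting hidden states back through the embedding matrix, which is what embed_tokens.as_linear does above. A small sketch of that equivalence with illustrative sizes:

import mlx.core as mx
import mlx.nn as nn

hidden_size, vocab_size = 8, 32
embed = nn.Embedding(vocab_size, hidden_size)
h = mx.random.normal((1, 4, hidden_size))        # (batch, seq, hidden)

logits_tied = embed.as_linear(h)                 # embedding weights reused as the output head
logits_manual = h @ embed.weight.T               # the same projection spelled out

print(logits_tied.shape)                          # (1, 4, 32)
print(mx.allclose(logits_tied, logits_manual))    # array(True, dtype=bool)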
// // Starcoder2.swift // LLM // // Created by John Mai on 2024/3/7. // import Foundation import MLX import MLXLMCommon import MLXNN // port of https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/models/starcoder2.py private class Attention: Module { let args: Starcoder2Configuration let scale: Float @ModuleInfo(key: "q_proj") var wq: Linear @ModuleInfo(key: "k_proj") var wk: Linear @ModuleInfo(key: "v_proj") var wv: Linear @ModuleInfo(key: "o_proj") var wo: Linear let rope: RoPE public init(_ args: Starcoder2Configuration) { self.args = args let dim = args.hiddenSize let heads = args.attentionHeads let kvHeads = args.kvHeads let headDim = args.hiddenSize / heads self.scale = pow(Float(headDim), -0.5) _wq.wrappedValue = Linear(dim, heads * headDim, bias: true) _wk.wrappedValue = Linear(dim, kvHeads * headDim, bias: true) _wv.wrappedValue = Linear(dim, kvHeads * headDim, bias: true) _wo.wrappedValue = Linear(heads * headDim, dim, bias: true) self.rope = RoPE(dimensions: headDim, traditional: false, base: args.ropeTheta) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? ) -> MLXArray { let (B, L) = (x.dim(0), x.dim(1)) var queries = wq(x) var keys = wk(x) var values = wv(x) // prepare the queries, keys and values for the attention computation queries = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3) if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } let output = attentionWithCacheUpdate( queries: queries, keys: keys, values: values, cache: cache, scale: scale, mask: mask ) .transposed(0, 2, 1, 3) .reshaped(B, L, -1) return wo(output) } } private class MLP: Module, UnaryLayer { @ModuleInfo(key: "c_fc") var cFc: Linear @ModuleInfo(key: "c_proj") var cProj: Linear public init(dimensions: Int, hiddenDimensions: Int) { _cFc.wrappedValue = Linear(dimensions, hiddenDimensions, bias: true) _cProj.wrappedValue = Linear(hiddenDimensions, dimensions, bias: true) } public func callAsFunction(_ x: MLXArray) -> MLXArray { cProj(gelu(cFc(x))) } } private class TransformerBlock: Module { @ModuleInfo(key: "self_attn") var attention: Attention let mlp: MLP @ModuleInfo(key: "input_layernorm") var inputLayerNorm: LayerNorm @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: LayerNorm public init(_ args: Starcoder2Configuration) { _attention.wrappedValue = Attention(args) self.mlp = MLP(dimensions: args.hiddenSize, hiddenDimensions: args.intermediateSize) _inputLayerNorm.wrappedValue = LayerNorm( dimensions: args.hiddenSize, eps: args.normEpsilon) _postAttentionLayerNorm.wrappedValue = LayerNorm( dimensions: args.hiddenSize, eps: args.normEpsilon) } public func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? 
) -> MLXArray { var r = attention(inputLayerNorm(x), mask: mask, cache: cache) let h = x + r r = mlp(postAttentionLayerNorm(h)) let out = h + r return out } } private class Starcoder2ModelInner: Module { @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding fileprivate let layers: [TransformerBlock] let norm: LayerNorm public init(_ args: Starcoder2Configuration) { precondition(args.vocabularySize > 0) _embedTokens.wrappedValue = Embedding( embeddingCount: args.vocabularySize, dimensions: args.hiddenSize) self.layers = (0 ..< args.hiddenLayers) .map { _ in TransformerBlock(args) } self.norm = LayerNorm(dimensions: args.hiddenSize, eps: args.normEpsilon) } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray { var h = embedTokens(inputs) let mask = createAttentionMask(h: h, cache: cache) for (i, layer) in layers.enumerated() { h = layer(h, mask: mask, cache: cache?[i]) } return norm(h) } } public class Starcoder2Model: Module, LLMModel, KVCacheDimensionProvider { public let vocabularySize: Int public let kvHeads: [Int] public let tieWordEmbeddings: Bool private let model: Starcoder2ModelInner @ModuleInfo(key: "lm_head") var lmHead: Linear public init(_ args: Starcoder2Configuration) { self.vocabularySize = args.vocabularySize self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads } self.model = Starcoder2ModelInner(args) self.tieWordEmbeddings = args.tieWordEmbeddings if !self.tieWordEmbeddings { _lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: false) } } public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray { var out = model(inputs, cache: cache) if !tieWordEmbeddings { return lmHead(out) } else { out = model.embedTokens.asLinear(out) return out } } } public struct Starcoder2Configuration: Codable, Sendable { var hiddenSize: Int var hiddenLayers: Int var intermediateSize: Int var attentionHeads: Int var kvHeads: Int var maxPositionEmbeddings: Int = 16384 var normEpsilon: Float = 1e-5 var normType: String = "layer_norm" var vocabularySize: Int = 49152 var ropeTheta: Float = 100000 var tieWordEmbeddings: Bool = true enum CodingKeys: String, CodingKey { case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case kvHeads = "num_key_value_heads" case maxPositionEmbeddings = "max_position_embeddings" case normEpsilon = "norm_epsilon" case normType = "norm_type" case vocabularySize = "vocab_size" case ropeTheta = "rope_theta" case tieWordEmbeddings = "tie_word_embeddings" } public init(from decoder: Decoder) throws { // custom implementation to handle optional keys with required values let container: KeyedDecodingContainer<Starcoder2Configuration.CodingKeys> = try decoder.container( keyedBy: Starcoder2Configuration.CodingKeys.self) self.hiddenSize = try container.decode( Int.self, forKey: Starcoder2Configuration.CodingKeys.hiddenSize) self.hiddenLayers = try container.decode( Int.self, forKey: Starcoder2Configuration.CodingKeys.hiddenLayers) self.intermediateSize = try container.decode( Int.self, forKey: Starcoder2Configuration.CodingKeys.intermediateSize) self.attentionHeads = try container.decode( Int.self, forKey: Starcoder2Configuration.CodingKeys.attentionHeads) self.kvHeads = try container.decode( Int.self, forKey: Starcoder2Configuration.CodingKeys.kvHeads) self.maxPositionEmbeddings = try container.decodeIfPresent( Int.self, forKey: Starcoder2Configuration.CodingKeys.maxPositionEmbeddings) 
?? 16384 self.normEpsilon = try container.decodeIfPresent( Float.self, forKey: Starcoder2Configuration.CodingKeys.normEpsilon) ?? 1e-5 self.normType = try container.decodeIfPresent( String.self, forKey: Starcoder2Configuration.CodingKeys.normType) ?? "layer_norm" self.vocabularySize = try container.decodeIfPresent( Int.self, forKey: Starcoder2Configuration.CodingKeys.vocabularySize) ?? 49152 self.ropeTheta = try container.decodeIfPresent( Float.self, forKey: Starcoder2Configuration.CodingKeys.ropeTheta) ?? 100000 self.tieWordEmbeddings = try container.decodeIfPresent( Bool.self, forKey: Starcoder2Configuration.CodingKeys.tieWordEmbeddings) ?? true } } // MARK: - LoRA extension Starcoder2Model: LoRAModel { public func loraLinearLayers() -> LoRALinearLayers { model.layers.map { ($0.attention, ["q_proj", "v_proj"]) } } }
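Every attention block in these ports follows the same incremental-decoding contract: read cache.offset to shift the RoPE positions of the newly arrived tokens, then call update_and_fetch to append their keys/values and attend over the full history. A toy, unbounded cache illustrating that contract (the real KVCache/RotatingKVCache grow in preallocated steps and handle sliding windows; ToyKVCache is illustrative only):

import mlx.core as mx

class ToyKVCache:
    def __init__(self):
        self.keys = None
        self.values = None
        self.offset = 0  # number of tokens already cached; used as the RoPE offset

    def update_and_fetch(self, keys: mx.array, values: mx.array):
        # keys/values: (batch, kv_heads, new_tokens, head_dim)
        if self.keys is None:
            self.keys, self.values = keys, values
        else:
            self.keys = mx.concatenate([self.keys, keys], axis=2)
            self.values = mx.concatenate([self.values, values], axis=2)
        self.offset += keys.shape[2]
        return self.keys, self.values

cache = ToyKVCache()
prompt_k = mx.zeros((1, 4, 7, 16))                # 7-token prompt
cache.update_and_fetch(prompt_k, prompt_k)
step_k = mx.zeros((1, 4, 1, 16))                  # one decoded token; RoPE offset was 7
keys, _ = cache.update_and_fetch(step_k, step_k)
print(cache.offset, keys.shape)                   # 8 (1, 4, 8, 16)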
VLM
Gemma3
import glob import inspect import json from dataclasses import dataclass from pathlib import Path from typing import List, Optional import mlx.core as mx import mlx.nn as nn import numpy as np from huggingface_hub import snapshot_download from .language import LanguageModel, RMSNorm, TextConfig from .vision import VisionConfig, VisionModel @dataclass class ModelConfig: text_config: TextConfig vision_config: VisionConfig model_type: str vocab_size: int = 257152 ignore_index: int = -100 image_token_index: int = 262144 hidden_size: int = 2048 pad_token_id: int = 0 eos_token_id: Optional[List[int]] = None @classmethod def from_dict(cls, params): return cls( **{ k: v for k, v in params.items() if k in inspect.signature(cls).parameters } ) class Gemma3MultiModalProjector(nn.Module): def __init__(self, config: ModelConfig): super().__init__() self.mm_input_projection_weight = mx.ones( (config.vision_config.hidden_size, config.text_config.hidden_size) ) self.mm_soft_emb_norm = RMSNorm( config.vision_config.hidden_size, eps=config.vision_config.layer_norm_eps ) self.patches_per_image = int( config.vision_config.image_size // config.vision_config.patch_size ) self.tokens_per_side = int(config.text_config.mm_tokens_per_image**0.5) self.kernel_size = self.patches_per_image // self.tokens_per_side self.avg_pool = nn.AvgPool2d( kernel_size=self.kernel_size, stride=self.kernel_size ) def __call__(self, x: mx.array) -> mx.array: b, _, l = x.shape reshaped_vision_outputs = x.transpose(0, 2, 1) reshaped_vision_outputs = reshaped_vision_outputs.reshape( b, l, self.patches_per_image, self.patches_per_image ) # Transpose to place h, w in indices 1, 2 reshaped_vision_outputs = reshaped_vision_outputs.transpose(0, 2, 3, 1) pooled_vision_outputs = self.avg_pool(reshaped_vision_outputs) pooled_vision_outputs = pooled_vision_outputs.transpose(0, 3, 1, 2).flatten(2) pooled_vision_outputs = pooled_vision_outputs.transpose(0, 2, 1) normed_vision_outputs = self.mm_soft_emb_norm(pooled_vision_outputs) projected_vision_outputs = mx.einsum( "btm,md->btd", normed_vision_outputs, self.mm_input_projection_weight ) return projected_vision_outputs.astype(x.dtype) def masked_scatter( final_embedding: mx.array, image_mask_expanded: mx.array, scaled_image_features: mx.array, ): # Reshape the tensors to 1D final_embedding_shape = final_embedding.shape scaled_image_features_flattened = mx.flatten(scaled_image_features) final_embedding_flattened = mx.flatten(final_embedding) image_mask_expanded_flattened = mx.flatten(image_mask_expanded) # Scatter the scaled image features into the special image token positions image_positions = mx.array(np.where(image_mask_expanded_flattened)[0], mx.uint32) final_embedding_flattened[image_positions] = scaled_image_features_flattened # Reshape back to the original shape final_embedding = mx.reshape(final_embedding_flattened, final_embedding_shape) return final_embedding class Model(nn.Module): def __init__(self, config: ModelConfig): super().__init__() self.model_type = config.model_type self.config = config self.vision_tower = VisionModel(config.vision_config) self.language_model = LanguageModel(config.text_config) self.multi_modal_projector = Gemma3MultiModalProjector(config) def get_input_embeddings( self, input_ids: Optional[mx.array] = None, pixel_values: Optional[mx.array] = None, mask: Optional[mx.array] = None, ): if pixel_values is None: return self.language_model.model.embed_tokens(input_ids), None inputs_embeds = self.language_model.model.embed_tokens(input_ids) hidden_state, _, _ = 
self.vision_tower( pixel_values.transpose(0, 2, 3, 1).astype(inputs_embeds.dtype), output_hidden_states=True, ) image_features = self.multi_modal_projector(hidden_state) final_inputs_embeds, final_attention_mask_4d = ( self.prepare_inputs_for_multimodal( self.config.hidden_size, self.config.pad_token_id, self.config.image_token_index, image_features, inputs_embeds, input_ids, mask, ) ) return final_inputs_embeds, final_attention_mask_4d @staticmethod def prepare_inputs_for_multimodal( hidden_size, pad_token_id, image_token_index, image_features, inputs_embeds, input_ids, attention_mask, ): _, _, embed_dim = image_features.shape batch_size, sequence_length = input_ids.shape scaled_image_features = image_features / (hidden_size**0.5) final_embedding = mx.zeros((batch_size, sequence_length, embed_dim)) pad_token_id = pad_token_id pad_token_id = pad_token_id if pad_token_id is not None else 0 text_mask = (input_ids != image_token_index) & (input_ids != pad_token_id) image_mask = input_ids == image_token_index pad_mask = input_ids == pad_token_id # expand masks to match embedding dimension text_mask_expanded = mx.expand_dims(text_mask, -1) text_mask_expanded = mx.repeat(text_mask_expanded, embed_dim, axis=-1) pad_mask_expanded = mx.expand_dims(pad_mask, -1) pad_mask_expanded = mx.repeat(pad_mask_expanded, embed_dim, axis=-1) image_mask_expanded = mx.expand_dims(image_mask, -1) image_mask_expanded = mx.repeat(image_mask_expanded, embed_dim, axis=-1) # insert padding and text token embeddings final_embedding = mx.where(text_mask_expanded, inputs_embeds, final_embedding) final_embedding = mx.where( pad_mask_expanded, mx.zeros_like(final_embedding), final_embedding ) # insert image token embeddings final_embedding = masked_scatter( final_embedding, image_mask_expanded, scaled_image_features ) attention_mask_expanded_1 = mx.expand_dims(attention_mask, 1) attention_mask_expanded_2 = mx.expand_dims(attention_mask, 2) final_attention_mask_4d = attention_mask_expanded_1 * attention_mask_expanded_2 final_attention_mask_4d = final_attention_mask_4d final_attention_mask_4d = mx.expand_dims(final_attention_mask_4d, 1) final_embedding = mx.array(final_embedding) return final_embedding.astype(inputs_embeds.dtype), final_attention_mask_4d def __call__( self, input_ids: mx.array, pixel_values: mx.array, mask: Optional[mx.array] = None, cache: Optional[mx.array] = None, **kwargs, ): input_embeddings, final_attention_mask_4d = self.get_input_embeddings( input_ids, pixel_values, mask ) logits = self.language_model( inputs=input_ids, cache=cache, inputs_embeds=input_embeddings, ) return logits
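prepare_inputs_for_multimodal above builds the final embedding sequence by writing the projected image features into the positions that hold the image placeholder token. A toy sketch of that masked scatter with made-up sizes and ids (IMAGE_TOKEN and the shapes are illustrative; the real model uses image_token_index from the config):

import mlx.core as mx
import numpy as np

IMAGE_TOKEN = 99
input_ids = mx.array([[5, 99, 99, 7]])            # (batch=1, seq=4) with two image slots
text_embeds = mx.ones((1, 4, 3))                  # (batch, seq, embed_dim) from embed_tokens
image_feats = mx.full((2, 3), 42.0)               # one projected feature row per image slot

# Find the flat positions of the image placeholders, as masked_scatter does with np.where.
flat_mask = np.array(input_ids == IMAGE_TOKEN).reshape(-1)
positions = mx.array(np.where(flat_mask)[0], mx.uint32)

flat = mx.flatten(text_embeds, start_axis=0, end_axis=1)   # (batch * seq, embed_dim)
flat[positions] = image_feats                               # scatter image features into place
merged = flat.reshape(text_embeds.shape)
print(merged[0, 1], merged[0, 3])  # the image slot holds 42s, the text slot is untouched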
import CoreImage import MLX import MLXFast import MLXLMCommon import MLXNN import Tokenizers // Based on https://github.com/Blaizzy/mlx-vlm/tree/main/mlx_vlm/models/gemma3 // MARK: - Text Configuration public struct Gemma3TextConfiguration: Codable, Sendable { public let modelType: String public let hiddenSize: Int public let hiddenLayers: Int public let intermediateSize: Int public let slidingWindow: Int public let ropeScaling: [String: StringOrNumber]? public let finalLogitSoftcapping: Float? public let vocabularySize: Int = 262208 public let rmsNormEps: Float = 1.0e-6 // Decoded from JSON when present, with fallback if not private let _attentionHeads: Int? private let _kvHeads: Int? private let _headDim: Int? private let _queryPreAttnScalar: Float? // Not included in 4B model config.json, included for 12B and 27B models public var attentionHeads: Int { _attentionHeads ?? 8 } // Not included in 4B model config.json, included for 12B and 27B models public var kvHeads: Int { _kvHeads ?? 4 } // Not included in 4B and 12B model config.json, included for 27B model public var headDim: Int { _headDim ?? 256 } // Not included in 4B and 12B model config.json, included for 27B model public var queryPreAttnScalar: Float { _queryPreAttnScalar ?? 256 } public let ropeGlobalBaseFreq: Float = 1_000_000.0 public let ropeLocalBaseFreq: Float = 10_000.0 public let ropeTraditional: Bool = false public let mmTokensPerImage: Int = 256 public let slidingWindowPattern: Int = 6 public let maxPositionEmbeddings: Int = 4096 enum CodingKeys: String, CodingKey { case modelType = "model_type" case hiddenSize = "hidden_size" case hiddenLayers = "num_hidden_layers" case intermediateSize = "intermediate_size" case slidingWindow = "sliding_window" case ropeScaling = "rope_scaling" case finalLogitSoftcapping = "final_logit_softcapping" case _attentionHeads = "num_attention_heads" case _kvHeads = "num_key_value_heads" case _headDim = "head_dim" case _queryPreAttnScalar = "query_pre_attn_scalar" } } // MARK: - Vision Configuration public struct Gemma3VisionConfiguration: Codable, Sendable { public let modelType: String public let hiddenLayers: Int public let hiddenSize: Int public let intermediateSize: Int public let attentionHeads: Int public let patchSize: Int public let imageSize: Int public let numChannels: Int = 3 public let layerNormEps: Float = 1e-6 enum CodingKeys: String, CodingKey { case modelType = "model_type" case hiddenLayers = "num_hidden_layers" case hiddenSize = "hidden_size" case intermediateSize = "intermediate_size" case attentionHeads = "num_attention_heads" case patchSize = "patch_size" case imageSize = "image_size" } } // MARK: - Quantization Configuration public struct QuantizationConfig: Codable, Sendable { public let groupSize: Int public let bits: Int enum CodingKeys: String, CodingKey { case groupSize = "group_size" case bits } } // MARK: - Model Configuration public struct Gemma3Configuration: Codable, Sendable { public let textConfiguration: Gemma3TextConfiguration public let visionConfiguration: Gemma3VisionConfiguration public let modelType: String public let mmTokensPerImage: Int public let quantization: QuantizationConfig? private let _vocabularySize: Int? private let _padTokenId: Int? // Computed properties that use the text configuration or provide defaults public var vocabularySize: Int { _vocabularySize ?? textConfiguration.vocabularySize } public var hiddenSize: Int { textConfiguration.hiddenSize } public var padTokenId: Int { _padTokenId ?? 
0 } enum CodingKeys: String, CodingKey { case textConfiguration = "text_config" case visionConfiguration = "vision_config" case modelType = "model_type" case mmTokensPerImage = "mm_tokens_per_image" case quantization case _vocabularySize = "vocab_size" case _padTokenId = "pad_token_id" } } // MARK: - Attention private class Attention: Module { let numHeads: Int let numKVHeads: Int let repeats: Int let headDim: Int let layerIdx: Int let scale: Float let isSliding: Bool @ModuleInfo(key: "q_proj") var queryProj: Linear @ModuleInfo(key: "k_proj") var keyProj: Linear @ModuleInfo(key: "v_proj") var valueProj: Linear @ModuleInfo(key: "o_proj") var outputProj: Linear @ModuleInfo(key: "q_norm") var queryNorm: Gemma.RMSNorm @ModuleInfo(key: "k_norm") var keyNorm: Gemma.RMSNorm @ModuleInfo var rope: RoPE init(config: Gemma3TextConfiguration, layerIdx: Int) { let dim = config.hiddenSize self.numHeads = config.attentionHeads self.numKVHeads = config.kvHeads self.repeats = numHeads / numKVHeads self.headDim = config.headDim self.layerIdx = layerIdx self.scale = pow(config.queryPreAttnScalar, -0.5) self._queryProj.wrappedValue = Linear(dim, numHeads * headDim, bias: false) self._keyProj.wrappedValue = Linear(dim, numKVHeads * headDim, bias: false) self._valueProj.wrappedValue = Linear(dim, numKVHeads * headDim, bias: false) self._outputProj.wrappedValue = Linear(numHeads * headDim, dim, bias: false) self._queryNorm.wrappedValue = Gemma.RMSNorm( dimensions: headDim, eps: config.rmsNormEps) self._keyNorm.wrappedValue = Gemma.RMSNorm(dimensions: headDim, eps: config.rmsNormEps) // Gemma3 uses sliding window attention pattern self.isSliding = (layerIdx + 1) % config.slidingWindowPattern != 0 let baseFreq = isSliding ? config.ropeLocalBaseFreq : config.ropeGlobalBaseFreq self._rope.wrappedValue = RoPE( dimensions: headDim, traditional: config.ropeTraditional, base: baseFreq ) } func callAsFunction( _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? = nil ) -> MLXArray { let (B, L, _) = (x.dim(0), x.dim(1), x.dim(2)) var queries = queryProj(x) var keys = keyProj(x) var values = valueProj(x) // Reshape for multi-head attention queries = queries.reshaped(B, L, numHeads, -1).transposed(0, 2, 1, 3) keys = keys.reshaped(B, L, numKVHeads, -1).transposed(0, 2, 1, 3) values = values.reshaped(B, L, numKVHeads, -1).transposed(0, 2, 1, 3) // Apply normalization queries = queryNorm(queries) keys = keyNorm(keys) // Apply rotary position embedding if let cache { queries = rope(queries, offset: cache.offset) keys = rope(keys, offset: cache.offset) } else { queries = rope(queries) keys = rope(keys) } // Handle sliding window masking var finalMask = mask if case .array(let maskArray) = mask, maskArray.shape.last! != keys.shape[2] { let keyLen = keys.shape[2] let slicedMask = maskArray[.ellipsis, (-keyLen)...] 
            finalMask = .array(slicedMask)
        }

        // Scaled dot-product attention with native GQA support
        let output = attentionWithCacheUpdate(
            queries: queries,
            keys: keys,
            values: values,
            cache: cache,
            scale: scale,
            mask: finalMask
        )
        .transposed(0, 2, 1, 3)
        .reshaped(B, L, -1)

        return outputProj(output)
    }
}

// MARK: - MLP

private class MLP: Module, UnaryLayer {
    @ModuleInfo(key: "gate_proj") var gateProj: Linear
    @ModuleInfo(key: "down_proj") var downProj: Linear
    @ModuleInfo(key: "up_proj") var upProj: Linear

    init(dimensions: Int, hiddenDimensions: Int) {
        self._gateProj.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false)
        self._downProj.wrappedValue = Linear(hiddenDimensions, dimensions, bias: false)
        self._upProj.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false)
    }

    func callAsFunction(_ x: MLXArray) -> MLXArray {
        downProj(geluApproximate(gateProj(x)) * upProj(x))
    }
}

// MARK: - TransformerBlock

private class TransformerBlock: Module {
    @ModuleInfo(key: "self_attn") var selfAttention: Attention
    @ModuleInfo var mlp: MLP
    @ModuleInfo(key: "input_layernorm") var inputLayerNorm: Gemma.RMSNorm
    @ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: Gemma.RMSNorm
    @ModuleInfo(key: "pre_feedforward_layernorm") var preFeedforwardLayerNorm: Gemma.RMSNorm
    @ModuleInfo(key: "post_feedforward_layernorm") var postFeedforwardLayerNorm: Gemma.RMSNorm

    let numAttentionHeads: Int
    let hiddenSize: Int

    init(config: Gemma3TextConfiguration, layerIdx: Int) {
        self.numAttentionHeads = config.attentionHeads
        self.hiddenSize = config.hiddenSize
        self._selfAttention.wrappedValue = Attention(config: config, layerIdx: layerIdx)
        self.mlp = MLP(dimensions: config.hiddenSize, hiddenDimensions: config.intermediateSize)
        self._inputLayerNorm.wrappedValue = Gemma.RMSNorm(
            dimensions: config.hiddenSize, eps: config.rmsNormEps)
        self._postAttentionLayerNorm.wrappedValue = Gemma.RMSNorm(
            dimensions: config.hiddenSize, eps: config.rmsNormEps)
        self._preFeedforwardLayerNorm.wrappedValue = Gemma.RMSNorm(
            dimensions: config.hiddenSize, eps: config.rmsNormEps)
        self._postFeedforwardLayerNorm.wrappedValue = Gemma.RMSNorm(
            dimensions: config.hiddenSize, eps: config.rmsNormEps)
    }

    func callAsFunction(
        _ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache? = nil
    ) -> MLXArray {
        let r = selfAttention(inputLayerNorm(x), mask: mask, cache: cache)
        let h = Gemma.clipResidual(x, postAttentionLayerNorm(r))
        let r2 = mlp(preFeedforwardLayerNorm(h))
        let out = Gemma.clipResidual(h, postFeedforwardLayerNorm(r2))
        return out
    }
}

// MARK: - GemmaModel

private class GemmaModel: Module {
    @ModuleInfo(key: "embed_tokens") var embedTokens: Embedding
    @ModuleInfo var layers: [TransformerBlock]
    @ModuleInfo var norm: Gemma.RMSNorm

    let config: Gemma3TextConfiguration

    init(_ config: Gemma3TextConfiguration) {
        self.config = config
        self._embedTokens.wrappedValue = Embedding(
            embeddingCount: config.vocabularySize,
            dimensions: config.hiddenSize
        )
        self._layers.wrappedValue = (0 ..< config.hiddenLayers).map { layerIdx in
            TransformerBlock(config: config, layerIdx: layerIdx)
        }
        self.norm = Gemma.RMSNorm(dimensions: config.hiddenSize, eps: config.rmsNormEps)
    }

    func callAsFunction(
        _ inputs: MLXArray? = nil,
        inputEmbedding: MLXArray? = nil,
        mask: MLXFast.ScaledDotProductAttentionMaskMode? = nil,
        cache: [KVCache?]? = nil
    ) -> MLXArray {
        var h: MLXArray
        if let inputEmbedding = inputEmbedding {
            h = inputEmbedding
        } else if let inputs = inputs {
            h = embedTokens(inputs)
        } else {
            fatalError("Either inputs or inputEmbedding must be provided")
        }

        // Apply embedding scaling
        let scale = MLXArray(sqrtf(Float(config.hiddenSize)), dtype: .bfloat16).asType(
            inputs?.dtype ?? h.dtype)
        h = h * scale

        var layerCache = cache
        if layerCache == nil {
            layerCache = Array(repeating: nil as KVCache?, count: layers.count)
        }

        // Create attention masks for global and sliding window layers
        var fullMask: MLXFast.ScaledDotProductAttentionMaskMode = .none
        var slidingWindowMask: MLXFast.ScaledDotProductAttentionMaskMode = .none
        if mask == nil {
            let j = config.slidingWindowPattern
            if j > 0 && j <= layerCache!.count {
                let globalCacheSlice = layerCache![(j - 1) ..< j].compactMap { $0 }
                fullMask = createAttentionMask(h: h, cache: globalCacheSlice)
            }
            slidingWindowMask = createAttentionMask(h: h, cache: layerCache?.compactMap { $0 })
        }

        for (i, layer) in layers.enumerated() {
            let isGlobal = (i % config.slidingWindowPattern == config.slidingWindowPattern - 1)

            let localMask: MLXFast.ScaledDotProductAttentionMaskMode
            if let mask {
                localMask = mask
            } else if isGlobal {
                localMask = fullMask
            } else {
                localMask = slidingWindowMask
            }

            h = layer(h, mask: localMask, cache: layerCache?[i])
        }

        return norm(h)
    }
}

// MARK: - LanguageModel

private class LanguageModel: Module, KVCacheDimensionProvider {
    @ModuleInfo var model: GemmaModel
    @ModuleInfo(key: "lm_head") var lmHead: Module  // Can be Linear or QuantizedLinear

    let config: Gemma3TextConfiguration
    var kvHeads: [Int]

    init(_ config: Gemma3TextConfiguration) {
        self.config = config
        self.model = GemmaModel(config)
        self._lmHead.wrappedValue = Linear(config.hiddenSize, config.vocabularySize, bias: false)
        self.kvHeads = Array(repeating: config.kvHeads, count: config.hiddenLayers)
    }

    /// Creates appropriate cache types for each layer
    public func newCache(parameters: GenerateParameters?) -> [any KVCache] {
        var caches: [any KVCache] = []
        let slidingWindow = config.slidingWindow > 0 ? config.slidingWindow : 4096
        let slidingWindowPattern = config.slidingWindowPattern

        for i in 0 ..< config.hiddenLayers {
            let isGlobalLayer = (i % slidingWindowPattern == slidingWindowPattern - 1)
            if isGlobalLayer {
                caches.append(StandardKVCache())
            } else {
                caches.append(RotatingKVCache(maxSize: slidingWindow, keep: 0))
            }
        }

        return caches
    }

    func callAsFunction(
        _ inputs: MLXArray? = nil,
        cache: [KVCache]? = nil,
        inputEmbedding: MLXArray? = nil,
        mask: MLXFast.ScaledDotProductAttentionMaskMode? = nil
    ) -> LMOutput {
        let optionalCache = cache?.map { $0 as KVCache? }
        let out = model(inputs, inputEmbedding: inputEmbedding, mask: mask, cache: optionalCache)

        // Call the lmHead (works whether it's Linear or QuantizedLinear)
        var finalLogits: MLXArray
        if let linear = lmHead as? Linear {
            finalLogits = linear(out)
        } else if let quantized = lmHead as? QuantizedLinear {
            finalLogits = quantized(out)
        } else {
            fatalError("lmHead must be Linear or QuantizedLinear")
        }

        // Apply final logit softcapping if configured
        if let softcap = config.finalLogitSoftcapping, softcap > 0 {
            let scale = MLXArray(softcap)
            finalLogits = tanh(finalLogits / scale) * scale
        }

        return LMOutput(logits: finalLogits)
    }

    func sanitize(weights: [String: MLXArray], quantizationConfig: QuantizationConfig? = nil)
        -> [String: MLXArray]
    {
        var processedWeights = weights

        // Check if we have quantized weights
        let hasQuantizedLmHead = hasQuantizedWeights(
            layerPath: "language_model.lm_head", in: weights)

        if hasQuantizedLmHead {
            // Use quantization config from model configuration if available
            let groupSize = quantizationConfig?.groupSize ?? 64
            let bits = quantizationConfig?.bits ?? 4

            // Only quantize layers that actually have quantized weights
            quantize(model: self) { path, module in
                // Check each specific layer path for quantized weights
                let fullPath = "language_model.\(path)"
                if weights["\(fullPath).scales"] != nil && weights["\(fullPath).biases"] != nil
                    && weights["\(fullPath).weight"]?.dtype == .uint32
                {
                    return (groupSize, bits)
                }
                return nil
            }
        } else {
            // Handle weight tying for regular (non-quantized) lm_head
            if processedWeights["language_model.lm_head.weight"] == nil {
                if let embedWeight = processedWeights["language_model.model.embed_tokens.weight"] {
                    processedWeights["language_model.lm_head.weight"] = embedWeight
                }
            }
        }

        // Remove unused precomputed rotary freqs
        return processedWeights.filter { key, _ in
            !key.contains("self_attn.rotary_emb.inv_freq")
        }
    }

    /// Check if a layer has quantized weights
    private func hasQuantizedWeights(layerPath: String, in weights: [String: MLXArray]) -> Bool {
        let scalesKey = "\(layerPath).scales"
        let biasesKey = "\(layerPath).biases"
        let weightKey = "\(layerPath).weight"

        let hasScales = weights[scalesKey] != nil
        let hasBiases = weights[biasesKey] != nil
        let hasWeight = weights[weightKey]?.dtype == .uint32

        return hasScales && hasBiases && hasWeight
    }
}

// MARK: - Vision Model Components

private class VisionAttention: Module {
    @ModuleInfo(key: "q_proj") var queryProj: Linear
    @ModuleInfo(key: "k_proj") var keyProj: Linear
    @ModuleInfo(key: "v_proj") var valueProj: Linear
    @ModuleInfo(key: "out_proj") var outputProj: Linear

    let numHeads: Int
    let scale: Float

    init(
        dimensions: Int,
        numHeads: Int,
        queryInputDimensions: Int? = nil,
        keyInputDimensions: Int? = nil,
        valueInputDimensions: Int? = nil,
        valueDimensions: Int? = nil,
        valueOutputDimensions: Int? = nil,
        bias: Bool = true
    ) {
        if dimensions % numHeads != 0 {
            fatalError("The input feature dimensions should be divisible by the number of heads")
        }

        self.numHeads = numHeads
        let headDim = dimensions / numHeads
        self.scale = pow(Float(headDim), -0.5)

        let queryInputDims = queryInputDimensions ?? dimensions
        let keyInputDims = keyInputDimensions ?? dimensions
        let valueInputDims = valueInputDimensions ?? keyInputDims
        let valueDims = valueDimensions ?? dimensions
        let valueOutputDims = valueOutputDimensions ?? dimensions

        self._queryProj.wrappedValue = Linear(queryInputDims, dimensions, bias: bias)
        self._keyProj.wrappedValue = Linear(keyInputDims, dimensions, bias: bias)
        self._valueProj.wrappedValue = Linear(valueInputDims, valueDims, bias: bias)
        self._outputProj.wrappedValue = Linear(valueDims, valueOutputDims, bias: bias)
    }

    func callAsFunction(_ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode = .none)
        -> MLXArray
    {
        var queries = queryProj(x)
        var keys = keyProj(x)
        var values = valueProj(x)

        let (B, L, _) = (queries.dim(0), queries.dim(1), queries.dim(2))
        let S = keys.dim(1)

        queries = queries.reshaped(B, L, numHeads, -1).transposed(0, 2, 1, 3)
        keys = keys.reshaped(B, S, numHeads, -1).transposed(0, 2, 1, 3)
        values = values.reshaped(B, S, numHeads, -1).transposed(0, 2, 1, 3)

        let output = MLXFast.scaledDotProductAttention(
            queries: queries, keys: keys, values: values, scale: scale, mask: mask
        )
        .transposed(0, 2, 1, 3)
        .reshaped(B, L, -1)

        return outputProj(output)
    }
}

private class VisionMLP: Module, UnaryLayer {
    @ModuleInfo(key: "fc1") var fc1: Linear
    @ModuleInfo(key: "fc2") var fc2: Linear
    @ModuleInfo var activationFn: GELU

    init(config: Gemma3VisionConfiguration) {
        self.activationFn = GELU(approximation: .precise)
        self._fc1.wrappedValue = Linear(config.hiddenSize, config.intermediateSize, bias: true)
        self._fc2.wrappedValue = Linear(config.intermediateSize, config.hiddenSize, bias: true)
    }

    func callAsFunction(_ x: MLXArray) -> MLXArray {
        var x = fc1(x)
        x = activationFn(x)
        return fc2(x)
    }
}

private class EncoderLayer: Module {
    @ModuleInfo(key: "self_attn") var selfAttention: VisionAttention
    @ModuleInfo(key: "layer_norm1") var layerNorm1: LayerNorm
    @ModuleInfo var mlp: VisionMLP
    @ModuleInfo(key: "layer_norm2") var layerNorm2: LayerNorm

    let embedDim: Int

    init(config: Gemma3VisionConfiguration) {
        self.embedDim = config.hiddenSize
        self._selfAttention.wrappedValue = VisionAttention(
            dimensions: config.hiddenSize,
            numHeads: config.attentionHeads,
            bias: true
        )
        self._layerNorm1.wrappedValue = LayerNorm(dimensions: embedDim, eps: config.layerNormEps)
        self.mlp = VisionMLP(config: config)
        self._layerNorm2.wrappedValue = LayerNorm(dimensions: embedDim, eps: config.layerNormEps)
    }

    func callAsFunction(_ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode = .none)
        -> MLXArray
    {
        let r = selfAttention(layerNorm1(x), mask: mask)
        let h = x + r
        let r2 = mlp(layerNorm2(h))
        return h + r2
    }
}

private class Encoder: Module {
    @ModuleInfo var layers: [EncoderLayer]

    init(config: Gemma3VisionConfiguration) {
        self._layers.wrappedValue = (0 ..< config.hiddenLayers).map { _ in
            EncoderLayer(config: config)
        }
    }

    func callAsFunction(
        _ x: MLXArray,
        outputHiddenStates: Bool = false,
        mask: MLXFast.ScaledDotProductAttentionMaskMode = .none
    ) -> (MLXArray, [MLXArray]?) {
        var encoderStates: [MLXArray]? = outputHiddenStates ? [x] : nil
        var h = x

        for layer in layers {
            h = layer(h, mask: mask)
            if outputHiddenStates {
                encoderStates?.append(h)
            }
        }

        return (h, encoderStates)
    }
}

private class VisionEmbeddings: Module, UnaryLayer {
    @ModuleInfo(key: "patch_embedding") var patchEmbedding: Conv2d
    @ModuleInfo(key: "position_embedding") var positionEmbedding: Embedding

    let config: Gemma3VisionConfiguration
    let embedDim: Int
    let imageSize: Int
    let patchSize: Int
    let numPatches: Int
    let numPositions: Int

    init(config: Gemma3VisionConfiguration) {
        self.config = config
        self.embedDim = config.hiddenSize
        self.imageSize = config.imageSize
        self.patchSize = config.patchSize
        self._patchEmbedding.wrappedValue = Conv2d(
            inputChannels: config.numChannels,
            outputChannels: embedDim,
            kernelSize: IntOrPair(patchSize),
            stride: IntOrPair(patchSize)
        )
        self.numPatches = (imageSize / patchSize) * (imageSize / patchSize)
        self.numPositions = numPatches
        self._positionEmbedding.wrappedValue = Embedding(
            embeddingCount: numPositions,
            dimensions: embedDim
        )
    }

    func callAsFunction(_ x: MLXArray) -> MLXArray {
        var patchEmbeddings = patchEmbedding(x)
        patchEmbeddings = flattened(patchEmbeddings, start: 1, end: 2)

        // Check if we have the expected number of patches (safety net for config mismatches)
        let actualNumPatches = patchEmbeddings.dim(1)
        let useNumPositions = min(actualNumPatches, numPositions)

        // Use position IDs from 0 to numPositions
        let positionIds = MLXArray(Array(0 ..< useNumPositions))[.newAxis, 0...]

        var embeddings = patchEmbeddings

        // Add position embeddings only to the patches we have positions for
        if useNumPositions == actualNumPatches {
            // Normal case: add position embeddings to all patches
            embeddings = embeddings + positionEmbedding(positionIds)
        } else {
            // Safety case: only add to first N patches to avoid broadcast error
            let positionedPatches =
                embeddings[0..., ..<useNumPositions, 0...] + positionEmbedding(positionIds)
            let remainingPatches = embeddings[0..., useNumPositions..., 0...]
            embeddings = concatenated([positionedPatches, remainingPatches], axis: 1)
        }

        return embeddings
    }
}

private class SigLipVisionModel: Module {
    @ModuleInfo var embeddings: VisionEmbeddings
    @ModuleInfo var encoder: Encoder
    @ModuleInfo(key: "post_layernorm") var postLayerNorm: LayerNorm

    init(config: Gemma3VisionConfiguration) {
        self.embeddings = VisionEmbeddings(config: config)
        self.encoder = Encoder(config: config)
        self._postLayerNorm.wrappedValue = LayerNorm(dimensions: config.hiddenSize)
        super.init()
    }

    func callAsFunction(
        _ x: MLXArray, outputHiddenStates: Bool = false
    ) -> (MLXArray, MLXArray, [MLXArray]?) {
        let x = embeddings(x)
        let (encoderOutput, encoderStates) = encoder(
            x, outputHiddenStates: outputHiddenStates, mask: .none
        )
        let poolerOutput = postLayerNorm(encoderOutput)
        return (poolerOutput, x, encoderStates)
    }
}

private class VisionModel: Module {
    @ModuleInfo(key: "vision_model") var visionModel: SigLipVisionModel

    let modelType: String

    init(config: Gemma3VisionConfiguration) {
        self.modelType = config.modelType
        self._visionModel.wrappedValue = SigLipVisionModel(config: config)
    }

    func callAsFunction(
        _ x: MLXArray, outputHiddenStates: Bool = false
    ) -> (MLXArray, MLXArray, [MLXArray]?) {
        visionModel(x, outputHiddenStates: outputHiddenStates)
    }

    /// Check if array is already in MLX format for conv2d weights
    private func checkArrayShape(_ arr: MLXArray) -> Bool {
        let shape = arr.shape

        // Check if the shape has 4 dimensions
        guard shape.count == 4 else { return false }

        let (outChannels, kH, kW, _) = (shape[0], shape[1], shape[2], shape[3])

        // Check if out_channels is the largest, and kH and kW are the same
        return (outChannels >= kH) && (outChannels >= kW) && (kH == kW)
    }

    func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] {
        var sanitizedWeights = [String: MLXArray]()

        for (k, v) in weights {
            // Handle vision model quantized weights if they exist
            if k.contains("vision_tower") && hasQuantizedWeights(layerPath: k, in: weights) {
                // Keep quantized weights as-is - they will be handled by QuantizedLinear at runtime
                sanitizedWeights[k] = v
            } else if k.contains("patch_embedding.weight") {
                // PyTorch conv2d weight tensors have shape:
                //   [out_channels, in_channels, kH, KW]
                // MLX conv2d expects the weight be of shape:
                //   [out_channels, kH, KW, in_channels]
                if checkArrayShape(v) {
                    sanitizedWeights[k] = v
                } else {
                    sanitizedWeights[k] = v.transposed(0, 2, 3, 1)
                }
            } else {
                sanitizedWeights[k] = v
            }
        }

        return sanitizedWeights
    }

    /// Check if a layer has quantized weights (copied from LanguageModel)
    private func hasQuantizedWeights(layerPath: String, in weights: [String: MLXArray]) -> Bool {
        let scalesKey = "\(layerPath).scales"
        let biasesKey = "\(layerPath).biases"
        let weightKey = "\(layerPath).weight"

        return weights[scalesKey] != nil && weights[biasesKey] != nil
            && weights[weightKey]?.dtype == .uint32
    }
}

// MARK: - Multimodal Projector

class Gemma3MultiModalProjector: Module, UnaryLayer {
    @ModuleInfo(key: "mm_input_projection_weight") var mmInputProjectionWeight: MLXArray
    @ModuleInfo(key: "mm_soft_emb_norm") var mmSoftEmbNorm: Gemma.RMSNorm
    @ModuleInfo var avgPool: AvgPool2d

    let config: Gemma3Configuration
    let patchesPerImage: Int
    let tokensPerSide: Int
    let kernelSize: Int

    init(config: Gemma3Configuration) {
        self.config = config

        self._mmInputProjectionWeight.wrappedValue = ones([
            config.visionConfiguration.hiddenSize,
            config.textConfiguration.hiddenSize,
        ])

        self._mmSoftEmbNorm.wrappedValue = Gemma.RMSNorm(
            dimensions: config.visionConfiguration.hiddenSize,
            eps: config.visionConfiguration.layerNormEps
        )

        self.patchesPerImage =
            config.visionConfiguration.imageSize / config.visionConfiguration.patchSize
        self.tokensPerSide = Int(sqrt(Double(config.mmTokensPerImage)))
        self.kernelSize = patchesPerImage / tokensPerSide
        self.avgPool = AvgPool2d(
            kernelSize: IntOrPair(kernelSize), stride: IntOrPair(kernelSize)
        )
    }

    func callAsFunction(_ x: MLXArray) -> MLXArray {
        let (b, _, l) = (x.dim(0), x.dim(1), x.dim(2))

        // Use fixed config values
        var reshapedVisionOutputs = x.transposed(0, 2, 1)
        reshapedVisionOutputs = reshapedVisionOutputs.reshaped(
            b, l, patchesPerImage, patchesPerImage
        )

        // Transpose to place spatial dimensions in indices 1, 2
        reshapedVisionOutputs = reshapedVisionOutputs.transposed(0, 2, 3, 1)

        // Use fixed average pooling
        var pooledVisionOutputs = avgPool(reshapedVisionOutputs)
        pooledVisionOutputs = pooledVisionOutputs.transposed(0, 3, 1, 2).flattened(start: 2)
        pooledVisionOutputs = pooledVisionOutputs.transposed(0, 2, 1)

        let normedVisionOutputs = mmSoftEmbNorm(pooledVisionOutputs)

        let projectedVisionOutputs = einsum(
            "btm,md->btd", normedVisionOutputs, mmInputProjectionWeight
        )

        return projectedVisionOutputs.asType(x.dtype)
    }
}

/// Inserts image features into text embeddings at specified token positions
/// Implements the multimodal fusion approach used in Gemma3 VLM
private func maskedScatter(
    finalEmbedding: MLXArray,
    imageMaskExpanded: MLXArray,
    scaledImageFeatures: MLXArray
) -> MLXArray {
    // Reshape the tensors to 1D
    let finalEmbeddingShape = finalEmbedding.shape
    let scaledImageFeaturesFlattened = scaledImageFeatures.flattened()
    let finalEmbeddingFlattened = finalEmbedding.flattened()
    let imageMaskExpandedFlattened = imageMaskExpanded.flattened()

    let maskValues = imageMaskExpandedFlattened.asArray(Bool.self)
    let imagePositionIndices = maskValues.enumerated().compactMap { index, value in
        value ? UInt32(index) : nil
    }

    guard !imagePositionIndices.isEmpty else {
        return finalEmbedding
    }

    // Scatter the scaled image features into the special image token positions
    let imagePositions = MLXArray(imagePositionIndices)

    guard scaledImageFeaturesFlattened.shape[0] == imagePositions.shape[0] else {
        fatalError(
            """
            Critical error in maskedScatter: Size mismatch between image features and positions.
            Image features: \(scaledImageFeaturesFlattened.shape[0])
            Image positions: \(imagePositions.shape[0])
            """)
    }

    finalEmbeddingFlattened[imagePositions] = scaledImageFeaturesFlattened

    return finalEmbeddingFlattened.reshaped(finalEmbeddingShape)
}

// MARK: - Gemma 3 Model

public class Gemma3: Module, VLMModel, KVCacheDimensionProvider {
    @ModuleInfo(key: "vision_tower") private var visionTower: VisionModel
    @ModuleInfo(key: "language_model") private var languageModel: LanguageModel
    @ModuleInfo(key: "multi_modal_projector") var multiModalProjector: Gemma3MultiModalProjector

    public let config: Gemma3Configuration

    public var vocabularySize: Int { config.vocabularySize }
    public var kvHeads: [Int] { languageModel.kvHeads }

    /// Create cache with proper types for each layer
    public func newCache(parameters: GenerateParameters?) -> [any KVCache] {
        return languageModel.newCache(parameters: parameters)
    }

    public init(_ config: Gemma3Configuration) {
        self.config = config
        self._visionTower.wrappedValue = VisionModel(config: config.visionConfiguration)
        self._languageModel.wrappedValue = LanguageModel(config.textConfiguration)
        self._multiModalProjector.wrappedValue = Gemma3MultiModalProjector(config: config)
    }

    private func getInputEmbeddings(
        inputIds: MLXArray? = nil, pixelValues: MLXArray? = nil, mask: MLXArray? = nil
    ) -> (MLXArray, MLXArray?) {
        guard let pixelValues else {
            return (languageModel.model.embedTokens(inputIds!), nil)
        }

        let inputsEmbeds = languageModel.model.embedTokens(inputIds!)

        // Process image through vision tower
        let processedPixels = pixelValues.transposed(0, 2, 3, 1).asType(inputsEmbeds.dtype)
        let (hiddenState, _, _) = visionTower(
            processedPixels, outputHiddenStates: true
        )

        let imageFeatures = multiModalProjector(hiddenState)

        let (finalEmbedding, finalAttentionMask4d) = prepareInputsForMultimodal(
            imageFeatures: imageFeatures,
            inputsEmbeds: inputsEmbeds,
            inputIds: inputIds!,
            attentionMask: mask
        )
        return (finalEmbedding, finalAttentionMask4d)
    }

    private func prepareInputsForMultimodal(
        imageFeatures: MLXArray,
        inputsEmbeds: MLXArray,
        inputIds: MLXArray,
        attentionMask: MLXArray?
    ) -> (MLXArray, MLXArray?) {
        let embedDim = inputsEmbeds.dim(2)
        let batchSize = inputIds.dim(0)
        let sequenceLength = inputIds.dim(1)

        // Scale image features to match text embedding magnitude
        let scaledImageFeatures = imageFeatures / sqrt(Float(config.textConfiguration.hiddenSize))

        // Use input embeddings as starting point
        var finalEmbedding = inputsEmbeds

        let padTokenId = config.padTokenId
        let imageTokenId = 262144  // Image token used after expansion

        // Create masks for different token types
        let textMask = MLX.logicalAnd(
            MLX.notEqual(inputIds, MLXArray(imageTokenId)),
            MLX.notEqual(inputIds, MLXArray(padTokenId))
        )
        let imageMask = MLX.equal(inputIds, MLXArray(imageTokenId))
        let padMask = MLX.equal(inputIds, MLXArray(padTokenId))

        // Expand masks to match embedding dimension
        var imageMaskExpanded = expandedDimensions(imageMask, axis: -1)
        imageMaskExpanded = repeated(imageMaskExpanded, count: embedDim, axis: -1)

        // Apply pad mask to final embedding
        var padMaskExpanded = expandedDimensions(padMask, axis: -1)
        padMaskExpanded = repeated(padMaskExpanded, count: embedDim, axis: -1)
        finalEmbedding = MLX.where(
            padMaskExpanded, MLXArray.zeros(like: finalEmbedding), finalEmbedding)

        // Insert image token embeddings using masked_scatter
        finalEmbedding = maskedScatter(
            finalEmbedding: finalEmbedding,
            imageMaskExpanded: imageMaskExpanded,
            scaledImageFeatures: scaledImageFeatures
        )

        var finalAttentionMask4d: MLXArray? = nil
        if let attentionMask = attentionMask {
            let attentionMaskExpanded1 = expandedDimensions(attentionMask, axis: 1)
            let attentionMaskExpanded2 = expandedDimensions(attentionMask, axis: 2)
            finalAttentionMask4d = attentionMaskExpanded1 * attentionMaskExpanded2
            finalAttentionMask4d = expandedDimensions(finalAttentionMask4d!, axis: 1)
        }

        return (finalEmbedding.asType(inputsEmbeds.dtype), finalAttentionMask4d)
    }

    public func prepare(_ input: LMInput, cache: [any KVCache], windowSize: Int?) throws
        -> PrepareResult
    {
        guard let imagePixels = input.image?.pixels else {
            // Text-only input
            let convertedCache = cache.compactMap { $0 as? KVCache }
            let result = languageModel(
                input.text.tokens, cache: convertedCache, inputEmbedding: nil, mask: nil)
            return .logits(result)
        }

        let (inputEmbeddings, _) = getInputEmbeddings(
            inputIds: input.text.tokens,
            pixelValues: imagePixels,
            mask: input.text.mask
        )

        let convertedCache = cache.compactMap { $0 as? KVCache }

        // Use causal masking for text generation
        let maskMode: MLXFast.ScaledDotProductAttentionMaskMode = .causal

        let result = languageModel(
            nil,  // Pass nil for tokens when using embeddings
            cache: convertedCache,
            inputEmbedding: inputEmbeddings,
            mask: maskMode
        )

        return .logits(result)
    }

    public func callAsFunction(_ inputs: MLXArray, cache: [any KVCache]?) -> MLXArray {
        return languageModel(inputs, cache: cache).logits
    }

    public func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] {
        let lmHeadKeys = weights.keys.filter { $0.contains("lm_head") }

        // Also check attention layer structures
        let attnKeys = weights.keys.filter {
            $0.contains("self_attn")
                && ($0.contains("q_proj") || $0.contains("k_proj") || $0.contains("v_proj")
                    || $0.contains("o_proj"))
        }

        // Handle language model sanitization first (quantization, weight tying, etc.)
        var processedWeights = languageModel.sanitize(
            weights: weights, quantizationConfig: config.quantization)

        // Handle vision model sanitization (conv2d weight reshaping, etc.)
        processedWeights = visionTower.sanitize(weights: processedWeights)

        return processedWeights
    }
}

public class Gemma3Processor: UserInputProcessor {
    private let config: Gemma3ProcessorConfiguration
    private let tokenizer: any Tokenizer

    public init(_ config: Gemma3ProcessorConfiguration, tokenizer: any Tokenizer) {
        self.config = config
        self.tokenizer = tokenizer
    }

    public func preprocess(images: [CIImage], processing: UserInput.Processing?) throws -> (
        MLXArray, THW
    ) {
        var userProcessing = processing ?? UserInput.Processing()

        // Always use the vision configuration's imageSize. Ignore UserInput resize setting.
        let targetSize = CGSize(width: config.imageSize, height: config.imageSize)

        // Force the correct size for vision model alignment
        userProcessing.resize = targetSize

        let processedImages = try images.map { image in
            let processedImage = MediaProcessing.apply(image, processing: userProcessing)
            let srgbImage = MediaProcessing.inSRGBToneCurveSpace(processedImage)
            let resizedImage = try MediaProcessing.resampleBicubic(srgbImage, to: targetSize)
            let normalizedImage = MediaProcessing.normalize(
                resizedImage, mean: config.imageMeanTuple, std: config.imageStdTuple)
            return MediaProcessing.asMLXArray(normalizedImage)
        }

        let pixelValues = concatenated(processedImages)
        return (pixelValues, THW(images.count, config.imageSize, config.imageSize))
    }

    public func prepare(input: UserInput) async throws -> LMInput {
        // Use structured content message generator for Gemma3's chat template
        let messages = Qwen2VLMessageGenerator().generate(from: input)

        var promptTokens = try tokenizer.applyChatTemplate(messages: messages)

        // Process images if any
        var processedImage: LMInput.ProcessedImage?

        if !input.images.isEmpty {
            let imagePixelsAndFrames = try input.images.map {
                try preprocess(images: [$0.asCIImage()], processing: input.processing)
            }
            let imagePixelsConcatenated = concatenated(imagePixelsAndFrames.map { $0.0 })
            processedImage = LMInput.ProcessedImage(
                pixels: imagePixelsConcatenated,
                frames: imagePixelsAndFrames.map { $0.1 }
            )

            // Expand single <start_of_image> token to multiple image tokens
            let startOfImageTokenId = 255999
            let imageTokenId = 262144
            let numImageTokens = config.imageSeqLength  // 256

            var expandedTokens: [Int] = []
            for token in promptTokens {
                if token == startOfImageTokenId {
                    // Replace with 256 image tokens
                    expandedTokens.append(
                        contentsOf: Array(repeating: imageTokenId, count: numImageTokens))
                } else {
                    expandedTokens.append(token)
                }
            }
            promptTokens = expandedTokens
        }

        let promptArray = MLXArray(promptTokens).expandedDimensions(axis: 0)
        let mask = ones(like: promptArray).asType(.int8)

        return LMInput(
            text: .init(tokens: promptArray, mask: mask),
            image: processedImage
        )
    }
}

public struct Gemma3ProcessorConfiguration: Codable, Sendable {
    // Fields from the preprocessor_config.json
    public let processorClass: String
    public let imageProcessorType: String
    public let doNormalize: Bool
    public let doRescale: Bool
    public let doResize: Bool
    public let imageMean: [CGFloat]
    public let imageStd: [CGFloat]
    public let imageSeqLength: Int
    public let resample: Int
    public let rescaleFactor: Float
    public let size: ImageSize

    // Optional fields
    public let doConvertRgb: Bool?
    public let doPanAndScan: Bool?
    public let panAndScanMaxNumCrops: Int?
    public let panAndScanMinCropSize: Int?
    public let panAndScanMinRatioToActivate: Float?

    // Image token identifier from model configuration
    public let imageTokenId: Int = 262144

    public struct ImageSize: Codable, Sendable {
        public let height: Int
        public let width: Int
    }

    // Computed properties for convenience
    public var imageSize: Int { size.height }

    public var imageMeanTuple: (CGFloat, CGFloat, CGFloat) {
        (imageMean[0], imageMean[1], imageMean[2])
    }

    public var imageStdTuple: (CGFloat, CGFloat, CGFloat) {
        (imageStd[0], imageStd[1], imageStd[2])
    }

    enum CodingKeys: String, CodingKey {
        case processorClass = "processor_class"
        case imageProcessorType = "image_processor_type"
        case doNormalize = "do_normalize"
        case doRescale = "do_rescale"
        case doResize = "do_resize"
        case doConvertRgb = "do_convert_rgb"
        case doPanAndScan = "do_pan_and_scan"
        case imageMean = "image_mean"
        case imageStd = "image_std"
        case imageSeqLength = "image_seq_length"
        case resample
        case rescaleFactor = "rescale_factor"
        case size
        case panAndScanMaxNumCrops = "pan_and_scan_max_num_crops"
        case panAndScanMinCropSize = "pan_and_scan_min_crop_size"
        case panAndScanMinRatioToActivate = "pan_and_scan_min_ratio_to_activate"
    }
}

extension Gemma3: LoRAModel {
    public func loraLinearLayers() -> LoRALinearLayers {
        return languageModel.model.layers.map { ($0.selfAttention, ["q_proj", "v_proj"]) }
    }
}
The Python versions at https://github.com/Blaizzy/mlx-vlm are usually split across multiple files; this entry combines the three .py files into a single cell. Evaluate a better format for future VLM examples. Stopping here for now. A small decoding sketch for the processor configuration follows below.
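For reference, here is a minimal, script-style sketch of how the Gemma3ProcessorConfiguration defined above can be decoded from a preprocessor_config.json using its Codable conformance and CodingKeys. The JSON field values are illustrative placeholders, not taken from a real checkpoint.

import Foundation

// Hypothetical preprocessor_config.json contents; values are illustrative only.
let exampleJSON = """
    {
      "processor_class": "Gemma3Processor",
      "image_processor_type": "Gemma3ImageProcessor",
      "do_normalize": true,
      "do_rescale": true,
      "do_resize": true,
      "image_mean": [0.5, 0.5, 0.5],
      "image_std": [0.5, 0.5, 0.5],
      "image_seq_length": 256,
      "resample": 2,
      "rescale_factor": 0.00392156862745098,
      "size": { "height": 896, "width": 896 }
    }
    """.data(using: .utf8)!

// Decode with the Codable conformance and CodingKeys defined above.
// Missing optional keys (e.g. do_pan_and_scan) decode to nil.
let processorConfig = try JSONDecoder().decode(Gemma3ProcessorConfiguration.self, from: exampleJSON)
print(processorConfig.imageSize)       // 896 (computed from size.height)
print(processorConfig.imageSeqLength)  // 256 image tokens per <start_of_image>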