// MARK: Generated class file for the CLIP_TextEncoder Core ML model.
// Created by InspiratioNULL on 1/20/2026.
//
// CLIP_TextEncoder.swift
//
// This file was automatically generated and should not be edited.
//
import CoreML
/// Model Prediction Input Type
@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, visionOS 1.0, *)
public class CLIP_TextEncoderInput : MLFeatureProvider {

    /// text as 1 by 77 matrix of 32-bit integers
    public var text: MLMultiArray

    /// The set of feature names this provider vends; the model has a single "text" input.
    public var featureNames: Set<String> { ["text"] }

    /// Returns the feature value for the given name, or `nil` for any name other than "text".
    public func featureValue(for featureName: String) -> MLFeatureValue? {
        if featureName == "text" {
            return MLFeatureValue(multiArray: text)
        }
        return nil
    }

    /// Creates the input from a raw `MLMultiArray` of token IDs.
    public init(text: MLMultiArray) {
        self.text = text
    }

    /// Convenience initializer bridging from the typed `MLShapedArray` API.
    public convenience init(text: MLShapedArray<Int32>) {
        self.init(text: MLMultiArray(text))
    }
}
/// Model Prediction Output Type
@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, visionOS 1.0, *)
public class CLIP_TextEncoderOutput : MLFeatureProvider {

    /// Source provided by CoreML
    private let provider : MLFeatureProvider

    /// var_1317 as 1 by 512 matrix of floats
    public var var_1317: MLMultiArray {
        // Force-unwraps are the generated-code convention: a successful prediction
        // is guaranteed to contain this output feature as a multi-array.
        provider.featureValue(for: "var_1317")!.multiArrayValue!
    }

    /// var_1317 as 1 by 512 matrix of floats, exposed through the typed `MLShapedArray` API
    public var var_1317ShapedArray: MLShapedArray<Float> {
        MLShapedArray<Float>(var_1317)
    }

    /// Forwards the underlying provider's feature names.
    public var featureNames: Set<String> {
        provider.featureNames
    }

    /// Forwards feature lookup to the underlying provider.
    public func featureValue(for featureName: String) -> MLFeatureValue? {
        provider.featureValue(for: featureName)
    }

    /// Wraps a raw multi-array in a dictionary-backed provider.
    /// `try!` is safe here: a dictionary with a single valid feature value cannot fail to build.
    public init(var_1317: MLMultiArray) {
        self.provider = try! MLDictionaryFeatureProvider(dictionary: ["var_1317" : MLFeatureValue(multiArray: var_1317)])
    }

    /// Wraps the provider returned by a model prediction.
    public init(features: MLFeatureProvider) {
        self.provider = features
    }
}
/// Class for model loading and prediction
@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, visionOS 1.0, *)
public class CLIP_TextEncoder {
    public let model: MLModel

    /// URL of model assuming it was installed in the same bundle as this class
    public class var urlOfModelInThisBundle : URL {
        let bundle = Bundle(for: self)
        // Force-unwrap is the generated-code convention: the compiled model is
        // expected to ship in this bundle; its absence is a packaging bug.
        return bundle.url(forResource: "CLIP_TextEncoder", withExtension:"mlmodelc")!
    }

    /**
        Construct CLIP_TextEncoder instance with an existing MLModel object.

        Usually the application does not use this initializer unless it makes a subclass of CLIP_TextEncoder.
        Such application may want to use `MLModel(contentsOfURL:configuration:)` and `CLIP_TextEncoder.urlOfModelInThisBundle` to create a MLModel object to pass-in.

        - parameters:
          - model: MLModel object
    */
    public init(model: MLModel) {
        self.model = model
    }

    /**
        Construct a model with configuration

        - parameters:
           - configuration: the desired model configuration

        - throws: an NSError object that describes the problem
    */
    public convenience init(configuration: MLModelConfiguration = MLModelConfiguration()) throws {
        try self.init(contentsOf: type(of:self).urlOfModelInThisBundle, configuration: configuration)
    }

    /**
        Construct CLIP_TextEncoder instance with explicit path to mlmodelc file

        - parameters:
           - modelURL: the file url of the model

        - throws: an NSError object that describes the problem
    */
    public convenience init(contentsOf modelURL: URL) throws {
        try self.init(model: MLModel(contentsOf: modelURL))
    }

    /**
        Construct a model with URL of the .mlmodelc directory and configuration

        - parameters:
           - modelURL: the file url of the model
           - configuration: the desired model configuration

        - throws: an NSError object that describes the problem
    */
    public convenience init(contentsOf modelURL: URL, configuration: MLModelConfiguration) throws {
        try self.init(model: MLModel(contentsOf: modelURL, configuration: configuration))
    }

    /**
        Construct CLIP_TextEncoder instance asynchronously with optional configuration.

        Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.

        - parameters:
          - configuration: the desired model configuration
          - handler: the completion handler to be called when the model loading completes successfully or unsuccessfully
    */
    public class func load(configuration: MLModelConfiguration = MLModelConfiguration(), completionHandler handler: @escaping (Swift.Result<CLIP_TextEncoder, Error>) -> Void) {
        load(contentsOf: self.urlOfModelInThisBundle, configuration: configuration, completionHandler: handler)
    }

    /**
        Construct CLIP_TextEncoder instance asynchronously with optional configuration.

        Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.

        - parameters:
          - configuration: the desired model configuration
    */
    public class func load(configuration: MLModelConfiguration = MLModelConfiguration()) async throws -> CLIP_TextEncoder {
        try await load(contentsOf: self.urlOfModelInThisBundle, configuration: configuration)
    }

    /**
        Construct CLIP_TextEncoder instance asynchronously with URL of the .mlmodelc directory with optional configuration.

        Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.

        - parameters:
          - modelURL: the URL to the model
          - configuration: the desired model configuration
          - handler: the completion handler to be called when the model loading completes successfully or unsuccessfully
    */
    public class func load(contentsOf modelURL: URL, configuration: MLModelConfiguration = MLModelConfiguration(), completionHandler handler: @escaping (Swift.Result<CLIP_TextEncoder, Error>) -> Void) {
        MLModel.load(contentsOf: modelURL, configuration: configuration) { result in
            switch result {
            case .failure(let error):
                handler(.failure(error))
            case .success(let model):
                handler(.success(CLIP_TextEncoder(model: model)))
            }
        }
    }

    /**
        Construct CLIP_TextEncoder instance asynchronously with URL of the .mlmodelc directory with optional configuration.

        Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.

        - parameters:
          - modelURL: the URL to the model
          - configuration: the desired model configuration
    */
    public class func load(contentsOf modelURL: URL, configuration: MLModelConfiguration = MLModelConfiguration()) async throws -> CLIP_TextEncoder {
        let model = try await MLModel.load(contentsOf: modelURL, configuration: configuration)
        return CLIP_TextEncoder(model: model)
    }

    /**
        Make a prediction using the structured interface

        It uses the default function if the model has multiple functions.

        - parameters:
           - input: the input to the prediction as CLIP_TextEncoderInput

        - throws: an NSError object that describes the problem

        - returns: the result of the prediction as CLIP_TextEncoderOutput
    */
    public func prediction(input: CLIP_TextEncoderInput) throws -> CLIP_TextEncoderOutput {
        try prediction(input: input, options: MLPredictionOptions())
    }

    /**
        Make a prediction using the structured interface

        It uses the default function if the model has multiple functions.

        - parameters:
           - input: the input to the prediction as CLIP_TextEncoderInput
           - options: prediction options

        - throws: an NSError object that describes the problem

        - returns: the result of the prediction as CLIP_TextEncoderOutput
    */
    public func prediction(input: CLIP_TextEncoderInput, options: MLPredictionOptions) throws -> CLIP_TextEncoderOutput {
        let outFeatures = try model.prediction(from: input, options: options)
        return CLIP_TextEncoderOutput(features: outFeatures)
    }

    /**
        Make an asynchronous prediction using the structured interface

        It uses the default function if the model has multiple functions.

        - parameters:
           - input: the input to the prediction as CLIP_TextEncoderInput
           - options: prediction options

        - throws: an NSError object that describes the problem

        - returns: the result of the prediction as CLIP_TextEncoderOutput
    */
    @available(macOS 14.0, iOS 17.0, tvOS 17.0, watchOS 10.0, visionOS 1.0, *)
    public func prediction(input: CLIP_TextEncoderInput, options: MLPredictionOptions = MLPredictionOptions()) async throws -> CLIP_TextEncoderOutput {
        let outFeatures = try await model.prediction(from: input, options: options)
        return CLIP_TextEncoderOutput(features: outFeatures)
    }

    /**
        Make a prediction using the convenience interface

        It uses the default function if the model has multiple functions.

        - parameters:
            - text: 1 by 77 matrix of 32-bit integers

        - throws: an NSError object that describes the problem

        - returns: the result of the prediction as CLIP_TextEncoderOutput
    */
    public func prediction(text: MLMultiArray) throws -> CLIP_TextEncoderOutput {
        let input_ = CLIP_TextEncoderInput(text: text)
        return try prediction(input: input_)
    }

    /**
        Make a prediction using the convenience interface

        It uses the default function if the model has multiple functions.

        - parameters:
            - text: 1 by 77 matrix of 32-bit integers

        - throws: an NSError object that describes the problem

        - returns: the result of the prediction as CLIP_TextEncoderOutput
    */
    public func prediction(text: MLShapedArray<Int32>) throws -> CLIP_TextEncoderOutput {
        let input_ = CLIP_TextEncoderInput(text: text)
        return try prediction(input: input_)
    }

    /**
        Make a batch prediction using the structured interface

        It uses the default function if the model has multiple functions.

        - parameters:
           - inputs: the inputs to the prediction as [CLIP_TextEncoderInput]
           - options: prediction options

        - throws: an NSError object that describes the problem

        - returns: the result of the prediction as [CLIP_TextEncoderOutput]
    */
    public func predictions(inputs: [CLIP_TextEncoderInput], options: MLPredictionOptions = MLPredictionOptions()) throws -> [CLIP_TextEncoderOutput] {
        let batchIn = MLArrayBatchProvider(array: inputs)
        let batchOut = try model.predictions(from: batchIn, options: options)
        var results : [CLIP_TextEncoderOutput] = []
        results.reserveCapacity(inputs.count)
        for i in 0..<batchOut.count {
            let outProvider = batchOut.features(at: i)
            let result = CLIP_TextEncoderOutput(features: outProvider)
            results.append(result)
        }
        return results
    }
}