// MARK: Generated class file; useful as a reference for proper implementation.
// Created by InspiratioNULL on 1/20/2026.
// CLIP_TextEncoder.swift
//
// This file was automatically generated and should not be edited.
//
import CoreML
/// Model Prediction Input Type
@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, visionOS 1.0, *)
public class CLIP_TextEncoderInput : MLFeatureProvider {
/// text as 1 by 77 matrix of 32-bit integers
public var text: MLMultiArray
public var featureNames: Set<String> { ["text"] }
public func featureValue(for featureName: String) -> MLFeatureValue? {
if featureName == "text" {
return MLFeatureValue(multiArray: text)
}
return nil
}
public init(text: MLMultiArray) {
self.text = text
}
public convenience init(text: MLShapedArray<Int32>) {
self.init(text: MLMultiArray(text))
}
}
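// A minimal construction sketch (not part of the generated interface; `makeExampleInput` is a
// hypothetical helper). It assumes the 77 token IDs were already produced by a CLIP tokenizer.
@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, visionOS 1.0, *)
func makeExampleInput(tokenIDs: [Int32]) -> CLIP_TextEncoderInput {
    // The model expects a 1 by 77 matrix of 32-bit integers.
    precondition(tokenIDs.count == 77, "CLIP_TextEncoder expects a fixed sequence length of 77 tokens")
    let text = MLShapedArray<Int32>(scalars: tokenIDs, shape: [1, tokenIDs.count])
    return CLIP_TextEncoderInput(text: text)
}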
/// Model Prediction Output Type
@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, visionOS 1.0, *)
public class CLIP_TextEncoderOutput : MLFeatureProvider {
/// Source provided by CoreML
private let provider : MLFeatureProvider
/// var_1317 as 1 by 512 matrix of floats
public var var_1317: MLMultiArray {
provider.featureValue(for: "var_1317")!.multiArrayValue!
}
/// var_1317 as 1 by 512 matrix of floats
public var var_1317ShapedArray: MLShapedArray<Float> {
MLShapedArray<Float>(var_1317)
}
public var featureNames: Set<String> {
provider.featureNames
}
public func featureValue(for featureName: String) -> MLFeatureValue? {
provider.featureValue(for: featureName)
}
public init(var_1317: MLMultiArray) {
self.provider = try! MLDictionaryFeatureProvider(dictionary: ["var_1317" : MLFeatureValue(multiArray: var_1317)])
}
public init(features: MLFeatureProvider) {
self.provider = features
}
}
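// A minimal reading sketch (not part of the generated interface; `exampleEmbedding` is a
// hypothetical helper). It flattens the 1 by 512 output matrix into a plain [Float].
@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, visionOS 1.0, *)
func exampleEmbedding(from output: CLIP_TextEncoderOutput) -> [Float] {
    // `scalars` walks the shaped array in row-major order, so this is the
    // 512-dimensional text embedding for the single batch element.
    output.var_1317ShapedArray.scalars
}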
/// Class for model loading and prediction
@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, visionOS 1.0, *)
public class CLIP_TextEncoder {
public let model: MLModel
/// URL of model assuming it was installed in the same bundle as this class
public class var urlOfModelInThisBundle : URL {
let bundle = Bundle(for: self)
return bundle.url(forResource: "CLIP_TextEncoder", withExtension:"mlmodelc")!
}
/**
Construct CLIP_TextEncoder instance with an existing MLModel object.
Usually the application does not use this initializer unless it makes a subclass of CLIP_TextEncoder.
Such an application may want to use `MLModel(contentsOf:configuration:)` and `CLIP_TextEncoder.urlOfModelInThisBundle` to create an MLModel object to pass in.
- parameters:
- model: MLModel object
*/
public init(model: MLModel) {
self.model = model
}
/**
Construct a model with configuration
- parameters:
- configuration: the desired model configuration
- throws: an NSError object that describes the problem
*/
public convenience init(configuration: MLModelConfiguration = MLModelConfiguration()) throws {
try self.init(contentsOf: type(of:self).urlOfModelInThisBundle, configuration: configuration)
}
/**
Construct CLIP_TextEncoder instance with explicit path to mlmodelc file
- parameters:
- modelURL: the file url of the model
- throws: an NSError object that describes the problem
*/
public convenience init(contentsOf modelURL: URL) throws {
try self.init(model: MLModel(contentsOf: modelURL))
}
/**
Construct a model with URL of the .mlmodelc directory and configuration
- parameters:
- modelURL: the file url of the model
- configuration: the desired model configuration
- throws: an NSError object that describes the problem
*/
public convenience init(contentsOf modelURL: URL, configuration: MLModelConfiguration) throws {
try self.init(model: MLModel(contentsOf: modelURL, configuration: configuration))
}
/**
Construct CLIP_TextEncoder instance asynchronously with optional configuration.
Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
- parameters:
- configuration: the desired model configuration
- handler: the completion handler to be called when the model loading completes successfully or unsuccessfully
*/
public class func load(configuration: MLModelConfiguration = MLModelConfiguration(), completionHandler handler: @escaping (Swift.Result<CLIP_TextEncoder, Error>) -> Void) {
load(contentsOf: self.urlOfModelInThisBundle, configuration: configuration, completionHandler: handler)
}
/**
Construct CLIP_TextEncoder instance asynchronously with optional configuration.
Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
- parameters:
- configuration: the desired model configuration
*/
public class func load(configuration: MLModelConfiguration = MLModelConfiguration()) async throws -> CLIP_TextEncoder {
try await load(contentsOf: self.urlOfModelInThisBundle, configuration: configuration)
}
/**
Construct CLIP_TextEncoder instance asynchronously with URL of the .mlmodelc directory with optional configuration.
Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
- parameters:
- modelURL: the URL to the model
- configuration: the desired model configuration
- handler: the completion handler to be called when the model loading completes successfully or unsuccessfully
*/
public class func load(contentsOf modelURL: URL, configuration: MLModelConfiguration = MLModelConfiguration(), completionHandler handler: @escaping (Swift.Result<CLIP_TextEncoder, Error>) -> Void) {
MLModel.load(contentsOf: modelURL, configuration: configuration) { result in
switch result {
case .failure(let error):
handler(.failure(error))
case .success(let model):
handler(.success(CLIP_TextEncoder(model: model)))
}
}
}
/**
Construct CLIP_TextEncoder instance asynchronously with URL of the .mlmodelc directory with optional configuration.
Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
- parameters:
- modelURL: the URL to the model
- configuration: the desired model configuration
*/
public class func load(contentsOf modelURL: URL, configuration: MLModelConfiguration = MLModelConfiguration()) async throws -> CLIP_TextEncoder {
let model = try await MLModel.load(contentsOf: modelURL, configuration: configuration)
return CLIP_TextEncoder(model: model)
}
/**
Make a prediction using the structured interface
It uses the default function if the model has multiple functions.
- parameters:
- input: the input to the prediction as CLIP_TextEncoderInput
- throws: an NSError object that describes the problem
- returns: the result of the prediction as CLIP_TextEncoderOutput
*/
public func prediction(input: CLIP_TextEncoderInput) throws -> CLIP_TextEncoderOutput {
try prediction(input: input, options: MLPredictionOptions())
}
/**
Make a prediction using the structured interface
It uses the default function if the model has multiple functions.
- parameters:
- input: the input to the prediction as CLIP_TextEncoderInput
- options: prediction options
- throws: an NSError object that describes the problem
- returns: the result of the prediction as CLIP_TextEncoderOutput
*/
public func prediction(input: CLIP_TextEncoderInput, options: MLPredictionOptions) throws -> CLIP_TextEncoderOutput {
let outFeatures = try model.prediction(from: input, options: options)
return CLIP_TextEncoderOutput(features: outFeatures)
}
/**
Make an asynchronous prediction using the structured interface
It uses the default function if the model has multiple functions.
- parameters:
- input: the input to the prediction as CLIP_TextEncoderInput
- options: prediction options
- throws: an NSError object that describes the problem
- returns: the result of the prediction as CLIP_TextEncoderOutput
*/
@available(macOS 14.0, iOS 17.0, tvOS 17.0, watchOS 10.0, visionOS 1.0, *)
public func prediction(input: CLIP_TextEncoderInput, options: MLPredictionOptions = MLPredictionOptions()) async throws -> CLIP_TextEncoderOutput {
let outFeatures = try await model.prediction(from: input, options: options)
return CLIP_TextEncoderOutput(features: outFeatures)
}
/**
Make a prediction using the convenience interface
It uses the default function if the model has multiple functions.
- parameters:
- text: 1 by 77 matrix of 32-bit integers
- throws: an NSError object that describes the problem
- returns: the result of the prediction as CLIP_TextEncoderOutput
*/
public func prediction(text: MLMultiArray) throws -> CLIP_TextEncoderOutput {
let input_ = CLIP_TextEncoderInput(text: text)
return try prediction(input: input_)
}
/**
Make a prediction using the convenience interface
It uses the default function if the model has multiple functions.
- parameters:
- text: 1 by 77 matrix of 32-bit integers
- throws: an NSError object that describes the problem
- returns: the result of the prediction as CLIP_TextEncoderOutput
*/
public func prediction(text: MLShapedArray<Int32>) throws -> CLIP_TextEncoderOutput {
let input_ = CLIP_TextEncoderInput(text: text)
return try prediction(input: input_)
}
/**
Make a batch prediction using the structured interface
It uses the default function if the model has multiple functions.
- parameters:
- inputs: the inputs to the prediction as [CLIP_TextEncoderInput]
- options: prediction options
- throws: an NSError object that describes the problem
- returns: the result of the prediction as [CLIP_TextEncoderOutput]
*/
public func predictions(inputs: [CLIP_TextEncoderInput], options: MLPredictionOptions = MLPredictionOptions()) throws -> [CLIP_TextEncoderOutput] {
let batchIn = MLArrayBatchProvider(array: inputs)
let batchOut = try model.predictions(from: batchIn, options: options)
var results : [CLIP_TextEncoderOutput] = []
results.reserveCapacity(inputs.count)
for i in 0..<batchOut.count {
let outProvider = batchOut.features(at: i)
let result = CLIP_TextEncoderOutput(features: outProvider)
results.append(result)
}
return results
}
}
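// MARK: Usage sketch
// A minimal end-to-end example (not part of the generated file; `encodeExamplePrompt` is a
// hypothetical helper). It assumes CLIP_TextEncoder.mlmodelc is bundled with this class and
// uses a zero-filled token matrix purely for illustration; a real caller would substitute
// CLIP tokenizer output.
@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, visionOS 1.0, *)
func encodeExamplePrompt() throws -> [Float] {
    // Load from the bundle with the default configuration (compute units chosen by Core ML).
    let encoder = try CLIP_TextEncoder()
    // Placeholder 1 by 77 token matrix in the shape the model expects.
    let tokens = MLShapedArray<Int32>(repeating: 0, shape: [1, 77])
    // Run the convenience prediction and return the 512-dimensional embedding.
    return try encoder.prediction(text: tokens).var_1317ShapedArray.scalars
}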