/**
* Configuration for LFM2-VL-450M Demo
* WebGPU inference with ONNX models from HuggingFace Hub
*/
// Base URL for the ONNX model artifacts on the HuggingFace Hub.
const HF_BASE = 'https://huggingface.co/onnx-community/LFM2-VL-450M-ONNX/resolve/main';
// Model configurations.
// Frozen (including nested objects) so the shared config cannot be mutated
// accidentally — getModelConfig()/getAvailableModels() hand these out by reference.
export const MODELS = Object.freeze({
  'LFM2-VL-450M-FP16': Object.freeze({
    id: 'LFM2-VL-450M-FP16',
    path: HF_BASE,
    label: 'FP16 (Half Precision)',
    size: '~1.05 GB',
    quantization: Object.freeze({ decoder: 'fp16', visionEncoder: 'fp16' })
  }),
  'LFM2-VL-450M-FP32': Object.freeze({
    id: 'LFM2-VL-450M-FP32',
    path: HF_BASE,
    label: 'FP32 (Full Precision)',
    size: '~2.1 GB',
    // null means "no quantization" — load the full-precision ONNX graph.
    quantization: Object.freeze({ decoder: null, visionEncoder: null })
  })
});
// Default settings.
// Frozen: getConfig() returns shallow copies, never this object itself,
// so freezing cannot break any caller while preventing accidental mutation.
export const DEFAULT_CONFIG = Object.freeze({
  defaultModel: 'LFM2-VL-450M-FP16',
  maxNewTokens: 512,
  // 0.0 = greedy (deterministic) decoding.
  temperature: 0.0
});
/**
 * Build the runtime config, applying optional environment overrides.
 *
 * Starts from a shallow copy of DEFAULT_CONFIG so the defaults are never
 * mutated, then applies `VITE_DEFAULT_MODEL` from `import.meta.env` (Vite)
 * when it is present and non-empty.
 *
 * @returns {{defaultModel: string, maxNewTokens: number, temperature: number}}
 *   A fresh config object safe for the caller to mutate.
 */
export function getConfig() {
  const config = { ...DEFAULT_CONFIG };
  // `typeof` guard kept for non-Vite bundles where import.meta may be absent;
  // optional chaining covers a missing `env` object. Truthiness (not `??`)
  // is deliberate: an empty-string override should be ignored.
  const envModel =
    typeof import.meta !== 'undefined'
      ? import.meta.env?.VITE_DEFAULT_MODEL
      : undefined;
  if (envModel) {
    config.defaultModel = envModel;
  }
  return config;
}
/**
 * Look up a model configuration by its ID.
 *
 * Uses Object.hasOwn so only keys actually declared in MODELS resolve;
 * without the guard, IDs like 'toString' or 'constructor' would return
 * Object.prototype members instead of undefined.
 *
 * @param {string} modelId - Key into MODELS (e.g. 'LFM2-VL-450M-FP16').
 * @returns {object|undefined} The model config, or undefined if unknown.
 */
export function getModelConfig(modelId) {
  return Object.hasOwn(MODELS, modelId) ? MODELS[modelId] : undefined;
}
/**
 * List every model configuration declared in MODELS.
 *
 * @returns {object[]} Model config objects, in declaration order.
 */
export function getAvailableModels() {
  return Object.keys(MODELS).map((modelId) => MODELS[modelId]);
}