// LFM2-VL-450M-WebGPU / config.js
// Liquid AI LFM2-VL-450M-WebGPU Demo (upstream revision accf76b)
/**
 * Configuration for LFM2-VL-450M Demo
 * WebGPU inference with ONNX models from HuggingFace Hub
 */
const HF_BASE = 'https://huggingface.co/onnx-community/LFM2-VL-450M-ONNX/resolve/main';

/**
 * Build one model entry; every variant resolves against the same HF repo.
 * @param {string} id - Model identifier (also used as the MODELS key).
 * @param {string} label - Human-readable precision label.
 * @param {string} size - Approximate download size string.
 * @param {{decoder: ?string, visionEncoder: ?string}} quantization - Per-component
 *   quantization; null means full precision.
 * @returns {object} Model configuration record.
 */
const makeModel = (id, label, size, quantization) => ({
  id,
  path: HF_BASE,
  label,
  size,
  quantization,
});

// Model configurations, keyed by model ID.
export const MODELS = {
  'LFM2-VL-450M-FP16': makeModel(
    'LFM2-VL-450M-FP16',
    'FP16 (Half Precision)',
    '~1.05 GB',
    { decoder: 'fp16', visionEncoder: 'fp16' },
  ),
  'LFM2-VL-450M-FP32': makeModel(
    'LFM2-VL-450M-FP32',
    'FP32 (Full Precision)',
    '~2.1 GB',
    { decoder: null, visionEncoder: null },
  ),
};
// Default settings
export const DEFAULT_CONFIG = {
  defaultModel: 'LFM2-VL-450M-FP16',
  maxNewTokens: 512,
  temperature: 0.0
};

/**
 * Resolve the effective runtime configuration.
 *
 * Starts from DEFAULT_CONFIG and applies the VITE_DEFAULT_MODEL env override
 * when a bundler (e.g. Vite) exposes it on import.meta.env.
 * @returns {{defaultModel: string, maxNewTokens: number, temperature: number}}
 *   A fresh config object; DEFAULT_CONFIG itself is never mutated.
 */
export function getConfig() {
  // import.meta.env is only populated by bundlers; fall back to an empty bag.
  const env = (typeof import.meta !== 'undefined' && import.meta.env) || {};
  const overrides = env.VITE_DEFAULT_MODEL
    ? { defaultModel: env.VITE_DEFAULT_MODEL }
    : {};
  return { ...DEFAULT_CONFIG, ...overrides };
}
/**
 * Look up a model configuration by its ID.
 * @param {string} modelId - Key into MODELS (e.g. 'LFM2-VL-450M-FP16').
 * @returns {object|undefined} The matching model config, or undefined if unknown.
 */
export function getModelConfig(modelId) {
  const { [modelId]: model } = MODELS;
  return model;
}
/**
 * List every model configuration available to the UI.
 * @returns {object[]} Model config records, in the order declared in MODELS.
 */
export function getAvailableModels() {
  return Object.keys(MODELS).map((id) => MODELS[id]);
}