---
# Model Configuration for Vision Language Models and Language Models
# This file contains model configurations for easy integration
models:
  # InternVL Vision-Language Models
  InternVL3-8B:
    name: "InternVL3-8B"
    model_id: "OpenGVLab/InternVL3-8B"
    model_type: "internvl"
    description: "Fastest model, good for quick processing"
    supported_quantizations:
      - "non-quantized(fp16)"
      - "quantized(8bit)"
    default_quantization: "non-quantized(fp16)"

  InternVL3-14B:
    name: "InternVL3-14B"
    model_id: "OpenGVLab/InternVL3-14B"
    model_type: "internvl"
    description: "Balanced performance and quality"
    supported_quantizations:
      - "non-quantized(fp16)"
      - "quantized(8bit)"
    default_quantization: "quantized(8bit)"

  InternVL3-38B:
    name: "InternVL3-38B"
    model_id: "OpenGVLab/InternVL3-38B"
    model_type: "internvl"
    description: "Highest quality, requires significant GPU memory"
    supported_quantizations:
      - "non-quantized(fp16)"
      - "quantized(8bit)"
    default_quantization: "quantized(8bit)"

  InternVL3_5-8B:
    name: "InternVL3_5-8B"
    model_id: "OpenGVLab/InternVL3_5-8B"
    model_type: "internvl"
    description: "Fastest model, good for quick processing"
    supported_quantizations:
      - "non-quantized(fp16)"
      - "quantized(8bit)"
    default_quantization: "non-quantized(fp16)"

  # Qwen Language Models (Text-only)
  Qwen2.5-7B-Instruct:
    name: "Qwen2.5-7B-Instruct"
    model_id: "Qwen/Qwen2.5-7B-Instruct"
    model_type: "qwen"
    description: "Qwen2.5 7B instruction-tuned model for text generation"
    supported_quantizations:
      - "non-quantized(fp16)"
      - "quantized(8bit)"
    default_quantization: "quantized(8bit)"

  Qwen2.5-14B-Instruct:
    name: "Qwen2.5-14B-Instruct"
    model_id: "Qwen/Qwen2.5-14B-Instruct"
    model_type: "qwen"
    description: "Qwen2.5 14B instruction-tuned model for better text generation"
    supported_quantizations:
      - "non-quantized(fp16)"
      - "quantized(8bit)"
    default_quantization: "quantized(8bit)"

# Default model selection
default_model: "InternVL3-8B"