SmolVLM 500M Instruct — GGUF
SmolVLM-500M-Instruct quantized to GGUF format for llama.cpp, packaged for use with the RunAnywhere SDK.
Files:
- SmolVLM-500M-Instruct-Q8_0.gguf — Language model (~416 MB)
- mmproj-SmolVLM-500M-Instruct-f16.gguf — Vision encoder (~190 MB)
Usage with RunAnywhere SDK
Swift (iOS / macOS)
import RunAnywhere
// Register the model so the SDK knows where to download it from and how to run it.
// The two files match the "Files" list above: the Q8_0 language model plus the
// f16 vision-encoder projector required for multimodal inference.
RunAnywhere.registerModel(
id: "smolvlm-500m-instruct-q8_0",
name: "SmolVLM 500M Instruct Q8_0",
repo: "runanywhere/SmolVLM-500M-Instruct-GGUF",
files: ["SmolVLM-500M-Instruct-Q8_0.gguf", "mmproj-SmolVLM-500M-Instruct-f16.gguf"],
framework: .llamaCpp,
modality: .multimodal,
// NOTE(review): presumably bytes (~600 MB, roughly both files resident) — confirm units against SDK docs.
memoryRequirement: 600_000_000
)
// VLM inference with image
// NOTE(review): other platform examples below call download/load explicitly;
// here the SDK presumably downloads/loads on first use — verify.
let result = try await RunAnywhere.generateVLM(
prompt: "Describe what you see in this image.",
image: imageData,
modelId: "smolvlm-500m-instruct-q8_0"
)
Kotlin (Android / JVM)
import com.runanywhere.sdk.RunAnywhere
import com.runanywhere.sdk.models.*
// Register the model: the two files are the Q8_0 language model and the
// f16 vision-encoder projector listed in the "Files" section above.
RunAnywhere.registerModel(
id = "smolvlm-500m-instruct-q8_0",
name = "SmolVLM 500M Instruct Q8_0",
repo = "runanywhere/SmolVLM-500M-Instruct-GGUF",
files = listOf("SmolVLM-500M-Instruct-Q8_0.gguf", "mmproj-SmolVLM-500M-Instruct-f16.gguf"),
framework = InferenceFramework.LLAMA_CPP,
modality = ModelCategory.MULTIMODAL,
// NOTE(review): presumably bytes (~600 MB) — confirm units against SDK docs.
memoryRequirement = 600_000_000L
)
// Run vision-language inference with an image attached to the prompt.
// NOTE(review): generateVLM is presumably a suspend function — call from a coroutine.
val result = RunAnywhere.generateVLM(
prompt = "Describe what you see.",
image = imageData,
modelId = "smolvlm-500m-instruct-q8_0"
)
Web (TypeScript)
import { RunAnywhere, LLMFramework, ModelCategory } from '@anthropic/runanywhere-web';
// Register the model (batch API takes an array). Files match the "Files"
// section above: Q8_0 language model + f16 vision-encoder projector.
RunAnywhere.registerModels([{
id: 'smolvlm-500m-instruct-q8_0',
name: 'SmolVLM 500M Instruct Q8_0',
repo: 'runanywhere/SmolVLM-500M-Instruct-GGUF',
files: ['SmolVLM-500M-Instruct-Q8_0.gguf', 'mmproj-SmolVLM-500M-Instruct-f16.gguf'],
framework: LLMFramework.LlamaCpp,
modality: ModelCategory.Multimodal,
// NOTE(review): presumably bytes (~600 MB) — confirm units against SDK docs.
memoryRequirement: 600_000_000,
}]);
// On the web the download and load steps are explicit before inference.
await RunAnywhere.downloadModel('smolvlm-500m-instruct-q8_0');
await RunAnywhere.loadModel('smolvlm-500m-instruct-q8_0');
// Positional arguments: prompt, image data, model id.
const result = await RunAnywhere.generateVLM('Describe what you see.', imageData, 'smolvlm-500m-instruct-q8_0');
React Native (TypeScript)
import { RunAnywhere } from 'runanywhere-react-native';
// Register the model. Unlike the web SDK, framework/modality are passed as
// string literals here rather than enum members.
RunAnywhere.registerModel({
id: 'smolvlm-500m-instruct-q8_0',
name: 'SmolVLM 500M Instruct Q8_0',
repo: 'runanywhere/SmolVLM-500M-Instruct-GGUF',
files: ['SmolVLM-500M-Instruct-Q8_0.gguf', 'mmproj-SmolVLM-500M-Instruct-f16.gguf'],
framework: 'llamaCpp',
modality: 'multimodal',
// NOTE(review): presumably bytes (~600 MB) — confirm units against SDK docs.
memoryRequirement: 600_000_000,
});
// Positional arguments: prompt, image data, model id.
// NOTE(review): no explicit download/load here — presumably handled on first use; verify.
const result = await RunAnywhere.generateVLM('Describe what you see.', imageData, 'smolvlm-500m-instruct-q8_0');
Flutter (Dart)
import 'package:runanywhere_flutter/runanywhere_flutter.dart';
// Register the model: Q8_0 language model + f16 vision-encoder projector,
// matching the "Files" section above.
RunAnywhere.registerModel(
id: 'smolvlm-500m-instruct-q8_0',
name: 'SmolVLM 500M Instruct Q8_0',
repo: 'runanywhere/SmolVLM-500M-Instruct-GGUF',
files: ['SmolVLM-500M-Instruct-Q8_0.gguf', 'mmproj-SmolVLM-500M-Instruct-f16.gguf'],
framework: InferenceFramework.llamaCpp,
modality: ModelCategory.multimodal,
// NOTE(review): presumably bytes (~600 MB); Dart has no digit separators pre-3.6, hence the unseparated literal.
memoryRequirement: 600000000,
);
// Positional arguments: prompt, image data, model id.
final result = await RunAnywhere.generateVLM('Describe what you see.', imageData, 'smolvlm-500m-instruct-q8_0');
Model Details
| Property | Value |
|---|---|
| Base Model | SmolVLM-500M-Instruct |
| Parameters | 500M |
| Quantization | Q8_0 |
| Runtime | llama.cpp (with multimodal/mtmd) |
| Vision Encoder | SigLIP (F16) |
Attribution
Original model by HuggingFace. GGUF conversion by ggml-org.
- Downloads last month
- 21
Hardware compatibility
Log in to add your hardware
8-bit