{
"bomFormat": "CycloneDX",
"specVersion": "1.6",
"serialNumber": "urn:uuid:f3d4b532-6fad-4b9a-b4cb-24dc7ba6e74a",
"version": 1,
"metadata": {
"timestamp": "2025-06-05T09:39:11.687113+00:00",
"component": {
"type": "machine-learning-model",
"bom-ref": "OpenGVLab/InternVL3-78B-e9d921a6-e013-5632-9136-d28da087616e",
"name": "OpenGVLab/InternVL3-78B",
"externalReferences": [
{
"url": "https://huggingface.co/OpenGVLab/InternVL3-78B",
"type": "documentation"
}
],
"modelCard": {
"modelParameters": {
"task": "image-text-to-text",
"architectureFamily": "internvl_chat",
"modelArchitecture": "InternVLChatModel",
"datasets": [
{
"ref": "OpenGVLab/MMPR-v1.2-f5ad7f01-75b1-5539-aff3-747fe24b14f6"
}
]
},
"properties": [
{
"name": "library_name",
"value": "transformers"
},
{
"name": "base_model",
"value": "OpenGVLab/InternVL3-78B-Instruct"
},
{
"name": "base_model_relation",
"value": "finetune"
}
]
},
"authors": [
{
"name": "OpenGVLab"
}
],
"licenses": [
{
"license": {
"name": "qwen",
"url": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct/blob/main/LICENSE"
}
}
],
"description": "We introduce InternVL3, an advanced multimodal large language model (MLLM) series that demonstrates superior overall performance. Compared to InternVL 2.5, InternVL3 exhibits superior multimodal perception and reasoning capabilities, while further extending its multimodal capabilities to encompass tool usage, GUI agents, industrial image analysis, 3D vision perception, and more. Additionally, we compare InternVL3 with Qwen2.5 Chat models, whose corresponding pre-trained base models are employed as the initialization of the language component in InternVL3. Benefiting from Native Multimodal Pre-Training, the InternVL3 series achieves even better overall text performance than the Qwen2.5 series.",
"tags": [
"transformers",
"safetensors",
"internvl_chat",
"feature-extraction",
"internvl",
"custom_code",
"image-text-to-text",
"conversational",
"multilingual",
"dataset:OpenGVLab/MMPR-v1.2",
"arxiv:2312.14238",
"arxiv:2404.16821",
"arxiv:2412.05271",
"arxiv:2411.10442",
"arxiv:2504.10479",
"arxiv:2412.09616",
"base_model:OpenGVLab/InternVL3-78B-Instruct",
"base_model:finetune:OpenGVLab/InternVL3-78B-Instruct",
"license:other",
"region:us"
]
}
},
"components": [
{
"type": "data",
"bom-ref": "OpenGVLab/MMPR-v1.2-f5ad7f01-75b1-5539-aff3-747fe24b14f6",
"name": "OpenGVLab/MMPR-v1.2",
"data": [
{
"type": "dataset",
"bom-ref": "OpenGVLab/MMPR-v1.2-f5ad7f01-75b1-5539-aff3-747fe24b14f6-data",
"name": "OpenGVLab/MMPR-v1.2",
"contents": {
"url": "https://huggingface.co/datasets/OpenGVLab/MMPR-v1.2",
"properties": [
{
"name": "task_categories",
"value": "visual-question-answering"
},
{
"name": "language",
"value": "en"
},
{
"name": "size_categories",
"value": "1M<n<10M"
},
{
"name": "pretty_name",
"value": "MMPR-v1.2"
},
{
"name": "configs",
"value": "Name of the dataset subset: default {\"split\": \"train\", \"path\": \"annotations.zip\"}"
},
{
"name": "license",
"value": "mit"
}
]
},
"governance": {
"owners": [
{
"organization": {
"name": "OpenGVLab",
"url": "https://huggingface.co/OpenGVLab"
}
}
]
},
"description": "\n\t\n\t\t\n\t\tMMPR-v1.2\n\t\n\n[\ud83d\udcc2 GitHub] [\ud83c\udd95 Blog] [\ud83d\udcdc Paper] [\ud83d\udcd6 Documents]\nThis is a newer version of MMPR and MMPR-v1.1, which includes additional data sources to enhance the data diversity and greatly improves the overall performance of InternVL3 across all scales. The prompts used to build this dataset is released in MMPR-v1.2-prompts.\nTo unzip the archive of images, please first run cat images.zip_* > images.zip and then run unzip images.zip.\n\n\n\t\n\t\t\n\t\tIntroduction\n\t\n\nMMPR is a large-scale and\u2026 See the full description on the dataset page: https://huggingface.co/datasets/OpenGVLab/MMPR-v1.2."
}
]
}
]
}