add AIBOM #12
by RiccardoDav - opened
internlm_internlm2_5-7b-chat.json
ADDED
@@ -0,0 +1,58 @@
+{
+  "bomFormat": "CycloneDX",
+  "specVersion": "1.6",
+  "serialNumber": "urn:uuid:ff6193bd-9554-4b7f-a58c-756d9db69bcb",
+  "version": 1,
+  "metadata": {
+    "timestamp": "2025-06-05T09:35:32.373679+00:00",
+    "component": {
+      "type": "machine-learning-model",
+      "bom-ref": "internlm/internlm2_5-7b-chat-32b070af-4c58-586a-a31a-ee5c97440384",
+      "name": "internlm/internlm2_5-7b-chat",
+      "externalReferences": [
+        {
+          "url": "https://huggingface.co/internlm/internlm2_5-7b-chat",
+          "type": "documentation"
+        }
+      ],
+      "modelCard": {
+        "modelParameters": {
+          "task": "text-generation",
+          "architectureFamily": "internlm2",
+          "modelArchitecture": "InternLM2ForCausalLM"
+        },
+        "properties": [
+          {
+            "name": "library_name",
+            "value": "transformers"
+          }
+        ]
+      },
+      "authors": [
+        {
+          "name": "internlm"
+        }
+      ],
+      "licenses": [
+        {
+          "license": {
+            "name": "other"
+          }
+        }
+      ],
+      "description": "InternLM2.5 has open-sourced a 7 billion parameter base model and a chat model tailored for practical scenarios. The model has the following characteristics:\n- **Outstanding reasoning capability**: State-of-the-art performance on math reasoning, surpassing models like Llama3 and Gemma2-9B.\n- **1M context window**: Nearly perfect at finding needles in the haystack with a 1M-long context, with leading performance on long-context tasks like LongBench. Try it with [LMDeploy](https://github.com/InternLM/InternLM/blob/main/chat/lmdeploy.md) for 1M-context inference.\n- **Stronger tool use**: InternLM2.5 supports gathering information from more than 100 web pages; the corresponding implementation has been released in [MindSearch](https://github.com/InternLM/MindSearch). InternLM2.5 has better tool-utilization capabilities in instruction following, tool selection and reflection. See [examples](https://github.com/InternLM/InternLM/blob/main/agent/lagent.md).",
+      "tags": [
+        "transformers",
+        "safetensors",
+        "internlm2",
+        "text-generation",
+        "conversational",
+        "custom_code",
+        "arxiv:2403.17297",
+        "license:other",
+        "autotrain_compatible",
+        "region:us"
+      ]
+    }
+  }
+}
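For reviewers who want to sanity-check the added file, here is a minimal sketch (Python, standard library only) that loads the AIBOM and verifies the CycloneDX envelope and the model component. The filename and relative path are assumptions taken from this PR; full validation against the official CycloneDX 1.6 JSON schema would be a separate step.

```python
import json

# Load the AIBOM added in this PR (path assumed relative to the repo root).
with open("internlm_internlm2_5-7b-chat.json") as f:
    bom = json.load(f)

# Check the CycloneDX envelope fields.
assert bom["bomFormat"] == "CycloneDX"
assert bom["specVersion"] == "1.6"

# Inspect the machine-learning-model component described by the BOM.
component = bom["metadata"]["component"]
print(component["type"])  # machine-learning-model
print(component["name"])  # internlm/internlm2_5-7b-chat
print(component["modelCard"]["modelParameters"]["modelArchitecture"])  # InternLM2ForCausalLM
```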