{
"bomFormat": "CycloneDX",
"specVersion": "1.6",
"serialNumber": "urn:uuid:e0a98ae4-ad3d-487c-a284-cb9382968df5",
"version": 1,
"metadata": {
"timestamp": "2025-10-07T08:05:04.094127+00:00",
"component": {
"type": "machine-learning-model",
"bom-ref": "AdaptLLM/finance-LLM-5d23c55e-22c4-55cb-9c9f-dcaaadc7903d",
"licenses": [],
"externalReferences": [
{
"url": "https://huggingface.co/AdaptLLM/finance-LLM",
"type": "documentation"
}
],
"modelCard": {
"modelParameters": {
"datasets": [
{
"ref": "Open-Orca/OpenOrca-bd2dde5e-b85b-5436-a786-d44f16da11b9"
},
{
"ref": "GAIR/lima-afa8f631-d0ed-59c0-a5a1-170c80a5117e"
},
{
"ref": "WizardLM/WizardLM_evol_instruct_V2_196k-3de546b1-38de-5aa5-8f76-d810e237648b"
}
],
"task": "text-generation",
"architectureFamily": "llama",
"modelArchitecture": "LLaMAForCausalLM"
},
"properties": [
{
"name": "library_name",
"value": "transformers"
}
]
},
"name": "AdaptLLM/finance-LLM",
"authors": [
{
"name": "AdaptLLM"
}
],
"tags": [
"transformers",
"pytorch",
"safetensors",
"llama",
"text-generation",
"finance",
"en",
"dataset:Open-Orca/OpenOrca",
"dataset:GAIR/lima",
"dataset:WizardLM/WizardLM_evol_instruct_V2_196k",
"arxiv:2309.09530",
"arxiv:2411.19930",
"arxiv:2406.14491",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
]
}
},
"components": [
{
"type": "data",
"bom-ref": "Open-Orca/OpenOrca-bd2dde5e-b85b-5436-a786-d44f16da11b9",
"name": "Open-Orca/OpenOrca",
"data": [
{
"type": "dataset",
"bom-ref": "Open-Orca/OpenOrca-bd2dde5e-b85b-5436-a786-d44f16da11b9",
"name": "Open-Orca/OpenOrca",
"contents": {
"url": "https://huggingface.co/datasets/Open-Orca/OpenOrca",
"properties": [
{
"name": "task_categories",
"value": "conversational, text-classification, token-classification, table-question-answering, question-answering, zero-shot-classification, summarization, feature-extraction, text-generation, text2text-generation"
},
{
"name": "language",
"value": "en"
},
{
"name": "size_categories",
"value": "10M<n<100M"
},
{
"name": "pretty_name",
"value": "OpenOrca"
},
{
"name": "license",
"value": "mit"
}
]
},
"description": "\ud83d\udc0b The OpenOrca Dataset! \ud83d\udc0b\n\n\n\nWe are thrilled to announce the release of the OpenOrca dataset!\nThis rich collection of augmented FLAN data aligns, as best as possible, with the distributions outlined in the Orca paper.\nIt has been instrumental in generating high-performing model checkpoints and serves as a valuable resource for all NLP researchers and developers!\n\n\t\n\t\t\n\t\n\t\n\t\tOfficial Models\n\t\n\n\n\t\n\t\n\t\n\t\tMistral-7B-OpenOrca\n\t\n\nOur latest model, the first 7B to score better overall than all\u2026 See the full description on the dataset page: https://huggingface.co/datasets/Open-Orca/OpenOrca.",
"governance": {
"owners": [
{
"organization": {
"name": "Open-Orca",
"url": "https://huggingface.co/Open-Orca"
}
}
]
}
}
]
},
{
"type": "data",
"bom-ref": "GAIR/lima-afa8f631-d0ed-59c0-a5a1-170c80a5117e",
"name": "GAIR/lima",
"data": [
{
"type": "dataset",
"bom-ref": "GAIR/lima-afa8f631-d0ed-59c0-a5a1-170c80a5117e",
"name": "GAIR/lima",
"contents": {
"url": "https://huggingface.co/datasets/GAIR/lima",
"properties": [
{
"name": "license",
"value": "other"
}
]
},
"description": "A high-quality dataset for efficient instruction tuning.",
"governance": {
"owners": [
{
"organization": {
"name": "GAIR",
"url": "https://huggingface.co/GAIR"
}
}
]
}
}
]
},
{
"type": "data",
"bom-ref": "WizardLM/WizardLM_evol_instruct_V2_196k-3de546b1-38de-5aa5-8f76-d810e237648b",
"name": "WizardLM/WizardLM_evol_instruct_V2_196k",
"data": [
{
"type": "dataset",
"bom-ref": "WizardLM/WizardLM_evol_instruct_V2_196k-3de546b1-38de-5aa5-8f76-d810e237648b",
"name": "WizardLM/WizardLM_evol_instruct_V2_196k",
"contents": {
"url": "https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k",
"properties": [
{
"name": "license",
"value": "mit"
}
]
},
"description": "\n\t\n\t\t\n\t\tNews\n\t\n\n\n\ud83d\udd25 \ud83d\udd25 \ud83d\udd25 [08/11/2023] We release WizardMath Models.\n\ud83d\udd25 Our WizardMath-70B-V1.0 model slightly outperforms some closed-source LLMs on the GSM8K, including ChatGPT 3.5, Claude Instant 1 and PaLM 2 540B.\n\ud83d\udd25 Our WizardMath-70B-V1.0 model achieves 81.6 pass@1 on the GSM8k Benchmarks, which is 24.8 points higher than the SOTA open-source LLM.\n\ud83d\udd25 Our WizardMath-70B-V1.0 model achieves 22.7 pass@1 on the MATH Benchmarks, which is 9.2 points higher than the SOTA open-source LLM.\u2026 See the full description on the dataset page: https://huggingface.co/datasets/WizardLMTeam/WizardLM_evol_instruct_V2_196k.",
"governance": {
"owners": [
{
"organization": {
"name": "WizardLMTeam",
"url": "https://huggingface.co/WizardLMTeam"
}
}
]
}
}
]
}
]
}
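
A minimal consumption sketch (not part of the SBOM itself): the plain-Python snippet below, assuming the document above is saved as finance-LLM.cdx.json (a hypothetical filename), loads this CycloneDX 1.6 AI-BOM and resolves the model card's dataset refs against the top-level components list, printing each training dataset with its declared license.

import json

# Assumption: the JSON document above is saved as finance-LLM.cdx.json.
with open("finance-LLM.cdx.json", encoding="utf-8") as f:
    bom = json.load(f)

model = bom["metadata"]["component"]
print("Model:", model["name"])

# Index the top-level components by bom-ref so dataset refs can be resolved.
by_ref = {c["bom-ref"]: c for c in bom.get("components", [])}

for ds in model["modelCard"]["modelParameters"]["datasets"]:
    comp = by_ref.get(ds["ref"])
    if comp is None:
        continue  # ref points outside this document; skip it
    entry = comp["data"][0]  # each data component nests one dataset entry
    licenses = [p["value"] for p in entry["contents"].get("properties", [])
                if p["name"] == "license"]
    print("-", comp["name"], "(license:", (", ".join(licenses) or "unspecified") + ")")

Run against the document above, this would list Open-Orca/OpenOrca (mit), GAIR/lima (other), and WizardLM/WizardLM_evol_instruct_V2_196k (mit) under the model AdaptLLM/finance-LLM.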