{
  "bomFormat": "CycloneDX",
  "specVersion": "1.6",
  "serialNumber": "urn:uuid:6abaa880-fd87-409b-b3dd-f7017767e93d",
  "version": 1,
  "metadata": {
    "timestamp": "2025-07-10T09:01:27.226304+00:00",
    "component": {
      "type": "machine-learning-model",
      "bom-ref": "AI-MO/NuminaMath-7B-TIR-25bf135d-b699-508e-a81d-4ff4bb0efb1d",
      "name": "AI-MO/NuminaMath-7B-TIR",
      "externalReferences": [
        {
          "url": "https://huggingface.co/AI-MO/NuminaMath-7B-TIR",
          "type": "documentation"
        }
      ],
      "modelCard": {
        "modelParameters": {
          "task": "text-generation",
          "architectureFamily": "llama",
          "modelArchitecture": "LlamaForCausalLM"
        },
        "properties": [
          {
            "name": "library_name",
            "value": "transformers"
          },
          {
            "name": "base_model",
            "value": "deepseek-ai/deepseek-math-7b-base"
          }
        ],
        "considerations": {
          "useCases": [
            "Here's how you can run the model using the `pipeline()` function from \ud83e\udd17 Transformers:\n\n```python\nimport re\nimport torch\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"AI-MO/NuminaMath-7B-TIR\", torch_dtype=torch.bfloat16, device_map=\"auto\")\n\nmessages = [\n    {\"role\": \"user\", \"content\": \"For how many values of the constant $k$ will the polynomial $x^{2}+kx+36$ have two distinct integer roots?\"},\n]\n\nprompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n\ngen_config = {\n    \"max_new_tokens\": 1024,\n    \"do_sample\": False,\n    \"stop_strings\": [\"```output\"], # Generate until Python code block is complete\n    \"tokenizer\": pipe.tokenizer,\n}\n\noutputs = pipe(prompt, **gen_config)\ntext = outputs[0][\"generated_text\"]\nprint(text)"
          ]
        }
      },
      "authors": [
        {
          "name": "AI-MO"
        }
      ],
      "licenses": [
        {
          "license": {
            "id": "Apache-2.0",
            "url": "https://spdx.org/licenses/Apache-2.0.html"
          }
        }
      ],
      "description": "- **Model type:** A 7B parameter math LLM fine-tuned in two stages of supervised fine-tuning, first on a dataset with math problem-solution pairs and then on a synthetic dataset with examples of multi-step generations using tool-integrated reasoning.\n- **Language(s) (NLP):** Primarily English\n- **License:** Apache 2.0\n- **Finetuned from model:** [deepseek-ai/deepseek-math-7b-base](https://huggingface.co/deepseek-ai/deepseek-math-7b-base)",
      "tags": [
        "transformers",
        "safetensors",
        "llama",
        "text-generation",
        "alignment-handbook",
        "generated_from_trainer",
        "conversational",
        "arxiv:2309.17452",
        "base_model:deepseek-ai/deepseek-math-7b-base",
        "base_model:finetune:deepseek-ai/deepseek-math-7b-base",
        "license:apache-2.0",
        "autotrain_compatible",
        "text-generation-inference",
        "endpoints_compatible",
        "region:us"
      ]
    }
  }
}