Update README.md
Browse files
README.md
CHANGED
|
@@ -4,14 +4,14 @@ license: apache-2.0
|
|
| 4 |
extra_gated_description: If you want to learn more about how we process your personal data, please read our <a href="https://mistral.ai/terms/">Privacy Policy</a>.
|
| 5 |
---
|
| 6 |
|
| 7 |
-
# Model Card for Mathstral-7B-v0.1
|
| 8 |
|
| 9 |
Mathstral 7B is a model specializing in mathematical and scientific tasks, based on Mistral 7B.
|
| 10 |
You can read more in the [official blog post](https://mistral.ai/news/mathstral/).
|
| 11 |
|
| 12 |
## Installation
|
| 13 |
|
| 14 |
-
It is recommended to use `mistralai/mathstral-7B-v0.1` with [mistral-inference](https://github.com/mistralai/mistral-inference)
|
| 15 |
|
| 16 |
|
| 17 |
```
|
|
@@ -25,10 +25,10 @@ pip install mistral_inference>=1.2.0
|
|
| 25 |
from huggingface_hub import snapshot_download
|
| 26 |
from pathlib import Path
|
| 27 |
|
| 28 |
-
mistral_models_path = Path.home().joinpath('mistral_models', 'mathstral-7B-v0.1')
|
| 29 |
mistral_models_path.mkdir(parents=True, exist_ok=True)
|
| 30 |
|
| 31 |
-
snapshot_download(repo_id="mistralai/mathstral-7B-v0.1", allow_patterns=["params.json", "consolidated.safetensors", "tokenizer.model.v3"], local_dir=mistral_models_path)
|
| 32 |
```
|
| 33 |
|
| 34 |
### Chat
|
|
@@ -36,7 +36,7 @@ snapshot_download(repo_id="mistralai/mathstral-7B-v0.1", allow_patterns=["params
|
|
| 36 |
After installing `mistral_inference`, a `mistral-demo` CLI command should be available in your environment.
|
| 37 |
|
| 38 |
```
|
| 39 |
-
mistral-chat $HOME/mistral_models/mathstral-7B-v0.1 --instruct --max_tokens 256
|
| 40 |
```
|
| 41 |
|
| 42 |
You can then start chatting with the model, *e.g.* prompt it with something like:
|
|
@@ -52,7 +52,7 @@ To use this model within the `transformers` library, install the latest release
|
|
| 52 |
from transformers import pipeline
|
| 53 |
import torch
|
| 54 |
|
| 55 |
-
checkpoint = "mistralai/mathstral-7B-v0.1"
|
| 56 |
pipe = pipeline("text-generation", checkpoint, device_map="auto", torch_dtype=torch.bfloat16)
|
| 57 |
|
| 58 |
prompt = [{"role": "user", "content": "What are the roots of unity?"}]
|
|
@@ -68,7 +68,7 @@ You can also manually tokenize the input and generate text from the model, rathe
|
|
| 68 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 69 |
import torch
|
| 70 |
|
| 71 |
-
checkpoint = "mistralai/mathstral-7B-v0.1"
|
| 72 |
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
|
| 73 |
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.bfloat16)
|
| 74 |
|
|
|
|
| 4 |
extra_gated_description: If you want to learn more about how we process your personal data, please read our <a href="https://mistral.ai/terms/">Privacy Policy</a>.
|
| 5 |
---
|
| 6 |
|
| 7 |
+
# Model Card for Mathstral-7b-v0.1
|
| 8 |
|
| 9 |
Mathstral 7B is a model specializing in mathematical and scientific tasks, based on Mistral 7B.
|
| 10 |
You can read more in the [official blog post](https://mistral.ai/news/mathstral/).
|
| 11 |
|
| 12 |
## Installation
|
| 13 |
|
| 14 |
+
It is recommended to use `mistralai/Mathstral-7b-v0.1` with [mistral-inference](https://github.com/mistralai/mistral-inference)
|
| 15 |
|
| 16 |
|
| 17 |
```
|
|
|
|
| 25 |
from huggingface_hub import snapshot_download
|
| 26 |
from pathlib import Path
|
| 27 |
|
| 28 |
+
mistral_models_path = Path.home().joinpath('mistral_models', 'Mathstral-7b-v0.1')
|
| 29 |
mistral_models_path.mkdir(parents=True, exist_ok=True)
|
| 30 |
|
| 31 |
+
snapshot_download(repo_id="mistralai/Mathstral-7b-v0.1", allow_patterns=["params.json", "consolidated.safetensors", "tokenizer.model.v3"], local_dir=mistral_models_path)
|
| 32 |
```
|
| 33 |
|
| 34 |
### Chat
|
|
|
|
| 36 |
After installing `mistral_inference`, a `mistral-demo` CLI command should be available in your environment.
|
| 37 |
|
| 38 |
```
|
| 39 |
+
mistral-chat $HOME/mistral_models/Mathstral-7b-v0.1 --instruct --max_tokens 256
|
| 40 |
```
|
| 41 |
|
| 42 |
You can then start chatting with the model, *e.g.* prompt it with something like:
|
|
|
|
| 52 |
from transformers import pipeline
|
| 53 |
import torch
|
| 54 |
|
| 55 |
+
checkpoint = "mistralai/Mathstral-7b-v0.1"
|
| 56 |
pipe = pipeline("text-generation", checkpoint, device_map="auto", torch_dtype=torch.bfloat16)
|
| 57 |
|
| 58 |
prompt = [{"role": "user", "content": "What are the roots of unity?"}]
|
|
|
|
| 68 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 69 |
import torch
|
| 70 |
|
| 71 |
+
checkpoint = "mistralai/Mathstral-7b-v0.1"
|
| 72 |
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
|
| 73 |
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.bfloat16)
|
| 74 |
|