Update README.md
README.md CHANGED
@@ -31,12 +31,22 @@ The additional details of the Aquila model will be presented in the official tec
 Aquila2-7B is a base model that can be used for continuation.
 
 ```python
-from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import BitsAndBytesConfig
+
 device = torch.device("cuda")
 model_info = "BAAI/Aquila2-7B"
 tokenizer = AutoTokenizer.from_pretrained(model_info, trust_remote_code=True)
-
+quantization_config=BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_compute_dtype=torch.bfloat16,
+)
+model = AutoModelForCausalLM.from_pretrained(model_info, trust_remote_code=True, torch_dtype=torch.float16,
+    # quantization_config=quantization_config, # Uncomment this line for 4bit quantization
+    )
 model.eval()
 model.to(device)
 text = "杭州亚运会的亮点和期待 2023年9月23日至10月8日,杭州将举办第19届亚洲运动会"
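The added `BitsAndBytesConfig` enables 4-bit NF4 weights with double quantization and bfloat16 compute. If that path is enabled by uncommenting the `quantization_config=quantization_config` line, note that bitsandbytes places the quantized weights on the GPU at load time, and recent transformers versions reject a later `model.to(device)` on a 4-bit model. A minimal sketch of that variant, assuming `bitsandbytes` and `accelerate` are installed (the `device_map="auto"` choice is illustrative, not taken from the diff):

```python
# Hypothetical 4-bit variant of the load step above (not part of the diff):
# pass the BitsAndBytesConfig instead of an explicit torch_dtype, let the
# loader place the quantized weights, and skip the later model.to(device) call.
model = AutoModelForCausalLM.from_pretrained(
    model_info,
    trust_remote_code=True,
    quantization_config=quantization_config,  # 4-bit NF4 weights, bfloat16 compute
    device_map="auto",                        # assumption: let accelerate handle placement
)
model.eval()
```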
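The hunk ends where `text` is defined (the Chinese prompt reads roughly "Highlights and expectations for the Hangzhou Asian Games: from September 23 to October 8, 2023, Hangzhou will host the 19th Asian Games"); the rest of the continuation example lies outside the diff. A sketch of how the prompt could be run, continuing from the snippet above and assuming only the stock `transformers` `generate()` API (the generation parameters are illustrative, not values from the README):

```python
# Continues from the snippet above (tokenizer, model, device, text already defined).
# Generation settings here are illustrative defaults, not taken from the diff.
inputs = tokenizer(text, return_tensors="pt").to(device)
with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=128,  # length of the generated continuation
        do_sample=True,      # sample instead of greedy decoding
        top_p=0.95,
    )
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```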