zzzlift committed · Commit a582dc5 · Parent: 7f77677 · create

Files changed:
- README.md +57 -0
- config.json +34 -0
- merges.txt +0 -0
- special_tokens_map.json +6 -0
- tokenizer.json +0 -0
- tokenizer_config.json +11 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,57 @@
## bloom-7b-rl

This is a reinforcement-learning-enhanced BLOOM model (bloom-rl), fine-tuned from bloom-7b (Muennighoff et al.).

### Usage

If you don't have a GPU with more than 20 GB of memory, use the code below:

```python
# pip install -q transformers accelerate
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "hongyin/bloom-7b-rl"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

inputs = tokenizer.encode("Translate to Chinese: I love you.", return_tensors="pt")
outputs = model.generate(inputs)
print(tokenizer.decode(outputs[0]))

# Expected output:
# Translate to Chinese: I love you. 翻译:我爱你
```
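
If you have a GPU with less than 20 GB of memory, 8-bit quantization may let the 7B model fit. This sketch is not part of the original card; it assumes the `bitsandbytes` package is installed and a CUDA GPU with roughly 10 GB of free memory is available:

```python
# pip install -q transformers accelerate bitsandbytes
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "hongyin/bloom-7b-rl"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# load_in_8bit quantizes the linear layers to int8 at load time (needs bitsandbytes);
# device_map="auto" places the weights on the available GPU(s).
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", load_in_8bit=True)

inputs = tokenizer.encode("Translate to Chinese: I love you.", return_tensors="pt").to("cuda")
outputs = model.generate(inputs)
print(tokenizer.decode(outputs[0]))
```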

If you have a GPU with more than 20 GB of memory, use the code below:

```python
# pip install -q transformers accelerate
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "hongyin/bloom-7b-rl"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype="auto", device_map="auto")

inputs = tokenizer.encode("Translate to Chinese: I love you.", return_tensors="pt").to("cuda")
outputs = model.generate(inputs)
print(tokenizer.decode(outputs[0]))

# Expected output:
# Translate to Chinese: I love you. 翻译:我爱你
```
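
The calls above use `generate()` with its default greedy settings, so the continuation is short. Generation can be tuned with standard `transformers` arguments; a sketch, not part of the original card:

```python
# Assumes `model`, `tokenizer`, and `inputs` are already set up as in the snippet above.
outputs = model.generate(
    inputs,
    max_new_tokens=64,      # allow a longer continuation than the default
    do_sample=True,         # sample instead of greedy decoding
    temperature=0.7,        # lower = more deterministic
    top_p=0.9,              # nucleus-sampling cutoff
    repetition_penalty=1.1, # discourage verbatim repetition
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```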

## Bibtex entry and citation info

Please cite this work if you find it helpful.

```bibtex
@article{zhu2023metaaid,
  title={MetaAID 2.0: An Extensible Framework for Developing Metaverse Applications via Human-controllable Pre-trained Models},
  author={Zhu, Hongyin},
  journal={arXiv preprint arXiv:2302.13173},
  year={2023}
}
```

---
license: other
---
config.json
ADDED
@@ -0,0 +1,34 @@
{
  "_name_or_path": "bloom-7b1-ckpt",
  "apply_residual_connection_post_layernorm": false,
  "architectures": [
    "BloomForCausalLM"
  ],
  "attention_dropout": 0.0,
  "attention_softmax_in_fp32": true,
  "bias_dropout_fusion": true,
  "bos_token_id": 1,
  "dropout": 0.0,
  "end_token_id": 2,
  "eos_token_id": 2,
  "hidden_dropout": 0.0,
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "masked_softmax_fusion": true,
  "model_type": "bloom",
  "n_head": 32,
  "n_inner": null,
  "n_layer": 30,
  "offset_alibi": 100,
  "pad_token_id": 2,
  "pretraining_tp": 1,
  "skip_bias_add": true,
  "skip_bias_add_qkv": false,
  "slow_but_exact": false,
  "torch_dtype": "float32",
  "transformers_version": "4.28.0",
  "unk_token_id": 0,
  "use_cache": true,
  "vocab_size": 250680
}
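
The config above fixes the architecture: 30 transformer blocks, 32 attention heads, hidden size 4096, and a 250,680-token vocabulary. As a quick sanity check, this sketch (not part of the original repo) loads only the config and estimates the parameter count and memory footprint from those numbers:

```python
from transformers import AutoConfig

# Downloads just config.json, not the weights.
config = AutoConfig.from_pretrained("hongyin/bloom-7b-rl")
print(config.model_type, config.n_layer, config.n_head, config.hidden_size, config.vocab_size)

# Rough estimate: token embeddings plus ~12 * hidden_size^2 weights per block
# (ignoring biases and layer norms).
approx_params = config.vocab_size * config.hidden_size + config.n_layer * 12 * config.hidden_size ** 2
print(f"~{approx_params / 1e9:.1f}B parameters, "
      f"~{approx_params * 4 / 1e9:.0f} GB in float32, ~{approx_params * 2 / 1e9:.0f} GB in float16")
```

With these values the estimate comes out around 7B parameters (roughly 28 GB in float32), in line with the ">20 GB GPU" guidance in the README.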
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
{
  "bos_token": "<s>",
  "eos_token": "</s>",
  "pad_token": "<pad>",
  "unk_token": "<unk>"
}
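
The map above only names the special tokens; their integer IDs are declared separately in config.json and resolved by the tokenizer. A quick cross-check, sketched here (not part of the original repo):

```python
from transformers import AutoConfig, AutoTokenizer

checkpoint = "hongyin/bloom-7b-rl"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
config = AutoConfig.from_pretrained(checkpoint)

# Each special token string should resolve to the matching *_token_id in config.json.
print("bos:", tokenizer.bos_token, tokenizer.bos_token_id, "| config:", config.bos_token_id)
print("eos:", tokenizer.eos_token, tokenizer.eos_token_id, "| config:", config.eos_token_id)
print("pad:", tokenizer.pad_token, tokenizer.pad_token_id, "| config:", config.pad_token_id)
print("unk:", tokenizer.unk_token, tokenizer.unk_token_id, "| config:", config.unk_token_id)
```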
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,11 @@
{
  "add_prefix_space": false,
  "bos_token": "<s>",
  "eos_token": "</s>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
  "padding_side": "left",
  "special_tokens_map_file": null,
  "tokenizer_class": "BloomTokenizer",
  "unk_token": "<unk>"
}
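
`"padding_side": "left"` matters for batched generation with a decoder-only model like BLOOM: padding goes before each prompt, so new tokens are appended directly after the real text. A minimal batched-generation sketch (not part of the original repo), assuming a CUDA GPU as in the README's second snippet:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "hongyin/bloom-7b-rl"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)  # uses the left-padding setting above
model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype="auto", device_map="auto")

prompts = [
    "Translate to Chinese: I love you.",
    "Translate to Chinese: Good morning.",
]
batch = tokenizer(prompts, return_tensors="pt", padding=True).to("cuda")

outputs = model.generate(**batch, max_new_tokens=32)
for text in tokenizer.batch_decode(outputs, skip_special_tokens=True):
    print(text)
```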
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff