Upload folder using huggingface_hub
- CLIP-ViT-H-14-laion2B-s32B-b79K/.mdl +0 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/.msc +0 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/.mv +1 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/config.json +179 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/configuration.json +1 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/merges.txt +0 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/model.safetensors +3 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/open_clip_config.json +31 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/open_clip_model.safetensors +3 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/open_clip_pytorch_model.bin +3 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/preprocessor_config.json +19 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/pytorch_model.bin +3 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/special_tokens_map.json +1 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/tokenizer.json +0 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/tokenizer_config.json +34 -0
- CLIP-ViT-H-14-laion2B-s32B-b79K/vocab.json +0 -0
- HPS_v2/HPS_v2.1_compressed.safetensors +3 -0
- HPS_v2/HPS_v2_compressed.safetensors +3 -0
- ImageReward/ImageReward.safetensors +3 -0
- ImageReward/med_config.json +22 -0
- MPS_overall_checkpoint/.mdl +0 -0
- MPS_overall_checkpoint/.msc +0 -0
- MPS_overall_checkpoint/.mv +1 -0
- MPS_overall_checkpoint/MPS_overall_checkpoint_diffsynth.safetensors +3 -0
- MPS_overall_checkpoint/configuration.json +1 -0
- PickScore_v1/config.json +35 -0
- PickScore_v1/merges.txt +0 -0
- PickScore_v1/model.safetensors +3 -0
- PickScore_v1/special_tokens_map.json +30 -0
- PickScore_v1/tokenizer.json +0 -0
- PickScore_v1/tokenizer_config.json +31 -0
- PickScore_v1/vocab.json +0 -0
- README.md +24 -0
- README_from_modelscope.md +47 -0
- aesthetic-predictor/sac+logos+ava1-l14-linearMSE.safetensors +3 -0
- bert-base-uncased/.mdl +0 -0
- bert-base-uncased/.msc +0 -0
- bert-base-uncased/.mv +1 -0
- bert-base-uncased/LICENSE +201 -0
- bert-base-uncased/README.md +251 -0
- bert-base-uncased/config.json +23 -0
- bert-base-uncased/configuration.json +1 -0
- bert-base-uncased/coreml/fill-mask/float32_model.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- bert-base-uncased/coreml/fill-mask/float32_model.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- bert-base-uncased/coreml/fill-mask/float32_model.mlpackage/Manifest.json +18 -0
- bert-base-uncased/flax_model.msgpack +3 -0
- bert-base-uncased/model.onnx +3 -0
- bert-base-uncased/model.safetensors +3 -0
- bert-base-uncased/pytorch_model.bin +3 -0
- bert-base-uncased/rust_model.ot +3 -0
CLIP-ViT-H-14-laion2B-s32B-b79K/.mdl
ADDED
Binary file (60 Bytes).

CLIP-ViT-H-14-laion2B-s32B-b79K/.msc
ADDED
Binary file (1.05 kB).

CLIP-ViT-H-14-laion2B-s32B-b79K/.mv
ADDED
@@ -0,0 +1 @@
Revision:master,CreatedAt:1737604041

CLIP-ViT-H-14-laion2B-s32B-b79K/config.json
ADDED
@@ -0,0 +1,179 @@
{
  "architectures": [
    "CLIPModel"
  ],
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 1024,
  "text_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": 0,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 2,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "gelu",
    "hidden_size": 1024,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-05,
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 77,
    "min_length": 0,
    "model_type": "clip_text_model",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 24,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 1,
    "prefix": null,
    "problem_type": null,
    "projection_dim": 1024,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.21.3",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "vocab_size": 49408
  },
  "text_config_dict": {
    "hidden_act": "gelu",
    "hidden_size": 1024,
    "intermediate_size": 4096,
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "projection_dim": 1024
  },
  "torch_dtype": "float32",
  "transformers_version": null,
  "vision_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "gelu",
    "hidden_size": 1280,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "image_size": 224,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 5120,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-05,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "clip_vision_model",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_channels": 3,
    "num_hidden_layers": 32,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 14,
    "prefix": null,
    "problem_type": null,
    "projection_dim": 1024,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.21.3",
    "typical_p": 1.0,
    "use_bfloat16": false
  },
  "vision_config_dict": {
    "hidden_act": "gelu",
    "hidden_size": 1280,
    "intermediate_size": 5120,
    "num_attention_heads": 16,
    "num_hidden_layers": 32,
    "patch_size": 14,
    "projection_dim": 1024
  }
}

CLIP-ViT-H-14-laion2B-s32B-b79K/configuration.json
ADDED
@@ -0,0 +1 @@
{"framework": "pytorch", "task": "zero-shot-image-classification", "allow_remote": true}

CLIP-ViT-H-14-laion2B-s32B-b79K/merges.txt
ADDED
The diff for this file is too large to render.

CLIP-ViT-H-14-laion2B-s32B-b79K/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:036e6e2bd49697511f4f8b8cb5ee465f93025f7a69a145eadeb9a881ace9b18d
size 3944552236

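The three lines above are a Git LFS pointer: the weights themselves live in LFS storage, and the pointer records only the blob's SHA-256 (`oid`) and byte size. A minimal sketch for checking a fetched file against its pointer — the path and digest are the ones from this diff, and it assumes the large file has actually been pulled (e.g. with `git lfs pull`):

```python
# Sketch: verify a fetched LFS object against the oid/size in its pointer.
import hashlib
import os

path = "CLIP-ViT-H-14-laion2B-s32B-b79K/model.safetensors"
expected_oid = "036e6e2bd49697511f4f8b8cb5ee465f93025f7a69a145eadeb9a881ace9b18d"
expected_size = 3944552236

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("pointer verified")
```
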
CLIP-ViT-H-14-laion2B-s32B-b79K/open_clip_config.json
ADDED
@@ -0,0 +1,31 @@
{
  "model_cfg": {
    "embed_dim": 1024,
    "vision_cfg": {
      "image_size": 224,
      "layers": 32,
      "width": 1280,
      "head_width": 80,
      "patch_size": 14
    },
    "text_cfg": {
      "context_length": 77,
      "vocab_size": 49408,
      "width": 1024,
      "heads": 16,
      "layers": 24
    }
  },
  "preprocess_cfg": {
    "mean": [
      0.48145466,
      0.4578275,
      0.40821073
    ],
    "std": [
      0.26862954,
      0.26130258,
      0.27577711
    ]
  }
}

CLIP-ViT-H-14-laion2B-s32B-b79K/open_clip_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0084e75319a50ad85ef45377bad5bc38f2f58824459eb690048d51c9f8863be5
size 3944517836

CLIP-ViT-H-14-laion2B-s32B-b79K/open_clip_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9a78ef8e8c73fd0df621682e7a8e8eb36c6916cb3c16b291a082ecd52ab79cc4
size 3944692325

CLIP-ViT-H-14-laion2B-s32B-b79K/preprocessor_config.json
ADDED
@@ -0,0 +1,19 @@
{
  "crop_size": 224,
  "do_center_crop": true,
  "do_normalize": true,
  "do_resize": true,
  "feature_extractor_type": "CLIPFeatureExtractor",
  "image_mean": [
    0.48145466,
    0.4578275,
    0.40821073
  ],
  "image_std": [
    0.26862954,
    0.26130258,
    0.27577711
  ],
  "resample": 3,
  "size": 224
}

CLIP-ViT-H-14-laion2B-s32B-b79K/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d6d1e3e93c27d6e430db9d20c151d1baa4dc98b503b136b7d85a63f4b02c9749
size 3944736097

CLIP-ViT-H-14-laion2B-s32B-b79K/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}

CLIP-ViT-H-14-laion2B-s32B-b79K/tokenizer.json
ADDED
The diff for this file is too large to render.

CLIP-ViT-H-14-laion2B-s32B-b79K/tokenizer_config.json
ADDED
@@ -0,0 +1,34 @@
{
  "unk_token": {
    "content": "<|endoftext|>",
    "single_word": false,
    "lstrip": false,
    "rstrip": false,
    "normalized": true,
    "__type": "AddedToken"
  },
  "bos_token": {
    "content": "<|startoftext|>",
    "single_word": false,
    "lstrip": false,
    "rstrip": false,
    "normalized": true,
    "__type": "AddedToken"
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "single_word": false,
    "lstrip": false,
    "rstrip": false,
    "normalized": true,
    "__type": "AddedToken"
  },
  "pad_token": "<|endoftext|>",
  "add_prefix_space": false,
  "errors": "replace",
  "do_lower_case": true,
  "name_or_path": "openai/clip-vit-base-patch32",
  "model_max_length": 77,
  "special_tokens_map_file": "./special_tokens_map.json",
  "tokenizer_class": "CLIPTokenizer"
}

CLIP-ViT-H-14-laion2B-s32B-b79K/vocab.json
ADDED
The diff for this file is too large to render.

HPS_v2/HPS_v2.1_compressed.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:833ee243a86eeed6c23469bd1e5c440684f1cb416c454ebed6756643368876dc
size 1972298538

HPS_v2/HPS_v2_compressed.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1c9d89d82a8b2d52e4c3a6b193c83613c115b72cbad5e5a2da206386bb9f9a60
size 1972298538

ImageReward/ImageReward.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bee65c169a546a3f15be322bba94caf26d4028b18b62e0c63df11fb408f46c24
size 1786745628

ImageReward/med_config.json
ADDED
@@ -0,0 +1,22 @@
{
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "type_vocab_size": 2,
  "vocab_size": 30524,
  "encoder_width": 768,
  "add_cross_attention": true
}

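This config is an ordinary BERT configuration carrying BLIP-style extras (`encoder_width`, `add_cross_attention`) that ImageReward's text encoder reads. A minimal sketch of parsing it with `transformers`, assuming the file has been downloaded to the path shown in this diff:

```python
# Sketch: med_config.json parses as a regular transformers BertConfig;
# non-standard keys such as "encoder_width" are kept as extra attributes.
from transformers import BertConfig

cfg = BertConfig.from_json_file("ImageReward/med_config.json")
print(cfg.hidden_size, cfg.vocab_size)  # 768 30524
```
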
MPS_overall_checkpoint/.mdl
ADDED
Binary file (59 Bytes).

MPS_overall_checkpoint/.msc
ADDED
Binary file (245 Bytes).

MPS_overall_checkpoint/.mv
ADDED
@@ -0,0 +1 @@
Revision:master,CreatedAt:1732009312

MPS_overall_checkpoint/MPS_overall_checkpoint_diffsynth.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6e916bd991e752d30b684165c584737a839dba9b08cca5e7e23e29be3992cbdd
size 2209355522

MPS_overall_checkpoint/configuration.json
ADDED
@@ -0,0 +1 @@
{"framework": "pytorch", "task": "5", "allow_remote": true}

PickScore_v1/config.json
ADDED
@@ -0,0 +1,35 @@
{
  "_name_or_path": "yuvalkirstain/PickScore_v1",
  "architectures": [
    "CLIPModel"
  ],
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 1024,
  "text_config": {
    "bos_token_id": 0,
    "dropout": 0.0,
    "eos_token_id": 2,
    "hidden_act": "gelu",
    "hidden_size": 1024,
    "intermediate_size": 4096,
    "model_type": "clip_text_model",
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "projection_dim": 1024
  },
  "torch_dtype": "float32",
  "transformers_version": "4.46.2",
  "vision_config": {
    "dropout": 0.0,
    "hidden_act": "gelu",
    "hidden_size": 1280,
    "intermediate_size": 5120,
    "model_type": "clip_vision_model",
    "num_attention_heads": 16,
    "num_hidden_layers": 32,
    "patch_size": 14,
    "projection_dim": 1024
  }
}

PickScore_v1/merges.txt
ADDED
The diff for this file is too large to render.

PickScore_v1/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:744151efd8b04c050cb1021dd8168935ce2b48bd7746538850cee652fdebfc9d
size 3944549372

PickScore_v1/special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}

PickScore_v1/tokenizer.json
ADDED
The diff for this file is too large to render.

PickScore_v1/tokenizer_config.json
ADDED
@@ -0,0 +1,31 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "49406": {
      "content": "<|startoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49407": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|startoftext|>",
  "clean_up_tokenization_spaces": true,
  "do_lower_case": true,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "model_max_length": 77,
  "pad_token": "<|endoftext|>",
  "processor_class": "CLIPProcessor",
  "tokenizer_class": "CLIPTokenizer",
  "unk_token": "<|endoftext|>"
}

PickScore_v1/vocab.json
ADDED
The diff for this file is too large to render.

README.md
ADDED
@@ -0,0 +1,24 @@
---
license: apache-2.0
---
### The current model contributor has not provided a more detailed model description. Model files and weights can be accessed on the "Model Files" page.
#### You can download the model using the `git clone` command below, or via the ModelScope SDK.

SDK Download
```bash
# Install ModelScope
pip install modelscope
```
```python
# SDK model download
from modelscope import snapshot_download
model_dir = snapshot_download('DiffSynth-Studio/QualityMetric_reward_pretrained')
```

Git Download
```
# Git model download
git clone https://www.modelscope.cn/DiffSynth-Studio/QualityMetric_reward_pretrained.git
```

<p style="color: lightgrey;">If you are the contributor of this model, we invite you to improve the model card content according to the <a href="https://modelscope.cn/docs/ModelScope%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%85%A5%E6%B5%81%E7%A8%8B%E6%A6%82%E8%A7%88" style="color: lightgrey; text-decoration: underline;">model contribution guidelines</a>.</p>

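The checkpoints bundled in this upload are standard Hugging Face formats, so a minimal usage sketch follows from the README's download snippet. This is an illustration, not part of the model card: the subfolder name is taken from this diff's file list, and PickScore_v1 is assumed loadable as a plain `CLIPModel` checkpoint (its `config.json` above declares `"architectures": ["CLIPModel"]`).

```python
# Sketch: load one of the bundled reward backbones from the snapshot.
from modelscope import snapshot_download
from transformers import CLIPModel, CLIPTokenizer

model_dir = snapshot_download('DiffSynth-Studio/QualityMetric_reward_pretrained')

# PickScore_v1 ships CLIP weights plus a CLIP tokenizer (vocab.json/merges.txt).
model = CLIPModel.from_pretrained(f"{model_dir}/PickScore_v1")
tokenizer = CLIPTokenizer.from_pretrained(f"{model_dir}/PickScore_v1")
```
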
README_from_modelscope.md
ADDED
@@ -0,0 +1,47 @@
---
license: Apache License 2.0

#model-type:
##e.g. gpt, phi, llama, chatglm, baichuan, etc.
#- gpt

#domain:
##e.g. nlp, cv, audio, multi-modal
#- nlp

#language:
##language code list: https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
#- cn

#metrics:
##e.g. CIDEr, Blue, ROUGE, etc.
#- CIDEr

#tags:
##custom tags, covering training methods such as pretrained, fine-tuned, instruction-tuned, RL-tuned, and others
#- pretrained

#tools:
##e.g. vllm, fastchat, llamacpp, AdaSeq, etc.
#- vllm
---
### The current model contributor has not provided a more detailed model description. Model files and weights can be accessed on the "Model Files" page.
#### You can download the model using the git clone command below, or via the ModelScope SDK

SDK download
```bash
# Install ModelScope
pip install modelscope
```
```python
# SDK model download
from modelscope import snapshot_download
model_dir = snapshot_download('DiffSynth-Studio/QualityMetric_reward_pretrained')
```
Git download
```
# Git model download
git clone https://www.modelscope.cn/DiffSynth-Studio/QualityMetric_reward_pretrained.git
```

<p style="color: lightgrey;">If you are the contributor of this model, we invite you to complete the model card content in accordance with the <a href="https://modelscope.cn/docs/ModelScope%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%85%A5%E6%B5%81%E7%A8%8B%E6%A6%82%E8%A7%88" style="color: lightgrey; text-decoration: underline;">model contribution documentation</a>.</p>

aesthetic-predictor/sac+logos+ava1-l14-linearMSE.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ca3d182bacb66cfb8b3062347d2656dbeae6f8ef90cec45f01775fcaf3e58c67
size 3712718

bert-base-uncased/.mdl
ADDED
Binary file (54 Bytes).

bert-base-uncased/.msc
ADDED
Binary file (1.28 kB).

bert-base-uncased/.mv
ADDED
@@ -0,0 +1 @@
Revision:v1.0.0,CreatedAt:1694506367

bert-base-uncased/LICENSE
ADDED
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

bert-base-uncased/README.md
ADDED
@@ -0,0 +1,251 @@
---
language: en
tags:
- exbert
license: apache-2.0
datasets:
- bookcorpus
- wikipedia
---

# BERT base model (uncased)

Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1810.04805) and first released in
[this repository](https://github.com/google-research/bert). This model is uncased: it does not make a difference
between english and English.

Disclaimer: The team releasing BERT did not write a model card for this model, so this model card has been written by
the Hugging Face team.

## Model description

BERT is a transformers model pretrained on a large corpus of English data in a self-supervised fashion. This means it
was pretrained on the raw texts only, with no humans labeling them in any way (which is why it can use lots of
publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it
was pretrained with two objectives:

- Masked language modeling (MLM): taking a sentence, the model randomly masks 15% of the words in the input, then runs
the entire masked sentence through the model and has to predict the masked words. This is different from traditional
recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like
GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of the
sentence.
- Next sentence prediction (NSP): the model concatenates two masked sentences as inputs during pretraining. Sometimes
they correspond to sentences that were next to each other in the original text, sometimes not. The model then has to
predict if the two sentences were following each other or not.

This way, the model learns an inner representation of the English language that can then be used to extract features
useful for downstream tasks: if you have a dataset of labeled sentences, for instance, you can train a standard
classifier using the features produced by the BERT model as inputs.

## Model variations

BERT was originally released in base and large variations, for cased and uncased input text. The uncased models also strip out accent markers.
Chinese and multilingual uncased and cased versions followed shortly after.
Modified preprocessing with whole word masking replaced subpiece masking in a following work, with the release of two models.
24 smaller models were released afterward.

The detailed release history can be found on the [google-research/bert readme](https://github.com/google-research/bert/blob/master/README.md) on github.

| Model | #params | Language |
|------------------------|--------------------------------|-------|
| [`bert-base-uncased`](https://huggingface.co/bert-base-uncased) | 110M | English |
| [`bert-large-uncased`](https://huggingface.co/bert-large-uncased) | 340M | English |
| [`bert-base-cased`](https://huggingface.co/bert-base-cased) | 110M | English |
| [`bert-large-cased`](https://huggingface.co/bert-large-cased) | 340M | English |
| [`bert-base-chinese`](https://huggingface.co/bert-base-chinese) | 110M | Chinese |
| [`bert-base-multilingual-cased`](https://huggingface.co/bert-base-multilingual-cased) | 110M | Multiple |
| [`bert-large-uncased-whole-word-masking`](https://huggingface.co/bert-large-uncased-whole-word-masking) | 340M | English |
| [`bert-large-cased-whole-word-masking`](https://huggingface.co/bert-large-cased-whole-word-masking) | 340M | English |

## Intended uses & limitations

You can use the raw model for either masked language modeling or next sentence prediction, but it's mostly intended to
be fine-tuned on a downstream task. See the [model hub](https://huggingface.co/models?filter=bert) to look for
fine-tuned versions on a task that interests you.

Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked)
to make decisions, such as sequence classification, token classification or question answering. For tasks such as text
generation you should look at models like GPT2.

### How to use

You can use this model directly with a pipeline for masked language modeling:

```python
>>> from transformers import pipeline
>>> unmasker = pipeline('fill-mask', model='bert-base-uncased')
>>> unmasker("Hello I'm a [MASK] model.")

[{'sequence': "[CLS] hello i'm a fashion model. [SEP]",
  'score': 0.1073106899857521,
  'token': 4827,
  'token_str': 'fashion'},
 {'sequence': "[CLS] hello i'm a role model. [SEP]",
  'score': 0.08774490654468536,
  'token': 2535,
  'token_str': 'role'},
 {'sequence': "[CLS] hello i'm a new model. [SEP]",
  'score': 0.05338378623127937,
  'token': 2047,
  'token_str': 'new'},
 {'sequence': "[CLS] hello i'm a super model. [SEP]",
  'score': 0.04667217284440994,
  'token': 3565,
  'token_str': 'super'},
 {'sequence': "[CLS] hello i'm a fine model. [SEP]",
  'score': 0.027095865458250046,
  'token': 2986,
  'token_str': 'fine'}]
```

Here is how to use this model to get the features of a given text in PyTorch:

```python
from transformers import BertTokenizer, BertModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained("bert-base-uncased")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
```

and in TensorFlow:

```python
from transformers import BertTokenizer, TFBertModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = TFBertModel.from_pretrained("bert-base-uncased")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='tf')
output = model(encoded_input)
```

### Limitations and bias

Even if the training data used for this model could be characterized as fairly neutral, this model can have biased
predictions:

```python
>>> from transformers import pipeline
>>> unmasker = pipeline('fill-mask', model='bert-base-uncased')
>>> unmasker("The man worked as a [MASK].")

[{'sequence': '[CLS] the man worked as a carpenter. [SEP]',
  'score': 0.09747550636529922,
  'token': 10533,
  'token_str': 'carpenter'},
 {'sequence': '[CLS] the man worked as a waiter. [SEP]',
  'score': 0.0523831807076931,
  'token': 15610,
  'token_str': 'waiter'},
 {'sequence': '[CLS] the man worked as a barber. [SEP]',
  'score': 0.04962705448269844,
  'token': 13362,
  'token_str': 'barber'},
 {'sequence': '[CLS] the man worked as a mechanic. [SEP]',
  'score': 0.03788609802722931,
  'token': 15893,
  'token_str': 'mechanic'},
 {'sequence': '[CLS] the man worked as a salesman. [SEP]',
  'score': 0.037680890411138535,
  'token': 18968,
  'token_str': 'salesman'}]

>>> unmasker("The woman worked as a [MASK].")

[{'sequence': '[CLS] the woman worked as a nurse. [SEP]',
  'score': 0.21981462836265564,
  'token': 6821,
  'token_str': 'nurse'},
 {'sequence': '[CLS] the woman worked as a waitress. [SEP]',
  'score': 0.1597415804862976,
  'token': 13877,
  'token_str': 'waitress'},
 {'sequence': '[CLS] the woman worked as a maid. [SEP]',
  'score': 0.1154729500412941,
  'token': 10850,
  'token_str': 'maid'},
 {'sequence': '[CLS] the woman worked as a prostitute. [SEP]',
  'score': 0.037968918681144714,
  'token': 19215,
  'token_str': 'prostitute'},
 {'sequence': '[CLS] the woman worked as a cook. [SEP]',
  'score': 0.03042375110089779,
  'token': 5660,
  'token_str': 'cook'}]
```

This bias will also affect all fine-tuned versions of this model.

## Training data

The BERT model was pretrained on [BookCorpus](https://yknzhu.wixsite.com/mbweb), a dataset consisting of 11,038
unpublished books and [English Wikipedia](https://en.wikipedia.org/wiki/English_Wikipedia) (excluding lists, tables and
headers).

## Training procedure

### Preprocessing

The texts are lowercased and tokenized using WordPiece and a vocabulary size of 30,000. The inputs of the model are
then of the form:

```
[CLS] Sentence A [SEP] Sentence B [SEP]
```

With probability 0.5, sentence A and sentence B correspond to two consecutive sentences in the original corpus, and in
the other cases, it's another random sentence in the corpus. Note that what is considered a sentence here is a
consecutive span of text usually longer than a single sentence. The only constraint is that the result with the two
"sentences" has a combined length of less than 512 tokens.

The details of the masking procedure for each sentence are the following:
- 15% of the tokens are masked.
- In 80% of the cases, the masked tokens are replaced by `[MASK]`.
- In 10% of the cases, the masked tokens are replaced by a random token (different) from the one they replace.
- In the 10% remaining cases, the masked tokens are left as is.

### Pretraining

The model was trained on 4 cloud TPUs in Pod configuration (16 TPU chips total) for one million steps with a batch size
of 256. The sequence length was limited to 128 tokens for 90% of the steps and 512 for the remaining 10%. The optimizer
used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01,
learning rate warmup for 10,000 steps and linear decay of the learning rate after.

## Evaluation results

When fine-tuned on downstream tasks, this model achieves the following results:

Glue test results:

| Task | MNLI-(m/mm) | QQP  | QNLI | SST-2 | CoLA | STS-B | MRPC | RTE  | Average |
|:----:|:-----------:|:----:|:----:|:-----:|:----:|:-----:|:----:|:----:|:-------:|
|      | 84.6/83.4   | 71.2 | 90.5 | 93.5  | 52.1 | 85.8  | 88.9 | 66.4 | 79.6    |

### BibTeX entry and citation info

```bibtex
@article{DBLP:journals/corr/abs-1810-04805,
  author    = {Jacob Devlin and
               Ming{-}Wei Chang and
               Kenton Lee and
               Kristina Toutanova},
  title     = {{BERT:} Pre-training of Deep Bidirectional Transformers for Language
               Understanding},
  journal   = {CoRR},
  volume    = {abs/1810.04805},
  year      = {2018},
  url       = {http://arxiv.org/abs/1810.04805},
  archivePrefix = {arXiv},
  eprint    = {1810.04805},
  timestamp = {Tue, 30 Oct 2018 20:39:56 +0100},
  biburl    = {https://dblp.org/rec/journals/corr/abs-1810-04805.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```

<a href="https://huggingface.co/exbert/?model=bert-base-uncased">
	<img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png">
</a>

bert-base-uncased/config.json
ADDED
@@ -0,0 +1,23 @@
{
  "architectures": [
    "BertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "transformers_version": "4.6.0.dev0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 30522
}

bert-base-uncased/configuration.json
ADDED
@@ -0,0 +1 @@
{"framework": "pytorch", "task": "backbone", "allow_remote": true}

bert-base-uncased/coreml/fill-mask/float32_model.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:59ebda1b73ce46947d8e6be8b39f018aae7d6c4d5809537225fdaaadd940e993
size 164911

bert-base-uncased/coreml/fill-mask/float32_model.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c0c9f4914b4f0053785e96b8583cd59cda63c2f1b6e95a03970d772bf097e289
size 531833856

bert-base-uncased/coreml/fill-mask/float32_model.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
{
  "fileFormatVersion": "1.0.0",
  "itemInfoEntries": {
    "9D749A46-ADA0-43CA-B5C2-8E722B91F41E": {
      "author": "com.apple.CoreML",
      "description": "CoreML Model Specification",
      "name": "model.mlmodel",
      "path": "com.apple.CoreML/model.mlmodel"
    },
    "D545B13F-2D5E-4CFB-BFF1-C10E9EFD70DA": {
      "author": "com.apple.CoreML",
      "description": "CoreML Model Weights",
      "name": "weights",
      "path": "com.apple.CoreML/weights"
    }
  },
  "rootModelIdentifier": "9D749A46-ADA0-43CA-B5C2-8E722B91F41E"
}

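The manifest simply maps item UUIDs to the spec (`model.mlmodel`) and weights inside the `.mlpackage`. A hedged sketch of opening the package with `coremltools` — this assumes the package has been downloaded locally; prediction requires macOS, so the spec is loaded without compiling the model:

```python
# Sketch: inspect the Core ML package whose manifest is shown above.
import coremltools as ct

mlmodel = ct.models.MLModel(
    "bert-base-uncased/coreml/fill-mask/float32_model.mlpackage",
    skip_model_load=True,  # read the spec without a macOS Core ML runtime
)
print(mlmodel.get_spec().description)
```
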
bert-base-uncased/flax_model.msgpack
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ea201fabe466ef7182f1f687fb5be4b62a73d3a78883f11264ff7f682cdb54bf
size 438064459

bert-base-uncased/model.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:44d7a2896d341c51fb1eba89aea3a590e6af0ce33e25481136f7eeecb62e5f7f
size 532091246

bert-base-uncased/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:68d45e234eb4a928074dfd868cead0219ab85354cc53d20e772753c6bb9169d3
size 440449768

bert-base-uncased/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:097417381d6c7230bd9e3557456d726de6e83245ec8b24f529f60198a67b203a
size 440473133

bert-base-uncased/rust_model.ot
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:afd9aa425fd45c5655d3d43a0d041f9b76729bf475d6c017a0e9304a38f89972
size 534240408