Upload 12 files
- README.md +33 -0
- config.json +75 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- rng_state.pth +3 -0
- scheduler.pt +3 -0
- special_tokens_map.json +15 -0
- tokenizer.json +0 -0
- tokenizer_config.json +57 -0
- trainer_state.json +0 -0
- training_args.bin +3 -0
- vocab.json +0 -0
README.md
CHANGED
@@ -1,3 +1,36 @@
 ---
+language: en
+tags:
+- text-classification
+- hazard-detection
+datasets:
+- your-dataset-name
 license: apache-2.0
+model_name: Quintu/roberta-large-1280-product
+library_name: transformers
+pipeline_tag: text-classification
 ---
+# Quintu/roberta-large-1280-product
+
+The `Quintu/roberta-large-1280-product` model is designed for text classification related to product detection.
+
+## Usage
+
+Below is how to use this model with the `transformers` library:
+
+```python
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+# Load the model and tokenizer
+model_name = "Quintu/roberta-large-1280-product"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForSequenceClassification.from_pretrained(model_name)
+
+# Use the model to classify a text
+text = "This is an example text to classify."
+inputs = tokenizer(text, return_tensors="pt")
+outputs = model(**inputs)
+
+# Prediction
+logits = outputs.logits
+print(logits)
+```

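The README snippet stops at printing raw logits. As a minimal sketch (not part of the uploaded files), assuming the `model`, `tokenizer`, and `logits` variables from the snippet above and an installed `torch`, the predicted category can be read off via the `id2label` mapping shipped in the config.json added below:

```python
import torch

# Turn the raw logits into probabilities and pick the top-scoring class
probs = torch.softmax(logits, dim=-1)
pred_id = int(probs.argmax(dim=-1).item())

# id2label is populated from config.json (22 food/feed product categories)
print(model.config.id2label[pred_id], float(probs[0, pred_id]))
```
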
config.json
ADDED
@@ -0,0 +1,75 @@
{
  "_name_or_path": "FacebookAI/roberta-large",
  "architectures": [
    "RobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "id2label": {
    "0": "alcoholic beverages",
    "1": "cereals and bakery products",
    "2": "cocoa and cocoa preparations, coffee and tea",
    "3": "confectionery",
    "4": "dietetic foods, food supplements, fortified foods",
    "5": "fats and oils",
    "6": "feed materials",
    "7": "food additives and flavourings",
    "8": "food contact materials",
    "9": "fruits and vegetables",
    "10": "herbs and spices",
    "11": "honey and royal jelly",
    "12": "ices and desserts",
    "13": "meat, egg and dairy products",
    "14": "non-alcoholic beverages",
    "15": "nuts, nut products and seeds",
    "16": "other food product / mixed",
    "17": "pet feed",
    "18": "prepared dishes and snacks",
    "19": "seafood",
    "20": "soups, broths, sauces and condiments",
    "21": "sugars and syrups"
  },
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "label2id": {
    "alcoholic beverages": 0,
    "cereals and bakery products": 1,
    "cocoa and cocoa preparations, coffee and tea": 2,
    "confectionery": 3,
    "dietetic foods, food supplements, fortified foods": 4,
    "fats and oils": 5,
    "feed materials": 6,
    "food additives and flavourings": 7,
    "food contact materials": 8,
    "fruits and vegetables": 9,
    "herbs and spices": 10,
    "honey and royal jelly": 11,
    "ices and desserts": 12,
    "meat, egg and dairy products": 13,
    "non-alcoholic beverages": 14,
    "nuts, nut products and seeds": 15,
    "other food product / mixed": 16,
    "pet feed": 17,
    "prepared dishes and snacks": 18,
    "seafood": 19,
    "soups, broths, sauces and condiments": 20,
    "sugars and syrups": 21
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.44.2",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 50265
}

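This config defines 22 product categories via `id2label`/`label2id`. A small sketch (assuming the repository is reachable on the Hub or cloned locally) of inspecting those labels without loading the ~1.4 GB checkpoint:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Quintu/roberta-large-1280-product")

print(config.num_labels)             # 22, derived from id2label
print(config.id2label[19])           # "seafood"
print(config.label2id["pet feed"])   # 17
```
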
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.

model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:36ff39e05c7060351e3446611bbb374eb0649c8631718b2f408ec60f4ea7187a
size 1421577416

rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6093c66e8ba31c4412cd5af2416b89cbaf26cb670a75f147fbd704268104187a
size 14244

scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:acf8ba5c7bc6f32216dbde8a7cde7b5976d9d1be226ec4f79d98c6a96b816dca
size 1064

special_tokens_map.json
ADDED
@@ -0,0 +1,15 @@
{
  "bos_token": "<s>",
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "unk_token": "<unk>"
}

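These are the standard RoBERTa special tokens. As a quick illustrative sketch (the sentence is invented; `tokenizer` is the one loaded in the README example), encoding wraps the input in the `<s>`/`</s>` markers declared here:

```python
ids = tokenizer("Frozen shrimp recalled")["input_ids"]

print(tokenizer.convert_ids_to_tokens(ids)[0])   # '<s>'
print(tokenizer.convert_ids_to_tokens(ids)[-1])  # '</s>'
print(tokenizer.decode(ids))                     # the sentence wrapped in <s> ... </s>
```
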
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.

tokenizer_config.json
ADDED
@@ -0,0 +1,57 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "50264": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "errors": "replace",
  "mask_token": "<mask>",
  "model_max_length": 512,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "RobertaTokenizer",
  "trim_offsets": true,
  "unk_token": "<unk>"
}

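`model_max_length` is 512 here, so longer inputs should be truncated before they reach the model. A hedged sketch of batched preprocessing (the example texts are invented; `tokenizer` is the one from the README example):

```python
texts = [
    "Short product description.",
    "A much longer product description that may exceed the limit ...",
]

# Truncate to the 512-token limit and pad to the longest item in the batch
batch = tokenizer(texts, truncation=True, padding=True, return_tensors="pt")
print(batch["input_ids"].shape)  # (2, sequence_length)
```
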
trainer_state.json
ADDED
The diff for this file is too large to render. See raw diff.

training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eb860d611504c1ee9ef4db9d134aa0b26574b52720e4520993d1b007e2d2687f
size 5240

vocab.json
ADDED
The diff for this file is too large to render. See raw diff.