CYONG committed on
Commit
8a3c32b
·
1 Parent(s): 58adac4

Training in progress epoch 0

Browse files
Files changed (5) hide show
  1. README.md +9 -9
  2. config.json +70 -15
  3. tf_model.h5 +2 -2
  4. tokenizer_config.json +4 -1
  5. vocab.txt +0 -0
README.md CHANGED
@@ -1,6 +1,5 @@
1
  ---
2
- license: apache-2.0
3
- base_model: distilbert-base-multilingual-cased
4
  tags:
5
  - generated_from_keras_callback
6
  model-index:
@@ -13,10 +12,11 @@ probably proofread and complete it, then remove this comment. -->
13
 
14
  # CYONG/v1
15
 
16
- This model is a fine-tuned version of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) on an unknown dataset.
17
  It achieves the following results on the evaluation set:
18
- - Train Loss: 1.3818
19
- - Validation Loss: 0.8440
 
20
  - Epoch: 0
21
 
22
  ## Model description
@@ -36,14 +36,14 @@ More information needed
36
  ### Training hyperparameters
37
 
38
  The following hyperparameters were used during training:
39
- - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 1510100, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}
40
  - training_precision: float32
41
 
42
  ### Training results
43
 
44
- | Train Loss | Validation Loss | Epoch |
45
- |:----------:|:---------------:|:-----:|
46
- | 1.3818 | 0.8440 | 0 |
47
 
48
 
49
  ### Framework versions
 
1
  ---
2
+ base_model: monologg/koelectra-small-finetuned-sentiment
 
3
  tags:
4
  - generated_from_keras_callback
5
  model-index:
 
12
 
13
  # CYONG/v1
14
 
15
+ This model is a fine-tuned version of [monologg/koelectra-small-finetuned-sentiment](https://huggingface.co/monologg/koelectra-small-finetuned-sentiment) on an unknown dataset.
16
  It achieves the following results on the evaluation set:
17
+ - Train Loss: 3.0778
18
+ - Validation Loss: 3.0413
19
+ - Train Accuracy: 0.5828
20
  - Epoch: 0
21
 
22
  ## Model description
 
36
  ### Training hyperparameters
37
 
38
  The following hyperparameters were used during training:
39
+ - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 95, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}
40
  - training_precision: float32
41
 
42
  ### Training results
43
 
44
+ | Train Loss | Validation Loss | Train Accuracy | Epoch |
45
+ |:----------:|:---------------:|:--------------:|:-----:|
46
+ | 3.0778 | 3.0413 | 0.5828 | 0 |
47
 
48
 
49
  ### Framework versions
config.json CHANGED
@@ -1,24 +1,79 @@
1
  {
2
- "_name_or_path": "distilbert-base-multilingual-cased",
3
- "activation": "gelu",
4
  "architectures": [
5
- "DistilBertForQuestionAnswering"
6
  ],
7
- "attention_dropout": 0.1,
8
- "dim": 768,
9
- "dropout": 0.1,
10
- "hidden_dim": 3072,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  "initializer_range": 0.02,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  "max_position_embeddings": 512,
13
- "model_type": "distilbert",
14
- "n_heads": 12,
15
- "n_layers": 6,
16
  "output_past": true,
17
  "pad_token_id": 0,
18
- "qa_dropout": 0.1,
19
- "seq_classif_dropout": 0.2,
20
- "sinusoidal_pos_embds": false,
21
- "tie_weights_": true,
 
22
  "transformers_version": "4.31.0",
23
- "vocab_size": 119547
 
 
24
  }
 
1
  {
2
+ "_name_or_path": "monologg/koelectra-small-finetuned-sentiment",
3
+ "_num_labels": 2,
4
  "architectures": [
5
+ "ElectraForSequenceClassification"
6
  ],
7
+ "attention_probs_dropout_prob": 0.1,
8
+ "classifier_dropout": null,
9
+ "embedding_size": 128,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 256,
13
+ "id2label": {
14
+ "0": "quota",
15
+ "1": "\uae30\ubd80 \uc694\uccad",
16
+ "2": "\uac8c\uc2dc\uae00",
17
+ "3": "\ub178\ud2b8",
18
+ "4": "\uc774\ubbf8\uc9c0",
19
+ "5": "\uc804\uccb4",
20
+ "6": "\ucd2c\uc601",
21
+ "7": "\ubb38\uc11c\uc778\uc2dd",
22
+ "8": "\ube5b \ubc1d\uae30",
23
+ "9": "\ub354\ubcf4\uae30",
24
+ "10": "\ubb38\uc790\uc2a4\uce94",
25
+ "11": "AI\ubaa8\ub4dc",
26
+ "12": "\uc0c9\uc0c1\uc778\uc2dd",
27
+ "13": "\ud604\uc7ac\ubaa8\ub4dc",
28
+ "14": "\uc774\ubbf8\uc9c0\ubb18\uc0ac",
29
+ "15": "\ubb3c\uac74\ucc3e\uae30",
30
+ "16": "\uc5bc\uad74\uc778\uc2dd",
31
+ "17": "\ub9ac\uc5bc\uc544\uc774\uc988",
32
+ "18": "\uc637\uc778\uc2dd",
33
+ "19": "\uc9c0\ud3d0\uc778\uc2dd",
34
+ "20": "\ub3cb\ubcf4\uae30",
35
+ "21": "\ubb38\uc790\uc778\uc2dd"
36
+ },
37
  "initializer_range": 0.02,
38
+ "intermediate_size": 1024,
39
+ "label2id": {
40
+ "AI\ubaa8\ub4dc": 11,
41
+ "quota": 0,
42
+ "\uac8c\uc2dc\uae00": 2,
43
+ "\uae30\ubd80 \uc694\uccad": 1,
44
+ "\ub178\ud2b8": 3,
45
+ "\ub354\ubcf4\uae30": 9,
46
+ "\ub3cb\ubcf4\uae30": 20,
47
+ "\ub9ac\uc5bc\uc544\uc774\uc988": 17,
48
+ "\ubb38\uc11c\uc778\uc2dd": 7,
49
+ "\ubb38\uc790\uc2a4\uce94": 10,
50
+ "\ubb38\uc790\uc778\uc2dd": 21,
51
+ "\ubb3c\uac74\ucc3e\uae30": 15,
52
+ "\ube5b \ubc1d\uae30": 8,
53
+ "\uc0c9\uc0c1\uc778\uc2dd": 12,
54
+ "\uc5bc\uad74\uc778\uc2dd": 16,
55
+ "\uc637\uc778\uc2dd": 18,
56
+ "\uc774\ubbf8\uc9c0": 4,
57
+ "\uc774\ubbf8\uc9c0\ubb18\uc0ac": 14,
58
+ "\uc804\uccb4": 5,
59
+ "\uc9c0\ud3d0\uc778\uc2dd": 19,
60
+ "\ucd2c\uc601": 6,
61
+ "\ud604\uc7ac\ubaa8\ub4dc": 13
62
+ },
63
+ "layer_norm_eps": 1e-12,
64
  "max_position_embeddings": 512,
65
+ "model_type": "electra",
66
+ "num_attention_heads": 4,
67
+ "num_hidden_layers": 12,
68
  "output_past": true,
69
  "pad_token_id": 0,
70
+ "position_embedding_type": "absolute",
71
+ "summary_activation": "gelu",
72
+ "summary_last_dropout": 0.1,
73
+ "summary_type": "first",
74
+ "summary_use_proj": true,
75
  "transformers_version": "4.31.0",
76
+ "type_vocab_size": 2,
77
+ "use_cache": true,
78
+ "vocab_size": 32200
79
  }
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:09d8225d6a34531f6da389f7cf1f2d41b46bf3c68481d27c48f030c780c247f7
3
- size 539068456
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1092d6e85ccdab854d2f6ba0c2be3b92f1674eef878ed2c20d131d3bce337161
3
+ size 55349584
tokenizer_config.json CHANGED
@@ -1,13 +1,16 @@
1
  {
2
  "clean_up_tokenization_spaces": true,
3
  "cls_token": "[CLS]",
 
4
  "do_lower_case": false,
5
  "mask_token": "[MASK]",
 
6
  "model_max_length": 512,
 
7
  "pad_token": "[PAD]",
8
  "sep_token": "[SEP]",
9
  "strip_accents": null,
10
  "tokenize_chinese_chars": true,
11
- "tokenizer_class": "DistilBertTokenizer",
12
  "unk_token": "[UNK]"
13
  }
 
1
  {
2
  "clean_up_tokenization_spaces": true,
3
  "cls_token": "[CLS]",
4
+ "do_basic_tokenize": true,
5
  "do_lower_case": false,
6
  "mask_token": "[MASK]",
7
+ "max_len": 512,
8
  "model_max_length": 512,
9
+ "never_split": null,
10
  "pad_token": "[PAD]",
11
  "sep_token": "[SEP]",
12
  "strip_accents": null,
13
  "tokenize_chinese_chars": true,
14
+ "tokenizer_class": "ElectraTokenizer",
15
  "unk_token": "[UNK]"
16
  }
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff