QuantaSparkLabs committed
Commit 051b6dc · verified · 1 Parent(s): 6f129f3

quantasparklabs/prompt-intent-mini

Files changed (4)
  1. README.md +53 -0
  2. config.json +42 -0
  3. model.safetensors +3 -0
  4. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: distilbert-base-uncased
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: intent_model
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # intent_model
+
+ This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unspecified dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: adamw_torch_fused (betas=(0.9, 0.999), epsilon=1e-08, no additional optimizer arguments)
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 5.0.0
+ - Pytorch 2.10.0+cu128
+ - Datasets 4.0.0
+ - Tokenizers 0.22.2
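For reference, a minimal sketch of how the hyperparameters listed in the card might be reproduced with `TrainingArguments` and `Trainer`. The `train_dataset` and `eval_dataset` objects are hypothetical placeholders, since the card does not say what data was used:

```python
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

# Values copied from the hyperparameter list in the card above.
args = TrainingArguments(
    output_dir="intent_model",
    learning_rate=5e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    seed=42,
    optim="adamw_torch_fused",  # OptimizerNames.ADAMW_TORCH_FUSED
    lr_scheduler_type="linear",
    num_train_epochs=5,
)

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased",
    num_labels=5,  # question, code, creative, chat, math (see config.json below)
)

# Placeholders: the card does not name a training or evaluation dataset.
# trainer = Trainer(model=model, args=args,
#                   train_dataset=train_dataset, eval_dataset=eval_dataset)
# trainer.train()
```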
config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": null,
+   "dim": 768,
+   "dropout": 0.1,
+   "dtype": "float32",
+   "eos_token_id": null,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "question",
+     "1": "code",
+     "2": "creative",
+     "3": "chat",
+     "4": "math"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "chat": 3,
+     "code": 1,
+     "creative": 2,
+     "math": 4,
+     "question": 0
+   },
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "tie_word_embeddings": true,
+   "transformers_version": "5.0.0",
+   "use_cache": false,
+   "vocab_size": 30522
+ }
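Given the `id2label` mapping above, a hedged inference sketch; the repo id is taken from the commit header, and the commented output is illustrative, not a recorded prediction:

```python
from transformers import pipeline

# Repo id assumed from the commit header above.
classifier = pipeline(
    "text-classification",
    model="quantasparklabs/prompt-intent-mini",
)

print(classifier("Write a haiku about the sea"))
# Illustrative shape; the label string comes from id2label in config.json:
# [{'label': 'creative', 'score': ...}]
```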
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4198570c51f854b62611f469b2801715c991a801f514702f0669af026e2fa5af
+ size 267841796
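The file above is a Git LFS pointer, not the weights themselves. As a sketch, one way to confirm that a locally materialized `model.safetensors` (after `git lfs pull` or a hub download) matches the digest recorded in the pointer:

```python
import hashlib

# sha256 recorded in the LFS pointer above.
EXPECTED = "4198570c51f854b62611f469b2801715c991a801f514702f0669af026e2fa5af"

sha = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    # Hash in 1 MiB chunks to avoid loading the ~268 MB file at once.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED, "checksum mismatch"
```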
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:811bab6d49b3cbb03e68f25a1a8c59fdf02f270705d81788f3f27a6dae27cdf2
+ size 5137
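`training_args.bin` is a pickled `TrainingArguments` object. A hedged sketch of inspecting it locally; note that unpickling arbitrary files is unsafe, so only do this for sources you trust:

```python
import torch

# Recent PyTorch defaults torch.load to weights_only=True, which rejects
# pickled Python objects, so it must be disabled for this file.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs)
```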