---
# config.yaml
#
# Training/evaluation configuration for a BERT text-classification run
# (bert-base-uncased fine-tuned on IMDB). Sections: model selection,
# training hyperparameters, task/dataset, deployment/runtime, experiment
# logging, and evaluation/early-stopping settings.

model:
  name: "bert-base-uncased"
  type: "transformer"
  provider: "huggingface"
  tokenizer: "bert-base-uncased"
  revision: "main"

hyperparameters:
  batch_size: 32
  # NOTE: written as 2.0e-5 (not 2e-5) — PyYAML's YAML 1.1 float resolver
  # requires a dot before the exponent, so a bare 2e-5 would load as the
  # *string* "2e-5" instead of a float.
  learning_rate: 2.0e-5
  epochs: 3
  max_seq_length: 128
  optimizer: "AdamW"
  weight_decay: 0.01
  dropout: 0.1

task:
  type: "text-classification"  # choices: text-generation, summarization, question-answering, etc.
  dataset: "imdb"
  metric: "accuracy"
  augmentations:
    - "synonym_replacement"
    - "random_insertion"

deployment:
  device: "cuda"  # options: cuda, cpu, mps, tpu
  precision: "float16"  # options: float32, float16, bfloat16
  cache_dir: "./model_cache"
  save_steps: 500

logging:
  use_comet: true
  comet_workspace: "your-comet-workspace"
  experiment_name: "bert_imdb_classification"
  log_interval: 50

evaluation:
  validation_split: 0.1
  test_split: 0.1
  early_stopping: true
  patience: 2
  checkpoint_metric: "val_loss"