---
# a179149
_config_info:
  create_time: '2023-12-12 10:00:56'
  use_default_base_config: true
  config_filepath:
  - /data/yangl/LDNet-main/conf/Pretrain_ld.yaml
task_type: SchemaGuidedInstructBertTask
task_name: LDNet_Pretrain
comment: '~~content as label, (start, end + 1) span'
output_dir: LDNet_outputs
task_dir: LDNet_outputs/LDNet_Pretrain
dump_cache_dir: LDNet_outputs/LDNet_Pretrain/cache
regenerate_cache: false
data_dir: resources/Mirror/v1.4_sampled_v3/merged/all_excluded
train_filepath: resources/Mirror/v1.4_sampled_v3/merged/all_excluded/train.jsonl
dev_filepath: resources/Mirror/v1.4_sampled_v3/merged/all_excluded/dev.jsonl
test_filepath: resources/Mirror/v1.4_sampled_v3/merged/all_excluded/test.jsonl
random_seed: 1227
num_epochs: 3
num_steps: -1
warmup_proportion: 0.1
epoch_patience: -1
step_patience: -1
batch_size: 64
learning_rate: 2.0e-05
max_grad_norm: 1.0
skip_train: false
debug_mode: false
grad_accum_steps: 1
resumed_training_path: null
step_eval_interval: 5000
epoch_eval_interval: 1
eval_on_data:
- train
select_best_on_data: train
select_best_by_key: loss
best_metric_field: micro.f1
save_every_ckpt: true
save_best_ckpt: true
final_eval_on_test: false
main_process_logging: true
max_seq_len: 512
label_span: tag
mode: span
stream_mode: false
bce: true
kd: false
kd_file: "None"  # NOTE(review): unquoted None parses as the string "None", not null — confirm intent; sibling unset keys (resumed_training_path, base_model_path) use null
fewshot: false
zeroshot: false
generate_logits: false
plm_dir: microsoft/deberta-v3-large
base_model_path: null
train_batch_size: 4
eval_batch_size: 4
other_learning_rate: 0.0001
weight_decay: 0.1
dropout: 0.3
use_rope: true
biaffine_size: 512
include_package:
- src.task
use_default_base_config: true
lddrop: false
droprate: 1.0