pmpc committed on
Commit
1c9e1d0
·
1 Parent(s): ef65040

Delete config.json

Browse files
Files changed (1) hide show
  1. config.json +0 -146
config.json DELETED
@@ -1,146 +0,0 @@
1
- [paths]
2
- train = "/kaggle/input/binarydocs-to-train-spacy-ner-component/ler_train.spacy"
3
- dev = "/kaggle/input/binarydocs-to-train-spacy-ner-component/ler_dev.spacy"
4
- vectors = null
5
- init_tok2vec = null
6
-
7
- [system]
8
- gpu_allocator = "pytorch"
9
- seed = 0
10
-
11
- [nlp]
12
- lang = "de"
13
- pipeline = ["transformer","ner"]
14
- batch_size = 128
15
- disabled = []
16
- before_creation = null
17
- after_creation = null
18
- after_pipeline_creation = null
19
- tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}
20
-
21
- [components]
22
-
23
- [components.ner]
24
- factory = "ner"
25
- incorrect_spans_key = null
26
- moves = null
27
- scorer = {"@scorers":"spacy.ner_scorer.v1"}
28
- update_with_oracle_cut_size = 100
29
-
30
- [components.ner.model]
31
- @architectures = "spacy.TransitionBasedParser.v2"
32
- state_type = "ner"
33
- extra_state_tokens = false
34
- hidden_width = 64
35
- maxout_pieces = 2
36
- use_upper = false
37
- nO = null
38
-
39
- [components.ner.model.tok2vec]
40
- @architectures = "spacy-transformers.TransformerListener.v1"
41
- grad_factor = 1.0
42
- pooling = {"@layers":"reduce_mean.v1"}
43
- upstream = "*"
44
-
45
- [components.transformer]
46
- factory = "transformer"
47
- max_batch_items = 4096
48
- set_extra_annotations = {"@annotation_setters":"spacy-transformers.null_annotation_setter.v1"}
49
-
50
- [components.transformer.model]
51
- @architectures = "spacy-transformers.TransformerModel.v3"
52
- name = "bert-base-german-cased"
53
- mixed_precision = false
54
-
55
- [components.transformer.model.get_spans]
56
- @span_getters = "spacy-transformers.strided_spans.v1"
57
- window = 128
58
- stride = 96
59
-
60
- [components.transformer.model.grad_scaler_config]
61
-
62
- [components.transformer.model.tokenizer_config]
63
- use_fast = true
64
-
65
- [components.transformer.model.transformer_config]
66
-
67
- [corpora]
68
-
69
- [corpora.dev]
70
- @readers = "spacy.Corpus.v1"
71
- path = ${paths.dev}
72
- max_length = 0
73
- gold_preproc = false
74
- limit = 0
75
- augmenter = null
76
-
77
- [corpora.train]
78
- @readers = "spacy.Corpus.v1"
79
- path = ${paths.train}
80
- max_length = 0
81
- gold_preproc = false
82
- limit = 0
83
- augmenter = null
84
-
85
- [training]
86
- accumulate_gradient = 3
87
- dev_corpus = "corpora.dev"
88
- train_corpus = "corpora.train"
89
- seed = ${system.seed}
90
- gpu_allocator = ${system.gpu_allocator}
91
- dropout = 0.1
92
- patience = 1600
93
- max_epochs = 0
94
- max_steps = 20000
95
- eval_frequency = 200
96
- frozen_components = []
97
- annotating_components = []
98
- before_to_disk = null
99
- before_update = null
100
-
101
- [training.batcher]
102
- @batchers = "spacy.batch_by_padded.v1"
103
- discard_oversize = true
104
- size = 2000
105
- buffer = 256
106
- get_length = null
107
-
108
- [training.logger]
109
- @loggers = "spacy.ConsoleLogger.v1"
110
- progress_bar = false
111
-
112
- [training.optimizer]
113
- @optimizers = "Adam.v1"
114
- beta1 = 0.9
115
- beta2 = 0.999
116
- L2_is_weight_decay = true
117
- L2 = 0.01
118
- grad_clip = 1.0
119
- use_averages = false
120
- eps = 0.00000001
121
-
122
- [training.optimizer.learn_rate]
123
- @schedules = "warmup_linear.v1"
124
- warmup_steps = 250
125
- total_steps = 20000
126
- initial_rate = 0.00005
127
-
128
- [training.score_weights]
129
- ents_f = 1.0
130
- ents_p = 0.0
131
- ents_r = 0.0
132
- ents_per_type = null
133
-
134
- [pretraining]
135
-
136
- [initialize]
137
- vectors = ${paths.vectors}
138
- init_tok2vec = ${paths.init_tok2vec}
139
- vocab_data = null
140
- lookups = null
141
- before_init = null
142
- after_init = null
143
-
144
- [initialize.components]
145
-
146
- [initialize.tokenizer]