christophalt committed on
Commit
5e4e0b8
·
1 Parent(s): 5d594aa

Upload all datasets to hub

Browse files

Commit from https://github.com/huggingface/datasets/pie/commit/44e2b49f756ae55906addbd4cbcb339698ea0e7c

dummy/all/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c87f10be43d5d59229987aac5c5e25231dacf25a7a0d2a75714aa97359b6b41
3
+ size 1671
dummy/bag/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:860a95cc9c3d97e8c2c710b848e01ddeb72cc354136a1aa4c048e1501facfacb
3
+ size 1671
dummy/bfh/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:860a95cc9c3d97e8c2c710b848e01ddeb72cc354136a1aa4c048e1501facfacb
3
+ size 1671
dummy/bgh/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:860a95cc9c3d97e8c2c710b848e01ddeb72cc354136a1aa4c048e1501facfacb
3
+ size 1671
dummy/bpatg/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:860a95cc9c3d97e8c2c710b848e01ddeb72cc354136a1aa4c048e1501facfacb
3
+ size 1671
dummy/bsg/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:860a95cc9c3d97e8c2c710b848e01ddeb72cc354136a1aa4c048e1501facfacb
3
+ size 1671
dummy/bverfg/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:860a95cc9c3d97e8c2c710b848e01ddeb72cc354136a1aa4c048e1501facfacb
3
+ size 1671
dummy/bverwg/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c87f10be43d5d59229987aac5c5e25231dacf25a7a0d2a75714aa97359b6b41
3
+ size 1671
german_legal_entity_recognition.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+
3
+ import datasets
4
+ import pytorch_ie.data.builder
5
+ from pytorch_ie.annotations import LabeledSpan
6
+ from pytorch_ie.core import AnnotationList, annotation_field
7
+ from pytorch_ie.documents import TextDocument
8
+ from pytorch_ie.utils.span import tokens_and_tags_to_text_and_labeled_spans
9
+
10
# Version string applied to every builder config defined below.
_VERSION = "1.0.0"
# Abbreviations of the German federal courts covered by the dataset.
_COURTS = ["bag", "bfh", "bgh", "bpatg", "bsg", "bverfg", "bverwg"]
# Maps each court abbreviation to the name of its CoNLL-formatted data file.
_COURTS_FILEPATHS = {court: f"{court}.conll" for court in _COURTS}
# Name of the config that combines the data of all courts.
_ALL = "all"
14
+
15
+
16
class GermanLegalEntityRecognitionConfig(datasets.BuilderConfig):
    """Builder config that selects one or more German courts.

    Args:
        courts: Court abbreviations (keys of ``_COURTS_FILEPATHS``) whose
            data this config covers.
    """

    def __init__(self, *args, courts=None, **kwargs):
        # Pin the config version to the module-level _VERSION constant.
        super().__init__(*args, version=datasets.Version(_VERSION, ""), **kwargs)
        self.courts = courts

    @property
    def filepaths(self):
        """Return the CoNLL file names for the selected courts."""
        return [_COURTS_FILEPATHS[abbreviation] for abbreviation in self.courts]
24
+
25
+
26
@dataclass
class GermanLegalEntityRecognitionDocument(TextDocument):
    """A text document carrying named-entity annotations as labeled spans."""

    # Entity spans are anchored on the document's `text` field
    # (inherited from TextDocument).
    entities: AnnotationList[LabeledSpan] = annotation_field(target="text")
29
+
30
+
31
class GermanLegalEntityRecognition(pytorch_ie.data.builder.GeneratorBasedBuilder):
    """PIE builder wrapping the Hugging Face `german_legal_entity_recognition`
    dataset and converting its examples into documents with entity spans."""

    DOCUMENT_TYPE = GermanLegalEntityRecognitionDocument

    BASE_DATASET_PATH = "german_legal_entity_recognition"

    # One config per individual court, plus a combined config for all courts.
    BUILDER_CONFIGS = [
        GermanLegalEntityRecognitionConfig(
            name=court, courts=[court], description=f"Court. {court}."
        )
        for court in _COURTS
    ] + [
        GermanLegalEntityRecognitionConfig(
            name=_ALL, courts=_COURTS, description="All courts included."
        )
    ]
    BUILDER_CONFIG_CLASS = GermanLegalEntityRecognitionConfig
    DEFAULT_CONFIG_NAME = _ALL  # type: ignore

    def _generate_document_kwargs(self, dataset):
        """Expose the dataset's NER tag id-to-label converter to _generate_document."""
        return {"int_to_str": dataset.features["ner_tags"].feature.int2str}

    def _generate_document(self, example, int_to_str):
        """Build a single document from one tokenized, tag-annotated example.

        Args:
            example: Mapping with ``id``, ``tokens`` and integer ``ner_tags``.
            int_to_str: Converter from integer tag ids to string labels.

        Returns:
            A GermanLegalEntityRecognitionDocument with its entities populated.
        """
        tag_labels = [int_to_str(tag_id) for tag_id in example["ner_tags"]]

        # Reassemble the raw text and derive character-offset entity spans
        # from the token/tag sequences.
        text, labeled_spans = tokens_and_tags_to_text_and_labeled_spans(
            tokens=example["tokens"], tags=tag_labels
        )

        doc = GermanLegalEntityRecognitionDocument(text=text, id=example["id"])

        # Attach the entities ordered by their start offset.
        for entity in sorted(labeled_spans, key=lambda s: s.start):
            doc.entities.append(entity)

        return doc