epptt committed on
Commit
e55daa6
·
1 Parent(s): 3a0aec3

Upload erukaLabels.py

Browse files
Files changed (1) hide show
  1. erukaLabels.py +22 -25
erukaLabels.py CHANGED
@@ -1,5 +1,5 @@
1
  # -*- coding: utf-8 -*-
2
- """Untitled0.ipynb
3
 
4
  Automatically generated by Colaboratory.
5
 
@@ -7,6 +7,7 @@ Original file is located at
7
  https://colab.research.google.com/drive/1p0VRh0b-OtHjNNLIcNUPb2BaoiE9Mh7O
8
  """
9
 
 
10
  # coding=utf-8
11
  import json
12
  import os
@@ -25,8 +26,8 @@ def normalize_bbox(bbox, size):
25
  return [
26
  int(1000 * bbox[0] / size[0]),
27
  int(1000 * bbox[1] / size[1]),
28
- int(1000 * bbox[2] / size[0]),
29
- int(1000 * bbox[3] / size[1]),
30
  ]
31
 
32
  logger = datasets.logging.get_logger(__name__)
@@ -58,7 +59,6 @@ class FunsdConfig(datasets.BuilderConfig):
58
  """
59
  super(FunsdConfig, self).__init__(**kwargs)
60
 
61
-
62
  class Funsd(datasets.GeneratorBasedBuilder):
63
  """Conll2003 dataset."""
64
 
@@ -89,13 +89,13 @@ class Funsd(datasets.GeneratorBasedBuilder):
89
 
90
  def _split_generators(self, dl_manager):
91
  """Returns SplitGenerators."""
92
- downloaded_file = dl_manager.download_and_extract("https://guillaumejaume.github.io/FUNSD/dataset.zip")
93
  return [
94
  datasets.SplitGenerator(
95
- name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"}
96
  ),
97
  datasets.SplitGenerator(
98
- name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"}
99
  ),
100
  ]
101
 
@@ -122,28 +122,25 @@ class Funsd(datasets.GeneratorBasedBuilder):
122
  with open(file_path, "r", encoding="utf8") as f:
123
  data = json.load(f)
124
  image_path = os.path.join(img_dir, file)
125
- image_path = image_path.replace("json", "png")
 
 
126
  image, size = load_image(image_path)
127
- for item in data["form"]:
 
 
 
 
128
  cur_line_bboxes = []
129
- words, label = item["words"], item["label"]
130
- words = [w for w in words if w["text"].strip() != ""]
131
  if len(words) == 0:
132
  continue
133
- if label == "other":
134
- for w in words:
135
- tokens.append(w["text"])
136
- ner_tags.append("O")
137
- cur_line_bboxes.append(normalize_bbox(w["box"], size))
138
- else:
139
- tokens.append(words[0]["text"])
140
- ner_tags.append("B-" + label.upper())
141
- cur_line_bboxes.append(normalize_bbox(words[0]["box"], size))
142
- for w in words[1:]:
143
- tokens.append(w["text"])
144
- ner_tags.append("I-" + label.upper())
145
- cur_line_bboxes.append(normalize_bbox(w["box"], size))
146
  cur_line_bboxes = self.get_line_bbox(cur_line_bboxes)
147
  bboxes.extend(cur_line_bboxes)
148
  yield guid, {"id": str(guid), "tokens": tokens, "bboxes": bboxes, "ner_tags": ner_tags,
149
- "image": image}
 
 
1
  # -*- coding: utf-8 -*-
2
+ """erukaLabels.ipynb
3
 
4
  Automatically generated by Colaboratory.
5
 
 
7
  https://colab.research.google.com/drive/1p0VRh0b-OtHjNNLIcNUPb2BaoiE9Mh7O
8
  """
9
 
10
+
11
  # coding=utf-8
12
  import json
13
  import os
 
26
  return [
27
  int(1000 * bbox[0] / size[0]),
28
  int(1000 * bbox[1] / size[1]),
29
+ int(1000 * bbox[4] / size[0]),
30
+ int(1000 * bbox[5] / size[1]),
31
  ]
32
 
33
  logger = datasets.logging.get_logger(__name__)
 
59
  """
60
  super(FunsdConfig, self).__init__(**kwargs)
61
 
 
62
  class Funsd(datasets.GeneratorBasedBuilder):
63
  """Conll2003 dataset."""
64
 
 
89
 
90
  def _split_generators(self, dl_manager):
91
  """Returns SplitGenerators."""
92
+ downloaded_file = dl_manager.download_and_extract("dataset_eruka.zip")
93
  return [
94
  datasets.SplitGenerator(
95
+ name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset_eruka/training_data/"}
96
  ),
97
  datasets.SplitGenerator(
98
+ name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset_eruka/testing_data/"}
99
  ),
100
  ]
101
 
 
122
  with open(file_path, "r", encoding="utf8") as f:
123
  data = json.load(f)
124
  image_path = os.path.join(img_dir, file)
125
+
126
+ # changed
127
+ image_path = image_path.replace("json", "jpg")
128
  image, size = load_image(image_path)
129
+
130
+ #new
131
+ ddata_path = data["analyzeResult"]["pages"][0]["words"]
132
+
133
+ for item in ddata_path:
134
  cur_line_bboxes = []
135
+ words, label = [item["content"]], "other"
 
136
  if len(words) == 0:
137
  continue
138
+ tokens.append(words[0])
139
+ ner_tags.append("O")
140
+ cur_line_bboxes.append(normalize_bbox(item["polygon"], size))
141
+
 
 
 
 
 
 
 
 
 
142
  cur_line_bboxes = self.get_line_bbox(cur_line_bboxes)
143
  bboxes.extend(cur_line_bboxes)
144
  yield guid, {"id": str(guid), "tokens": tokens, "bboxes": bboxes, "ner_tags": ner_tags,
145
+ "image": image}
146
+