dinhquangson committed on
Commit
f84df9a
·
1 Parent(s): fb8897f

Update FUNSD.py

Browse files
Files changed (1) hide show
  1. FUNSD.py +33 -22
FUNSD.py CHANGED
@@ -125,22 +125,26 @@ class Funsd(datasets.GeneratorBasedBuilder):
125
  ]
126
 
127
  def _generate_examples(self, filepath):
128
- logger.info("Generating examples from = %s", filepath)
129
- with open(filepath[0], "r", encoding="utf-8") as f:
130
- data = json.load(f)
131
-
132
- for doc in data["documents"]:
133
- doc["img"]["fpath"] = os.path.join(filepath[1], doc["img"]["fname"])
134
- image, size = load_image(doc["img"]["fpath"], size=224)
135
- original_image, _ = load_image(doc["img"]["fpath"])
136
- document = doc["document"]
137
- tokenized_doc = {"input_ids": [], "bbox": [], "labels": []}
138
  entities = []
139
  relations = []
140
- id2label = {}
141
  entity_id_to_index_map = {}
142
  empty_entity = set()
143
- for line in document:
 
 
 
 
 
 
 
 
144
  if len(line["text"]) == 0:
145
  empty_entity.add(line["id"])
146
  continue
@@ -192,6 +196,21 @@ class Funsd(datasets.GeneratorBasedBuilder):
192
  )
193
  for i in tokenized_doc:
194
  tokenized_doc[i] = tokenized_doc[i] + tokenized_inputs[i]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
195
  relations = list(set(relations))
196
  relations = [rel for rel in relations if rel[0] not in empty_entity and rel[1] not in empty_entity]
197
  kvrelations = []
@@ -257,13 +276,5 @@ class Funsd(datasets.GeneratorBasedBuilder):
257
  "end_index": relation["end_index"] - index,
258
  }
259
  )
260
- item.update(
261
- {
262
- "id": f"{doc['id']}_{chunk_id}",
263
- "image": image,
264
- "original_image": original_image,
265
- "entities": entities_in_this_span,
266
- "relations": relations_in_this_span,
267
- }
268
- )
269
- yield f"{doc['id']}_{chunk_id}", item
 
125
  ]
126
 
127
  def _generate_examples(self, filepath):
128
+ logger.info("Generating examples from = %s", filepath)
129
+ ann_dir = os.path.join(filepath, "annotations")
130
+ img_dir = os.path.join(filepath, "images")
131
+ for guid, file in enumerate(sorted(os.listdir(ann_dir))):
132
+ words = []
133
+ bboxes = []
134
+ ner_tags = []
 
 
 
135
  entities = []
136
  relations = []
 
137
  entity_id_to_index_map = {}
138
  empty_entity = set()
139
+ file_path = os.path.join(ann_dir, file)
140
+ with open(file_path, "r", encoding="utf8") as f:
141
+ data = json.load(f)
142
+ image_path = os.path.join(img_dir, file)
143
+ image_path = image_path.replace("json", "png")
144
+ image, size = load_image(image_path)
145
+ for item in data["form"]:
146
+ words_example, label = item["words"], item["label"]
147
+ words_example = [w for w in words_example if w["text"].strip() != ""]
148
  if len(line["text"]) == 0:
149
  empty_entity.add(line["id"])
150
  continue
 
196
  )
197
  for i in tokenized_doc:
198
  tokenized_doc[i] = tokenized_doc[i] + tokenized_inputs[i]
199
+ if len(words_example) == 0:
200
+ continue
201
+ if label == "other":
202
+ for w in words_example:
203
+ words.append(w["text"])
204
+ ner_tags.append("O")
205
+ bboxes.append(normalize_bbox(w["box"], size))
206
+ else:
207
+ words.append(words_example[0]["text"])
208
+ ner_tags.append("B-" + label.upper())
209
+ bboxes.append(normalize_bbox(words_example[0]["box"], size))
210
+ for w in words_example[1:]:
211
+ words.append(w["text"])
212
+ ner_tags.append("I-" + label.upper())
213
+ bboxes.append(normalize_bbox(w["box"], size))
214
  relations = list(set(relations))
215
  relations = [rel for rel in relations if rel[0] not in empty_entity and rel[1] not in empty_entity]
216
  kvrelations = []
 
276
  "end_index": relation["end_index"] - index,
277
  }
278
  )
279
+ yield guid, {"id": str(guid), "words": words, "bboxes": bboxes, "ner_tags": ner_tags, "image_path": image_path, "entities": entities, "relations": relations}
280
+