import os
import random

import jsonlines
from datasets import load_dataset


def whitespace_tokenize_with_offsets(text):
    """Split `text` on whitespace, returning the tokens plus the character start/end offset of each token."""
    tokens = []
    start_tok_offsets = []
    end_tok_offsets = []
    current_token = ""
    current_token_start = None

    for i, char in enumerate(text):
        if char.isspace():
            if current_token:
                tokens.append(current_token)
                start_tok_offsets.append(current_token_start)
                end_tok_offsets.append(i)
                current_token = ""
                current_token_start = None
        else:
            if current_token == "":
                current_token_start = i
            current_token += char

    # Flush the final token if the text does not end with whitespace.
    if current_token:
        tokens.append(current_token)
        start_tok_offsets.append(current_token_start)
        end_tok_offsets.append(len(text))

    return tokens, start_tok_offsets, end_tok_offsets

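# Illustrative sanity check (not part of the original pipeline): the offsets index into
# the original string, so text[start:end] recovers each token.
assert whitespace_tokenize_with_offsets("ab  cd") == (["ab", "cd"], [0, 4], [2, 6])
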
def proc_dataset(dataset, max_text_length=200):
    """Turn each annotated document into extraction records: one record per not-yet-covered entity,
    with the text truncated to roughly `max_text_length` whitespace tokens around that entity and
    same-category entities appearing in the same window grouped into a single record."""
    r = []
    for doc in dataset:
        text = doc["text"]
        covered_entities = set()
        for ent_id, entity in enumerate(doc["entities"]):
            if ent_id in covered_entities:
                continue
            target_text = text
            if len(text) > max_text_length:
                tokens, start_tok_offsets, end_tok_offsets = whitespace_tokenize_with_offsets(text)
                entity_start = entity["start"]
                entity_end = entity["end"]

                # Map the entity's character span onto token indices.
                entity_start_idx = None
                entity_end_idx = None
                for idx, (start, end) in enumerate(zip(start_tok_offsets, end_tok_offsets)):
                    if start <= entity_start < end:
                        entity_start_idx = idx
                    if start < entity_end <= end:
                        entity_end_idx = idx
                        break

                if entity_start_idx is None or entity_end_idx is None:
                    continue

                # Budget the remaining tokens (minus a margin of 20) around the entity,
                # splitting them randomly between left and right context.
                allowed_tokens = max_text_length - len(tokens[entity_start_idx:entity_end_idx + 1]) - 20
                before_tokens = random.randint(0, int(allowed_tokens * 0.8))
                after_tokens = allowed_tokens - before_tokens

                # If one side would run past the document boundary, give the surplus to the other side.
                if entity_start_idx - before_tokens < 0:
                    after_tokens += -(entity_start_idx - before_tokens)
                elif entity_end_idx + after_tokens + 1 >= len(tokens):
                    before_tokens += entity_end_idx + after_tokens + 1 - len(tokens)
                start_idx = max(0, entity_start_idx - before_tokens)
                end_idx = min(len(tokens), entity_end_idx + after_tokens + 1)

                # Keep the first 21 tokens of the document as a prefix when the window starts later.
                initial_text = ""
                if start_idx > 20:
                    initial_text = text[:end_tok_offsets[20]] + "... "

                start_offset = start_tok_offsets[start_idx]
                end_offset = end_tok_offsets[end_idx - 1]

                target_text = initial_text + text[start_offset:end_offset]

            # Collect all other entities of the same category whose surface form also
            # appears in the selected window, so they share one record.
            this_answer_entities = [ent_id]
            answers = [entity["content"]]
            for ent_id2, entity2 in enumerate(doc["entities"]):
                if ent_id2 == ent_id:
                    continue
                if entity2["category_str"] == entity["category_str"]:
                    if entity2["content"] in target_text:
                        this_answer_entities.append(ent_id2)
                        answers.append(entity2["content"])

            covered_entities.update(this_answer_entities)

            r.append({
                "label": entity["category_str"],
                "answers": list(set(answers)),
                "text": target_text,
            })
    return r

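# Illustrative example on a hypothetical toy document (the field names match those used
# above; the category code "gu" and the text are made up, not taken from the corpus).
_toy_doc = {
    "text": "Praha je hlavní město.",
    "entities": [{"start": 0, "end": 5, "content": "Praha", "category_str": "gu"}],
}
assert proc_dataset([_toy_doc]) == [
    {"label": "gu", "answers": ["Praha"], "text": "Praha je hlavní město."}
]
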

# Build the new splits: hold out the first 3000 shuffled training documents for testing
# and keep the rest for training. Note that the shuffle is not seeded, so the split
# differs between runs.
d = load_dataset("fewshot-goes-multilingual/cs_czech-named-entity-corpus_2.0")
train = list(d['train'])
random.shuffle(train)
new_dataset_train = proc_dataset(train[3000:])
dataset_test_ftrain = proc_dataset(train[:3000])
dataset_val = proc_dataset(d['validation'])
dataset_test = proc_dataset(d['test'])

# The new test split pools the held-out training documents with the original
# validation and test splits.
new_dataset_test = dataset_test_ftrain + dataset_val + dataset_test
random.shuffle(new_dataset_test)

os.makedirs(".data/hf_dataset/czner_2.0", exist_ok=True)

print("train", len(new_dataset_train))
print("test", len(new_dataset_test))

# Write one JSON object per line.
with jsonlines.open(".data/hf_dataset/czner_2.0/train.jsonl", "w") as f:
    f.write_all(new_dataset_train)
with jsonlines.open(".data/hf_dataset/czner_2.0/test.jsonl", "w") as f:
    f.write_all(new_dataset_test)
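
# Illustrative read-back check (not part of the original pipeline): peek at the first
# few written records to confirm the JSONL files look as expected.
with jsonlines.open(".data/hf_dataset/czner_2.0/train.jsonl") as reader:
    for i, record in enumerate(reader):
        print(record["label"], record["answers"])
        if i >= 2:
            break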