Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: English
Size: 1K<n<10K
License:
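
The dataset ships with the following `datasets` loading script. It adapts the standard WNUT 17 CoNLL loader to files hosted in the `Kriyansparsana/demorepo` GitHub repository and tags two entity types, `person` and `corporation`.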
```python
import datasets

logger = datasets.logging.get_logger(__name__)

_URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/"
_TRAINING_FILE = "wnut17train%20(1).conll"
_DEV_FILE = "indian_ner_dev.conll"
_TEST_FILE = "indian_ner_test.conll"
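
# NOTE: judging by its name, the training file appears to be the original WNUT 17
# train split, while the dev and test files are custom Indian-NER splits.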

class indian_namesConfig(datasets.BuilderConfig):
    """BuilderConfig for the indian_names dataset (adapted from the WNUT 17 loader)."""

    def __init__(self, **kwargs):
        """BuilderConfig for indian_names.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(indian_namesConfig, self).__init__(**kwargs)

class indian_names(datasets.GeneratorBasedBuilder):
    """Indian-names NER dataset, adapted from the WNUT 17 Emerging Entities loader."""

    BUILDER_CONFIGS = [
        indian_namesConfig(
            name="indian_names",
            version=datasets.Version("1.0.0"),
            description="Indian-names NER dataset (adapted from the WNUT 17 Emerging Entities loader)",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # IOB2 tags: B- opens an entity, I- continues it, O is outside any entity.
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-corporation",
                                "I-corporation",
                                "B-person",
                                "I-person",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
        )
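
    # The raw files are fetched straight from GitHub; dl_manager caches the downloads locally.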
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]
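
    # Each .conll file is expected to contain one tab-separated "token<TAB>label" pair
    # per line, with a blank line marking the end of a sentence.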
    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            current_tokens = []
            current_labels = []
            sentence_counter = 0
            for row in f:
                row = row.rstrip()
                if row:
                    if "\t" in row:
                        # Split on the first tab only, so extra tabs cannot raise ValueError.
                        token, label = row.split("\t", 1)
                        current_tokens.append(token)
                        current_labels.append(label)
                    else:
                        # Rows without a tab delimiter are malformed; log and skip them.
                        logger.warning("Delimiter missing in row: %s", row)
                else:
                    # A blank line marks a sentence boundary.
                    if not current_tokens:
                        # Consecutive blank lines would produce empty sentences; skip them.
                        continue
                    assert len(current_tokens) == len(current_labels), "Mismatch between number of tokens and labels"
                    sentence = (
                        sentence_counter,
                        {
                            "id": str(sentence_counter),
                            "tokens": current_tokens,
                            "ner_tags": current_labels,
                        },
                    )
                    sentence_counter += 1
                    current_tokens = []
                    current_labels = []
                    yield sentence
            # Don't forget the last sentence if the file does not end with a blank line.
            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "tokens": current_tokens,
                    "ner_tags": current_labels,
                }
```
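
For reference, the parser above expects input shaped like this hypothetical sample (tab-separated, blank line between sentences; the names are made up for illustration):

```
Rahul	B-person
Sharma	I-person
works	O
at	O
Infosys	B-corporation
```

And a minimal usage sketch, assuming the script is saved locally as `indian_names.py` and a `datasets` version below 3.0 (which still supports loading scripts); the split names and features come from the loader above:

```python
# Minimal sketch: load the dataset through the script above and inspect one example.
from datasets import load_dataset

# Assumes the loader is saved locally as indian_names.py; adjust the path as needed.
dataset = load_dataset("indian_names.py")

# The loader defines train/validation/test splits.
print(dataset)

example = dataset["train"][0]
print(example["tokens"])

# ner_tags are ClassLabel ids; map them back to their string names.
label_names = dataset["train"].features["ner_tags"].feature.names
print([label_names[i] for i in example["ner_tags"]])
```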