Datasets:
Tasks:
Text Classification
Sub-tasks:
sentiment-classification
Languages:
Chinese
Size:
100K - 1M
Tags:
jd
License:
| from datasets import Value, ClassLabel | |
| import datasets | |
| _JD21_CITATION = """\ | |
| """ | |
| _JD21_DESCRIPTION = """\ | |
| GLUE, the General Language Understanding Evaluation benchmark | |
| (https://gluebenchmark.com/) is a collection of resources for training, | |
| evaluating, and analyzing natural language understanding systems. | |
| """ | |
class JD21Config(datasets.BuilderConfig):
    """BuilderConfig for one JD21 product-domain subset.

    Carries the per-domain metadata (text columns, label column and
    classes, download locations, citation/homepage) that `JD21` reads
    when building DatasetInfo and split generators.
    """

    def __init__(
        self,
        text_features,
        label_column,
        data_url,
        data_dir,
        citation,
        url,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        # Every config shares the same fixed version string.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)

        # What to read from each row.
        self.text_features = text_features
        self.label_column = label_column
        self.label_classes = label_classes
        # Where to fetch the data from.
        self.data_url = data_url
        self.data_dir = data_dir
        # Provenance shown in DatasetInfo.
        self.citation = citation
        self.url = url
        # Optional hook applied to raw label values (identity by default).
        self.process_label = process_label
class JD21(datasets.GeneratorBasedBuilder):
    """Builder for the JD21 Chinese review sentiment dataset.

    One config per product domain; each split is a tab-separated text
    file hosted on the Hugging Face Hub.
    """

    domain_list = ['褪黑素', '维生素', '无线耳机', '蛋白粉', '游戏机', '电视', 'MacBook', '洗面奶', '智能手表', '吹风机', '小米手机', '红米手机', '护肤品',
                   '电动牙刷', 'iPhone', '海鲜', '酒', '平板电脑', '修复霜', '运动鞋', '智能手环']

    # One BuilderConfig per domain; all share the same schema and label set.
    BUILDER_CONFIGS = [
        JD21Config(
            name=domain,
            description=f'comments of JD {domain}.',
            text_features={'sentence': 'sentence', 'domain': 'domain'},
            label_classes=['POS', 'NEG'],
            label_column='label',
            citation="",
            data_dir="",
            data_url=r"https://huggingface.co/datasets/kuroneko5943/jd21/resolve/main/",
            url='https://github.com/ws719547997/LNB-DA',
        )
        for domain in domain_list
    ]

    def _info(self):
        """Return the DatasetInfo: fixed 5-column schema with POS/NEG labels."""
        return datasets.DatasetInfo(
            description=_JD21_DESCRIPTION,
            features=datasets.Features({
                'id': Value(dtype='int32', id=None),
                'domain': Value(dtype='string', id=None),
                'label': ClassLabel(num_classes=2, names=['POS', 'NEG'], names_file=None, id=None),
                'rank': Value(dtype='int32', id=None),
                'sentence': Value(dtype='string', id=None),
            }),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _JD21_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-domain test/dev/train files and wire up the splits."""
        # (split enum, directory-and-tag name) — order matches the original: test, dev, train.
        layout = [
            (datasets.Split.TEST, "test"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TRAIN, "train"),
        ]
        generators = []
        for split_name, tag in layout:
            remote = rf'{self.config.data_url}{tag}/{self.config.name}.txt'
            generators.append(
                datasets.SplitGenerator(
                    name=split_name,
                    gen_kwargs={
                        "data_file": dl_manager.download(remote),
                        "split": tag,
                    },
                )
            )
        return generators

    def _generate_examples(self, data_file, split):
        """Yield (id, example) pairs from a tab-separated split file.

        Expected columns per line: id, domain, label, rank, sentence.
        Blank lines and lines with fewer than 5 fields are skipped.
        """
        with open(data_file, 'r', encoding='utf-8') as handle:
            for raw_line in handle:
                stripped = raw_line.strip()
                if not stripped:
                    continue
                fields = stripped.split('\t')
                if len(fields) < 5:
                    continue
                row_id, domain, label, rank = fields[0], fields[1], fields[2], fields[3]
                sentence = fields[4]
                yield row_id, {
                    'sentence': sentence,
                    'domain': domain,
                    'label': label,
                    'id': row_id,
                    'rank': rank,
                }