EC2 Default User committed on
Commit
6a2d9c0
·
1 Parent(s): 0b5fe18

remove file

Browse files
Files changed (1) hide show
  1. oas-paired-sequence-data.py +0 -141
oas-paired-sequence-data.py DELETED
@@ -1,141 +0,0 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- # TODO: Address all TODOs and remove all explanatory comments
15
- """Paired sequences from the Observed Antibody Space database"""
16
-
17
-
18
- import csv
19
- import json
20
- import os
21
-
22
- import datasets
23
-
24
# BibTeX citation for the Observed Antibody Space paired-sequence paper.
_CITATION = """\
@article{Olsen_Boyles_Deane_2022,
title={Observed Antibody Space: A diverse database of cleaned, annotated, and translated unpaired and paired antibody sequences},
volume={31}, rights={© 2021 The Authors. Protein Science published by Wiley Periodicals LLC on behalf of The Protein Society.},
ISSN={1469-896X}, DOI={10.1002/pro.4205},
number={1}, journal={Protein Science}, author={Olsen, Tobias H. and Boyles, Fergus and Deane, Charlotte M.},
year={2022}, pages={141–146}, language={en} }

"""
# Short description shown on the dataset card.
_DESCRIPTION = """\
Paired heavy and light chain antibody sequences for multiple species.
"""

_HOMEPAGE = "https://opig.stats.ox.ac.uk/webapps/oas/"

_LICENSE = "cc-by-4.0"

# One parquet file per configuration (species / mouse strain).
_URLS = {
    "human": "human.parquet",
    "rat_SD": "rat_SD.parquet",
    "mouse_BALB_c": "mouse_BALB_c.parquet",
    "mouse_C57BL_6": "mouse_C57BL_6.parquet",
}

# Every configuration shares the same flat, all-string schema: one paired
# record per row with heavy- and light-chain fields side by side.
_FIELD_NAMES = (
    "pair_id",
    "sequence_alignment_aa_heavy",
    "cdr1_aa_heavy",
    "cdr2_aa_heavy",
    "cdr3_aa_heavy",
    "sequence_alignment_aa_light",
    "cdr1_aa_light",
    "cdr2_aa_light",
    "cdr3_aa_light",
)
_FEATURES = datasets.Features(
    {name: datasets.Value("string") for name in _FIELD_NAMES}
)
61
-
62
-
63
class OasPairedSequenceData(datasets.GeneratorBasedBuilder):
    """Paired heavy/light chain antibody sequences from the Observed Antibody Space.

    Each configuration corresponds to one species (or mouse strain) and is
    backed by the single parquet file listed for it in ``_URLS``. Only a
    ``train`` split is published.
    """

    VERSION = datasets.Version("1.1.0")

    # One configuration per species; select with e.g.
    # data = datasets.load_dataset("oas-paired-sequence-data", "human")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="human", version=VERSION, description="Human"),
        datasets.BuilderConfig(name="rat_SD", version=VERSION, description="rat_SD"),
        datasets.BuilderConfig(
            name="mouse_BALB_c", version=VERSION, description="mouse_BALB_c"
        ),
        datasets.BuilderConfig(
            name="mouse_C57BL_6", version=VERSION, description="mouse_C57BL_6"
        ),
    ]

    def _info(self):
        """Return dataset metadata: schema, citation, license, and homepage."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the parquet file for the selected config and expose it as TRAIN.

        ``dl_manager.download_and_extract`` returns a local path for the
        (single) URL of ``self.config.name``; that path is handed to
        ``_generate_examples`` via ``gen_kwargs``.
        """
        url = _URLS[self.config.name]
        path = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": path, "split": "train"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from one parquet file.

        The source files are parquet (see ``_URLS``), so they must be read
        with a parquet reader: the previous implementation iterated the file
        as text lines and called ``json.loads`` on each, which only works for
        JSONL and raises on parquet's binary layout.
        """
        # Local import: pandas is only needed when examples are actually
        # generated, so the module stays importable without it.
        import pandas as pd

        table = pd.read_parquet(filepath)
        # Row order in the parquet file is preserved; the enumerate index is
        # the unique example key required by GeneratorBasedBuilder.
        for key, record in enumerate(table.to_dict(orient="records")):
            yield key, record