Bgoood commited on
Commit
857fc7a
·
1 Parent(s): 48ff445
Files changed (3) hide show
  1. README.md +91 -0
  2. omim.csv +3 -0
  3. vep_omim.py +109 -0
README.md ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - found
4
+ language:
5
+ - en
6
+ license: mit
7
+ multilinguality: monolingual
8
+ pretty_name: vep_mendelian_traits_chr11_split
9
+ size_categories:
10
+ - 10K<n<100K
11
+ source_datasets:
12
+ - original
13
+ task_categories:
14
+ - sequence-modeling
15
+ task_ids:
16
+ - sequence-classification
17
+ ---
18
+ # vep_mendelian_traits_chr11_split
19
+
20
+ - 字段: ref, alt, label, chromosome, position, consequence
21
+ - 划分: chromosome=11为test,其余为train
22
+ - 支持自动生成ref/alt序列
23
+
24
+ ## 用法
25
+
26
+ ```python
27
+ from datasets import load_dataset
28
+
29
+ ds = load_dataset(
30
+ "Bgoood/vep_mendelian_traits_chr11_split",
31
+ sequence_length=2048,
32
+ fasta_path="/path/to/hg38.fa.gz",
33
+ data_dir="."
34
+ )
35
+ ```
36
+
37
+
38
+ ---
39
+
40
+ ## 5. 上传到 HuggingFace
41
+
42
+ 1. **初始化git repo(如果还没有)**
43
+ ```bash
44
+ git lfs install
45
+ git clone https://huggingface.co/datasets/Bgoood/vep_mendelian_traits_chr11_split
46
+ cd vep_mendelian_traits_chr11_split
47
+ # 把 omim.csv, vep_omim.py, README.md 放到这个目录
48
+ git add .
49
+ git commit -m "init dataset with script"
50
+ git push
51
+ ```
52
+
53
+ 2. **或者直接网页上传**
54
+ 在你的数据集页面,点击“Add file”,上传上述文件。
55
+
56
+ ---
57
+
58
+ ## 6. 用户使用方式
59
+
60
+ 用户只需这样调用即可自动生成ref/alt序列:
61
+
62
+ ```python
63
+ from datasets import load_dataset
64
+
65
+ ds = load_dataset(
66
+ "Bgoood/vep_mendelian_traits_chr11_split",
67
+ sequence_length=2048,
68
+ fasta_path="/path/to/hg38.fa.gz",
69
+ data_dir="."
70
+ )
71
+ ```
72
+
73
+ ---
74
+
75
+ ## 7. 依赖
76
+
77
+ 确保用户环境已安装:
78
+ ```bash
79
+ pip install datasets pyfaidx pandas
80
+ ```
81
+
82
+ ---
83
+
84
+ ## 8. 注意事项
85
+
86
+ - `fasta_path` 必须是本地可访问的 hg38.fa.gz 路径。
87
+ - 你上传到HF的数据集只需包含原始csv和脚本,不需要包含fasta文件。
88
+
89
+ ---
90
+
91
+ 如需自动化脚本生成csv、或有其他定制需求,请随时告知!
omim.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1fc127098a5182ef41e2308b637201463eb201ad867b2f143387cc9d648fb836
3
+ size 69715279
vep_omim.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pandas as pd
3
+ import gzip
4
+ from datasets import GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split, BuilderConfig, Features, Value
5
+ from Bio import SeqIO
6
+ from typing import Dict, Any
7
+
8
class VepOmimConfig(BuilderConfig):
    """BuilderConfig for the VEP OMIM dataset.

    Adds two user-supplied knobs on top of the standard ``BuilderConfig``:

    Args:
        sequence_length: total length of the ref/alt sequence windows
            generated around each variant (default 2048).
        fasta_path: local path to the reference genome fasta, plain or
            gzip-compressed. Required at load time; defaults to ``None``
            so a clear error can be raised later if the user forgot it.
    """

    def __init__(self, sequence_length=2048, fasta_path=None, **kwargs):
        super().__init__(**kwargs)
        # Plain public attributes instead of private fields wrapped in
        # logic-free read-only properties: reads via
        # `config.sequence_length` / `config.fasta_path` are unchanged.
        self.sequence_length = sequence_length
        self.fasta_path = fasta_path
21
+
22
class VepOmimSplit(GeneratorBasedBuilder):
    """Variant-effect-prediction dataset builder for OMIM variants.

    Reads variants from ``omim.csv`` and, using a user-supplied reference
    fasta (see ``VepOmimConfig.fasta_path``), yields fixed-length
    reference/alternate sequence windows centered on each variant.
    """

    BUILDER_CONFIG_CLASS = VepOmimConfig
    BUILDER_CONFIGS = [
        VepOmimConfig(name="default", sequence_length=2048, fasta_path=None)
    ]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Declare the feature schema of every generated example."""
        return DatasetInfo(
            features=Features({
                "ref_forward_sequence": Value("string"),
                "alt_forward_sequence": Value("string"),
                "label": Value("int32"),
                "chromosome": Value("string"),
                "position": Value("int32"),
                "ref": Value("string"),
                "alt": Value("string"),
                "consequence": Value("string"),
            })
        )

    def _split_generators(self, dl_manager):
        """Resolve the single 'omim' split from the csv shipped with the repo."""
        downloaded_files = dl_manager.download({"omim": "omim.csv"})
        return [
            SplitGenerator(name="omim", gen_kwargs={"filepath": downloaded_files["omim"]}),  # type: ignore
        ]

    def _load_fasta_sequences(self, fasta_path: str) -> Dict[str, str]:
        """Load all fasta records into an in-memory ``{id: sequence}`` dict.

        Supports plain and gzip-compressed fasta files.
        """
        # Single code path: choose the opener instead of duplicating the
        # parse loop for the gzip and plain-text cases.
        opener = gzip.open if fasta_path.endswith('.gz') else open
        with opener(fasta_path, 'rt') as f:
            return {record.id: str(record.seq) for record in SeqIO.parse(f, 'fasta')}

    def _generate_examples(self, filepath: str):
        """Yield one example per csv row with ref/alt bases substituted into
        a window of ``sequence_length`` bases centered on the variant.

        Raises:
            ValueError: if ``fasta_path`` was not provided, a chromosome is
                missing from the fasta, or a variant position lies outside
                the loaded chromosome sequence.
        """
        df = pd.read_csv(filepath)
        config: VepOmimConfig = self.config  # type: ignore
        seq_len = config.sequence_length
        fasta_path = config.fasta_path
        if fasta_path is None:
            raise ValueError("You must provide fasta_path when loading the dataset!")

        # Load all reference sequences into memory once, up front.
        sequences = self._load_fasta_sequences(fasta_path)

        for idx, row in df.iterrows():
            chrom = str(row['chromosome'])
            if not chrom.startswith('chr'):
                chrom = 'chr' + chrom

            if chrom not in sequences:
                raise ValueError(f"Chromosome {chrom} not found in fasta. Available: {list(sequences.keys())[:5]}...")

            pos = int(row['position'])  # assumed 1-based, per the -1 below
            ref = str(row['ref'])
            alt = str(row['alt'])
            half = seq_len // 2
            start = max(0, pos - half - 1)  # 0-based window start (clipped at chromosome start)
            end = pos + half - 1

            seq = sequences[chrom][start:end]
            # BUG FIX: the variant's offset inside the window is
            # (pos - 1) - start, not a constant `half` -- when the window
            # was clipped at the chromosome start (start forced to 0), the
            # old code substituted the wrong base.
            center_idx = (pos - 1) - start
            if center_idx >= len(seq):
                # Variant lies beyond the end of the loaded chromosome
                # sequence; fail loudly instead of an opaque IndexError.
                raise ValueError(
                    f"Position {pos} on {chrom} is outside the fasta sequence "
                    f"(length {len(sequences[chrom])})."
                )
            seq_list = list(seq)
            ref_seq_list = seq_list.copy()
            ref_seq_list[center_idx] = ref
            alt_seq_list = seq_list.copy()
            alt_seq_list[center_idx] = alt
            yield idx, {
                "ref_forward_sequence": ''.join(ref_seq_list),
                "alt_forward_sequence": ''.join(alt_seq_list),
                "label": int(row["label"]),
                "chromosome": str(row["chromosome"]),
                "position": int(row["position"]),
                "consequence": row["consequence"],
                "ref": row["ref"],
                "alt": row["alt"]
            }