Hani Park committed on
Commit
666b630
·
1 Parent(s): 19b5a18

Fix README and upload script for v1.2.0

Browse files
Files changed (2) hide show
  1. README.md +5 -32
  2. prepare_hf_upload.py +2 -17
README.md CHANGED
@@ -4,7 +4,7 @@ language:
4
  - en
5
  tags:
6
  - bioassay
7
- pretty_name: CHAFF
8
  size_categories:
9
  - 100K<n<1M
10
  dataset_info:
@@ -12,43 +12,16 @@ dataset_info:
12
  features:
13
  - name: Type
14
  dtype: string
 
 
15
  - name: AID
16
  dtype: int64
17
- - name: CID
18
- dtype: int64
19
- - name: SMILES
20
  dtype: string
21
- splits:
22
- - name: train
23
- num_bytes: 5834311
24
- num_examples: 69777
25
- download_size: 1913364
26
- dataset_size: 5834311
27
- - config_name: ChAFF
28
- features:
29
- - name: Type
30
  dtype: string
31
- - name: AID
32
- dtype: int64
33
- - name: CID
34
- dtype: int64
35
  - name: SMILES
36
  dtype: string
37
- splits:
38
- - name: train
39
- num_bytes: 5970580
40
- num_examples: 71400
41
- download_size: 1963227
42
- dataset_size: 5970580
43
- configs:
44
- - config_name: CHAFF
45
- data_files:
46
- - split: train
47
- path: CHAFF/train-*
48
- - config_name: ChAFF
49
- data_files:
50
- - split: train
51
- path: ChAFF/train-*
52
  ---
53
 
54
  # ChAFF datasets
 
4
  - en
5
  tags:
6
  - bioassay
7
+ pretty_name: ChAFF
8
  size_categories:
9
  - 100K<n<1M
10
  dataset_info:
 
12
  features:
13
  - name: Type
14
  dtype: string
15
+ - name: DatasetName
16
+ dtype: string
17
  - name: AID
18
  dtype: int64
19
+ - name: ID
 
 
20
  dtype: string
21
+ - name: IDType
 
 
 
 
 
 
 
 
22
  dtype: string
 
 
 
 
23
  - name: SMILES
24
  dtype: string
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  ---
26
 
27
  # ChAFF datasets
prepare_hf_upload.py CHANGED
@@ -2,8 +2,8 @@ import os
2
  import shutil
3
 
4
  # Path
5
- csv_source_folder = "/home/HuggingFaceFinal"
6
- hf_upload_folder = "/home/HuggingFaceUpload"
7
  data_folder = os.path.join(hf_upload_folder, "data")
8
 
9
  os.makedirs(data_folder, exist_ok=True)
@@ -18,18 +18,3 @@ for file in os.listdir(csv_source_folder):
18
  csv_files.append(file)
19
 
20
  print(f"Number of copied CSV files: {len(csv_files)}")
21
-
22
-
23
- yaml_path = os.path.join(hf_upload_folder, "dataset.yaml")
24
- with open(yaml_path, "w") as f:
25
- f.write("dataset_info:\n")
26
- f.write(" features:\n")
27
- for col in ["Type", "DatasetName", "AID", "ID", "IDType", "SMILES"]:
28
- f.write(f" - name: {col}\n")
29
- f.write(" dtype: string\n")
30
- f.write(" splits:\n")
31
- for fname in csv_files:
32
- split_name = os.path.splitext(fname)[0]
33
- f.write(f" - name: {split_name}\n")
34
-
35
- print("dataset.yaml created")
 
2
  import shutil
3
 
4
  # Path
5
+ csv_source_folder = "./HuggingFaceFinal"
6
+ hf_upload_folder = "./HuggingFaceUpload"
7
  data_folder = os.path.join(hf_upload_folder, "data")
8
 
9
  os.makedirs(data_folder, exist_ok=True)
 
18
  csv_files.append(file)
19
 
20
  print(f"Number of copied CSV files: {len(csv_files)}")