haneulpark committed on
Commit
f9d1f4f
·
verified ·
1 Parent(s): 2cc3d16

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +10 -10
README.md CHANGED
@@ -140,7 +140,7 @@ then, from within python load the datasets library
140
 
141
  and load one of the `AttentiveSkin` datasets, e.g.,
142
 
143
- >>> AttentiveSkin = datasets.load_dataset("maomlab/AttentiveSkin", name = "AttentiveSkin")
144
  Downloading readme: 100%|██████████| 6.40/6.40 [00:00<00:00, 11.7kB/s]
145
  Downloading data: 100%|██████████| 468/468 [00:00<00:00, 4.88MB/s]
146
  Downloading data: 100%|██████████| 1.41k/1.41k [00:00<00:00, 11.9MB/s]
@@ -149,18 +149,18 @@ and load one of the `AttentiveSkin` datasets, e.g.,
149
 
150
  and inspecting the loaded dataset
151
 
152
- >>> AttentiveSkin
153
- AttentiveSkin
154
  DatasetDict({
155
- test: Dataset({
156
- features: ['Name', 'Synonym', 'CAS RN', 'GHS', 'Detailed Page', 'Evidence', 'OECD TG 404', 'Data Source', 'Frequency', 'SMILES', 'SMILES URL', 'SMILES Source', 'Canonical SMILES', 'Split', 'ClusterNo', 'MolCount', 'group'],
157
- num_rows: 803
158
  })
159
- train: Dataset({
160
- features: ['Name', 'Synonym', 'CAS RN', 'GHS', 'Detailed Page', 'Evidence', 'OECD TG 404', 'Data Source', 'Frequency', 'SMILES', 'SMILES URL', 'SMILES Source', 'Canonical SMILES', 'Split', 'ClusterNo', 'MolCount', 'group'],
161
- num_rows: 2416
162
  })
163
- })
 
164
 
165
 
166
  ### Use a dataset to train a model
 
140
 
141
  and load one of the `AttentiveSkin` datasets, e.g.,
142
 
143
+ >>> Corr_Neg = datasets.load_dataset("maomlab/AttentiveSkin", name = 'Corr_Neg')
144
  Downloading readme: 100%|██████████| 6.40/6.40 [00:00<00:00, 11.7kB/s]
145
  Downloading data: 100%|██████████| 468/468 [00:00<00:00, 4.88MB/s]
146
  Downloading data: 100%|██████████| 1.41k/1.41k [00:00<00:00, 11.9MB/s]
 
149
 
150
  and inspecting the loaded dataset
151
 
152
+ >>> Corr_Neg
 
153
  DatasetDict({
154
+ test: Dataset({
155
+ features: ['Name', 'Synonym', 'CAS RN', 'GHS', 'Detailed Page', 'Evidence', 'OECD TG 404', 'Data Source', 'Frequency', 'SMILES', 'SMILES URL', 'SMILES Source', 'Canonical SMILES', 'Split'],
156
+ num_rows: 181
157
  })
158
+ train: Dataset({
159
+ features: ['Name', 'Synonym', 'CAS RN', 'GHS', 'Detailed Page', 'Evidence', 'OECD TG 404', 'Data Source', 'Frequency', 'SMILES', 'SMILES URL', 'SMILES Source', 'Canonical SMILES', 'Split'],
160
+ num_rows: 1755
161
  })
162
+ })
163
+
164
 
165
 
166
  ### Use a dataset to train a model