smanepal committed · Commit 24abb30 · Parent(s): f5e11ff
Pipfile DELETED
@@ -1,11 +0,0 @@
-[[source]]
-url = "https://pypi.org/simple"
-verify_ssl = true
-name = "pypi"
-
-[packages]
-
-[dev-packages]
-
-[requires]
-python_version = "3.9"
check_json.py DELETED
@@ -1,18 +0,0 @@
-import json
-
-with open("dataset_infos.json") as f:
-    root = json.load(f)
-
-# Should be a dict with exactly one key
-assert isinstance(root, dict), "Root must be a dict"
-assert len(root) == 1, "Root must contain exactly one config"
-config_name, info = next(iter(root.items()))
-
-# Info must itself be a dict
-assert isinstance(info, dict), f"Value for config '{config_name}' is not a dict"
-
-# It must have all required keys
-for key in ("features", "splits", "dataset_size"):
-    assert key in info, f"Missing '{key}' in config '{config_name}'"
-
-print("✅ dataset_infos.json structure looks good under config:", config_name)
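
For reference, a minimal dataset_infos.json that satisfies these assertions has the shape sketched below (a sketch only; the values are borrowed from this repo's own metadata, and the single config name is arbitrary):

# Sketch of the structure check_json.py accepts; values are illustrative.
minimal = {
    "default": {                                      # exactly one config at the root
        "features": {"cluster": {"dtype": "int64"}},  # feature schema
        "splits": {"train": {"num_examples": 9000}},  # per-split metadata
        "dataset_size": 57378375,                     # total size in bytes
    }
}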
dataset_infos.json CHANGED
@@ -1,42 +1,29 @@
 {
   "default": {
+    "splits": {
+      "train": {
+        "num_examples": 9000
+      }
+    },
     "features": {
       "cluster": {
-        "dtype": "int64",
-        "_type": "Value"
+        "dtype": "int64"
       },
       "number": {
-        "dtype": "int64",
-        "_type": "Value"
+        "dtype": "int64"
       },
       "hfm": {
-        "feature": {
-          "dtype": "float32",
-          "_type": "Value"
-        },
-        "_type": "Sequence"
+        "dtype": "list<element: float>"
       },
       "imagenet": {
-        "feature": {
-          "dtype": "float32",
-          "_type": "Value"
-        },
-        "_type": "Sequence"
+        "dtype": "list<element: float>"
       },
       "cp": {
-        "feature": {
-          "dtype": "float64",
-          "_type": "Value"
-        },
-        "_type": "Sequence"
-      }
-    },
-    "splits": {
-      "train": {
-        "num_examples": 9000,
-        "num_bytes": 57378375
       }
     },
-    "dataset_size": 57378375
+    "supervised_keys": null,
+    "download_size": null,
+    "dataset_size": null
   }
 }
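
The new dtype strings mirror Arrow/Parquet list types: "float" is float32, "double" is float64, and the "element" child-field name follows the Parquet convention. A short pyarrow sketch of the correspondence (note pyarrow's own repr names the child "item" rather than "element"):

import pyarrow as pa

# Arrow list types behind the new dtype strings above.
print(pa.list_(pa.float32()))  # list<item: float>  ~ "list<element: float>"
print(pa.list_(pa.float64()))  # list<item: double> ~ "list<element: double>"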
embeddings_pcs_shape_sample10.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7d1e2828016240512ae5c9fece010a389c94d2e0566cbbbd747d007855b40f9b
-size 85306
generate_info.py DELETED
@@ -1,132 +0,0 @@
-#!/usr/bin/env python3
-"""
-generate_info.py
-
-Scan all .parquet files in a given directory for schema & metadata,
-and write a valid Hugging Face `dataset_infos.json` with a top-level config name.
-"""
-
-import glob
-import os
-import json
-import argparse
-import sys
-
-# Try using pyarrow for fast schema inspection & list detection
-USE_PYARROW = False
-try:
-    import pyarrow.parquet as pq
-    import pyarrow as pa
-    USE_PYARROW = True
-except ImportError:
-    import pandas as pd
-
-# Primitive type mapping (map Arrow string repr → HF dtype)
-PRIMITIVE_MAP = {
-    "int64": "int64",
-    "int32": "int32",
-    "float64": "float32",  # HF uses float32
-    "double": "float32",
-    "float32": "float32",
-    "string": "string",
-    "binary": "binary",
-}
-
-def inspect_parquet(path):
-    """
-    Return (features_dict, num_rows, num_bytes) for a single Parquet file.
-    Detects primitive and list types via pyarrow if available.
-    """
-    if USE_PYARROW:
-        pf = pq.ParquetFile(path)
-        schema = pf.schema_arrow
-        feats = {}
-        for field in schema:
-            name = field.name
-            dtype = field.type
-            dtype_str = str(dtype)
-            if pa.types.is_list(dtype):
-                # List-of-primitive case
-                elem_str = str(dtype.value_type)
-                mapped = PRIMITIVE_MAP.get(elem_str, elem_str)
-                feats[name] = {
-                    "_type": "Sequence",
-                    "feature": {"dtype": mapped},
-                    "length": -1
-                }
-            else:
-                # Primitive case
-                mapped = PRIMITIVE_MAP.get(dtype_str, dtype_str)
-                feats[name] = {"dtype": mapped}
-        num_rows = pf.metadata.num_rows
-    else:
-        # Fallback: load full table with pandas (no list detection)
-        df = pd.read_parquet(path)
-        feats = {
-            col: {"dtype": PRIMITIVE_MAP.get(str(dt), str(dt))}
-            for col, dt in df.dtypes.items()
-        }
-        num_rows = len(df)
-
-    size_bytes = os.path.getsize(path)
-    return feats, num_rows, size_bytes
-
-def main():
-    parser = argparse.ArgumentParser(
-        description="Generate HF-style dataset_infos.json from Parquet files"
-    )
-    parser.add_argument(
-        "-d", "--parquet-dir",
-        default=".",
-        help="Directory containing .parquet files"
-    )
-    parser.add_argument(
-        "-p", "--pattern",
-        default="*.parquet",
-        help="Glob pattern to match Parquet files"
-    )
-    parser.add_argument(
-        "-o", "--output",
-        default="dataset_infos.json",
-        help="Output JSON filename"
-    )
-    args = parser.parse_args()
-
-    pattern = os.path.join(args.parquet_dir, args.pattern)
-    files = sorted(glob.glob(pattern))
-    if not files:
-        sys.stderr.write(f"No files found matching: {pattern}\n")
-        sys.exit(1)
-
-    # Extract schema & row count from first file
-    features, row_count, _ = inspect_parquet(files[0])
-    if not features:
-        sys.stderr.write("No features detected—check your schema!\n")
-        sys.exit(1)
-
-    # Sum byte sizes across all files
-    total_bytes = sum(inspect_parquet(f)[2] for f in files)
-
-    # Build the dataset info under the "default" config
-    dataset_infos = {
-        "default": {
-            "features": features,
-            "splits": {
-                "train": {
-                    "num_examples": row_count,
-                    "num_bytes": total_bytes
-                }
-            },
-            "dataset_size": total_bytes
-        }
-    }
-
-    # Write to disk
-    with open(args.output, "w") as fp:
-        json.dump(dataset_infos, fp, indent=2)
-
-    print(f"Wrote {args.output} ({len(files)} files, {total_bytes} bytes):")
-    print(json.dumps(dataset_infos, indent=2))
-
-if __name__ == "__main__":
-    main()
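
The heart of the deleted script is its schema walk: ParquetFile.schema_arrow exposes each column's Arrow type without reading any row data, and pa.types.is_list separates list columns from primitives. A minimal standalone sketch of that walk (the file name is hypothetical):

import pyarrow as pa
import pyarrow.parquet as pq

pf = pq.ParquetFile("example.parquet")  # hypothetical path
for field in pf.schema_arrow:
    if pa.types.is_list(field.type):
        # List column: the element dtype lives on value_type
        print(field.name, "-> Sequence of", field.type.value_type)
    else:
        print(field.name, "->", field.type)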
generate_manually.py DELETED
@@ -1,54 +0,0 @@
-#!/usr/bin/env python3
-import json
-import os
-from datasets import load_dataset
-
-# --- CONFIGURATION: list your local Parquet files here ---
-data_files = {
-    "train": [
-        "embeddings_pcs_shape.parquet",
-        "embeddings_pcs_texture.parquet",
-        "embeddings_pcs_width.parquet",
-    ]
-}
-
-# 1. Load the Parquets as a single-train-split Dataset
-ds = load_dataset(
-    "parquet",
-    data_files=data_files,
-    split="train"
-)
-
-# 2. Extract the metadata from ds.info
-info = ds.info
-
-# Features: convert to plain dict
-features_dict = info.features.to_dict()
-
-# Splits: collect num_examples & num_bytes
-splits_dict = {
-    split_name: {
-        "num_examples": split_info.num_examples,
-        "num_bytes": split_info.num_bytes,
-    }
-    for split_name, split_info in info.splits.items()
-}
-
-# Dataset size
-dataset_size = info.dataset_size
-
-# 3. Wrap under "default" config
-final = {
-    "default": {
-        "features": features_dict,
-        "splits": splits_dict,
-        "dataset_size": dataset_size,
-    }
-}
-
-# 4. Write out the JSON
-with open("dataset_infos.json", "w") as f:
-    json.dump(final, f, indent=2)
-
-print("✅ Wrote dataset_infos.json:")
-print(json.dumps(final, indent=2))
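
Since the emitted file has exactly the shape the repo's check_json.py asserted, a quick round-trip check is possible (a sketch, reusing that script's logic):

import json

with open("dataset_infos.json") as f:
    root = json.load(f)
config_name, info = next(iter(root.items()))
assert {"features", "splits", "dataset_size"} <= set(info)
print(config_name, info["splits"]["train"]["num_examples"])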
hf-test DELETED
@@ -1 +0,0 @@
-Subproject commit 34e7b813658391d045af7f1a9b17e13343444a18
sample_parquets/embeddings_pcs_texture_sample10.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5ecc6faad45bda889882b081ae6cfe7712e55a6caefd61af55ddb294ca8d6501
-size 85360
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d3cf1a5749fa3e851c828f37458c800cb6219d162c6570fd17a1a81650c4bdd8
3
- size 85533
 
 
 
 
split_parquet.py DELETED
@@ -1,55 +0,0 @@
-#!/usr/bin/env python3
-"""
-split_parquet.py
-
-Creates a smaller sample of existing Parquet files by taking the first N rows
-from each and writing them to new files with a `_sample` suffix.
-"""
-
-import pandas as pd
-import glob
-import os
-import argparse
-
-def split_parquet(input_dir, pattern, output_dir, nrows):
-    os.makedirs(output_dir, exist_ok=True)
-    files = glob.glob(os.path.join(input_dir, pattern))
-    if not files:
-        print(f"No files found matching {pattern} in {input_dir}")
-        return
-
-    for path in files:
-        df = pd.read_parquet(path, engine="pyarrow")
-        sample = df.head(nrows)
-        base = os.path.basename(path)
-        out_name = base.replace(".parquet", f"_sample{nrows}.parquet")
-        out_path = os.path.join(output_dir, out_name)
-        sample.to_parquet(out_path, index=False, engine="pyarrow")
-        print(f"Wrote {nrows} rows to {out_path}")
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Split Parquet into small samples")
-    parser.add_argument(
-        "-i", "--input-dir",
-        default=".",
-        help="Directory with original Parquet files"
-    )
-    parser.add_argument(
-        "-p", "--pattern",
-        default="*.parquet",
-        help="Glob pattern for original Parquet files"
-    )
-    parser.add_argument(
-        "-o", "--output-dir",
-        default="sample_parquets",
-        help="Directory for sample files"
-    )
-    parser.add_argument(
-        "-n", "--nrows",
-        type=int,
-        default=10,
-        help="Number of rows per sample file"
-    )
-    args = parser.parse_args()
-
-    split_parquet(args.input_dir, args.pattern, args.output_dir, args.nrows)
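
Called programmatically with the script's defaults, the sampler reduces to the call below; the *_sample10.parquet files deleted elsewhere in this commit are exactly what it produced:

# Writes e.g. sample_parquets/embeddings_pcs_shape_sample10.parquet
split_parquet(input_dir=".", pattern="*.parquet", output_dir="sample_parquets", nrows=10)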
test.py DELETED
@@ -1,6 +0,0 @@
-from datasets import load_dataset
-ds = load_dataset(
-    "Deepcell/parametric-cell-shapes",
-    download_mode="force_redownload"  # ensure no cache is used
-)
-print(ds)