OtoroLin commited on
Commit
5002088
·
1 Parent(s): b3d8a4b

Enable dataset download; nested enumeration of folders is not yet finished

Browse files
Files changed (6) hide show
  1. .gitattributes +1 -0
  2. .gitignore +2 -0
  3. HyperForensics-plus-plus.py +57 -31
  4. data.tar.gz +2 -2
  5. download_testing.py +0 -6
  6. zipping.sh +0 -12
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ data.tar.gz filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ download_testing.py
2
+ zipping.sh
HyperForensics-plus-plus.py CHANGED
@@ -15,6 +15,8 @@
15
  import csv
16
  import json
17
  import os
 
 
18
 
19
  import datasets
20
 
@@ -64,12 +66,12 @@ class HyperForensicsPlusPlus(datasets.GeneratorBasedBuilder):
64
  def _info(self):
65
  features = datasets.Features(
66
  {
67
- "origin": datasets.Image(), # The original HSI
68
  "label": datasets.Value("string"), # The label of the image
69
- "forgery": datasets.Image(), # The HSI after forgery
70
  "method": datasets.Value("string"), # The forgery method used
71
  # The bounding box of the forgery area, in the format [x1, x2, y1, y2, z1, z2]
72
- "bbox": datasets.Sequence(feature=datasets.Value(dtype='int8'), length=6)
73
  }
74
  )
75
 
@@ -95,12 +97,14 @@ class HyperForensicsPlusPlus(datasets.GeneratorBasedBuilder):
95
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
96
  urls = _URL
97
  data_dir = dl_manager.download_and_extract(urls)
 
 
98
  return [
99
  datasets.SplitGenerator(
100
  name=datasets.Split.TRAIN,
101
  # These kwargs will be passed to _generate_examples
102
  gen_kwargs={
103
- "filepath": os.path.join(data_dir, "metadata.jsonl"),
104
  "split": "train",
105
  },
106
  ),
@@ -108,36 +112,58 @@ class HyperForensicsPlusPlus(datasets.GeneratorBasedBuilder):
108
  name=datasets.Split.VALIDATION,
109
  # These kwargs will be passed to _generate_examples
110
  gen_kwargs={
111
- "filepath": os.path.join(data_dir, "metadata.jsonl"),
112
- "split": "val",
113
- },
114
- ),
115
- datasets.SplitGenerator(
116
- name=datasets.Split.TEST,
117
- # These kwargs will be passed to _generate_examples
118
- gen_kwargs={
119
- "filepath": os.path.join(data_dir, "metadata.jsonl"),
120
- "split": "test"
121
  },
122
  ),
 
 
 
 
 
 
 
 
123
  ]
 
 
 
 
 
 
 
 
124
 
125
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
126
  def _generate_examples(self, filepath, split):
127
-
128
- with open(filepath, encoding="utf-8") as f:
129
- for key, row in enumerate(f):
130
- data = json.loads(row)
131
- if self.config.name == "first_domain":
132
- # Yields examples as (key, example) tuples
133
- yield key, {
134
- "sentence": data["sentence"],
135
- "option1": data["option1"],
136
- "answer": "" if split == "test" else data["answer"],
137
- }
138
- else:
139
- yield key, {
140
- "sentence": data["sentence"],
141
- "option2": data["option2"],
142
- "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
143
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  import csv
16
  import json
17
  import os
18
+ import numpy as np
19
+ import tifffile as tiff # Install with `pip install tifffile`
20
 
21
  import datasets
22
 
 
66
  def _info(self):
67
  features = datasets.Features(
68
  {
69
+ "origin": datasets.Array3D(dtype="int16", shape=(256, 256, 172)), # The original HSI
70
  "label": datasets.Value("string"), # The label of the image
71
+ "forgery": datasets.Array3D(dtype="int16", shape=(256, 256, 172)), # The HSI after forgery
72
  "method": datasets.Value("string"), # The forgery method used
73
  # The bounding box of the forgery area, in the format [x1, x2, y1, y2, z1, z2]
74
+ "bbox": datasets.Sequence(feature=datasets.Value(dtype='int16'), length=6)
75
  }
76
  )
77
 
 
97
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
98
  urls = _URL
99
  data_dir = dl_manager.download_and_extract(urls)
100
+
101
+
102
  return [
103
  datasets.SplitGenerator(
104
  name=datasets.Split.TRAIN,
105
  # These kwargs will be passed to _generate_examples
106
  gen_kwargs={
107
+ "filepath": data_dir,
108
  "split": "train",
109
  },
110
  ),
 
112
  name=datasets.Split.VALIDATION,
113
  # These kwargs will be passed to _generate_examples
114
  gen_kwargs={
115
+ "filepath": data_dir,
116
+ "split": "validation",
 
 
 
 
 
 
 
 
117
  },
118
  ),
119
+ #datasets.SplitGenerator(
120
+ # name=datasets.Split.TEST,
121
+ # # These kwargs will be passed to _generate_examples
122
+ # gen_kwargs={
123
+ # "filepath": data_dir,
124
+ # "split": "testing"
125
+ # },
126
+ #),
127
  ]
128
+
129
+ def _load_npy_as_image(self, npy_path):
130
+ """
131
+ Load a .npy file and convert it to a PIL Image for datasets.Image(). Not using in current scope.
132
+ """
133
+ array = np.load(npy_path) # Load the .npy file as a NumPy array
134
+ image = tiff.imread(npy_path) # Convert to a PIL Image
135
+ return image
136
 
137
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
138
  def _generate_examples(self, filepath, split):
139
+ filepath = os.path.join(filepath, "data_testing")
140
+ with open(os.path.join(filepath, "metadata.jsonl"), encoding="utf-8") as f:
141
+ metadata = json.load(f) # Load the nested JSON object (train, validation, testing)
142
+
143
+ # Select the appropriate split (train, validation, or testing)
144
+ records = metadata[split]
145
+
146
+ for key, record in enumerate(records):
147
+ file_prefix = record["file_prefix"]
148
+ label = record["label"]
149
+ bbox = record["bbox"]
150
+
151
+ # Construct paths for the origin and forgery files
152
+ origin_path = os.path.join(
153
+ filepath, "ADMM-ADAM", "config0", f"{file_prefix}_inpaint_result(0).npy"
154
+ )
155
+ forgery_path = os.path.join(
156
+ filepath, "ADMM-ADAM", "config0", f"{file_prefix}_inpaint_result(0).npy"
157
+ )
158
+
159
+ # Load the .npy files as images
160
+ origin_image = np.load(origin_path) #np.load(origin_path)
161
+ forgery_image = np.load(forgery_path) #np.load(forgery_path)
162
+ # Yield the example
163
+ yield key, {
164
+ "origin": origin_image,
165
+ "label": label,
166
+ "forgery": forgery_image,
167
+ "method": "ADMM-ADAM", # Hardcoded for now; can be dynamic if needed
168
+ "bbox": bbox,
169
+ }
data.tar.gz CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:50c1387476e53b21681a90c897e45d6c404995b9cc21e2d2e4f468c10068884c
3
- size 4638814869
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:313ddddbb44d0946609a73c368eee0216919ba55c2229f5def9a754ae0c63a67
3
+ size 4651559462
download_testing.py DELETED
@@ -1,6 +0,0 @@
1
- import datasets
2
-
3
- dl_manager = datasets.DownloadManager()
4
- data_dir = dl_manager.download_and_extract("https://huggingface.co/datasets/OtoroLin/HyperForensics-plus-plus/resolve/main/data.tar.gz")
5
-
6
- print(data_dir)
 
 
 
 
 
 
 
zipping.sh DELETED
@@ -1,12 +0,0 @@
1
- #!/bin/bash
2
-
3
- # Define the folder to zip and the output zip file
4
- FOLDER_TO_ZIP="../data_testing"
5
- OUTPUT_TAR="./data.tar.gz"
6
-
7
- # Create the zip file
8
- echo "Zipping folder: $FOLDER_TO_ZIP"
9
- tar -czvf "$OUTPUT_TAR" "$FOLDER_TO_ZIP"
10
-
11
- # Confirm completion
12
- echo "Folder zipped successfully to: $OUTPUT_TAR"