FaVOSsubmission committed on
Commit
731dbf8
·
verified ·
1 Parent(s): d3ef029

Upload folder using huggingface_hub

Browse files
Files changed (7) hide show
  1. Annotations.zip +3 -0
  2. JPEGImages.zip +3 -0
  3. README.md +49 -3
  4. eval_jnf_v.py +391 -0
  5. favos-20.txt +100 -0
  6. favos-40.txt +100 -0
  7. requirements.txt +5 -0
Annotations.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e409046ae90cf324b4d9d801ed2d2b9d377b1fb76472099cb1d5628af166076
3
+ size 81620534
JPEGImages.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56077e69e1b0c27c3d5439732b69ce5e9fe8cd801c04fe40cc8cdc1ed7384171
3
+ size 5018261297
README.md CHANGED
@@ -1,3 +1,49 @@
1
- ---
2
- license: cc-by-nc-4.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # FaVOS
2
+
3
+ ## Download
4
+
5
+ ```bash
6
+ huggingface-cli download FaVOSsubmission/FaVOS \
7
+ --repo-type dataset \
8
+ --include "JPEGImages.zip" \
9
+ --include "Annotations.zip" \
10
+ --include "favos-20.txt" \
11
+ --include "favos-40.txt" \
12
+ --local-dir data
13
+
14
+ unzip -q data/JPEGImages.zip -d data
15
+ unzip -q data/Annotations.zip -d data
16
+ ```
17
+
18
+ ## J&F_v Evaluation
19
+
20
+ Assume the dataset is downloaded under `FaVOS/data/`:
21
+
22
+ ```text
23
+ FaVOS/data/
24
+ JPEGImages/
25
+ Annotations/
26
+ favos-20.txt
27
+ favos-40.txt
28
+ ```
29
+
30
+ Install dependencies:
31
+
32
+ ```bash
33
+ pip install -r requirements.txt
34
+ ```
35
+
36
+ Evaluate predictions:
37
+
38
+ ```bash
39
+ python eval_jnf_v.py \
40
+ --pred-root /path/to/predictions \
41
+ --output-dir results/jf_v
42
+ ```
43
+
44
+ Prediction masks should be indexed PNG files in one of these layouts:
45
+
46
+ ```text
47
+ /path/to/predictions/Annotations/<video_id>/<frame>.png
48
+ /path/to/predictions/<video_id>/<frame>.png
49
+ ```
eval_jnf_v.py ADDED
@@ -0,0 +1,391 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import csv
6
+ import json
7
+ import os
8
+ from concurrent.futures import ProcessPoolExecutor, as_completed
9
+ from dataclasses import asdict, dataclass
10
+ from pathlib import Path
11
+
12
+ import numpy as np
13
+ from PIL import Image
14
+ from scipy.ndimage import distance_transform_edt
15
+ from tqdm.auto import tqdm
16
+
17
+
18
# Root folder containing Annotations/, JPEGImages/ and the split list files.
DATASET_ROOT = Path("data")
# Evaluation splits; a "<split>.txt" file with one video id per line must exist for each.
SPLITS = ("favos-20", "favos-40")
# Default output location for CSV/JSON results (overridable via --output-dir).
OUTPUT_DIR = Path("results/jf_v")
# Boundary-match tolerance as a fraction of the frame diagonal (see spatial_radius).
BOUND_TH = 0.008
# Worker-process count for per-video evaluation, capped at 8.
WORKERS = min(8, os.cpu_count() or 1)
23
+
24
+
25
@dataclass(frozen=True)
class JNFVResult:
    """Volumetric J&F metrics for one (video, object) pair."""

    j_v: float  # volumetric region similarity (union-weighted per-frame IoU)
    f_v: float  # volumetric boundary F-measure
    jnf_v: float  # (j_v + f_v) / 2
    precision_v: float  # matched pred-boundary voxels / total pred-boundary voxels
    recall_v: float  # matched GT-boundary voxels / total GT-boundary voxels
    intersection_volume: int  # total GT-and-pred voxels over all evaluated frames
    union_volume: int  # total GT-or-pred voxels over all evaluated frames
    pred_boundary_voxels: int  # size of the predicted boundary set
    gt_boundary_voxels: int  # size of the ground-truth boundary set
36
+
37
+
38
@dataclass(frozen=True)
class ObjectResult:
    """One per-object CSV row: JNFVResult fields plus identification columns."""

    split: str  # split name; set to "all" during evaluation, re-tagged per split later
    video_id: str
    object_id: int  # non-zero label value of the object in the indexed PNGs
    num_frames: int  # number of evaluated frames (first GT frame is excluded)
    j_v: float
    f_v: float
    jnf_v: float
    precision_v: float
    recall_v: float
    intersection_volume: int
    union_volume: int
    pred_boundary_voxels: int
    gt_boundary_voxels: int
53
+
54
+
55
@dataclass(frozen=True)
class VideoResult:
    """One per-video CSV row: object-averaged scores for a single video."""

    split: str  # split name; "all" during evaluation, re-tagged per split later
    video_id: str
    num_objects: int  # number of foreground objects found in the GT masks
    num_frames: int  # number of evaluated frames (first GT frame is excluded)
    j_v: float  # mean over the video's objects
    f_v: float  # mean over the video's objects
    jnf_v: float  # mean over the video's objects
64
+
65
+
66
def parse_args() -> argparse.Namespace:
    """Parse the command line: ``--pred-root`` (required) and ``--output-dir``."""
    description = (
        "Evaluate J&F_v on FaVOS indexed-PNG predictions. "
        "The dataset is expected under data/."
    )
    cli = argparse.ArgumentParser(description=description)
    cli.add_argument(
        "--pred-root",
        type=Path,
        required=True,
        help="Prediction root. Accepts either <root>/Annotations/<video_id> or <root>/<video_id>.",
    )
    cli.add_argument("--output-dir", type=Path, default=OUTPUT_DIR)
    return cli.parse_args()
81
+
82
+
83
def numeric_pngs(path: Path) -> list[Path]:
    """Return the ``*.png`` files directly under ``path``, sorted by numeric stem.

    Non-files and AppleDouble sidecars (``._*``) are skipped.
    """
    candidates = (entry for entry in path.glob("*.png") if entry.is_file())
    frames = [entry for entry in candidates if not entry.name.startswith("._")]
    frames.sort(key=lambda entry: int(entry.stem))
    return frames
88
+
89
+
90
def load_label(path: Path) -> np.ndarray:
    """Decode one mask PNG into a uint8 label array.

    NOTE(review): assumes the file is an indexed/palette (single-channel) PNG
    yielding an (H, W) array; an RGB input would yield (H, W, 3) — confirm
    inputs are indexed PNGs as the README states.
    """
    with Image.open(path) as image:
        return np.asarray(image, dtype=np.uint8)
93
+
94
+
95
def read_split(dataset_root: Path, split: str) -> list[str]:
    """Load the ordered, de-duplicated video ids listed in ``<split>.txt``.

    Blank lines and lines starting with ``#`` are ignored.

    Raises:
        FileNotFoundError: if the split file does not exist.
    """
    split_file = dataset_root / f"{split}.txt"
    if not split_file.is_file():
        raise FileNotFoundError(f"Missing split file: {split_file}")
    seen: dict[str, None] = {}
    for raw_line in split_file.read_text(encoding="utf-8").splitlines():
        video_id = raw_line.strip()
        if video_id and not video_id.startswith("#"):
            seen.setdefault(video_id, None)
    return list(seen)
101
+
102
+
103
def resolve_pred_root(pred_root: Path) -> Path:
    """Accept both ``<root>/Annotations/<video>`` and ``<root>/<video>`` layouts."""
    candidate = pred_root / "Annotations"
    if candidate.is_dir():
        return candidate
    return pred_root
106
+
107
+
108
def load_video(gt_dir: Path, pred_dir: Path) -> tuple[np.ndarray, np.ndarray]:
    """Load aligned GT and prediction label stacks for one video.

    The first GT frame (the conditioning annotation) is excluded from the
    returned stacks, but a prediction must still exist for every GT frame name.

    Returns:
        ``(gt, pred)`` uint8 arrays of shape (T, H, W).

    Raises:
        FileNotFoundError: when GT masks or prediction masks are missing.
        ValueError: when no evaluation frames remain or the shapes differ.
    """
    gt_paths = numeric_pngs(gt_dir)
    pred_by_name = {frame.name: frame for frame in numeric_pngs(pred_dir)}
    if not gt_paths:
        raise FileNotFoundError(f"No GT masks found in {gt_dir}")

    missing = [frame.name for frame in gt_paths if frame.name not in pred_by_name]
    if missing:
        raise FileNotFoundError(f"Missing {len(missing)} prediction masks in {pred_dir}: {missing[:5]}")

    # Drop frame 0: it is the given annotation, not an evaluation target.
    eval_paths = gt_paths[1:]
    if not eval_paths:
        raise ValueError(f"No evaluation frames found in {gt_dir}")

    gt = np.stack([load_label(frame) for frame in eval_paths], axis=0)
    pred = np.stack([load_label(pred_by_name[frame.name]) for frame in eval_paths], axis=0)
    if gt.shape != pred.shape:
        raise ValueError(f"Shape mismatch for {gt_dir.name}: GT {gt.shape}, pred {pred.shape}")
    return gt, pred
127
+
128
+
129
def as_bool_volume(volume: np.ndarray) -> np.ndarray:
    """Validate that ``volume`` is rank-3 (T, H, W) and cast it to bool."""
    if volume.ndim == 3:
        return volume.astype(bool)
    raise ValueError(f"Expected (T,H,W), got {volume.shape}")
133
+
134
+
135
def mean_visible_area(volume: np.ndarray) -> float:
    """Mean per-frame foreground area over frames where the object is visible.

    Returns 0.0 when the object never appears in any frame.
    """
    per_frame = volume.sum(axis=(1, 2), dtype=np.int64)
    visible = per_frame[per_frame > 0]
    if visible.size == 0:
        return 0.0
    return float(np.mean(visible))
139
+
140
+
141
def spatial_radius(shape_2d: tuple[int, int], object_area: float, bound_th: float) -> int:
    """Boundary-match tolerance in pixels.

    ``bound_th`` is either an absolute pixel radius (>= 1) or a fraction of
    the frame diagonal. The radius is additionally capped at 10% of the square
    root of the object's mean visible area, tightening it for small objects.
    """
    if bound_th >= 1:
        tolerance = bound_th
    else:
        tolerance = bound_th * np.linalg.norm(shape_2d)
    if object_area > 0:
        tolerance = min(tolerance, 0.1 * np.sqrt(object_area))
    return max(int(np.ceil(tolerance)), 0)
146
+
147
+
148
def boundary_volume(volume: np.ndarray) -> np.ndarray:
    """Mark voxels that differ from any forward-shifted (t, y, x) neighbour.

    A voxel is flagged when its value differs from at least one of the seven
    neighbours reached by shifting the volume forward by 0/1 along each axis,
    so the boundary is extracted in the full 3D (temporal-spatial) volume.
    """
    volume = volume.astype(bool)
    t_len, y_len, x_len = volume.shape
    boundary = np.zeros_like(volume, dtype=bool)
    for dt, dy, dx in np.ndindex(2, 2, 2):
        if not (dt or dy or dx):
            continue  # zero shift would compare each voxel with itself
        src = (slice(0, t_len - dt), slice(0, y_len - dy), slice(0, x_len - dx))
        dst = (slice(dt, t_len), slice(dy, y_len), slice(dx, x_len))
        boundary[src] |= volume[src] ^ volume[dst]
    return boundary
168
+
169
+
170
def count_matches(source_boundary: np.ndarray, target_boundaries: np.ndarray, radius: int) -> int:
    """Count source-boundary pixels lying within ``radius`` of any target boundary.

    ``target_boundaries`` is a stack of 2D masks; the dilated union of their
    neighbourhoods (via Euclidean distance transform) is intersected with the
    source boundary.
    """
    if not (np.any(source_boundary) and np.any(target_boundaries)):
        return 0
    near_target = np.zeros_like(source_boundary, dtype=bool)
    for target_mask in target_boundaries:
        if not np.any(target_mask):
            continue
        near_target |= distance_transform_edt(~target_mask) <= radius
    return int(np.sum(source_boundary & near_target))
178
+
179
+
180
def compute_jnf_v(
    gt_volume: np.ndarray,
    pred_volume: np.ndarray,
) -> JNFVResult:
    """Compute volumetric J&F for one object over (T, H, W) mask stacks.

    J_v is the union-weighted mean of per-frame IoU, which algebraically
    reduces to total intersection / total union over the whole volume.
    F_v is a boundary F-measure: boundaries are extracted from the 3D
    volume, but matching is performed frame-by-frame within a 2D radius.

    Raises:
        ValueError: if the volumes are not both rank-3 or differ in shape.
    """
    gt = as_bool_volume(gt_volume)
    pred = as_bool_volume(pred_volume)
    if gt.shape != pred.shape:
        raise ValueError(f"Shape mismatch: GT {gt.shape}, pred {pred.shape}")

    # Per-frame region statistics (int64 to avoid overflow on large volumes).
    intersections = np.sum(gt & pred, axis=(1, 2), dtype=np.int64)
    unions = np.sum(gt | pred, axis=(1, 2), dtype=np.int64)
    total_intersection = int(np.sum(intersections, dtype=np.int64))
    total_union = int(np.sum(unions, dtype=np.int64))
    if total_union == 0:
        # Both volumes empty: perfect agreement by convention.
        j_v = 1.0
    else:
        nonempty = unions > 0
        j_v = float(np.sum((unions[nonempty] / total_union) * (intersections[nonempty] / unions[nonempty])))

    # Match tolerance scales with the frame diagonal, capped for small objects.
    radius = spatial_radius(gt.shape[1:], mean_visible_area(gt), BOUND_TH)
    gt_boundary = boundary_volume(gt)
    pred_boundary = boundary_volume(pred)

    matched_pred = 0
    matched_gt = 0
    total_pred_boundary = 0
    total_gt_boundary = 0
    for t in range(gt.shape[0]):
        gt_t = gt_boundary[t]
        pred_t = pred_boundary[t]
        n_gt = int(np.sum(gt_t))
        n_pred = int(np.sum(pred_t))
        total_gt_boundary += n_gt
        total_pred_boundary += n_pred
        # Boundaries are matched within the same frame only (2D distances).
        if n_gt:
            matched_gt += count_matches(gt_t, pred_boundary[t : t + 1], radius)
        if n_pred:
            matched_pred += count_matches(pred_t, gt_boundary[t : t + 1], radius)

    # Degenerate-boundary conventions: the side with no boundary scores 1.0.
    if total_pred_boundary == 0 and total_gt_boundary == 0:
        precision = 1.0
        recall = 1.0
    elif total_pred_boundary == 0:
        precision = 1.0
        recall = 0.0
    elif total_gt_boundary == 0:
        precision = 0.0
        recall = 1.0
    else:
        precision = float(matched_pred / total_pred_boundary)
        recall = float(matched_gt / total_gt_boundary)
    f_v = 0.0 if precision + recall == 0 else float(2.0 * precision * recall / (precision + recall))

    return JNFVResult(
        j_v=j_v,
        f_v=f_v,
        jnf_v=(j_v + f_v) / 2.0,
        precision_v=precision,
        recall_v=recall,
        intersection_volume=total_intersection,
        union_volume=total_union,
        pred_boundary_voxels=total_pred_boundary,
        gt_boundary_voxels=total_gt_boundary,
    )
244
+
245
+
246
def evaluate_video(task: tuple[str, Path, Path]) -> dict[str, object]:
    """Worker: evaluate every object of one video.

    Returns a dict carrying per-object and per-video results, or an ``error``
    payload instead of raising, so failures in worker processes remain
    attributable to their video id by the parent.
    """
    video_id, gt_root, pred_root = task
    try:
        gt, pred = load_video(gt_root / video_id, pred_root / video_id)
        # Objects are the non-zero label values present anywhere in the GT stack.
        object_ids = [int(v) for v in np.unique(gt) if int(v) != 0]
        if not object_ids:
            raise ValueError(f"No foreground objects in {gt_root / video_id}")

        objects: list[ObjectResult] = []
        for object_id in object_ids:
            pred_obj = pred == object_id
            result = compute_jnf_v(
                gt == object_id,
                pred_obj,
            )
            objects.append(
                ObjectResult(
                    split="all",  # re-tagged with the real split name in main()
                    video_id=video_id,
                    object_id=object_id,
                    num_frames=int(gt.shape[0]),
                    **asdict(result),
                )
            )

        # Per-video scores are unweighted means over the video's objects.
        video = VideoResult(
            split="all",
            video_id=video_id,
            num_objects=len(objects),
            num_frames=int(gt.shape[0]),
            j_v=float(np.mean([row.j_v for row in objects])),
            f_v=float(np.mean([row.f_v for row in objects])),
            jnf_v=float(np.mean([row.jnf_v for row in objects])),
        )
        return {"video_id": video_id, "objects": objects, "video": video, "error": None}
    except Exception as exc:
        # Broad catch is deliberate: serialize the failure back to the parent
        # process rather than crashing the pool worker.
        return {"video_id": video_id, "objects": [], "video": None, "error": {"video_id": video_id, "error": str(exc)}}
283
+
284
+
285
def write_csv(path: Path, rows: list[dict[str, object]]) -> None:
    """Write ``rows`` as CSV with a header from the first row; no-op when empty."""
    if not rows:
        return
    path.parent.mkdir(parents=True, exist_ok=True)
    fieldnames = list(rows[0].keys())
    with path.open("w", newline="", encoding="utf-8") as handle:
        writer = csv.DictWriter(handle, fieldnames=fieldnames)
        writer.writeheader()
        for row in rows:
            writer.writerow(row)
293
+
294
+
295
def summarize(split: str, objects: list[ObjectResult], videos: list[VideoResult]) -> dict[str, object]:
    """Aggregate per-object and per-video rows into one summary record.

    Object-level means weight every object equally; the ``per_video_*`` means
    weight every video equally. Empty inputs yield 0.0 for each mean.
    """

    def _mean(values: list[float]) -> float:
        # Convention: an empty slice contributes a 0.0 score, not NaN.
        return float(np.mean(values)) if values else 0.0

    return {
        "split": split,
        "num_videos": len(videos),
        "num_objects": len(objects),
        "num_object_frames": int(sum(row.num_frames for row in objects)),
        "j_v": _mean([row.j_v for row in objects]),
        "f_v": _mean([row.f_v for row in objects]),
        "jnf_v": _mean([row.jnf_v for row in objects]),
        "per_video_j_v": _mean([row.j_v for row in videos]),
        "per_video_f_v": _mean([row.f_v for row in videos]),
        "per_video_jnf_v": _mean([row.jnf_v for row in videos]),
    }
308
+
309
+
310
def main() -> int:
    """Entry point: evaluate every split video and write CSV/JSON results.

    Returns 0 on success; raises FileNotFoundError/ValueError/RuntimeError on
    missing data or a failed video evaluation.
    """
    args = parse_args()
    dataset_root = DATASET_ROOT
    gt_root = dataset_root / "Annotations"
    pred_root = resolve_pred_root(args.pred_root)
    if not gt_root.is_dir():
        raise FileNotFoundError(f"Missing GT root: {gt_root}")
    if not pred_root.is_dir():
        raise FileNotFoundError(f"Missing prediction root: {pred_root}")

    # Evaluate the union of both splits once; results are re-attributed to
    # each split afterwards, so overlapping videos are not computed twice.
    splits = {split: read_split(dataset_root, split) for split in SPLITS}
    unique_ids = sorted({video_id for ids in splits.values() for video_id in ids})
    if not unique_ids:
        raise ValueError("No videos selected for evaluation.")

    tasks = [
        (
            video_id,
            gt_root,
            pred_root,
        )
        for video_id in unique_ids
    ]

    objects_by_video: dict[str, list[ObjectResult]] = {}
    videos_by_video: dict[str, VideoResult] = {}
    max_workers = min(max(WORKERS, 1), len(tasks))

    if max_workers == 1:
        # Serial path: avoids process-pool overhead for a single worker.
        iterator = tqdm(tasks, desc="Evaluating", unit="video")
        for task in iterator:
            result = evaluate_video(task)
            if result["error"] is not None:
                raise RuntimeError(f"Failed evaluating {result['video_id']}: {result['error']['error']}")
            objects_by_video[result["video_id"]] = result["objects"]
            videos_by_video[result["video_id"]] = result["video"]
    else:
        with ProcessPoolExecutor(max_workers=max_workers) as executor:
            futures = [executor.submit(evaluate_video, task) for task in tasks]
            iterator = as_completed(futures)
            iterator = tqdm(iterator, total=len(futures), desc="Evaluating", unit="video")
            for future in iterator:
                result = future.result()
                if result["error"] is not None:
                    raise RuntimeError(f"Failed evaluating {result['video_id']}: {result['error']['error']}")
                objects_by_video[result["video_id"]] = result["objects"]
                videos_by_video[result["video_id"]] = result["video"]

    all_objects: list[ObjectResult] = []
    all_videos: list[VideoResult] = []
    summaries: dict[str, dict[str, object]] = {}
    for split, ids in splits.items():
        split_objects: list[ObjectResult] = []
        split_videos: list[VideoResult] = []
        for video_id in ids:
            # Re-tag the shared results with the split they are reported under.
            for row in objects_by_video.get(video_id, []):
                split_objects.append(ObjectResult(split=split, **{k: v for k, v in asdict(row).items() if k != "split"}))
            if video_id in videos_by_video:
                row = videos_by_video[video_id]
                split_videos.append(VideoResult(split=split, **{k: v for k, v in asdict(row).items() if k != "split"}))
        all_objects.extend(split_objects)
        all_videos.extend(split_videos)
        summaries[split] = summarize(split, split_objects, split_videos)

    args.output_dir.mkdir(parents=True, exist_ok=True)
    write_csv(args.output_dir / "per_object_jf_v.csv", [asdict(row) for row in all_objects])
    write_csv(args.output_dir / "per_video_jf_v.csv", [asdict(row) for row in all_videos])
    summary = {
        "splits": summaries,
    }
    (args.output_dir / "summary.json").write_text(json.dumps(summary, indent=2, sort_keys=True) + "\n", encoding="utf-8")

    # Console report: one line per split with object-averaged scores.
    for split, row in summaries.items():
        print(
            f"{split}: videos={row['num_videos']} objects={row['num_objects']} "
            f"Jv={row['j_v']:.6f} Fv={row['f_v']:.6f} J&F_v={row['jnf_v']:.6f}"
        )
    return 0
388
+
389
+
390
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    raise SystemExit(main())
favos-20.txt ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 510ca352de434ce047880f09b49d4fb1
2
+ 79e6485ce65bcff58059665348851c3a
3
+ 423e5ffc5bced29d9ad33aae06f672fd
4
+ d7cc905aff0a9c4337ae23def1152f62
5
+ 1763fc52bde3718770285f2f105a11cb
6
+ d4edc6d59d1273f8c57041a073f8025a
7
+ ff1222464c9dca5e29dd249e70217051
8
+ e628894ab73cdb0976cfb28900459f85
9
+ 8274ee49efaaeeb44fecee4c4c07e4df
10
+ 2f4cb6ffb406a4dc10074e1138fef7b9
11
+ c23cc5bf4c7c04233ad9a3e8f137c21b
12
+ bee9a94d93e00c24576985f359af91e7
13
+ 00c92e9a7ec54d0c3e857c157103f99c
14
+ 153b704766c803680be1069f23866c0d
15
+ 1e5d0cc1ba9e55408e46837424b288a2
16
+ 0a6f61f31a7c26f6218bc88c089efad6
17
+ 5f5167989358d014efc38ccc795b5b06
18
+ 7e833a447115188d4eb75a47cb199a93
19
+ 0ee9797f6ee00377c04fc8641e6c34b9
20
+ 85fe83123ac1f1fda088cc84d06407e7
21
+ 38da9241ba734fc0c459618c7486eb89
22
+ 25957feda7d3739aaf14c005feee1ef4
23
+ c87e2a1aecabf67710f8da466f590119
24
+ 0dde0554b6c9fed87fab441945b174cc
25
+ f3227d95d567045a0756d262af5f8810
26
+ 9ebd5ab53f3fdc74e9b5779d7d33a0bc
27
+ acdf7be22f237731a575509e0f8bd562
28
+ d3eae82414483d0ed4b307c2ed9308dc
29
+ 34267d0c93e49c569ba8385af38d6c3c
30
+ 6d1b4a7a8b9e62ecbd0f059aba830fed
31
+ 8cc0996707f3c98847d6accd398cae4a
32
+ c7efe295f111c89c5446c1896635c56f
33
+ dd3faeadfc531bb409591733db39e91e
34
+ 7d7932c43d06cf7009bc77b5fb5887a7
35
+ a2269e73813affec4f49c6febd85f93d
36
+ b82baa030353a7072908f27342f9de0e
37
+ fceb1e96b99293ca080b76fd678b4b6b
38
+ 85883f9eb511a404cbb1db27297b21e7
39
+ 979098af4c56441a25dadecbee5e69e2
40
+ c80d646e03734936754a6ddbb458afc3
41
+ da0940b2ccd52aad70867952bb3cf6e6
42
+ 7bacee56c447ffab952faa788cbd5c17
43
+ 9fc9ab7f048a1340fdd97ae78b8f2806
44
+ 19c78585aa31b4d07a914de97a7e4b66
45
+ 64e63ff853a8a4543e72538c43a952ac
46
+ 51ba04e5ce9ac6b0dbd733701b390e2b
47
+ 96aa657b5e118fa1968f4cc5193a336f
48
+ bfbf0477617337d3eaff9b7fcf3ebd68
49
+ e61f89e98345af7a873f262ec554ca56
50
+ 9f29fb0d57673bf3980d991555ea5220
51
+ 50b6658d8d2710fa42ccda4386198584
52
+ 90505bb788b5fedc7d0a01e7fd0df4d0
53
+ d8e3a1ee59b4a01d9935fcf74897a993
54
+ de24a6d99245d646a211d8238766f4ef
55
+ 7a7e24b51fc6d4b6d81e6f95eb279c37
56
+ 5e1e500cac40625eb6838427411a5708
57
+ 990b0902db2522a6b70391283b69d1c5
58
+ 1b3910140cae94b1c8ff9b50496faee2
59
+ 2ef88ebd235b393d723838f59856821e
60
+ 995c7b782b8cdf7eb7384e4817185d2e
61
+ 3b376c82b85ff4285a4722ca312f84d8
62
+ 270d5b7afd61336af86b020d20deb765
63
+ ee210b1ce55ef31ef383aebda8a4341b
64
+ 2ee8130f4ea648a3484b9e9e2b55637c
65
+ 61c8d0df22325001b0afba3fabcdf882
66
+ e5603ce1b13cc4ebc56afe013ee3c0c8
67
+ 7e2020fb5885e05fd13d4ad2e932550b
68
+ d226434439d033ee9fbd501b6cc4d5f9
69
+ 83c3e21fba30c40d5f28f33b056ba374
70
+ 3cd95d40559fa81acba263bd77e59da6
71
+ f3598c3a5b2d094cb3fcf2dbced80e52
72
+ 3c36b6c7d6d6120f496666706be114b1
73
+ 8c8f9d28ca4f35048a8b1d610150efc5
74
+ 72cb7a00ca57a18c4e3fb852b78c33ef
75
+ d7cb2e252b48cb82c329c64b043b0421
76
+ dfbac4e6a082e805468529a391e2b594
77
+ 4538bc2348c23748c7146071ae7b48b3
78
+ 58fb9ec9133eb0f56ccb1d0f4fe754d4
79
+ 05385ba87b8eb13f760700e31dd6f95a
80
+ 5025e98042f5fc3e54bbc9f81151bd7e
81
+ c4f2af9ae8283cfd84c8f6a5f29f3260
82
+ 5a33a5383d9754278cdca828f4186567
83
+ b3865a1c6b1033408c39e940d6dd1059
84
+ 145228a3367ae38b3873004db7271788
85
+ 3327744fbe807ee40e7e2b64609817db
86
+ b791939c01330564e1c8673171219161
87
+ 8317e25281cb9402701973753dc999b0
88
+ f84c7f2017aaec513f6c886fa9a5654a
89
+ 7f661ad675d0139b2645293e6d7b91e8
90
+ b4f0e4d5c1041249d8f6e9bf41763695
91
+ e0b0165909cd2a6ffe20d3f809f4a937
92
+ 99b0480e105bc4a1410c39daa7696b17
93
+ 43f0500520c50fc31c56e28167f6e51b
94
+ 496957948545345c33b8837dc236f96a
95
+ 6684eb2794511605a0604c1c472f6e5f
96
+ 1997593e511d401515fef7479057e6bb
97
+ fba1e215b4e94802573bf986c2445c54
98
+ f876c2f2a3ca802ae1109f81a70bb3c0
99
+ ed1a4227cb95f651078e3e6f78eb4ce0
100
+ 92d12c7fa5635a251de006b7b7c4eba6
favos-40.txt ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 4750de5a4ed73ac3ed7eda996d0267d7
2
+ 94253e245eac9a7966f320ddc12aef77
3
+ d4daf55888ec2412f3364d727ea59676
4
+ 56a8974858d46772b14884c3b3818acc
5
+ 8171d12d7221df620aca9604337011ba
6
+ aabcaea6a028bbb9d493d5d9e54a3aa2
7
+ cc417bba46653c48e721b868aa22f8d0
8
+ 825f26bdc37c37f5a3b126d9ddf21294
9
+ 7579cb830d7d9f60a4df6d66919141bb
10
+ ea004c817ad6ed08772b078395f40ea4
11
+ 188153a03c31d5a3e177a792a39609aa
12
+ 3fcdb07cf93310be97fb559ac63ec6ee
13
+ 3e5e416c827648535a0b7ce27e99b8f7
14
+ 890634d1700d7c1c1ca7f3854b25cc15
15
+ fd3b065ab8013a2e79ed62be1c7b8432
16
+ bad2a11729d1433453255ed44a252894
17
+ 7f70a2127da7fa077f4c2f25b8856115
18
+ 0961c51cfd94d7cc6027adb8c5ea351a
19
+ cce57a8b3f84e83c9e2b4cdd5e912df1
20
+ 7023cddaddd36e9d1aa893b6b2ffefb7
21
+ d098c0baf60420840561bbb1d8c11f03
22
+ 80727c8eab53b9d4d3f2c65dd6260f4f
23
+ 14683ed05ff1f4249a866300fa740afd
24
+ 11507069f926df5d0e120a0e925a44f4
25
+ e25386a824771c3830277870e219330d
26
+ 9aeb0ae9f74e2cc72dc9ab05ed827509
27
+ 9a3fa34af8963d03315cda02686f8a6d
28
+ be05d0f2a0a11b41cdf27dd786a7d374
29
+ d81bcac5eb3b1ba82920c01f2bcff550
30
+ bc767f91bed2677aa84edc1cf6cc4d8e
31
+ eb97baf602d734a4f79fc91b84d5ab90
32
+ 6038639557c363c42eb271cfc2bc14c9
33
+ 0ce210231a7803ee7ea39873190b446c
34
+ c17639673399150246480ad5944724c2
35
+ 507e1dc1fea9826562b7ec570e242fa0
36
+ ed7a8041a84fb90ba764af9639f71120
37
+ 639d041857f19c990a6a9bfb2f681f1f
38
+ b3c8a9f1e3981587a0cc318c020389fc
39
+ 956dd29b84e65251d74dc0921ff00d3a
40
+ 3c541ecada626e6a7490f0d466e5cb5a
41
+ 027361154062d57242859b91bb31f458
42
+ ce642bffcf1b4f332d3ee3194aa8c910
43
+ d9ca643c71220034c1f509bf3bb7bb6c
44
+ 6548bf59ebdbb9d0f87a86e5c0f62008
45
+ 46021c4ab2273a426179f20746a64056
46
+ 622a28c3b083546daf33020da1a60d8b
47
+ 4fd9b7e01f69ae0bc7010c704413ee7c
48
+ 9493e12c06bd851f232449da1ebeb455
49
+ e3097db434b19ef61e18be0aad0998a4
50
+ a9fbc0b25ef74ddf903793f1ec5e3d40
51
+ e85ba3fcc2ca9aef56eb4e24f51b557a
52
+ 083c4d4e5e9a5a797fb73861f6e0db7c
53
+ 6d5e9284da79564aa97332349105ae69
54
+ 6f020ff2874d30ca512629d4173ce9b1
55
+ 261bc95d11516aca19aa790652054a49
56
+ 6482e2044989f4c0188cc6c7fa04ed39
57
+ 1b3aa93dcd35bcc2969b6c6ea3df8957
58
+ 6c84d376b4525a0987f612911d588a17
59
+ b9f4a9bb89e5d0e772fe89e026489c84
60
+ 0fe93221b3cfc9d3144d986ed4d154dd
61
+ 8a404976cbb72a1b4420cbaeebf779df
62
+ 31794cb13399f114ca2d48ab9705e263
63
+ 1b1e5e98ceadae3c97ec3e697edb504e
64
+ 74f3fdfbdb7e1d9c77dbb8393a2930c4
65
+ 193986c0dadff682425d2503994f6f74
66
+ 6fa3bc477c4d141e2bc02536fd9d341d
67
+ 443d366bdbafde1df65854c63999f276
68
+ a5cbeceefb0bbe01164f9d071265515d
69
+ 613959df1f96df73142c053ef4149521
70
+ 341abea5d530fb03c0543c77e9b7547a
71
+ 0e3606e492c97f16b9b00938992b43fa
72
+ 25095ccdf53e5212769155a3ac35883a
73
+ e632f5529cdaa16297f6ece0d85f528a
74
+ 3d0fd71dbd7995dfe9790b3b6664ec4d
75
+ 905cd1071101c1f5021580349d81b8d7
76
+ d1ece9bbd0b7943b807d7a6bca660521
77
+ 5c392f7e62c0833639b9f294f4f4047a
78
+ ab9850dc9ce66da7a38e2f31de75c3be
79
+ 598aa6e379588116c20fbe0c9ac618cf
80
+ 4fcac6d55bba3bdc0cf7b0173f9fc3bf
81
+ bf9167f01ab7ab3a2fa9479cf6468575
82
+ 67d270ad20e288b49dbe8d4118da4dca
83
+ 7f63e595c5852986a6e5577c1d10f081
84
+ b9458c4ddd1d97ecb10488394fbaae3b
85
+ e446850c7bb6f71273bb2980a57e61e3
86
+ 9599d68f7d2d5a42a048c7fea7160de6
87
+ 0ddb3ccdb8cd010c226b30a13da8c867
88
+ 8e0d47249ed76464abd46a9099d36988
89
+ 5c57659d33c5c763be2d39c789c24fcb
90
+ 73e765d9e7d10d5e352e0a0ca8872939
91
+ c3382e7f8c961173514ba7c0e6ea29ff
92
+ 18c22726d269d275640765ce76b6214e
93
+ 0318cc320c7f8cde3d189e12e4ad49d1
94
+ e55bd1ae4cfa387a56402d567bfba8ac
95
+ e374cccf987d407ce4d4b76f2fa08118
96
+ 8d3c0469eee6e9d09eeb59fae481af0a
97
+ 34ef04cd9fe808c3b69855fd559c6f0a
98
+ f91be6e652955638cfcd765284efbd64
99
+ 42fb18ae777530addb2d71c8bd97e9b8
100
+ 6a6ce5aaf815cd2e8788d220d5b758a6
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ numpy
2
+ pillow
3
+ scipy
4
+ tqdm
5
+ huggingface_hub