Datasets:
duytranus
committed on
Commit
·
db06072
1
Parent(s):
45fabf9
initial dataset commit
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +24 -54
- README.md +164 -0
- data/multi_test.json +0 -0
- data/multi_train.json +0 -0
- data/single_test.json +0 -0
- data/single_train.json +0 -0
- folder.py +63 -0
- images/10.jpg +3 -0
- images/100.jpg +3 -0
- images/1000.jpg +3 -0
- images/1002.jpg +3 -0
- images/1003.jpg +3 -0
- images/1004.jpg +3 -0
- images/1008.jpg +3 -0
- images/1009.jpg +3 -0
- images/1011.jpg +3 -0
- images/1012.jpg +3 -0
- images/1013.jpg +3 -0
- images/1017.jpg +3 -0
- images/1018.jpg +3 -0
- images/1023.jpg +3 -0
- images/1026.jpg +3 -0
- images/1028.jpg +3 -0
- images/1034.jpg +3 -0
- images/1035.jpg +3 -0
- images/1036.jpg +3 -0
- images/1037.jpg +3 -0
- images/1040.jpg +3 -0
- images/1050.jpg +3 -0
- images/1051.jpg +3 -0
- images/1056.jpg +3 -0
- images/1057.jpg +3 -0
- images/1058.jpg +3 -0
- images/106.jpg +3 -0
- images/1060.jpg +3 -0
- images/1063.jpg +3 -0
- images/1065.jpg +3 -0
- images/1066.jpg +3 -0
- images/1069.jpg +3 -0
- images/1073.jpg +3 -0
- images/1076.jpg +3 -0
- images/1077.jpg +3 -0
- images/1082.jpg +3 -0
- images/1087.jpg +3 -0
- images/1089.jpg +3 -0
- images/1090.jpg +3 -0
- images/1091.jpg +3 -0
- images/1095.jpg +3 -0
- images/1096.jpg +3 -0
- images/1097.jpg +3 -0
.gitattributes
CHANGED
|
@@ -1,59 +1,29 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
*.
|
| 5 |
-
*.
|
| 6 |
-
*.
|
| 7 |
-
*.
|
| 8 |
-
*.
|
| 9 |
-
|
| 10 |
-
*.
|
| 11 |
-
|
| 12 |
-
*.
|
| 13 |
-
|
| 14 |
-
*.
|
| 15 |
-
|
| 16 |
-
*.
|
| 17 |
-
|
| 18 |
-
*.
|
| 19 |
-
|
| 20 |
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 21 |
-
*.
|
| 22 |
-
*.
|
| 23 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 24 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 36 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 37 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 38 |
-
# Audio files - uncompressed
|
| 39 |
-
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 40 |
-
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 41 |
-
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 42 |
-
# Audio files - compressed
|
| 43 |
-
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 44 |
-
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 45 |
-
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 46 |
-
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 47 |
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 48 |
-
|
| 49 |
-
*.
|
| 50 |
-
*.
|
| 51 |
-
*.png filter=lfs diff=lfs merge=lfs -text
|
| 52 |
-
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 53 |
-
# Image files - compressed
|
| 54 |
-
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 55 |
-
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 56 |
-
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 57 |
-
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 1 |
+
# Auto detect text files and perform LF normalization
|
| 2 |
+
* text=auto
|
| 3 |
+
# Images
|
| 4 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
# JSON files
|
| 10 |
+
*.json text
|
| 11 |
+
# Markdown files
|
| 12 |
+
*.md text
|
| 13 |
+
# Python files
|
| 14 |
+
*.py text
|
| 15 |
+
# Text files
|
| 16 |
+
*.txt text
|
| 17 |
+
# CSV files
|
| 18 |
+
*.csv filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
images/* filter=lfs diff=lfs merge=lfs -text
|
| 20 |
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.jsonl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pdf filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 29 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
language:
|
| 3 |
+
- vi
|
| 4 |
+
- en
|
| 5 |
+
task_categories:
|
| 6 |
+
- visual-question-answering
|
| 7 |
+
- question-answering
|
| 8 |
+
tags:
|
| 9 |
+
- infographic
|
| 10 |
+
- vietnamese
|
| 11 |
+
- vqa
|
| 12 |
+
- document-understanding
|
| 13 |
+
size_categories:
|
| 14 |
+
- 10K<n<100K
|
| 15 |
+
---
|
| 16 |
+
|
| 17 |
+
# ViInfographicVQA
|
| 18 |
+
|
| 19 |
+
## Overview
|
| 20 |
+
|
| 21 |
+
**ViInfographicVQA** is a Vietnamese **Visual Question Answering (VQA)** benchmark for **infographic understanding**.
|
| 22 |
+
It evaluates models’ ability to **read, reason, and synthesize information** from data-rich, layout-heavy visuals that mix **text, charts, maps, and design elements**.
|
| 23 |
+
|
| 24 |
+
Two settings are provided:
|
| 25 |
+
- **Single-image VQA** – questions answered from one infographic.
|
| 26 |
+
- **Multi-image VQA** – questions requiring reasoning across multiple, semantically related infographics.
|
| 27 |
+
|
| 28 |
+
---
|
| 29 |
+
|
| 30 |
+
## 📊 Dataset Summary
|
| 31 |
+
|
| 32 |
+
| Split | #Images | #QAs | Description |
|
| 33 |
+
|----------------------|--------:|------:|-------------------------------------------|
|
| 34 |
+
| Single-image (train) | 1,787 | 12,521| VQA on individual infographics |
|
| 35 |
+
| Single-image (test) | 193 | 1,374 | Held-out evaluation |
|
| 36 |
+
| Multi-image (train) | 5,861 | 5,878 | Cross-image reasoning (training) |
|
| 37 |
+
| Multi-image (test) | 653 | 636 | Cross-image reasoning (test) |
|
| 38 |
+
| **Total** | **6,747** | **20,409** | Across all splits |
|
| 39 |
+
|
| 40 |
+
- **Language:** Vietnamese
|
| 41 |
+
- **Domains:** Economy, Healthcare, Education, Society & Culture, Disasters & Accidents, Sports & Arts, Weather, etc.
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
## 🗂️ Repository Layout
|
| 45 |
+
|
| 46 |
+
```
|
| 47 |
+
|
| 48 |
+
ViInfographicVQA/
|
| 49 |
+
├── images/ # all image files (referenced by filename)
|
| 50 |
+
├── <parquet files> # four splits stored as parquet shards on the Hub
|
| 51 |
+
└── README.md
|
| 52 |
+
|
| 53 |
+
```
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
## 🚀 Quickstart
|
| 57 |
+
|
| 58 |
+
```python
|
| 59 |
+
from datasets import load_dataset
|
| 60 |
+
|
| 61 |
+
# Load all splits (parquet)
|
| 62 |
+
ds = load_dataset("VLAI-AIVN/ViInfographicVQA")
|
| 63 |
+
|
| 64 |
+
single_train = ds["single_train"]
|
| 65 |
+
multi_train = ds["multi_train"]
|
| 66 |
+
|
| 67 |
+
# Each sample:
|
| 68 |
+
# - images_paths: list of filenames (relative to `images/`)
|
| 69 |
+
# - image: preview Image() (the first file)
|
| 70 |
+
ex = multi_train[0]
|
| 71 |
+
print(ex["images_paths"]) # e.g. ["13321.jpg", "13028.jpg", "13458.jpg"]
|
| 72 |
+
preview = ex["image"] # PIL.Image preview (for quick visualization)
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
+
### Read **all images** for multi-image samples (no local download)
|
| 76 |
+
|
| 77 |
+
Use Hub file URIs, then cast to `Image()`:
|
| 78 |
+
|
| 79 |
+
```python
|
| 80 |
+
from datasets import Image, Sequence, load_dataset
|
| 81 |
+
|
| 82 |
+
ds = load_dataset("VLAI-AIVN/ViInfographicVQA")
|
| 83 |
+
repo_base = "hf://datasets/VLAI-AIVN/ViInfographicVQA/images"
|
| 84 |
+
|
| 85 |
+
def add_full_paths(example):
|
| 86 |
+
example["images_full"] = [f"{repo_base}/{fn}" for fn in example["images_paths"]]
|
| 87 |
+
return example
|
| 88 |
+
|
| 89 |
+
multi = ds["multi_train"].map(add_full_paths, remove_columns=[])
|
| 90 |
+
multi = multi.cast_column("images_full", Sequence(Image()))
|
| 91 |
+
|
| 92 |
+
all_imgs = multi[0]["images_full"] # list[PIL.Image] — all referenced images
|
| 93 |
+
```
|
| 94 |
+
|
| 95 |
+
### Streaming (large-scale training)
|
| 96 |
+
|
| 97 |
+
```python
|
| 98 |
+
from datasets import load_dataset, Image, Sequence
|
| 99 |
+
|
| 100 |
+
ds = load_dataset("VLAI-AIVN/ViInfographicVQA", streaming=True)
|
| 101 |
+
repo_base = "hf://datasets/VLAI-AIVN/ViInfographicVQA/images"
|
| 102 |
+
|
| 103 |
+
def add_full_paths(example):
|
| 104 |
+
example["images_full"] = [f"{repo_base}/{fn}" for fn in example["images_paths"]]
|
| 105 |
+
return example
|
| 106 |
+
|
| 107 |
+
multi_stream = ds["multi_train"].map(add_full_paths)
|
| 108 |
+
multi_stream = multi_stream.cast_column("images_full", Sequence(Image()))
|
| 109 |
+
|
| 110 |
+
ex = next(iter(multi_stream))
|
| 111 |
+
imgs = ex["images_full"] # list of PIL.Image (lazy/streamed)
|
| 112 |
+
```
|
| 113 |
+
|
| 114 |
+
### Local download (offline use)
|
| 115 |
+
|
| 116 |
+
```python
|
| 117 |
+
from huggingface_hub import snapshot_download
|
| 118 |
+
from datasets import load_dataset
|
| 119 |
+
|
| 120 |
+
# Download the entire dataset repo locally (parquet + images)
|
| 121 |
+
local_dir = snapshot_download(repo_id="VLAI-AIVN/ViInfographicVQA", repo_type="dataset")
|
| 122 |
+
|
| 123 |
+
# Load from disk
|
| 124 |
+
ds = load_dataset(local_dir)
|
| 125 |
+
|
| 126 |
+
# Reconstruct absolute paths to images on disk if needed:
|
| 127 |
+
import os
|
| 128 |
+
images_root = os.path.join(local_dir, "images")
|
| 129 |
+
def to_abs(example):
|
| 130 |
+
example["images_abs"] = [os.path.join(images_root, fn) for fn in example["images_paths"]]
|
| 131 |
+
return example
|
| 132 |
+
|
| 133 |
+
multi_local = ds["multi_train"].map(to_abs)
|
| 134 |
+
print(multi_local[0]["images_abs"][:3]) # ['/.../images/13321.jpg', ...]
|
| 135 |
+
```
|
| 136 |
+
|
| 137 |
+
> **Speed tip:** set `HF_HUB_ENABLE_HF_TRANSFER=1` to accelerate uploads/downloads.
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
## 🔍 Research Applications
|
| 141 |
+
|
| 142 |
+
* Multimodal reasoning on charts, tables, and dense text
|
| 143 |
+
* Cross-image synthesis and comparison
|
| 144 |
+
* Low-resource VQA in Vietnamese
|
| 145 |
+
* Evaluation of OCR, layout parsing, and numerical reasoning
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
## 🧮 Evaluation
|
| 149 |
+
|
| 150 |
+
We use **Average Normalized Levenshtein Similarity (ANLS)** for string-based answer evaluation, which tolerates minor textual variations while penalizing semantic errors.
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
## 📚 Citation
|
| 154 |
+
|
| 155 |
+
If you use this dataset, please cite:
|
| 156 |
+
|
| 157 |
+
```bibtex
|
| 158 |
+
@article{van2025viinfographicvqa,
|
| 159 |
+
title={ViInfographicVQA: A Benchmark for Single and Multi-image Visual Question Answering on Vietnamese Infographics},
|
| 160 |
+
author={Van-Dinh, Tue-Thu and Tran, Hoang-Duy and Duong, Truong-Binh and Pham, Mai-Hanh and Le-Nguyen, Binh-Nam and Nguyen, Quoc-Thai},
|
| 161 |
+
journal={Proceedings of the AAAI Conference on Artificial Intelligence},
|
| 162 |
+
year={2026}
|
| 163 |
+
}
|
| 164 |
+
```
|
data/multi_test.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/multi_train.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/single_test.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/single_train.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
folder.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import argparse
|
| 3 |
+
import logging
|
| 4 |
+
from huggingface_hub import HfApi, login
|
| 5 |
+
|
| 6 |
+
# Configure the root logger at INFO so upload-progress messages are visible
# when the script runs from the command line.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("hf-uploader")
|
| 8 |
+
|
| 9 |
+
def main():
    """Parse CLI options and push the local images folder and README.md to a
    Hugging Face dataset repository.

    Authentication is optional: pass --hf_token, or rely on a prior
    `huggingface-cli login`.
    """
    parser = argparse.ArgumentParser(description="Upload images/ folder and README.md to a HF dataset repo")
    parser.add_argument("--repo_id", required=True, help="e.g. VLAI-AIVN/ViInfographicVQA")
    parser.add_argument("--hf_token", default=None, help="Optional; or use `huggingface-cli login`")
    parser.add_argument("--images_dir", default="images", help="Local images folder to upload")
    parser.add_argument("--readme", default="README.md", help="Local README.md to upload")
    parser.add_argument("--branch", default=None, help="Optional target revision/branch (e.g., parquet-v1)")
    parser.add_argument("--commit_message", default="Upload images folder and README", help="Commit message")
    parser.add_argument("--skip_images", action="store_true", help="Skip uploading images folder")
    parser.add_argument("--skip_readme", action="store_true", help="Skip uploading README.md")
    args = parser.parse_args()

    # Authenticate only when a token was supplied on the command line;
    # otherwise the cached CLI credentials are used implicitly.
    if args.hf_token:
        login(token=args.hf_token)

    api = HfApi()

    if not args.skip_images:
        _upload_images(api, args)

    if not args.skip_readme:
        _upload_readme(api, args)

    logger.info("🎉 Done.")


def _upload_images(api, args):
    """Upload the local images directory to `images/` in the dataset repo."""
    if not os.path.isdir(args.images_dir):
        raise FileNotFoundError(f"images_dir not found: {args.images_dir}")
    logger.info(f"Uploading folder '{args.images_dir}' -> {args.repo_id}/images (repo_type=dataset)")
    api.upload_folder(
        folder_path=args.images_dir,
        repo_id=args.repo_id,
        repo_type="dataset",
        path_in_repo="images",
        commit_message=args.commit_message,
        revision=args.branch,
        allow_patterns=None,  # or e.g. ["*.jpg","*.png"]
        ignore_patterns=None,
    )
    logger.info("✅ Images uploaded")


def _upload_readme(api, args):
    """Upload the local README.md to the root of the dataset repo."""
    if not os.path.isfile(args.readme):
        raise FileNotFoundError(f"README not found: {args.readme}")
    logger.info(f"Uploading README.md -> {args.repo_id}/README.md (repo_type=dataset)")
    api.upload_file(
        path_or_fileobj=args.readme,
        path_in_repo="README.md",
        repo_id=args.repo_id,
        repo_type="dataset",
        commit_message=args.commit_message,
        revision=args.branch,
    )
    logger.info("✅ README uploaded")


if __name__ == "__main__":
    # Speed tip for large uploads:
    # export HF_HUB_ENABLE_HF_TRANSFER=1
    main()
|
images/10.jpg
ADDED
|
Git LFS Details
|
images/100.jpg
ADDED
|
Git LFS Details
|
images/1000.jpg
ADDED
|
Git LFS Details
|
images/1002.jpg
ADDED
|
Git LFS Details
|
images/1003.jpg
ADDED
|
Git LFS Details
|
images/1004.jpg
ADDED
|
Git LFS Details
|
images/1008.jpg
ADDED
|
Git LFS Details
|
images/1009.jpg
ADDED
|
Git LFS Details
|
images/1011.jpg
ADDED
|
Git LFS Details
|
images/1012.jpg
ADDED
|
Git LFS Details
|
images/1013.jpg
ADDED
|
Git LFS Details
|
images/1017.jpg
ADDED
|
Git LFS Details
|
images/1018.jpg
ADDED
|
Git LFS Details
|
images/1023.jpg
ADDED
|
Git LFS Details
|
images/1026.jpg
ADDED
|
Git LFS Details
|
images/1028.jpg
ADDED
|
Git LFS Details
|
images/1034.jpg
ADDED
|
Git LFS Details
|
images/1035.jpg
ADDED
|
Git LFS Details
|
images/1036.jpg
ADDED
|
Git LFS Details
|
images/1037.jpg
ADDED
|
Git LFS Details
|
images/1040.jpg
ADDED
|
Git LFS Details
|
images/1050.jpg
ADDED
|
Git LFS Details
|
images/1051.jpg
ADDED
|
Git LFS Details
|
images/1056.jpg
ADDED
|
Git LFS Details
|
images/1057.jpg
ADDED
|
Git LFS Details
|
images/1058.jpg
ADDED
|
Git LFS Details
|
images/106.jpg
ADDED
|
Git LFS Details
|
images/1060.jpg
ADDED
|
Git LFS Details
|
images/1063.jpg
ADDED
|
Git LFS Details
|
images/1065.jpg
ADDED
|
Git LFS Details
|
images/1066.jpg
ADDED
|
Git LFS Details
|
images/1069.jpg
ADDED
|
Git LFS Details
|
images/1073.jpg
ADDED
|
Git LFS Details
|
images/1076.jpg
ADDED
|
Git LFS Details
|
images/1077.jpg
ADDED
|
Git LFS Details
|
images/1082.jpg
ADDED
|
Git LFS Details
|
images/1087.jpg
ADDED
|
Git LFS Details
|
images/1089.jpg
ADDED
|
Git LFS Details
|
images/1090.jpg
ADDED
|
Git LFS Details
|
images/1091.jpg
ADDED
|
Git LFS Details
|
images/1095.jpg
ADDED
|
Git LFS Details
|
images/1096.jpg
ADDED
|
Git LFS Details
|
images/1097.jpg
ADDED
|
Git LFS Details
|