Commit ·
d1a7ad2
1
Parent(s): 4bfef74
Update beyond_web_scraping.py
Browse files — beyond_web_scraping.py: +20 −11
beyond_web_scraping.py
CHANGED
|
@@ -19,9 +19,9 @@ import datasets
|
|
| 19 |
import json
|
| 20 |
from huggingface_hub import hf_hub_url
|
| 21 |
|
| 22 |
-
_INPUT_CSV = "
|
| 23 |
-
|
| 24 |
-
_REPO_ID = "nlphuji/
|
| 25 |
|
| 26 |
class Dataset(datasets.GeneratorBasedBuilder):
|
| 27 |
VERSION = datasets.Version("1.1.0")
|
|
@@ -34,12 +34,21 @@ class Dataset(datasets.GeneratorBasedBuilder):
|
|
| 34 |
features=datasets.Features(
|
| 35 |
{
|
| 36 |
"image": datasets.Image(),
|
| 37 |
-
"
|
| 38 |
-
"
|
| 39 |
-
"
|
| 40 |
-
"
|
| 41 |
-
"
|
| 42 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
}
|
| 44 |
),
|
| 45 |
task_templates=[],
|
|
@@ -51,7 +60,7 @@ class Dataset(datasets.GeneratorBasedBuilder):
|
|
| 51 |
repo_id = _REPO_ID
|
| 52 |
data_dir_125 = dl_manager.download_and_extract({
|
| 53 |
"examples_csv": hf_hub_url(repo_id=repo_id, repo_type='dataset', filename=_INPUT_CSV),
|
| 54 |
-
"images_dir": hf_hub_url(repo_id=repo_id, repo_type='dataset', filename=f"{
|
| 55 |
})
|
| 56 |
|
| 57 |
return [datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=data_dir_125)]
|
|
@@ -63,6 +72,6 @@ class Dataset(datasets.GeneratorBasedBuilder):
|
|
| 63 |
|
| 64 |
for r_idx, r in df.iterrows():
|
| 65 |
r_dict = r.to_dict()
|
| 66 |
-
image_path = os.path.join(images_dir,
|
| 67 |
r_dict['image'] = image_path
|
| 68 |
yield r_idx, r_dict
|
|
|
|
| 19 |
import json
|
| 20 |
from huggingface_hub import hf_hub_url
|
| 21 |
|
| 22 |
+
_INPUT_CSV = "test_set.csv"
|
| 23 |
+
_INPUT_IMAGES = 'geode_test_images'
|
| 24 |
+
_REPO_ID = "nlphuji/beyond_web_scraping"
|
| 25 |
|
| 26 |
class Dataset(datasets.GeneratorBasedBuilder):
|
| 27 |
VERSION = datasets.Version("1.1.0")
|
|
|
|
| 34 |
features=datasets.Features(
|
| 35 |
{
|
| 36 |
"image": datasets.Image(),
|
| 37 |
+
"file_path": datasets.Value('string'),
|
| 38 |
+
"object": datasets.Value('string'),
|
| 39 |
+
"region": datasets.Value('string'),
|
| 40 |
+
"ip_country": datasets.Value('string'),
|
| 41 |
+
"date": datasets.Value('string'),
|
| 42 |
+
"make": datasets.Value('string'),
|
| 43 |
+
"make": datasets.Value('string'),  # NOTE(review): duplicate key — "make" is already declared on the previous added line (new line 42); in a Python dict literal the later entry silently overrides the earlier one, so one of the two is dead. Likely a copy-paste slip for another EXIF field.
|
| 44 |
+
"model": datasets.Value('string'),
|
| 45 |
+
"gps_position": datasets.Value('string'),
|
| 46 |
+
"gps_altitude": datasets.Value('string'),
|
| 47 |
+
"resolution": datasets.Value('string'),
|
| 48 |
+
"licence_plate": datasets.Value('string'),
|
| 49 |
+
"people_in_background": datasets.Value('string'),
|
| 50 |
+
"tree_tag": datasets.Value('string'),
|
| 51 |
+
"short_file_path": datasets.Value('string'),
|
| 52 |
}
|
| 53 |
),
|
| 54 |
task_templates=[],
|
|
|
|
| 60 |
repo_id = _REPO_ID
|
| 61 |
data_dir_125 = dl_manager.download_and_extract({
|
| 62 |
"examples_csv": hf_hub_url(repo_id=repo_id, repo_type='dataset', filename=_INPUT_CSV),
|
| 63 |
+
"images_dir": hf_hub_url(repo_id=repo_id, repo_type='dataset', filename=f"{_INPUT_IMAGES}.zip")
|
| 64 |
})
|
| 65 |
|
| 66 |
return [datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=data_dir_125)]
|
|
|
|
| 72 |
|
| 73 |
for r_idx, r in df.iterrows():
|
| 74 |
r_dict = r.to_dict()
|
| 75 |
+
image_path = os.path.join(images_dir, _INPUT_IMAGES, r_dict['file_path'])
|
| 76 |
r_dict['image'] = image_path
|
| 77 |
yield r_idx, r_dict
|