fdschmidt93 committed on
Commit 05c1d1d · 1 Parent(s): f81a2e2

chore: copy mvl-sib200.py to mvl-sib.py

Files changed (1):
  1. mvl-sib.py +706 -0
mvl-sib.py ADDED
@@ -0,0 +1,706 @@
+ import csv
+ import random
+ from itertools import combinations
+ from pathlib import Path
+ from typing import Any, Dict, List, Union
+
+ import datasets
+ import numpy as np
+ import pandas as pd
+
+
+ # fmt: off
+ LANGS = [
+     "ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab",
+     "aka_Latn", "als_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "arb_Latn", "ars_Arab",
+     "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab",
+     "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng",
+     "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl",
+     "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn",
+     "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn",
+     "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "fij_Latn", "fin_Latn",
+     "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gaz_Latn", "gla_Latn", "gle_Latn",
+     "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva",
+     "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn",
+     "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn",
+     "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "kaz_Cyrl", "kbp_Latn", "kea_Latn",
+     "khk_Cyrl", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kmr_Latn",
+     "knc_Arab", "knc_Latn", "kon_Latn", "kor_Hang", "lao_Laoo", "lij_Latn", "lim_Latn",
+     "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn",
+     "luo_Latn", "lus_Latn", "lvs_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva",
+     "min_Arab", "min_Latn", "mkd_Cyrl", "mlt_Latn", "mni_Beng", "mos_Latn", "mri_Latn",
+     "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nqo_Nkoo", "nso_Latn",
+     "nus_Latn", "nya_Latn", "oci_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn",
+     "pbt_Arab", "pes_Arab", "plt_Latn", "pol_Latn", "por_Latn", "prs_Arab", "quy_Latn",
+     "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Olck", "scn_Latn",
+     "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab",
+     "som_Latn", "sot_Latn", "spa_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn",
+     "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "taq_Latn", "taq_Tfng", "tat_Cyrl",
+     "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "tpi_Latn", "tsn_Latn",
+     "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab",
+     "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn",
+     "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant",
+     "zsm_Latn", "zul_Latn"
+ ]
+ # fmt: on
+
+ # For interactive usage:
+ # Use the script directory if __file__ is defined; otherwise default to the
+ # current working directory.
+ try:
+     cwd = Path(__file__).parent
+ except NameError:
+     cwd = Path.cwd()
+
+ SEED: int = 42
+ N: int = 1004  # length of pooled train, dev, and test splits
+ UPSAMPLING_FACTOR: int = 3
+ NUM_NEGATIVES: int = 3
+ NUM_REFERENCES: int = 5
+ NUM_EXAMPLES_PER_OPTION: int = 1
+
+ CATEGORIES: List[str] = [
+     "entertainment",
+     "geography",
+     "health",
+     "politics",
+     "science",
+     "sports",
+     "travel",
+ ]
+
+ # URLs for downloading SIB .tsv data and images.
+ _SIB_URL: str = "https://huggingface.co/datasets/wuenlp/mvl-sib200/resolve/main/data/sib200/{lang}/{split}.tsv"
+ _IMG_URL: str = "https://huggingface.co/datasets/wuenlp/mvl-sib200/resolve/main/data/images/sib200/{category}_{no}.jpg"
+
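+ # For example, _SIB_URL.format(lang="eng_Latn", split="test") resolves to
+ #     https://huggingface.co/datasets/wuenlp/mvl-sib200/resolve/main/data/sib200/eng_Latn/test.tsv
+ # and _IMG_URL.format(category="sports", no=0) resolves to
+ #     https://huggingface.co/datasets/wuenlp/mvl-sib200/resolve/main/data/images/sib200/sports_0.jpg
+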
+ # Placeholder for dataset description: fill or extend as needed.
+ _DESCRIPTION: str = (
+     "MVLSIB is a multilingual dataset designed to provide sentence-image pairs "
+     "spanning multiple languages and categories. The goal is to support tasks such as "
+     "multimodal classification, cross-lingual information retrieval, and more. "
+     "Each row contains a textual entry (sentence) along with category information, "
+     "and the dataset also includes image references for the same set of categories."
+ )
+
+
+ def read_tsv_to_dict_list(file_path: Union[str, Path]) -> List[Dict[str, Any]]:
+     """
+     Reads a TSV file with columns 'index_id', 'category', and 'text' into a list of dictionaries.
+
+     The TSV is expected to have the following columns (in order):
+         1. index_id
+         2. category
+         3. text
+
+     Parameters
+     ----------
+     file_path : Union[str, Path]
+         The path to the TSV file.
+
+     Returns
+     -------
+     List[Dict[str, Any]]
+         A list of dictionaries, where each element has keys:
+         - 'index_id': int
+         - 'category': str
+         - 'text': str
+
+     Raises
+     ------
+     ValueError
+         If the TSV headers do not match the expected format.
+     """
+     data: List[Dict[str, Any]] = []
+     expected_headers = ["index_id", "category", "text"]
+
+     with open(file_path, mode="r", encoding="utf-8") as tsvfile:
+         reader = csv.DictReader(tsvfile, delimiter="\t")
+
+         # Validate headers
+         if reader.fieldnames != expected_headers:
+             raise ValueError(
+                 f"Expected headers {expected_headers}, but got {reader.fieldnames}"
+             )
+
+         for row in reader:
+             # Skip rows that merely repeat the header or are entirely empty
+             if all(
+                 (row[key].strip() == key) or (row[key].strip() == "")
+                 for key in expected_headers
+             ):
+                 continue
+             # Convert index_id to integer
+             index_id = int(row["index_id"])
+             # Strip leading/trailing whitespace
+             category = row["category"].strip()
+             text = row["text"].strip()
+
+             # Append the processed row to data
+             data.append({"index_id": index_id, "category": category, "text": text})
+
+     return data
+
+
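+ # Minimal usage sketch (the path below is hypothetical; in practice the files
+ # are downloaded via _SIB_URL in _split_generators):
+ #
+ #     rows = read_tsv_to_dict_list("data/sib200/eng_Latn/test.tsv")
+ #     # rows[0] -> {"index_id": ..., "category": "...", "text": "..."}
+
+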
+ def read_lang_tsv(filepaths: List[str]) -> List[Dict[str, Any]]:
+     """
+     Reads a list of TSV file paths containing SIB data in the same language
+     and merges them into a single, sorted list of dictionaries.
+
+     Specifically:
+     1. Calls `read_tsv_to_dict_list` for each file path.
+     2. Merges all resulting dictionaries.
+     3. Sorts by 'index_id'.
+
+     Also normalizes the category "science/technology" to "science" for internal consistency.
+
+     Parameters
+     ----------
+     filepaths : List[str]
+         A list of TSV file paths for a specific language.
+
+     Returns
+     -------
+     List[Dict[str, Any]]
+         A list of dictionaries sorted by 'index_id' with normalized categories.
+     """
+     # Read each file into a list of dicts
+     dicos = [read_tsv_to_dict_list(path) for path in filepaths]
+     # Flatten and sort by index_id
+     out: List[Dict[str, Any]] = sorted(
+         [line for dico in dicos for line in dico], key=lambda row: row["index_id"]
+     )
+     # Normalize "science/technology" to "science"
+     for line in out:
+         if line["category"] == "science/technology":
+             line["category"] = "science"
+     return out
+
+
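+ # Usage sketch (hypothetical local paths; _split_generators passes the three
+ # downloaded split files for one language):
+ #
+ #     records = read_lang_tsv(["eng_Latn/train.tsv", "eng_Latn/dev.tsv", "eng_Latn/test.tsv"])
+ #     # -> 1004 dicts sorted by index_id, with "science/technology" mapped to "science"
+
+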
+ def replicate_and_negatives(
+     df: pd.DataFrame,
+     num_replicates: int = 3,
+     num_negatives: int = 4,
+     num_positives: int = 4,
+     seed: int = 42,
+ ) -> pd.DataFrame:
+     """
+     Create multiple replicated rows from the input DataFrame `df` and
+     sample negative and positive examples for each row.
+
+     *Negative* samples are drawn from rows whose category is different
+     from the row's category. **Additionally, each negative example for
+     a given row is drawn from a distinct category among the negatives,
+     if there are enough categories to do so without replacement.**
+
+     *Positive* samples are drawn from rows of the same category (excluding
+     the row's own 'index_id').
+
+     Parameters
+     ----------
+     df : pd.DataFrame
+         The original input DataFrame with columns ['index_id', 'category', 'text'].
+     num_replicates : int, optional
+         Number of times to replicate each row, by default 3.
+     num_negatives : int, optional
+         Number of negative samples to pick for each row, by default 4.
+     num_positives : int, optional
+         Number of positive samples to pick for each row, by default 4.
+     seed : int, optional
+         Seed for random operations, by default 42.
+
+     Returns
+     -------
+     pd.DataFrame
+         A new DataFrame containing replicated rows plus columns:
+         - neg_id_i, neg_cat_i, neg_text_i for i in [0 .. num_negatives-1]
+         - pos_id_i, pos_cat_i, pos_text_i for i in [0 .. num_positives-1]
+
+     Notes
+     -----
+     - Negative examples for a row are taken from distinct categories
+       (other than the row's category) if enough categories exist. If
+       fewer categories exist than `num_negatives`, we sample categories
+       with replacement, so some duplicates may appear.
+     - Positive sampling excludes the row's own 'index_id'.
+       If there are fewer available positives than `num_positives`,
+       we sample with replacement.
+     """
+
+     rng = np.random.default_rng(seed=seed)
+
+     # --- 1) Replicate the DataFrame k (=num_replicates) times ---
+     df_new = pd.concat([df] * num_replicates, ignore_index=True)
+
+     # --- 2) Create empty columns for negative and positive samples ---
+     for i in range(num_negatives):
+         df_new[f"neg_id_{i}"] = None
+         df_new[f"neg_cat_{i}"] = None
+         df_new[f"neg_text_{i}"] = None
+
+     for i in range(num_positives):
+         df_new[f"pos_id_{i}"] = None
+         df_new[f"pos_cat_{i}"] = None
+         df_new[f"pos_text_{i}"] = None
+
+     # --- 3) Precompute a dictionary of all rows by category (for negatives sampling) ---
+     # Key: category -> DataFrame of that category
+     unique_cats = df_new["category"].unique()
+     cat_to_df: Dict[str, pd.DataFrame] = {}
+     for c in unique_cats:
+         cat_to_df[c] = df_new[df_new["category"] == c].reset_index(drop=True)
+
+     # --- 4) Build a "positive pool" dictionary by category ---
+     # For positive sampling, we exclude the row's own 'index_id' in each row's step
+     pos_pool_by_cat = {}
+     for c in unique_cats:
+         pos_pool_by_cat[c] = df.loc[
+             df["category"] == c, ["index_id", "category", "text"]
+         ].reset_index(drop=True)
+
+     # --- 5) Group df_new by category and populate negative/positive samples ---
+     grouped = df_new.groupby("category", group_keys=False)
+     output_chunks: List[pd.DataFrame] = []
+
+     for cat, group_df in grouped:
+         g_size = len(group_df)
+
+         # The preallocated arrays below are filled for each row individually,
+         # i.e., negative categories and samples are drawn per row.
+         # Prepare arrays for final negative columns
+         neg_id_cols = [np.empty(g_size, dtype=object) for _ in range(num_negatives)]
+         neg_cat_cols = [np.empty(g_size, dtype=object) for _ in range(num_negatives)]
+         neg_text_cols = [np.empty(g_size, dtype=object) for _ in range(num_negatives)]
+
+         # Prepare arrays for final positive columns
+         pos_id_cols = [np.empty(g_size, dtype=object) for _ in range(num_positives)]
+         pos_cat_cols = [np.empty(g_size, dtype=object) for _ in range(num_positives)]
+         pos_text_cols = [np.empty(g_size, dtype=object) for _ in range(num_positives)]
+
+         # For convenience, get all categories *except* the current one (cat);
+         # we'll sample from these as negative categories.
+         negative_candidate_cats = [c for c in unique_cats if c != cat]
+
+         # For each row in the current group
+         row_ids_for_group = group_df["index_id"].to_numpy()
+         for i_row in range(g_size):
+             row_id = row_ids_for_group[i_row]
+
+             # ------------- Negative Sampling -------------
+             # 1) Choose distinct categories if possible. If not enough categories
+             #    exist to cover num_negatives, we sample categories with replacement.
+             replace_for_cats = len(negative_candidate_cats) < num_negatives
+             chosen_neg_cats = rng.choice(
+                 negative_candidate_cats, size=num_negatives, replace=replace_for_cats
+             )
+
+             # 2) For each chosen negative category, pick a random row
+             for j, neg_cat in enumerate(chosen_neg_cats):
+                 neg_pool = cat_to_df[neg_cat]
+                 pick_idx = rng.integers(len(neg_pool))  # random index
+                 neg_id_cols[j][i_row] = neg_pool["index_id"].iloc[pick_idx]
+                 neg_cat_cols[j][i_row] = neg_pool["category"].iloc[pick_idx]
+                 neg_text_cols[j][i_row] = neg_pool["text"].iloc[pick_idx]
+
+             # ------------- Positive Sampling -------------
+             pos_pool_cat = pos_pool_by_cat[cat]
+             # Exclude the row's own ID in the sampling
+             valid_mask = pos_pool_cat["index_id"] != row_id
+             valid_pos_pool = pos_pool_cat[valid_mask]
+             # If not enough positives remain, sample with replacement
+             replace_pos_for_row = len(valid_pos_pool) < num_positives
+
+             if len(valid_pos_pool) == 0:
+                 # Edge case: if there's literally no other row of the same category,
+                 # we won't be able to sample. You could decide to fill with NaN
+                 # or replicate the single example. Here we do the "safe" approach
+                 # of sampling from the entire category's pool if possible.
+                 valid_pos_pool = pos_pool_cat
+                 replace_pos_for_row = True
+
+             valid_idx_array = valid_pos_pool.index.to_numpy()
+             chosen_indices = rng.choice(
+                 valid_idx_array, size=num_positives, replace=replace_pos_for_row
+             )
+             for j in range(num_positives):
+                 pick_idx = chosen_indices[j]
+                 pos_id_cols[j][i_row] = valid_pos_pool["index_id"].loc[pick_idx]
+                 pos_cat_cols[j][i_row] = valid_pos_pool["category"].loc[pick_idx]
+                 pos_text_cols[j][i_row] = valid_pos_pool["text"].loc[pick_idx]
+
+         # Attach negative columns to group_df
+         for j in range(num_negatives):
+             group_df[f"neg_id_{j}"] = neg_id_cols[j]
+             group_df[f"neg_cat_{j}"] = neg_cat_cols[j]
+             group_df[f"neg_text_{j}"] = neg_text_cols[j]
+
+         # Attach positive columns to group_df
+         for j in range(num_positives):
+             group_df[f"pos_id_{j}"] = pos_id_cols[j]
+             group_df[f"pos_cat_{j}"] = pos_cat_cols[j]
+             group_df[f"pos_text_{j}"] = pos_text_cols[j]
+
+         output_chunks.append(group_df)
+
+     # --- 6) Combine all chunks and restore index order ---
+     df_out = pd.concat(output_chunks, axis=0)
+     df_out.sort_index(inplace=True)
+     return df_out
+
+
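+ # Illustrative sketch (parameters as the builder passes them below): given a
+ # df with columns ['index_id', 'category', 'text'],
+ #
+ #     ext_df = replicate_and_negatives(df, num_replicates=3, num_negatives=3,
+ #                                      num_positives=4, seed=42)
+ #
+ # yields len(df) * 3 rows, each with neg_{id,cat,text}_{0..2} drawn from other
+ # categories and pos_{id,cat,text}_{0..3} drawn from the same category.
+
+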
+ def get_reference_image_ids(
+     N: int, num_images: int, k: int, seed: int
+ ) -> List[List[int]]:
+     """
+     Generates reference image ID combinations for each row in a dataset of size N.
+
+     We pick k-combinations from the range [0 .. num_images-1]. Then we sample
+     from these combinations (with replacement) for each of N rows, and shuffle them
+     in a reproducible manner.
+
+     Parameters
+     ----------
+     N : int
+         Number of rows in the dataset.
+     num_images : int
+         Total number of images available per category.
+     k : int
+         Number of images to select in each combination.
+     seed : int
+         Global seed for random operations.
+
+     Returns
+     -------
+     List[List[int]]
+         A list of length N, where each element is a list of k unique image IDs.
+
+     Notes
+     -----
+     - We use Python's `random.choices` to draw from all possible k-combinations.
+     - Each combination is then locally shuffled to remove ordering biases.
+     """
+     all_combinations = list(combinations(range(0, num_images), k))
+     random.seed(seed)
+     sampled_combinations = [list(x) for x in random.choices(all_combinations, k=N)]
+
+     for i, tuple_ in enumerate(sampled_combinations):
+         # Use a unique seed for each shuffle to ensure reproducibility
+         random.seed(seed + i)
+         random.shuffle(tuple_)
+     return sampled_combinations
+
+
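+ # For example, with num_images=10 (images per category downloaded below) and
+ # k=5 (the default NUM_REFERENCES), there are C(10, 5) = 252 possible combinations:
+ #
+ #     ids = get_reference_image_ids(N=4, num_images=10, k=5, seed=42)
+ #     # -> 4 shuffled lists of 5 distinct image indices in [0, 9]
+
+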
+ class MVLSIBConfig(datasets.BuilderConfig):
+     """
+     Configuration class for the MVLSIB (Multilingual Visual Language SIB) dataset.
+
+     Parameters
+     ----------
+     name : str
+         The configuration name, typically in the format "task.lang".
+     upsampling_factor : int, optional
+         How many times to replicate each row for additional sampling variety, default: 3.
+     num_references : int, optional
+         Number of positive references to sample for each row, default: 5.
+     num_negatives : int, optional
+         Number of negative samples to pair with each row, default: 3.
+     seed : int, optional
+         Seed for random operations, default: 42.
+     """
+
+     def __init__(
+         self,
+         name: str,
+         upsampling_factor: int = UPSAMPLING_FACTOR,
+         num_references: int = NUM_REFERENCES,
+         num_negatives: int = NUM_NEGATIVES,
+         seed: int = SEED,
+         **kwargs: Any,
+     ):
+         super(MVLSIBConfig, self).__init__(**kwargs)
+         self.name: str = name
+         self.task, self.lang = name.split(".")
+         self.upsampling_factor: int = upsampling_factor
+         self.num_references: int = num_references
+         self.num_negatives: int = num_negatives
+         self.seed: int = seed
+
+
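+ # A config name encodes the task and language, e.g. (illustrative):
+ #
+ #     cfg = MVLSIBConfig(name="img2sent.eng_Latn", version=datasets.Version("1.0.0"))
+ #     # cfg.task == "img2sent", cfg.lang == "eng_Latn"
+
+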
+ def _builder_configs() -> List[MVLSIBConfig]:
+     """
+     Internal helper to build the list of MVLSIBConfig objects
+     for all tasks ('img2sent', 'sent2img') and all available languages in LANGS.
+
+     Returns
+     -------
+     List[MVLSIBConfig]
+         A list of dataset configuration objects, each specifying a (task, language) pair.
+     """
+     configs: List[MVLSIBConfig] = []
+     for task in ("img2sent", "sent2img"):
+         for lang in LANGS:
+             cfg = MVLSIBConfig(
+                 name=f"{task}.{lang}",
+                 version=datasets.Version("1.0.0"),
+                 description=f"MVLSIB: {task}.{lang}",
+             )
+             configs.append(cfg)
+     return configs
+
+
+ class MVLSIB(datasets.GeneratorBasedBuilder):
+     """
+     MVLSIB is a multilingual dataset that provides matched
+     (sentence -> image) or (image -> sentence) examples for
+     classification or retrieval tasks.
+
+     Each configuration is specified by a task (img2sent or sent2img)
+     and a language code, e.g. 'img2sent.eng_Latn'.
+
+     The dataset is structured such that each row includes:
+     - A set of reference items (images or sentences, depending on the task).
+     - A set of 4 possible answers (1 positive, 3 negative).
+     - A label indicating which of the 4 answers is correct.
+     """
+
+     BUILDER_CONFIGS = _builder_configs()
+     BUILDER_CONFIG_CLASS = MVLSIBConfig
+
+     def _info(self) -> datasets.DatasetInfo:
+         """
+         Returns the dataset metadata, including features.
+
+         The dataset has two major tasks:
+         - 'img2sent': Given reference images, choose the best matching sentence.
+         - 'sent2img': Given reference sentences, choose the best matching image.
+
+         Each example row in 'img2sent' includes:
+         - images (list of str paths to the reference images)
+         - sentences (list of str: one positive, three negatives)
+         - categories (list of str categories matching each sentence)
+         - label (int specifying which of the sentences is correct)
+         - id (an integer ID)
+         - index_id (the original row ID from the SIB .tsv)
+
+         Each example row in 'sent2img' includes:
+         - sentences (list of str, the positive reference sentences)
+         - images (list of str paths to images: one positive, three negatives)
+         - categories (list of str categories matching each image)
+         - label (int specifying which of the images is correct)
+         - id (an integer ID)
+         - index_id (the original row ID from the SIB .tsv)
+
+         Returns
+         -------
+         datasets.DatasetInfo
+             The Hugging Face DatasetInfo object describing the dataset features,
+             licensing, homepage, citation, etc.
+         """
+         from datasets import DatasetInfo, Features, Sequence, Value
+
+         img2sents = Features(
+             {
+                 "images": Sequence(Value("string")),
+                 "sentences": Sequence(Value("string")),
+                 "categories": Sequence(Value("string")),
+                 "label": Value("int8"),
+                 "id": Value("int64"),
+                 "index_id": Value("int64"),
+             }
+         )
+         sent2imgs = Features(
+             {
+                 "sentences": Sequence(Value("string")),
+                 "images": Sequence(Value("string")),
+                 "categories": Sequence(Value("string")),
+                 "label": Value("int8"),
+                 "id": Value("int64"),
+                 "index_id": Value("int64"),
+             }
+         )
+
+         features = {
+             "img2sent": img2sents,
+             "sent2img": sent2imgs,
+         }
+
+         return DatasetInfo(
+             description=_DESCRIPTION,
+             features=features[self.config.task],
+             supervised_keys=None,
+         )
+
+     def _split_generators(
+         self, dl_manager: datasets.DownloadManager, *args: Any, **kwargs: Any
+     ) -> List[datasets.SplitGenerator]:
+         """
+         Defines the splits of the dataset. In this case, we only produce a single 'test' split,
+         but in principle, you can define train/dev/test or others.
+
+         Parameters
+         ----------
+         dl_manager : datasets.DownloadManager
+             The Hugging Face DownloadManager used to download files.
+
+         Returns
+         -------
+         List[datasets.SplitGenerator]
+             A list of SplitGenerator objects. Each defines a split name
+             and a gen_kwargs dict for the `_generate_examples` method.
+         """
+         # Download SIB tsv files for train, dev, and test
+         files = dl_manager.download(
+             [
+                 _SIB_URL.format(lang=self.config.lang, split=split)
+                 for split in ("train", "dev", "test")
+             ]
+         )
+         # Download images for each category
+         images: Dict[str, List[str]] = {}
+         for cat in CATEGORIES:
+             images[cat] = []
+             for i in range(10):
+                 images[cat].append(
+                     dl_manager.download(_IMG_URL.format(category=cat, no=i))
+                 )
+
+         return [
+             datasets.SplitGenerator(
+                 name="test",
+                 gen_kwargs={"sib_filepaths": files, "images_filepaths": images},
+             ),
+         ]
+
+     def _generate_examples(
+         self,
+         sib_filepaths: List[str],
+         images_filepaths: Dict[str, List[str]],
+         *args: Any,
+         **kwargs: Any,
+     ) -> Any:
+         """
+         Generator function that yields dataset examples in the format needed by
+         Hugging Face Datasets.
+
+         Depending on the task (img2sent or sent2img), the function constructs examples where:
+         - img2sent: reference images, 4 candidate sentences (1 positive, 3 negative)
+         - sent2img: reference sentences, 4 candidate images (1 positive, 3 negative)
+
+         Parameters
+         ----------
+         sib_filepaths : List[str]
+             The downloaded .tsv file paths (train/dev/test) for the specified language.
+         images_filepaths : Dict[str, List[str]]
+             A dictionary from category -> list of 10 image paths, as downloaded in `_split_generators`.
+
+         Yields
+         ------
+         Tuple[int, Dict[str, Any]]
+             A tuple where the first element is an integer index,
+             and the second is a dictionary matching the features specification
+             of the dataset.
+         """
+         # Read the SIB .tsv files for the given language and combine into a single DataFrame
+         records = read_lang_tsv(sib_filepaths)
+         df = pd.DataFrame.from_records(records)
+
+         # Expand the dataset with negative and positive samples
+         ext_df = replicate_and_negatives(
+             df,
+             num_replicates=self.config.upsampling_factor,
+             num_negatives=self.config.num_negatives,
+             # every line already has a positive
+             num_positives=self.config.num_references - 1,
+             seed=self.config.seed,
+         )
+
+         sent_ids = list(range(self.config.num_negatives + 1))
+         N = len(ext_df)
+         num_images = len(next(iter(images_filepaths.values())))  # e.g., 10 images/cat
+
+         if self.config.task == "img2sent":
+             # Pre-generate image ID combinations for each row
+             image_ids = get_reference_image_ids(
+                 N=N,
+                 num_images=num_images,
+                 k=self.config.num_references,
+                 seed=self.config.seed,
+             )
+             for i, row in ext_df.iterrows():
+                 # Construct the list of candidate sentences (pos + neg)
+                 text = [row["text"]]
+                 categories = [row["category"]]
+                 for j in range(self.config.num_negatives):
+                     text.append(row[f"neg_text_{j}"])
+                     categories.append(row[f"neg_cat_{j}"])
+
+                 # Shuffle candidate sentences in a reproducible manner
+                 random.seed(i)
+                 random.shuffle(sent_ids)
+                 label = sent_ids[0]
+
+                 # Reorder sentences and categories according to the shuffled indices
+                 _, categories_shuffled = zip(*sorted(zip(sent_ids, categories)))
+                 _, sentences_shuffled = zip(*sorted(zip(sent_ids, text)))
+
+                 # Fetch the reference images for the row
+                 row_image_ids = image_ids[i]
+                 cat = row["category"]
+                 cat_images = images_filepaths[cat]
+                 row_images = [
+                     cat_images[row_image_ids[j]]
+                     for j in range(self.config.num_references)
+                 ]
+
+                 yield (
+                     i,
+                     {
+                         "id": i,
+                         "index_id": row["index_id"],
+                         "images": row_images,
+                         "categories": categories_shuffled,
+                         "sentences": sentences_shuffled,
+                         "label": label,
+                     },
+                 )
+         else:
+             # sent2img: we first sample image indices (pos + neg) for each row
+             rng = np.random.default_rng(seed=self.config.seed)
+             choice_image_ids = rng.integers(
+                 0, num_images, (N, 1 + self.config.num_negatives)
+             ).tolist()
+
+             for i, row in ext_df.iterrows():
+                 # The positive text
+                 pos_text = [row["text"]]
+                 # For the negative categories, we gather them similarly
+                 cats = [row["category"]]
+                 for j in range(self.config.num_negatives):
+                     cats.append(row[f"neg_cat_{j}"])
+                 for j in range(self.config.num_references - 1):
+                     pos_text.append(row[f"pos_text_{j}"])
+
+                 random.seed(i)
+                 random.shuffle(sent_ids)
+                 label = sent_ids[0]
+
+                 # Reorder categories based on the shuffled indices
+                 # NOTE: the reference sentences are all positives, so they need
+                 # no shuffling; only the candidate images carry the label
+                 _, categories_shuffled = zip(*sorted(zip(sent_ids, cats)))
+
+                 # Match the categories to the sampled image indices
+                 row_image_ids = choice_image_ids[i]
+                 row_images = [
+                     images_filepaths[cat][idx]
+                     for idx, cat in zip(row_image_ids, categories_shuffled)
+                 ]
+
+                 yield (
+                     i,
+                     {
+                         "id": i,
+                         "index_id": row["index_id"],
+                         "images": row_images,
+                         "categories": categories_shuffled,
+                         "sentences": pos_text,
+                         "label": label,
+                     },
+                 )
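+
+
+ # Loading sketch (assumes this script serves as the repo's dataset loading
+ # script; the repo id below is inferred from the URLs above and may differ):
+ #
+ #     import datasets
+ #     ds = datasets.load_dataset("wuenlp/mvl-sib200", "img2sent.eng_Latn", split="test")
+ #     # each example: {"images": [5 reference image paths], "sentences": [4 candidates],
+ #     #                "categories": [...], "label": int in [0, 3], "id": ..., "index_id": ...}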