"""Prepare one split of the ESCI shopping-queries dataset.

Joins the examples parquet with the product metadata, maps ESCI letter labels
to integer relevance grades, and writes the selected split/locale to
``{split}.jsonl``. See ``--help`` for the available options.
"""
import argparse
import os

import polars as pl
|
|
# ESCI relevance labels, ordered Exact > Substitute > Complement > Irrelevant.
MAPPING = {"E": 3, "S": 2, "C": 1, "I": 0}

# Product metadata fields concatenated into the `text` column.
COLUMNS = [
| "product_description", |
| "product_bullet_point", |
| "product_brand", |
| "product_color", |
| ] |
|
|
| if __name__ == "__main__": |
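    # Command-line options: where the parquet files live, which split and
    # locale to export, and whether to restrict to the small subset.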
    parser = argparse.ArgumentParser(description=__doc__)
| parser.add_argument( |
| "--source", type=str, required=True, help="path to parquet files" |
| ) |
| parser.add_argument("--small", action="store_true", help="select small subset") |
| parser.add_argument("--split", type=str, choices=["train", "test"], required=True) |
| parser.add_argument( |
| "--locale", |
| type=str, |
| default="us", |
| choices=["us", "es", "jp"], |
| help="language to select", |
| ) |
|
|
| args = parser.parse_args() |
|
|
| products = pl.read_parquet( |
| os.path.join(args.source, "shopping_queries_dataset_products.parquet") |
| ) |
|
|
| examples = pl.read_parquet( |
| os.path.join(args.source, "shopping_queries_dataset_examples.parquet") |
| ) |
| merged = examples.join(products, on=pl.col("product_id")) |
|
|
| merged = merged.select(pl.all().exclude("^__index_level_.*$")) |
    merged = merged.with_columns(
        # Map ESCI letter labels to integer relevance grades.
        label=pl.col("esci_label").replace(MAPPING).cast(pl.Int32),
        id=pl.col("example_id").cast(pl.String),
        query_id=pl.col("query_id").cast(pl.String),
        product_id=pl.col("product_id").cast(pl.String),
        title=pl.col("product_title"),
        # Flatten the metadata fields into one "name: value" line per field.
        text=pl.concat_str(
            [pl.lit(f"{col}: ") + pl.col(col).fill_null("") for col in COLUMNS],
            separator="\n",
        ),
    )

    print(f"loaded {len(merged)} source rows")
    merged = merged.filter(pl.col("split") == args.split)
    print(f"split filtering done: {len(merged)} rows")
    if args.small:
        # --small selects the small subset; without the flag, keep the whole
        # split rather than its complement.
        merged = merged.filter(pl.col("small_version") == 1)
        print(f"size filtering done: {len(merged)} rows")
    merged = merged.filter(pl.col("product_locale") == args.locale)
    print(f"locale filtering done: {len(merged)} rows")
    merged.write_ndjson(f"{args.split}.jsonl")
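    # Note: output is written to the current working directory, not --source.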