# Uploaded to the Hugging Face Hub by davanstrien (HF Staff)
# Commit: "Upload batch_classify_arxiv_incremental.py with huggingface_hub" (2e7261e, verified)
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "datasets>=2.0",
#     "huggingface-hub[hf_transfer]>=0.20",
#     "polars>=1.0",
#     "torch>=2.0",
#     "transformers>=4.40",
#     "tokenizers>=0.19",
#     "toolz",
#     "tqdm",
#     "pyarrow>=15.0",
#     "vllm",
# ]
#
# [[tool.uv.index]]
# url = "https://wheels.vllm.ai/nightly"
# ///
| """ | |
| Incremental batch text classification for ArXiv papers. | |
| This script processes new papers from the arxiv-metadata-snapshot dataset | |
| and updates the existing classified dataset. It only processes papers newer | |
| than the last classification run, making it efficient for daily updates. | |
| Example usage: | |
| # Daily incremental update (only new papers) | |
| uv run batch_classify_arxiv_incremental.py | |
| # Monthly full refresh (reprocess everything) | |
| uv run batch_classify_arxiv_incremental.py --full-refresh | |
| # Test with small sample | |
| uv run batch_classify_arxiv_incremental.py --limit 100 | |
| """ | |
| import argparse | |
| import json | |
| import logging | |
| import os | |
| import shutil | |
| import sys | |
| import tempfile | |
| from datetime import datetime | |
| from pathlib import Path | |
| from typing import Dict, List, Optional, Tuple | |
| import polars as pl | |
| import torch | |
| from datasets import Dataset, load_dataset | |
| from huggingface_hub import HfFolder, login | |
| from toolz import partition_all | |
| from tqdm.auto import tqdm | |
| from transformers import pipeline | |
# vLLM is an optional dependency; degrade gracefully when it is missing.
VLLM_AVAILABLE = False
try:
    import vllm
    from vllm import LLM
except ImportError:
    pass
else:
    VLLM_AVAILABLE = True

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Constants: default Hub dataset/model identifiers used by the CLI.
DEFAULT_OUTPUT_DATASET = "davanstrien/my-classified-papers"
DEFAULT_INPUT_DATASET = "librarian-bots/arxiv-metadata-snapshot"
DEFAULT_MODEL = "davanstrien/ModernBERT-base-is-new-arxiv-dataset"
def check_backend() -> Tuple[str, int]:
    """
    Detect the best available inference backend.

    Returns:
        Tuple of (backend_name, batch_size); backend_name is one of
        'vllm', 'cuda', 'mps', or 'cpu', and batch_size is the
        recommended default for that backend.
    """
    has_cuda = torch.cuda.is_available()
    if has_cuda and VLLM_AVAILABLE:
        device_name = torch.cuda.get_device_name(0)
        total_gb = torch.cuda.get_device_properties(0).total_memory / 1024**3
        logger.info(f"GPU detected: {device_name} with {total_gb:.1f} GB memory")
        logger.info(f"vLLM version: {vllm.__version__}")
        return "vllm", 500_000  # Larger batches for A100
    if has_cuda:
        logger.info("CUDA available but vLLM not installed. Using transformers with GPU.")
        return "cuda", 256  # Smaller batch for transformers to avoid OOM
    if torch.backends.mps.is_available():
        logger.info("Using Apple Silicon MPS device with transformers")
        return "mps", 1_000
    logger.info("Using CPU device with transformers")
    return "cpu", 100
def get_last_update_date(output_dataset: str, hf_token: Optional[str] = None) -> Optional[str]:
    """
    Get the maximum update_date from the existing classified dataset.

    Args:
        output_dataset: HuggingFace dataset ID of the previously classified data
        hf_token: Optional HuggingFace token (needed for private datasets)

    Returns:
        ISO format date string of the last update, or None if the dataset
        doesn't exist, has no parquet files, or has no update_date values.
    """
    try:
        logger.info(f"Checking for existing dataset: {output_dataset}")
        from huggingface_hub import list_repo_files

        # Check if dataset exists and actually contains parquet shards
        try:
            files = list_repo_files(output_dataset, repo_type="dataset", token=hf_token)
            parquet_files = [f for f in files if f.endswith('.parquet')]
            if not parquet_files:
                logger.info("No parquet files found in existing dataset")
                return None
        except Exception as e:
            logger.info(f"Dataset {output_dataset} not found or inaccessible: {e}")
            return None

        # Download parquet files and scan them to find max update_date
        temp_dir = Path(tempfile.mkdtemp(prefix="arxiv_incremental_check_"))
        try:
            from huggingface_hub import snapshot_download
            local_dir = snapshot_download(
                output_dataset,
                local_dir=str(temp_dir),
                allow_patterns=["*.parquet"],
                repo_type="dataset",
                token=hf_token
            )
            # Bugfix: Path.rglob returns a generator, which pl.scan_parquet
            # does not accept as a source (it needs str/Path/list). Materialize
            # to a list, which also lets us bail out on an empty download.
            local_parquet = list(Path(local_dir).rglob("*.parquet"))
            if not local_parquet:
                logger.info("No parquet files downloaded from existing dataset")
                return None
            # Lazy scan so only the update_date column is actually read
            lf = pl.scan_parquet(local_parquet)
            max_date_df = lf.select(pl.col("update_date").max()).collect()
            if max_date_df.height > 0 and max_date_df.width > 0:
                max_date = max_date_df[0, 0]
                # max() over an all-null column yields None; treat as "no date"
                if max_date is None:
                    logger.info("No update_date found in existing dataset")
                    return None
                logger.info(f"Found last update date in existing dataset: {max_date}")
                return max_date
            else:
                logger.info("No update_date found in existing dataset")
                return None
        finally:
            # Cleanup temp directory even on failure
            if temp_dir.exists():
                shutil.rmtree(temp_dir)
    except Exception as e:
        logger.warning(f"Error checking existing dataset: {e}")
        return None
def prepare_incremental_data(
    input_dataset: str,
    temp_dir: Path,
    last_update_date: Optional[str] = None,
    limit: Optional[int] = None,
    full_refresh: bool = False
) -> Optional[Path]:
    """
    Prepare data for incremental classification.

    Downloads the source parquet shards, filters to CS papers (optionally
    only those updated after ``last_update_date``), adds the formatted text
    column the classifier consumes, and writes the result to a parquet file.

    Args:
        input_dataset: Source dataset ID
        temp_dir: Directory for temporary files
        last_update_date: Date of last classification run
        limit: Optional limit for testing
        full_refresh: If True, process all papers regardless of date

    Returns:
        Path to filtered parquet file, or None if no new papers
    """
    output_path = temp_dir / "papers_to_classify.parquet"
    logger.info(f"Loading source dataset: {input_dataset}")
    # Download dataset shards locally so Polars can scan them lazily
    from huggingface_hub import snapshot_download
    local_dir = temp_dir / "raw_data"
    snapshot_download(
        input_dataset,
        local_dir=str(local_dir),
        allow_patterns=["*.parquet"],
        repo_type="dataset",
    )
    parquet_files = list(local_dir.rglob("*.parquet"))
    logger.info(f"Found {len(parquet_files)} parquet files")
    # Create lazy frame
    lf = pl.scan_parquet(parquet_files)
    # Filter to CS papers.
    # Bugfix: the previous pattern "cs." was unanchored and the dot acted as
    # a regex wildcard, so it also matched e.g. "physics.optics" (which
    # contains the substring "cs."). Escape the dot and require a word
    # boundary so only genuine cs.* category tokens match.
    logger.info("Filtering to CS papers...")
    lf_cs = lf.filter(pl.col("categories").str.contains(r"\bcs\."))
    # Apply incremental filter if not full refresh
    if not full_refresh and last_update_date:
        logger.info(f"Filtering for papers newer than {last_update_date}")
        lf_cs = lf_cs.filter(pl.col("update_date") > last_update_date)
    elif full_refresh:
        logger.info("Full refresh mode - processing all CS papers")
    else:
        logger.info("No existing dataset found - processing all CS papers")
    # Apply limit if specified (for testing)
    if limit:
        logger.info(f"Limiting to {limit} papers for testing")
        lf_cs = lf_cs.head(limit)
    # Build the "TITLE: ... ABSTRACT: ..." text the classifier expects
    logger.info("Formatting text for classification...")
    lf_formatted = lf_cs.with_columns(
        pl.concat_str([
            pl.lit("TITLE: "),
            pl.col("title"),
            pl.lit(" \n\nABSTRACT: "),
            pl.col("abstract")
        ]).alias("text_for_classification")
    )
    # Collect (streaming keeps memory bounded) and bail out early when empty
    df_to_classify = lf_formatted.collect(streaming=True)
    if df_to_classify.height == 0:
        logger.info("No new papers to classify")
        return None
    logger.info(f"Found {df_to_classify.height:,} papers to classify")
    # Write to parquet for the downstream load_dataset("parquet", ...) step
    df_to_classify.write_parquet(output_path)
    return output_path
def classify_with_vllm(
    dataset: Dataset,
    model_id: str,
    batch_size: int = 100_000
) -> List[Dict]:
    """
    Classify papers using vLLM for efficient GPU inference.

    Args:
        dataset: HuggingFace Dataset with a "text_for_classification" column.
        model_id: Hub ID of the sequence-classification model.
        batch_size: Number of texts handed to llm.classify() per call.

    Returns:
        One dict per input row (in dataset order) with keys
        "classification_label", "is_new_dataset", and "confidence_score".
    """
    logger.info(f"Initializing vLLM with model: {model_id}")
    # The "pooling" runner exposes the classification head instead of generation
    llm = LLM(model=model_id, runner="pooling")
    texts = dataset["text_for_classification"]
    total_papers = len(texts)
    logger.info(f"Starting vLLM classification of {total_papers:,} papers")
    all_results = []
    for batch in tqdm(
        list(partition_all(batch_size, texts)),
        desc="Processing batches",
        unit="batch"
    ):
        batch_results = llm.classify(batch)
        for result in batch_results:
            # NOTE(review): vLLM's ClassificationOutput.probs is documented as
            # already-normalized probabilities; applying softmax again here
            # would compress confidence_score toward 0.5 (the argmax, and thus
            # the label, is unaffected since softmax is monotonic). Confirm
            # against the vLLM version actually in use before changing.
            logits = torch.tensor(result.outputs.probs)
            probs = torch.nn.functional.softmax(logits, dim=0)
            top_idx = torch.argmax(probs).item()
            top_prob = probs[top_idx].item()
            # Model config: 0 -> new_dataset, 1 -> no_new_dataset
            label = "new_dataset" if top_idx == 0 else "no_new_dataset"
            all_results.append({
                "classification_label": label,
                "is_new_dataset": label == "new_dataset",
                "confidence_score": float(top_prob)
            })
    return all_results
def classify_with_transformers(
    dataset: Dataset,
    model_id: str,
    batch_size: int = 1_000,
    device: str = "cpu"
) -> List[Dict]:
    """
    Classify papers with a transformers text-classification pipeline.

    Args:
        dataset: HuggingFace Dataset with a "text_for_classification" column.
        model_id: Hub ID of the classification model.
        batch_size: Number of texts per pipeline call.
        device: One of "cuda", "mps", or "cpu".

    Returns:
        One result dict per paper, in dataset order.
    """
    logger.info(f"Initializing transformers pipeline with model: {model_id}")
    # Map the backend name onto the pipeline's device argument
    # ("cuda" -> GPU 0, "mps" -> Apple Silicon, anything else -> default/CPU).
    device_map = {"cuda": 0, "mps": "mps"}.get(device)
    pipe = pipeline(
        "text-classification",
        model=model_id,
        device=device_map,
        batch_size=batch_size
    )
    texts = dataset["text_for_classification"]
    total_papers = len(texts)
    logger.info(f"Starting transformers classification of {total_papers:,} papers")
    all_results = []
    with tqdm(total=total_papers, desc="Classifying papers", unit="papers") as pbar:
        for chunk in partition_all(batch_size, texts):
            chunk = list(chunk)
            for prediction in pipe(chunk):
                predicted_label = prediction["label"]
                all_results.append({
                    "classification_label": predicted_label,
                    "is_new_dataset": predicted_label == "new_dataset",
                    "confidence_score": float(prediction["score"]),
                })
            pbar.update(len(chunk))
    return all_results
def merge_with_existing(
    new_dataset: Dataset,
    output_dataset: str,
    temp_dir: Path,
    hf_token: Optional[str] = None
) -> Dataset:
    """
    Merge newly classified papers into the previously published dataset.

    Downloads the existing parquet shards, concatenates them with the new
    rows, and deduplicates on "id" (keeping the most recent classification).
    Falls back to returning the new rows alone if the existing dataset
    cannot be loaded.

    Args:
        new_dataset: Newly classified papers
        output_dataset: Target dataset ID
        temp_dir: Temporary directory
        hf_token: HuggingFace token

    Returns:
        Merged dataset
    """
    try:
        logger.info(f"Loading existing dataset from {output_dataset}")
        from huggingface_hub import snapshot_download

        existing_dir = temp_dir / "existing_data"
        snapshot_download(
            output_dataset,
            local_dir=str(existing_dir),
            allow_patterns=["*.parquet"],
            repo_type="dataset",
            token=hf_token
        )
        existing_files = list(existing_dir.rglob("*.parquet"))
        if not existing_files:
            logger.info("No existing data found, returning new dataset")
            return new_dataset
        # Work in Polars: wrap the new rows' Arrow table without copying
        fresh_df = pl.from_arrow(new_dataset.data.table)
        previous_df = pl.read_parquet(existing_files)
        logger.info(f"Merging {fresh_df.height:,} new papers with {previous_df.height:,} existing papers")
        merged = pl.concat([previous_df, fresh_df], how="vertical")
        # New rows sit after the old ones, so keep="last" prefers the
        # freshly classified version of any duplicated paper ID.
        logger.info("Deduplicating by paper ID...")
        deduped = merged.unique(subset=["id"], keep="last")
        logger.info(f"Final dataset has {deduped.height:,} papers after deduplication")
        return Dataset.from_pandas(deduped.to_pandas())
    except Exception as e:
        logger.warning(f"Could not load existing dataset: {e}")
        logger.info("Returning new dataset only")
        return new_dataset
def main(
    input_dataset: str = DEFAULT_INPUT_DATASET,
    output_dataset: str = DEFAULT_OUTPUT_DATASET,
    model_id: str = DEFAULT_MODEL,
    batch_size: Optional[int] = None,
    limit: Optional[int] = None,
    full_refresh: bool = False,
    temp_dir: Optional[str] = None,
    hf_token: Optional[str] = None
):
    """
    Main incremental classification pipeline.

    Steps: authenticate, determine the last classified update_date, filter
    the source dataset to newer CS papers, classify them with the best
    available backend, merge with the existing output dataset, and push to
    the Hub (or save locally when no token is available).

    Args:
        input_dataset: Source arxiv metadata dataset ID.
        output_dataset: Destination dataset ID for classified papers.
        model_id: Classification model ID.
        batch_size: Inference batch size (backend default when None).
        limit: Optional cap on the number of papers (for testing).
        full_refresh: Reprocess all papers regardless of update date.
        temp_dir: Working directory; a temporary one is created (and
            removed afterwards) when None.
        hf_token: HuggingFace token (falls back to the HF_TOKEN env var).
    """
    # Authentication
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
    if HF_TOKEN:
        login(token=HF_TOKEN)
    else:
        logger.warning("No HF_TOKEN found. You may need to login for private datasets.")
    # Setup temp directory (caller-provided dirs are kept; ours get removed)
    if temp_dir:
        temp_path = Path(temp_dir)
        temp_path.mkdir(parents=True, exist_ok=True)
    else:
        temp_path = Path(tempfile.mkdtemp(prefix="arxiv_incremental_"))
    logger.info(f"Using temp directory: {temp_path}")
    # Check backend and set batch size
    backend, default_batch_size = check_backend()
    if batch_size is None:
        batch_size = default_batch_size
    logger.info(f"Using batch size: {batch_size:,}")
    # Step 1: Check for existing dataset and get last update date
    last_update_date = None
    if not full_refresh:
        last_update_date = get_last_update_date(output_dataset, HF_TOKEN)
        if last_update_date:
            logger.info(f"Will process papers newer than: {last_update_date}")
        else:
            logger.info("No existing dataset found - will process all papers")
    else:
        logger.info("Full refresh mode - will process all papers")
    # Step 2: Prepare incremental data
    papers_to_classify = prepare_incremental_data(
        input_dataset,
        temp_path,
        last_update_date,
        limit,
        full_refresh
    )
    if papers_to_classify is None:
        logger.info("No new papers to classify. Dataset is up to date!")
        # Cleanup temp directory
        if not temp_dir and temp_path.exists():
            shutil.rmtree(temp_path)
        return
    # Step 3: Load as HuggingFace Dataset
    logger.info("Loading papers to classify as HuggingFace Dataset...")
    dataset = load_dataset(
        "parquet",
        data_files=str(papers_to_classify),
        split="train"
    )
    logger.info(f"Dataset loaded with {len(dataset):,} papers to classify")
    # Step 4: Classify papers
    if backend == "vllm":
        results = classify_with_vllm(dataset, model_id, batch_size)
    else:
        results = classify_with_transformers(
            dataset, model_id, batch_size, backend
        )
    # Step 5: Add results to dataset (results are in dataset row order)
    logger.info("Adding classification results to dataset...")
    dataset = dataset.add_column("classification_label", [r["classification_label"] for r in results])
    dataset = dataset.add_column("is_new_dataset", [r["is_new_dataset"] for r in results])
    dataset = dataset.add_column("confidence_score", [r["confidence_score"] for r in results])
    # Add metadata recording when and with which model each row was classified
    dataset = dataset.add_column("classification_date", [datetime.now().isoformat()] * len(dataset))
    dataset = dataset.add_column("model_version", [model_id] * len(dataset))
    # Remove temporary columns and problematic nested columns
    columns_to_remove = ["text_for_classification"]
    if "versions" in dataset.column_names:
        columns_to_remove.append("versions")
    if "authors_parsed" in dataset.column_names:
        columns_to_remove.append("authors_parsed")
    dataset = dataset.remove_columns(columns_to_remove)
    # Step 6: Merge with existing dataset (if not full refresh)
    if not full_refresh and last_update_date:
        dataset = merge_with_existing(dataset, output_dataset, temp_path, HF_TOKEN)
    # Step 7: Push to Hub or save locally
    if HF_TOKEN:
        logger.info(f"Pushing results to: {output_dataset}")
        dataset.push_to_hub(output_dataset, token=HF_TOKEN)
    else:
        local_path = temp_path / "classified_dataset"
        logger.info(f"No HF_TOKEN, saving results locally to: {local_path}")
        dataset.save_to_disk(str(local_path))
    # Print statistics. Perf fix: read whole columns instead of indexing
    # dataset[i] row by row, which decodes one Arrow record per access and
    # made the stats pass O(n) expensive lookups. The dataset is guaranteed
    # non-empty here (we returned earlier when no papers were found), so the
    # divisions below are safe.
    num_new_datasets = sum(1 for flag in dataset["is_new_dataset"] if flag)
    avg_confidence = sum(dataset["confidence_score"]) / len(dataset)
    logger.info("="*60)
    logger.info("Incremental Classification Complete!")
    logger.info(f"Total papers in dataset: {len(dataset):,}")
    logger.info(f"Papers with new datasets: {num_new_datasets:,} ({num_new_datasets/len(dataset)*100:.1f}%)")
    logger.info(f"Average confidence score: {avg_confidence:.3f}")
    logger.info(f"Results saved to: {output_dataset}")
    if not full_refresh and last_update_date:
        logger.info(f"Processed papers newer than: {last_update_date}")
    logger.info("="*60)
    # Cleanup temp directory if not explicitly specified
    if not temp_dir and temp_path.exists():
        logger.info(f"Cleaning up temp directory: {temp_path}")
        shutil.rmtree(temp_path)
| if __name__ == "__main__": | |
| parser = argparse.ArgumentParser( | |
| description="Incremental classification of ArXiv papers for new datasets", | |
| formatter_class=argparse.RawDescriptionHelpFormatter, | |
| epilog=""" | |
| Examples: | |
| # Daily incremental update (only new papers) | |
| uv run batch_classify_arxiv_incremental.py | |
| # Monthly full refresh (reprocess everything) | |
| uv run batch_classify_arxiv_incremental.py --full-refresh | |
| # Test with small sample | |
| uv run batch_classify_arxiv_incremental.py --limit 100 | |
| # Custom datasets | |
| uv run batch_classify_arxiv_incremental.py \\ | |
| --input-dataset librarian-bots/arxiv-metadata-snapshot \\ | |
| --output-dataset my-custom-classification | |
| """ | |
| ) | |
| parser.add_argument( | |
| "--input-dataset", | |
| type=str, | |
| default=DEFAULT_INPUT_DATASET, | |
| help=f"Input dataset on HuggingFace Hub (default: {DEFAULT_INPUT_DATASET})" | |
| ) | |
| parser.add_argument( | |
| "--output-dataset", | |
| type=str, | |
| default=DEFAULT_OUTPUT_DATASET, | |
| help=f"Output dataset on HuggingFace Hub (default: {DEFAULT_OUTPUT_DATASET})" | |
| ) | |
| parser.add_argument( | |
| "--model", | |
| type=str, | |
| default=DEFAULT_MODEL, | |
| help=f"Model ID for classification (default: {DEFAULT_MODEL})" | |
| ) | |
| parser.add_argument( | |
| "--batch-size", | |
| type=int, | |
| help="Batch size for inference (auto-detected if not specified)" | |
| ) | |
| parser.add_argument( | |
| "--limit", | |
| type=int, | |
| help="Limit number of papers for testing" | |
| ) | |
| parser.add_argument( | |
| "--full-refresh", | |
| action="store_true", | |
| help="Process all papers regardless of update date (monthly refresh)" | |
| ) | |
| parser.add_argument( | |
| "--temp-dir", | |
| type=str, | |
| help="Directory for temporary files (auto-created if not specified)" | |
| ) | |
| parser.add_argument( | |
| "--hf-token", | |
| type=str, | |
| help="HuggingFace token (can also use HF_TOKEN env var)" | |
| ) | |
| args = parser.parse_args() | |
| main( | |
| input_dataset=args.input_dataset, | |
| output_dataset=args.output_dataset, | |
| model_id=args.model, | |
| batch_size=args.batch_size, | |
| limit=args.limit, | |
| full_refresh=args.full_refresh, | |
| temp_dir=args.temp_dir, | |
| hf_token=args.hf_token | |
| ) |