davanstrien HF Staff committed on
Commit
a00a5c7
·
verified ·
1 Parent(s): 7dde9ce

Upload batch_classify_arxiv_incremental.py with huggingface_hub

Browse files
batch_classify_arxiv_incremental.py CHANGED
@@ -87,7 +87,7 @@ def check_backend() -> Tuple[str, int]:
87
  return "vllm", 100_000
88
  elif torch.cuda.is_available():
89
  logger.info("CUDA available but vLLM not installed. Using transformers with GPU.")
90
- return "cuda", 10_000
91
  elif torch.backends.mps.is_available():
92
  logger.info("Using Apple Silicon MPS device with transformers")
93
  return "mps", 1_000
@@ -253,7 +253,7 @@ def classify_with_vllm(
253
  Classify papers using vLLM for efficient GPU inference.
254
  """
255
  logger.info(f"Initializing vLLM with model: {model_id}")
256
- llm = LLM(model=model_id, task="classify")
257
 
258
  texts = dataset["text_for_classification"]
259
  total_papers = len(texts)
 
87
  return "vllm", 100_000
88
  elif torch.cuda.is_available():
89
  logger.info("CUDA available but vLLM not installed. Using transformers with GPU.")
90
+ return "cuda", 256 # Smaller batch for transformers to avoid OOM
91
  elif torch.backends.mps.is_available():
92
  logger.info("Using Apple Silicon MPS device with transformers")
93
  return "mps", 1_000
 
253
  Classify papers using vLLM for efficient GPU inference.
254
  """
255
  logger.info(f"Initializing vLLM with model: {model_id}")
256
+ llm = LLM(model=model_id, runner="pooling")
257
 
258
  texts = dataset["text_for_classification"]
259
  total_papers = len(texts)