# research-document-archive / runpod/02_bertopic_gpu.py
# Uploaded by datamatters24 via huggingface_hub (commit 67abc8b, verified).
"""
Step 2: BERTopic + UMAP clustering on GPU.
Run this on RUNPOD (2x RTX 5090, 64GB VRAM).
Input: embeddings.npz + doc_metadata.jsonl (from Step 1)
Output: bertopic_results.jsonl (doc_id -> topic assignments + labels)
topic_info.json (topic descriptions)
umap_coords.npz (2D coordinates for visualization)
Install: pip install bertopic cuml-cu12 hdbscan umap-learn plotly
(or: pip install bertopic[all] cuml-cu12)
"""
import json
import time
import numpy as np
# ── Configuration ─────────────────────────────────────────────────────────────
WORKSPACE = "/workspace" # RunPod default
EMBEDDINGS_FILE = f"{WORKSPACE}/embeddings.npz"  # Step 1 output: (N, D) embeddings + doc_ids
METADATA_FILE = f"{WORKSPACE}/doc_metadata.jsonl"  # Step 1 output: one JSON object per document
OUTPUT_DIR = WORKSPACE  # all result files are written alongside the inputs
# BERTopic parameters
MIN_TOPIC_SIZE = 50 # minimum docs per topic
NR_TOPICS = "auto" # let BERTopic decide, or set int like 100
UMAP_N_NEIGHBORS = 15  # local neighborhood size for the clustering UMAP
UMAP_N_COMPONENTS = 5 # internal UMAP dims for clustering
UMAP_MIN_DIST = 0.0  # tightest packing of the low-dim projection
UMAP_METRIC = "cosine"
# Visualization UMAP (separate 2D projection)
VIZ_N_COMPONENTS = 2  # 2D scatter coordinates for plotting
VIZ_N_NEIGHBORS = 15
def _load_embeddings():
    """Load the embedding matrix and parallel doc_id array produced by Step 1.

    Returns:
        (embeddings, doc_ids): embeddings is an (N, D) ndarray and doc_ids an
        (N,) array aligned row-for-row with it.
    """
    print("Loading embeddings...")
    data = np.load(EMBEDDINGS_FILE)
    embeddings = data["embeddings"]  # (N, 384)
    doc_ids = data["doc_ids"]  # (N,)
    print(f" Shape: {embeddings.shape}, dtype: {embeddings.dtype}")
    print(f" Memory: {embeddings.nbytes / 1e9:.2f} GB")
    return embeddings, doc_ids


def _load_metadata():
    """Read doc_metadata.jsonl into a dict keyed by each record's "id" field."""
    print("Loading metadata...")
    metadata = {}
    # Explicit encoding: JSONL is UTF-8 regardless of the platform default.
    with open(METADATA_FILE, encoding="utf-8") as f:
        for line in f:
            d = json.loads(line)
            metadata[d["id"]] = d
    print(f" Documents: {len(metadata)}")
    return metadata


def _build_umap_model():
    """Build the UMAP model BERTopic uses for dimensionality reduction.

    Prefers GPU-accelerated cuML; falls back to CPU umap-learn when cuML is
    not installed.

    Returns:
        (model, use_gpu): the configured UMAP instance and a bool flag.
    """
    try:
        from cuml.manifold import UMAP as cuUMAP
        print("\nUsing GPU-accelerated UMAP (cuML)")
        model = cuUMAP(
            n_neighbors=UMAP_N_NEIGHBORS,
            n_components=UMAP_N_COMPONENTS,
            min_dist=UMAP_MIN_DIST,
            metric=UMAP_METRIC,
            random_state=42,
        )
        return model, True
    except ImportError:
        from umap import UMAP
        print("\nUsing CPU UMAP (cuML not available)")
        model = UMAP(
            n_neighbors=UMAP_N_NEIGHBORS,
            n_components=UMAP_N_COMPONENTS,
            min_dist=UMAP_MIN_DIST,
            metric=UMAP_METRIC,
            random_state=42,
            low_memory=True,  # CPU path: trade speed for RAM on large corpora
        )
        return model, False


def _build_hdbscan_model():
    """Build the HDBSCAN clustering model, GPU-accelerated when cuML exists."""
    try:
        from cuml.cluster import HDBSCAN
        print("Using GPU-accelerated HDBSCAN (cuML)")
    except ImportError:
        from hdbscan import HDBSCAN
        print("Using CPU HDBSCAN")
    return HDBSCAN(
        min_cluster_size=MIN_TOPIC_SIZE,
        min_samples=10,
        gen_min_span_tree=True,
        # prediction_data presumably kept so unseen docs can be assigned later
        # via approximate_predict — confirm with the downstream pipeline.
        prediction_data=True,
    )


def _pseudo_docs(doc_ids, metadata):
    """Build one lightweight pseudo-text per document: "<section> <filename>".

    We already have embeddings, so BERTopic needs no embedding model; these
    strings only feed c-TF-IDF keyword extraction for topic labels.
    """
    docs = []
    for doc_id in doc_ids:
        # NOTE(review): the int() cast assumes metadata "id" values are
        # integers; if Step 1 writes them as strings every lookup misses and
        # all docs fall back to "unknown" — verify against Step 1's output.
        meta = metadata.get(int(doc_id), {})
        section = meta.get("section", "unknown")
        path = meta.get("path", "")
        fname = path.split("/")[-1] if path else ""
        docs.append(f"{section} {fname}")
    return docs


def _compute_viz_coords(embeddings, use_gpu):
    """Project embeddings to 2D for visualization.

    Best-effort: visualization is optional, so on any failure a zero-filled
    (N, 2) array is returned rather than aborting after the expensive fit.
    """
    print("\nComputing 2D UMAP projection for visualization...")
    t_viz = time.time()
    try:
        if use_gpu:
            from cuml.manifold import UMAP as VizUMAP
            extra = {}
        else:
            from umap import UMAP as VizUMAP
            extra = {"low_memory": True}
        viz_umap = VizUMAP(
            n_neighbors=VIZ_N_NEIGHBORS,
            n_components=VIZ_N_COMPONENTS,
            min_dist=0.1,  # looser than the clustering UMAP, for readable plots
            metric=UMAP_METRIC,
            random_state=42,
            **extra,
        )
        coords_2d = viz_umap.fit_transform(embeddings)
        # cuML may hand back a device-resident frame/array; pull it to host.
        if hasattr(coords_2d, "to_numpy"):
            coords_2d = coords_2d.to_numpy()
        coords_2d = np.array(coords_2d, dtype=np.float32)
        print(f"2D projection complete in {(time.time() - t_viz) / 60:.1f} minutes")
    except Exception as e:
        print(f"2D projection failed: {e}")
        coords_2d = np.zeros((len(embeddings), 2), dtype=np.float32)
    return coords_2d


def _save_outputs(topic_model, topic_info, topics_arr, doc_ids, metadata, coords_2d):
    """Write the four output artifacts (assignments, topic info, coords, model).

    Returns:
        (results_path, topic_info_path, coords_path, topic_details)
    """
    from collections import Counter

    print("\nSaving results...")
    # Count docs per topic once in O(N); the original rebuilt np.array(topics)
    # and rescanned it for every topic, i.e. O(T*N).
    topic_counts = Counter(int(t) for t in topics_arr)
    # 1. Per-document topic assignments
    results_path = f"{OUTPUT_DIR}/bertopic_results.jsonl"
    with open(results_path, "w", encoding="utf-8") as f:
        for i, doc_id in enumerate(doc_ids):
            meta = metadata.get(int(doc_id), {})
            record = {
                "document_id": int(doc_id),
                "source_section": meta.get("section", ""),
                "topic_id": int(topics_arr[i]),
                "umap_x": float(coords_2d[i][0]),
                "umap_y": float(coords_2d[i][1]),
            }
            f.write(json.dumps(record) + "\n")
    print(f" {results_path} ({len(doc_ids)} records)")
    # 2. Topic descriptions: label = top-3 words joined, plus scored top-10
    topic_info_path = f"{OUTPUT_DIR}/topic_info.json"
    topic_details = {}
    for topic_id in topic_info["Topic"].unique():
        if topic_id == -1:
            topic_details[-1] = {
                "label": "Outlier",
                "words": [],
                "count": topic_counts.get(-1, 0),
            }
            continue
        words = topic_model.get_topic(topic_id)
        topic_details[int(topic_id)] = {
            "label": "_".join([w for w, _ in words[:3]]),
            "words": [{"word": w, "score": float(s)} for w, s in words[:10]],
            "count": topic_counts.get(int(topic_id), 0),
        }
    with open(topic_info_path, "w", encoding="utf-8") as f:
        json.dump(topic_details, f, indent=2)
    print(f" {topic_info_path} ({len(topic_details)} topics)")
    # 3. UMAP coordinates, stored with doc_ids/topics so rows stay aligned
    coords_path = f"{OUTPUT_DIR}/umap_coords.npz"
    np.savez_compressed(coords_path, coords=coords_2d, doc_ids=doc_ids, topics=topics_arr)
    print(f" {coords_path}")
    # 4. The fitted BERTopic model itself, for later reuse
    model_path = f"{OUTPUT_DIR}/bertopic_model"
    topic_model.save(model_path, serialization="safetensors", save_ctfidf=True)
    print(f" {model_path}/")
    return results_path, topic_info_path, coords_path, topic_details


def main():
    """Cluster precomputed embeddings with BERTopic and write all artifacts.

    Pipeline: load embeddings + metadata -> build UMAP/HDBSCAN (GPU if cuML
    is available) -> fit BERTopic on pseudo-texts -> 2D projection -> save
    results and print a summary.
    """
    t_start = time.time()

    embeddings, doc_ids = _load_embeddings()
    metadata = _load_metadata()
    umap_model, use_gpu = _build_umap_model()
    hdbscan_model = _build_hdbscan_model()

    # Imported lazily, matching the cuML probes above: heavy dependencies are
    # only pulled in once we know we are actually going to fit.
    from bertopic import BERTopic
    from bertopic.vectorizers import ClassTfidfTransformer
    from sklearn.feature_extraction.text import CountVectorizer

    print("\nPreparing document texts from metadata...")
    docs = _pseudo_docs(doc_ids, metadata)

    vectorizer = CountVectorizer(stop_words="english", ngram_range=(1, 2))
    ctfidf = ClassTfidfTransformer(reduce_frequent_words=True)

    print("\nInitializing BERTopic...")
    topic_model = BERTopic(
        umap_model=umap_model,
        hdbscan_model=hdbscan_model,
        vectorizer_model=vectorizer,
        ctfidf_model=ctfidf,
        nr_topics=NR_TOPICS,
        top_n_words=10,
        verbose=True,
        calculate_probabilities=False,  # saves memory at 234K docs
    )

    print(f"\nFitting BERTopic on {len(embeddings)} documents...")
    t_fit = time.time()
    # Probabilities are disabled above, so the second return value is None.
    topics, _ = topic_model.fit_transform(docs, embeddings=embeddings)
    print(f"Fit complete in {(time.time() - t_fit) / 60:.1f} minutes")

    # Materialize the topic array once; the original rebuilt np.array(topics)
    # on every use.
    topics_arr = np.asarray(topics)
    n_outliers = int((topics_arr == -1).sum())

    topic_info = topic_model.get_topic_info()
    print(f"\nTopics discovered: {len(topic_info) - 1}")  # -1 for outlier topic
    print(f"Outlier documents (topic -1): {n_outliers}")
    print("\nTop 20 topics:")
    print(topic_info.head(20).to_string())

    coords_2d = _compute_viz_coords(embeddings, use_gpu)

    results_path, topic_info_path, coords_path, topic_details = _save_outputs(
        topic_model, topic_info, topics_arr, doc_ids, metadata, coords_2d
    )

    # ── Summary ───────────────────────────────────────────────────────────────
    total_time = (time.time() - t_start) / 60
    print(f"\n{'='*60}")
    print("BERTopic clustering complete!")
    print(f" Documents: {len(doc_ids):,}")
    print(f" Topics found: {len(topic_details) - 1}")  # exclude outlier
    print(f" Outliers: {n_outliers:,}")
    print(f" Total time: {total_time:.1f} minutes")
    print(f" GPU used: {use_gpu}")
    print("\nFiles to transfer back to Hetzner:")
    print(f" scp {results_path} {topic_info_path} {coords_path} hetzner:/var/www/research/runpod/")
    print(f"{'='*60}")
# Script entry point: run the full clustering pipeline when executed directly.
if __name__ == "__main__":
    main()