| import torch
|
| import torch.nn.functional as F
|
| from typing import List, Optional, Dict, Tuple, Union
|
| import math
|
| import random
|
| from tqdm import tqdm
|
| from rosaplus import ROSAPlus, ROSAFallbackLM, ROSACharPredictor
|
|
|
class ROSACudaWrapper:
    """
    CUDA-accelerated wrapper for ROSAPlus.
    Optimized for batched inference using PyTorch.

    Converts the suffix-automaton (SAM) graph and the smoothed LM counts of a
    built ROSAPlus model into dense device tensors so that state advancement
    and next-character probabilities are computed for a whole batch at once.
    """

    def __init__(self, model: ROSAPlus, device: Union[str, torch.device] = "cuda"):
        """
        Args:
            model: ROSAPlus instance whose LM has already been built.
            device: torch device (string or torch.device) hosting all tensors.

        Raises:
            RuntimeError: if the model has no built LM.
        """
        if model.lm is None:
            raise RuntimeError("ROSAPlus model must have a built LM before converting to CUDA.")

        self.device = torch.device(device)
        self.model = model
        self.alphabet = model.lm.alphabet
        self.char_to_idx = {ch: i for i, ch in enumerate(self.alphabet)}
        self.idx_to_char = {i: ch for i, ch in enumerate(self.alphabet)}
        self.vocab_size = len(self.alphabet)

        print(f"Converting SAM graph to CUDA tensors on {self.device}...")

        # model.sam.c is used below as a per-state fallback parent chain
        # (presumably suffix links, -1 at the root — confirm against rosaplus);
        # model.sam.d as a per-state depth compared against max_order.
        self.c = torch.tensor(model.sam.c, dtype=torch.long, device=self.device)
        self.d = torch.tensor(model.sam.d, dtype=torch.long, device=self.device)

        num_states = len(model.sam.b)
        self.num_states = num_states

        print(f"Graph stats: {num_states} states, {self.vocab_size} vocab size.")
        if num_states * self.vocab_size > 500_000_000:
            print("WARNING: Graph is very large. Dense transition table might consume excessive GPU memory.")

        # Build the dense transition table on the CPU first and move it to the
        # device once.  (Fix: a same-sized tensor was previously also
        # pre-allocated directly on the GPU and immediately overwritten,
        # doubling peak device memory for nothing.)  -1 marks a missing edge.
        b_cpu = torch.full((num_states, self.vocab_size), -1, dtype=torch.long)
        for i, trans in enumerate(tqdm(model.sam.b, desc="Building transition table")):
            for ch, next_state in trans.items():
                if ch in self.char_to_idx:
                    b_cpu[i, self.char_to_idx[ch]] = next_state
        self.transitions = b_cpu.to(self.device)

        # Per-state continuation counts for the smoothed LM, built on CPU then
        # moved in one transfer.
        self.counts_matrix = torch.zeros((num_states, self.vocab_size), dtype=torch.float32, device="cpu")
        for i, freq in enumerate(tqdm(model.lm.freq, desc="Building count table")):
            for ch, cnt in freq.items():
                if ch in self.char_to_idx:
                    self.counts_matrix[i, self.char_to_idx[ch]] = float(cnt)
        self.counts_matrix = self.counts_matrix.to(self.device)

        # Witten-Bell statistics: N = total count per state, T = number of
        # distinct continuation characters per state.
        self.N = self.counts_matrix.sum(dim=1)
        self.T = (self.counts_matrix > 0).float().sum(dim=1)

        # Unigram fallback distribution for residual probability mass.
        self.unigram_counts = torch.zeros(self.vocab_size, dtype=torch.float32, device=self.device)
        for ch, cnt in model.lm.unigram.items():
            if ch in self.char_to_idx:
                self.unigram_counts[self.char_to_idx[ch]] = float(cnt)
        self.unigram_total = self.unigram_counts.sum()

        # None means "unbounded"; use a huge sentinel so tensor comparisons
        # against it remain valid.
        self.max_order = model.max_order
        if self.max_order is None:
            self.max_order = int(1e9)

        print("CUDA initialization complete.")

    def _advance_batch(self, current_states: torch.Tensor, next_chars_idx: torch.Tensor) -> torch.Tensor:
        """
        Advance states for a batch of characters.

        Takes the direct transition where one exists; otherwise walks the
        fallback chain (self.c) until a state with the transition is found,
        defaulting to the root (state 0) when the chain is exhausted.

        current_states: [batch_size]
        next_chars_idx: [batch_size]
        Returns: [batch_size] next states
        """
        # Direct transitions; -1 marks "no edge", resolved via fallback below.
        next_states = self.transitions[current_states, next_chars_idx]

        active_mask = (next_states == -1)
        curr = current_states.clone()

        # Safety cap on the fallback walk so a malformed graph cannot loop
        # forever; anything unresolved afterwards drops to the root.
        max_depth = 100

        for _ in range(max_depth):
            if not active_mask.any():
                break

            # Follow one fallback link for every still-unresolved sequence.
            curr[active_mask] = self.c[curr[active_mask]]

            # Sequences whose chain ran past the root (link == -1).
            is_root_parent = (curr == -1)

            # Still on a real state: retry the transition there.
            mask_normal = active_mask & (~is_root_parent)
            if mask_normal.any():
                t = self.transitions[curr[mask_normal], next_chars_idx[mask_normal]]
                found = (t != -1)

                found_indices = torch.nonzero(mask_normal).squeeze(1)[found]
                next_states[found_indices] = t[found]
                active_mask[found_indices] = False

            # Fell off the chain: try the root's transition and settle on the
            # root itself if even that edge is missing.
            mask_root = active_mask & is_root_parent
            if mask_root.any():
                t = self.transitions[torch.zeros_like(curr[mask_root]), next_chars_idx[mask_root]]
                t[t == -1] = 0

                indices = torch.nonzero(mask_root).squeeze(1)
                next_states[indices] = t
                active_mask[indices] = False

        # Anything still unresolved after the capped walk goes to the root.
        next_states[active_mask] = 0

        return next_states

    def get_probs_batch(self, current_states: torch.Tensor) -> torch.Tensor:
        """
        Compute Witten-Bell smoothed probabilities for a batch of states.

        Walks each state's fallback chain, mixing in each level's count
        distribution weighted by the remaining probability mass (residual),
        then hands any leftover mass to the unigram (or uniform) fallback.

        Returns: [batch_size, vocab_size]
        """
        batch_size = current_states.shape[0]
        probs = torch.zeros((batch_size, self.vocab_size), device=self.device)
        residual = torch.ones(batch_size, device=self.device)

        curr = current_states.clone()
        active_mask = torch.ones(batch_size, dtype=torch.bool, device=self.device)

        # Cap the backoff walk; residual mass left after this many levels is
        # absorbed by the unigram fallback below.
        max_depth = 100

        for _ in range(max_depth):
            if not active_mask.any():
                break

            valid_mask = (curr != -1) & active_mask
            if not valid_mask.any():
                break

            batch_indices = torch.nonzero(valid_mask).squeeze(1)
            states_v = curr[batch_indices]

            # Only mix in states whose depth does not exceed the model order.
            d_v = self.d[states_v]
            process_mask = (d_v <= self.max_order)

            proc_indices = batch_indices[process_mask]
            proc_states = states_v[process_mask]

            if len(proc_states) > 0:
                N_v = self.N[proc_states]
                T_v = self.T[proc_states]

                # Skip states with no observed continuations.
                has_counts = (N_v > 0)

                final_proc_indices = proc_indices[has_counts]
                final_proc_states = proc_states[has_counts]

                if len(final_proc_states) > 0:
                    N_f = N_v[has_counts]
                    T_f = T_v[has_counts]

                    # Witten-Bell interpolation weight: lambda = N / (N + T).
                    lam = N_f / (N_f + T_f + 1e-9)

                    # T == 0 with N > 0 cannot back off; keep all mass here.
                    lam[T_f == 0] = 1.0

                    r = residual[final_proc_indices].unsqueeze(1)
                    l = lam.unsqueeze(1)
                    c = self.counts_matrix[final_proc_states]
                    n = N_f.unsqueeze(1)

                    added_probs = r * l * (c / n)
                    probs[final_proc_indices] += added_probs

                    residual[final_proc_indices] *= (1.0 - lam)

            # Back off one fallback level for every still-valid sequence.
            curr[batch_indices] = self.c[states_v]

            active_mask = active_mask & (curr != -1)

            # Stop refining sequences whose residual mass is negligible.
            active_mask = active_mask & (residual > 1e-6)

        # Distribute remaining mass over the unigram distribution, or
        # uniformly when no unigram counts exist.
        if self.unigram_total > 0:
            uni_probs = self.unigram_counts / self.unigram_total
            probs += residual.unsqueeze(1) * uni_probs.unsqueeze(0)
        else:
            probs += residual.unsqueeze(1) * (1.0 / self.vocab_size)

        # Renormalize to guard against numerical drift.
        sum_probs = probs.sum(dim=1, keepdim=True)
        probs = probs / (sum_probs + 1e-12)

        return probs

    def generate_batch(
        self,
        prompts: List[str],
        steps: int = 100,
        temperature: float = 1.0,
        top_p: float = 0.9,
        top_k: int = 50,
        seed: Optional[int] = None
    ) -> List[str]:
        """
        Batched generation.

        Feeds every prompt through the automaton in lockstep, then samples
        `steps` characters per sequence with temperature / top-k / top-p
        filtering.  Returns only the generated continuations (prompts are not
        echoed back).
        """
        if seed is not None:
            torch.manual_seed(seed)

        # Fix: an empty batch previously crashed on max() over no prompts.
        if not prompts:
            return []

        batch_size = len(prompts)

        # Every sequence starts at the root state.
        current_states = torch.zeros(batch_size, dtype=torch.long, device=self.device)

        max_len = max(len(p) for p in prompts)

        print("Processing prompts...")
        for i in range(max_len):
            # Build column i of prompt characters; exhausted prompts are
            # masked out (their index-0 placeholder is never applied).
            chars = []
            mask = []
            for p in prompts:
                if i < len(p):
                    if p[i] in self.char_to_idx:
                        chars.append(self.char_to_idx[p[i]])
                    else:
                        # Out-of-alphabet characters map to index 0.
                        chars.append(0)
                    mask.append(True)
                else:
                    chars.append(0)
                    mask.append(False)

            chars_tensor = torch.tensor(chars, dtype=torch.long, device=self.device)
            mask_tensor = torch.tensor(mask, dtype=torch.bool, device=self.device)

            if mask_tensor.any():
                # Advance only sequences that still have prompt characters.
                active_states = current_states[mask_tensor]
                active_chars = chars_tensor[mask_tensor]
                new_states = self._advance_batch(active_states, active_chars)
                current_states[mask_tensor] = new_states

        print(f"Generating {steps} steps for {batch_size} sequences...")
        generated_indices = []

        for _ in range(steps):
            probs = self.get_probs_batch(current_states)

            # Temperature reshaping (power transform on probabilities).
            if temperature != 1.0:
                probs = torch.pow(probs, 1.0 / temperature)
                probs = probs / probs.sum(dim=1, keepdim=True)

            # Top-k: keep only the k most likely characters.
            if top_k > 0:
                vals, inds = torch.topk(probs, k=min(top_k, self.vocab_size), dim=1)
                probs_topk = torch.zeros_like(probs)
                probs_topk.scatter_(1, inds, vals)
                probs = probs_topk / probs_topk.sum(dim=1, keepdim=True)

            # Nucleus (top-p): drop the tail past cumulative mass top_p,
            # always keeping at least the single most likely character.
            if top_p < 1.0:
                sorted_probs, sorted_indices = torch.sort(probs, descending=True, dim=1)
                cumulative_probs = torch.cumsum(sorted_probs, dim=1)

                sorted_indices_to_remove = cumulative_probs > top_p

                # Shift right so the first token crossing the threshold stays.
                sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                sorted_indices_to_remove[..., 0] = 0

                indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
                probs[indices_to_remove] = 0
                probs = probs / probs.sum(dim=1, keepdim=True)

            next_chars = torch.multinomial(probs, num_samples=1).squeeze(1)

            generated_indices.append(next_chars.cpu())

            current_states = self._advance_batch(current_states, next_chars)

        # Decode the [batch, steps] index matrix into strings.
        outputs = []
        generated_indices = torch.stack(generated_indices, dim=1)

        for i in range(batch_size):
            indices = generated_indices[i].tolist()
            text = "".join([self.idx_to_char.get(idx, "") for idx in indices])
            outputs.append(text)

        return outputs
|
|
|
|
|
def run_cuda_inference(model_path: str, prompts: List[str], steps=100, device="cuda"):
    """
    Load a model, convert to CUDA, and run batched inference.

    Convenience one-shot entry point: deserializes a saved ROSAPlus model,
    wraps it in ROSACudaWrapper, and returns the generated continuations.
    """
    print(f"Loading model from {model_path}...")
    loaded = ROSAPlus.load(model_path)
    wrapper = ROSACudaWrapper(loaded, device=device)
    return wrapper.generate_batch(prompts, steps=steps)
|
|
|
if __name__ == "__main__":
    # Fixes: removed the redundant `from rosaplus import ROSAPlus` (already
    # imported at the top of the file), hoisted `import sys`, and dispatch on
    # argv — previously the hard-coded "rosa-model.json" demo always ran
    # first, crashing before a user-supplied model path was ever reached.
    import sys

    if len(sys.argv) > 1:
        # A model path was given on the command line: run batched inference.
        model_file = sys.argv[1]
        prompts = ["The meaning of life is", "Once upon a time"]
        results = run_cuda_inference(model_file, prompts)
        for p, r in zip(prompts, results):
            print(f"Prompt: {p}")
            print(f"Result: {r}")
            print("-" * 20)
    else:
        # No arguments: demo against the default model file.
        model = ROSAPlus.load("rosa-model.json")

        cuda_model = ROSACudaWrapper(model, device="cuda")

        prompts = ["The sky is", "Once upon a time", "Hello world"]
        results = cuda_model.generate_batch(prompts, steps=200, temperature=0.8)

        for p, r in zip(prompts, results):
            print(f"{p} -> {r}")
|
|
|