| from model import PredicateAwareSRL |
| import torch, json |
| from transformers import AutoTokenizer |
| import spacy |
| from spacy import cli as spacy_cli |
|
|
| _CACHE = { |
| "ckpt_path": None, |
| "bert_name": None, |
| "spacy_model": None, |
| "device": None, |
| "model": None, |
| "tokenizer": None, |
| "label2id": None, |
| "id2label": None, |
| "hparams": None, |
| "nlp": None, |
| } |
|
|
| _CACHE = { |
| "model": None, "tokenizer": None, "id2label": None, "nlp": None, "device": None, |
| "ckpt_path": None, "bert_name": None, "spacy_model": None, |
| } |
|
|
def srl_init(model_path, bert_name="bert-base-cased", spacy_model="en_core_web_md"):
    """Load and cache model/tokenizer/spaCy for the session.

    Call ONCE per session; afterwards sentences can be scored via
    _predict_cached()/main_predictor().

    The previous body duplicated _ensure_loaded() almost line-for-line but
    omitted the "label2id"/"hparams" cache keys and crashed with a KeyError
    when the checkpoint had neither "label2id" nor "id2label". Delegating
    keeps the loading logic in one place. If the same (path, bert_name,
    spacy_model) triple is already cached, this is a no-op.

    Args:
        model_path: path to a torch checkpoint file.
        bert_name: fallback HF encoder name when the checkpoint lacks one.
        spacy_model: spaCy pipeline to load (downloaded if missing).
    """
    _ensure_loaded(model_path, bert_name, spacy_model, PredicateAwareSRL)
|
|
def normalize_whitespace(s: str) -> str:
    """Map NBSP (U+00A0) and thin space (U+2009) to plain spaces, then strip.

    Returns "" when given None.
    """
    if s is None:
        return ""
    # Single-pass character substitution instead of chained .replace() calls.
    spaced = s.translate({0x00A0: " ", 0x2009: " "})
    return spaced.strip()
|
|
def spacy_verb_indices(nlp, sentence: str):
    """Return token indices of verb-like tokens (POS VERB/AUX, or a VB* tag)."""
    verb_positions = []
    for idx, tok in enumerate(nlp(sentence)):
        if tok.pos_ in ("VERB", "AUX") or tok.tag_.startswith("VB"):
            verb_positions.append(idx)
    return verb_positions
|
|
def words_and_spans_spacy(sentence: str, nlp):
    """Tokenize with spaCy; return token texts and their (start, end) char offsets."""
    tokens = list(nlp(sentence))
    texts = [tok.text for tok in tokens]
    offsets = [(tok.idx, tok.idx + len(tok.text)) for tok in tokens]
    return texts, offsets
|
|
def bio_to_spans(tags):
    """Convert a BIO tag sequence into (role, start, end) word spans.

    Verb tags (anything ending in "-V"), "O", and orphan "I-" tags are
    skipped; a span starts at "B-role" and extends over consecutive
    "I-role" tags. Indices are inclusive.
    """
    spans = []
    n = len(tags)
    idx = 0
    while idx < n:
        tag = tags[idx]
        # Only a non-verb "B-" tag can open a span; everything else advances.
        if not tag.startswith("B-") or tag.endswith("-V") or tag == "O":
            idx += 1
            continue
        role = tag[2:]
        continuation = f"I-{role}"
        end = idx + 1
        while end < n and tags[end] == continuation:
            end += 1
        spans.append((role, idx, end - 1))
        idx = end
    return spans
|
|
|
|
def _predict_cached(sentence):
    """Run SRL on one sentence using the objects cached by srl_init().

    Raises:
        RuntimeError: when srl_init() has not been called in this session.
    """
    if _CACHE["model"] is None:
        raise RuntimeError("Model not loaded. Call srl_init(ckpt_path, bert_name) once first.")
    sentence = normalize_whitespace(sentence)
    # str(device) keeps any device index (e.g. "cuda:1"); the old
    # 'cuda'/'cpu' ternary discarded it. Matches main_predictor's handling.
    return predict_srl_allennlp_like_spacy(
        _CACHE["model"], _CACHE["tokenizer"], _CACHE["nlp"], sentence,
        _CACHE["id2label"],
        device=str(_CACHE["device"]), prob_threshold=0.40, top_k=None,
        pick_best_if_none=True,
    )
|
|
| def _pick_device(dev=None): |
| if dev == "cpu": |
| return torch.device("cpu") |
| if dev and dev.startswith("cuda") and torch.cuda.is_available(): |
| return torch.device(dev) |
| return torch.device("cuda" if torch.cuda.is_available() else "cpu") |
|
|
def _ensure_loaded(model_path, bert_name, spacy_model, model_cls):
    """Load model/tokenizer/spaCy once and reuse across calls.

    Reloads only when (model_path, bert_name, spacy_model) differs from the
    cached configuration. Populates every key of _CACHE and globally
    disables autograd for inference.

    Raises:
        KeyError: when the checkpoint contains neither "label2id" nor
            "id2label" (the old code set num_labels=0 and later crashed with
            an opaque AttributeError at id2label construction).
    """
    must_reload = (
        _CACHE["model"] is None
        or _CACHE["ckpt_path"] != model_path
        or _CACHE["bert_name"] != bert_name
        or _CACHE["spacy_model"] != spacy_model
    )
    if not must_reload:
        return

    device = _pick_device()
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # trusted checkpoint files.
    ckpt = torch.load(model_path, map_location=device)
    h = ckpt.get("hparams", ckpt.get("hyper_parameters", {}))

    # Resolve the label vocabulary once; checkpoints may store either
    # direction of the mapping.
    label2id = ckpt.get("label2id")
    if label2id is None and "id2label" in ckpt:
        label2id = {v: k for k, v in ckpt["id2label"].items()}
    if label2id is None:
        raise KeyError("checkpoint must contain 'label2id' or 'id2label'")
    id2label = {int(v): k for k, v in label2id.items()}

    # Backfill hyper-parameters missing from older checkpoints.
    h.setdefault("bert_name", bert_name)
    h.setdefault("num_labels", len(label2id))

    model = model_cls(**h).to(device).eval()
    state = ckpt.get("model_state") or ckpt.get("state_dict") or ckpt
    model.load_state_dict(state)

    tok = AutoTokenizer.from_pretrained(h.get("bert_name", bert_name), use_fast=True)

    # Download the spaCy pipeline on first use if it is not installed.
    try:
        nlp = spacy.load(spacy_model, disable=["parser", "ner", "lemmatizer"])
    except OSError:
        spacy_cli.download(spacy_model)
        nlp = spacy.load(spacy_model, disable=["parser", "ner", "lemmatizer"])

    _CACHE.update({
        "ckpt_path": model_path,
        "bert_name": h.get("bert_name", bert_name),
        "spacy_model": spacy_model,
        "device": device,
        "model": model,
        "tokenizer": tok,
        "label2id": label2id,
        "id2label": id2label,
        "hparams": h,
        "nlp": nlp,
    })
    torch.set_grad_enabled(False)
|
|
|
|
@torch.no_grad()
def predict_srl_single(model, tokenizer, words, predicate_word_idx, id2label, device="cuda"):
    """Predict SRL tags for one (sentence, predicate) pair.

    Args:
        model: predicate-aware SRL model; called with BERT-style tensors plus
            word/predicate index tensors and returns (logits, _).
        tokenizer: fast HuggingFace tokenizer (must support word_ids()).
        words: sentence as a list of word strings (already tokenized).
        predicate_word_idx: index into `words` of the predicate word.
        id2label: maps label id (int) -> BIO tag string.
        device: device spec understood by _pick_device().

    Returns:
        (tags, logits): per-word BIO tag list and the per-word logits
        tensor moved to CPU.

    Raises:
        ValueError: when the tokenizer is not a fast tokenizer.
    """
    model.eval()
    # Encode the sentence without specials; [CLS]/[SEP] are spliced in by hand below.
    sent_enc = tokenizer(
        words, is_split_into_words=True, add_special_tokens=False,
        return_attention_mask=False, return_token_type_ids=False,
    )

    try:
        sent_word_ids = sent_enc.word_ids()
    except Exception:
        raise ValueError("Tokenizer must be fast (use_fast=True).")


    sent_wp_ids = sent_enc["input_ids"]
    # Some tokenizers return a nested list for batched-looking input; flatten.
    if isinstance(sent_wp_ids[0], list):
        sent_wp_ids = sent_wp_ids[0]


    # First wordpiece position of each word; +1 accounts for the [CLS]
    # prepended when the full input is assembled below.
    first_pos_by_wid = {}
    for pos, wid in enumerate(sent_word_ids):
        if wid is not None and wid not in first_pos_by_wid:
            first_pos_by_wid[wid] = pos + 1


    n_words = len(words)
    # NOTE(review): raises KeyError if a word yields zero wordpieces — confirm
    # upstream tokenization never produces such words.
    word_first_wp_fullidx = torch.tensor(
        [first_pos_by_wid[i] for i in range(n_words)], dtype=torch.long
    ).unsqueeze(0)


    # Encode the predicate word alone; it forms the second BERT segment.
    pred_enc = tokenizer(
        [words[predicate_word_idx]], is_split_into_words=True, add_special_tokens=False,
        return_attention_mask=False, return_token_type_ids=False,
    )
    pred_wp_ids = pred_enc["input_ids"]
    if isinstance(pred_wp_ids[0], list):
        pred_wp_ids = pred_wp_ids[0]


    # BERT pair layout: [CLS] sentence [SEP] predicate [SEP]; segment ids 0/1.
    cls_id, sep_id = tokenizer.cls_token_id, tokenizer.sep_token_id
    input_ids = [cls_id] + sent_wp_ids + [sep_id] + pred_wp_ids + [sep_id]
    token_type_ids = [0] * (1 + len(sent_wp_ids) + 1) + [1] * (len(pred_wp_ids) + 1)
    attention_mask = [1] * len(input_ids)


    # Batch dimension of 1 throughout.
    device = _pick_device(device)
    input_ids = torch.tensor(input_ids).unsqueeze(0).to(device)
    token_type_ids = torch.tensor(token_type_ids).unsqueeze(0).to(device)
    attention_mask = torch.tensor(attention_mask).unsqueeze(0).to(device)


    sent_len = torch.tensor([n_words], dtype=torch.long).to(device)
    sentence_mask = torch.ones(1, n_words, dtype=torch.bool).to(device)
    pred_word_idx = torch.tensor([predicate_word_idx], dtype=torch.long).to(device)
    # Binary indicator over words: 1 at the predicate position, 0 elsewhere.
    indicator = torch.zeros(1, n_words, dtype=torch.long).to(device)
    indicator[0, predicate_word_idx] = 1
    word_first_wp_fullidx = word_first_wp_fullidx.to(device)


    logits, _ = model(
        input_ids=input_ids,
        token_type_ids=token_type_ids,
        attention_mask=attention_mask,
        word_first_wp_fullidx=word_first_wp_fullidx,
        sentence_mask=sentence_mask,
        sent_lens=sent_len,
        pred_word_idx=pred_word_idx,
        indicator=indicator,
        labels=None,
    )
    # Greedy per-word decode.
    pred_ids = logits.argmax(-1).squeeze(0).tolist()
    tags = [id2label[i] for i in pred_ids]
    return tags, logits.squeeze(0).cpu()
|
|
|
|
def _encode_sentence_once(words, tokenizer):
    """Tokenize a pre-split sentence a single time.

    Returns:
        (wordpiece_ids, first_positions, n_words): the flat wordpiece id
        list, a 1-D long tensor giving each word's first wordpiece position
        offset by 1 (the [CLS] prepended when the full input is assembled),
        and the number of words.
    """
    encoded = tokenizer(
        words, is_split_into_words=True, add_special_tokens=False,
        return_attention_mask=False, return_token_type_ids=False,
    )
    wp_ids = encoded["input_ids"]
    if isinstance(wp_ids[0], list):
        wp_ids = wp_ids[0]
    # Record only the FIRST wordpiece position for each word id.
    first_by_word = {}
    for wp_pos, word_id in enumerate(encoded.word_ids()):
        if word_id is not None and word_id not in first_by_word:
            first_by_word[word_id] = wp_pos + 1
    count = len(words)
    first_positions = torch.tensor(
        [first_by_word[w] for w in range(count)], dtype=torch.long
    )
    return wp_ids, first_positions, count
|
|
@torch.no_grad()
def _batch_predict_verbs(model, tokenizer, words, verb_idxs, id2label, device):
    """One forward pass for all verbs in the sentence.

    Builds one [CLS] sentence [SEP] predicate [SEP] example per verb index,
    right-pads them into a single batch, and greedily decodes per-word tags
    for each example.

    Args:
        model: predicate-aware SRL model returning (logits, _).
        tokenizer: fast HuggingFace tokenizer.
        words: sentence as a list of word strings.
        verb_idxs: word indices treated as predicates (one example each).
        id2label: maps label id (int) -> BIO tag string.
        device: device spec understood by _pick_device().

    Returns:
        List of (predicate_word_idx, tags, row_logits) triples, one per verb,
        in the order of `verb_idxs`.
    """
    device = _pick_device(device)
    sent_wp_ids, word_first_1, n_words = _encode_sentence_once(words, tokenizer)
    cls_id, sep_id = tokenizer.cls_token_id, tokenizer.sep_token_id


    ids_list, tt_list, am_list = [], [], []
    pred_idx_list, ind_list, wf_list = [], [], []


    for p in verb_idxs:
        # Wordpieces of the predicate word alone (second BERT segment).
        pred_wp_ids = tokenizer(
            [words[p]], is_split_into_words=True, add_special_tokens=False,
            return_attention_mask=False, return_token_type_ids=False,
        )["input_ids"]
        if isinstance(pred_wp_ids[0], list):
            pred_wp_ids = pred_wp_ids[0]


        # Same pair layout as predict_srl_single: segment ids 0 then 1.
        ids = [cls_id] + sent_wp_ids + [sep_id] + pred_wp_ids + [sep_id]
        tt = [0]*(1 + len(sent_wp_ids) + 1) + [1]*(len(pred_wp_ids) + 1)
        am = [1]*len(ids)


        ids_list.append(torch.tensor(ids, dtype=torch.long))
        tt_list.append(torch.tensor(tt, dtype=torch.long))
        am_list.append(torch.tensor(am, dtype=torch.long))
        pred_idx_list.append(torch.tensor(p, dtype=torch.long))
        # Binary indicator over words: 1 at the predicate position.
        ind = torch.zeros(n_words, dtype=torch.long); ind[p] = 1
        ind_list.append(ind)
        wf_list.append(word_first_1.clone())


    # Right-pad a list of 1-D tensors into a (batch, max_len) matrix.
    def pad_1d(seq, pad_id=0):
        L = max(x.numel() for x in seq)
        out = torch.full((len(seq), L), pad_id, dtype=seq[0].dtype)
        for i, x in enumerate(seq):
            out[i, :x.numel()] = x
        return out


    # Fall back to eos when the tokenizer defines no pad token.
    pad_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
    input_ids = pad_1d(ids_list, pad_id).to(device)
    token_type_ids = pad_1d(tt_list, 0).to(device)
    attention_mask = pad_1d(am_list, 0).to(device)


    # Word-level tensors are constant-length (same sentence for every verb).
    B = len(verb_idxs)
    sent_lens = torch.full((B,), n_words, dtype=torch.long, device=device)
    sentence_mask = torch.ones(B, n_words, dtype=torch.bool, device=device)
    pred_word_idx = torch.stack(pred_idx_list).to(device)
    indicator = torch.stack(ind_list).to(device)
    word_first_wp_fullidx = torch.stack(wf_list).to(device)


    logits, _ = model(
        input_ids=input_ids,
        token_type_ids=token_type_ids,
        attention_mask=attention_mask,
        word_first_wp_fullidx=word_first_wp_fullidx,
        sentence_mask=sentence_mask,
        sent_lens=sent_lens,
        pred_word_idx=pred_word_idx,
        indicator=indicator,
        labels=None,
    )


    # Greedy per-word decode for each predicate's row.
    results = []
    for row, p in enumerate(verb_idxs):
        row_logits = logits[row]
        tags = [id2label[i] for i in row_logits.argmax(-1).tolist()]
        results.append((p, tags, row_logits))
    return results
|
|
|
|
@torch.no_grad()
def predict_srl_allennlp_like_spacy(
    model, tokenizer, nlp, sentence, id2label,
    device="cuda",
    prob_threshold=0.50,
    top_k=None,
    pick_best_if_none=True
):
    """AllenNLP-style SRL over every spaCy-detected verb in `sentence`.

    Args:
        model: predicate-aware SRL model.
        tokenizer: fast HuggingFace tokenizer.
        nlp: loaded spaCy pipeline (tokenization + verb detection).
        sentence: raw input sentence.
        id2label: maps label id (int) -> BIO tag string.
        device: device spec understood by _pick_device().
        prob_threshold: drop frames whose B-V probability at the predicate
            position falls below this value (None disables the filter).
        top_k: keep only the k highest-confidence frames (None keeps all).
        pick_best_if_none: when the threshold removes every frame, keep the
            single best one instead of returning none.

    Returns:
        (words, frames): the spaCy word list and a list of frame dicts with
        keys predicate_index / predicate / p_bv / tags / spans.
    """
    model.eval()
    # Parse once and derive both the word list and the verb positions from
    # the same doc (the previous version ran the spaCy pipeline twice on the
    # identical sentence).
    doc = nlp(sentence)
    words = [t.text for t in doc]
    if not words:
        return [], []

    verb_idxs = [
        i for i, t in enumerate(doc)
        if t.pos_ in ("VERB", "AUX") or t.tag_.startswith("VB")
    ]
    if not verb_idxs:
        return words, []

    batch_out = _batch_predict_verbs(model, tokenizer, words, verb_idxs, id2label, device)
    # Label id of the predicate tag, if the label set defines one.
    b_v_id = next((i for i, t in id2label.items() if t in ("B-V", "V")), None)

    frames = []
    for p, tags, row_logits in batch_out:
        # Confidence that position p really is the predicate; 1.0 when there
        # is no B-V/V label to measure against.
        p_bv = float(torch.softmax(row_logits[p], dim=-1)[b_v_id].item()) if b_v_id is not None else 1.0
        frames.append({
            "predicate_index": p,
            "predicate": words[p],
            "p_bv": p_bv,
            "tags": tags,
            "spans": bio_to_spans(tags)
        })

    # Threshold filter, with optional best-frame fallback, then top-k cap.
    if prob_threshold is not None:
        keep = [f for f in frames if f["p_bv"] >= prob_threshold]
        if not keep and pick_best_if_none and frames:
            keep = [max(frames, key=lambda r: r["p_bv"])]
        frames = keep
    if top_k is not None and len(frames) > top_k:
        frames = sorted(frames, key=lambda r: r["p_bv"], reverse=True)[:top_k]

    return words, frames
|
|
def main_predictor(model_path, bert_name, sentence, spacy_model="en_core_web_md"):
    """Convenience entry point: load (or reuse) everything, then run SRL.

    Returns the (words, frames) pair from predict_srl_allennlp_like_spacy.
    """
    cleaned = normalize_whitespace(sentence)
    _ensure_loaded(model_path, bert_name, spacy_model, PredicateAwareSRL)
    return predict_srl_allennlp_like_spacy(
        _CACHE["model"],
        _CACHE["tokenizer"],
        _CACHE["nlp"],
        cleaned,
        _CACHE["id2label"],
        device=str(_CACHE["device"]),
        prob_threshold=0.40,
        top_k=None,
        pick_best_if_none=True,
    )
|
|
|
|
|
|