| | """
|
| | HuggingFaceμμ λͺ¨λΈμ λ‘λνμ¬ μΆλ‘ νλ μμ
|
| |
|
| | Usage:
|
| | from inference_example import extract_sentences
|
| | results = extract_sentences("μΌμ±μ μμ μ€μ μ΄ μμ₯ μμμ μννλ€. ...")
|
| | """
|
| |
|
import re
from typing import Dict, List, Optional

import torch
from transformers import AutoTokenizer

from model import (
    DocumentEncoderConfig,
    DocumentEncoderForExtractiveSummarization,
    IDX_TO_ROLE,
)
|
| |
|
| |
|
def split_into_sentences(text: str) -> List[str]:
    """Split *text* into sentences at ``.``/``!``/``?`` boundaries.

    The boundary is a terminal punctuation mark followed by whitespace;
    surrounding whitespace is trimmed and empty fragments are dropped.
    """
    fragments = re.split(r"(?<=[.!?])\s+", text.strip())
    cleaned: List[str] = []
    for fragment in fragments:
        fragment = fragment.strip()
        if fragment:
            cleaned.append(fragment)
    return cleaned
|
| |
|
| |
|
def extract_sentences(
    text: str,
    model_name_or_path: str = "./",
    top_k: int = 3,
    threshold: float = 0.5,
    device: Optional[str] = None,
) -> Dict:
    """Extract representative sentences from *text* and classify their roles.

    Loads the extractive-summarization model and tokenizer, scores every
    sentence of *text*, and returns the top-scoring sentences (those at or
    above *threshold*, capped at *top_k*) together with their predicted roles.

    Args:
        text: Input text (e.g. a financial report).
        model_name_or_path: Local path or HuggingFace repo ID of the model.
        top_k: Maximum number of sentences to select.
        threshold: Minimum score for a sentence to count as representative.
        device: "cuda" or "cpu"; auto-detected when None.

    Returns:
        Dict with keys:
            'sentences'  - all split sentences (untruncated),
            'all_scores' - score per scored sentence,
            'all_roles'  - role label per scored sentence,
            'selected'   - chosen sentences as dicts with
                           'index', 'sentence', 'score', 'role',
                           sorted back into document order.
    """
    device = device or ("cuda" if torch.cuda.is_available() else "cpu")

    config = DocumentEncoderConfig.from_pretrained(model_name_or_path)
    model = DocumentEncoderForExtractiveSummarization.from_pretrained(
        model_name_or_path, config=config
    )
    model = model.to(device)
    model.eval()

    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)

    sentences = split_into_sentences(text)
    if not sentences:
        return {"sentences": [], "all_scores": [], "all_roles": [], "selected": []}

    max_sentences = config.max_sentences
    max_length = config.max_length

    # Truncate to the model's sentence capacity, then pad with empty slots
    # so the batch always has exactly max_sentences entries.
    padded = sentences[:max_sentences]
    num_real = len(padded)
    padded.extend([""] * (max_sentences - num_real))

    all_input_ids, all_attention_mask = [], []
    for sentence in padded:
        if sentence:
            enc = tokenizer(
                sentence,
                max_length=max_length,
                padding="max_length",
                truncation=True,
                return_tensors="pt",
            )
        else:
            # Padding slot: all-zero token ids with an all-zero attention
            # mask, so the encoder attends to nothing in this position.
            enc = {
                "input_ids": torch.zeros(1, max_length, dtype=torch.long),
                "attention_mask": torch.zeros(1, max_length, dtype=torch.long),
            }
        all_input_ids.append(enc["input_ids"])
        all_attention_mask.append(enc["attention_mask"])

    # Shape: (1, max_sentences, max_length) — a single-document batch.
    input_ids = torch.cat(all_input_ids, dim=0).unsqueeze(0).to(device)
    attention_mask = torch.cat(all_attention_mask, dim=0).unsqueeze(0).to(device)
    # document_mask marks which sentence slots are real (1) vs padding (0).
    document_mask = torch.zeros(1, max_sentences, device=device)
    document_mask[0, :num_real] = 1

    with torch.no_grad():
        scores, role_logits = model(input_ids, attention_mask, document_mask)

    scores_list = scores[0, :num_real].tolist()
    role_indices = role_logits[0, :num_real].argmax(dim=-1).tolist()
    roles_list = [IDX_TO_ROLE[idx] for idx in role_indices]

    # Keep sentences at or above the threshold; zip stops at num_real
    # scored sentences even though `sentences` may be longer.
    selected = []
    for i, (sent, score, role) in enumerate(zip(sentences, scores_list, roles_list)):
        if score >= threshold:
            selected.append({"index": i, "sentence": sent, "score": score, "role": role})

    # Rank by score to take the best top_k, then restore document order.
    selected.sort(key=lambda x: x["score"], reverse=True)
    selected = selected[:top_k]
    selected.sort(key=lambda x: x["index"])

    return {
        "sentences": sentences,
        "all_scores": scores_list,
        "all_roles": roles_list,
        "selected": selected,
    }
|
| |
|
| |
|
| | if __name__ == "__main__":
|
| | text = """
|
| | μΌμ±μ μμ 2024λ
4λΆκΈ° μ€μ μ΄ μμ₯ μμμ μννλ€.
|
| | λ©λͺ¨λ¦¬ λ°λ체 κ°κ²© μμΉμΌλ‘ μμ
μ΄μ΅μ΄ μ λΆκΈ° λλΉ 30% μ¦κ°νλ€.
|
| | HBM3E μμ°μ΄ 본격νλλ©΄μ AI λ°λ체 μμ₯ μ μ μ¨μ΄ νλλ μ λ§μ΄λ€.
|
| | λ€λ§, μ€κ΅ μμ₯μ λΆνμ€μ±μ΄ μ¬μ ν 리μ€ν¬ μμΈμΌλ‘ μμ©νκ³ μλ€.
|
| | νμ¬λ μ¬ν΄ μ€λΉ ν¬μλ₯Ό 20% νλν κ³νμ΄λ€.
|
| | """
|
| |
|
| | result = extract_sentences(text, model_name_or_path="./")
|
| |
|
| | print("=" * 60)
|
| | print("μ 체 λ¬Έμ₯ λΆμ:")
|
| | for i, (s, sc, r) in enumerate(zip(result["sentences"], result["all_scores"], result["all_roles"])):
|
| | marker = "*" if sc >= 0.5 else " "
|
| | print(f" {marker} {i+1}. [{sc:.4f}] [{r:10s}] {s}")
|
| |
|
| | print(f"\nμ νλ λνλ¬Έμ₯:")
|
| | for item in result["selected"]:
|
| | print(f" - [{item['score']:.4f}] [{item['role']:10s}] {item['sentence']}")
|
| |
|