langquantof committed on
Commit
3d36724
·
verified ·
1 Parent(s): 32b405b

Upload inference_example.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. inference_example.py +130 -0
inference_example.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ HuggingFace์—์„œ ๋ชจ๋ธ์„ ๋กœ๋“œํ•˜์—ฌ ์ถ”๋ก ํ•˜๋Š” ์˜ˆ์ œ
3
+
4
+ Usage:
5
+ from inference_example import extract_sentences
6
+ results = extract_sentences("์‚ผ์„ฑ์ „์ž์˜ ์‹ค์ ์ด ์‹œ์žฅ ์˜ˆ์ƒ์„ ์ƒํšŒํ–ˆ๋‹ค. ...")
7
+ """
8
+
9
import re
from typing import Dict, List, Optional

import torch
from transformers import AutoTokenizer

from model import (
    DocumentEncoderConfig,
    DocumentEncoderForExtractiveSummarization,
    IDX_TO_ROLE,
)
20
+
21
+
22
def split_into_sentences(text: str) -> List[str]:
    """Split *text* into sentences at whitespace following '.', '!', or '?'."""
    pieces = (chunk.strip() for chunk in re.split(r"(?<=[.!?])\s+", text.strip()))
    return [chunk for chunk in pieces if chunk]
25
+
26
+
27
def extract_sentences(
    text: str,
    model_name_or_path: str = "./",  # local path or HuggingFace repo ID
    top_k: int = 3,
    threshold: float = 0.5,
    device: Optional[str] = None,
) -> Dict:
    """Extract representative sentences from *text* and classify their roles.

    Args:
        text: Input text (e.g. a financial report).
        model_name_or_path: Model path or HuggingFace repo ID.
        top_k: Maximum number of sentences to extract.
        threshold: Minimum score for a sentence to be selected.
        device: "cuda" or "cpu"; auto-detected when None.

    Returns:
        dict with keys 'sentences', 'all_scores', 'all_roles', 'selected'.
        NOTE(review): only the first ``config.max_sentences`` sentences are
        scored, so 'sentences' can be longer than 'all_scores' for very long
        inputs; 'selected' is ordered by original position in the document.
    """
    device = device or ("cuda" if torch.cuda.is_available() else "cpu")

    # Model and tokenizer are loaded on every call; cache externally if
    # calling this in a loop.
    config = DocumentEncoderConfig.from_pretrained(model_name_or_path)
    model = DocumentEncoderForExtractiveSummarization.from_pretrained(
        model_name_or_path, config=config
    )
    model = model.to(device)
    model.eval()

    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)

    sentences = split_into_sentences(text)
    if not sentences:
        return {"sentences": [], "all_scores": [], "all_roles": [], "selected": []}

    max_sentences = config.max_sentences
    max_length = config.max_length

    # Truncate to the model's sentence capacity, then pad with empty slots.
    padded = sentences[:max_sentences]
    num_real = len(padded)
    padded.extend([""] * (max_sentences - num_real))

    all_input_ids, all_attention_mask = [], []
    for sent in padded:
        if sent:
            enc = tokenizer(
                sent,
                max_length=max_length,
                padding="max_length",
                truncation=True,
                return_tensors="pt",
            )
        else:
            # Padding slot: all-zero ids with a fully-masked attention row.
            enc = {
                "input_ids": torch.zeros(1, max_length, dtype=torch.long),
                "attention_mask": torch.zeros(1, max_length, dtype=torch.long),
            }
        all_input_ids.append(enc["input_ids"])
        all_attention_mask.append(enc["attention_mask"])

    # Stack per-sentence encodings into a batch of one document:
    # (1, max_sentences, max_length).
    input_ids = torch.cat(all_input_ids, dim=0).unsqueeze(0).to(device)
    attention_mask = torch.cat(all_attention_mask, dim=0).unsqueeze(0).to(device)
    # document_mask marks which sentence slots are real (1) vs padding (0).
    document_mask = torch.zeros(1, max_sentences, device=device)
    document_mask[0, :num_real] = 1

    with torch.no_grad():
        scores, role_logits = model(input_ids, attention_mask, document_mask)

    # Keep only the real (non-padding) sentence positions.
    scores_list = scores[0, :num_real].tolist()
    role_indices = role_logits[0, :num_real].argmax(dim=-1).tolist()
    roles_list = [IDX_TO_ROLE[idx] for idx in role_indices]

    # Select sentences above the threshold, keep the top_k highest-scoring,
    # then restore original document order.
    selected = [
        {"index": i, "sentence": sent, "score": score, "role": role}
        for i, (sent, score, role) in enumerate(zip(sentences, scores_list, roles_list))
        if score >= threshold
    ]
    selected.sort(key=lambda x: x["score"], reverse=True)
    selected = selected[:top_k]
    selected.sort(key=lambda x: x["index"])

    return {
        "sentences": sentences,
        "all_scores": scores_list,
        "all_roles": roles_list,
        "selected": selected,
    }
109
+
110
+
111
if __name__ == "__main__":
    # Demo: score a short finance snippet and print per-sentence results.
    # NOTE(review): the literals below are reproduced as-is from the source;
    # they appear encoding-garbled in this copy — verify against the original
    # file before shipping.
    text = """
    ์‚ผ์„ฑ์ „์ž์˜ 2024๋…„ 4๋ถ„๊ธฐ ์‹ค์ ์ด ์‹œ์žฅ ์˜ˆ์ƒ์„ ์ƒํšŒํ–ˆ๋‹ค.
    ๋ฉ”๋ชจ๋ฆฌ ๋ฐ˜๋„์ฒด ๊ฐ€๊ฒฉ ์ƒ์Šน์œผ๋กœ ์˜์—…์ด์ต์ด ์ „๋ถ„๊ธฐ ๋Œ€๋น„ 30% ์ฆ๊ฐ€ํ–ˆ๋‹ค.
    HBM3E ์–‘์‚ฐ์ด ๋ณธ๊ฒฉํ™”๋˜๋ฉด์„œ AI ๋ฐ˜๋„์ฒด ์‹œ์žฅ ์ ์œ ์œจ์ด ํ™•๋Œ€๋  ์ „๋ง์ด๋‹ค.
    ๋‹ค๋งŒ, ์ค‘๊ตญ ์‹œ์žฅ์˜ ๋ถˆํ™•์‹ค์„ฑ์ด ์—ฌ์ „ํžˆ ๋ฆฌ์Šคํฌ ์š”์ธ์œผ๋กœ ์ž‘์šฉํ•˜๊ณ  ์žˆ๋‹ค.
    ํšŒ์‚ฌ๋Š” ์˜ฌํ•ด ์„ค๋น„ ํˆฌ์ž๋ฅผ 20% ํ™•๋Œ€ํ•  ๊ณ„ํš์ด๋‹ค.
    """

    # Loads the model from the current directory ("./").
    result = extract_sentences(text, model_name_or_path="./")

    print("=" * 60)
    print("์ „์ฒด ๋ฌธ์žฅ ๋ถ„์„:")
    # "*" marks sentences at or above the 0.5 selection threshold.
    for i, (s, sc, r) in enumerate(zip(result["sentences"], result["all_scores"], result["all_roles"])):
        marker = "*" if sc >= 0.5 else " "
        print(f" {marker} {i+1}. [{sc:.4f}] [{r:10s}] {s}")

    print(f"\n์„ ํƒ๋œ ๋Œ€ํ‘œ๋ฌธ์žฅ:")
    for item in result["selected"]:
        print(f" - [{item['score']:.4f}] [{item['role']:10s}] {item['sentence']}")