ddokbaro committed on
Commit
cf5028a
ยท
verified ยท
1 Parent(s): d0e3458

Upload 2 files

Browse files
Files changed (2) hide show
  1. kci_full_download_v2.py +125 -0
  2. kci_xml_to_jsonl.py +228 -0
kci_full_download_v2.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import xml.etree.ElementTree as ET
3
+ import time
4
+ import os
5
+ import json
6
+ import sys
7
+
8
class KCIKciHarvester:
    """Harvest KCI article metadata over OAI-PMH in the ``oai_kci`` format.

    Each ``ListRecords`` page is saved as an XML file under ``save_dir``;
    the resumption token is persisted to a JSON state file so that an
    interrupted harvest resumes from the last completed page.
    """

    def __init__(self, save_dir=r"D:\KCI\data_kci_format"):
        """Prepare the save directory and restore any previous harvest state.

        Args:
            save_dir: Directory holding the page XML files and state file.
        """
        self.base_url = "https://open.kci.go.kr/oai/request"
        self.save_dir = save_dir
        self.state_file = os.path.join(save_dir, "harvest_state_v2.json")

        # Create the save directory (fixed name so downloads don't get mixed up)
        os.makedirs(self.save_dir, exist_ok=True)
        self.state = self.load_state()

    def load_state(self):
        """Return the persisted harvest state, or a fresh default state.

        Returns:
            dict with keys ``last_token`` and ``page_count``.
        """
        if os.path.exists(self.state_file):
            try:
                with open(self.state_file, 'r', encoding='utf-8') as f:
                    return json.load(f)
            # Narrowed from a bare ``except:`` -- only swallow the expected
            # "state file unreadable or corrupt" failures and start over.
            except (OSError, json.JSONDecodeError):
                pass
        return {"last_token": None, "page_count": 1}

    def save_state(self, token, page_count):
        """Persist the resumption token and next page number to disk."""
        self.state = {"last_token": token, "page_count": page_count}
        with open(self.state_file, 'w', encoding='utf-8') as f:
            json.dump(self.state, f, indent=4)

    def verify_xml(self, filepath):
        """Sanity-check a downloaded page file.

        Returns:
            Number of ``<record>`` elements on success, 0 if the OAI-PMH
            response carries an ``<error>`` element, or -1 if the file is
            missing or not well-formed XML (caller treats -1 as "retry").
        """
        try:
            tree = ET.parse(filepath)
            root = tree.getroot()
            ns = {'oai': 'http://www.openarchives.org/OAI/2.0/'}

            error = root.find('.//oai:error', ns)
            if error is not None:
                print(f"โš ๏ธ API Error: {error.text}")
                return 0

            # Count the harvested records
            records = root.findall('.//oai:record', ns)
            return len(records)
        # Narrowed from ``except Exception``: only I/O and parse failures
        # mean "corrupt/unreadable download".
        except (OSError, ET.ParseError):
            return -1

    def run(self):
        """Download ListRecords pages until the server stops issuing tokens.

        Resumes from the saved resumption token when one exists; otherwise
        starts a fresh harvest of the ARTI set in the oai_kci format.
        """
        print("="*60)
        print(f" KCI ๊ณ ํ’ˆ์งˆ ๋ฐ์ดํ„ฐ(oai_kci) ์ˆ˜์ง‘๊ธฐ")
        print(f" ์ €์žฅ ๊ฒฝ๋กœ: {self.save_dir}")
        print("="*60)

        token = self.state["last_token"]
        page = self.state["page_count"]

        if token:
            print(f"๐Ÿ”„ {page}ํŽ˜์ด์ง€๋ถ€ํ„ฐ ์ด์–ด๋ฐ›์Šต๋‹ˆ๋‹ค...")
            params = {'verb': 'ListRecords', 'resumptionToken': token}
        else:
            print(f"โ–ถ๏ธ ์ฒ˜์Œ๋ถ€ํ„ฐ ์‹œ์ž‘ (Format: oai_kci, Set: ARTI)")
            params = {
                'verb': 'ListRecords',
                'set': 'ARTI',               # journal articles only
                'metadataPrefix': 'oai_kci'  # structured (detailed) metadata
            }

        consecutive_errors = 0

        while True:
            try:
                sys.stdout.write(f"\r๐Ÿ“ฅ Page {page} (oai_kci) ๋‹ค์šด๋กœ๋“œ ์ค‘... ")
                sys.stdout.flush()

                response = requests.get(self.base_url, params=params, timeout=60)

                if response.status_code != 200:
                    print(f"\nโ›” ์„œ๋ฒ„ ์—๋Ÿฌ({response.status_code})...")
                    time.sleep(10)
                    consecutive_errors += 1
                    if consecutive_errors > 5:
                        break
                    continue

                # Persist the raw page before touching it.
                # NOTE(review): relies on requests' detected text encoding
                # matching the XML declaration -- confirm the server sends
                # charset=UTF-8.
                filename = os.path.join(self.save_dir, f"kci_detailed_{page:06d}.xml")
                with open(filename, "w", encoding="utf-8") as f:
                    f.write(response.text)

                count = self.verify_xml(filename)
                if count == -1:
                    # Corrupt download: retry the same request.
                    # NOTE(review): a permanently corrupt response would loop
                    # forever here -- consider counting these retries too.
                    print(f"\nโš ๏ธ ํŒŒ์ผ ์†์ƒ. ์žฌ์‹œ๋„.")
                    time.sleep(3)
                    continue

                sys.stdout.write(f"โœ… ์™„๋ฃŒ ({count}๊ฑด)\n")

                # Extract the resumption token for the next page
                root = ET.fromstring(response.text)
                ns = {'oai': 'http://www.openarchives.org/OAI/2.0/'}
                token_elem = root.find('.//oai:resumptionToken', ns)

                if token_elem is not None and token_elem.text:
                    new_token = token_elem.text
                    page += 1
                    self.save_state(new_token, page)

                    # Per OAI-PMH, follow-up requests carry ONLY the token
                    params = {'verb': 'ListRecords', 'resumptionToken': new_token}
                    consecutive_errors = 0
                    time.sleep(0.5)  # be polite to the server
                else:
                    print(f"\n๐Ÿ ์ˆ˜์ง‘ ์™„๋ฃŒ!")
                    break

            except KeyboardInterrupt:
                print("\nโน๏ธ ์ค‘๋‹จ๋จ.")
                break
            except Exception as e:
                # Network hiccups etc.: report, wait, and retry the request.
                print(f"\nโŒ ์—๋Ÿฌ: {e}")
                time.sleep(10)
123
# Script entry point: start (or resume) a harvest run.
if __name__ == "__main__":
    harvester = KCIKciHarvester()
    harvester.run()
kci_xml_to_jsonl.py ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import glob
3
+ import json
4
+ import xml.etree.ElementTree as ET
5
+ from concurrent.futures import ProcessPoolExecutor
6
+ import time
7
+
8
# ==========================================
# 1. Configuration
# ==========================================
INPUT_DIR = r"D:\KCI\data_kci_format"
OUTPUT_DIR = r"D:\KCI\processed"
OUTPUT_FILE = os.path.join(OUTPUT_DIR, "kci_articles.jsonl")

# XML namespaces (supports both the KCI and Dublin Core formats)
NS = {
    'oai': 'http://www.openarchives.org/OAI/2.0/',
    'kci': 'http://www.kci.go.kr/kciportal/OAI/',
    'oai_dc': 'http://www.openarchives.org/OAI/2.0/oai_dc/',
    'dc': 'http://purl.org/dc/elements/1.1/'
}
22
+
23
# ==========================================
# 2. Parsing logic (hybrid support)
# ==========================================
def parse_single_xml(filepath):
    """Convert one OAI-PMH XML file into a list of article dicts.

    Handles both the detailed ``oai_kci`` format and the basic ``oai_dc``
    format; deleted records and records without metadata are skipped.
    Returns an empty list when the file cannot be processed.
    """
    articles = []

    try:
        root = ET.parse(filepath).getroot()

        # Walk every <record> in the OAI-PMH envelope
        for record in root.findall('.//oai:record', NS):
            header = record.find('oai:header', NS)
            if header is not None and header.get('status') == 'deleted':
                continue  # tombstone record, nothing to extract

            metadata = record.find('oai:metadata', NS)
            if metadata is None:
                continue

            # Prefer the detailed KCI format when present
            kci_meta = metadata.find('kci:oai_kci', NS)
            if kci_meta is not None:
                articles.append(_parse_kci_format(kci_meta, filepath))
                continue

            # Fall back to plain Dublin Core (in case files are mixed)
            dc_meta = metadata.find('oai_dc:dc', NS)
            if dc_meta is not None:
                articles.append(_parse_dc_format(dc_meta, filepath))
                continue

    except Exception:
        # Corrupt file: drop it silently (logging intentionally disabled)
        return []

    return articles
68
+
69
def _parse_kci_format(kci_meta, filepath):
    """Parse one record in the KCI-specific detailed format into a dict."""
    # 1. Journal information (field name -> KCI tag)
    journal_tags = {
        'journal_name': 'kci:journal-name',
        'publisher': 'kci:publisher-name',
        'pub_year': 'kci:pub-year',
        'pub_month': 'kci:pub-mon',
        'volume': 'kci:volume',
        'issue': 'kci:issue',
    }
    j_info = kci_meta.find('kci:journalInfo', NS)
    if j_info is not None:
        journal_data = {name: _get_text(j_info, tag) for name, tag in journal_tags.items()}
    else:
        journal_data = {}

    # 2. Article information
    a_info = kci_meta.find('kci:articleInfo', NS)
    if a_info is None:
        return {}

    # Titles, split by language attribute
    title_ko = title_en = ""
    title_group = a_info.find('kci:title-group', NS)
    if title_group is not None:
        for node in title_group.findall('kci:article-title', NS):
            if node.get('lang') == 'original':
                title_ko = node.text
            elif node.get('lang') == 'english':
                title_en = node.text

    # Authors
    authors = []
    author_group = a_info.find('kci:author-group', NS)
    if author_group is not None:
        authors = [(node.text or "").strip()
                   for node in author_group.findall('kci:author', NS)]

    # Abstracts, split by language attribute
    abstract_ko = abstract_en = ""
    ab_group = a_info.find('kci:abstract-group', NS)
    if ab_group is not None:
        for node in ab_group.findall('kci:abstract', NS):
            if node.get('lang') == 'original':
                abstract_ko = node.text
            elif node.get('lang') == 'english':
                abstract_en = node.text

    return {
        'id': a_info.get('article-id'),
        'format': 'oai_kci',  # marks which source format produced this row
        'title_ko': title_ko,
        'title_en': title_en,
        'journal': journal_data,
        'authors': authors,
        'abstract_ko': abstract_ko,
        'abstract_en': abstract_en,
        # NOTE(review): reads the group element's own text; confirm keywords
        # are not child elements of kci:keyword-group in the actual schema.
        'keywords': _get_text(a_info, 'kci:keyword-group'),
        'citation_count': _get_text(a_info, 'kci:citation-count'),
        'doi': _get_text(a_info, 'kci:doi'),
        'url': _get_text(a_info, 'kci:url'),
        'file_source': os.path.basename(filepath)
    }
129
+
130
def _parse_dc_format(dc_meta, filepath):
    """Parse one record in the plain Dublin Core format (simpler structure)."""

    # Titles: the language attribute may or may not be present, so fall back
    # to positional guessing (first = original, second = English).
    titles = dc_meta.findall('dc:title', NS)
    title_ko = titles[0].text if titles else ""
    title_en = titles[1].text if len(titles) > 1 else ""

    # Authors are plain text entries
    authors = [node.text for node in dc_meta.findall('dc:creator', NS) if node.text]

    # Abstract: take the first non-empty description
    abstracts = [node.text
                 for node in dc_meta.findall('dc:description', NS)
                 if node.text]
    abstract_ko = abstracts[0] if abstracts else ""

    # Identifiers mix DOIs, article IDs and URLs; classify each one
    doi = ""
    url = ""
    art_id = ""
    for node in dc_meta.findall('dc:identifier', NS):
        value = node.text or ""
        kind = node.get('type')

        if kind == 'doi' or 'doi.org' in value:
            doi = value
        elif kind == 'artId' or 'ART' in value:
            art_id = value
        elif 'kci.go.kr' in value and 'http' in value:
            url = value

    return {
        'id': art_id,
        'format': 'oai_dc',  # marks which source format produced this row
        'title_ko': title_ko,
        'title_en': title_en,
        # DC frequently carries the journal name in dc:publisher
        'journal': {'journal_name': _get_text(dc_meta, 'dc:publisher')},
        'authors': authors,
        'abstract_ko': abstract_ko,
        'abstract_en': "",  # language split is ambiguous in DC
        'keywords': _get_text(dc_meta, 'dc:subject'),
        'citation_count': None,  # not available in DC
        'doi': doi,
        'url': url,
        'file_source': os.path.basename(filepath)
    }
178
+
179
+ def _get_text(parent, tag):
180
+ if parent is None: return None
181
+ elem = parent.find(tag, NS)
182
+ return elem.text.strip() if elem is not None and elem.text else None
183
+
184
# ==========================================
# 3. Main execution logic
# ==========================================
def main():
    """Convert every XML page in INPUT_DIR into a single JSONL file.

    Files are parsed in parallel worker processes; each extracted article
    is written as one JSON line to OUTPUT_FILE.
    """
    # exist_ok avoids the check-then-create race of a separate exists() test
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    xml_files = glob.glob(os.path.join(INPUT_DIR, "*.xml"))
    total_files = len(xml_files)

    print(f"๐Ÿš€ ์ด {total_files}๊ฐœ์˜ XML ํŒŒ์ผ ์ฒ˜๋ฆฌ๋ฅผ ์‹œ์ž‘ํ•ฉ๋‹ˆ๋‹ค. (ํ•˜์ด๋ธŒ๋ฆฌ๋“œ ๋ชจ๋“œ)")
    print(f"๐Ÿ“‚ ์ž…๋ ฅ: {INPUT_DIR}")
    print(f"๐Ÿ’พ ์ถœ๋ ฅ: {OUTPUT_FILE}")
    print("โณ ๋ณ€ํ™˜ ์ค‘... (ํŒŒ์ผ์ด ๋งŽ์•„ ์‹œ๊ฐ„์ด ์กฐ๊ธˆ ๊ฑธ๋ฆฝ๋‹ˆ๋‹ค)")

    start_time = time.time()

    with open(OUTPUT_FILE, 'w', encoding='utf-8') as outfile:
        # Fan the parsing out across CPU cores
        with ProcessPoolExecutor() as executor:
            results = executor.map(parse_single_xml, xml_files)

            count = 0
            total_articles = 0

            for result in results:
                count += 1
                if result:
                    total_articles += len(result)
                    for doc in result:
                        # ensure_ascii=False keeps Korean text readable
                        json.dump(doc, outfile, ensure_ascii=False)
                        outfile.write('\n')

                # Progress report every 2000 files
                if count % 2000 == 0:
                    print(f" [{count}/{total_files}] ์ฒ˜๋ฆฌ ์ค‘... ({total_articles} ๊ฑด ์ถ”์ถœ)")

    print("="*50)
    print(f"โœ… ๋ณ€ํ™˜ ์™„๋ฃŒ!")
    print(f"๐Ÿ“Š ์ด ํŒŒ์ผ: {total_files}๊ฐœ")
    print(f"๐Ÿ“‘ ์ถ”์ถœ๋œ ๋…ผ๋ฌธ: {total_articles}๊ฑด")
    print(f"โฑ๏ธ ์†Œ์š” ์‹œ๊ฐ„: {time.time() - start_time:.1f}์ดˆ")
    print(f"๐Ÿ“ ์ €์žฅ ์œ„์น˜: {OUTPUT_FILE}")
227
# Script entry point: run the XML -> JSONL conversion.
if __name__ == "__main__":
    main()