|
|
import os
|
|
|
import glob
|
|
|
import json
|
|
|
import xml.etree.ElementTree as ET
|
|
|
from concurrent.futures import ProcessPoolExecutor
|
|
|
import time
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Directory containing the raw KCI-format OAI-PMH XML dump files.
INPUT_DIR = r"D:\KCI\data_kci_format"

# Directory where converted output is written (created by main() if missing).
OUTPUT_DIR = r"D:\KCI\processed"

# Output is JSON Lines: one JSON object per article, one object per line.
OUTPUT_FILE = os.path.join(OUTPUT_DIR, "kci_articles.jsonl")
|
|
|
|
|
|
|
|
|
# XML namespace prefixes used by every ElementTree find/findall in this file.
NS = {
    'oai': 'http://www.openarchives.org/OAI/2.0/',            # OAI-PMH envelope
    'kci': 'http://www.kci.go.kr/kciportal/OAI/',             # KCI detailed format
    'oai_dc': 'http://www.openarchives.org/OAI/2.0/oai_dc/',  # Dublin Core container
    'dc': 'http://purl.org/dc/elements/1.1/'                  # Dublin Core fields
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def parse_single_xml(filepath):
    """Parse one OAI-PMH XML file and return its records as a list of dicts.

    Handles both the KCI-specific detailed format (oai_kci) and the basic
    Dublin Core format (oai_dc). Deleted records and records without a
    metadata element are skipped.

    Args:
        filepath: Path to the XML file to parse.

    Returns:
        A list of article dicts; an empty list when the file cannot be
        read/parsed or contains no usable records.
    """
    extracted_data = []

    try:
        tree = ET.parse(filepath)
        root = tree.getroot()

        records = root.findall('.//oai:record', NS)

        for record in records:
            # Skip records the repository has marked as deleted.
            header = record.find('oai:header', NS)
            if header is not None and header.get('status') == 'deleted':
                continue

            metadata = record.find('oai:metadata', NS)
            if metadata is None:
                continue

            # Prefer the rich KCI format when present.
            kci_meta = metadata.find('kci:oai_kci', NS)
            if kci_meta is not None:
                parsed = _parse_kci_format(kci_meta, filepath)
                # _parse_kci_format returns {} for records without
                # articleInfo; don't let empty objects reach the JSONL.
                if parsed:
                    extracted_data.append(parsed)
                continue

            # Fall back to plain Dublin Core.
            dc_meta = metadata.find('oai_dc:dc', NS)
            if dc_meta is not None:
                parsed = _parse_dc_format(dc_meta, filepath)
                if parsed:
                    extracted_data.append(parsed)
                continue

    except (ET.ParseError, OSError) as e:
        # Report the failure instead of silently swallowing it; the caller
        # (a ProcessPoolExecutor map) treats [] as "nothing extracted".
        print(f"[WARN] failed to parse {filepath}: {e}")
        return []

    return extracted_data
|
|
|
|
|
|
def _parse_kci_format(kci_meta, filepath):
    """Parse a record in the KCI-specific detailed (oai_kci) format.

    Args:
        kci_meta: The <kci:oai_kci> element of one record.
        filepath: Source file path (only the basename is stored).

    Returns:
        A dict with the article fields, or {} when the record carries no
        <kci:articleInfo> element.
    """
    # Journal-level metadata is optional.
    j_info = kci_meta.find('kci:journalInfo', NS)
    journal_data = {}
    if j_info is not None:
        journal_data = {
            'journal_name': _get_text(j_info, 'kci:journal-name'),
            'publisher': _get_text(j_info, 'kci:publisher-name'),
            'pub_year': _get_text(j_info, 'kci:pub-year'),
            'pub_month': _get_text(j_info, 'kci:pub-mon'),
            'volume': _get_text(j_info, 'kci:volume'),
            'issue': _get_text(j_info, 'kci:issue'),
        }

    a_info = kci_meta.find('kci:articleInfo', NS)
    if a_info is None:
        return {}

    # Titles: lang='original' is the source-language title, lang='english'
    # the English one. NOTE(review): 'original' presumably means Korean in
    # this feed — confirm against the data.
    title_group = a_info.find('kci:title-group', NS)
    title_ko, title_en = "", ""
    if title_group is not None:
        for t in title_group.findall('kci:article-title', NS):
            lang = t.get('lang')
            # `t.text` is None for empty elements; normalize to "" so the
            # JSON output never contains null titles.
            if lang == 'original':
                title_ko = t.text or ""
            elif lang == 'english':
                title_en = t.text or ""

    authors = []
    author_group = a_info.find('kci:author-group', NS)
    if author_group is not None:
        for auth in author_group.findall('kci:author', NS):
            authors.append(auth.text.strip() if auth.text else "")

    # Abstracts follow the same lang convention as titles.
    abstract_ko, abstract_en = "", ""
    ab_group = a_info.find('kci:abstract-group', NS)
    if ab_group is not None:
        for ab in ab_group.findall('kci:abstract', NS):
            lang = ab.get('lang')
            if lang == 'original':
                abstract_ko = ab.text or ""
            elif lang == 'english':
                abstract_en = ab.text or ""

    return {
        'id': a_info.get('article-id'),
        'format': 'oai_kci',
        'title_ko': title_ko,
        'title_en': title_en,
        'journal': journal_data,
        'authors': authors,
        'abstract_ko': abstract_ko,
        'abstract_en': abstract_en,
        'keywords': _get_text(a_info, 'kci:keyword-group'),
        'citation_count': _get_text(a_info, 'kci:citation-count'),
        'doi': _get_text(a_info, 'kci:doi'),
        'url': _get_text(a_info, 'kci:url'),
        'file_source': os.path.basename(filepath)
    }
|
|
|
|
|
|
def _parse_dc_format(dc_meta, filepath):
    """Parse a record in the basic Dublin Core (oai_dc) format.

    Args:
        dc_meta: The <oai_dc:dc> element of one record.
        filepath: Source file path (only the basename is stored).

    Returns:
        A dict with the same keys as the KCI-format parser; fields the DC
        format does not carry are "" / None.
    """
    # Convention: the first <dc:title> is the original title, a second one
    # (when present) the English title. NOTE(review): assumed from the KCI
    # export layout — confirm against the data.
    titles = dc_meta.findall('dc:title', NS)
    # `.text` is None for empty elements; normalize to "" so the JSON
    # output never contains null titles.
    title_ko = (titles[0].text or "") if titles else ""
    title_en = (titles[1].text or "") if len(titles) > 1 else ""

    authors = [c.text for c in dc_meta.findall('dc:creator', NS) if c.text]

    # DC has no language tags on abstracts; take the first non-empty one.
    descriptions = dc_meta.findall('dc:description', NS)
    abstracts = [d.text for d in descriptions if d.text]
    abstract_ko = abstracts[0] if abstracts else ""

    # <dc:identifier> mixes DOIs, KCI article IDs and landing-page URLs;
    # classify each by its type attribute or its content.
    identifiers = dc_meta.findall('dc:identifier', NS)
    doi = ""
    url = ""
    art_id = ""

    for ide in identifiers:
        text = ide.text or ""
        type_attr = ide.get('type')

        if type_attr == 'doi' or 'doi.org' in text:
            doi = text
        elif type_attr == 'artId' or 'ART' in text:
            art_id = text
        elif 'kci.go.kr' in text and 'http' in text:
            url = text

    return {
        'id': art_id,
        'format': 'oai_dc',
        'title_ko': title_ko,
        'title_en': title_en,
        'journal': {'journal_name': _get_text(dc_meta, 'dc:publisher')},
        'authors': authors,
        'abstract_ko': abstract_ko,
        'abstract_en': "",
        'keywords': _get_text(dc_meta, 'dc:subject'),
        'citation_count': None,  # DC format does not expose citation counts
        'doi': doi,
        'url': url,
        'file_source': os.path.basename(filepath)
    }
|
|
|
|
|
|
def _get_text(parent, tag):
    """Return the stripped text of *parent*'s child *tag*, or None.

    None is returned when *parent* is None, the child is absent, or the
    child has no text content. Lookups use the module-level NS mapping.
    """
    if parent is None:
        return None
    node = parent.find(tag, NS)
    if node is None or not node.text:
        return None
    return node.text.strip()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main():
    """Convert every XML file in INPUT_DIR into one JSONL file of articles.

    Files are parsed in parallel worker processes; results are streamed to
    OUTPUT_FILE as they arrive so memory usage stays flat.
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists()/os.makedirs() pair.
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    xml_files = glob.glob(os.path.join(INPUT_DIR, "*.xml"))
    total_files = len(xml_files)

    print(f"Starting conversion of {total_files} XML files (hybrid format mode)")
    print(f"Input : {INPUT_DIR}")
    print(f"Output: {OUTPUT_FILE}")
    print("Converting... (this may take a while for many files)")

    start_time = time.time()

    with open(OUTPUT_FILE, 'w', encoding='utf-8') as outfile:
        with ProcessPoolExecutor() as executor:
            # map() yields one result list per input file, lazily and in
            # input order, so we can stream to disk as workers finish.
            results = executor.map(parse_single_xml, xml_files)

            count = 0
            total_articles = 0

            for result in results:
                count += 1
                if result:
                    total_articles += len(result)
                    for doc in result:
                        json.dump(doc, outfile, ensure_ascii=False)
                        outfile.write('\n')

                # Periodic progress report.
                if count % 2000 == 0:
                    print(f"  [{count}/{total_files}] processed... "
                          f"({total_articles} articles extracted)")

    print("=" * 50)
    print("Conversion complete!")
    print(f"Total files: {total_files}")
    print(f"Articles extracted: {total_articles}")
    print(f"Elapsed: {time.time() - start_time:.1f}s")
    print(f"Saved to: {OUTPUT_FILE}")
|
|
|
|
|
|
# Entry-point guard: required on Windows, where ProcessPoolExecutor workers
# re-import this module and must not re-run main() recursively.
if __name__ == "__main__":
    main()