# KCI_data / kci_xml_to_jsonl.py
# NOTE(review): stray web-UI text ("ddokbaro's picture", "Upload 2 files",
# commit hash) removed — it was not valid Python and broke the module at import.
import os
import glob
import json
import xml.etree.ElementTree as ET
from concurrent.futures import ProcessPoolExecutor
import time
# ==========================================
# 1. Configuration
# ==========================================
INPUT_DIR = r"D:\KCI\data_kci_format"   # directory of harvested OAI-PMH XML files
OUTPUT_DIR = r"D:\KCI\processed"        # created on demand by main()
OUTPUT_FILE = os.path.join(OUTPUT_DIR, "kci_articles.jsonl")
# XML namespace map used by every find/findall (supports both KCI and DC formats)
NS = {
    'oai': 'http://www.openarchives.org/OAI/2.0/',
    'kci': 'http://www.kci.go.kr/kciportal/OAI/',
    'oai_dc': 'http://www.openarchives.org/OAI/2.0/oai_dc/',
    'dc': 'http://purl.org/dc/elements/1.1/'
}
# ==========================================
# 2. Parsing logic (hybrid format support)
# ==========================================
def parse_single_xml(filepath):
    """Parse one OAI-PMH XML file into a list of article dicts.

    Handles both the detailed ``oai_kci`` format and the basic ``oai_dc``
    format, since harvested files may mix the two.

    Args:
        filepath: Path to the XML file.

    Returns:
        List of per-article dicts (possibly empty). Returns [] when the
        file itself cannot be parsed.
    """
    try:
        tree = ET.parse(filepath)
    except (ET.ParseError, OSError):
        # Corrupt or unreadable file: best-effort, skip it entirely.
        return []
    extracted_data = []
    for record in tree.getroot().findall('.//oai:record', NS):
        # A failure in one record must not discard the records already
        # extracted from this file (the original returned [] for the
        # whole file on any exception).
        try:
            # Skip records the OAI-PMH server flags as deleted.
            header = record.find('oai:header', NS)
            if header is not None and header.get('status') == 'deleted':
                continue
            metadata = record.find('oai:metadata', NS)
            if metadata is None:
                continue
            # A. Try the detailed oai_kci format first.
            kci_meta = metadata.find('kci:oai_kci', NS)
            if kci_meta is not None:
                extracted_data.append(_parse_kci_format(kci_meta, filepath))
                continue
            # B. Fall back to the basic oai_dc format (mixed files exist).
            dc_meta = metadata.find('oai_dc:dc', NS)
            if dc_meta is not None:
                extracted_data.append(_parse_dc_format(dc_meta, filepath))
        except Exception:
            # Malformed record: log point if ever needed, then move on.
            continue
    return extracted_data
def _parse_kci_format(kci_meta, filepath):
    """Parse the KCI-specific detailed metadata format into a flat dict.

    Args:
        kci_meta: The <kci:oai_kci> metadata element.
        filepath: Source file path; only its basename is recorded.

    Returns:
        Article dict, or {} when the <kci:articleInfo> element is missing.
    """
    # 1. Journal information (optional).
    j_info = kci_meta.find('kci:journalInfo', NS)
    journal_data = {}
    if j_info is not None:
        journal_data = {
            'journal_name': _get_text(j_info, 'kci:journal-name'),
            'publisher': _get_text(j_info, 'kci:publisher-name'),
            'pub_year': _get_text(j_info, 'kci:pub-year'),
            'pub_month': _get_text(j_info, 'kci:pub-mon'),
            'volume': _get_text(j_info, 'kci:volume'),
            'issue': _get_text(j_info, 'kci:issue'),
        }
    # 2. Article information (required).
    a_info = kci_meta.find('kci:articleInfo', NS)
    if a_info is None:
        return {}
    # Titles: lang='original' is the native title, lang='english' the translation.
    title_group = a_info.find('kci:title-group', NS)
    title_ko, title_en = "", ""
    if title_group is not None:
        for t in title_group.findall('kci:article-title', NS):
            lang = t.get('lang')
            if lang == 'original':
                title_ko = t.text or ""  # empty element has text=None; keep "" default
            elif lang == 'english':
                title_en = t.text or ""
    # Authors (plain text list).
    authors = []
    author_group = a_info.find('kci:author-group', NS)
    if author_group is not None:
        for auth in author_group.findall('kci:author', NS):
            authors.append(auth.text.strip() if auth.text else "")
    # Abstracts, same language convention as titles.
    abstract_ko, abstract_en = "", ""
    ab_group = a_info.find('kci:abstract-group', NS)
    if ab_group is not None:
        for ab in ab_group.findall('kci:abstract', NS):
            lang = ab.get('lang')
            if lang == 'original':
                abstract_ko = ab.text or ""
            elif lang == 'english':
                abstract_en = ab.text or ""
    return {
        'id': a_info.get('article-id'),
        'format': 'oai_kci',  # lets downstream tell the two formats apart
        'title_ko': title_ko,
        'title_en': title_en,
        'journal': journal_data,
        'authors': authors,
        'abstract_ko': abstract_ko,
        'abstract_en': abstract_en,
        # NOTE(review): keyword-group likely holds child <keyword> elements;
        # _get_text reads only the group's own text — confirm against real data.
        'keywords': _get_text(a_info, 'kci:keyword-group'),
        'citation_count': _get_text(a_info, 'kci:citation-count'),
        'doi': _get_text(a_info, 'kci:doi'),
        'url': _get_text(a_info, 'kci:url'),
        'file_source': os.path.basename(filepath)
    }
def _parse_dc_format(dc_meta, filepath):
    """Parse the generic Dublin Core metadata format (simpler structure).

    Args:
        dc_meta: The <oai_dc:dc> metadata element.
        filepath: Source file path; only its basename is recorded.

    Returns:
        Article dict with the same keys as the oai_kci parser.
    """
    # Titles: language attributes may or may not be present, so assume the
    # first <dc:title> is the original and the second the English title.
    titles = dc_meta.findall('dc:title', NS)
    # `or ""` keeps the declared ""-default even when the element is empty
    # (element.text is None), matching the KCI parser's behavior.
    title_ko = (titles[0].text or "") if titles else ""
    title_en = (titles[1].text or "") if len(titles) > 1 else ""
    # Authors: plain text list.
    authors = [c.text for c in dc_meta.findall('dc:creator', NS) if c.text]
    # Abstract: take the first non-empty <dc:description>.
    abstracts = [d.text for d in dc_meta.findall('dc:description', NS) if d.text]
    abstract_ko = abstracts[0] if abstracts else ""
    # Identifiers mix DOIs, article ids and URLs; classify heuristically.
    doi = ""
    url = ""
    art_id = ""
    for ide in dc_meta.findall('dc:identifier', NS):
        text = ide.text or ""
        type_attr = ide.get('type')
        if type_attr == 'doi' or 'doi.org' in text:
            doi = text
        elif type_attr == 'artId' or 'ART' in text:
            art_id = text
        elif 'kci.go.kr' in text and 'http' in text:
            url = text
    return {
        'id': art_id,
        'format': 'oai_dc',  # lets downstream tell the two formats apart
        'title_ko': title_ko,
        'title_en': title_en,
        # DC records often carry the journal name in <dc:publisher>.
        'journal': {'journal_name': _get_text(dc_meta, 'dc:publisher')},
        'authors': authors,
        'abstract_ko': abstract_ko,
        'abstract_en': "",  # language is ambiguous in DC
        'keywords': _get_text(dc_meta, 'dc:subject'),
        'citation_count': None,  # not available in DC
        'doi': doi,
        'url': url,
        'file_source': os.path.basename(filepath)
    }
def _get_text(parent, tag):
if parent is None: return None
elem = parent.find(tag, NS)
return elem.text.strip() if elem is not None and elem.text else None
# ==========================================
# 3. Main entry point
# ==========================================
def main():
    """Convert every XML file under INPUT_DIR into one JSONL file.

    Files are parsed in parallel worker processes; writing happens only in
    the parent process, so the output file needs no locking.
    """
    # exist_ok avoids the check-then-create race of the original version.
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    xml_files = glob.glob(os.path.join(INPUT_DIR, "*.xml"))
    total_files = len(xml_files)
    # NOTE(review): user-facing strings below were mojibake (UTF-8 read as
    # Latin-1) in the received copy; reconstructed to the intended Korean.
    print(f"🚀 총 {total_files}개의 XML 파일 처리를 시작합니다. (하이브리드 모드)")
    print(f"📂 입력: {INPUT_DIR}")
    print(f"💾 출력: {OUTPUT_FILE}")
    print("⏳ 변환 중... (파일이 많아 시간이 조금 걸립니다)")
    start_time = time.time()
    count = 0
    total_articles = 0
    with open(OUTPUT_FILE, 'w', encoding='utf-8') as outfile:
        # Fan out parsing across processes; results come back in order.
        with ProcessPoolExecutor() as executor:
            for result in executor.map(parse_single_xml, xml_files):
                count += 1
                if result:
                    total_articles += len(result)
                    for doc in result:
                        json.dump(doc, outfile, ensure_ascii=False)
                        outfile.write('\n')
                # Periodic progress report.
                if count % 2000 == 0:
                    print(f" [{count}/{total_files}] 처리 중... ({total_articles} 건 추출)")
    print("=" * 50)
    print("✅ 변환 완료!")
    print(f"📊 총 파일: {total_files}개")
    print(f"📑 추출된 논문: {total_articles}건")
    print(f"⏱️ 소요 시간: {time.time() - start_time:.1f}초")
    print(f"📁 저장 위치: {OUTPUT_FILE}")


if __name__ == "__main__":
    main()