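"""Extract per-article metadata from gzipped PubMed XML files into a single CSV.

Intended for dumps such as the NCBI PubMed baseline (files named like
pubmed25n0001.xml.gz); adjust the paths in the __main__ block for your setup.
"""
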
import os
import gzip
import glob
import xml.etree.ElementTree as ET

import pandas as pd
import tqdm

def extract_meta_info(xml_content):
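    """Parse one PubMed XML document and return a list of metadata dicts, one per <PubmedArticle>."""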
    root = ET.fromstring(xml_content)
    meta_info_list = []  # List to hold the metadata of each article

    # Loop over each article in the XML; records are <PubmedArticle> elements
    articles = root.findall(".//PubmedArticle")  # Adjust the XPath if your XML structure differs
    for article in articles:
        meta_info = {}

        # Extract PMID
        pmid = article.find(".//PMID")
        meta_info['PMID'] = pmid.text if pmid is not None else None
        # Extract DateCompleted (set the key even when absent so CSV columns stay consistent)
        date_completed = article.find(".//DateCompleted")
        meta_info['DateCompleted'] = None
        if date_completed is not None:
            year = date_completed.find(".//Year")
            month = date_completed.find(".//Month")
            day = date_completed.find(".//Day")
            if year is not None and month is not None and day is not None:
                meta_info['DateCompleted'] = f"{year.text}-{month.text}-{day.text}"

        # Extract DateRevised
        date_revised = article.find(".//DateRevised")
        meta_info['DateRevised'] = None
        if date_revised is not None:
            year = date_revised.find(".//Year")
            month = date_revised.find(".//Month")
            day = date_revised.find(".//Day")
            if year is not None and month is not None and day is not None:
                meta_info['DateRevised'] = f"{year.text}-{month.text}-{day.text}"
        # Extract ISSN
        issn = article.find(".//ISSN")
        meta_info['ISSN'] = issn.text if issn is not None else None

        # Extract journal title
        journal_title = article.find(".//Journal/Title")
        meta_info['JournalTitle'] = journal_title.text if journal_title is not None else None

        # Extract article title
        article_title = article.find(".//ArticleTitle")
        meta_info['ArticleTitle'] = article_title.text if article_title is not None else None

        # Extract authors as "LastName ForeName", comma-separated
        authors = article.findall(".//AuthorList/Author")
        author_names = []
        for author in authors:
            last_name = author.find(".//LastName")
            fore_name = author.find(".//ForeName")
            if last_name is not None and fore_name is not None:
                author_names.append(f"{last_name.text} {fore_name.text}")
        meta_info['Authors'] = ', '.join(author_names) if author_names else None

        # Extract language
        language = article.find(".//Language")
        meta_info['Language'] = language.text if language is not None else None
        # Extract grants as "GrantID (Agency, Country)", semicolon-separated
        grants = article.findall(".//GrantList/Grant")
        grant_info = []
        for grant in grants:
            grant_id = grant.find(".//GrantID")
            agency = grant.find(".//Agency")
            country = grant.find(".//Country")
            if grant_id is not None and agency is not None and country is not None:
                grant_info.append(f"{grant_id.text} ({agency.text}, {country.text})")
        meta_info['Grants'] = '; '.join(grant_info) if grant_info else None

        # Extract publication types
        publication_types = article.findall(".//PublicationTypeList/PublicationType")
        pub_types = [pub_type.text for pub_type in publication_types]
        meta_info['PublicationTypes'] = ', '.join(filter(None, pub_types)) if pub_types else None

        # Extract chemicals
        chemicals = article.findall(".//ChemicalList/Chemical")
        chemical_info = []
        for chemical in chemicals:
            substance_name = chemical.find(".//NameOfSubstance")
            if substance_name is not None:
                chemical_info.append(substance_name.text)
        meta_info['Chemicals'] = ', '.join(chemical_info) if chemical_info else None

        # Extract CitationSubset
        citation_subset = article.find(".//CitationSubset")
        meta_info['CitationSubset'] = citation_subset.text if citation_subset is not None else None

        # Extract article IDs (DOI, etc.)
        article_ids = article.findall(".//ArticleIdList/ArticleId")
        article_id_info = [article_id.text for article_id in article_ids]
        meta_info['ArticleIds'] = ', '.join(filter(None, article_id_info)) if article_id_info else None
        # Extract the abstract, preserving section labels (e.g. "BACKGROUND: ...") when present
        abstract_texts = article.findall(".//Abstract/AbstractText")
        abstract_parts = []
        for elem in abstract_texts:
            label = elem.attrib.get("Label", "")
            # itertext() also captures text nested in inline markup such as <i> or <sup>
            text = "".join(elem.itertext()).strip()
            if label:
                abstract_parts.append(f"{label}: {text}")
            else:
                abstract_parts.append(text)
        meta_info["Abstract"] = "\n".join(abstract_parts) if abstract_parts else None
        # Extract MeSH terms
        mesh_terms = article.findall(".//MeshHeadingList/MeshHeading")
        mesh_terms_info = []
        for mesh_term in mesh_terms:
            descriptor_name = mesh_term.find(".//DescriptorName")
            if descriptor_name is not None:
                mesh_terms_info.append(descriptor_name.text)
        meta_info['MeshTerms'] = ', '.join(filter(None, mesh_terms_info)) if mesh_terms_info else None

        # Extract keywords
        keywords = article.findall(".//KeywordList/Keyword")
        keyword_info = [keyword.text for keyword in keywords]
        meta_info['Keywords'] = ', '.join(filter(None, keyword_info)) if keyword_info else None

        # Append the metadata for this article to the list
        meta_info_list.append(meta_info)

    return meta_info_list

def extract(input_dir, output_csv):
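    """Extract metadata from every .xml.gz file under input_dir and combine it into output_csv."""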
    # Create a temporary directory to store one intermediate CSV per input file
    temp_dir = os.path.join(os.path.dirname(output_csv), 'temp')
    os.makedirs(temp_dir, exist_ok=True)

    # Iterate over all .xml.gz files in the directory (sorted for a reproducible order)
    for filename in tqdm.tqdm(sorted(os.listdir(input_dir))):
        if filename.endswith('.xml.gz'):
            file_path = os.path.join(input_dir, filename)

            # Decompress and read the XML content
            with gzip.open(file_path, 'rb') as f:
                xml_content = f.read()

            # Extract the metadata of every article in this file
            meta_info_list = extract_meta_info(xml_content)

            # Save the metadata to a temporary CSV named after the input file
            temp_csv_path = os.path.join(temp_dir, f"{filename.removesuffix('.xml.gz')}.csv")
            df = pd.DataFrame(meta_info_list)  # One row per article
            df.to_csv(temp_csv_path, index=False)
    # Combine all temporary CSVs into a single large CSV file
    all_csv_files = glob.glob(os.path.join(temp_dir, '*.csv'))
    combined_df = pd.concat((pd.read_csv(f) for f in all_csv_files), ignore_index=True)
    combined_df.to_csv(output_csv, index=False)

    # Optionally, delete the temporary files:
    # for f in all_csv_files:
    #     os.remove(f)
    # os.rmdir(temp_dir)

    print(f"Meta information extracted and saved to {output_csv}")

if __name__ == "__main__":
    # Define the input and output paths
    input_dir = './pubmed_data'  # Replace with the actual path to the .xml.gz files
    output_csv = './2025/meta_info_2025_0327.csv'  # Output CSV file path
    extract(input_dir=input_dir, output_csv=output_csv)
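
# Usage sketch (the paths above are placeholders): download the gzipped PubMed
# baseline files, e.g. from https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/, into
# ./pubmed_data and run this script directly; the combined CSV is written to output_csv.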