Update pubmed25_debug.py
Browse files- pubmed25_debug.py +36 -43
pubmed25_debug.py
CHANGED
|
@@ -582,92 +582,85 @@ class Pubmed(datasets.GeneratorBasedBuilder):
|
|
| 582 |
|
| 583 |
|
| 584 |
def _generate_examples(self, filenames):
    """Yield (pmid, article) examples by parsing gzipped PubMed XML files.

    Uses ``ET.iterparse`` so each ``<PubmedArticle>`` element is handled and
    freed incrementally, keeping memory flat even on very large dump files.

    Args:
        filenames: iterable of paths to ``.xml.gz`` PubMed baseline files.

    Yields:
        (pmid, article) pairs, where ``pmid`` is the positive integer PMID
        used as the example key and ``article`` is a dict merged into the
        schema produced by ``default_article()``.
    """
    for filename in filenames:
        logger.info(f"Processing file: {filename}")
        try:
            with gzip.open(filename, "rb") as f:  # read as bytes for ET
                # Incremental parse: react on each closing tag.
                context = ET.iterparse(f, events=("end",))
                # Grab the root element so it can be cleared once the file is done.
                event, root = next(context)

                for event, elem in context:
                    # Process each PubmedArticle element when its closing tag is found.
                    if event == "end" and elem.tag == "PubmedArticle":
                        # BUG FIX: define pmid before the try so the except-logging
                        # below cannot raise NameError (original referenced an
                        # undefined `pmid_for_log`).
                        pmid = 0
                        try:
                            # Parse the completed element into our dictionary structure.
                            # Expected shape: {'PubmedArticle': {...actual data...}}
                            article_dict_wrapper = self.xml_to_dictionnary(elem)

                            if not article_dict_wrapper or 'PubmedArticle' not in article_dict_wrapper:
                                logger.warning(f"Parser returned empty or invalid structure for a PubmedArticle element in {filename}")
                                continue  # elem.clear() handled by the finally below

                            article = article_dict_wrapper.get('PubmedArticle')
                            # Ensure the extracted article data is not empty.
                            if not article or not isinstance(article, dict):
                                logger.warning(f"Parsed empty or invalid article data from element in {filename}")
                                continue

                            # 1. Normalize/augment citation data in place.
                            self.update_citation(article)
                            # 2. Create the default structure expected by the schema.
                            new_article = default_article()
                            # 3. Merge the parsed data into the default structure.
                            deepupdate(new_article, article)

                            # --- Validation and yielding ---
                            # Retrieve PMID for yielding and logging (must be a positive int).
                            pmid_val = new_article.get("MedlineCitation", {}).get("PMID", 0)
                            try:
                                pmid = int(pmid_val)
                                if pmid <= 0:
                                    raise ValueError("PMID must be positive")
                            except (ValueError, TypeError):
                                logger.warning(f"Skipping article due to invalid or missing PMID: '{pmid_val}' in {filename}")
                                continue  # skip this article

                            # Validate against the declared schema; raises if the dict doesn't fit.
                            self.info.features.encode_example(new_article)

                            # Yield the actual PMID as the example key.
                            yield pmid, new_article

                        except Exception as e:
                            logger.error(f"Failed to process article PMID {pmid} in {filename}: {e}", exc_info=True)
                            # Optionally log the problematic article data (can be large):
                            # if 'new_article' in locals(): logger.debug(f"Problematic data: {new_article}")
                        finally:
                            # Free the element in every path (success, skip, or error).
                            elem.clear()

                # Clear the root element after processing the file.
                if root is not None:
                    root.clear()

        except ET.ParseError as e:
            logger.error(f"XML ParseError in file {filename}: {e}")
            continue
        except gzip.BadGzipFile:
            logger.error(f"Bad Gzip File error for {filename}. It might be corrupted or incomplete.")
            continue
|
|
|
|
| 582 |
|
| 583 |
|
| 584 |
def _generate_examples(self, filenames):
    """Yield (pmid, article) examples from gzipped PubMed XML, skipping duplicate PMIDs.

    Uses ``ET.iterparse`` for memory efficiency and tracks already-yielded
    PMIDs across all input files so the same article is never emitted twice
    (datasets requires unique example keys).

    Args:
        filenames: iterable of paths to ``.xml.gz`` PubMed baseline files.

    Yields:
        (pmid, article) pairs with a unique positive integer ``pmid`` key.
    """
    yielded_pmids = set()  # PMIDs already emitted, to guard against duplicates across files

    for filename in filenames:
        logger.info(f"Processing file: {filename}")
        try:
            with gzip.open(filename, "rb") as f:  # read as bytes for ET
                context = ET.iterparse(f, events=("end",))
                event, root = next(context)  # keep root so it can be cleared at end of file

                for event, elem in context:
                    if event == "end" and elem.tag == "PubmedArticle":
                        article_dict_wrapper = None
                        pmid = "UNKNOWN_PMID"  # readable default for logging if extraction fails early
                        try:
                            article_dict_wrapper = self.xml_to_dictionnary(elem)

                            if not article_dict_wrapper or 'PubmedArticle' not in article_dict_wrapper:
                                logger.warning(f"Parser returned empty or invalid structure for a PubmedArticle element in {filename}")
                                continue  # elem.clear() handled by the finally below

                            article = article_dict_wrapper.get('PubmedArticle')
                            if not article or not isinstance(article, dict):
                                logger.warning(f"Parsed empty or invalid article data from element in {filename}")
                                continue

                            # --- Extract PMID early for the duplicate check ---
                            pmid_val = article.get("MedlineCitation", {}).get("PMID", 0)
                            try:
                                pmid = int(pmid_val)
                                if pmid <= 0:
                                    raise ValueError("PMID must be positive")
                            except (ValueError, TypeError):
                                logger.warning(f"Skipping article due to invalid or missing PMID: '{pmid_val}' in {filename}")
                                continue  # skip this article if PMID invalid

                            # --- Duplicate check ---
                            if pmid in yielded_pmids:
                                logger.warning(f"Skipping duplicate PMID {pmid} found in {filename}.")
                                continue  # skip this duplicate entry

                            # --- Not a duplicate: proceed with processing ---
                            self.update_citation(article)
                            new_article = default_article()
                            deepupdate(new_article, article)

                            # Sanity check: PMID must survive the merge unchanged.
                            # BUG FIX: normalize to int before comparing -- the raw
                            # dict value is typically a string from XML, so a direct
                            # `!=` against the int `pmid` would spuriously mismatch
                            # and skip every valid article.
                            final_pmid_val = new_article.get("MedlineCitation", {}).get("PMID", 0)
                            try:
                                final_pmid = int(final_pmid_val)
                            except (ValueError, TypeError):
                                final_pmid = -1
                            if final_pmid != pmid:
                                logger.error(f"PMID mismatch after processing! Expected {pmid}, got {final_pmid_val}. Skipping article.")
                                continue

                            # Validate against the schema; raises on mismatch.
                            self.info.features.encode_example(new_article)

                            # Yield pmid as key and the validated dictionary.
                            yield pmid, new_article
                            yielded_pmids.add(pmid)  # record only *after* a successful yield

                        except Exception as e:
                            logger.error(f"Failed to process article PMID {pmid} in {filename}: {e}", exc_info=False)  # exc_info=True for full traceback
                            # Logging the offending data can help but is verbose:
                            # if 'new_article' in locals(): logger.debug(f"Problematic data: {new_article}")
                        finally:
                            elem.clear()  # free the element in all cases (success, skip, error)

                if root is not None:
                    root.clear()

        except ET.ParseError as e:
            logger.error(f"XML ParseError in file {filename}: {e}")
            continue
        except gzip.BadGzipFile:
            logger.error(f"Bad Gzip File error for {filename}. It might be corrupted or incomplete.")
            continue
|