, , and , "
"with colspan and rowspan when needed to preserve merged cells. "
"Do not wrap HTML tables in markdown code fences. Preserve visible table text, "
"row order, column order, punctuation, signs, and numeric formatting."
)
def _mistral_ocr_table_format() -> str:
value = os.getenv("MISTRAL_OCR_TABLE_FORMAT", "html").strip().lower()
if value in {"html", "markdown"}:
return value
return "html"
def _build_mistral_ocr_payload(signed_url: str) -> dict:
    """Assemble the JSON body for a Mistral OCR request.

    When tables are requested as HTML, attach a strict JSON-schema annotation
    format plus the HTML-table prompt so the model emits markdown with
    embedded HTML tables.
    """
    fmt = _mistral_ocr_table_format()
    body = {
        "model": OCR_MODEL,
        "document": {"document_url": signed_url},
        "table_format": fmt,
    }
    if fmt != "html":
        return body
    schema = {
        "type": "object",
        "properties": {
            "content": {"type": "string"},
        },
        "required": ["content"],
        "additionalProperties": False,
    }
    body["document_annotation_format"] = {
        "type": "json_schema",
        "json_schema": {
            "name": "ocr_markdown_with_html_tables",
            "strict": True,
            "schema": schema,
        },
    }
    body["document_annotation_prompt"] = MISTRAL_OCR_HTML_TABLE_PROMPT
    return body
def _post_mistral_ocr_with_retry(
    *,
    headers: Dict[str, str],
    payload: Dict[str, Any],
    operation_label: str,
    timeout_s: int = 600,
):
    """POST *payload* to the Mistral OCR endpoint with exponential backoff.

    Retries on transient HTTP statuses (408/409/425/429/5xx) and on
    requests Timeout/ConnectionError, honoring a numeric Retry-After header
    when present.  Non-transient errors are re-raised immediately.

    Returns the successful requests.Response; re-raises the last exception
    (or RuntimeError) when every attempt fails.
    """
    max_retries = max(1, int(getattr(Config, "API_MAX_RETRIES", 4) or 4))
    delay = max(0.1, float(getattr(Config, "API_INITIAL_DELAY_SECONDS", 2.0) or 2.0))
    transient_statuses = {408, 409, 425, 429, 500, 502, 503, 504}
    last_exc: Optional[Exception] = None
    for attempt in range(1, max_retries + 1):
        response = None
        try:
            response = requests.post(OCR_API_URL, headers=headers, json=payload, timeout=timeout_s)
            if response.status_code not in transient_statuses:
                response.raise_for_status()
                return response
            # Transient status: raise so the except branch schedules a retry.
            response.raise_for_status()
        except Exception as exc:
            last_exc = exc
            status_code = getattr(response, "status_code", None)
            is_transient = status_code in transient_statuses or isinstance(
                exc,
                (
                    requests.Timeout,
                    requests.ConnectionError,
                ),
            )
            if not is_transient or attempt >= max_retries:
                raise
            # Prefer the server-suggested Retry-After if it parses as a number.
            retry_after = None
            if response is not None:
                retry_after_raw = response.headers.get("Retry-After")
                try:
                    retry_after = float(retry_after_raw) if retry_after_raw else None
                except (TypeError, ValueError):
                    retry_after = None
            # Exponential backoff with jitter; each sleep capped below at 60s.
            sleep_s = max(delay, retry_after or 0.0) + random.uniform(0, 0.5)
            print(
                f"OCR API transient error during {operation_label}: {exc}. "
                f"Retrying in {sleep_s:.2f}s... (Attempt {attempt}/{max_retries})"
            )
            time.sleep(sleep_s)
            delay = min(delay * 2, 60.0)
    if last_exc is not None:
        raise last_exc
    raise RuntimeError(f"OCR API request failed during {operation_label}")
def _decode_uu_block_bytes(text: str) -> bytes:
lines = text.splitlines()
begin_idx = next((i for i, line in enumerate(lines) if re.match(r"^begin\s+\d{3}\s+[^\n]+$", line.strip())), None)
if begin_idx is None:
raise ValueError("Could not find a valid 'begin' line in the uuencoded block.")
decoded = bytearray()
found_end = False
for line in lines[begin_idx + 1:]:
uu_line = line.rstrip("\r\n")
if uu_line == "end":
found_end = True
break
if uu_line == "":
continue
try:
decoded.extend(binascii.a2b_uu(uu_line.encode("latin-1")))
except binascii.Error as e:
if "Trailing garbage" not in str(e):
raise ValueError(f"Failed to decode uuencoded block: {e}") from e
recovered = None
for end in range(len(uu_line) - 1, 0, -1):
try:
candidate = binascii.a2b_uu(uu_line[:end].encode("latin-1"))
recovered = candidate
break
except binascii.Error:
continue
if recovered is None:
raise ValueError(f"Failed to decode uuencoded block: {e}") from e
decoded.extend(recovered)
if not found_end:
raise ValueError("Could not find the terminating 'end' line in the uuencoded block.")
return bytes(decoded)
def _extract_uu_block(text: str) -> Tuple[bytes, str]:
    """Locate the first uuencoded block and return (decoded bytes, filename).

    Raises ValueError when no 'begin' header is present or decoding yields
    no data.  Truncated PDFs get an '%%EOF' trailer appended so downstream
    readers can open them.
    """
    header = re.search(r"begin\s+\d{3}\s+([^\n]+)", text)
    if header is None:
        raise ValueError("Could not find a valid 'begin' line in the uuencoded block.")
    filename = header.group(1).strip()
    payload = _decode_uu_block_bytes(text)
    if not payload:
        raise ValueError("UU decoding produced no data.")
    if payload.startswith(b"%PDF-") and b"%%EOF" not in payload[-1024:]:
        payload += b"\n%%EOF\n"
    return payload, filename
def _slice_pdf_bytes(pdf_bytes: bytes, first_page: int, last_page: Optional[int] = None) -> bytes:
    """Return a new PDF holding pages first_page..last_page (1-based, inclusive).

    A missing *last_page* means "through the final page"; an out-of-range
    *last_page* is clamped to the document length.
    """
    source = PdfReader(io.BytesIO(pdf_bytes))
    total = len(source.pages)
    stop = total if last_page is None else min(last_page, total)
    out = PdfWriter()
    for idx in range(first_page - 1, stop):
        out.add_page(source.pages[idx])
    buffer = io.BytesIO()
    try:
        out.write(buffer)
        return buffer.getvalue()
    finally:
        buffer.close()
def is_page_nearly_blank(page: fitz.Page, threshold: float = 3.5) -> bool:
    """Heuristically detect a visually blank page.

    Renders the page at half resolution in grayscale and treats it as blank
    when the pixel standard deviation falls below *threshold* (or when the
    render produced fewer than 100 samples).
    """
    pixmap = page.get_pixmap(matrix=fitz.Matrix(0.5, 0.5), colorspace=fitz.csGRAY, alpha=False)
    pixels = np.frombuffer(pixmap.samples, dtype=np.uint8)
    if pixels.size < 100:
        return True
    return np.std(pixels) < threshold
def realign_fixed_width_table(text: str) -> str:
    """Re-render a sloppily aligned fixed-width table with tidy columns.

    Column boundaries are inferred from the dashed separator line; the first
    column is left-justified and all remaining columns right-justified.
    Returns the input unchanged whenever no table structure is detected.
    """
    lines = text.strip().split('\n')
    if len(lines) < 2:
        return text
    # The separator is the first line that is essentially dashes and spaces.
    sep_idx = -1
    for idx, candidate in enumerate(lines):
        if '--' in candidate and len(candidate.replace('-', '').replace(' ', '')) < 5:
            sep_idx = idx
            break
    if sep_idx == -1:
        return text
    sep = lines[sep_idx]
    # Each maximal dash run in the separator marks one column span.
    spans = []
    run_start = None
    for pos, ch in enumerate(sep):
        if ch == '-':
            if run_start is None:
                run_start = pos
        elif ch == ' ' and run_start is not None:
            spans.append((run_start, pos))
            run_start = None
    if run_start is not None:
        spans.append((run_start, len(sep)))
    if not spans:
        return text
    rows = []
    for line in lines:
        if line.strip() == sep.strip():
            continue
        cells = [line[a:b].strip() for a, b in spans]
        if any(cells):
            rows.append(cells)
    if not rows:
        return text
    widths = [max(len(cell) for cell in column) for column in zip(*rows)]
    head, body = rows[0], rows[1:]
    rendered = [
        '   '.join(cell.ljust(width) for cell, width in zip(head, widths)),
        '   '.join('-' * width for width in widths),
    ]
    for cells in body:
        cells = cells + [''] * (len(widths) - len(cells))
        first = cells[0].ljust(widths[0])
        rest = [cell.rjust(width) for cell, width in zip(cells[1:], widths[1:])]
        rendered.append('   '.join([first] + rest))
    return '\n'.join(rendered)
def _ascii_text(s: str) -> str:
    """Normalize *s* to plain ASCII with all whitespace collapsed to spaces."""
    out = _normalize_ocr_text(s or "")
    out = out.translate(PUNCT_CANON)
    # Curly quotes -> straight quotes; bullet characters -> '*'.
    for src, dst in (("\u201c", '"'), ("\u201d", '"'), ("\u2022", "*"), ("\u00b7", "*")):
        out = out.replace(src, dst)
    out = unicodedata.normalize("NFKD", out).encode("ascii", "ignore").decode("ascii")
    return re.sub(r"\s+", " ", out).strip()
def is_numeric_like(s: str) -> bool:
    """True when *s* looks numeric after stripping $, commas, % and treating
    a parenthesized value as negative."""
    cleaned = _ascii_text(s)
    if cleaned in ("", "-"):
        return False
    for junk in (",", "$", "%"):
        cleaned = cleaned.replace(junk, "")
    if cleaned.startswith("(") and cleaned.endswith(")"):
        cleaned = "-" + cleaned[1:-1]
    try:
        float(cleaned)
    except ValueError:
        return False
    return True
def table_to_fixed_width(table) -> str:
    """Render a BeautifulSoup <table> element as an ASCII fixed-width grid.

    The first row is treated as the header and underlined with dashes.
    Each non-first column is right-aligned when at least half of its
    non-empty body cells look numeric; otherwise left-aligned.  Returns ""
    when the table has no cell-bearing rows.
    """
    rows = []
    for tr in table.find_all("tr"):
        cells = [_ascii_text(c.get_text(" ", strip=True)) for c in tr.find_all(["th", "td"])]
        if cells:
            rows.append(cells)
    if not rows:
        return ""
    # Pad ragged rows so the grid is rectangular.
    col_count = max(len(r) for r in rows)
    rows = [r + [""] * (col_count - len(r)) for r in rows]
    aligns = []
    for j in range(col_count):
        if j == 0:
            # First column holds row labels; always left-align.
            aligns.append("left")
            continue
        col_vals = [r[j] for r in rows[1:] if r[j].strip()]
        numeric_count = sum(is_numeric_like(v) for v in col_vals)
        aligns.append("right" if col_vals and numeric_count >= len(col_vals) / 2 else "left")
    widths = [max(len(r[j]) for r in rows) for j in range(col_count)]
    def fmt(row):
        # Render one row with the per-column width/alignment decided above.
        out = []
        for j, cell in enumerate(row):
            if aligns[j] == "right":
                out.append(cell.rjust(widths[j]))
            else:
                out.append(cell.ljust(widths[j]))
        return "  ".join(out).rstrip()
    lines = []
    for i, row in enumerate(rows):
        lines.append(fmt(row))
        if i == 0:
            # Dashed underline directly beneath the header row.
            lines.append("  ".join("-" * w for w in widths).rstrip())
    return "\n".join(lines)
def _html_to_fixed_width_ascii(html_fragment: str) -> str:
    """Flatten an HTML fragment to ASCII text with fixed-width tables.

    Tables are rendered via table_to_fixed_width; other content becomes
    normalized ASCII text, with bare **bold** runs upper-cased.  Parts are
    joined by blank lines.
    """
    html_fragment = (html_fragment or "").strip()
    # Strip a surrounding markdown code fence if the model added one.
    if html_fragment.startswith("```"):
        html_fragment = re.sub(r"^```(?:html)?\s*", "", html_fragment, flags=re.I)
        html_fragment = re.sub(r"\s*```$", "", html_fragment)
    soup = BeautifulSoup(html_fragment, "html.parser")
    root = soup.body if soup.body else soup
    output_parts: List[str] = []
    def append_text(text: str):
        # Collect one text run; a pure "**bold**" run is emitted UPPERCASE.
        text = _ascii_text(text)
        if not text:
            return
        m = re.match(r"^\*\*(.+?)\*\*$", text)
        if m:
            output_parts.append(m.group(1).strip().upper())
        else:
            output_parts.append(text)
    def walk(nodes):
        # Depth-first walk: tables are rendered whole; other elements are
        # descended into, or flattened when they have no children.
        for node in nodes:
            if isinstance(node, NavigableString):
                append_text(str(node))
                continue
            node_name = getattr(node, "name", None)
            if node_name == "table":
                fixed = table_to_fixed_width(node)
                if fixed.strip():
                    output_parts.append(fixed)
                continue
            if node_name == "br":
                continue
            if getattr(node, "contents", None):
                walk(node.contents)
            else:
                append_text(node.get_text(" ", strip=True))
    walk(root.contents)
    return "\n\n".join(part for part in output_parts if part.strip())
_DASH_TRANSLATE = str.maketrans({
"\u2010": "-", "\u2011": "-", "\u2012": "-", "\u2013": "-", "\u2014": "-", "\u2015": "-",
"\u2212": "-", "\uFE58": "-", "\uFE63": "-", "\uFF0D": "-"
})
def _normalize_ocr_text(s: str) -> str:
s = unicodedata.normalize("NFKC", s or "")
s = s.replace("\u00A0", " ").replace("\ufeff", "").replace("\u200b", "").replace("\u200d", "")
s = s.translate(_DASH_TRANSLATE)
if "\\n" in s: s = s.replace(r"\n", "\n")
return s
_CELL_SEP_RE = re.compile(r'^\s*:?\s*-{2,}\s*:?\s*$')
_MISTRAL_HTML_TABLE_FRAGMENT_RE = re.compile(r"", re.I)
def _is_separator_line(line: str) -> bool:
    """True when *line* is a markdown table separator row such as
    '| --- | :--: |'."""
    normalized = _normalize_ocr_text(line)
    if '|' not in normalized:
        return False
    core = normalized.strip()
    if not core.startswith('|'):
        return False
    cells = [c.strip() for c in core.strip('|').split('|')]
    if not cells or '' in cells:
        return False
    return all(_CELL_SEP_RE.match(c or '') for c in cells)
def find_md_table_blocks(text: str) -> List[Tuple[int,int]]:
    """Locate markdown tables in *text*.

    A table is a line containing '|' immediately followed by a separator
    row and at least one non-empty body line containing '|'.  Returns
    (first_line, last_line) pairs as 0-based inclusive indexes into the
    normalized text's lines.
    """
    text = _normalize_ocr_text(text)
    lines = text.splitlines()
    blocks: List[Tuple[int,int]] = []
    i, n = 0, len(lines)
    while i < n - 1:
        if '|' in lines[i].lstrip() and i + 1 < n and _is_separator_line(lines[i+1]):
            # Scan forward across contiguous body rows.
            j, has_body = i + 2, False
            while j < n and '|' in lines[j].lstrip() and lines[j].strip() != '':
                has_body = True
                j += 1
            if has_body:
                blocks.append((i, j-1))
                i = j
                continue
        i += 1
    return blocks
def slice_text_by_blocks(text: str, blocks: List[Tuple[int,int]]) -> List[str]:
    """Extract each inclusive (start, end) line range of *text* as a string."""
    all_lines = _normalize_ocr_text(text).splitlines()
    return ["\n".join(all_lines[start:stop + 1]) for start, stop in blocks]
def replace_blocks_with(text: str, blocks: List[Tuple[int,int]], repl_texts: List[str]) -> str:
    """Splice repl_texts[k] over the k-th inclusive line range in *blocks*.

    Blocks are assumed sorted and non-overlapping; text outside them is
    preserved verbatim.
    """
    all_lines = _normalize_ocr_text(text).splitlines()
    result: List[str] = []
    cursor = 0
    for idx, (start, stop) in enumerate(blocks):
        result.extend(all_lines[cursor:start])
        result.extend(repl_texts[idx].splitlines())
        cursor = stop + 1
    result.extend(all_lines[cursor:])
    return "\n".join(result)
def _inline_mistral_table_placeholders(page_obj, text_content: str) -> str:
    """Substitute Mistral OCR table placeholders with the table content.

    Mistral may return page text containing "[tbl_id](tbl_id)" markers and a
    separate "tables" list; each marker is replaced by that table's
    content/html/markdown.  When no marker could be replaced, any table
    content not already present verbatim in the text is appended so table
    data is never silently dropped.
    """
    rendered = str(text_content or "")
    tables = page_obj.get("tables") if isinstance(page_obj, dict) else None
    if not isinstance(tables, list) or not tables:
        return rendered
    inlined_count = 0
    fallback_contents = []
    for table in tables:
        if not isinstance(table, dict):
            continue
        table_id = _normalize_ocr_text(str(table.get("id") or "")).strip()
        # First non-empty of content/html/markdown wins.
        table_content = _normalize_ocr_text(
            str(table.get("content") or table.get("html") or table.get("markdown") or "")
        ).strip()
        if not table_content:
            continue
        if table_id:
            placeholder = f"[{table_id}]({table_id})"
            if placeholder in rendered:
                rendered = rendered.replace(placeholder, table_content)
                inlined_count += 1
                continue
        fallback_contents.append(table_content)
    if inlined_count == 0 and fallback_contents:
        # Nothing matched a placeholder: append tables that are not already
        # embedded in the page text.
        missing_contents = [
            content
            for content in fallback_contents
            if content not in rendered
        ]
        if missing_contents:
            if rendered.strip():
                rendered = rendered.rstrip() + "\n\n" + "\n\n".join(missing_contents)
            else:
                rendered = "\n\n".join(missing_contents)
    return rendered
def _mistral_table_cell_text(cell) -> str:
    """Flatten a BeautifulSoup table cell to one-line text, escaping pipes so
    the text is safe inside a markdown table."""
    flat = _normalize_ocr_text(cell.get_text(" ", strip=True))
    flat = re.sub(r"\s+", " ", flat).strip()
    return flat.replace("|", r"\|")
def _html_table_fragment_to_mmd(table_html: str) -> str:
    """Convert one HTML <table> fragment into a markdown table string.

    colspan/rowspan are honored by inserting empty filler cells so the grid
    stays rectangular.  On parse failure, or when the fragment yields no
    non-empty rows, the original HTML is returned unchanged.
    """
    try:
        soup = BeautifulSoup(table_html, "html.parser")
    except Exception:
        return table_html
    table = soup.find("table")
    if table is None:
        return table_html
    rows = []
    # pending_rowspans[col] = how many further rows column `col` remains
    # occupied by an earlier cell's rowspan.
    pending_rowspans = {}
    for tr in table.find_all("tr"):
        row = []
        col = 0
        for cell in tr.find_all(["th", "td"], recursive=False):
            # Skip columns still covered by a rowspan from a previous row.
            while pending_rowspans.get(col, 0) > 0:
                row.append("")
                pending_rowspans[col] -= 1
                col += 1
            try:
                colspan = max(1, int(cell.get("colspan", 1)))
            except (TypeError, ValueError):
                colspan = 1
            try:
                rowspan = max(1, int(cell.get("rowspan", 1)))
            except (TypeError, ValueError):
                rowspan = 1
            row.append(_mistral_table_cell_text(cell))
            # Empty filler cells for the horizontal span.
            for _ in range(1, colspan):
                row.append("")
            if rowspan > 1:
                # Reserve every column of this span for the next rows.
                for offset in range(colspan):
                    pending_rowspans[col + offset] = max(pending_rowspans.get(col + offset, 0), rowspan - 1)
            col += colspan
        # Trailing columns still covered by earlier rowspans.
        while pending_rowspans.get(col, 0) > 0:
            row.append("")
            pending_rowspans[col] -= 1
            col += 1
        if any(cell.strip() for cell in row):
            rows.append(row)
    if not rows:
        return table_html
    width = max(len(row) for row in rows)
    padded_rows = [row + [""] * (width - len(row)) for row in rows]
    def render_row(row) -> str:
        # Empty cells render as a single space so the pipes stay visible.
        return "| " + " | ".join(cell or " " for cell in row) + " |"
    separator = "| " + " | ".join("---" for _ in range(width)) + " |"
    return "\n".join([render_row(padded_rows[0]), separator, *[render_row(row) for row in padded_rows[1:]]])
def _convert_mistral_html_tables_to_mmd(text: str) -> str:
    """Replace every inline HTML <table> fragment in *text* with its markdown
    rendering, then collapse runs of 3+ newlines to blank lines.

    NOTE(review): the original body was corrupted by an HTML-stripping
    artifact (the '<table' guard and the nested callback's signature were
    fused into one invalid line); reconstructed from the surviving
    statements — confirm against the original source.
    """
    # Cheap guard: skip the regex pass when no table markup is present.
    if not text or "<table" not in text.lower():
        return text
    def replace_table(match) -> str:
        rendered = _html_table_fragment_to_mmd(match.group(0))
        return f"\n\n{rendered}\n\n"
    converted = _MISTRAL_HTML_TABLE_FRAGMENT_RE.sub(replace_table, text)
    return re.sub(r"\n{3,}", "\n\n", converted).strip()
def _pick_text(page_obj) -> str:
    """Return a page's markdown (or text) with table placeholders inlined and
    HTML tables converted to markdown, fully normalized."""
    raw = (page_obj.get("markdown") or page_obj.get("text") or "").strip()
    normalized = _normalize_ocr_text(raw)
    with_tables = _inline_mistral_table_placeholders(page_obj, normalized)
    return _normalize_ocr_text(_convert_mistral_html_tables_to_mmd(with_tables).strip())
def _full_page_data_uri(doc: fitz.Document, page_index0: int, zoom: float = Config.IMAGE_RENDER_ZOOM) -> str:
    """Render page *page_index0* (0-based) as a base64 PNG data: URI at the
    given zoom factor."""
    pixmap = doc.load_page(page_index0).get_pixmap(matrix=fitz.Matrix(zoom, zoom), alpha=False)
    encoded = base64.b64encode(pixmap.tobytes('png')).decode('utf-8')
    return f"data:image/png;base64,{encoded}"
def get_signed_url_with_retry(client, file_id, max_retries=Config.API_MAX_RETRIES, initial_delay=Config.API_INITIAL_DELAY_SECONDS):
    """
    Attempts to get a signed URL, retrying with exponential backoff if a 404 error occurs.

    A 404 typically means the freshly uploaded file is not yet visible to the
    files API; any other SDKError is re-raised immediately.  NOTE: the default
    arguments are read from Config once, at function-definition time.
    """
    delay = initial_delay
    for attempt in range(max_retries):
        try:
            signed_url = client.files.get_signed_url(file_id=file_id).url
            return signed_url
        except SDKError as e:
            if e.status_code == 404 and attempt < max_retries - 1:
                print(f"File ID {file_id} not found yet. Retrying in {delay:.2f}s... (Attempt {attempt + 1}/{max_retries})")
                time.sleep(delay)
                # Exponential backoff with a little jitter.
                delay *= 2
                delay += random.uniform(0, 0.1)
            else:
                raise e
    raise Exception(f"Failed to get signed URL for file {file_id} after {max_retries} attempts.")
def _process_pdf_bytes_with_fallback(
    pdf_bytes: bytes,
    file_name: str,
    *,
    batch_size: int,
    mistral_api_key: Optional[str],
    per_table_sleep_s: float,
    start_time: float,
    time_limit_s: int,
):
    """
    Main PDF processing workflow that takes bytes and returns processed content
    and a boolean indicating if a timeout occurred, plus the number of PDF pages
    successfully parsed through OCR.

    Pages are OCR'd in batches of *batch_size*: each batch is sliced into a
    standalone PDF, uploaded, and posted to the OCR endpoint under key
    rotation.  A failed batch is logged and skipped; nearly blank pages are
    dropped from the results.
    """
    _log_current_filing_ocr("pdf_or_rendered_html")
    _print_mistral_monthly_usage("before", file_name, explicit_api_key=mistral_api_key)
    doc = fitz.open(stream=pdf_bytes, filetype="pdf")
    n_total = doc.page_count
    results = []
    parsed_page_count = 0
    timed_out = False
    print(f"[init] processing '{file_name}' ({n_total} pages) in batches of {batch_size}…")
    p = 1
    while p <= n_total:
        # Wall-clock budget check before starting each batch.
        if time.time() - start_time > time_limit_s:
            print(f"\n[timeout] Time limit of {time_limit_s // 60} minutes reached. Stopping processing for this document.")
            timed_out = True
            break
        q = min(p + batch_size - 1, n_total)
        print(f"[basic] pages {p}–{q} …")
        pages = []
        try:
            chunk_bytes = _slice_pdf_bytes(pdf_bytes, first_page=p, last_page=q)
            def _run_batch_ocr(*, client: Mistral, api_key: str, key_spec: Dict[str, Any]):
                # Upload the batch chunk, resolve its signed URL, then POST
                # the OCR request with retry; returns pages + usage summary.
                up = client.files.upload(file={"file_name": f"chunk_{p}-{q}_{file_name}", "content": chunk_bytes}, purpose="ocr")
                if not up or not up.id:
                    raise Exception("File upload failed to return a valid ID.")
                signed_url = get_signed_url_with_retry(client, file_id=up.id)
                headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
                payload = _build_mistral_ocr_payload(signed_url)
                response = _post_mistral_ocr_with_retry(
                    headers=headers,
                    payload=payload,
                    operation_label=f"pdf batch {p}-{q} for {file_name}",
                    timeout_s=600,
                )
                ocr_data = response.json()
                usage = _summarize_ocr_usage(ocr_data, response.headers)
                return ocr_data.get("pages", []), usage, key_spec["env_name"]
            pages, usage, used_env_name = _run_with_mistral_key_rotation(
                f"pdf batch {p}-{q} for {file_name}",
                _run_batch_ocr,
                explicit_api_key=mistral_api_key,
            )
            _record_mistral_key_success(used_env_name, usage=usage, explicit_api_key=mistral_api_key)
        except Exception as e:
            # Best-effort: a failed batch is logged and skipped, not fatal.
            print(f"API processing for pages {p}-{q} failed and was skipped. Error: {e}")
            error_message = f"Could not repair '{file_name}'. The file may be severely corrupted. Error: {e}"
            logging.error(
                f"FILE: {file_name}\nERROR: {error_message}\nTRACEBACK:\n{traceback.format_exc()}"
            )
        if not pages:
            p = q + 1
            continue
        parsed_page_count += len(pages)
        for i, page_obj in enumerate(pages):
            page_no = p + i
            doc_page = doc.load_page(page_no - 1)
            if is_page_nearly_blank(doc_page):
                print(f"[page {page_no}] is nearly blank -> skipping.")
                continue
            text_basic = _pick_text(page_obj)
            results.append({"page": page_no, "content": text_basic, "source": "mistral-ocr"})
        p = q + 1
    _print_mistral_monthly_usage("after", file_name, explicit_api_key=mistral_api_key)
    return results, timed_out, parsed_page_count
def _try_repair_pdf_bytes(pdf_bytes: bytes, file_name: str) -> bytes:
"""
Attempts to repair a potentially truncated or corrupted PDF byte stream.
"""
if not pdf_bytes.startswith(b"%PDF-"):
print(f"[warning] Data for '{file_name}' does not appear to be a PDF. Skipping repair.")
return pdf_bytes
if b"%%EOF" not in pdf_bytes[-1024:]:
print(f"[info] PDF '{file_name}' appears truncated. Appending EOF marker for recovery.")
pdf_bytes += b"\n%%EOF\n"
try:
with fitz.open(stream=pdf_bytes, filetype="pdf") as doc:
if doc.needs_pass:
print(f"[warning] PDF '{file_name}' is password protected and cannot be repaired or processed.")
return None
with io.BytesIO() as output_buffer:
doc.save(output_buffer, garbage=4, clean=True, deflate=True)
print(f"[success] Successfully repaired and rebuilt '{file_name}'.")
return output_buffer.getvalue()
except Exception as e:
print(f"[error] Could not repair '{file_name}'. The file may be severely corrupted. Error: {e}")
logging.error(
f"FILE: {file_name}\n"
f"ERROR: Could not process PDF attachment: {e}\n"
f"TRACEBACK:\n{traceback.format_exc()}"
)
return None
return pdf_bytes
def parse_pdf_attachments(pdf_blobs) -> tuple[str, int]:
    """
    Parses uu-encoded PDF attachments using a high-quality, in-memory workflow.
    This version includes an attempt to repair corrupted PDFs.

    Returns (markdown summary, number of pages successfully OCR-parsed).

    NOTE(review): the status messages previously printed the literal
    placeholder `(unknown)` even though the attachment filename had been
    extracted into `filename`; they now report the actual filename.
    """
    _load_sec_parser_env()
    if not _has_mistral_api_keys():
        print(f"{_mistral_no_keys_message()} Skipping PDF processing.")
        return "", 0
    TIME_LIMIT_SECONDS = Config.PDF_TIMEOUT_LIMIT * 60
    start_time = time.time()
    timed_out = False
    total_parsed_page_count = 0
    md_parts = ["\n### Attached PDF Documents\n"]
    for i, pdf_data in enumerate(pdf_blobs, 1):
        if timed_out:
            break
        # Fallback name for the except-branch when extraction itself fails.
        filename = "unknown.pdf"
        try:
            pdf_bytes, filename = _extract_uu_block(pdf_data)
            repaired_pdf_bytes = _try_repair_pdf_bytes(pdf_bytes, filename)
            if not repaired_pdf_bytes:
                md_parts.append(f"**Attachment {i}:** `{filename}` – Corrupted and could not be repaired.")
                continue
            if not repaired_pdf_bytes.startswith(b"%PDF-"):
                md_parts.append(f"**Attachment {i}:** `{filename}` – not a PDF.")
                continue
            md_parts.append(f"**Attachment {i}:** `{filename}`")
            page_results, timed_out_during_processing, parsed_page_count = _process_pdf_bytes_with_fallback(
                pdf_bytes=repaired_pdf_bytes,
                file_name=filename,
                batch_size=Config.PDF_BATCH_SIZE,
                mistral_api_key=None,
                per_table_sleep_s=Config.PER_TABLE_SLEEP_SECONDS,
                start_time=start_time,
                time_limit_s=TIME_LIMIT_SECONDS
            )
            total_parsed_page_count += int(parsed_page_count or 0)
            if timed_out_during_processing:
                timed_out = True
            attachment_content_parts = [res.get('content', '') for res in page_results if res.get('content')]
            if attachment_content_parts:
                md_parts.append("\n\n".join(attachment_content_parts))
            elif not timed_out:
                md_parts.append("_No text found in this document._")
        except Exception as e:
            md_parts.append(f"Could not process `{filename}`: {e}")
            logging.error(
                f"FILE: {filename}\n"
                f"ERROR: Could not process PDF attachment: {e}\n"
                f"TRACEBACK:\n{traceback.format_exc()}"
            )
            traceback.print_exc()
    if timed_out:
        md_parts.append("\n\n**Time limit hit – remaining pages or documents were skipped.**")
    return "\n\n".join(md_parts), total_parsed_page_count
_CP1252_CTRL_RE = re.compile(r'[\x80-\x9F]')
def _has_cp1252_ctrls(s: str) -> bool:
return bool(_CP1252_CTRL_RE.search(s))
def normalize_text_markup(markup):
    """Decode raw HTML bytes (or accept str) into normalized Unicode text.

    Decoding strategy for bytes: try strict UTF-8 first; if that fails or
    the result still contains CP-1252 control points, detwingle and let
    UnicodeDammit guess, falling back to latin-1.  The decoded text then has
    mojibake patched, entities unescaped, soft hyphens/odd spaces removed,
    punctuation canonicalized, and is NFC-normalized.
    """
    if isinstance(markup, bytes):
        utf8_text = None
        try:
            utf8_text = markup.decode('utf-8')
        except UnicodeDecodeError:
            utf8_text = None
        if utf8_text is not None and not _has_cp1252_ctrls(utf8_text):
            # Clean UTF-8 with no CP-1252 leakage: take it as-is.
            text = utf8_text
        else:
            # Mixed/ambiguous encoding: repair embedded Windows-1252 runs,
            # then let UnicodeDammit detect the overall encoding.
            markup = UnicodeDammit.detwingle(markup)
            utf8_text = None
            try:
                utf8_text = markup.decode('utf-8')
            except UnicodeDecodeError:
                utf8_text = None
            ud = UnicodeDammit(markup, is_html=True, smart_quotes_to='unicode')
            text = ud.unicode_markup
            detected_encoding = (ud.original_encoding or '').lower()
            # Mac-encoding detections are frequently false positives; prefer
            # the UTF-8 reading when one exists.
            if utf8_text is not None and detected_encoding.startswith('mac_'):
                text = utf8_text
            elif (not text) or ('\uFFFD' in text) or _has_cp1252_ctrls(text):
                # Last resort: latin-1 maps every byte, so it cannot fail.
                text = markup.decode('latin-1', errors='strict')
    else:
        text = str(markup)
    text = text.translate(CP1252_CTRL_TO_UNICODE)
    # NOTE(review): several keys below contain U+FFFD replacement characters
    # and appear to be corrupted mojibake literals (duplicate '�' keys
    # collapse in the dict, keeping only the last mapping) — restore the
    # original byte sequences from source control if possible.
    mojibake_map = {
        'â�”': '—',
        'â�“': '–',
        '�': "'",
        '�': '"',
        'â�d': '"',
        '�': ' ',
    }
    for bad, good in mojibake_map.items():
        text = text.replace(bad, good)
    # NOTE(review): these replaces look like HTML-entity handling whose
    # entity literals were garbled by an HTML-stripping artifact (the first
    # two are no-ops as written) — confirm against the original source.
    text = text.replace("<", "<").replace(">", ">").replace(" ", "##INDENT##")
    text = html.unescape(text)
    # Drop soft hyphens; collapse the various fixed-width spaces; keep
    # em/en dashes canonical.
    text = (text
            .replace('\u00AD', '')
            .replace('\u00A0', ' ')
            .replace('\u2007', ' ')
            .replace('\u202F', ' ')
            .replace('\u2009', ' ')
            .replace('\u2014', '—')
            .replace('\u2013', '–'))
    text = text.translate(PUNCT_CANON)
    return unicodedata.normalize('NFC', text)
# Canonical column order for SEC Form 4 Table I (non-derivative securities).
# The ##ROWSPAN_n##/##COLSPAN_n## suffixes are span markers consumed by
# df_to_multimarkdown to merge header cells.
ORDER_I = [
    "1. Title of Security##ROWSPAN_1## 1. Title of Security##ROWSPAN_1##",
    "2. Transaction Date##ROWSPAN_2## 2. Transaction Date##ROWSPAN_2##",
    "2A. Deemed Execution Date##ROWSPAN_3## 2A. Deemed Execution Date##ROWSPAN_3##",
    "3. Transaction Code (V)##COLSPAN_1## Code",
    "3. Transaction Code (V)##COLSPAN_1## V",
    "4. Securities Acquired (A) or Disposed of (D)##COLSPAN_2## Amount",
    "4. Securities Acquired (A) or Disposed of (D)##COLSPAN_2## (A) or (D)",
    "4. Securities Acquired (A) or Disposed of (D)##COLSPAN_2## Price",
    "5. Amount of Securities Beneficially Owned##ROWSPAN_4## 5. Amount of Securities Beneficially Owned##ROWSPAN_4##",
    "6. Ownership Form##ROWSPAN_5## 6. Ownership Form##ROWSPAN_5##",
    "7. Nature of Indirect Beneficial Ownership##ROWSPAN_6## 7. Nature of Indirect Beneficial Ownership##ROWSPAN_6##",
]
# Canonical column order for SEC Form 4 Table II (derivative securities).
ORDER_II = [
    "1. Title of Derivative Security##ROWSPAN_7## 1. Title of Derivative Security##ROWSPAN_7##",
    "2. Conversion or Exercise Price##ROWSPAN_8## 2. Conversion or Exercise Price##ROWSPAN_8##",
    "3. Transaction Date##ROWSPAN_9## 3. Transaction Date##ROWSPAN_9##",
    "3A. Deemed Execution Date##ROWSPAN_10## 3A. Deemed Execution Date##ROWSPAN_10##",
    "4. Transaction Code (V)##COLSPAN_3## Code",
    "4. Transaction Code (V)##COLSPAN_3## V",
    "5. Number of Derivative Securities Acquired (A) or Disposed of (D)##COLSPAN_4## (A)",
    "5. Number of Derivative Securities Acquired (A) or Disposed of (D)##COLSPAN_4## (D)",
    "6. Date Exercisable and Expiration Date##COLSPAN_5## Date Exercisable",
    "6. Date Exercisable and Expiration Date##COLSPAN_5## Expiration Date",
    "7. Title and Amount of Underlying Securities##COLSPAN_6## Title",
    "7. Title and Amount of Underlying Securities##COLSPAN_6## Amount or Number of Shares",
    "8. Price of Derivative Security##ROWSPAN_11## 8. Price of Derivative Security##ROWSPAN_11##",
    "9. Number of Derivative Securities Beneficially Owned##ROWSPAN_12## 9. Number of Derivative Securities Beneficially Owned##ROWSPAN_12##",
    "10. Ownership Form##ROWSPAN_13## 10. Ownership Form##ROWSPAN_13##",
    "11. Nature of Indirect Beneficial Ownership##ROWSPAN_14## 11. Nature of Indirect Beneficial Ownership##ROWSPAN_14##",
]
# Canonical column order for SEC Form 3 Table I (no transaction columns).
ORDER_I_FORM3 = [
    "1. Title of Security",
    "2. Amount of Securities Beneficially Owned",
    "3. Ownership Form",
    "4. Nature of Indirect Beneficial Ownership",
]
# Canonical column order for SEC Form 3 Table II (derivative securities).
ORDER_II_FORM3 = [
    "1. Title of Derivative Security##ROWSPAN_1## 1. Title of Derivative Security##ROWSPAN_1##",
    "2. Date Exercisable and Expiration Date (Month/Day/Year)##COLSPAN_1## Date Exercisable",
    "2. Date Exercisable and Expiration Date (Month/Day/Year)##COLSPAN_1## Expiration Date",
    "3. Title and Amount of Underlying Securities##COLSPAN_2## Title",
    "3. Title and Amount of Underlying Securities##COLSPAN_2## Amount or Number of Shares",
    "4. Conversion or Exercise Price##ROWSPAN_2## 4. Conversion or Exercise Price##ROWSPAN_2##",
    "5. Ownership Form##ROWSPAN_3## 5. Ownership Form##ROWSPAN_3##",
    "6. Nature of Indirect Beneficial Ownership##ROWSPAN_4## 6. Nature of Indirect Beneficial Ownership##ROWSPAN_4##",
]
# SEC EDGAR state/country codes -> display names (subset: US states, Canadian
# provinces, and the foreign jurisdictions encountered in filings).
SEC_COUNTRY_CODES = {
    'B9': 'ANTIGUA AND BARBUDA',
    'E9': 'CAYMAN ISLANDS',
    'F4': 'CHINA',
    'K3': 'HONG KONG',
    'AL': 'ALABAMA', 'AK': 'ALASKA', 'AZ': 'ARIZONA', 'AR': 'ARKANSAS', 'CA': 'CALIFORNIA',
    'CO': 'COLORADO', 'CT': 'CONNECTICUT', 'DE': 'DELAWARE', 'DC': 'DISTRICT OF COLUMBIA',
    'FL': 'FLORIDA', 'GA': 'GEORGIA', 'HI': 'HAWAII', 'ID': 'IDAHO', 'IL': 'ILLINOIS',
    'IN': 'INDIANA', 'IA': 'IOWA', 'KS': 'KANSAS', 'KY': 'KENTUCKY', 'LA': 'LOUISIANA',
    'ME': 'MAINE', 'MD': 'MARYLAND', 'MA': 'MASSACHUSETTS', 'MI': 'MICHIGAN',
    'MN': 'MINNESOTA', 'MS': 'MISSISSIPPI', 'MO': 'MISSOURI', 'MT': 'MONTANA',
    'NE': 'NEBRASKA', 'NV': 'NEVADA', 'NH': 'NEW HAMPSHIRE', 'NJ': 'NEW JERSEY',
    'NM': 'NEW MEXICO', 'NY': 'NEW YORK', 'NC': 'NORTH CAROLINA', 'ND': 'NORTH DAKOTA',
    'OH': 'OHIO', 'OK': 'OKLAHOMA', 'OR': 'OREGON', 'PA': 'PENNSYLVANIA',
    'RI': 'RHODE ISLAND', 'SC': 'SOUTH CAROLINA', 'SD': 'SOUTH DAKOTA',
    'TN': 'TENNESSEE', 'TX': 'TEXAS', 'UT': 'UTAH', 'VT': 'VERMONT', 'VA': 'VIRGINIA',
    'WA': 'WASHINGTON', 'WV': 'WEST VIRGINIA', 'WI': 'WISCONSIN', 'WY': 'WYOMING',
    'A0': 'ALBERTA', 'A1': 'BRITISH COLUMBIA', 'A2': 'MANITOBA', 'A3': 'NEW BRUNSWICK',
    'A4': 'NEWFOUNDLAND', 'A5': 'NOVA SCOTIA', 'A6': 'ONTARIO', 'A7': 'PRINCE EDWARD ISLAND',
    'A8': 'QUEBEC', 'A9': 'SASKATCHEWAN', 'B0': 'YUKON TERRITORY',
    'D4': 'GERMANY', 'G6': 'NETHERLANDS', 'H2': 'SWITZERLAND', 'L8': 'UNITED KINGDOM',
    'Z4': 'ISRAEL'
}
# Whole-line "Item 1.", "Item 7A." style headings (10-K/10-Q items); the
# \u00A0 allows non-breaking spaces between "Item" and the number.
ITEM_HEADING = re.compile(
    r'^\s*Item[\s\u00A0]+\d+[A-Za-z]?\.[^.]*\s*$', re.I
)
# "(a) ...", "(1) ..." sub-item headings.
SUBITEM_HEAD = re.compile(r'^\s*\([A-Za-z0-9]+\)\s+.+$')
_sup_re = re.compile(r'(.*?) ', re.I | re.S)
# Whole-line "PART I", "PART II" ... section headings (Roman numerals).
PART_HEADING = re.compile(r'^\s*PART\s+[IVXLC]+\b.*\s*$', re.I)
# One financial-statement leader row: a text label, a run of dots or wide
# spacing, then a numeric value.  NOTE(review): the named-group syntax was
# corrupted to "(?P[...]" (the angle-bracketed names were stripped by an
# HTML artifact, leaving a regex syntax error); the group names below are
# reconstructed — confirm against the original source.
DOT_ROW = re.compile(
    r"""^\s*
    (?P<label>[A-Za-z].*?)
    (?:\.{2,}|\s{2,})\s*
    (?P<value>[$()\-0-9,.\s]+)
    \s*$
    """,
    re.X,
)
# Line that plausibly starts a column header: a year or a 3+ letter word
# followed somewhere by a 2+ space gap.
HEADER_HINT_RE = re.compile(r"^\s*(\d{4}|[A-Za-z]{3,}).*\s{2,}")
# SEC EDGAR SGML layout markers to strip from plain-text filings.
# NOTE(review): the original pattern survived only as r"|" (both
# angle-bracketed tag literals were stripped by an HTML artifact); r"|"
# matches the empty string at every position, so it is reconstructed here
# as the customary EDGAR page/column markers — confirm against the original.
SGML_TAG_RE = re.compile(r"<PAGE>|<C>")
def check_timeout(start_time: float, time_limit_s: int, stage_name: str):
    """
    Checks if the elapsed time has exceeded the limit.
    Returns a placeholder string on timeout, otherwise returns None.
    """
    elapsed = time.time() - start_time
    if elapsed <= time_limit_s:
        return None
    print(f"[timeout] {stage_name} exceeded {time_limit_s // 60} minutes. Stopping.")
    return "\n\n\n\n"
def _sup_to_caret(txt: str) -> str:
    """Rewrite superscript markup as ^foo^ (markdown-it-sup); non-string
    inputs are coerced with str() and returned unmodified."""
    if isinstance(txt, str):
        return _sup_re.sub(r'^\1^', txt)
    return str(txt)
def df_to_multimarkdown(df: pd.DataFrame) -> str:
    """
    Ultra-compact MultiMarkdown table with intelligent merging for both
    rowspan and colspan based on unique identifiers. This version correctly
    handles complex nested and overlapping spans.

    Cells carry ##ROWSPAN_id##/##COLSPAN_id## markers (see ORDER_I/ORDER_II);
    cells sharing an id are merged into one span whose visible text is the
    longest member, with '^^' marking vertical continuation and trailing '|'
    repetition marking horizontal spans (MultiMarkdown syntax).
    """
    if df.empty:
        return ""
    df = drop_tag_only_rows_cols(df, cols_only=True)
    COLSPAN_MARKER = "##__COLSPAN__##"
    ROWSPAN_MARKER = "^^"
    proc = df.copy().astype(str).fillna('')
    # span_ids[r, c] = {'row': rowspan-id or None, 'col': colspan-id or None}
    span_ids = pd.DataFrame(index=df.index, columns=df.columns, dtype=object)
    rowspan_pattern = re.compile(r'##ROWSPAN_(\w+)##')
    colspan_pattern = re.compile(r'##COLSPAN_(\w+)##')
    # Pass 1: harvest span ids and strip the markers from the visible text.
    for r in range(proc.shape[0]):
        for c in range(proc.shape[1]):
            cell_text = proc.iat[r, c]
            row_match = rowspan_pattern.search(cell_text)
            col_match = colspan_pattern.search(cell_text)
            span_ids.iat[r, c] = {
                'row': row_match.group(1) if row_match else None,
                'col': col_match.group(1) if col_match else None,
            }
            proc.iat[r, c] = rowspan_pattern.sub('', colspan_pattern.sub('', cell_text)).strip()
    # Pass 2: for each unvisited anchor cell, grow the merge rectangle down
    # (same row-id) and right (same col-id), keep the longest member text in
    # the anchor, and overwrite the remainder with continuation markers.
    visited = set()
    for r_start in range(proc.shape[0]):
        for c_start in range(proc.shape[1]):
            if (r_start, c_start) in visited:
                continue
            start_ids = span_ids.iat[r_start, c_start]
            r_id = start_ids.get('row')
            c_id = start_ids.get('col')
            r_end, c_end = r_start, c_start
            if r_id:
                # Extend downward while the rowspan id continues.
                for r in range(r_start + 1, proc.shape[0]):
                    if span_ids.iat[r, c_start].get('row') == r_id:
                        r_end = r
                    else: break
            if c_id:
                # Extend rightward while the colspan id continues.
                for c in range(c_start + 1, proc.shape[1]):
                    if span_ids.iat[r_start, c].get('col') == c_id:
                        c_end = c
                    else: break
            longest_content = ""
            for r_iter in range(r_start, r_end + 1):
                for c_iter in range(c_start, c_end + 1):
                    current_content = str(proc.iat[r_iter, c_iter])
                    if len(current_content.strip()) > len(longest_content.strip()):
                        longest_content = current_content
            proc.iat[r_start, c_start] = longest_content
            for r_block in range(r_start, r_end + 1):
                for c_block in range(c_start, c_end + 1):
                    if (r_block, c_block) in visited:
                        continue
                    visited.add((r_block, c_block))
                    if (r_block, c_block) == (r_start, c_start):
                        continue
                    # First column of a lower row continues vertically ('^^');
                    # every other covered cell is a horizontal filler.
                    if r_block > r_start:
                        if c_block == c_start:
                            proc.iat[r_block, c_block] = ROWSPAN_MARKER
                        else:
                            proc.iat[r_block, c_block] = COLSPAN_MARKER
                    elif c_block > c_start:
                        proc.iat[r_block, c_block] = COLSPAN_MARKER
    # Drop rows that consist solely of em-dashes / vertical continuations.
    if not proc.empty:
        keep_mask = proc.apply(lambda row: any(str(cell).strip() not in ('—', '^^') for cell in row), axis=1)
        proc = proc[keep_mask].reset_index(drop=True)
    def row_md(series: pd.Series) -> str:
        """Renders a pandas Series into a clean MultiMarkdown table row."""
        cells = series.fillna('').tolist()
        out = ["|"]
        i = 0
        while i < len(cells):
            cell_raw = cells[i]
            # Consume trailing colspan fillers; emit one '|' per spanned col.
            j = i
            while j + 1 < len(cells) and cells[j + 1] == COLSPAN_MARKER:
                j += 1
            col_span = j - i + 1
            cell_text = _sup_to_caret(cell_raw)
            out.append(f" {cell_text} ")
            out.append("|" * col_span)
            i = j + 1
        return "".join(out)
    header_cells = [_sup_to_caret(str(c)) for c in df.columns]
    header = "| " + " | ".join(header_cells) + " |"
    divider = "|" + "|".join(["---"] * len(df.columns)) + "|"
    body = [row_md(proc.iloc[r]) for r in range(len(proc))]
    return "\n".join([header, divider, *body])
def _parse_md_table_to_df(table_str: str) -> pd.DataFrame:
"""
Parses a standard Markdown table string back into a DataFrame,
correctly handling multi-line headers by skipping the separator line.
"""
lines = table_str.strip().split('\n')
rows_of_cells = []
for line in lines:
if not line.strip().startswith('|'):
continue
cells = [cell for cell in line.strip().strip('|').split('|')]
if len(cells) > 0 and all(re.fullmatch(r'[:\-\s]*', c) for c in cells) and '-' in line:
continue
rows_of_cells.append(cells)
if not rows_of_cells or len(rows_of_cells) < 1:
return pd.DataFrame()
header = rows_of_cells[0]
num_cols = len(header)
data = []
for row in rows_of_cells[1:]:
if len(row) < num_cols:
row.extend([''] * (num_cols - len(row)))
data.append(row[:num_cols])
return pd.DataFrame(data, columns=header)
def convert_all_tables_to_mmd(markdown_content: str) -> str:
    """
    Locate every standard Markdown table enclosed between '---' delimiter
    lines in the final output and rewrite it in MultiMarkdown form, keeping
    the delimiters intact.
    """
    table_pattern = re.compile(
        r'^\s*---\s*$'
        r'([\s\S]*?)'
        r'^\s*---\s*$',
        re.MULTILINE
    )

    def _uniquify(cols):
        # Deduplicate header labels positionally: repeats get a "__n" suffix.
        counts = {}
        result = []
        for label in ["" if c is None else str(c) for c in cols]:
            occurrence = counts.get(label, 0)
            result.append(f"{label}__{occurrence}" if occurrence else label)
            counts[label] = occurrence + 1
        return result

    def replacer(match):
        df = _parse_md_table_to_df(match.group(1))
        if df.empty:
            return match.group(0)
        if any(str(c).strip() for c in df.columns):
            # Demote the header into the first body row so the MMD renderer
            # emits it as data; the column labels become blank.
            original_labels = ["" if c is None else str(c) for c in df.columns]
            unique_labels = _uniquify(original_labels)
            df = df.copy()
            df.columns = unique_labels
            header_row = pd.DataFrame([original_labels], columns=unique_labels)
            df = pd.concat([header_row, df], ignore_index=True, sort=False)
            df.columns = [''] * df.shape[1]
        mmd_table = df_to_multimarkdown(df)
        return f"\n---\n\n{mmd_table}\n\n---\n"

    return table_pattern.sub(replacer, markdown_content)
def reorder(df: pd.DataFrame, order) -> pd.DataFrame:
    """Return *df* with columns arranged per *order*; columns not listed keep
    their relative position after the ordered ones. A MultiIndex header is
    flattened in place first (levels joined by spaces)."""
    if isinstance(df.columns, pd.MultiIndex):
        df.columns = [' '.join(map(str, level_values)).strip() for level_values in df.columns]
    ordered = [name for name in order if name in df.columns]
    remainder = [name for name in df.columns if name not in ordered]
    return df[ordered + remainder]
_PAREN_NUM_RE = re.compile(r'\((\$?)([\d][\d,]*)\)')
def _strip_commas_in_paren(val):
if isinstance(val, str):
return _PAREN_NUM_RE.sub(
lambda m: f"({m.group(1)}{m.group(2).replace(',', '')})",
val,
)
return val
def _collapse_newlines(s: str) -> str:
"""Turn hard new-lines inside a cell into a visible (or a space)."""
return re.sub(r'\s*\n\s*', ' ', s)
def read_html(path) -> str:
    """Load *path* (a pathlib-style object) as bytes and return its text
    normalized to clean Unicode via normalize_text_markup."""
    raw_bytes = path.read_bytes()
    return normalize_text_markup(raw_bytes)
def is_centered(element) -> bool:
    """
    Decide whether a BeautifulSoup element is centered by walking the element
    and its ancestors: a <center> tag, an align="center" attribute, or a
    'text-align:center' style anywhere up the chain counts. Iterative on
    purpose so deeply nested soup never hits the recursion limit.
    """
    node = element
    while node and hasattr(node, 'name'):
        if node.name == 'center':
            return True
        if node.get('align', '').lower() == 'center':
            return True
        style_attr = node.get('style', '').lower()
        if 'text-align:center' in style_attr.replace(' ', ''):
            return True
        node = node.parent
    return False
_JS_CSS_RE = re.compile(r'''
^\s*(?:var|function)\b
| ^\s*[/]{2}
| ^\s*/\*
| ^\s*[.#][\w-]+\s*[{]
''', re.I | re.X)

def is_junk_text(text: str) -> bool:
    """Heuristically flag a line as unwanted XBRL/JS/CSS metadata rather
    than document content."""
    if _JS_CSS_RE.match(text):  # JS/CSS fragments
        return True
    if re.fullmatch(r'\d{4}-\d{2}-\d{2}', text):  # bare ISO date
        return True
    if re.fullmatch(r'\d{6,10}', text):  # CIK-like numeric id
        return True
    if text == 'us-':  # orphaned taxonomy prefix
        return True
    if re.fullmatch(r'([a-zA-Z0-9\-]+:[a-zA-Z0-9\.]+\s*)+', text):  # namespaced tags
        return True
    return False
_UNCLOSED_CELL_RE = re.compile(
r'^\s*\$?\([\d,]+(?:\.\d+)?\s*$'
)
def _close_unclosed_paren(val):
"""Add a missing ')' only when the *whole* cell is an unclosed value."""
if isinstance(val, str) and _UNCLOSED_CELL_RE.match(val):
return val.rstrip() + ')'
return val
def drop_pctless_dupes(df: pd.DataFrame) -> pd.DataFrame:
    """
    When two adjacent columns hold the same numbers and differ only in that
    one carries a '%' suffix, keep the %-bearing column and drop the other.
    Negatives rendered as "(4)" / "(4)%" are handled.
    """
    doomed = set()

    def _canonical(col: pd.Series) -> pd.Series:
        # Strip parentheses, whitespace, percent signs and em-dashes so the
        # underlying digits can be compared directly.
        return (col.fillna('')
                   .astype(str)
                   .str.replace(r'[()\s%]', '', regex=True)
                   .str.replace('—', ''))

    for left in range(len(df.columns) - 1):
        right = left + 1
        if left in doomed or right in doomed:
            continue
        series_left = df.iloc[:, left]
        series_right = df.iloc[:, right]
        if _canonical(series_left).equals(_canonical(series_right)):
            pct_left = series_left.astype(str).str.contains('%').sum()
            pct_right = series_right.astype(str).str.contains('%').sum()
            # Keep whichever side shows more percent signs.
            doomed.add(left if pct_left < pct_right else right)
    return df.iloc[:, [idx for idx in range(df.shape[1]) if idx not in doomed]]
DOLLAR_RE = re.compile(r'^\(\s*\$|\$\s*')

def drop_dollarless_dupes(df: pd.DataFrame) -> pd.DataFrame:
    """
    When two *adjacent* columns hold the same numbers and differ only in a
    leading '$', keep the $-bearing column and drop the other. Negatives
    shown as "($4)" / "(4)" are handled.
    """
    doomed = set()

    def _canonical(col: pd.Series) -> pd.Series:
        # Drop parentheses, whitespace, dollar signs, commas and em-dashes
        # before comparing the raw digits.
        return (col.fillna('')
                   .astype(str)
                   .str.replace(r'[()\s$,]', '', regex=True)
                   .str.replace('—', ''))

    for left in range(len(df.columns) - 1):
        right = left + 1
        if left in doomed or right in doomed:
            continue
        series_left = df.iloc[:, left]
        series_right = df.iloc[:, right]
        if _canonical(series_left).equals(_canonical(series_right)):
            dollars_left = series_left.astype(str).str.match(DOLLAR_RE).sum()
            dollars_right = series_right.astype(str).str.match(DOLLAR_RE).sum()
            # Keep whichever side shows more dollar signs.
            doomed.add(left if dollars_left < dollars_right else right)
    return df.iloc[:, [idx for idx in range(df.shape[1]) if idx not in doomed]]
def _shift_colx_into_named(df: pd.DataFrame) -> pd.DataFrame:
    # Moves stray values out of auto-generated "Col N" spill-over columns into
    # the first blank slot of a real (named, non-"Label") column on the same
    # row, then drops any "Col N" columns left completely empty.
    # NOTE(review): mutates *df* in place (df.at writes / inplace renames)
    # before the final drop builds a new frame, so a caller holding the
    # original object keeps the cell-level edits even though the returned
    # frame is a different object.
    colx = [c for c in df.columns if str(c).startswith("Col ")]
    real = [c for c in df.columns if c not in colx and c != "Label"]
    if not colx:
        return df
    if not real:
        # No named columns to receive values: just blank out the synthetic labels.
        df.rename(columns={c: "" for c in colx}, inplace=True)
        return df
    for idx in df.index:
        stash = []
        for c in colx:
            val = df.at[idx, c]
            if pd.notna(val) and str(val).strip():
                stash.append(val)
            # Cell is cleared whether or not it held a value.
            df.at[idx, c] = ""
        if not stash:
            continue
        # Left-to-right fill: each stashed value lands in the first empty
        # named cell of this row.
        for c in real:
            if not stash:
                break
            if pd.isna(df.at[idx, c]) or str(df.at[idx, c]).strip() == "":
                df.at[idx, c] = stash.pop(0)
    to_drop = []
    for c in colx:
        if df[c].replace("", np.nan).isna().all():
            to_drop.append(c)
    df = df.drop(columns=to_drop)
    # Surviving "Col N" columns still hold data; hide their synthetic label.
    df.rename(columns={c: "" for c in colx if c not in to_drop}, inplace=True)
    return df
_BLANK_RE = re.compile(r'^\s*$|^\s*[—–-]+\s*$')
def _is_blank(val) -> bool:
"""
True if *val* is NaN, empty, whitespace or just dashes (even with
stray spaces/NBSPs wrapped around).
"""
if val is None or (isinstance(val, float) and np.isnan(val)):
return True
return bool(_BLANK_RE.fullmatch(str(val)))
def drop_header_only_cols(df: pd.DataFrame) -> pd.DataFrame:
"""
Remove any column whose *body* (rows 1…end) is completely blank.
Works even with duplicate column names because we slice by position.
"""
if df.shape[1] < 2 or df.shape[0] < 2:
return df
body = df.iloc[2:]
keep = ~body.applymap(_is_blank).all(axis=0).to_numpy()
return df.iloc[:, keep]
def _drop_header_and_empty_cols(df: pd.DataFrame,
header_rows: int = 3,
min_blank_rows: int = 10) -> pd.DataFrame:
if df.shape[0] <= header_rows + min_blank_rows:
return df
if len(df.iloc[:header_rows + 1].replace('\u00A0', '', regex=False).replace(r'^[\s\u00A0]*[—–-]?[\s\u00A0]*$', np.nan, regex=True).to_string()) > 80:
return df
body = (
df.iloc[header_rows:]
.replace('\u00A0', '', regex=False)
.replace(r'^[\s\u00A0]*[—–-]?[\s\u00A0]*$', np.nan, regex=True)
)
blank_counts = body.isna().sum(axis=0)
drop_mask = (blank_counts == len(body)) & (blank_counts >= min_blank_rows)
return df.loc[:, ~drop_mask]
def _late_drop_blank_header_subset_cols(df: pd.DataFrame,
                                        header_rows: int = 2,
                                        min_blank_rows: int = 3) -> pd.DataFrame:
    """
    Late cleanup for empty spacer columns whose header is just a split-out
    subset of an adjacent populated header column.
    This targets tables where symbol/header merges leave behind an empty body
    column carrying only partial header text, while preserving intentionally
    blank columns that have distinct headers.
    """
    # Only bother with tables wide/tall enough for the heuristic to be safe.
    if df.shape[1] < 3 or df.shape[0] <= header_rows + min_blank_rows:
        return df
    # Body with NBSPs removed and blank/dash-only cells coerced to NaN,
    # so notna() below counts only real content.
    body = (
        df.iloc[header_rows:]
        .replace('\u00A0', '', regex=False)
        .replace(r'^[\s\u00A0]*[—–-]?[\s\u00A0]*$', np.nan, regex=True)
    )
    nonblank_counts = body.notna().sum(axis=0).to_numpy()
    header = df.iloc[:header_rows].copy()
    def _norm_header_cell(val) -> str:
        # Collapse placeholder tags and whitespace so header fragments compare
        # on visible text alone.
        # NOTE(review): the first .replace swaps a space-like char for a space
        # — likely an NBSP literal lost in transit; confirm against original.
        if not isinstance(val, str):
            return '' if val is None else str(val)
        text = normalize_for_symbol_check(val)
        text = text.replace(' ', ' ').replace('##NEWLINE##', ' ')
        return re.sub(r'\s+', ' ', text).strip()
    norm_header = header.applymap(_norm_header_cell)
    keep_mask = np.ones(df.shape[1], dtype=bool)
    # Only interior columns with an empty body and at least one populated
    # neighbor are candidates; first/last columns are never touched.
    for i in range(1, df.shape[1] - 1):
        if nonblank_counts[i] != 0:
            continue
        if nonblank_counts[i - 1] == 0 and nonblank_counts[i + 1] == 0:
            continue
        sub_parts = [str(norm_header.iat[r, i]).strip() for r in range(header_rows)]
        if not any(sub_parts):
            continue
        # Drop the blank column when each of its header fragments appears
        # inside the corresponding header cell of a populated neighbor.
        for neighbor_idx in (i - 1, i + 1):
            if nonblank_counts[neighbor_idx] == 0:
                continue
            super_parts = [str(norm_header.iat[r, neighbor_idx]).strip() for r in range(header_rows)]
            matches = True
            compared = False
            for sub_part, super_part in zip(sub_parts, super_parts):
                if not sub_part:
                    continue
                compared = True
                if not super_part or sub_part not in super_part:
                    matches = False
                    break
            if matches and compared:
                keep_mask[i] = False
                break
    return df.loc[:, keep_mask]
_BLANK_RE = re.compile(r'^\s*$|^\s*[—–-]+\s*$')
def _is_blank(val) -> bool:
if val is None or (isinstance(val, float) and np.isnan(val)):
return True
return bool(_BLANK_RE.fullmatch(str(val)))
def drop_adjacent_head_dupes(df: pd.DataFrame,
                             n_head: int = 3) -> pd.DataFrame:
    """
    Drop one of two adjacent columns whose first *n_head* header cells are
    identical (and not all blank), when their bodies are redundant.

    Rules for a header-duplicate pair (i, i+1), in order:
      * identical bodies (NaN treated as '') -> drop the right column;
      * otherwise, when >=90% of one column's distinct body values also
        appear in the other, drop the column with fewer non-blank cells
        (ties drop the left).

    The first column is skipped as a candidate when it looks like a label
    column (>75% of its body cells contain letters).

    Cleanup: removed dead code from the earlier version — the unused
    FILL_THRESHOLD / MAX_OVERLAP_PCT constants and a duplicated
    value-overlap computation. Observable behavior is unchanged.
    """
    if df.shape[1] < 2:
        return df
    start_col = 0
    if df.shape[0] > n_head:
        first_col_body = df.iloc[n_head:, 0].dropna().astype(str)
        if not first_col_body.empty:
            cells_with_letters = first_col_body.str.contains(r'[a-zA-Z]').sum()
            if (cells_with_letters / len(first_col_body)) > 0.75:
                start_col = 1
    keep_mask = np.ones(df.shape[1], dtype=bool)
    head = (df.iloc[:n_head, :]
            .fillna('')
            .astype(str)
            .apply(lambda col: col.str.strip()))
    body = df.iloc[n_head:, :]
    body_len = len(body)
    for i in range(start_col, df.shape[1] - 1):
        if not keep_mask[i]:
            continue
        header_col_i = head.iloc[:, i]
        header_col_j = head.iloc[:, i + 1]
        if header_col_i.equals(header_col_j) and not header_col_i.apply(_is_blank).all():
            body_col_i = body.iloc[:, i]
            body_col_j = body.iloc[:, i + 1]
            if body_col_i.fillna('').equals(body_col_j.fillna('')):
                keep_mask[i + 1] = False
                continue
            if body_len > 0:
                nb_i = (~body_col_i.apply(_is_blank)).sum()
                nb_j = (~body_col_j.apply(_is_blank)).sum()
                vals_i = set(body_col_i.dropna().astype(str).str.strip())
                vals_j = set(body_col_j.dropna().astype(str).str.strip())
                vals_i.discard('')
                vals_j.discard('')
                shared = len(vals_i & vals_j)
                overlap_i = shared / max(1, len(vals_i))
                overlap_j = shared / max(1, len(vals_j))
                if nb_i <= nb_j and overlap_i >= 0.90:
                    keep_mask[i] = False
                elif nb_j < nb_i and overlap_j >= 0.90:
                    keep_mask[i + 1] = False
    return df.loc[:, keep_mask]
def drop_visually_redundant_blank_cols(df: pd.DataFrame, header_rows: int = 2) -> pd.DataFrame:
    """
    Among columns sharing an identical header (first *header_rows* cells),
    drop the body-blank members: when some member of the group carries data,
    every blank twin goes; when the whole group is blank, keep only the first
    so the header still renders once. Groups with fully invisible headers are
    left alone.
    """
    if df.shape[1] < 2 or df.shape[0] < header_rows:
        return df
    header = df.iloc[:header_rows]
    body = df.iloc[header_rows:]
    if body.empty:
        return df
    # Group column positions by their (stringified) header tuple.
    col_groups = {}
    for i in range(df.shape[1]):
        key = tuple(header.iloc[:, i].fillna('').astype(str).tolist())
        if key not in col_groups:
            col_groups[key] = []
        col_groups[key].append(i)
    indices_to_drop = []
    for header_key, indices in col_groups.items():
        if len(indices) < 2:
            continue
        # Decide whether this group's header shows anything after stripping
        # layout placeholders and invisible characters.
        # NOTE(review): the '|| | ' run in the pattern below contains empty
        # alternatives — probably HTML like <br>/&nbsp; lost in transit;
        # confirm against the original source.
        header_has_visible_content = False
        for cell in header_key:
            cleaned_header = re.sub(
                r'##(?:ROWSPAN|COLSPAN)_\w+##|##NEWLINE##|##INDENT##|| | ',
                '',
                str(cell),
                flags=re.I,
            )
            cleaned_header = re.sub(r'[\u00A0\u200B-\u200D\u2060\u2063\uFEFF]+', '', cleaned_header)
            if cleaned_header.strip():
                header_has_visible_content = True
                break
        if not header_has_visible_content:
            continue
        blank_in_group = []
        non_blank_in_group = []
        for idx in indices:
            if body.iloc[:, idx].apply(_is_blank).all():
                blank_in_group.append(idx)
            else:
                non_blank_in_group.append(idx)
        if non_blank_in_group:
            # Some member carries data: all blank twins are redundant.
            indices_to_drop.extend(blank_in_group)
        elif blank_in_group:
            # Entire group blank: keep the first, drop the rest.
            indices_to_drop.extend(blank_in_group[1:])
    if not indices_to_drop:
        return df
    return df.drop(df.columns[sorted(list(set(indices_to_drop)))], axis=1)
# Placeholder tags injected by the table parser that carry layout only.
# NOTE(review): the '|| |' run below contains empty alternatives — this looks
# like HTML fragments (<br>/&nbsp;?) lost in transit; confirm against the
# original source before relying on the exact pattern.
_LAYOUT_SCAFFOLD_RE = re.compile(
    r'##(?:ROWSPAN|COLSPAN)_\w+##|##NEWLINE##|##INDENT##|| |'
    r'##(?:BOLD|ITALIC|U)_(?:START|END)_\d+##|##LINK_START_\d+__[^#]+##|##LINK_END_\d+##',
    re.I,
)
def _blank_layout_only_cell(cell):
    """
    Treat parser-introduced layout scaffolding as empty, while preserving
    real visible content such as superscripts, currency symbols, and text.
    """
    # Non-strings cannot hold scaffolding; return unchanged.
    if not isinstance(cell, str):
        return cell
    cleaned = _LAYOUT_SCAFFOLD_RE.sub('', cell)
    # Literal "nan"/"none" words are parser artifacts, not content.
    cleaned = re.sub(r'(?i)\b(?:nan|none)\b', '', cleaned)
    # Zero-width and NBSP characters are invisible; strip before the check.
    cleaned = re.sub(r'[\u00A0\u200B-\u200D\u2060-\u206F\uFEFF]+', '', cleaned)
    cleaned = re.sub(r'\s+', '', cleaned)
    # Nothing visible remains -> report the cell as empty; otherwise return
    # the original, untouched cell.
    if cleaned == '':
        return ''
    return cell
_YEAR_TAIL_RE = re.compile(r"\s*years$", flags=re.IGNORECASE)
def _norm(s):
s = str(s).strip()
s = _YEAR_TAIL_RE.sub("", s)
return s.strip()
def is_direct_subset(series_subset: pd.Series, series_superset: pd.Series) -> bool:
    """
    True when *series_subset* is strictly sparser than *series_superset* and
    every non-blank entry of the subset equals (after _norm) the entry at the
    same position in the superset.
    """
    subset_filled = ~series_subset.apply(_is_blank)
    superset_filled = ~series_superset.apply(_is_blank)
    # A subset must have strictly fewer populated cells than its superset.
    if subset_filled.sum() >= superset_filled.sum():
        return False
    normalized_subset = series_subset[subset_filled].apply(_norm)
    normalized_superset = series_superset[subset_filled].apply(_norm)
    return (normalized_subset == normalized_superset).all()
def drop_subset_columns(df: pd.DataFrame) -> pd.DataFrame:
    """
    Remove columns that are a "direct subset" of the column immediately
    beside them — typically spacer or remnant columns that duplicate a
    sliver of a neighbor's data.

    Args:
        df: The pandas DataFrame to clean.

    Returns:
        A new DataFrame with subset columns removed (input is not mutated).
    """
    if df.shape[1] < 2:
        return df
    doomed = set()
    for left in range(df.shape[1] - 1):
        right = left + 1
        # Never re-examine a column already marked for removal.
        if left in doomed or right in doomed:
            continue
        col_left = df.iloc[:, left]
        col_right = df.iloc[:, right]
        if is_direct_subset(col_right, col_left):
            doomed.add(right)
        elif is_direct_subset(col_left, col_right):
            doomed.add(left)
    if not doomed:
        return df
    return df.drop(df.columns[sorted(doomed, reverse=True)], axis=1)
# Pre-compiled patterns shared by the numeric-cell cleaner below.
# NOTE(review): SUP_HTML's pattern looks truncated — presumably it matched
# <sup ...>...</sup> markup before being mangled in transit; confirm against
# the original source.
SUP_HTML = re.compile(r']*>.*? ', flags=re.I)
HTML_TAGS = re.compile(r'<[^>]+>')  # any remaining HTML tag
ZERO_WIDTH = re.compile(r'[\u200B-\u200F\u202A-\u202E\u2060-\u206F\ufeff]')  # zero-width / bidi control chars
THOUSANDS_SEP = re.compile(r'(?<=\d)[,·\u00A0\u202F](?=\d)')  # separator sandwiched between digits
def _clean_numeric_cell_value(val):
    """
    Strip markup/placeholder noise from a cell and, when the remainder is
    numeric, return the original cell text with zero-width characters and
    thousands separators removed; otherwise return the value unchanged.
    """
    if not isinstance(val, str):
        return val
    # A lone backtick is an artifact cell; blank it.
    if val == "`":
        return ""
    # Remove every parser placeholder tag before probing for numeric content.
    # NOTE(review): .replace('','') is a no-op and the regex two lines below
    # looks truncated (probably <sup ...>...</sup>) — likely mangled in
    # transit; confirm against the original source.
    core = re.sub(
        r'##(SUP|/SUP|SUB|/SUB|BOLD_START_\d+|BOLD_END_\d+|U_START_\d+|U_END_\d+|ITALIC_START_\d+|ITALIC_END_\d+|ROWSPAN_\d+|COLSPAN_\d+|LINK_START_\d+__[^#]+|LINK_END_\d+)##',
        '', val
    ).replace('##NEWLINE##','').replace('##INDENT##','').replace('','').replace(' ','')
    core = re.sub(r']*>.*? ', '', core, flags=re.I)
    core = re.sub(r'<[^>]+>', '', core)
    core = ZERO_WIDTH.sub('', core)
    core = unicodedata.normalize('NFKC', core)
    core_stripped = core.strip()
    if not core_stripped:
        return val
    # Keep only the characters that matter for the numeric test.
    probe = re.sub(r'[,$()\s–—-]', '', core_stripped)
    is_numeric = probe.replace('.', '', 1).isdigit()
    if is_numeric:
        # Strip separators from the *original* text (minus zero-widths) so
        # formatting tags inside the cell survive.
        val_no_zw = ZERO_WIDTH.sub('', val)
        return THOUSANDS_SEP.sub('', val_no_zw)
    return val
def drop_exact_dup_cols(df: pd.DataFrame) -> pd.DataFrame:
    """Drops columns whose name and full contents repeat an earlier column."""
    seen_signatures = set()
    keep_positions = []
    for position, name in enumerate(df.columns):
        signature = (name, tuple(df.iloc[:, position].tolist()))
        if signature in seen_signatures:
            continue
        seen_signatures.add(signature)
        keep_positions.append(position)
    return df.iloc[:, keep_positions]
def get_first_data_row_index(df):
    """
    Heuristically locate the first body (data) row of a table: the max of
    (a) the first row whose leading column has content or italics, and
    (b) the first row that is not an all-header row, followed by a couple
    of special-case skips (bordered bold rows, units rows).
    """
    first_data_row_index = 0
    if not df.empty:
        first_col = df.iloc[:, 0]
        num_rows = len(first_col)
        # (a) first row whose leading cell is non-empty...
        first_non_empty_idx = num_rows
        for idx, val in first_col.items():
            if not is_cell_truly_empty(val):
                first_non_empty_idx = idx
                break
        # ...or whose row carries italic markup, whichever comes first.
        first_italic_idx = len(df)
        for idx, row in df.iterrows():
            if any('##ITALIC_START_' in str(cell) for cell in row):
                first_italic_idx = idx
                break
        first_col_label_idx = min(first_non_empty_idx, first_italic_idx)
        if first_col_label_idx == num_rows:
            first_col_label_idx = 0
        # (b) first row that does not look like a header row. A header cell
        # is empty OR (bold AND not numeric) — relies on `or` binding looser
        # than `and` in the lambda below.
        header_end_idx = 0
        for idx, row in df.iterrows():
            is_header_row = row.apply(lambda x:
                is_cell_truly_empty(x) or
                '##BOLD_START' in str(x) and
                not is_numeric_like(str(x))
            ).all()
            if is_header_row:
                continue
            else:
                header_end_idx = idx
                break
        first_col_label_idx = 0 if len(df) <= first_col_label_idx else first_col_label_idx
        header_end_idx = 0 if len(df) <= header_end_idx else header_end_idx
        first_data_row_index = max(first_col_label_idx, header_end_idx)
        if first_data_row_index < len(df):
            row_to_check = df.iloc[first_data_row_index]
            # NOTE(review): '"" in str(cell)' is always True — the border
            # marker string appears to have been lost in transit; confirm
            # against the original source.
            all_cells_have_border = all("" in str(cell) for cell in row_to_check)
            all_other_cells_are_bold = False
            if df.shape[1] > 1:
                cells_except_first = row_to_check.iloc[1:]
                all_other_cells_are_bold = all("##BOLD_START" in str(cell) for cell in cells_except_first)
            next_index_is_valid = (first_data_row_index + 1) < len(df)
            # A fully-bordered, all-bold row is still part of the header band.
            if all_cells_have_border and all_other_cells_are_bold and next_index_is_valid:
                first_data_row_index += 1
        if first_data_row_index < len(df):
            row_to_check = df.iloc[first_data_row_index]
            # Units rows like "(in millions ...)" count as header furniture.
            if '##(in millions' in row_to_check.to_string() or "(In millions of Canadian dollars)" in row_to_check.to_string():
                if (first_data_row_index + 1) < len(df):
                    first_data_row_index += 1
                else:
                    first_data_row_index = 0
    return first_data_row_index
def should_flag_as_token_only(column_series: pd.Series) -> bool:
    """
    Decide whether a column holds only stray closing tokens (')', ')%', …)
    and is therefore a candidate for removal. Columns whose '%' markers sit
    alongside real numeric percentages are deliberately spared, because they
    are needed later for merging.
    """
    populated = column_series.dropna()
    if populated.empty:
        return False
    ONLY_TOKS = {'(', ')', ')%', ')bp', ')##DOUBLE_ASTERISK##', 'months', 'years'}
    if not populated.isin(ONLY_TOKS).any():
        return False
    if populated.eq('%').any():
        numeric_percent_pattern = re.compile(r'^\s*[\d\.]+\s*%\s*$')
        if column_series.str.match(numeric_percent_pattern, na=False).any():
            return False
    return True
def _is_column_fully_ignorable(
    df: pd.DataFrame,
    col_to_check_idx: int,
    partner_col_idx: int,
    allow_dollar_sign: bool
) -> bool:
    """
    Helper function to determine if an entire column is redundant.
    This version uses a local "blank check" that does NOT treat dashes as blank.
    """
    _TRULY_BLANK_RE = re.compile(r'^\s*$')
    def _is_truly_blank_for_colspan(val) -> bool:
        """A local version of _is_blank that considers dashes as content."""
        if val is None or (isinstance(val, float) and np.isnan(val)):
            return True
        return bool(_TRULY_BLANK_RE.fullmatch(str(val)))
    # Placeholder/markup noise ignored when judging emptiness.
    # NOTE(review): the '|| | |' run contains empty alternatives — probably
    # HTML (<br>/&nbsp;?) lost in transit; confirm against the original.
    tag_re = re.compile(
        r'##(?:COLSPAN|ROWSPAN)_\d+##|##NEWLINE##|##INDENT##|| | |##(?:ITALIC|BOLD|U)_(?:START|END)_\d+##|\b(?:nan)\b',
        re.I
    )
    colspan_re = re.compile(r'##COLSPAN_(\w+)##')
    _ZW_RE = re.compile(r'[\u200B-\u200D\u2060-\u206F\uFEFF]')
    def _normalize(s: str) -> str:
        # Strip zero-width chars and placeholder tags, then trim.
        s = '' if s is None else str(s)
        s = _ZW_RE.sub('', s)
        s = tag_re.sub('', s)
        return s.strip()
    for row_idx in range(len(df)):
        cell_to_check = str(df.iat[row_idx, col_to_check_idx])
        partner_cell = str(df.iat[row_idx, partner_col_idx])
        cleaned_cell = _normalize(cell_to_check)
        # Blank content is ignorable; a lone '$' is too when allowed.
        is_content_ignorable = _is_truly_blank_for_colspan(cleaned_cell) or \
                               (allow_dollar_sign and cleaned_cell == '$')
        # Is this cell the continuation target of the partner's colspan
        # marker (the same marker id appears in both cells)?
        partner_colspan_match = colspan_re.search(partner_cell)
        is_active_colspan_target = (
            partner_colspan_match and
            partner_colspan_match.group(0) in cell_to_check
        )
        if is_active_colspan_target:
            if not is_content_ignorable:
                # A colspan target may still simply echo the partner's text.
                cleaned_partner_cell = _normalize(partner_cell)
                if cleaned_cell != cleaned_partner_cell:
                    return False
            continue
        if not is_content_ignorable:
            return False
    return True
def drop_active_colspan_empty_cols(df: pd.DataFrame, allow_dollar_sign: bool = False) -> pd.DataFrame:
    """
    Finds and removes columns that are entirely redundant using a robust
    positional mask. A column is redundant when each of its cells is either
    the target of an active colspan spilling over from the adjacent column,
    or is blank (optionally, a lone '$'). Both sides of each adjacent pair
    are inspected for the colspan link, and column labels are never
    consulted, so duplicate or odd labels are harmless.
    """
    if df.shape[1] < 2:
        return df
    keep_mask = np.ones(df.shape[1], dtype=bool)
    colspan_re = re.compile(r'##COLSPAN_(\w+)##')
    for left in range(df.shape[1] - 1):
        right = left + 1
        # A pair is "linked" when either side carries a colspan marker that
        # reappears in its neighbor's cell on the same row.
        linked = False
        for row in range(len(df)):
            left_cell = str(df.iat[row, left])
            right_cell = str(df.iat[row, right])
            left_marker = colspan_re.search(left_cell)
            if left_marker and left_marker.group(0) in right_cell:
                linked = True
                break
            right_marker = colspan_re.search(right_cell)
            if right_marker and right_marker.group(0) in left_cell:
                linked = True
                break
        if not linked:
            continue
        if keep_mask[right] and _is_column_fully_ignorable(df, right, left, allow_dollar_sign):
            keep_mask[right] = False
    return df.loc[:, keep_mask]
def normalize_for_symbol_check(val, remove_sups=True):
    """
    A helper to strip all placeholders and special tags for symbol checks.
    Includes a parameter to conditionally preserve superscript tags.
    """
    if not isinstance(val, str):
        return str(val)
    if remove_sups:
        # Strip every placeholder tag, superscripts/subscripts included.
        placeholder_pattern = r'##(SUP|/SUP|SUB|/SUB|BOLD_START_\d+|BOLD_END_\d+|U_START_\d+|U_END_\d+|ITALIC_START_\d+|ITALIC_END_\d+|ROWSPAN_\d+|COLSPAN_\d+|LINK_START_\d+__[^#]+|LINK_END_\d+)##'
    else:
        # Keep ##SUP##/##SUB## markers; strip everything else.
        placeholder_pattern = r'##(?!SUP|/SUP|SUB|/SUB)(?:BOLD_START_\d+|BOLD_END_\d+|U_START_\d+|U_END_\d+|ITALIC_START_\d+|ITALIC_END_\d+|ROWSPAN_\d+|COLSPAN_\d+|LINK_START_\d+__[^#]+|LINK_END_\d+)##'
    text = re.sub(placeholder_pattern, '', val)
    # NOTE(review): .replace('', '') is a no-op and .replace(" ", "") removes
    # ordinary spaces — both look like NBSP/HTML literals lost in transit;
    # confirm against the original source.
    text = text.replace('##NEWLINE##', '').replace('', '').replace("##INDENT##", "").replace(" ", "")
    if remove_sups:
        text = text.replace("", "").replace(" ", "")
    return text.strip()
def is_sup_only_column(column_series: pd.Series) -> bool:
    """
    True when every populated cell consists solely of superscript markup and
    at least one populated cell is non-empty after normalization.

    NOTE(review): the first branch of the pattern below looks truncated
    (probably matched <sup ...>...</sup> markup before being mangled in
    transit); confirm against the original source.
    """
    sup_only_pattern = re.compile(r'^(?:\s*(?:]*>.*? |##SUP##.*?##/SUP##)\s*)+$')
    for cell_value in column_series.dropna():
        # Normalize with remove_sups=False so the sup markers survive the check.
        normalized_val = normalize_for_symbol_check(cell_value, remove_sups=False)
        if normalized_val and not sup_only_pattern.fullmatch(normalized_val):
            return False
    return any(normalize_for_symbol_check(v, remove_sups=False) for v in column_series.dropna())
def clean_financial_df(df_to_clean: pd.DataFrame) -> pd.DataFrame:
df = df_to_clean.copy()
preserved_header_rows = None
keep = [True] + [not df.iloc[:, i].equals(df.iloc[:, i-1])
for i in range(1, df.shape[1])]
df = df.loc[:, keep]
df = drop_active_colspan_empty_cols(df)
df = df.replace(r' ', '', regex=True)
df = df.applymap(_clean_numeric_cell_value)
df.dropna(how='all', inplace=True)
df.reset_index(drop=True, inplace=True)
if isinstance(df.columns, pd.MultiIndex):
def _clean_multiindex_header_part(raw_part) -> str:
clean = str(raw_part).strip()
if not clean or re.fullmatch(r'Unnamed:\s*\d+(?:_level_\d+)?', clean):
return ""
clean = re.sub(r'##(?:ROWSPAN|COLSPAN)_\d+##', '', clean)
clean = re.sub(r'\s*##NEWLINE##\s*', '##NEWLINE## ', clean)
clean = re.sub(r'\s+', ' ', clean).strip()
clean = clean.replace(' ##NEWLINE##', '##NEWLINE##').replace('##NEWLINE## ', '##NEWLINE## ')
return clean.strip()
def _extract_preserved_header_rows(columns: pd.MultiIndex) -> List[List[str]]:
rows: List[List[str]] = []
for level in range(columns.nlevels):
header_row: List[str] = []
for col_idx, col_tuple in enumerate(columns):
raw_text = str(col_tuple[level]).strip()
clean_text = _clean_multiindex_header_part(raw_text)
colspan_match = re.search(r'##COLSPAN_(\d+)##', raw_text)
has_colspan_marker = bool(colspan_match)
colspan_marker_id = colspan_match.group(1) if colspan_match else None
has_rowspan_marker = bool(re.search(r'##ROWSPAN_\d+##', raw_text))
if level > 0:
prev_raw_text = str(columns[col_idx][level - 1]).strip()
prev_clean_text = _clean_multiindex_header_part(prev_raw_text)
prev_has_rowspan = bool(re.search(r'##ROWSPAN_\d+##', prev_raw_text))
else:
prev_clean_text = ""
prev_has_rowspan = False
if (
level > 0
and clean_text
and clean_text == prev_clean_text
and (has_rowspan_marker or prev_has_rowspan)
):
header_row.append("^^")
elif (
col_idx > 0
and clean_text
and has_colspan_marker
and clean_text == header_row[-1]
):
prev_same_level_raw_text = str(columns[col_idx - 1][level]).strip()
prev_colspan_match = re.search(r'##COLSPAN_(\d+)##', prev_same_level_raw_text)
prev_colspan_marker_id = prev_colspan_match.group(1) if prev_colspan_match else None
if prev_colspan_marker_id == colspan_marker_id:
header_row.append("##__COLSPAN__##")
else:
header_row.append(clean_text)
elif not clean_text and has_colspan_marker:
header_row.append("##__COLSPAN__##")
else:
header_row.append(clean_text)
rows.append(header_row)
while rows and all(str(cell).strip() in {"", "##__COLSPAN__##"} for cell in rows[-1]):
rows.pop()
return rows
def _flatten_multiindex_header(col_tuple) -> str:
parts = []
last_clean = None
for raw_part in col_tuple:
clean = _clean_multiindex_header_part(raw_part)
if not clean:
continue
if clean == last_clean:
continue
parts.append(clean)
last_clean = clean
return ' '.join(parts).strip()
if any('' in ' '.join(col) for col in list(df.columns)):
hdr = df.columns.to_frame(index=False).T
hdr.columns = range(df.shape[1])
body = df.copy()
body.columns = hdr.columns
df = pd.concat([hdr, body], ignore_index=True)
else:
preserved_header_rows = _extract_preserved_header_rows(df.columns)
df.columns = [_flatten_multiindex_header(col) for col in df.columns]
if preserved_header_rows:
df.attrs['preserved_header_rows'] = preserved_header_rows
df = df.replace(to_replace=r'^Unnamed.*$', value='', regex=True)
sup_re = re.compile(r'?sup\b[^>]*>', re.I)
def normalize_for_comparison(x):
"""
Removes sup tags, invisible characters, replaces non-breaking spaces and tags,
strips whitespace, normalizes ampersand spacing, and treats dash-only
cells as empty for comparison.
"""
if isinstance(x, str):
x = re.sub(r' ', ' ', x, flags=re.IGNORECASE)
x = re.sub(r'[\u2060-\u206F]', '', x)
x = sup_re.sub('', x)
x = x.replace('\u00A0', ' ')
x = re.sub(r'\s*&\s*', ' & ', x)
x = x.replace('##NEWLINE##', '')
x = x.replace('', '')
x = re.sub(r'##(SUP|/SUP|SUB|/SUB|BOLD_START_\d+|BOLD_END_\d+|U_START_\d+|U_END_\d+|ITALIC_START_\d+|ITALIC_END_\d+|ROWSPAN_\d+|COLSPAN_\d+|LINK_START_\d+__[^#]+|LINK_END_\d+)##', '', x)
x = x.replace("##INDENT##", "")
x = re.sub(r'\s+', ' ', x)
stripped_x = x.strip()
if stripped_x in ('—', '-', '–'):
return ''
return stripped_x
return x
df_for_comparison = df.copy().applymap(normalize_for_comparison)
df_for_comparison.columns = [normalize_for_comparison(c) for c in df_for_comparison.columns]
def apply_cleaning_and_sync(original_df, comparison_df, cleaning_func, *args, **kwargs):
comp_df_copy = comparison_df.copy()
prefixed_cols = [f"{i}___{col}" for i, col in enumerate(comp_df_copy.columns)]
comp_df_copy.columns = prefixed_cols
cleaned_prefixed_df = cleaning_func(comp_df_copy, *args, **kwargs)
kept_indices = [int(col.split('___')[0]) for col in cleaned_prefixed_df.columns]
new_original_df = original_df.iloc[:, kept_indices]
new_comparison_df = comparison_df.iloc[:, kept_indices]
return new_original_df, new_comparison_df
CLEAN_TAG = re.compile(r'\s*(?:##NEWLINE##| )\s*', flags=re.I)
ONLY_SYM = re.compile(r'^\s*([\$%\(\)]|%\)|\)%|\)$)\s*(?:##NEWLINE##| )?\s*$', flags=re.I)
def _strip_tag_only_symbols(val):
if isinstance(val, str) and ONLY_SYM.fullmatch(val):
return CLEAN_TAG.sub('', val).strip()
return val
df = df.applymap(_strip_tag_only_symbols)
df_for_comparison = df_for_comparison.applymap(_strip_tag_only_symbols)
dash_vals = ['–', '-', '—']
is_dash = lambda s: s.astype(str).str.strip().isin(dash_vals)
header_rows = 1
if df.shape[0] > header_rows:
i = 0
current_columns = df.columns.tolist()
while i < len(current_columns) - 2:
body_df = df.iloc[header_rows:]
col1 = df.iloc[:, i]
col2 = df.iloc[:, i + 1]
col3 = df.iloc[:, i + 2]
body_col1 = body_df.iloc[:, i]
body_col2 = body_df.iloc[:, i + 1]
body_col3 = body_df.iloc[:, i + 2]
c2_all_dashes = (not body_col2.dropna().empty) and is_dash(body_col2).all()
has_adjacent_dashes = (
(is_dash(body_col1) & is_dash(body_col2))
| (is_dash(body_col2) & is_dash(body_col3))
).any()
if c2_all_dashes and not has_adjacent_dashes:
rows_to_merge = is_dash(col2) & ~is_dash(col1) & ~is_dash(col3)
merged_values = (
col1.fillna('').astype(str) +
' – ' +
col3.fillna('').astype(str)
)
row_mask = rows_to_merge.to_numpy()
df.iloc[row_mask, i] = merged_values.loc[rows_to_merge].to_numpy()
keep_mask = np.ones(df.shape[1], dtype=bool)
keep_mask[[i + 1, i + 2]] = False
df = df.loc[:, keep_mask]
df_for_comparison = df_for_comparison.loc[:, keep_mask]
current_columns = df.columns.tolist()
continue
i += 1
if not df.empty and df.shape[0] > 0:
def _clean_header_cell(cell):
if pd.isna(cell): return cell
cleaned_cell = str(cell).strip()
return re.sub(r'^\s*(\d{4})\.0?\s*$', r'\1', cleaned_cell)
df.iloc[0] = df.iloc[0].apply(_clean_header_cell)
if not df_for_comparison.empty and df_for_comparison.shape[0] > 0:
df_for_comparison.iloc[0] = df_for_comparison.iloc[0].apply(_clean_header_cell)
mask = (df
.replace(r'(##COLSPAN_\d+##|)', '', regex=True)
.replace(r'[\s\u200b\u200c\u200d\u2060-\u2064\ufeff]+', '',
regex=True)
.fillna('')
.eq('')
.all(axis=1))
df = df[~mask].copy().reset_index(drop=True)
df_for_comparison = df_for_comparison[~mask].reset_index(drop=True)
def find_dest_col(df, row_idx, start_col_idx):
"""Return first non-blank column index scanning leftwards from start_col_idx."""
col = start_col_idx
while col >= 0:
val = df.iat[row_idx, col]
if not (pd.isna(val) or str(val).strip() == ''):
return col
col -= 1
return start_col_idx
df_to_string = df_for_comparison.to_string(index=False, header=False)
first_data_row_index = get_first_data_row_index(df)
first_data_row_index = first_data_row_index if first_data_row_index > 0 else 1
if "Intended Award Value:" not in df_to_string and "UnderwritingDiscountsandCommissions" not in df_to_string or bool(re.search(r'\d', df_to_string)):
right_merge_prefix_joiners = {
'$': '',
'£': '',
'¥': '',
'¥': '',
'�': '',
'(peso)': ' ',
}
current_cols = df.columns.tolist()
i = 0
while i < len(current_cols) - 1:
col, nxt = current_cols[i], current_cols[i + 1]
normalized_col = df[col].apply(normalize_for_symbol_check)
normalized_col_lower = normalized_col.astype(str).str.lower()
if normalized_col_lower.isin(right_merge_prefix_joiners).any():
is_symbol_only_column_body = False
col_body = normalized_col_lower.iloc[first_data_row_index:]
non_empty_body_vals = col_body.replace(r'(?i)^nan$', np.nan, regex=True).dropna()
non_empty_body_vals = non_empty_body_vals[non_empty_body_vals.astype(str).str.strip() != '']
if not non_empty_body_vals.empty and non_empty_body_vals.isin(right_merge_prefix_joiners).all():
is_symbol_only_column_body = True
if not is_symbol_only_column_body:
should_abort_merge = False
if i + 1 < len(current_cols):
nxt_col_name = current_cols[i + 1]
normalized_nxt_col = df[nxt_col_name].apply(normalize_for_symbol_check)
if normalized_nxt_col.str.contains('%', na=False).any():
currency_indices = normalized_col_lower[normalized_col_lower.isin(right_merge_prefix_joiners)].index
percent_indices = normalized_nxt_col[normalized_nxt_col.str.contains('%', na=False)].index
if not currency_indices.empty and not percent_indices.empty:
if currency_indices.max() < percent_indices.min():
should_abort_merge = True
if should_abort_merge:
i += 1
continue
made_a_merge = False
rows_to_clear = []
for idx in df.index[normalized_col_lower.isin(right_merge_prefix_joiners)]:
prefix_symbol = normalized_col.loc[idx]
prefix_key = normalized_col_lower.loc[idx]
joiner = right_merge_prefix_joiners.get(prefix_key, '')
next_cell_raw_value = str(df.loc[idx, nxt])
next_cell_normalized = normalize_for_symbol_check(next_cell_raw_value)
if next_cell_normalized.startswith(prefix_symbol):
continue
if not next_cell_normalized.startswith(prefix_symbol):
if next_cell_raw_value.strip() == "":
df.loc[idx, nxt] = f"{prefix_symbol}{joiner}—"
else:
df.loc[idx, nxt] = f"{prefix_symbol}{joiner}{next_cell_raw_value}".strip()
rows_to_clear.append(idx)
made_a_merge = True
if rows_to_clear:
df.loc[rows_to_clear, col] = ''
if made_a_merge:
df.drop(columns=[col], inplace=True)
df_for_comparison.drop(columns=[df_for_comparison.columns[i]], inplace=True)
current_cols = df.columns.tolist()
continue
i += 1
first_data_row_index = get_first_data_row_index(df)
first_data_row_index = first_data_row_index if first_data_row_index > 0 else 1
current_cols = df.columns.tolist()
df_for_comparison_cols = df_for_comparison.columns.tolist()
df = df.replace(r'(?i)^nan$', '', regex=True).fillna('')
i = 0
is_annotation_col_re = re.compile(
r"""^
\s*
( # Start of a repeating group for one or more tokens
(?:
# Alternative 1: Simple symbols like ), %, )bp from your original regex
\)|%|\)bp
|
# Alternative 2: The specific parenthesized footnote rule.
# This explicitly matches numbers from 0-49 OR letters (like (a), (iv)).
# It will NOT match (50).
\(\s*(?:[1-4]?\d|[a-z]+)\s*\)
|
# Alternative 3: Digits followed by a letter, with an optional paren
\)?\s*\d+[a-z]
|
# --- NEW, RESTRICTED ALTERNATIVE ---
# Alternative 4: A single letter from 'a' through 'n' for footnotes.
# This specific range prevents capturing common data markers like 'x' or 'o'.
[a-n]
)
\s* # Allow optional whitespace between tokens
)+ # End of the repeating group, must match at least once
$""",
re.VERBOSE | re.IGNORECASE
)
special_merge_symbols = {'%', '%#', ')', ')%', ')bp', ')##DOUBLE_ASTERISK##', 'months', '%]', ']%', '##SINGLE_ASTERISK##', ']', "§", "‡", "•]", "years"}
contains_footnote_text_re = re.compile(r"[a-z]", re.IGNORECASE)
is_ratio_table = 'ratio' in df.to_string(index=False, header=False).lower()
while i < len(current_cols) - 1:
col_to_check_idx = i + 1
col_body_series = df.iloc[first_data_row_index:, col_to_check_idx]
normalized_col_body = col_body_series.apply(normalize_for_symbol_check)
candidate_eval_mask = ~col_body_series.astype(str).str.contains(
r'##(?:COLSPAN|ROWSPAN)_\d+##',
regex=True,
na=False,
)
non_blank_normalized_body = (
normalized_col_body[candidate_eval_mask]
.replace('', np.nan)
.dropna()
)
if non_blank_normalized_body.empty:
i += 1
continue
contains_checkbox_current = normalized_col_body.str.contains(r'[☐☒]', na=False).any()
contains_checkbox_next = False
if i + 2 < len(current_cols):
next_col_body = df.iloc[first_data_row_index:, i + 2]
normalized_next_col_body = next_col_body.apply(normalize_for_symbol_check)
contains_checkbox_next = normalized_next_col_body.str.contains(r'[☐☒]', na=False).any()
is_candidate_for_merge = False
colspan_re = re.compile(r'##COLSPAN_(\w+)##')
is_potentially_symbol_col = True
for r_idx, norm_val in non_blank_normalized_body.items():
row_pos = df.index.get_loc(r_idx)
if is_annotation_col_re.fullmatch(norm_val) or norm_val in special_merge_symbols:
continue
raw_cell_value = str(df.iat[row_pos, col_to_check_idx])
has_sup_marker = '' in raw_cell_value or '##SUP##' in raw_cell_value
if has_sup_marker and re.search(r'\d', norm_val):
continue
is_colspan_target_exception = False
if i < len(current_cols):
left_cell = df.iat[row_pos, i]
current_cell = df.iat[row_pos, col_to_check_idx]
match = colspan_re.search(str(left_cell))
if match and match.group(0) in str(current_cell):
is_colspan_target_exception = True
if not is_colspan_target_exception:
is_potentially_symbol_col = False
break
if is_potentially_symbol_col:
is_candidate_for_merge = True
is_special_symbol_col = (
not non_blank_normalized_body.empty
and non_blank_normalized_body.isin(special_merge_symbols).all()
)
is_x_only_col = not non_blank_normalized_body.empty and (non_blank_normalized_body == 'x').all()
should_merge_x_conditionally = is_ratio_table and is_x_only_col
if (is_candidate_for_merge or is_special_symbol_col or should_merge_x_conditionally) and not contains_checkbox_current and not contains_checkbox_next:
contains_numeric_data = False
for r_idx, norm_val in non_blank_normalized_body.items():
row_pos = df.index.get_loc(r_idx)
if is_numeric_like(norm_val) and norm_val not in special_merge_symbols:
is_footnote_override = False
raw_cell_value = str(df.iat[row_pos, col_to_check_idx])
if '' in raw_cell_value or '##SUP##' in raw_cell_value or '' in raw_cell_value or '##SUB##' in raw_cell_value:
is_footnote_override = True
if not is_footnote_override:
contains_numeric_data = True
break
if contains_numeric_data:
i += 1
continue
if first_data_row_index > 0:
for h_idx in range(first_data_row_index):
raw_merge_val = df.iat[h_idx, i + 1]
if not pd.isna(raw_merge_val) and str(raw_merge_val).strip():
value_to_merge = str(raw_merge_val)
norm_cell_to_merge = normalize_for_symbol_check(value_to_merge)
is_header_a_symbol = is_annotation_col_re.fullmatch(norm_cell_to_merge) or norm_cell_to_merge in special_merge_symbols
if is_header_a_symbol:
target_cell_raw = df.iat[h_idx, i]
target_value = '' if pd.isna(target_cell_raw) else str(target_cell_raw)
if value_to_merge not in target_value:
df.iat[h_idx, i] = f"{target_value}{value_to_merge}".strip()
has_footnote_text = non_blank_normalized_body.str.contains(contains_footnote_text_re).any()
for r_idx in non_blank_normalized_body.index:
row_pos = df.index.get_loc(r_idx)
raw_merge_val = df.iat[row_pos, col_to_check_idx]
value_to_merge = '' if pd.isna(raw_merge_val) else str(raw_merge_val)
if has_footnote_text:
raw_target_val = df.iat[row_pos, i]
target_value = '' if pd.isna(raw_target_val) else str(raw_target_val)
if value_to_merge.strip() and value_to_merge not in target_value:
joiner = " "
df.iat[row_pos, i] = f"{target_value}{joiner}{value_to_merge}".strip()
else:
dest_col_idx = find_dest_col(df, r_idx, i)
raw_target_val = df.iat[row_pos, dest_col_idx]
target_value = '' if pd.isna(raw_target_val) else str(raw_target_val)
if value_to_merge.strip() and value_to_merge not in target_value:
value_to_merge = value_to_merge.lstrip()
m = re.search(r'(##COLSPAN_\d+##(?:)?) ?$', target_value)
if m:
token = m.group(1)
base = target_value[: -len(m.group(0))].rstrip()
merged = f"{base}{value_to_merge}"
if not merged.endswith(token):
merged = f"{merged}{token}"
df.iat[row_pos, dest_col_idx] = merged.strip()
else:
df.iat[row_pos, dest_col_idx] = f"{target_value.rstrip()}{value_to_merge}".strip()
keep_mask = np.ones(df.shape[1], dtype=bool)
keep_mask[col_to_check_idx] = False
df = df.loc[:, keep_mask]
df_for_comparison = df_for_comparison.loc[:, keep_mask]
current_cols = df.columns.tolist()
df_for_comparison_cols = df_for_comparison.columns.tolist()
continue
i += 1
has_paren = df.apply(
lambda col: (
col
.apply(normalize_for_symbol_check)
.replace('', np.nan)
.dropna()
.eq(')')
.any()
)
).astype(bool)
df = df.loc[:, ~has_paren]
df_for_comparison = df_for_comparison.loc[:, ~has_paren.values]
df = df.applymap(_close_unclosed_paren)
df_string = df.to_string()
first_data_row_index = get_first_data_row_index(df)
if len(df) > 2 and df.iloc[0, 0] != "x" and "•" not in df_string and "●" not in df_string and "☐" not in df_string and " %]" not in df_string and "##= ##" not in df_string and "▪" not in df_string and "□" not in df_string and "🗹" not in df_string and "☒" not in df_string:
ONLY_TOKS = {'(', ')', ')%', ')bp', '%'}
raw = df.astype('string')
norm = raw.applymap(normalize_for_symbol_check).astype('string')
norm_nonempty = norm.replace('', pd.NA)
is_only_tokens = norm.apply(should_flag_as_token_only)
clean = (raw
.replace(r'##NEWLINE##', '', regex=True)
.replace(r'##COLSPAN_\d+##', '', regex=True)
.replace(r'##ROWSPAN_\d+##', '', regex=True)
.replace(r'##BOLD_(?:START|END)_\d+##', '', regex=True)
.replace(r'##ITALIC_(?:START|END)_\d+##', '', regex=True)
.replace(r'##U_(?:START|END)_\d+##', '', regex=True)
.replace('$$', '', regex=False)
.applymap(lambda x: x.strip() if isinstance(x, str) else x)
)
clean = clean.replace(r'^\[\]$', '__keep__', regex=True)
cells = clean.astype('string')
has_letters = cells.apply(lambda col: col.str.contains(r'[A-Za-z]', na=False)).any()
has_numbers = cells.apply(lambda col: col.str.contains(r'\d', na=False)).any()
has_currency = cells.apply(lambda col: col.str.contains(r'[\$£¥¥]', na=False)).any()
keep_mask = (has_letters | has_numbers | has_currency) & ~is_only_tokens
df = df.loc[:, keep_mask]
df_for_comparison = df_for_comparison.loc[:, keep_mask.values]
layout_clean_start = get_first_data_row_index(df)
if 0 <= layout_clean_start < len(df):
df.iloc[layout_clean_start:] = df.iloc[layout_clean_start:].applymap(_blank_layout_only_cell)
df_for_comparison = df.copy().applymap(normalize_for_comparison)
df_for_comparison.columns = [normalize_for_comparison(c) for c in df_for_comparison.columns]
df, df_for_comparison = apply_cleaning_and_sync(df, df_for_comparison, drop_adjacent_head_dupes, n_head=2)
df, df_for_comparison = apply_cleaning_and_sync(df, df_for_comparison, drop_pctless_dupes)
df, df_for_comparison = apply_cleaning_and_sync(df, df_for_comparison, drop_dollarless_dupes)
df, df_for_comparison = apply_cleaning_and_sync(df, df_for_comparison, drop_subset_columns)
df = _shift_colx_into_named(df)
df, df_for_comparison = apply_cleaning_and_sync(df, df_for_comparison, drop_visually_redundant_blank_cols, header_rows=3)
df, df_for_comparison = apply_cleaning_and_sync(df, df_for_comparison, drop_visually_redundant_blank_cols, header_rows=2)
df, df_for_comparison = apply_cleaning_and_sync(df, df_for_comparison, _drop_header_and_empty_cols, header_rows=3, min_blank_rows=8)
df, df_for_comparison = apply_cleaning_and_sync(df, df_for_comparison, _drop_header_and_empty_cols, header_rows=2, min_blank_rows=3)
df, df_for_comparison = apply_cleaning_and_sync(df, df_for_comparison, _drop_header_and_empty_cols, header_rows=1, min_blank_rows=3)
df_for_comparison = df.copy().applymap(normalize_for_comparison)
df_for_comparison.columns = [normalize_for_comparison(c) for c in df_for_comparison.columns]
df, df_for_comparison = apply_cleaning_and_sync(
df,
df_for_comparison,
_late_drop_blank_header_subset_cols,
header_rows=2,
min_blank_rows=3,
)
df = df.applymap(lambda x: '—' if isinstance(x, str) and x.replace("\u2063", '').strip() == '' else x)
df = df.replace(r'^\s*-+\s*$', np.nan, regex=True)
df = df.dropna(how='all')
df = drop_active_colspan_empty_cols(df, allow_dollar_sign=True)
cols_to_drop = []
for i in range(df.shape[1] - 1, 0, -1):
col_name = df.columns[i]
col_series = df[col_name]
if is_sup_only_column(col_series):
left_col_name = df.columns[i - 1]
df[left_col_name] = df[left_col_name].astype(str) + col_series.fillna('').astype(str)
cols_to_drop.append(col_name)
if cols_to_drop:
df.drop(columns=cols_to_drop, inplace=True)
return df
def replace_checkbox_symbols(text: str) -> str:
    """
    Final cleanup pass that rewrites leftover checkbox glyphs, Wingdings
    look-alikes, and stray trailing letter markers into a uniform Markdown
    checkbox form ('[x]' / '[ ]').
    """
    if not isinstance(text, str):
        return text
    symbol_to_markdown = {
        '☒': '[x]', '☑': '[x]', '✓': '[x]', '✔': '[x]',
        '☐': '[ ]', '❑': '[ ]', '❒': '[ ]', '❏': '[ ]', '❐': '[ ]',
        'þ': '[x]', 'ý': '[x]',
        '¨': '[ ]', 'ü': '[ ]',
        'X': '[x]', 'x': '[x]',
        'O': '[ ]', 'o': '[ ]',
    }
    # Strip layout/formatting sentinels so a cell that is "just a symbol"
    # is recognised even when wrapped in markers.
    stripped = re.sub(
        r'##(SUP|/SUP|BOLD_START_\d+|BOLD_END_\d+|U_START_\d+|U_END_\d+|ITALIC_START_\d+|ITALIC_END_\d+|ROWSPAN_\d+|COLSPAN_\d+|LINK_START_\d+__[^#]+|LINK_END_\d+)##',
        '',
        text,
    )
    stripped = (
        stripped.replace('##NEWLINE##', '')
        .replace('\u00A0', '')
        .replace('\u2063', '')
        .replace('', '')
        .strip()
    )
    replacement = symbol_to_markdown.get(stripped)
    if replacement is not None:
        return text.replace(stripped, replacement)
    # Fall back to trailing single-letter markers: "... X" / "... o".
    text = re.sub(r'\s+[xX]\s*$', ' [x]', text)
    return re.sub(r'\s+[oO]\s*$', ' [ ]', text)
def convert_wingdings_boxes(soup: BeautifulSoup) -> None:
    """
    Replace Wingdings/Webdings glyphs with their Unicode equivalents.

    Any tag whose inline ``style`` or ``face`` attribute mentions one of the
    dingbat font families is looked up in WINGDINGS_MAP; when a mapping for
    the tag's text exists, the whole tag is swapped for the translated
    character.
    """
    # Order matters: "wingdings 3" / "wingdings 2" must be tested before
    # the plain "wingdings" substring.
    families = ("wingdings 3", "wingdings 2", "wingdings", "webdings")
    for tag in soup.find_all(True):
        hint = (tag.get("style", "") + tag.get("face", "")).lower()
        family = next((name for name in families if name in hint), None)
        if family is None:
            continue
        glyph = tag.get_text(strip=True).replace('##NEWLINE##', '')
        if not glyph:
            continue
        unicode_char = WINGDINGS_MAP.get(family, {}).get(glyph)
        if unicode_char is not None:
            tag.replace_with(NavigableString(unicode_char))
def add_superscript(text):
    """
    Detect common footnote markers (**, (1), (a), etc.) at the end of a
    string and append a trailing space after them; a lone single asterisk
    is deliberately left untouched. Non-string input is returned as-is.
    """
    if not isinstance(text, str):
        return text
    marker_re = re.compile(
        r'^(.*?)(\s*)((?:\*+|\(\d+\)|\([a-zA-Z]+\))(?:\*+|\(\d+\)|\([a-zA-Z]+\))*)$'
    )
    m = marker_re.match(text)
    if not m or not m.group(3):
        return text
    if m.group(3) == '*':
        # Single asterisks are ignored (often a data marker, not a footnote).
        return text
    return f"{m.group(1)}{m.group(2)}{m.group(3)} "
def remove_empty_bold_tags(soup: BeautifulSoup):
    """
    Drop every bold element whose text is empty once stripped, so the
    downstream markdown never contains "** **" artifacts.
    """
    empty_tags = [
        tag for tag in soup.find_all(['b', 'strong'])
        if not tag.get_text(strip=True)
    ]
    for tag in empty_tags:
        tag.decompose()
def merge_whitespace_tags(soup: BeautifulSoup):
    """
    Collapse inline tags that hold only whitespace: a single space is
    attached to the nearest preceding element and the whitespace-only tag
    is removed, so inter-element spacing survives text extraction.
    """
    for candidate in soup.find_all(['b', 'strong', 'i', 'em', 'u', 'span', 'font']):
        # Keep tags that have real text, or that are completely empty.
        if candidate.get_text(strip=True) or not candidate.get_text():
            continue
        # Walk back over any pure-whitespace text nodes to the real anchor.
        anchor = candidate.previous_sibling
        while isinstance(anchor, NavigableString) and not anchor.strip():
            anchor = anchor.previous_sibling
        if anchor:
            if isinstance(anchor, Tag):
                anchor.append(NavigableString(' '))
            elif isinstance(anchor, NavigableString):
                anchor.replace_with(NavigableString(str(anchor) + ' '))
        candidate.decompose()
# Sentinel markers injected earlier in the pipeline; stripped before the
# emptiness test in drop_tag_only_rows_cols below.
SENTINEL_RE = re.compile(
    r'(##(?:ROWSPAN|COLSPAN)_\d+##|##NEWLINE##|##INDENT##|| | )',
    re.I
)
def drop_tag_only_rows_cols(
    df: pd.DataFrame,
    skip_rows: int = 0,
    cols_only: bool = False
) -> pd.DataFrame:
    """Drop columns (and, unless cols_only=True, rows) that are empty once
    tag-like markers are stripped away.

    Args:
        df: Input DataFrame.
        skip_rows: Number of leading rows exempt from row-dropping.
        cols_only: When True, only columns are removed; rows are kept.

    Returns:
        The pruned DataFrame.
    """
    def _scrub(series: pd.Series) -> pd.Series:
        # Reduce each cell to the characters that count as "content".
        scrubbed = series.fillna('').astype(str)
        scrubbed = scrubbed.str.replace(r'(?i)\b(?:nan|none)\b', '', regex=True)
        scrubbed = scrubbed.str.replace(SENTINEL_RE, '', regex=True)
        scrubbed = scrubbed.str.replace('[\u00A0\u200B-\u200D\u2060\u2063\uFEFF]', '', regex=True)
        scrubbed = scrubbed.str.replace(r'\s+', '', regex=True)
        return scrubbed.str.replace('—', '')
    scrubbed_body = df.iloc[skip_rows:].apply(_scrub)
    col_mask = ~scrubbed_body.eq('').all(axis=0)
    if cols_only:
        return df.loc[:, col_mask.values]
    surviving = scrubbed_body.loc[:, col_mask]
    body_row_mask = ~surviving.eq('').all(axis=1)
    # The first skip_rows rows are kept unconditionally.
    full_row_mask = np.r_[np.ones(skip_rows, dtype=bool), body_row_mask.values]
    return df.loc[full_row_mask, col_mask.values]
def df_to_markdown(df: pd.DataFrame, is_clean: bool = False, disable_numparse: bool = False, is_legacy_form4_table1 = False, is_legacy_form4_table2 = False) -> str:
    """
    Converts a DataFrame to a Markdown string. This version includes a special
    check to identify and reformat 2x2 "footnote tables" into a single line of text.

    Args:
        df: Table to render; pipeline sentinels (##NEWLINE##, ##COLSPAN_n##,
            ##BOLD_START_n##, ...) may still be embedded in the cells.
        is_clean: Accepted for interface compatibility; not referenced in
            this body.
        disable_numparse: Forwarded to to_compact_markdown to suppress
            numeric reformatting of cells.
        is_legacy_form4_table1: When True, drops columns whose rows after the
            first are entirely NaN.
        is_legacy_form4_table2: Accepted for interface compatibility; not
            referenced in this body.

    Returns:
        Markdown text (or a plain-text line for footnote/list-like shapes);
        "" when the table is empty or rendering raises.

    NOTE(review): several string literals in this body look like they lost a
    non-ASCII marker character in transit (e.g. `'' in cell_content`,
    `to_replace=r"()"`, `.replace(' ', '')`, `r'(?i)?sup[^>]*>'`) — confirm
    against the original file before editing any of them.
    """
    preserved_header_rows = df.attrs.get('preserved_header_rows')
    # --- Special case: a 1x2 table whose marker cell is "(n)" is a footnote
    # line, not a real table; render it as "(n) text".
    if df.shape[0] == 1 and df.shape[1] == 2:
        marker = str(df.columns[0]).strip()
        content = str(df.columns[1]).strip()
        is_footnote_in_header = (
            re.fullmatch(r'\(\s*\d+\s*\)', marker) and
            all(_is_blank(val) for val in df.iloc[0])
        )
        if is_footnote_in_header:
            formatted_marker = add_superscript(marker)
            return f"{formatted_marker} {content}"
        # Footnote may also live in the single body row instead of the header.
        marker = str(df.iloc[0, 0]).strip()
        content = str(df.iloc[0, 1]).strip()
        if re.fullmatch(r'\(\s*\d+\s*\)', marker):
            formatted_marker = add_superscript(marker)
            return f"{formatted_marker} {content}"
    # --- Same footnote treatment for a 2x2 table with exactly one blank row.
    if df.shape[0] == 2 and df.shape[1] == 2:
        is_row0_blank = all(_is_blank(val) for val in df.iloc[0])
        is_row1_blank = all(_is_blank(val) for val in df.iloc[1])
        content_row = -1
        if is_row0_blank and not is_row1_blank:
            content_row = 1
        elif is_row1_blank and not is_row0_blank:
            content_row = 0
        if content_row != -1:
            marker = str(df.iloc[content_row, 0]).strip()
            content = str(df.iloc[content_row, 1]).strip()
            if re.fullmatch(r'\(\s*\d+\s*\)', marker):
                formatted_marker = add_superscript(marker)
                return f"{formatted_marker} {content}"
    try:
        # Blank out dash-only cells, then drop fully blank rows/columns.
        df = df.replace(r'^[–—-]\s*$', '', regex=True)
        df = (
            df.replace(r'^\s*$', np.nan, regex=True)
            .dropna(axis=1, how='all')
            .dropna(axis=0, how='all')
            .reset_index(drop=True)
        )
        if df.empty:
            return ""
        df = df.replace('nan', '', regex=False)
        df = drop_active_colspan_empty_cols(df, allow_dollar_sign=True)
        first_data_row_index = get_first_data_row_index(df)
        # --- Merge multi-row header fragments within each header column.
        # NOTE(review): `'' in cell_content` is always True for str cells —
        # presumably a lost marker character; as written every header cell
        # takes this branch.
        for col_idx in range(df.shape[1]):
            for row_idx in range(first_data_row_index):
                cell_content = str(df.iat[row_idx, col_idx])
                if '' in cell_content:
                    start_merge_row = 0
                    for k in range(row_idx - 1, -1, -1):
                        if '' in str(df.iat[k, col_idx]):
                            start_merge_row = k + 1
                            break
                    vals_to_merge = df.iloc[start_merge_row : row_idx + 1, col_idx].fillna('').astype(str).tolist()
                    clear_start_row = start_merge_row
                    # An italic-start fragment at the top is kept in place.
                    if vals_to_merge and '##ITALIC_START_' in vals_to_merge[0]:
                        vals_to_merge = vals_to_merge[1:]
                        clear_start_row += 1
                    merged_text = '##NEWLINE##'.join(vals_to_merge)
                    # Avoid duplicating a ROWSPAN/COLSPAN token that already
                    # appears elsewhere in the frame.
                    span_tags = re.findall(r'##(?:ROWSPAN|COLSPAN)_\d+##', merged_text)
                    is_duplicated_span = False
                    if span_tags:
                        df_as_string = df.to_string()
                        for tag in span_tags:
                            if df_as_string.count(tag) > 1:
                                is_duplicated_span = True
                                break
                    if (
                        not is_numeric_like(merged_text)
                        and not is_duplicated_span
                    ):
                        df.iat[row_idx, col_idx] = merged_text
                        for i in range(clear_start_row, row_idx):
                            df.iat[i, col_idx] = ''
        # NOTE(review): r"()" matches only the empty string, so this replace
        # is a no-op as written — possibly another lost marker character.
        df = df.replace(
            to_replace=r"()",
            value="",
            regex=True
        )
        # Rows/columns containing only whitespace, zero-width characters or
        # ##NEWLINE## sentinels are dropped.
        ZERO_WIDTH = r'\u200B\u200C\u200D\u2060\u2063\uFEFF'
        blank_pattern = rf'^(?:[\s\r\n{ZERO_WIDTH}\u00A0]|##NEWLINE##)+$'
        df.replace(blank_pattern, np.nan, regex=True, inplace=True)
        df.dropna(axis=0, how='all', inplace=True)
        df.dropna(axis=1, how='all', inplace=True)
        if is_legacy_form4_table1:
            # Legacy Form 4 table 1: columns with no body data are dropped.
            df = df.drop(columns=[
                col for col in df.columns
                if df[col].iloc[1:].isna().all()
            ])
        # Filer-status tables get a blank first row so the checkbox labels
        # are not promoted to a header.
        table_as_string = df.to_string().lower()
        keywords = ["large accelerated filer", "emerging growth company", "accelerated filer", "non-accelerated filer"]
        if any(keyword in table_as_string for keyword in keywords):
            num_columns = df.shape[1]
            null_header_df = pd.DataFrame([[''] * num_columns])
            null_header_df.columns = df.columns
            df = pd.concat([null_header_df, df], ignore_index=True)
        # Strip pandas' duplicate-column suffixes (".1".."." "9") that follow
        # a "##" marker in a column name.
        df.columns = [
            re.sub(r'(##)\.[1-9]$', r'\1', col) if isinstance(col, str) else col
            for col in df.columns
        ]
        # --- Fold small footnote-number columns (header value < 15, body
        # otherwise empty) into the cell to their left as a numeric suffix.
        if len(df.columns) > 1:
            cols_to_drop = []
            for i in range(1, len(df.columns)):
                raw = df.iat[0, i]
                if pd.isna(raw):
                    continue
                if isinstance(raw, (int, np.integer)):
                    x = int(raw)
                else:
                    txt = str(raw)
                    txt = re.sub(r'##(SUP|/SUP|SUB|/SUB|BOLD_START_\d+|BOLD_END_\d+|U_START_\d+|U_END_\d+|ITALIC_START_\d+|ITALIC_END_\d+|ROWSPAN_\d+|COLSPAN_\d+|LINK_START_\d+__[^#]+|LINK_END_\d+)##', '', txt)
                    txt = txt.replace('##NEWLINE##', '').replace(' ', '').strip()
                    m = re.match(r'^(\d+)$', txt)
                    x = int(m.group(1)) if m else None
                # Footnote numbers are assumed to stay below 15.
                if x is None or x >= 15:
                    continue
                if not df.iloc[:, i].iloc[1:].replace(r'^\s*$', np.nan, regex=True).isna().all():
                    continue
                left_col_index = i - 1
                left_val_raw = df.iat[0, left_col_index]
                left_val = '' if pd.isna(left_val_raw) else str(left_val_raw)
                df.iat[0, left_col_index] = f"{left_val.replace('##NEWLINE##', '').strip()}{x} "
                cols_to_drop.append(df.columns[i])
            if cols_to_drop:
                df.drop(columns=cols_to_drop, inplace=True)
        df = df.replace(r'^\$nan$', '$—', regex=True)
        # --- Demote the column index into the first body row and blank the
        # header (unless preserved header rows will be re-attached below).
        if not all(pd.isna(df.columns)):
            if preserved_header_rows:
                df.columns = [''] * df.shape[1]
            else:
                header = pd.DataFrame([df.columns], columns=df.columns)
                df = pd.concat([header, df], ignore_index=True)
                df.columns = [''] * df.shape[1]
        body = None
        # A strictly-increasing all-numeric first row (values < 200) is
        # treated as an auto-generated index row and removed. Note: a frame
        # with a single row leaves body=None and the function returns "".
        if len(df) > 1:
            row0 = df.iloc[0].dropna()
            vals = pd.to_numeric(row0, errors="coerce")
            auto_like = (
                vals.notna().all()
                and len(vals) > 0
                and vals.is_unique
                and vals.is_monotonic_increasing
                and (vals < 200).all()
            )
            if auto_like and len(df) >= 2:
                if len(df):
                    new_header = [''] * len(df.columns)
                    body = df.iloc[1:].reset_index(drop=True)
                    body.columns = new_header
                else:
                    body = pd.DataFrame(columns=df.columns)
            else:
                body = df.copy()
        if body is None:
            return ""
        bullet_chars = {'○', '•', '●', '*', '·', '◦', '➢', '-', '▪'}
        # Header-only 2-column bullet row renders as "<bullet> <text>".
        if body.empty and len(body.columns) == 2:
            bullet = str(body.columns[0]).strip()
            if bullet in bullet_chars:
                text = str(body.columns[1]).strip()
                return f"{bullet} {text}"
        if body.empty and body.columns.empty:
            return ""
        def _clean_header(col):
            """Removes a trailing period from numeric headers like '2024.'."""
            s = str(col).strip()
            if re.fullmatch(r'\d+\.', s):
                return s[:-1]
            return s
        new_cols = [_clean_header(col) for col in body.columns]
        body.columns = new_cols
        IND = "\u2063"  # NOTE(review): assigned but not referenced below
        body = body.applymap(_strip_commas_in_paren)
        body_shape = body.shape
        if body_shape[0] > 2:
            body = drop_tag_only_rows_cols(body)
        body_string = body.to_string().lower()
        # NOTE(review): `and` binds tighter than `or` — 'item'/'part' must
        # co-occur, the remaining keywords match alone; confirm intended.
        is_toc = 'item' in body_string and 'part' in body_string or ('governance' in body_string) or (" 1.0" in body_string) or ("page" in body_string) or ("financial statements" in body_string)
        if is_toc:
            # TOC-like page numbers lose a float artifact ("12.0" -> "12").
            body = body.astype(str).applymap(lambda x: re.sub(r'\.0$', '', x))
        def _clean_cell(val: object) -> str:
            """Strip sentinels/entities from one cell for the emptiness check."""
            s = html.unescape(str(val) if val is not None else "")
            s = re.sub(
                r'##(?:| |BOLD_START_\d+|BOLD_END_\d+|U_START_\d+|U_END_\d+|'
                r'ITALIC_START_\d+|ITALIC_END_\d+|ROWSPAN_\d+|COLSPAN_\d+|LINK_START_\d+__[^#]+|LINK_END_\d+)##', "", s
            )
            s = (s.replace("##NEWLINE##", "")
                 .replace("", "")
                 .replace("\u00A0", " ")
                 .replace("\u2063", "##INDENT##"))
            s = re.sub(r'(?i)?sup[^>]*>', "", s)
            s = re.sub(r'^\s*\^(.+?)\^\s*$', r'\1', s)
            s = re.sub(r'\s+', " ", s).strip()
            return s
        # Drop rows that are empty once cleaned.
        clean = body.applymap(_clean_cell)
        clean = clean.replace(r'^\s*$', "", regex=True)
        body = body[~(clean == "").all(axis=1)]
        # Pattern describing "this cell is just a list marker".
        list_marker_pattern = re.compile(
            r'^\s*(?:\[(?: |x)\]'
            r'|[ivxlcdm]+[.)]?'
            r'|\d+[.)]'
            r'|[a-z][.)]'
            r'|\([a-z0-9]+\)'
            r'|[-○•●·◦➢☐□☒⌧♦⧫▪]'
            r'|\#\#SINGLE_ASTERISK\#\#'
            r'|\#\#DOUBLE_ASTERISK\#\#'
            r'|\#\#TRIPLE_ASTERISK\#\#'
            r'|\#'
            r'|\s*\(?[a-z0-9]+\)?\s*(?: |)?'
            r')\s*$',
            re.IGNORECASE
        )
        def normalize_marker(s: str) -> str:
            """Reduce a marker cell to its bare text for pattern matching."""
            s = html.unescape(str(s))
            s = re.sub(
                r'##(?:BOLD_START_\d+|BOLD_END_\d+|U_START_\d+|U_END_\d+|'
                r'ITALIC_START_\d+|ITALIC_END_\d+|ROWSPAN_\d+|COLSPAN_\d+|LINK_START_\d+__[^#]+|LINK_END_\d+)##',
                '',
                s
            )
            s = s.replace('##NEWLINE##', '')
            s = s.replace('', '')
            s = s.replace('\u00A0', ' ')
            s = s.replace('\u2063', '')
            s = s.replace('##INDENT##', '')
            # NOTE(review): this removes ALL spaces — possibly a mangled
            # non-breaking/thin-space literal; confirm against the original.
            s = s.replace(' ', '')
            s = s.replace("##SINGLE_ASTERISK## ", "##SINGLE_ASTERISK##")
            s = re.sub(r'^\s*\^(.+?)\^\s*$', r'\1', s)
            s = re.sub(r'\s+', ' ', s).strip()
            return s
        # --- 3 columns shaped "<number> | . | <text>" become a numbered list.
        if body.shape[1] == 3:
            col1 = body.iloc[:, 0].astype(str).str.strip()
            col2 = body.iloc[:, 1].astype(str).str.strip()
            is_col1_all_digits = col1.str.isdigit().all()
            is_col2_all_periods = (col2 == '.').all()
            if is_col1_all_digits and is_col2_all_periods:
                list_items = []
                for index, row in body.iterrows():
                    number = row.iloc[0]
                    content = str(row.iloc[2]).strip()
                    list_items.append(f"{number}. {content}")
                return "\n\n".join(list_items) + "\n\n"
        # --- 2 columns with blank headers: marker + content list handling.
        if body.shape[1] == 2 and all(not str(c).strip() for c in body.columns):
            first_col = body.iloc[:, 0].astype(str).str.strip()
            non_empty_markers = first_col[first_col != '']
            if body.shape[0] == 1:
                # Single "Item N." row renders as a plain heading line.
                pattern = re.compile(
                    r'^(?:'
                    r'Item\s+\d+\s?\.'
                    r'|'
                    r'##BOLD_START_(\d+)##\s*Item\s+\d+\s?\.'
                    r'\s*##BOLD_END_\1##'
                    r'|'
                    r'(?:[1-9]|1[0-5])\.0'
                    r')$'
                )
                def is_valid(s) -> bool:
                    return bool(pattern.fullmatch(str(s)))
                if is_valid(body.iloc[0,0]):
                    return f"{str(body.iloc[0, 0]).replace('.0', '.')} {str(body.iloc[0, 1])}"
            if not non_empty_markers.empty and non_empty_markers.map(normalize_marker).str.fullmatch(list_marker_pattern).all():
                # NOTE(review): ALPHA_RE / ROMAN_RE are defined but unused here.
                ALPHA_RE = re.compile(r'^\(?[a-z]\)?\.?$', re.IGNORECASE)
                ROMAN_RE = re.compile(r'^\(?[ivxlcdm]+\)?\.?$', re.IGNORECASE)
                md_list_items = []
                for _, row in body.iterrows():
                    marker = str(row.iloc[0]).strip()
                    marker_norm = normalize_marker(marker)
                    content = str(row.iloc[1]).strip()
                    content = re.sub(r'\s*<(br)\s*/?>\s*$', '', content, flags=re.IGNORECASE).strip()
                    content = content.replace("##NEWLINE##", "")
                    # NOTE(review): trailing .replace(' ', '') strips all
                    # spaces from the item — likely a mangled literal; verify.
                    md_list_items.append(f"{marker} {content}".replace("##NEWLINE##", " ").replace(' ', ''))
                return "\n\n".join(md_list_items)
        # Single unlabeled cell: return its text directly.
        if body.shape == (1, 1) and all(not str(c).strip() for c in body.columns):
            cell_content = str(body.iloc[0, 0]).strip()
            cell_content = cell_content.strip()
            cell_content = re.sub(r'\s+', ' ', cell_content)
            if cell_content:
                return cell_content
        # Re-attach previously preserved header rows (padded/truncated to the
        # body's current width) before the final markdown conversion.
        if preserved_header_rows:
            normalized_header_rows: List[List[str]] = []
            target_width = body.shape[1]
            for row in preserved_header_rows:
                padded_row = list(row[:target_width])
                if len(padded_row) < target_width:
                    padded_row.extend([''] * (target_width - len(padded_row)))
                normalized_header_rows.append(padded_row)
            if normalized_header_rows:
                header_df = pd.DataFrame(normalized_header_rows, columns=body.columns)
                body = pd.concat([header_df, body], ignore_index=True)
        return to_compact_markdown(body, index=False, disable_numparse=disable_numparse)
    except Exception as e:
        # Broad boundary catch: table rendering must never abort the whole
        # document; log with traceback and emit nothing for this table.
        error_message = f"[TABLE PARSE ERROR]: {e}"
        print(f"[TABLE PARSE ERROR in {CURRENT_PROCESSING_FILE}]: {e}")
        logging.error(
            f"FILE: {CURRENT_PROCESSING_FILE} (error in df_to_markdown)\n"
            f"ERROR: {error_message}\n"
            f"TRACEBACK:\n{traceback.format_exc()}"
        )
        return ""
def normalize_dl_lists(soup: BeautifulSoup):
    """
    Flatten nested <dl> lists into a sequence of <p> tags, one per
    <dt>/<dd> pair. Lists are consumed deepest-first ("inside-out") so each
    paragraph receives an ##INDENT## prefix matching its nesting depth.
    """
    INDENT_CHAR = "##INDENT##"
    marker_re = re.compile(r'^\s*(?:[○•●·◦➢□☐☑☒🗷✓✔]|\d+\.\d*[A-Z]?|\d+\.|\([a-zA-Z0-9]+\))\s*$')
    def _is_list_like(dl):
        # Every direct <dt> must read as a list marker for the <dl> to count.
        dts = dl.find_all('dt', recursive=False)
        return bool(dts) and all(marker_re.match(dt.get_text(strip=True)) for dt in dts)
    while True:
        target, target_depth = None, -1
        for dl in soup.find_all('dl'):
            if not _is_list_like(dl):
                continue
            depth = len(list(dl.find_parents('dl')))
            if depth > target_depth:
                target_depth, target = depth, dl
        if target is None:
            break
        paragraphs = []
        for node in list(target.children):
            if node.name != 'dt':
                continue
            dd = node.find_next_sibling('dd')
            # Only pair a <dt> with the <dd> that immediately follows it.
            if not (dd and dd.previous_sibling is node):
                continue
            para = soup.new_tag("p")
            para.append(NavigableString(INDENT_CHAR * target_depth))
            for piece in list(node.children):
                para.append(piece.extract())
            para.append(NavigableString(" "))
            for piece in list(dd.children):
                para.append(piece.extract())
            paragraphs.append(para)
            node.decompose()
            dd.decompose()
        target.replace_with(*paragraphs)
def defragment_font_tags(soup: BeautifulSoup):
    """
    Glue adjacent <font> tags together (absorbing any pure-whitespace text
    node between them) so word fragments are not split during extraction,
    e.g. "<font>W</font><font>ord</font>" -> "<font>Word</font>".
    """
    for anchor in soup.find_all('font'):
        if not anchor.parent:
            # Already detached by an earlier merge in this loop.
            continue
        while True:
            neighbor = anchor.next_sibling
            if isinstance(neighbor, NavigableString) and not neighbor.strip():
                # Remove the whitespace-only node and look past it.
                gap = neighbor
                neighbor = gap.next_sibling
                gap.extract()
            if neighbor and neighbor.name == 'font':
                for piece in list(neighbor.contents):
                    anchor.append(piece.extract())
                neighbor.decompose()
            else:
                break
def unwrap_fragmenting_tags(soup: BeautifulSoup):
    """
    Replace each <small> tag with its raw text, merging that text into the
    preceding text node when one exists, by rebuilding the parent's child
    list from scratch for reliable merging.
    """
    for parent in {t.parent for t in soup.find_all('small') if t.parent}:
        rebuilt = []
        for node in list(parent.contents):
            if node.name == 'small':
                text = node.get_text()
                if rebuilt and isinstance(rebuilt[-1], NavigableString):
                    # Fuse with the previous text node to avoid a split word.
                    rebuilt[-1] = NavigableString(str(rebuilt[-1]) + text)
                else:
                    rebuilt.append(NavigableString(text))
            elif isinstance(node, (Tag, NavigableString)):
                rebuilt.append(node)
        parent.clear()
        for node in rebuilt:
            parent.append(node)
def md_table_2row_header(df: pd.DataFrame) -> str:
    """
    Render ``df`` as a markdown table whose header row is blank; the real
    header text is emitted as the first one or two body rows (main headers,
    then sub-headers when any column name contains a split point).
    """
    has_subs = any(" " in str(c) for c in df.columns)
    mains: list = []
    subs: list = []
    carry = ""
    for raw in df.columns:
        text = str(raw)
        if " " in text:
            main_part, sub_part = text.split(" ", 1)
        else:
            main_part, sub_part = text, ""
        main_part, sub_part = main_part.strip(), sub_part.strip()
        # Blank mains inherit the previous main header (merged-cell style).
        if main_part:
            carry = main_part
        else:
            main_part = carry
        mains.append(main_part or " ")
        subs.append(sub_part or " ")
    header_line = "| " + " | ".join(" " for _ in mains) + " |"
    divider = "| " + " | ".join("---" for _ in mains) + " |"
    rows = ["| " + " | ".join(mains) + " |"]
    if has_subs:
        rows.append("| " + " | ".join(subs) + " |")
    for _, record in df.iterrows():
        rows.append("| " + " | ".join(
            map(str, record.replace({np.nan: '—'}).tolist())) + " |")
    return "\n".join([header_line, divider, *rows])
def _form4_header_details_block(xml: BeautifulSoup, owner_node: BeautifulSoup, footnotes_map: dict) -> str:
    """
    Creates the full header section for a Form 4 as a single, unified
    Markdown table to match the original form's visual layout.

    Args:
        xml: Root of the parsed Form 4 XML document.
        owner_node: One ``reportingOwner`` element (a filing may contain several).
        footnotes_map: Footnote id -> "(n)" marker, used to annotate values.

    Returns:
        A three-column Markdown table holding header boxes 1-6.
    """
    issuer_node = xml.find("issuer")
    rel_node = owner_node.find("reportingOwnerRelationship")
    owner_name = get_value_with_footnote(owner_node, r"rptOwnerName", footnotes_map)
    addr_node = owner_node.find("reportingOwnerAddress")
    street1 = get_value_with_footnote(addr_node, "rptOwnerStreet1", footnotes_map) or ""
    street2 = get_value_with_footnote(addr_node, "rptOwnerStreet2", footnotes_map) or ""
    full_street = f"{street1} {street2}" if street2 else street1
    city = get_value_with_footnote(addr_node, "rptOwnerCity", footnotes_map) or ""
    state = get_value_with_footnote(addr_node, "rptOwnerState", footnotes_map) or ""
    zip_code = get_value_with_footnote(addr_node, "rptOwnerZipCode", footnotes_map) or ""
    # Box 1: name/address with the printed form's "(Last) (First)..." captions.
    box1_html = (
        "**1. Name and Address of Reporting Person*** "
        f"{owner_name}(Last) (First) (Middle) "
        f"{full_street}(Street) "
        f"{city}, {state} {zip_code}(City) (State) (Zip) "
    )
    issuer_name = get_value_with_footnote(issuer_node, r"issuerName", footnotes_map)
    issuer_symbol = get_value_with_footnote(issuer_node, r"issuerTradingSymbol", footnotes_map)
    box2_html = f"**2. Issuer Name and Ticker or Trading Symbol** {issuer_name} [ {issuer_symbol} ]"
    # Box 3: fall back to periodOfReport when dateOfEarliestTransaction is absent.
    date_val = (get_value_with_footnote(xml, r'dateOfEarliestTransaction', footnotes_map) or
                get_value_with_footnote(xml, r'periodOfReport', footnotes_map))
    box3_html = f"**3. Date of Earliest Transaction (Month/Day/Year)** {date_val or ' '}"
    amendment_date = get_value_with_footnote(xml, r'amendmentDate', footnotes_map)
    box4_html = f"**4. If Amendment, Date of Original Filed (Month/Day/Year)** {amendment_date or ' '}"
    def is_checked(node, tag_name):
        """True when the boolean flag tag matching *tag_name* is set."""
        tag = node.find(re.compile(tag_name, re.I))
        return tag and tag.text.strip().lower() in ("1", "true", "x")
    title = (get_value_with_footnote(rel_node, r"officerTitle", footnotes_map) or
             get_value_with_footnote(rel_node, r"otherText", footnotes_map) or
             " ")
    # Box 5: relationship checkboxes plus officer title / "other" text.
    box5_html = (
        "**5. Relationship of Reporting Person(s) to Issuer** "
        "(Check all applicable) "
        f"[{'X' if is_checked(rel_node, 'isDirector') else ' '}] Director [{'X' if is_checked(rel_node, 'isTenPercentOwner') else ' '}] 10% Owner "
        f"[{'X' if is_checked(rel_node, 'isOfficer') else ' '}] Officer (give title below) [{'X' if is_checked(rel_node, 'isOther') else ' '}] Other (specify below) "
        f"_{title}_"
    )
    # Box 6: single vs. joint filing is inferred from the reportingOwner count.
    is_single = len(xml.find_all("reportingOwner")) == 1
    box6_html = (
        "**6. Individual or Joint/Group Filing (Check Applicable Line)** "
        f"[{'X' if is_single else ' '}] Form filed by One Reporting Person "
        f"[{' ' if is_single else 'X'}] Form filed by More than One Reporting Person"
    )
    # Two 3-cell rows: (1,3,5) on top, (2,4,6) below, mirroring the paper form.
    header = "| | | |\n|:---|:---|:---|"
    row1 = f"| {box1_html} | {box3_html} | {box5_html} |"
    row2 = f"| {box2_html} | {box4_html} | {box6_html} |"
    return f"{header}\n{row1}\n{row2}"
def get_value_with_footnote(node_to_search, tag_name_or_regex, fn_map):
    """
    Extracts a value from a tag and robustly associates it with footnotes,
    regardless of whether they are children or siblings of the value tag.

    Args:
        node_to_search: BeautifulSoup node to search within (may be None).
        tag_name_or_regex: Tag-name pattern, matched case-insensitively.
        fn_map: Footnote id -> "(n)" marker.

    Returns:
        The tag's text with any footnote markers appended, the markers alone
        when there is no visible value, or "" when nothing matches.
    """
    if not node_to_search:
        return ""
    value_node = node_to_search.find(re.compile(tag_name_or_regex, re.I))
    if not value_node:
        return ""
    # Re-parse a copy of the node so footnoteId children can be stripped
    # without mutating the original tree.
    temp_node = BeautifulSoup(str(value_node), 'lxml-xml').find()
    if temp_node:
        for fn_tag in temp_node.find_all('footnoteId'):
            fn_tag.decompose()
        # Prefer a nested <value> element when present, else the node itself.
        val_container = temp_node.find('value') or temp_node
        val = val_container.get_text(separator=' ', strip=True)
    else:
        val = ""
    # Footnote ids may be direct children of the value tag, or siblings of it.
    fid_nodes = value_node.find_all('footnoteId', recursive=False)
    if not fid_nodes:
        fid_nodes = value_node.find_next_siblings('footnoteId') + value_node.find_previous_siblings('footnoteId')
    if fid_nodes:
        fn_markers = "".join(fn_map.get(fid.get("id"), "") for fid in fid_nodes)
        # A blank or placeholder value is replaced by the markers alone.
        if not val or val == "—":
            return fn_markers
        return f"{val}{fn_markers}"
    return val
def format_footnotes_in_text(text):
    """
    Locate every run of consecutive footnote markers like (1) or (2)(3)
    in *text* and append a space after the run. Non-string inputs are
    returned untouched.
    """
    if not isinstance(text, str):
        return text
    marker_run = re.compile(r'((?:\(\d+\))+)')
    return marker_run.sub(r'\1 ', text)
_DOLLAR_RE = re.compile(r'^-?\d+(\.\d+)?$')
def _dollarize_if_number(val: str) -> str:
"""Add a leading ‘$’ when *val* is a plain number (no parens, no footnotes)."""
if val == "":
return val
if val and _DOLLAR_RE.match(val):
return f"${val}"
elif val[0] == ".":
return f"$0{val}"
return val
def _is_voluntary(code: str, tl_value: str) -> bool:
return (code == 'V') or bool(tl_value and tl_value.strip())
def parse_form4_xml(soup, doc_type="4") -> str:
    """
    Convert a parsed SEC Form 4 (or Form 5) XML document into structured
    Markdown: SEC banner, per-owner header blocks, Table I (non-derivative)
    and Table II (derivative) securities, footnotes, remarks, signatures,
    and the form's boilerplate footer.

    Args:
        soup: BeautifulSoup-parsed ownership XML document.
        doc_type: Form number used in the title (default "4").

    Returns:
        The complete filing rendered as a Markdown string.
    """
    xml = soup
    # Top-of-form checkboxes (Section 16 exit, Rule 10b5-1(c) plan).
    not_subject_flag = xml.find('notSubjectToSection16')
    checkbox = f"[{'x' if not_subject_flag and not_subject_flag.text.strip() in ['1', 'true', 'Y'] else ' '}]"
    aff10b5_one_flag = xml.find('aff10b5One')
    checkbox2 = f"[{'x' if aff10b5_one_flag and aff10b5_one_flag.text.strip() in ['1', 'true', 'Y'] else ' '}]"
    checkbox2_text = "Check this box to indicate that a transaction was made pursuant to a contract, instruction or written plan for the purchase or sale of equity securities of the issuer that is intended to satisfy the affirmative defense conditions of Rule 10b5-1(c). See Instruction 10."
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        f"## FORM {doc_type}\n\n"
        "### STATEMENT OF CHANGES IN BENEFICIAL OWNERSHIP\n",
        f"{checkbox} Check this box if no longer subject to Section 16. Form 4 or Form 5 obligations may continue. See Instruction 1(b).",
        f"\n{checkbox2} {checkbox2_text}"
    ]
    # Map footnote ids to "(n)" markers; keep the numbered text for the end.
    fn_map, fn_txt = {}, []
    if (fsec := xml.find("footnotes")):
        fns = fsec.find_all("footnote")
        valid_fns = [f for f in fns if f.has_attr('id')]
        fn_map = {f["id"]: f"({i+1})" for i, f in enumerate(valid_fns)}
        fn_txt = [f"({i+1}) {f.text.strip()}" for i, f in enumerate(valid_fns)]
    # One header block per reporting owner (joint filings have several).
    for owner_node in xml.find_all("reportingOwner"):
        parts.append(f"\n---\n{_form4_header_details_block(xml, owner_node, fn_map)}\n---")
    # ---- Table I: non-derivative securities ----
    # Column names carry ##ROWSPAN/##COLSPAN markers consumed downstream.
    t1_tag = xml.find(re.compile(r"nonDerivativeTable", re.I))
    search_context_t1 = t1_tag if t1_tag else xml
    non_derivative_rows = search_context_t1.find_all(re.compile(r"nonDerivative(Transaction|Holding|Security)", re.I))
    if non_derivative_rows:
        rows_data = []
        for r in non_derivative_rows:
            row = {}
            amounts = r.find(re.compile(r'transactionAmounts', re.I))
            coding = r.find(re.compile(r'transactionCoding', re.I))
            post = r.find(re.compile(r'postTransactionAmounts', re.I))
            ownership = r.find(re.compile(r'ownershipNature', re.I))
            row["1. Title of Security##ROWSPAN_1## 1. Title of Security##ROWSPAN_1##"] = get_value_with_footnote(r, r'securityTitle', fn_map)
            row["2. Transaction Date##ROWSPAN_2## 2. Transaction Date##ROWSPAN_2##"] = get_value_with_footnote(r, r'transactionDate', fn_map)
            row["2A. Deemed Execution Date##ROWSPAN_3## 2A. Deemed Execution Date##ROWSPAN_3##"] = get_value_with_footnote(r, r'deemedExecutionDate', fn_map)
            code = get_value_with_footnote(coding, r'transactionCode', fn_map)
            tl_value = get_value_with_footnote(r, r'transactionTimeliness', fn_map)
            row["3. Transaction Code (V)##COLSPAN_1## Code"] = code
            row["3. Transaction Code (V)##COLSPAN_1## V"] = "V" if _is_voluntary(code, tl_value) else ""
            price = _dollarize_if_number(
                get_value_with_footnote(amounts, r'transactionPricePerShare', fn_map)
            )
            shares = get_value_with_footnote(amounts, r'transactionShares', fn_map)
            acq_disp_code = get_value_with_footnote(amounts, r'transactionAcquiredDisposedCode', fn_map)
            row["4. Securities Acquired (A) or Disposed of (D)##COLSPAN_2## Amount"] = shares
            row["4. Securities Acquired (A) or Disposed of (D)##COLSPAN_2## (A) or (D)"] = acq_disp_code
            row["4. Securities Acquired (A) or Disposed of (D)##COLSPAN_2## Price"] = price
            row["5. Amount of Securities Beneficially Owned##ROWSPAN_4## 5. Amount of Securities Beneficially Owned##ROWSPAN_4##"] = get_value_with_footnote(post, r'sharesOwnedFollowingTransaction', fn_map)
            row["6. Ownership Form##ROWSPAN_5## 6. Ownership Form##ROWSPAN_5##"] = get_value_with_footnote(ownership, r'directOrIndirectOwnership', fn_map)
            row["7. Nature of Indirect Beneficial Ownership##ROWSPAN_6## 7. Nature of Indirect Beneficial Ownership##ROWSPAN_6##"] = get_value_with_footnote(ownership, r'natureOfOwnership', fn_map)
            rows_data.append(row)
        df1 = pd.DataFrame(rows_data).fillna('')
        df1 = df1.applymap(_unsplit_numbers)
        df1 = df1.applymap(format_footnotes_in_text)
        parts.append("\n## Table I - Non-Derivative Securities\n")
        parts.append(f"---\n{md_table_2row_header(reorder(df1, ORDER_I))}\n---")
    else:
        # Emit an empty placeholder table to preserve the form's layout.
        parts.extend([
            "\n## Table I - Non-Derivative Securities\n\n---\n",
            md_table_2row_header(
                pd.DataFrame([['—'] * len(ORDER_I)], columns=ORDER_I)
            ),
            "---\n"
        ])
    # ---- Table II: derivative securities ----
    t2_tag = xml.find(re.compile(r'^derivativeTable$', re.I))
    search_context_t2 = t2_tag if t2_tag else xml
    derivative_rows = search_context_t2.find_all(re.compile(r'^derivative(Transaction|Holding|Security)$', re.I))
    if derivative_rows:
        rows_data_2 = []
        for r in derivative_rows:
            row = {}
            amounts = r.find(re.compile(r'transactionAmounts', re.I))
            coding = r.find(re.compile(r'transactionCoding', re.I))
            underlying = r.find(re.compile(r'underlyingSecurity', re.I))
            post = r.find(re.compile(r'postTransactionAmounts', re.I))
            ownership = r.find(re.compile(r'ownershipNature', re.I))
            row["1. Title of Derivative Security##ROWSPAN_7## 1. Title of Derivative Security##ROWSPAN_7##"] = get_value_with_footnote(r, r'securityTitle', fn_map)
            row["2. Conversion or Exercise Price##ROWSPAN_8## 2. Conversion or Exercise Price##ROWSPAN_8##"] = _dollarize_if_number(get_value_with_footnote(r, r'conversionOrExercisePrice', fn_map))
            row["3. Transaction Date##ROWSPAN_9## 3. Transaction Date##ROWSPAN_9##"] = get_value_with_footnote(r, r'transactionDate', fn_map)
            row["3A. Deemed Execution Date##ROWSPAN_10## 3A. Deemed Execution Date##ROWSPAN_10##"] = get_value_with_footnote(r, r'deemedExecutionDate', fn_map)
            code = get_value_with_footnote(coding, r'transactionCode', fn_map)
            tl_value = get_value_with_footnote(r, r'transactionTimeliness', fn_map)
            row["4. Transaction Code (V)##COLSPAN_3## Code"] = code
            row["4. Transaction Code (V)##COLSPAN_3## V"] = "V" if _is_voluntary(code, tl_value) else ""
            shares = get_value_with_footnote(amounts, r'transactionShares', fn_map)
            acq_disp_code = get_value_with_footnote(amounts, r'transactionAcquiredDisposedCode', fn_map)
            # Shares go into the (A) or the (D) sub-column based on the code.
            row["5. Number of Derivative Securities Acquired (A) or Disposed of (D)##COLSPAN_4## (A)"] = ""
            row["5. Number of Derivative Securities Acquired (A) or Disposed of (D)##COLSPAN_4## (D)"] = ""
            if acq_disp_code.strip().upper() == 'A':
                row["5. Number of Derivative Securities Acquired (A) or Disposed of (D)##COLSPAN_4## (A)"] = shares
            elif acq_disp_code.strip().upper() == 'D':
                row["5. Number of Derivative Securities Acquired (A) or Disposed of (D)##COLSPAN_4## (D)"] = shares
            row["6. Date Exercisable and Expiration Date##COLSPAN_5## Date Exercisable"] = get_value_with_footnote(r, r'exerciseDate', fn_map)
            row["6. Date Exercisable and Expiration Date##COLSPAN_5## Expiration Date"] = get_value_with_footnote(r, r'expirationDate', fn_map)
            row["7. Title and Amount of Underlying Securities##COLSPAN_6## Title"] = get_value_with_footnote(underlying, r'underlyingSecurityTitle', fn_map)
            row["7. Title and Amount of Underlying Securities##COLSPAN_6## Amount or Number of Shares"] = get_value_with_footnote(underlying, r'underlyingSecurityShares', fn_map)
            # Price: first non-empty of security price, per-share price, value.
            price_value = (get_value_with_footnote(r, r'derivativeSecurityPrice', fn_map) or
                           get_value_with_footnote(amounts, r'transactionPricePerShare', fn_map) or
                           get_value_with_footnote(amounts, r'transactionValue', fn_map))
            row["8. Price of Derivative Security##ROWSPAN_11## 8. Price of Derivative Security##ROWSPAN_11##"] = _dollarize_if_number(price_value)
            row["9. Number of Derivative Securities Beneficially Owned##ROWSPAN_12## 9. Number of Derivative Securities Beneficially Owned##ROWSPAN_12##"] = get_value_with_footnote(post, r'sharesOwnedFollowingTransaction', fn_map)
            row["10. Ownership Form##ROWSPAN_13## 10. Ownership Form##ROWSPAN_13##"] = get_value_with_footnote(ownership, r'directOrIndirectOwnership', fn_map)
            row["11. Nature of Indirect Beneficial Ownership##ROWSPAN_14## 11. Nature of Indirect Beneficial Ownership##ROWSPAN_14##"] = get_value_with_footnote(ownership, r'natureOfOwnership', fn_map)
            rows_data_2.append(row)
        df2 = pd.DataFrame(rows_data_2).fillna('')
        df2 = df2.applymap(_unsplit_numbers)
        df2 = df2.applymap(format_footnotes_in_text)
        parts.append("## Table II - Derivative Securities\n\n---\n")
        parts.append(md_table_2row_header(reorder(df2, ORDER_II)))
        parts.append("---\n")
    else:
        parts.extend([
            "## Table II - Derivative Securities\n\n---\n",
            md_table_2row_header(
                pd.DataFrame([['—'] * len(ORDER_II)], columns=ORDER_II)
            ),
            "---\n"
        ])
    # Footnotes, remarks, signatures, then the static form footer.
    if fn_txt:
        parts.append("\n### Footnotes:")
        parts.extend(fn_txt)
    if (remarks_node := xml.find('remarks')) and (remarks_text := remarks_node.get_text(strip=True)):
        parts.append(f"\n**Remarks:**\n{remarks_text}")
    for sig in xml.find_all(re.compile(r"ownerSignature", re.I)):
        name = get_value_with_footnote(sig, r"signatureName", fn_map)
        date = get_value_with_footnote(sig, r"signatureDate", fn_map)
        parts.append(f"\n**Signature:** {name or '—'} \n**Date:** {date or '—'}")
    boilerplate_footer = """
    ### Remarks:
    Reminder: Report on a separate line for each class of securities beneficially owned directly or indirectly.
    * If the form is filed by more than one reporting person, see Instruction 4 (b)(v).
    ** Intentional misstatements or omissions of facts constitute Federal Criminal Violations See 18 U.S.C. 1001 and 15 U.S.C. 78ff(a).
    Note: File three copies of this Form, one of which must be manually signed. If space is insufficient, see Instruction 6 for procedure.
    **Persons who respond to the collection of information contained in this form are not required to respond unless the form displays a currently valid OMB Number.**
    """
    parts.append(textwrap.dedent(boilerplate_footer).strip())
    # ".0000" removal strips four-decimal zero padding from numeric values.
    return "\n\n".join(parts).replace(".0000", "")
def _finish_block(rows, header_hint):
df = pd.DataFrame(rows)
if header_hint and len(header_hint) == df.shape[1] - 1:
df.columns = ["Label", *header_hint]
else:
df.columns = ["Label"] + [f"Col {i}" for i in range(1, df.shape[1])]
for idx in df.index:
row_vals = df.loc[idx].tolist()
label, rest = row_vals[0], row_vals[1:]
merged = []
skip_next = False
for i, v in enumerate(rest):
if skip_next:
skip_next = False
continue
if isinstance(v, str) and v.strip() == "$":
j = i + 1
while j < len(rest) and (rest[j] is None or str(rest[j]).strip() == ""):
j += 1
merged.append("$" + ("" if j >= len(rest) else str(rest[j]).strip()))
if j < len(rest):
rest[j] = ""
skip_next = False
else:
merged.append(v)
merged = [x for x in merged if not (isinstance(x, str) and x.strip() == "")]
merged += [""] * (len(rest) - len(merged))
df.loc[idx, df.columns[1:]] = merged
df.columns = ["" if str(c).startswith("Col ") else c for c in df.columns]
return df
# Matches a financial-statement line such as "Total revenue...... $1,234":
# a textual label, then a dot-leader or wide space gap, then a numeric value
# (digits, $, parens, sign, separators).
# NOTE(review): the original pattern was compiled twice and its `(?P` group
# extensions had lost their names (invalid regex); group names `label` and
# `value` are reconstructed — confirm against version control.
DOT_ROW = re.compile(
    r"""^\s*
        (?P<label>[A-Za-z].*?)
        (?:\.{2,}|\s{2,})\s*
        (?P<value>[$()\-0-9,.\s]+)
        \s*$
    """,
    re.X,
)
# A header-hint line starts with a 4-digit year or a word of 3+ letters and
# contains a run of 2+ spaces (column gap) afterward.
HEADER_HINT_RE = re.compile(r"^\s*(\d{4}|[A-Za-z]{3,}).*\s{2,}")
# NOTE(review): r"|" matches the empty string everywhere — the SGML tag
# alternatives this was meant to match appear to have been lost from the
# pattern; confirm the intended tags against version control.
SGML_TAG_RE = re.compile(r"|")
def parse_plaintext_filing(raw_text: str) -> str:
    """
    Strip PEM/SGML envelope artifacts from a plain-text EDGAR filing and
    return the remaining body wrapped in a fenced code block.

    NOTE(review): several regex literals below look truncated (tag names
    appear to have been lost from the patterns, and the final
    ``replace("", "")`` is a no-op) — confirm the intended SGML tags
    against version control.
    """
    # Prefer the body of a privacy-enhanced message envelope when present.
    pem_pattern = r'-----BEGIN PRIVACY-ENHANCED MESSAGE-----(.*?)-----END PRIVACY-ENHANCED MESSAGE-----'
    text_inside_pem = re.search(pem_pattern, raw_text, re.DOTALL)
    if not text_inside_pem:
        # No PEM wrapper: strip the IMS/SEC header sections instead.
        ims_pattern = r'.*? |.*? '
        content = re.sub(ims_pattern, '', raw_text, flags=re.DOTALL)
    else:
        content = text_inside_pem.group(1)
    header_pattern = r'.*? '
    content_no_header = re.sub(header_pattern, '', content, flags=re.DOTALL)
    doc_tags_pattern = r'.*?\n| '
    content = re.sub(doc_tags_pattern, '', content_no_header, flags=re.DOTALL)
    # Collapse runs of 3+ newlines down to a single blank line.
    cleaned_text = re.sub(r'\n{3,}', '\n\n', content)
    cleaned_text = cleaned_text.replace("", "")
    # Preserve the original fixed-width layout inside a code fence.
    spaced_text = f"\n```\n{cleaned_text}\n```\n"
    return spaced_text
def parse_schedule13g_xml(xml: BeautifulSoup) -> str:
    """
    Parses a Schedule 13G filing into structured Markdown, creating a
    valid table structure and correctly rendering all items, checkboxes, and comments for all filers.

    Args:
        xml: BeautifulSoup-parsed Schedule 13G XML (``edgarSubmission`` root).

    Returns:
        The filing as Markdown, or "" when no ``edgarSubmission`` element exists.
    """
    submission = xml.find('edgarSubmission')
    if not submission:
        return ""
    def get_text(node, tag):
        """Text of the first child matching *tag* (case-insensitive), else an em dash."""
        if not node: return "—"
        found = node.find(re.compile(f'^{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def get_multiline_text(node, tag):
        """Like get_text, but preserves multi-line values with blank lines removed."""
        if not node: return "—"
        found = node.find(re.compile(f'^{tag}$', re.I))
        if not found or not found.text: return "—"
        lines = [line.strip() for line in found.text.strip().split('\n') if line.strip()]
        return "\n".join(lines)
    header = submission.find('headerData')
    form_data = submission.find('formData')
    cover_page = form_data.find('coverPageHeader')
    issuer_info = cover_page.find('issuerInfo')
    items = form_data.find('items')
    # ---- Cover page: title banner, issuer identification, filing rule ----
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## SCHEDULE 13G\n\n"
        "### Under the Securities Exchange Act of 1934\n"
    ]
    amendment_no = get_text(cover_page, 'amendmentNo')
    if amendment_no and amendment_no != "—":
        parts.append(f"**(Amendment No. {amendment_no})**\n")
    parts.append(f"**Issuer:** {get_text(issuer_info, 'issuerName')}")
    parts.append(f"**Title of Class of Securities:** {get_text(cover_page, 'securitiesClassTitle')}")
    parts.append(f"**CUSIP Number:** {get_text(issuer_info, 'issuerCusip')}")
    parts.append(f"**Date of Event Which Requires Filing of this Statement:** {get_text(cover_page, 'eventDateRequiresFilingThisStatement')}")
    parts.append("\n**Check the appropriate box to designate the rule pursuant to which this Schedule is filed:**\n")
    filed_rules_nodes = cover_page.find_all(re.compile('^designateRulePursuantThisScheduleFiled$', re.I))
    filed_rules = {node.text.strip() for node in filed_rules_nodes}
    all_rules = ["Rule 13d-1(b)", "Rule 13d-1(c)", "Rule 13d-1(d)"]
    for rule in all_rules:
        checkbox = '[x]' if rule in filed_rules else '[ ]'
        parts.append(f"- {checkbox} {rule}")
    # ---- One cover-page table (rows 1-12) per reporting person ----
    # ##ROWSPAN/##COLSPAN markers and repeated cells are consumed downstream
    # when the Markdown table is converted to merged-cell HTML.
    for reporting_person in form_data.find_all('coverPageHeaderReportingPersonDetails'):
        parts.append("\n---\n")
        reporting_person_name = get_text(reporting_person, 'reportingPersonName')
        citizenship = get_text(reporting_person, 'citizenshipOrOrganization')
        voting_power = reporting_person.find('reportingPersonBeneficiallyOwnedNumberOfShares')
        sole_voting = get_text(voting_power, 'soleVotingPower')
        shared_voting = get_text(voting_power, 'sharedVotingPower')
        sole_dispositive = get_text(voting_power, 'soleDispositivePower')
        shared_dispositive = get_text(voting_power, 'sharedDispositivePower')
        aggregate_amount = get_text(reporting_person, 'reportingPersonBeneficiallyOwnedAggregateNumberOfShares')
        is_aggregate_excluded = get_text(reporting_person, 'aggregateAmountExcludesCertainSharesFlag').upper() == 'Y'
        checkbox_10_val = '[x]' if is_aggregate_excluded else '[ ]'
        percent_11_val = get_text(reporting_person, 'classPercent').replace(' ', '')
        person_type_nodes = reporting_person.find_all('typeOfReportingPerson')
        person_type = ", ".join(node.text for node in person_type_nodes)
        shares_block_text = "Number of Shares Beneficially Owned by Each Reporting Person With##ROWSPAN_1##"
        table_content_1 = f"Names of Reporting Persons {reporting_person_name}##COLSPAN_1##"
        # Row 2: group membership (a)/(b) checkboxes.
        group_membership_text = ""
        member_group_node = reporting_person.find('memberGroup')
        if member_group_node:
            status = member_group_node.text.strip().lower()
            checkbox_a = '[x]' if status == 'a' else '[ ]'
            checkbox_b = '[x]' if status == 'b' else '[ ]'
            group_membership_text = f"(a) {checkbox_a} (b) {checkbox_b}"
        else:
            group_membership_text = f"(a) [ ] (b) [ ]"
        table_content_2 = f"Check the Appropriate Box if a Member of a Group (See Instructions) {group_membership_text}##COLSPAN_2##"
        table_content_3 = "SEC Use Only##COLSPAN_3##"
        table_content_4 = f"Citizenship or Place of Organization {citizenship}##COLSPAN_4##"
        table_content_9 = f"Aggregate Amount Beneficially Owned by Each Reporting Person {aggregate_amount}##COLSPAN_9##"
        table_content_10 = f"Check if the Aggregate Amount in Row (9) Excludes Certain Shares (See Instructions) {checkbox_10_val}##COLSPAN_10##"
        table_content_11 = f"Percent of Class Represented by Amount in Row (9) {percent_11_val}%##COLSPAN_11##"
        table_content_12 = f"Type of Reporting Person (See Instructions) {person_type}##COLSPAN_12##"
        # NOTE(review): the separator declares 4 columns while the rows have 3
        # cells — presumably tolerated/normalized downstream; confirm.
        header_row = f"| 1. | {table_content_1} | {table_content_1} |"
        separator_row = "|:---|:---|:---|:---|"
        body_rows = [
            f"| 2. | {table_content_2} | {table_content_2} |",
            f"| 3. | {table_content_3} | {table_content_3} |",
            f"| 4. | {table_content_4} | {table_content_4} |",
            f"| {shares_block_text} | 5. | Sole Voting Power {sole_voting} |",
            f"| {shares_block_text} | 6. | Shared Voting Power {shared_voting} |",
            f"| {shares_block_text} | 7. | Sole Dispositive Power {sole_dispositive} |",
            f"| {shares_block_text} | 8. | Shared Dispositive Power {shared_dispositive} |",
            f"| 9. | {table_content_9} | {table_content_9} |",
            f"| 10. | {table_content_10} | {table_content_10} |",
            f"| 11. | {table_content_11} | {table_content_11} |",
            f"| 12. | {table_content_12} | {table_content_12} |"
        ]
        table_md = [header_row, separator_row] + body_rows
        parts.append("\n".join(table_md))
        comment_text = get_text(reporting_person, 'comments')
        if comment_text and comment_text != "—":
            parts.append(f"\n**Comment for Type of Reporting Person:** {comment_text}")
        parts.append("\n---\n")
    # ---- Items 1-10 ----
    item1 = items.find('item1')
    item2 = items.find('item2')
    item3 = items.find('item3')
    item4 = items.find('item4')
    item5 = items.find('item5')
    item6 = items.find('item6')
    item7 = items.find('item7')
    item8 = items.find('item8')
    item9 = items.find('item9')
    item10 = items.find('item10')
    parts.append(f"**Item 1(a). Name of Issuer:**\n{get_text(item1, 'issuerName')}\n")
    parts.append(f"**Item 1(b). Address of Issuer's Principal Executive Offices:**\n{get_text(item1, 'issuerPrincipalExecutiveOfficeAddress')}\n")
    parts.append(f"**Item 2(a). Name of Person Filing:**\n{get_text(item2, 'filingPersonName')}\n")
    parts.append(f"**Item 2(b). Address of Principal Business Office:**\n{get_text(item2, 'principalBusinessOfficeOrResidenceAddress')}\n")
    parts.append(f"**Item 2(c). Citizenship:**\n{get_text(item2, 'citizenship')}\n")
    parts.append(f"**Item 2(d). Title of Class of Securities:**\n{get_text(cover_page, 'securitiesClassTitle')}\n")
    parts.append(f"**Item 2(e). CUSIP Number:**\n{get_text(issuer_info, 'issuerCusip')}\n")
    parts.append("**Item 3. If this statement is filed pursuant to §§ 240.13d-1(b) or 240.13d-2(b) or (c), check whether the person filing is a:**\n")
    # Item 3: map EDGAR filer-type codes to the form's lettered checkboxes.
    filer_type_codes = {node.text for node in (item3.find_all('typeOfPersonFiling') if item3 else [])}
    filer_type_map = {
        "BK": "(b)", "BD": "(a)", "IC": "(d)", "IA": "(e)",
        "HC": "(g)", "EP": "(f)", "SA": "(h)", "CP": "(i)", "CO": "(k)"
    }
    item3_options = {
        "(a)": "Broker or dealer registered under section 15 of the Act (15 U.S.C. 78o).",
        "(b)": "Bank as defined in section 3(a)(6) of the Act (15 U.S.C. 78c).",
        "(c)": "Insurance company as defined in section 3(a)(19) of the Act (15 U.S.C. 78c).",
        "(d)": "Investment company registered under section 8 of the Investment Company Act of 1940 (15 U.S.C. 80a-8).",
        "(e)": "An investment adviser in accordance with § 240.13d-1(b)(1)(ii)(E);",
        "(f)": "An employee benefit plan or endowment fund in accordance with § 240.13d-1(b)(1)(ii)(F);",
        "(g)": "A parent holding company or control person in accordance with § 240.13d-1(b)(1)(ii)(G);",
        "(h)": "A savings associations as defined in Section 3(b) of the Federal Deposit Insurance Act (12 U.S.C. 1813);",
        "(i)": "A church plan that is excluded from the definition of an investment company under section 3(c)(14) of the Investment Company Act of 1940 (15 U.S.C. 80a-3);",
        "(j)": "A non-U.S. institution in accordance with § 240.13d-1(b)(1)(ii)(J), if filing as a non-U.S. institution in accordance with § 240.13d-1(b)(1)(ii)(J), please specify the type of institution:",
        "(k)": "Group, in accordance with Rule 240.13d-1(b)(1)(ii)(K)."
    }
    checked_item_letters = {filer_type_map.get(code) for code in filer_type_codes}
    for letter, text in item3_options.items():
        checkbox = '[x]' if letter in checked_item_letters else '[ ]'
        parts.append(f"{letter} {checkbox} {text}")
    parts.append(f"\n\n**Item 4. Ownership:**")
    amount_owned_text = get_multiline_text(item4, 'amountBeneficiallyOwned')
    parts.append(f"\n**(a) Amount beneficially owned:**\n\n{amount_owned_text}")
    percent_val_item4 = get_multiline_text(item4, 'classPercent')
    parts.append(f"\n**(b) Percent of class:**\n\n{percent_val_item4}")
    breakdown = item4.find('numberOfSharesPersonHas') if item4 else None
    if breakdown:
        parts.append("\n\n**(c) Number of shares as to which the person has:**")
        parts.append(f"\n**(i) Sole power to vote or to direct the vote:**\n\n{get_multiline_text(breakdown, 'solePowerOrDirectToVote')}")
        parts.append(f"\n**(ii) Shared power to vote or to direct the vote:**\n\n{get_multiline_text(breakdown, 'sharedPowerOrDirectToVote')}")
        parts.append(f"\n**(iii) Sole power to dispose or to direct the disposition of:**\n\n{get_multiline_text(breakdown, 'solePowerOrDirectToDispose')}")
        parts.append(f"\n**(iv) Shared power to dispose or to direct the disposition of:**\n\n{get_multiline_text(breakdown, 'sharedPowerOrDirectToDispose')}\n")
    parts.append(f"**Item 5. Ownership of Five Percent or Less of a Class.**\n")
    is_not_applicable_5 = get_text(item5, 'notApplicableFlag').upper() == 'Y'
    checkbox_5 = '[x]' if is_not_applicable_5 else '[ ]'
    parts.append(f"{checkbox_5} If this statement is being filed to report the fact that as of the date hereof the reporting person has ceased to be the beneficial owner of more than five percent of the class of securities, check the following.\n")
    # Items 6-9 print their text only when not flagged "not applicable".
    parts.append(f"**Item 6. Ownership of More than 5 Percent on Behalf of Another Person.**\n")
    if get_text(item6, 'notApplicableFlag').upper() != 'Y':
        item6_text = get_text(item6, 'ownershipMoreThan5PercentOnBehalfOfAnotherPerson')
        parts.append(f"{item6_text}\n")
    else:
        parts.append("Not Applicable\n")
    parts.append(f"**Item 7. Identification and Classification of the Subsidiary**\n")
    if get_text(item7, 'notApplicableFlag').upper() != 'Y':
        item7_text = get_text(item7, 'subsidiaryIdentificationAndClassification')
        parts.append(f"{item7_text}\n")
    else:
        parts.append("Not Applicable\n")
    parts.append(f"**Item 8. Identification and Classification of Members of the Group**\n")
    if get_text(item8, 'notApplicableFlag').upper() != 'Y':
        item8_text = get_text(item8, 'identificationAndClassificationOfGroupMembers')
        parts.append(f"{item8_text}\n")
    else:
        parts.append("Not Applicable\n")
    parts.append(f"**Item 9. Notice of Dissolution of Group**\n")
    if get_text(item9, 'notApplicableFlag').upper() != 'Y':
        item9_text = get_text(item9, 'dissolutionOfGroupNotice')
        parts.append(f"{item9_text}\n")
    else:
        parts.append("Not Applicable\n")
    parts.append(f"\n**Item 10. Certification:**")
    parts.append(f"{get_text(item10, 'certifications')}\n")
    # ---- Signature block(s) ----
    parts.append("\n### SIGNATURE\n")
    parts.append("After reasonable inquiry and to the best of my knowledge and belief, I certify that the information set forth in this statement is true, complete and correct.\n")
    for sig_info in form_data.find_all('signatureInformation'):
        sig_details = sig_info.find('signatureDetails')
        reporting_person = get_text(sig_info, 'reportingPersonName')
        if reporting_person and reporting_person != "—":
            parts.append(f"\n**{reporting_person}**")
        parts.append(f"**Date:** {get_text(sig_details, 'date')}")
        parts.append(f"**By:** {get_text(sig_details, 'signature')}")
        parts.append(f"**Name & Title:** {get_text(sig_details, 'title')}")
    return "\n\n".join(parts)
def _form3_header_details_block(xml: BeautifulSoup, owner_node, footnotes_map: dict) -> str:
    """Render the Form 3 header (boxes 1-6) as a three-column Markdown table."""

    def lookup(node, pattern):
        """Shorthand for footnote-aware value extraction."""
        return get_value_with_footnote(node, pattern, footnotes_map)

    def mark(node, pattern):
        """Return 'X' when the boolean flag tag matching *pattern* is set."""
        hit = node.find(re.compile(pattern, re.I))
        return 'X' if (hit and hit.text.strip().lower() in ("1", "true", "x")) else ' '

    issuer = xml.find("issuer")
    relationship = owner_node.find("reportingOwnerRelationship")
    address = owner_node.find("reportingOwnerAddress")

    line1 = lookup(address, "rptOwnerStreet1") or ""
    line2 = lookup(address, "rptOwnerStreet2") or ""
    street = f"{line1} {line2}" if line2 else line1
    city = lookup(address, "rptOwnerCity") or ""
    state = lookup(address, "rptOwnerState") or ""
    zip_code = lookup(address, "rptOwnerZipCode") or ""
    owner_name = lookup(owner_node, r"rptOwnerName")

    box1 = (
        "**1. Name and Address of Reporting Person*** "
        f"{owner_name}(Last) (First) (Middle) "
        f"{street}(Street) "
        f"{city}, {state} {zip_code}(City) (State) (Zip) "
    )
    box2 = f"**2. Date of Event Requiring Statement (Month/Day/Year)** {lookup(xml, r'periodOfReport') or ' '}"
    box3 = (
        "**3. Issuer Name and Ticker or Trading Symbol** "
        f"{lookup(issuer, r'issuerName')} [ {lookup(issuer, r'issuerTradingSymbol')} ]"
    )
    officer_title = lookup(relationship, r"officerTitle") or " "
    box4 = (
        "**4. Relationship of Reporting Person(s) to Issuer** "
        "(Check all applicable) "
        f"[{mark(relationship, 'isDirector')}] Director [{mark(relationship, 'isTenPercentOwner')}] 10% Owner "
        f"[{mark(relationship, 'isOfficer')}] Officer (give title below) [{mark(relationship, 'isOther')}] Other (specify below) "
        f"_{officer_title}_"
    )
    box5 = f"**5. If Amendment, Date of Original Filed (Month/Day/Year)** {lookup(xml, r'amendmentDate') or ' '}"
    # Joint filings have more than one reportingOwner element.
    solo = len(xml.find_all("reportingOwner")) == 1
    box6 = (
        "**6. Individual or Joint/Group Filing (Check Applicable Line)** "
        f"[{'X' if solo else ' '}] Form filed by One Reporting Person "
        f"[{' ' if solo else 'X'}] Form filed by More than One Reporting Person"
    )

    # Boxes (1,3,5) on the first row and (2,4,6) on the second, as printed.
    table_lines = [
        "| | | |",
        "|:---|:---|:---|",
        f"| {box1} | {box3} | {box5} |",
        f"| {box2} | {box4} | {box6} |",
    ]
    return "\n\n---\n" + "\n".join(table_lines) + "\n\n---\n"
def parse_form3_xml(soup: BeautifulSoup) -> str:
    """
    Parses an XML-based Form 3 (Initial Statement of Beneficial Ownership)
    into structured Markdown.

    Renders, in order: the SEC / FORM 3 banner, one header block per
    ``reportingOwner`` element, Table I (non-derivative securities),
    Table II (derivative securities), collected footnotes, remarks,
    owner signatures, and the standard Form 3 boilerplate footer.
    Tables that are absent or empty in the XML are still emitted as
    placeholder tables so the output mirrors the printed form layout.

    Args:
        soup: Parsed Form 3 XML document.

    Returns:
        The full filing rendered as a single Markdown string.
    """
    xml = soup
    # Footnote bookkeeping: map each footnote id to a "(n)" marker used
    # inline by get_value_with_footnote, and collect the numbered footnote
    # text lines for the "Footnotes" section near the bottom.
    fn_map, fn_txt = {}, []
    if (fsec := xml.find("footnotes")):
        fns = fsec.find_all("footnote")
        fn_map = {f["id"]: f"({i+1})" for i, f in enumerate(fns)}
        fn_txt = [f"({i+1}) {f.text.strip()}" for i, f in enumerate(fns)]
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## FORM 3\n\n"
        "### INITIAL STATEMENT OF BENEFICIAL OWNERSHIP OF SECURITIES\n",
        "[ ] Check this box if no longer subject to Section 16. Form 4 or Form 5 obligations may continue. See Instruction 1(b)."
    ]
    # One boxed header block (name/address, event date, issuer, relationship,
    # amendment date, filing type) per reporting owner.
    for owner_node in xml.find_all("reportingOwner"):
        parts.append(f"\n{_form3_header_details_block(xml, owner_node, fn_map)}\n")
    # --- Table I: Non-Derivative Securities Beneficially Owned ---
    t1_tag = xml.find(re.compile(r"nonDerivativeTable", re.I))
    # get_text(strip=True) guards against a present-but-empty table element.
    if t1_tag and t1_tag.get_text(strip=True):
        rows_data = []
        for r in t1_tag.find_all(re.compile(r"nonDerivativeHolding", re.I)):
            row = {}
            post = r.find(re.compile(r'postTransactionAmounts', re.I))
            ownership = r.find(re.compile(r'ownershipNature', re.I))
            row["1. Title of Security"] = get_value_with_footnote(r, r'securityTitle', fn_map)
            row["2. Amount of Securities Beneficially Owned"] = get_value_with_footnote(post, r'sharesOwnedFollowingTransaction', fn_map)
            row["3. Ownership Form"] = get_value_with_footnote(ownership, r'directOrIndirectOwnership', fn_map)
            row["4. Nature of Indirect Beneficial Ownership"] = get_value_with_footnote(ownership, r'natureOfOwnership', fn_map)
            rows_data.append(row)
        df1 = pd.DataFrame(rows_data).fillna('')
        df1 = df1.applymap(format_footnotes_in_text)
        parts.append("\n## Table I - Non-Derivative Securities Beneficially Owned\n\n---\n")
        parts.append(md_table_2row_header(reorder(df1, ORDER_I_FORM3)))
        parts.append("---\n")
    else:
        # No holdings: emit a header-only table (the single blank row is
        # dropped by the dropna below) so the section still appears.
        df1 = pd.DataFrame([[''] * len(ORDER_I_FORM3)], columns=ORDER_I_FORM3)
        df1 = df1.replace(r'^\s*$', np.nan, regex=True).dropna(how='all')
        df1 = md_table_2row_header(df1)
        parts.extend([
            "\n## Table I - Non-Derivative Securities Beneficially Owned\n\n---\n",
            df1,
            "\n---"
        ])
    # --- Table II: Derivative Securities Beneficially Owned ---
    t2_tag = xml.find(re.compile(r'^derivativeTable$', re.I))
    if t2_tag and t2_tag.get_text(strip=True):
        rows_data_2 = []
        for r in t2_tag.find_all(re.compile(r'^derivativeHolding$', re.I)):
            row = {}
            underlying = r.find(re.compile(r'underlyingSecurity', re.I))
            ownership = r.find(re.compile(r'ownershipNature', re.I))
            # The ##ROWSPAN_n## / ##COLSPAN_n## markers embedded in these
            # column names are presumably consumed by md_table_2row_header
            # to build the two-row merged header — TODO confirm against
            # that helper before changing any of these literals.
            row["1. Title of Derivative Security##ROWSPAN_1## 1. Title of Derivative Security##ROWSPAN_1##"] = get_value_with_footnote(r, r'securityTitle', fn_map)
            row["2. Date Exercisable and Expiration Date (Month/Day/Year)##COLSPAN_1## Date Exercisable"] = get_value_with_footnote(r, r'exerciseDate', fn_map)
            row["2. Date Exercisable and Expiration Date (Month/Day/Year)##COLSPAN_1## Expiration Date"] = get_value_with_footnote(r, r'expirationDate', fn_map)
            row["3. Title and Amount of Underlying Securities##COLSPAN_2## Title"] = get_value_with_footnote(underlying, r'underlyingSecurityTitle', fn_map)
            row["3. Title and Amount of Underlying Securities##COLSPAN_2## Amount or Number of Shares"] = get_value_with_footnote(underlying, r'underlyingSecurityShares', fn_map)
            row["4. Conversion or Exercise Price##ROWSPAN_2## 4. Conversion or Exercise Price##ROWSPAN_2##"] = _dollarize_if_number(get_value_with_footnote(r, r'conversionOrExercisePrice', fn_map))
            row["5. Ownership Form##ROWSPAN_3## 5. Ownership Form##ROWSPAN_3##"] = get_value_with_footnote(ownership, r'directOrIndirectOwnership', fn_map)
            row["6. Nature of Indirect Beneficial Ownership##ROWSPAN_4## 6. Nature of Indirect Beneficial Ownership##ROWSPAN_4##"] = get_value_with_footnote(ownership, r'natureOfOwnership', fn_map)
            rows_data_2.append(row)
        df2 = pd.DataFrame(rows_data_2).fillna('')
        df2 = df2.applymap(format_footnotes_in_text)
        parts.append("\n## Table II - Derivative Securities Beneficially Owned\n\n---\n")
        parts.append(md_table_2row_header(reorder(df2, ORDER_II_FORM3)))
        parts.append("---\n")
    else:
        # NOTE(review): empty Table II uses an em-dash placeholder row,
        # whereas empty Table I above drops its blank row entirely —
        # confirm whether this asymmetry is intentional.
        parts.append("\n## Table II - Derivative Securities Beneficially Owned\n")
        parts.append("\n---\n")
        placeholder_df = pd.DataFrame([['—'] * len(ORDER_II_FORM3)], columns=ORDER_II_FORM3)
        parts.append(md_table_2row_header(placeholder_df))
        parts.append("\n---")
    # Footnotes, free-text remarks, and one signature line per owner.
    if fn_txt:
        parts.append("\n### Footnotes:")
        parts.extend(fn_txt)
    if (remarks_node := xml.find('remarks')) and (remarks_text := remarks_node.get_text(strip=True)):
        parts.append(f"\n**Remarks:**\n{remarks_text}")
    for sig in xml.find_all(re.compile(r"ownerSignature", re.I)):
        name = get_value_with_footnote(sig, r"signatureName", fn_map)
        date = get_value_with_footnote(sig, r"signatureDate", fn_map)
        parts.append(f"\n**Signature:** {name or '—'} \n**Date:** {date or '—'}")
    # Standard instructions/OMB boilerplate printed at the foot of Form 3.
    boilerplate_footer = """
    ### Remarks:
    Reminder: Report on a separate line for each class of securities beneficially owned directly or indirectly.
    * If the form is filed by more than one reporting person, see Instruction 4 (b)(v).
    ** Intentional misstatements or omissions of facts constitute Federal Criminal Violations See 18 U.S.C. 1001 and 15 U.S.C. 78ff(a).
    Note: File three copies of this Form, one of which must be manually signed. If space is insufficient, see Instruction 6 for procedure.
    **Persons who respond to the collection of information contained in this form are not required to respond unless the form displays a currently valid OMB Number.**
    """
    parts.append(textwrap.dedent(boilerplate_footer).strip())
    return "\n\n".join(parts)
def parse_form_d_xml(xml: BeautifulSoup) -> str:
    """
    Parses an XML-based Form D into structured Markdown, accurately
    rendering all sections to mimic the visual layout of the original form.
    This version dynamically handles legacy exemption rules and correctly
    displays amendment information.

    Args:
        xml: Parsed Form D XML document.

    Returns:
        The full notice (sections 1-16 plus the signature block) as a
        single Markdown string.
    """
    def get_text(node, tag):
        # Case-insensitive, exact-name tag lookup; "—" is the uniform
        # sentinel for a missing node, missing tag, or empty text.
        if not node: return "—"
        found = node.find(re.compile(f'^{tag}$', re.I))
        return html.unescape(found.text.strip()) if found and found.text else "—"
    def safe_format_dollar(value_str: str) -> str:
        # Render integer dollar amounts with thousands separators.
        # FIX: was f"${int(value_str):}" (empty format spec), which dropped
        # the grouping and printed "$1000000" — inconsistent with
        # format_sales_amount in section 13, which uses ":,".
        # Non-numeric values (e.g. "Indefinite") pass through unchanged.
        if not value_str or value_str == "—": return "—"
        try:
            return f"${int(value_str):,}"
        except (ValueError, TypeError):
            return value_str
    def get_boolean_checkbox(node, tag):
        # Render a boolean tag as a checked Yes/No checkbox pair.
        val = get_text(node, tag).lower()
        return '[x] Yes [ ] No' if val == 'true' else '[ ] Yes [x] No'
    ENTITY_TYPES = [
        "Corporation", "Limited Partnership", "Limited Liability Company",
        "General Partnership", "Business Trust", "Other"
    ]
    YEAR_OF_INC_OPTIONS = [
        "Over Five Years Ago", "Within Last Five Years (Specify Year)", "Yet to Be Formed"
    ]
    # Leading spaces in these labels encode the form's visual indentation;
    # the rendering loop measures them to re-indent each checkbox cell.
    INDUSTRY_GROUPS_FULL = [
        ("Agriculture", "Health Care", "Retailing"),
        ("Banking & Financial Services", "Biotechnology", "Restaurants"),
        (" Commercial Banking", "Health Insurance", "Technology"),
        (" Insurance", "Hospitals & Physicians", " Computers"),
        (" Investing", "Pharmaceuticals", " Telecommunications"),
        (" Investment Banking", "Other Health Care", " Other Technology"),
        (" Pooled Investment Fund", "Manufacturing", "Travel"),
        (" Hedge Fund", "Real Estate", " Airlines & Airports"),
        (" Private Equity Fund", " Commercial", " Lodging & Conventions"),
        (" Venture Capital Fund", " Construction", " Tourism & Travel Services"),
        (" Other Investment Fund", " REITS & Finance", " Other Travel"),
        (" *Is the issuer registered as an investment company?*", " Residential", "Other"),
        (" Other Banking & Financial Services", " Other Real Estate", None),
        ("Business Services", None, None),
        ("Energy", None, None),
        (" Coal Mining", None, None),
        (" Electric Utilities", None, None),
        (" Energy Conservation", None, None),
        (" Environmental Services", None, None),
        (" Oil & Gas", None, None),
        (" Other Energy", None, None)
    ]
    REVENUE_RANGES = [
        "No Revenues", "$1 - $1,000,000", "$1,000,001 - $5,000,000",
        "$5,000,001 - $25,000,000", "$25,000,001 - $100,000,000",
        "Over $100,000,000", "Decline to Disclose", "Not Applicable"
    ]
    AGGREGATE_NAV_RANGES = [
        "No Aggregate Net Asset Value", "$1 - $5,000,000", "$5,000,001 - $25,000,000",
        "$25,000,001 - $50,000,000", "$50,000,001 - $100,000,000",
        "Over $100,000,000", "Decline to Disclose", "Not Applicable"
    ]
    SECURITY_TYPES = [
        "Equity", "Debt", "Option, Warrant or Other Right to Acquire Another Security",
        "Security to be Acquired Upon Exercise of Option, Warrant or Other Right to Acquire Security",
        "Pooled Investment Fund Interests", "Tenant-in-Common Securities",
        "Mineral Property Securities", "Other"
    ]
    # (code, label) pairs per display column; codes match the
    # federalExemptionsExclusions item values in the XML.
    FEDERAL_EXEMPTIONS_GROUPS_CORRECT = [
        [
            ("04a", "Rule 504(b)(1) (not (i), (ii) or (iii))"),
            ("04.1", "Rule 504(b)(1)(i)"),
            ("04.2", "Rule 504(b)(1)(ii)"),
            ("04d", "Rule 504(b)(1)(iii)"),
            ("06b", "Rule 506(b)"),
            ("06c", "Rule 506(c)"),
            ("4a5", "Securities Act Section 4(5)"),
        ],
        [
            ("3c", "Investment Company Act Section 3(c)"),
            ("3c.1", " Section 3(c)(1)"),
            ("3c.2", " Section 3(c)(2)"),
            ("3c.3", " Section 3(c)(3)"),
            ("3c.4", " Section 3(c)(4)"),
            ("3c.5", " Section 3(c)(5)"),
            ("3c.6", " Section 3(c)(6)"),
            ("3c.7", " Section 3(c)(7)"),
        ],
        [
            (None, ""), (None, ""),
            ("3c.9", " Section 3(c)(9)"),
            ("3c.10", " Section 3(c)(10)"),
            ("3c.11", " Section 3(c)(11)"),
            ("3c.12", " Section 3(c)(12)"),
            ("3c.13", " Section 3(c)(13)"),
            ("3c.14", " Section 3(c)(14)"),
        ]
    ]
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## FORM D\n\n"
        "### Notice of Exempt Offering of Securities\n"
    ]
    offering = xml.find('offeringData')
    # Amendment banner (shown before section 1 when this filing amends a
    # previous notice).
    type_of_filing_node = offering.find('typeOfFiling')
    if type_of_filing_node:
        amendment_node = type_of_filing_node.find('newOrAmendment')
        if amendment_node and get_text(amendment_node, 'isAmendment').lower() == 'true':
            prev_accession = get_text(amendment_node, 'previousAccessionNumber')
            if prev_accession != "—":
                parts.append(f"**Notice of Amendment** (Previous Accession Number: {prev_accession})\n")
    parts.append("### 1. Issuer's Identity\n")
    issuer = xml.find('primaryIssuer')
    parts.append(f"**CIK (Filer ID Number):** {get_text(issuer, 'cik')}")
    parts.append(f"**Name of Issuer:** {get_text(issuer, 'entityName')}")
    parts.append(f"**Jurisdiction of Incorporation/Organization:** {get_text(issuer, 'jurisdictionOfInc')}")
    # Previous name appears under one of two container tags depending on
    # schema version; try the EDGAR list first, then the issuer list.
    previous_name = get_text(issuer.find('edgarPreviousNameList'), 'previousName')
    if previous_name == "—":
        previous_name = get_text(issuer.find('issuerPreviousNameList'), 'value')
    parts.append(f"**Previous Names:** {previous_name}")
    yoi_node = issuer.find('yearOfInc')
    is_over_five = get_text(yoi_node, 'overFiveYears').lower() == 'true'
    is_within_five = get_text(yoi_node, 'withinFiveYears').lower() == 'true'
    is_yet_to_be_formed = get_text(yoi_node, 'yetToBeFormed').lower() == 'true'
    year_value = get_text(yoi_node, 'value')
    parts.append("\n**Year of Incorporation/Organization**")
    checkbox_over_five = '[x]' if is_over_five else '[ ]'
    parts.append(f"- {checkbox_over_five} Over Five Years Ago")
    checkbox_within_five = '[x]' if is_within_five else '[ ]'
    specify_year_text = f" {year_value}" if is_within_five and year_value != "—" else ""
    parts.append(f"- {checkbox_within_five} Within Last Five Years (Specify Year){specify_year_text}")
    checkbox_yet_to_be = '[x]' if is_yet_to_be_formed else '[ ]'
    parts.append(f"- {checkbox_yet_to_be} Yet to Be Formed")
    entity_val = get_text(issuer, 'entityType')
    entity_other_desc = get_text(issuer, 'entityTypeOtherDesc')
    parts.append("\n**Entity Type**")
    for option in ENTITY_TYPES:
        is_checked = (option == entity_val)
        if option == "Other" and is_checked:
            specify_text = f" ({entity_other_desc})" if entity_other_desc and entity_other_desc != "—" else ""
            parts.append(f"- [x] {option}{specify_text}")
        else:
            checkbox = '[x]' if is_checked else '[ ]'
            parts.append(f"- {checkbox} {option}")
    parts.append("\n### 2. Principal Place of Business and Contact Information\n")
    addr = issuer.find('issuerAddress')
    contact_info = {
        "Name of Issuer": get_text(issuer, 'entityName'),
        "Street Address 1": get_text(addr, 'street1'),
        "Street Address 2": get_text(addr, 'street2'),
        "City": get_text(addr, 'city'),
        "State/Province/Country": get_text(addr, 'stateOrCountry'),
        "ZIP/Postal Code": get_text(addr, 'zipCode'),
        "Phone Number of Issuer": get_text(issuer, 'issuerPhoneNumber')
    }
    # Only emit populated fields; skip "—" placeholders.
    for key, value in contact_info.items():
        if value and value != "—":
            parts.append(f"**{key}:** {value}")
    related_persons = xml.find_all('relatedPersonInfo')
    if related_persons:
        parts.append("\n### 3. Related Persons\n")
        person_data = []
        for p in related_persons:
            name_node = p.find('relatedPersonName')
            addr_node = p.find('relatedPersonAddress')
            rels = [r.text for r in p.select('relatedPersonRelationshipList > relationship')]
            first_name = get_text(name_node, 'firstName')
            middle_name = get_text(name_node, 'middleName')
            full_first_name = f"{first_name} {middle_name}".strip() if middle_name and middle_name != "—" else first_name
            relationship_str = (
                f"{'[x]' if 'Executive Officer' in rels else '[ ]'} Executive Officer "
                f"{'[x]' if 'Director' in rels else '[ ]'} Director "
                f"{'[x]' if 'Promoter' in rels else '[ ]'} Promoter"
            )
            current_person = {
                "Last Name": get_text(name_node, 'lastName'),
                "First Name": full_first_name,
                "Street Address 1": get_text(addr_node, 'street1'),
                "City": get_text(addr_node, 'city'),
                "State": get_text(addr_node, 'stateOrCountry'),
                "ZIP/Postal Code": get_text(addr_node, 'zipCode'),
                "Relationship": relationship_str,
                "Clarification of Response": get_text(p, 'relationshipClarification') or "—"
            }
            person_data.append(current_person)
        if person_data:
            person_df = pd.DataFrame(person_data)
            column_order = [
                "Last Name", "First Name", "Street Address 1", "City", "State",
                "ZIP/Postal Code", "Relationship", "Clarification of Response"
            ]
            person_df = person_df.reindex(columns=column_order)
            parts.append(to_compact_markdown(person_df, index=False))
    parts.append("\n### 4. Industry Group\n")
    industry_group_node = offering.find('industryGroup')
    industry_val = get_text(industry_group_node, 'industryGroupType')
    investment_fund_info_node = industry_group_node.find('investmentFundInfo')
    investment_fund_type_val = get_text(investment_fund_info_node, 'investmentFundType')
    # "Is the issuer registered as an investment company?" checkboxes are
    # only checked when the XML carries an explicit is40Act value.
    yes_box, no_box = '[ ]', '[ ]'
    if investment_fund_info_node:
        is_investment_co_val = get_text(investment_fund_info_node, 'is40Act')
        if is_investment_co_val != "—":
            is_investment_co = is_investment_co_val.lower() in ['true', 'y']
            yes_box = '[x]' if is_investment_co else '[ ]'
            no_box = '[ ]' if is_investment_co else '[x]'
    table_rows = ["| | | |", "|:---|:---|:---|"]
    for row_tuple in INDUSTRY_GROUPS_FULL:
        cells = []
        for item in row_tuple:
            if item is None:
                cells.append("")
                continue
            item_text = item.strip()
            # The starred entry is the embedded 40-Act question, not a
            # selectable industry.
            if "*" in item_text:
                question_text = item_text.replace('*','')
                cells.append(f" {question_text} {yes_box} Yes {no_box} No")
                continue
            # XML values spell out "and" where the form shows "&".
            is_checked = (item_text.replace('&', 'and') == industry_val) or (item_text == investment_fund_type_val)
            checkbox = '[x]' if is_checked else '[ ]'
            indentation = " " * (len(item) - len(item.lstrip(' ')))
            cells.append(f"{indentation}{checkbox} {item_text}")
        table_rows.append(f"| {' | '.join(cells)} |")
    parts.append("\n".join(table_rows))
    parts.append("\n### 5. Issuer Size\n")
    issuer_size_node = offering.find('issuerSize')
    revenue_val = get_text(issuer_size_node, 'revenueRange')
    nav_val = get_text(issuer_size_node, 'aggregateNetAssetValueRange')
    table_rows = ["| **Revenue Range** | **OR** | **Aggregate Net Asset Value Range** |", "|:---|:---:|:---|"]
    for rev_option, nav_option in itertools.zip_longest(REVENUE_RANGES, AGGREGATE_NAV_RANGES, fillvalue=""):
        rev_cell = f"[{'x' if rev_option == revenue_val else ' '}] {rev_option}" if rev_option else ""
        nav_cell = f"[{'x' if nav_option == nav_val else ' '}] {nav_option}" if nav_option else ""
        table_rows.append(f"| {rev_cell} | | {nav_cell} |")
    parts.append("\n".join(table_rows))
    parts.append("\n### 6. Federal Exemption(s) and Exclusion(s) Claimed (select all that apply)\n")
    LEGACY_RULE_MAP = {
        "05": "Rule 505",
        "06": "Rule 506"
    }
    selected_exemptions = {item.text.strip().lower() for item in offering.select('federalExemptionsExclusions > item')}
    all_modern_codes = {code.lower() for group in FEDERAL_EXEMPTIONS_GROUPS_CORRECT for code, _ in group if code}
    exemptions_table_rows = ["| | | |", "|:---|:---|:---|"]
    # Transpose the three column groups into table rows, padding the
    # shorter columns with blank cells.
    for row_tuple in itertools.zip_longest(*FEDERAL_EXEMPTIONS_GROUPS_CORRECT, fillvalue=(None, "")):
        cells = []
        for code, text in row_tuple:
            if code is None:
                cells.append(text)
            else:
                is_checked = code.lower() in selected_exemptions
                checkbox = '[x]' if is_checked else '[ ]'
                indent = " " * (len(text) - len(text.lstrip(' ')))
                cells.append(f"{indent}{checkbox} {text.lstrip(' ')}")
        exemptions_table_rows.append(f"| {' | '.join(cells)} |")
    parts.append("\n".join(exemptions_table_rows))
    # Codes not in the modern table are pre-2013 legacy rules (505/506).
    legacy_selected_codes = {code for code in selected_exemptions if code not in all_modern_codes and code in LEGACY_RULE_MAP}
    if legacy_selected_codes:
        parts.append("\n**Legacy Exemptions Claimed:**")
        for code in sorted(legacy_selected_codes):
            rule_name = LEGACY_RULE_MAP[code]
            parts.append(f"- [x] {rule_name}")
    parts.append("\n### 7. Type of Filing\n")
    is_amend = get_text(type_of_filing_node.find('newOrAmendment'), 'isAmendment').lower() == 'true'
    is_new = not is_amend
    first_sale_date = get_text(type_of_filing_node.find('dateOfFirstSale'), 'value')
    filing_md = f"**New Notice:** {'[x]' if is_new else '[ ]'} **Date of First Sale:** {first_sale_date}\n\n**Amendment:** {'[x]' if is_amend else '[ ]'}"
    parts.append(filing_md)
    parts.append("\n### 8. Duration of Offering\n")
    parts.append(f"Does the issuer intend this offering to last more than one year? {get_boolean_checkbox(offering.find('durationOfOffering'), 'moreThanOneYear')}")
    parts.append("\n### 9. Type(s) of Securities Offered\n")
    # Display label -> boolean XML tag in typesOfSecuritiesOffered.
    SECURITY_TYPE_MAP = {
        "Equity": "isEquityType",
        "Debt": "isDebtType",
        "Option, Warrant or Other Right to Acquire Another Security": "isOptionToAcquireType",
        "Security to be Acquired Upon Exercise of Option, Warrant or Other Right to Acquire Security": "isSecurityToBeAcquiredType",
        "Pooled Investment Fund Interests": "isPooledInvestmentFundType",
        "Tenant-in-Common Securities": "isTenantInCommonType",
        "Mineral Property Securities": "isMineralPropertyType",
        "Other": "isOtherType"
    }
    types_node = offering.find('typesOfSecuritiesOffered')
    securities_md = []
    for display_text in SECURITY_TYPES:
        xml_tag = SECURITY_TYPE_MAP.get(display_text)
        is_checked = False
        if xml_tag and types_node:
            is_checked = get_text(types_node, xml_tag).lower() in ['true', 'y']
        checkbox = '[x]' if is_checked else '[ ]'
        line = f"- {checkbox} {display_text}"
        if display_text == "Other" and is_checked:
            other_desc = get_text(types_node, 'descriptionOfOtherType')
            if other_desc and other_desc != "—":
                line += f" ({other_desc})"
        securities_md.append(line)
    parts.append("\n".join(securities_md))
    parts.append("\n### 10. Business Combination Transaction\n")
    biz_combo_node = offering.find('businessCombinationTransaction')
    bus_combo_md = f"Is this offering being made in connection with a business combination transaction, such as a merger, acquisition or exchange offer? {get_boolean_checkbox(biz_combo_node, 'isBusinessCombinationTransaction')}"
    parts.append(bus_combo_md)
    clarification = get_text(biz_combo_node, 'clarificationOfResponse')
    if clarification != "—":
        parts.append(f"**Clarification of Response:** {clarification}")
    min_inv = get_text(offering, 'minimumInvestmentAccepted')
    parts.append(f"\n### 11. Minimum Investment\n**Minimum investment accepted from any outside investor:** {safe_format_dollar(min_inv)} USD")
    parts.append("\n### 12. Sales Compensation\n")
    recipients = offering.find_all('recipient')
    if recipients:
        comp_data = []
        for r in recipients:
            addr_node = r.find('recipientAddress')
            states = ", ".join([s.text for s in r.select('statesOfSolicitationList > value')])
            is_foreign = get_text(r, 'foreignSolicitation').lower() == 'true'
            comp_data.append({
                "Recipient Name": get_text(r, 'recipientName'),
                "Recipient CRD Number": get_text(r, 'recipientCRDNumber'),
                "Associated BD Name": get_text(r, 'associatedBDName'),
                "Associated BD CRD Number": get_text(r, 'associatedBDCRDNumber'),
                "Street 1": get_text(addr_node, 'street1'),
                "Street 2": get_text(addr_node, 'street2'),
                "City": get_text(addr_node, 'city'),
                "State": get_text(addr_node, 'stateOrCountry'),
                "ZIP Code": get_text(addr_node, 'zipCode'),
                "States of Solicitation": states,
                "Foreign Solicitation": '[x]' if is_foreign else '[ ]'
            })
        df = pd.DataFrame(comp_data)
        all_cols = ["Recipient Name", "Recipient CRD Number", "Associated BD Name", "Associated BD CRD Number", "Street 1", "Street 2", "City", "State", "ZIP Code", "States of Solicitation", "Foreign Solicitation"]
        df = df.reindex(columns=all_cols, fill_value="—")
        parts.append(to_compact_markdown(df, index=False))
    else:
        # No recipients: mirror the form's "None" checkboxes.
        none_compensation_md = [
            "**Recipient:** — **Recipient CRD Number:** [x] None",
            "**(Associated) Broker or Dealer:** — **(Associated) Broker or Dealer CRD Number:** [x] None",
        ]
        parts.append("\n\n".join(none_compensation_md))
    parts.append("\n### 13. Offering and Sales Amounts\n")
    sales = offering.find('offeringSalesAmounts')
    def format_sales_amount(label: str, value: str) -> str:
        # "Indefinite" is a legal value here and gets its own checkbox
        # rendering; numeric values get thousands separators.
        if value.strip().lower() == 'indefinite':
            return f"**{label}:** USD or [x] Indefinite"
        else:
            try:
                formatted_value = f"${int(value):,}"
                return f"**{label}:** {formatted_value} USD"
            except (ValueError, TypeError):
                return f"**{label}:** {value} USD"
    parts.append(format_sales_amount("Total Offering Amount", get_text(sales, 'totalOfferingAmount')))
    parts.append(format_sales_amount("Total Amount Sold", get_text(sales, 'totalAmountSold')))
    parts.append(format_sales_amount("Total Remaining to be Sold", get_text(sales, 'totalRemaining')))
    if (clarification := get_text(sales, 'clarificationOfResponse')) != "—":
        parts.append(f"**Clarification of Response:** {clarification}")
    parts.append("\n### 14. Investors\n")
    investors_node = offering.find('investors')
    parts.append(f"Select if securities in the offering have been or may be sold to persons who do not qualify as accredited investors, and enter the number of such non-accredited investors who already have invested in the offering: {get_boolean_checkbox(investors_node, 'hasNonAccreditedInvestors')}")
    num_non_accredited = get_text(investors_node, 'numberNonAccreditedInvestors')
    if num_non_accredited != "—":
        parts.append(f"**Number of such non-accredited investors:** {num_non_accredited}")
    parts.append(f"**Total Number of Investors Already Invested:** {get_text(investors_node, 'totalNumberAlreadyInvested')}")
    parts.append("\n### 15. Sales Commissions & Finder's Fees Expenses\n")
    instructional_text = "Provide separately the amounts of sales commissions and finders fees expenses, if any. If the amount of an expenditure is not known, provide an estimate and check the box next to the amount."
    parts.append(instructional_text)
    fees_node = offering.find('salesCommissionsFindersFees')
    def format_fee_line(label: str, fee_type_node) -> str:
        # Render a fee amount with an optional "[x] Estimate" flag.
        if not fee_type_node: return f"**{label}** $0 USD"
        amount = safe_format_dollar(get_text(fee_type_node, 'dollarAmount'))
        is_estimate = get_text(fee_type_node, 'isEstimate').lower() in ['true', 'y']
        if is_estimate:
            return f"**{label}** {amount} USD [x] Estimate"
        else:
            return f"**{label}** {amount} USD"
    parts.append(format_fee_line("Sales Commissions", fees_node.find('salesCommissions')))
    parts.append(format_fee_line("Finders' Fees", fees_node.find('findersFees')))
    if (clarification := get_text(fees_node, 'clarificationOfResponse')) != "—":
        parts.append(f"\n**Clarification of Response (if Necessary):** {clarification}")
    parts.append("\n### 16. Use of Proceeds\n")
    proceeds_node = offering.find('useOfProceeds')
    if proceeds_node:
        gross_proceeds_node = proceeds_node.find('grossProceedsUsed')
        amount_str = safe_format_dollar(get_text(gross_proceeds_node, 'dollarAmount'))
        is_estimate = get_text(gross_proceeds_node, 'isEstimate').lower() in ['true', 'y']
        estimate_text = " [x] Estimate" if is_estimate else ""
        parts.append(f"Provide the amount of the gross proceeds of the offering that has been or is proposed to be used for payments to any of the persons required to be named as executive officers, directors or promoters: **{amount_str} USD{estimate_text}**")
        if (clarification := get_text(proceeds_node, 'clarificationOfResponse')) != "—":
            parts.append(f"**Clarification of Response (if necessary):** {clarification}")
    else:
        parts.append("Provide the amount of the gross proceeds...: —")
    parts.append("\n### Signature and Submission\n")
    parts.append("Please verify the information you have entered and review the Terms of Submission below before signing and clicking SUBMIT below to file this notice.")
    terms_of_submission = [
        "**Terms of Submission**",
        "In submitting this notice, each issuer named above is:",
        "* Notifying the SEC and/or each State in which this notice is filed of the offering of securities described and undertaking to furnish them, upon written request, in the accordance with applicable law, the information furnished to offerees.",
        textwrap.fill("* Irrevocably appointing each of the Secretary of the SEC and, the Securities Administrator or other legally designated officer of the State in which the issuer maintains its principal place of business and any State in which this notice is filed, as its agents for service of process, and agreeing that these persons may accept service on its behalf, of any notice, process or pleading, and further agreeing that such service may be made by registered or certified mail, in any Federal or state action, administrative proceeding, or arbitration brought against the issuer in any place subject to the jurisdiction of the United States, if the action, proceeding or arbitration (a) arises out of any activity in connection with the offering of securities that is the subject of this notice, and (b) is founded, directly or indirectly, upon the provisions of: (i) the Securities Act of 1933, the Securities Exchange Act of 1934, the Trust Indenture Act of 1939, the Investment Company Act of 1940, or the Investment Advisers Act of 1940, or any rule or regulation under any of these statutes, or (ii) the laws of the State in which the issuer maintains its principal place of business or any State in which this notice is filed.", width=120),
        "* Certifying that, if the issuer is claiming a Regulation D exemption for the offering, the issuer is not disqualified from relying on Rule 504 or Rule 506 for one of the reasons stated in Rule 504(b)(3) or Rule 506(d)."
    ]
    parts.extend(terms_of_submission)
    parts.append("\nEach issuer identified above has read this notice, knows the contents to be true, and has duly caused this notice to be signed on its behalf by the undersigned duly authorized person.")
    parts.append("For signature, type in the signer's name or other letters or characters adopted or authorized as the signer's signature.")
    sig = offering.find('signatureBlock').find('signature')
    sig_df = pd.DataFrame([{
        "Issuer": get_text(sig, 'issuerName'),
        "Signature": get_text(sig, 'signatureName'),
        "Name of Signer": get_text(sig, 'nameOfSigner'),
        "Title": get_text(sig, 'signatureTitle'),
        "Date": get_text(sig, 'signatureDate'),
    }])
    parts.append(to_compact_markdown(sig_df, index=False))
    parts.append("\n*Persons who respond to the collection of information contained in this form are not required to respond unless the form displays a currently valid OMB number.*")
    return "\n\n".join(parts)
def parse_form_n_mfp2_xml(xml: BeautifulSoup, class_name_map: dict = None) -> str:
    """
    Parses an XML-based Form N-MFP2 (Monthly Schedule of Portfolio Holdings
    of Money Market Funds) into a comprehensive, structured Markdown document.
    This version captures all available data points for maximum detail.

    Args:
        xml: Parsed N-MFP2 submission (BeautifulSoup tree) expected to
            contain ``headerData`` and ``formData`` elements.
        class_name_map: Optional mapping of EDGAR class IDs to display
            names, used for the Part B per-class section headings.

    Returns:
        A Markdown string with filer information, Parts A-C, and signatures,
        with sections joined by blank lines.
    """
    def get_text(node, tag, strip_ns=True):
        # Case-insensitive child lookup that tolerates an optional XML
        # namespace prefix (e.g. "ns1:reportDate"). "—" is the sentinel
        # used throughout this parser for missing/empty values.
        # NOTE: `strip_ns` is accepted but never read; kept for call-site
        # compatibility.
        if not node: return "—"
        found = node.find(re.compile(f'^{tag}$', re.I)) or node.find(re.compile(f'(?:\\w+:)?{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def format_val(value_str: str, type_hint: str = 'string') -> str:
        """Robustly formats values based on their intended type."""
        if not value_str or value_str.lower() in ('—', 'n/a', 'na'): return "—"
        try:
            # Strip thousands separators before the numeric conversion.
            val_float = float(value_str.replace(',', ''))
            if type_hint == 'dollar': return f"${val_float:.2f}"
            # 'percent'/'yield' inputs are fractions (0.0123 -> "1.23%").
            if type_hint == 'percent': return f"{val_float * 100:.2f}%"
            if type_hint == 'shares': return f"{val_float:.4f}"
            if type_hint == 'yield': return f"{val_float * 100:.4f}%"
            if type_hint == 'number': return f"{val_float:.2f}"
        except (ValueError, TypeError):
            # Non-numeric input falls through to the Y/N / passthrough logic.
            pass
        if value_str.upper() == 'Y': return "Yes"
        if value_str.upper() == 'N': return "No"
        return value_str
    parts = ["# Form N-MFP2: Monthly Schedule of Portfolio Holdings"]
    # --- EDGAR header: submission type and filer credentials ---
    filer_info_section = []
    header_data = xml.find('headerData')
    if header_data:
        submission_type = get_text(header_data, 'submissionType')
        filer_info_section.append(f"**Submission Type:** {submission_type}")
        filer_creds = header_data.find('filerCredentials')
        if filer_creds:
            filer_info_section.append(f"**CIK:** {get_text(filer_creds, 'cik')}")
            filer_info_section.append(f"**CCC:** {get_text(filer_creds, 'ccc')}")
    parts.append("## N-MFP: Filer Information\n" + "\n".join(filer_info_section))
    # --- General information (report date, registrant identifiers) ---
    # NOTE(review): `form_data` / `gen_info` are dereferenced without a None
    # guard — a filing missing <formData> would raise AttributeError here.
    # Confirm upstream validation guarantees these elements exist.
    form_data = xml.find('formData')
    gen_info = form_data.find('generalInfo')
    filing_info_section = [f"### General Information"]
    gen_data = {
        "Report for (YYYY-MM-DD)": get_text(gen_info, 'reportDate'),
        "CIK Number of Registrant": get_text(gen_info, 'cik'),
        "LEI of Registrant": get_text(gen_info, 'registrantLEIId'),
        "EDGAR Series Identifier": get_text(gen_info, 'seriesId'),
        "Total number of share classes in the series": get_text(gen_info, 'totalShareClassesInSeries'),
        "Is this the fund's final filing on Form N- MFP?": format_val(get_text(gen_info, 'finalFilingFlag')),
        "Has the fund acquired or merged with another fund during the reporting period?": format_val(get_text(gen_info, 'fundAcqrdOrMrgdWthAnthrFlag')),
    }
    for key, val in gen_data.items():
        filing_info_section.append(f"**{key}:** {val}")
    parts.append("\n\n".join(filing_info_section))
    # --- Part A: series-level information ---
    series_info = form_data.find('seriesLevelInfo')
    parts.append("\n## Part A: Series-Level Information about the Fund")
    # Service providers (Items A.2-A.6) are collected into one table.
    # Each node is only reported when it exists AND has visible text.
    service_providers = []
    adviser_node = series_info.find("adviser")
    if adviser_node and adviser_node.get_text(strip=True):
        service_providers.append({"Item": "A.2", "Role": "Investment Adviser", "Details": get_text(adviser_node, 'adviserName'), "File/CIK Number": get_text(adviser_node, 'adviserFileNumber')})
    sub_adviser_node = series_info.find("subAdviser")
    if sub_adviser_node and sub_adviser_node.get_text(strip=True):
        service_providers.append({"Item": "A.3", "Role": "Sub-Adviser", "Details": get_text(sub_adviser_node, 'adviserName'), "File/CIK Number": get_text(sub_adviser_node, 'adviserFileNumber')})
    accountant_node = series_info.find('indpPubAccountant')
    if accountant_node and accountant_node.get_text(strip=True):
        acc_details = f"{get_text(accountant_node, 'name')} City: {get_text(accountant_node, 'city')} State: {get_text(accountant_node, 'stateCountry')}"
        service_providers.append({"Item": "A.4", "Role": "Independent Public Accountant", "Details": acc_details, "File/CIK Number": "—"})
    admin_node = series_info.find('administrator')
    if admin_node and admin_node.get_text(strip=True):
        service_providers.append({"Item": "A.5", "Role": "Administrator", "Details": get_text(admin_node, 'administratorName'), "File/CIK Number": "—"})
    transfer_agent_node = series_info.find('transferAgent')
    if transfer_agent_node and transfer_agent_node.get_text(strip=True):
        ta_details = f"{get_text(transfer_agent_node, 'name')} CIK: {get_text(transfer_agent_node, 'cik')}"
        service_providers.append({"Item": "A.6", "Role": "Transfer Agent", "Details": ta_details, "File/CIK Number": get_text(transfer_agent_node, 'fileNumber')})
    if service_providers:
        parts.append("\n### Service Providers\n" + to_compact_markdown(pd.DataFrame(service_providers), index=False))
    # Fund characteristics & assets (Items A.1, A.7-A.12 plus dollar totals).
    fund_chars = {
        "A.1 - Securities Act File Number": get_text(series_info, 'securitiesActFileNumber'),
        "A.7 - Is this a Feeder Fund?": format_val(get_text(series_info, 'feederFundFlag')),
        "A.8 - Is this a Master Fund?": format_val(get_text(series_info, 'masterFundFlag')),
        "A.9 - Is this series primarily used to fund insurance company separate accounts?": format_val(get_text(series_info, 'seriesFundInsuCmpnySepAccntFlag')),
        "A.10 - Money Market Fund Category": get_text(series_info, 'moneyMarketFundCategory'),
        "A.10.a - Is this fund an exempt retail fund?": format_val(get_text(series_info, 'fundExemptRetailFlag')),
        "A.11 - WAM": f"{get_text(series_info, 'averagePortfolioMaturity')} days",
        "A.12 - WAL": f"{get_text(series_info, 'averageLifeMaturity')} days",
        "Does the fund apply liquidity fees?": format_val(get_text(series_info, 'liquidityFeeFundApplyFlag')),
        "Total Value of Portfolio Securities": format_val(get_text(series_info, 'totalValuePortfolioSecurities'), 'dollar'),
        "Amortized Cost of Portfolio Securities": format_val(get_text(series_info, 'amortizedCostPortfolioSecurities'), 'dollar'),
        "Cash": format_val(get_text(series_info, 'cash'), 'dollar'),
        "Total Other Assets": format_val(get_text(series_info, 'totalValueOtherAssets'), 'dollar'),
        "Total Liabilities": format_val(get_text(series_info, 'totalValueLiabilities'), 'dollar'),
        "Net Assets of Series": format_val(get_text(series_info, 'netAssetOfSeries'), 'dollar'),
        "Number of Shares Outstanding (Series)": format_val(get_text(series_info, 'numberOfSharesOutstanding'), 'number'),
        "Stable Price Per Share": format_val(get_text(series_info, 'stablePricePerShare'), 'dollar'),
        "7-Day Gross Yield": format_val(get_text(series_info, 'sevenDayGrossYield'), 'yield')
    }
    parts.append("\n### Fund Characteristics & Assets")
    # NOTE(review): the " days" filter looks intended to drop missing WAM/WAL
    # rows, but a missing value renders as "— days" (em dash + " days"), which
    # this check does not match — confirm whether those rows should be hidden.
    for key, val in fund_chars.items():
        if val not in ("—", " days"): parts.append(f"- **{key}:** {val}")
    # Item A.13: weekly/daily liquid assets reported for each Friday (weeks 1-5).
    liquid_data = []
    daily_assets_node = series_info.find('totalValueDailyLiquidAssets')
    if daily_assets_node:
        for i in range(1, 6):
            day_tag = f'fridayDay{i}'
            if get_text(daily_assets_node, day_tag) != "—":
                # Each metric is re-resolved from its own parent node; the
                # daily nodes use fridayDay{i} tags, the weekly ones fridayWeek{i}.
                liquid_data.append({
                    "Period": f"Friday, Week {i}",
                    "Daily Liquid Assets ($)": format_val(get_text(series_info.find('totalValueDailyLiquidAssets'), f'fridayDay{i}'), 'dollar'),
                    "Weekly Liquid Assets ($)": format_val(get_text(series_info.find('totalValueWeeklyLiquidAssets'), f'fridayWeek{i}'), 'dollar'),
                    "Daily Liquid Assets (%)": format_val(get_text(series_info.find('percentageDailyLiquidAssets'), f'fridayDay{i}'), 'percent'),
                    "Weekly Liquid Assets (%)": format_val(get_text(series_info.find('percentageWeeklyLiquidAssets'), f'fridayWeek{i}'), 'percent'),
                })
    if liquid_data:
        parts.append("\n### A.13 - Weekly Liquid Assets\n" + to_compact_markdown(pd.DataFrame(liquid_data), index=False))
    # Item A.23: series-level weekly NAV per share; the week number is
    # recovered from the trailing digits of each fridayWeekN tag name.
    series_level_nav_data = []
    nav_node = series_info.find('netAssetValue')
    if nav_node:
        for week_node in nav_node.find_all(re.compile(r'^(?:\w+:)?fridayWeek\d+$', re.I)):
            if week_node.text.strip():
                week_number_match = re.search(r'(\d+)$', week_node.name)
                week_number = week_number_match.group(1) if week_number_match else '?'
                series_level_nav_data.append({
                    "Period": f"Friday, Week {week_number}",
                    "Net Asset Value Per Share": format_val(week_node.text, 'shares')
                })
    if series_level_nav_data:
        parts.append("\n### A.23 - Weekly Net Asset Value Per Share (Series-Level)\n" + to_compact_markdown(pd.DataFrame(series_level_nav_data), index=False))
    # --- Part B: one section per share class ---
    class_level_nodes = form_data.find_all('classLevelInfo')
    if class_level_nodes:
        parts.append("\n## Part B: Class-Level Information about the Fund")
        if class_name_map is None:
            class_name_map = {}
        for i, node in enumerate(class_level_nodes):
            class_id = get_text(node, 'classesId')
            class_name = class_name_map.get(class_id, f"Unknown Class ({class_id})")
            parts.append(f"\n### Class: {class_name}")
            class_details = {
                "B.2 - Minimum Initial Investment": format_val(get_text(node, 'minInitialInvestment'), 'dollar'),
                "B.3 - Net Assets of Class": format_val(get_text(node, 'netAssetsOfClass'), 'dollar'),
                "B.4 - Shares Outstanding": format_val(get_text(node, 'numberOfSharesOutstanding'), 'number'),
                "B.7.7 - 7-Day Net Yield": format_val(get_text(node, 'sevenDayNetYield'), 'yield'),
                "B.8 - Person Paying for Fund Expenses?": format_val(get_text(node, 'personPayForFundFlag')),
            }
            if get_text(node, 'personPayForFundFlag').upper() == 'Y':
                class_details["Expense Reimbursement/Waiver Description"] = get_text(node, 'nameOfPersonDescExpensePay')
            for key, val in class_details.items():
                if val != "—": parts.append(f"- **{key}:** {val}")
            # Weekly flows (B.5/B.6): direct fridayWeekN children carry
            # subscriptions/redemptions; NAV per share lives under a sibling
            # netAssetPerShare node keyed by the same week number.
            weekly_flow_data = []
            nav_per_share_node = node.find('netAssetPerShare')
            for week_node in node.find_all(re.compile(r'^(?:\w+:)?fridayWeek\d+$', re.I), recursive=False):
                week_number_match = re.search(r'(\d+)$', week_node.name)
                if not week_number_match:
                    continue
                week_number = week_number_match.group(1)
                subs = get_text(week_node, 'weeklyGrossSubscriptions')
                reds = get_text(week_node, 'weeklyGrossRedemptions')
                nav_per_share = "—"
                if nav_per_share_node:
                    nav_week_tag = nav_per_share_node.find(re.compile(rf'^(?:\w+:)?fridayWeek{week_number}$', re.I))
                    if nav_week_tag:
                        nav_per_share = nav_week_tag.text.strip()
                # Unparseable amounts count as 0.0 for the emit-row decision.
                try: subs_val = float(subs)
                except (ValueError, TypeError): subs_val = 0.0
                try: reds_val = float(reds)
                except (ValueError, TypeError): reds_val = 0.0
                # Only emit a row when the week has flows or a reported NAV.
                if subs_val > 0 or reds_val > 0 or nav_per_share != "—":
                    weekly_flow_data.append({
                        "Period": f"Week {week_number}",
                        "B.5 - Net Asset Value Per Share": format_val(nav_per_share, 'shares'),
                        "B.6 - Gross Subscriptions ($)": format_val(subs, 'dollar'),
                        "B.6 - Gross Redemptions ($)": format_val(reds, 'dollar'),
                    })
            if weekly_flow_data:
                parts.append("\n**Weekly Flows and NAV**\n" + to_compact_markdown(pd.DataFrame(weekly_flow_data), index=False))
            total_node = node.find("totalForTheMonthReported")
            if total_node:
                parts.append("\n**Total for the month reported:**")
                parts.append(f"- **Total Gross Subscriptions:** {format_val(get_text(total_node, 'weeklyGrossSubscriptions'), 'dollar')}")
                parts.append(f"- **Total Gross Redemptions:** {format_val(get_text(total_node, 'weeklyGrossRedemptions'), 'dollar')}")
    # --- Part C: one section per portfolio security ---
    securities_nodes = form_data.find_all('scheduleOfPortfolioSecuritiesInfo')
    if securities_nodes:
        parts.append("\n## Part C: Schedule of Portfolio Securities")
        for i, node in enumerate(securities_nodes):
            parts.append(f"\n### Security {i+1}: {get_text(node, 'nameOfIssuer')}")
            security_details = [
                f"**C.1 - Title:** {get_text(node, 'titleOfIssuer')}",
                f"**C.6 - Investment Category:** {get_text(node, 'investmentCategory')}",
            ]
            # Identifiers are collapsed into one line; missing ones are dropped.
            id_data = { "C.3 - CUSIP": get_text(node, 'CUSIPMember'), "C.4 - ISIN": get_text(node, 'ISINId'), "C.3 - LEI": get_text(node, 'LEIID'), "C.5 - Other ID": get_text(node, 'otherUniqueId')}
            id_str = ", ".join([f"{k}: {v}" for k, v in id_data.items() if v != "—"])
            if id_str: security_details.append(f"**Identifiers:** {id_str}")
            security_details.extend([
                f"**C.18 - Value (incl. sponsor support):** {format_val(get_text(node, 'includingValueOfAnySponsorSupport'), 'dollar')}",
                f"**C.18.a - Value (excl. sponsor support):** {format_val(get_text(node, 'excludingValueOfAnySponsorSupport'), 'dollar')}",
                f"**C.19 - Percentage of Net Assets:** {format_val(get_text(node, 'percentageOfMoneyMarketFundNetAssets'), 'percent')}",
                f"**C.17 - Yield as of Reporting Date:** {format_val(get_text(node, 'yieldOfTheSecurityAsOfReportingDate'), 'yield')}",
                f"**C.11 - Maturity Date (WAM):** {get_text(node, 'investmentMaturityDateWAM')}",
                f"**C.12 - Maturity Date (WAL):** {get_text(node, 'investmentMaturityDateWAL')}",
                f"**C.13 - Final Legal Maturity Date:** {get_text(node, 'finalLegalInvestmentMaturityDate')}",
            ])
            ratings = [f"{get_text(n, 'nameOfNRSRO')}: {get_text(n, 'rating')}" for n in node.find_all('NRSRO')]
            if ratings: security_details.append(f"**C.10 - Ratings:** {'; '.join(ratings)}")
            # Yes/No characteristic flags, collapsed into a single line.
            flags = {
                "C.9 Eligible Security?": format_val(get_text(node, 'securityEligibilityFlag')),
                "C.14 Has Demand Feature?": format_val(get_text(node, 'securityDemandFeatureFlag')),
                "C.15 Has Guarantee?": format_val(get_text(node, 'securityGuaranteeFlag')),
                "C.16 Has Enhancement?": format_val(get_text(node, 'securityEnhancementsFlag')),
                "C.22 Is an Illiquid Security?": format_val(get_text(node, 'illiquidSecurityFlag')),
                "C.20 Is a Daily Liquid Asset?": format_val(get_text(node, 'dailyLiquidAssetSecurityFlag')),
                "C.21 Is a Weekly Liquid Asset?": format_val(get_text(node, 'weeklyLiquidAssetSecurityFlag')),
                "C.23 Categorized at Level 3?": format_val(get_text(node, 'securityCategorizedAtLevel3Flag')),
            }
            flag_str = ", ".join([f"{k} {v}" for k, v in flags.items() if v != "—"])
            if flag_str: security_details.append(f"**Characteristics:** {flag_str}")
            parts.append("\n".join(f"- {item}" for item in security_details))
            # C.14.a: demand feature details plus its own NRSRO ratings.
            demand_feature_node = node.find('demandFeature')
            if demand_feature_node and demand_feature_node.get_text(strip=True):
                parts.append("\n**C.14.a - Demand Feature Details:**")
                feature_details = {
                    "Issuer": get_text(demand_feature_node, 'identityOfDemandFeatureIssuer'),
                    "Amount Provided": get_text(demand_feature_node, 'amountProvidedByDemandFeatureIssuer'),
                    "Remaining Period": f"{get_text(demand_feature_node, 'remainingPeriodDemandFeature')} days",
                    "Is Conditional?": format_val(get_text(demand_feature_node, 'demandFeatureConditionalFlag')),
                }
                for key, val in feature_details.items():
                    if val != "—": parts.append(f"- **{key}:** {val}")
                ratings = [f"{get_text(n, 'nameOfNRSRO')}: {get_text(n, 'rating')}" for n in demand_feature_node.find_all('demandFeatureRatingOrNRSRO')]
                if ratings: parts.append(f"- **Ratings:** {'; '.join(ratings)}")
            # C.15.a: guarantor details plus ratings.
            guarantor_node = node.find('guarantor')
            if guarantor_node and guarantor_node.get_text(strip=True):
                parts.append("\n**C.15.a - Guarantor Details:**")
                guarantor_details = {
                    "Identity of Guarantor": get_text(guarantor_node, 'identityOfTheGuarantor'),
                    "Amount Provided": get_text(guarantor_node, 'amountProvidedByGuarantor'),
                }
                for key, val in guarantor_details.items():
                    if val != "—": parts.append(f"- **{key}:** {val}")
                ratings = [f"{get_text(n, 'nameOfNRSRO')}: {get_text(n, 'rating')}" for n in guarantor_node.find_all('guarantorRatingOrNRSRO')]
                if ratings: parts.append(f"- **Ratings:** {'; '.join(ratings)}")
            # C.16.a: credit/liquidity enhancement details plus ratings.
            enhancement_node = node.find('enhancementProvider')
            if enhancement_node and enhancement_node.get_text(strip=True):
                parts.append("\n**C.16.a - Enhancement Details:**")
                enhancement_details = {
                    "Identity of Provider": get_text(enhancement_node, 'identityOfTheEnhancementProvider'),
                    "Type of Enhancement": get_text(enhancement_node, 'typeOfEnhancement'),
                    "Amount Provided": get_text(enhancement_node, 'amountProvidedByEnhancement'),
                }
                for key, val in enhancement_details.items():
                    if val != "—": parts.append(f"- **{key}:** {val}")
                ratings = [f"{get_text(n, 'nameOfNRSRO')}: {get_text(n, 'rating')}" for n in enhancement_node.find_all('enhancementRatingOrNRSRO')]
                if ratings: parts.append(f"- **Ratings:** {'; '.join(ratings)}")
            # C.8: repurchase agreement flag and its collateral table.
            repo_node = node.find('repurchaseAgreement')
            if repo_node and repo_node.get_text(strip=True):
                parts.append("\n**C.8 - Repurchase Agreement Details:**")
                parts.append(f"- **Is Open?:** {format_val(get_text(repo_node, 'repurchaseAgreementOpenFlag'))}")
                collateral_issuers = repo_node.find_all('collateralIssuers')
                if collateral_issuers:
                    collateral_data = []
                    for issuer in collateral_issuers:
                        # Coupon/yield is formatted directly (not via format_val)
                        # so the raw string is preserved when not numeric.
                        coupon_yield_str = get_text(issuer, 'couponOrYield')
                        try:
                            coupon_yield_formatted = f"{float(coupon_yield_str):.4f}%"
                        except (ValueError, TypeError):
                            coupon_yield_formatted = coupon_yield_str
                        collateral_data.append({
                            "Issuer Name": get_text(issuer, 'nameOfCollateralIssuer'),
                            "Maturity Date": get_text(issuer.find('maturityDate'), 'date'),
                            "Coupon/Yield": coupon_yield_formatted,
                            "Principal Amount": format_val(get_text(issuer, 'principalAmountToTheNearestCent'), 'dollar'),
                            "Collateral Value": format_val(get_text(issuer, 'valueOfCollateralToTheNearestCent'), 'dollar'),
                            "Category": get_text(issuer, 'ctgryInvestmentsRprsntsCollateral'),
                        })
                    parts.append("\n**Collateral:**\n" + to_compact_markdown(pd.DataFrame(collateral_data), index=False))
    # --- Signature block ---
    sig = form_data.find('signature')
    if sig:
        parts.append("\n## N-MFP: Signatures")
        parts.append(f"**Registrant:** {get_text(sig, 'registrant')}")
        parts.append(f"**Date:** {get_text(sig, 'signatureDate')}")
        parts.append(f"**By:** {get_text(sig, 'signature')}")
        parts.append(f"**Name of Signing Officer:** {get_text(sig, 'nameOfSigningOfficer')}")
        parts.append(f"**Title of Signing Officer:** {get_text(sig, 'titleOfSigningOfficer')}")
    return "\n\n".join(parts)
def parse_form497_file(header_content: str) -> str:
    """
    Top-level parser for Form 497 content.

    Combines the general SEC header metadata with the series/class SGML
    data embedded within the same header text, separated by a blank line.
    """
    sections = (
        parse_sec_header(header_content),
        parse_form497_sgml(header_content),
    )
    return "\n\n".join(sections).strip()
def parse_form497_sgml(header_content: str) -> str:
    """
    Parses the SGML content from within a Form 497 header using a robust
    regex-based approach to handle the malformed/unclosed tags correctly.

    Returns a Markdown section with one heading per series and a table of
    its class/contract records, a sentinel string when no data block is
    found, or "" when the block contains no series.
    """
    # NOTE(review): the regex literals in this function look truncated — the
    # SGML tag names (e.g. the SERIES-AND-CLASSES-CONTRACTS-DATA delimiters
    # and the per-field tags) appear to have been stripped, leaving empty or
    # near-empty patterns such as r'' and (?=||$). As written, these patterns
    # cannot match the intended tags; verify against version control before
    # relying on this parser.
    sgml_match = re.search(
        r"(.*?) ",
        header_content,
        re.S | re.I
    )
    if not sgml_match:
        # Sentinel text surfaced to the caller's Markdown output.
        return "NO SERIES-AND-CLASSES-CONTRACTS-DATA BLOCK FOUND"
    sgml_content = sgml_match.group(1)
    md_parts = ["## Series and Classes Contracts Data"]
    # Split into one chunk per series; [1:] drops the pre-series preamble.
    series_blocks = re.split(r'', sgml_content, flags=re.I)[1:]
    if not series_blocks:
        return ""
    for series_block in series_blocks:
        # Capture runs to end-of-line or the next '<', i.e. the tag's value.
        series_name_match = re.search(r'\s*([^\n<]+)', series_block, re.I)
        series_id_match = re.search(r'\s*([^\n<]+)', series_block, re.I)
        series_name = series_name_match.group(1).strip() if series_name_match else "—"
        series_id = series_id_match.group(1).strip() if series_id_match else "—"
        md_parts.append(f"\n### {series_name} (Series ID: {series_id})")
        class_records = []
        # Each class/contract block runs until the next block boundary or
        # end of the series text (lookahead, so boundaries are not consumed).
        class_contract_blocks = re.findall(
            r'(.*?)(?=||$)',
            series_block,
            re.S | re.I
        )
        for class_block in class_contract_blocks:
            id_match = re.search(r'\s*([^\n<]+)', class_block, re.I)
            name_match = re.search(r'\s*([^\n<]+)', class_block, re.I)
            ticker_match = re.search(r'\s*([^\n<]+)', class_block, re.I)
            record = {
                'Class ID': id_match.group(1).strip() if id_match else "—",
                'Class Name': name_match.group(1).strip() if name_match else "—",
                'Ticker Symbol': ticker_match.group(1).strip() if ticker_match else "—",
            }
            class_records.append(record)
        if class_records:
            # Fixed column order for presentation, independent of dict order.
            df = pd.DataFrame(class_records, columns=['Class Name', 'Ticker Symbol', 'Class ID'])
            md_table = df_to_markdown(df, is_clean=True)
            md_parts.append("---\n")
            md_parts.append(md_table)
            md_parts.append("\n---")
    final_md = "\n".join(md_parts).strip()
    return final_md
def parse_form_n_cen_xml(xml: BeautifulSoup) -> str:
"""
Parses an XML-based Form N-CEN into a structured Markdown document.
"""
def get_text(node, tag, strip_ns=True):
if not node: return "—"
found = node.find(re.compile(f'^{tag}$', re.I)) or node.find(re.compile(f'(?:\\w+:)?{tag}$', re.I))
return found.text.strip() if found and found.text else "—"
def format_val(value_str: str, type_hint: str = 'string') -> str:
"""Robustly formats values based on their intended type."""
if not value_str or value_str.lower() in ('—', 'n/a', 'na'): return "—"
try:
val_float = float(value_str.replace(',', ''))
if type_hint == 'dollar': return f"${val_float:,.2f}"
if type_hint == 'percent': return f"{val_float:.2f}%"
if type_hint == 'shares': return f"{val_float:.4f}"
if type_hint == 'number': return f"{val_float:,.2f}"
except (ValueError, TypeError):
pass
if value_str.upper() == 'Y' or value_str.lower() == 'true': return "Yes"
if value_str.upper() == 'N' or value_str.lower() == 'false': return "No"
return value_str
ORGANIZATION_TYPES = {
"N-1A": "a. Open-end management investment company registered under the Act on Form N-1A",
"N-2": "b. Closed-end management investment company registered under the Act on Form N-2",
"N-3": "c. Separate account offering variable annuity contracts which is registered under the Act as a management investment company on Form N-3",
"N-4": "d. Separate account offering variable annuity contracts which is registered under the Act as a unit investment trust on Form N-4",
"N-5": "e. Small business investment company registered under the Act on Form N-5",
"N-6": "f. Separate account offering variable insurance contracts which is registered under the Act as a unit investment trust on Form N-6",
"N-8B-2": "g. Unit investment trust registered under the Act on Form N-8B-2"
}
FUND_TYPES_MAP = [
("Exchange-Traded Fund or Exchange-Traded Managed Fund or offers a Class that itself is an Exchange-Traded Fund or Exchange-Traded Managed Fund", None),
("Exchange-Traded Fund", "i."),
("Exchange-Traded Managed Fund", "ii."),
("Index Fund", "b."),
("Seeks to achieve performance results that are a multiple of a benchmark, the inverse of a benchmark, or a multiple of the inverse of a benchmark", "c."),
("Interval Fund", "d."),
("Fund of Funds", "e."),
("Master-Feeder Fund", "f."),
("Money Market Fund", "g."),
("Target Date Fund", "h."),
("Underlying fund to a variable annuity or variable life insurance contract", "i."),
("N/A", None)
]
parts = ["# Form N-CEN: Annual Report for Registered Investment Companies"]
header_data = xml.find('headerData')
if header_data:
filer_info_section = [f"**Submission Type:** {get_text(header_data, 'submissionType')}"]
filer_creds = header_data.find('filer')
if filer_creds:
filer_info_section.append(f"**CIK:** {get_text(filer_creds, 'cik')}")
parts.append("## N-CEN: Filer Information\n" + "\n".join(filer_info_section))
parts.append("\n## N-CEN: Series/Class (Contract) Information")
form_data = xml.find('formData')
if form_data and (sc_info_form := form_data.find('seriesClass')):
all_flag_node = sc_info_form.find('rptIncludeAllSeriesFlag')
if all_flag_node is not None:
include_all = all_flag_node.text.lower() == 'true'
checkbox = '[x]' if include_all else '[ ]'
parts.append(f"**Report includes all Series and Classes?:** {checkbox}")
if header_data and (sc_info_header := header_data.find('seriesClass')):
records = sc_info_header.find_all('rptSeriesClassInfo')
series_records = []
class_records = []
for record in records:
series_id = get_text(record, 'seriesId')
if series_id != "—":
series_records.append(series_id)
for class_info in record.find_all('classInfo'):
class_id = get_text(class_info, 'classId')
if class_id != "—":
class_records.append(class_id)
if series_records:
for i, s_id in enumerate(series_records, 1):
parts.append(f"\n**Series ID Record:{i}**\n- **Series ID:** {s_id}")
if class_records:
for i, c_id in enumerate(class_records, 1):
parts.append(f"\n**Class ID Record:{i}**\n- **Class ID:** {c_id}")
if not form_data:
return "\n\n".join(parts)
gen_info = form_data.find('generalInfo')
if gen_info:
filing_info_section = [f"### N-CEN: Part A: General Information"]
report_period = gen_info.get('reportEndingPeriod', '—')
is_lt_12 = gen_info.get('isReportPeriodLt12', '—')
gen_data = {
"Item A.1.a - Report for period ending": report_period,
"Item A.1.b - Does this report cover a period of less than 12 months?": format_val(is_lt_12),
}
for key, val in gen_data.items():
filing_info_section.append(f"**{key}:** {val}")
parts.append("\n\n".join(filing_info_section))
reg_info = form_data.find('registrantInfo')
websites_node = reg_info.find('websites')
website_node = websites_node.find('website') if websites_node else None
if reg_info:
parts.append("\n## N-CEN: Part B: Information About the Registrant")
b1_b2_details = {
"Item B.1.a - Full name of Registrant": get_text(reg_info, 'registrantFullName'),
"Item B.1.b - Investment Company Act file number": get_text(reg_info, 'investmentCompFileNo'),
"Item B.1.c - CIK": get_text(reg_info, 'registrantCik'),
"Item B.1.d - LEI": get_text(reg_info, 'registrantLei'),
"Item B.2.a - Street 1": get_text(reg_info, 'registrantstreet1'),
"Item B.2.a - Street 2": get_text(reg_info, 'registrantstreet2'),
"Item B.2.b - City": get_text(reg_info, 'registrantcity'),
"Item B.2.c - State": get_text(reg_info, 'registrantstate').replace('US-', ''),
"Item B.2.e - Zip Code": get_text(reg_info, 'registrantzipCode'),
"Item B.2.f - Telephone": get_text(reg_info, 'registrantphoneNumber'),
"Item B.2.g - Public Website": website_node.get('webpage', '—') if website_node else '—',
}
for key, val in b1_b2_details.items():
if val and val.strip() != "—": parts.append(f"- **{key}:** {val}")
locations = reg_info.find_all('locationBooksRecord')
if locations:
parts.append("\n### Item B.3 - Location of books and records")
for i, loc in enumerate(locations, 1):
parts.append(f"\n**Location books Record: {i}**")
state_country_node = loc.find('officeStateCountry')
location_details = {
"a. Name of person (e.g., a custodian of records)": get_text(loc, 'officeName'),
"b. Street 1": get_text(loc, 'officeAddress1'),
"Street 2": get_text(loc, 'officeAddress2'),
"c. City": get_text(loc, 'officeCity'),
"d. State, if applicable": (state_country_node.get('officeState', '—') if state_country_node else '—').replace('US-', ''),
"e. Foreign country, if applicable": state_country_node.get('officeCountry', '—') if state_country_node else '—',
"f. Zip code and zip code extension, or foreign postal code": get_text(loc, 'officeRecordsZipCode'),
"g. Telephone number": get_text(loc, 'officePhone'),
"h. Briefly describe the books and records kept at this location": get_text(loc, 'booksRecordsDesc'),
}
for key, val in location_details.items():
if val and val.strip() != "—":
key_formatted = key.replace(" ", " ")
parts.append(f"- **{key_formatted}:** {val}")
family_inv_comp_node = reg_info.find('registrantFamilyInvComp')
b4_b5_details = {
"Item B.4.a - Is this the first filing by the Registrant?": format_val(get_text(reg_info, 'isRegistrantFirstFiling')),
"Item B.4.b - Is this the last filing by the Registrant?": format_val(get_text(reg_info, 'isRegistrantLastFiling')),
"Item B.5.a - Is the Registrant part of a family of investment companies?":
format_val(family_inv_comp_node.get('isRegistrantFamilyInvComp') if family_inv_comp_node else "—"),
"Item B.5.a.i - Full name of family of investment companies":
family_inv_comp_node.get('familyInvCompFullName', '—') if family_inv_comp_node else "—",
}
for key, val in b4_b5_details.items():
if val and val.strip() != "—": parts.append(f"- **{key}:** {val}")
parts.append("\n### Item B.6 - Organization")
classification_type = get_text(reg_info, 'registrantClassificationType')
for code, description in ORGANIZATION_TYPES.items():
checkbox = '[x]' if code == classification_type else '[ ]'
parts.append(f"- {checkbox} {description}")
parts.append(f"- **Item B.6.i - Total number of Series:** {get_text(reg_info, 'totalSeries')}")
parts.append(f"- **Item B.7 - Is the Registrant the issuer of a class of securities registered under the Securities Act?:** {format_val(get_text(reg_info, 'isSecuritiesActRegistration'))}")
directors = reg_info.find_all('director')
if directors:
parts.append("\n### Item B.8 - Directors")
dir_data = []
for d in directors:
file_nums = ", ".join(fn.get('fileNumber') for fn in d.find_all('fileNumberInfo'))
dir_data.append({
"Name": get_text(d, 'directorName'),
"Is Interested Person?": format_val(get_text(d, 'isDirectorInterestedPerson')),
"Other Investment Company File Numbers": file_nums if file_nums else "N/A"
})
parts.append(to_compact_markdown(pd.DataFrame(dir_data), index=False))
chief_compliance_officers = reg_info.find_all('chiefComplianceOfficer')
parts.append("\n### Item B.9. Chief compliance officer.")
if chief_compliance_officers:
for i, cco in enumerate(chief_compliance_officers, 1):
parts.append(f"\n**Chief compliance officer Record: {i}**")
state_country_node = cco.find('ccoStateCountry')
cco_details = {
"a. Full Name": get_text(cco, 'ccoName'),
"b. CRD Number, if any": get_text(cco, 'crdNumber'),
"c. Street Address 1": get_text(cco, 'ccoStreet1'),
" Street Address 2": get_text(cco, 'ccoStreet2'),
"d. City": get_text(cco, 'ccoCity'),
"e. State, if applicable": (state_country_node.get('ccoState', '—') if state_country_node else '—').replace('US-', ''),
"f. Foreign country, if applicable": state_country_node.get('ccoCountry', '—') if state_country_node else '—',
"g. Zip code": get_text(cco, 'ccoZipCode'),
"h. Telephone number": get_text(cco, 'ccoPhone'),
"i. Has the chief compliance officer changed since the last filing?": format_val(get_text(cco, 'isCcoChangedSinceLastFiling')),
}
for key, val in cco_details.items():
if val and val.strip() != "—":
key_formatted = key.replace(" ", " ")
parts.append(f"- **{key_formatted}:** {val}")
employers = cco.find_all('ccoEmployer')
if employers:
parts.append("\nIf the chief compliance officer is compensated or employed by any person other than the Registrant, provide:")
for j, emp in enumerate(employers, 1):
parts.append(f"**CCO employer Record: {j}**")
parts.append(f"- **i. Name of the person:** {get_text(emp, 'ccoEmployerName')}")
parts.append(f"- **ii. Person’s IRS Employer Identification Number:** {get_text(emp, 'ccoEmployerId')}")
else:
parts.append("No Chief Compliance Officer reported.")
parts.append("\n### Item B.10. Matters for security holder vote.")
submitted_matter_val = get_text(reg_info, 'isRegistrantSubmittedMatter')
parts.append(f"- **Were any matters submitted by the Registrant for its security holders’ vote during the reporting period?** {format_val(submitted_matter_val)}")
security_matter_node = reg_info.find('securityMatterSeriesInfo')
if security_matter_node:
series_infos = security_matter_node.find_all('seriesInfo')
if series_infos:
series_data = [{"Series Name": s.get('seriesName'), "Series ID": s.get('seriesId')} for s in series_infos]
parts.append(to_compact_markdown(pd.DataFrame(series_data), index=False))
covered_by_insurance_node = reg_info.find('coveredByInsurancePolicy')
b11_b15_details = {
"Item B.11.a - Have there been any material legal proceedings?": format_val(get_text(reg_info, 'isPreviousLegalProceeding')),
"Item B.11.b - Has any proceeding previously reported been terminated?": format_val(get_text(reg_info, 'isPreviousProceedingTerminated')),
"Item B.12.a - Were any claims with respect to the Registrant filed under a fidelity bond?": format_val(get_text(reg_info, 'isClaimFiled')),
"Item B.13.a - Are the Registrant's officers or directors covered under any insurance policy?": \
format_val(covered_by_insurance_node.get('isCoveredByInsurancePolicy') if covered_by_insurance_node else "—"),
"Item B.13.a.i - If yes, were any claims filed under the policy during the reporting period?": \
format_val(covered_by_insurance_node.get('isClaimFiledDuringPeriod') if covered_by_insurance_node else "—"),
"Item B.14 - Did an affiliated person provide any form of financial support to the Registrant?": format_val(get_text(reg_info, 'isFinancialSupportDuringPeriod')),
"Item B.15.a - Did the Registrant rely on any exemptive orders from the Commission?": format_val(get_text(reg_info, 'isExemptionFromAct')),
}
for key, val in b11_b15_details.items():
if val and val.strip() != "—": parts.append(f"- **{key}:** {val}")
release_numbers = reg_info.find_all('releaseNumberInfo')
if release_numbers:
release_list = [f" - {rn.get('releaseNumber')}" for rn in release_numbers]
parts.append("- **Item B.15.a.i - Release numbers:**\n" + "\n".join(release_list))
underwriters = reg_info.find_all('principalUnderwriter')
parts.append("\n### Item B.16. Principal underwriters.")
if underwriters:
for i, uw in enumerate(underwriters, 1):
parts.append(f"\n**Principal underwriter Record: {i}**")
state_country_node = uw.find('principalUnderWriterStateCountry')
uw_details = {
"i. Full name": get_text(uw, 'principalUnderwriterName'),
"ii. SEC file number": get_text(uw, 'principalUnderwriterFileNumber'),
"iii. CRD number": get_text(uw, 'principalUnderwriterCrdNumber'),
"iv. LEI, if any": get_text(uw, 'principalUnderwriterLei'),
"v. State, if applicable": (state_country_node.get('principalUnderWriterState', '—') if state_country_node else '—').replace('US-', ''),
"vi. Foreign country, if applicable": state_country_node.get('principalUnderWriterCountry', '—') if state_country_node else '—',
"vii. Is the principal underwriter an affiliated person...?": format_val(get_text(uw, 'isPrincipalUnderwriterAffiliatedWithRegistrant')),
}
for key, val in uw_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
parts.append(f"- **b. Have any principal underwriters been hired or terminated during the reporting period?** {format_val(get_text(reg_info, 'isUnderwriterHiredOrTerminated'))}")
else:
parts.append("No Principal Underwriters reported.")
accountants = reg_info.find_all('publicAccountant')
parts.append("\n### Item B.17. Independent public accountant.")
if accountants:
for i, acc in enumerate(accountants, 1):
parts.append(f"\n**Public accountant Record: {i}**")
state_country_node = acc.find('publicAccountantStateCountry')
acc_details = {
"a. Full Name": get_text(acc, 'publicAccountantName'),
"b. PCAOB Number": get_text(acc, 'pcaobNumber'),
"c. LEI, if any": get_text(acc, 'publicAccountantLei'),
"d. State, if applicable": (state_country_node.get('publicAccountantState', '—') if state_country_node else '—').replace('US-', ''),
"e. Foreign country, if applicable": state_country_node.get('publicAccountantCountry', '—') if state_country_node else '—',
}
for key, val in acc_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
parts.append(f"- **f. Has the independent public accountant changed since the last filing?** {format_val(get_text(reg_info, 'isPublicAccountantChanged'))}")
else:
parts.append("No Independent Public Accountants reported.")
b18_b23_details = {
"Item B.18 - Did an independent public accountant's report on internal control note any material weaknesses?": format_val(get_text(reg_info, 'isMaterialWeakness')),
"Item B.19 - Did an independent public accountant issue an opinion other than an unqualified opinion?": format_val(get_text(reg_info, 'isOpinionOffered')),
"Item B.20 - Have there been material changes in the method of valuation?": format_val(get_text(reg_info, 'isMaterialChange')),
"Item B.21 - Have there been any changes in accounting principles or practices?": format_val(get_text(reg_info, 'isAccountingPrincipleChange')),
"Item B.22.a - Were any payments made to shareholders as a result of an error in calculating NAV?": format_val(get_text(reg_info, 'isPaymentErrorInNetAssetValue')),
"Item B.23 - Did the Registrant pay any dividend or make any distribution required to be accompanied by a written statement?": format_val(get_text(reg_info, 'isPaymentDividend')),
}
for key, val in b18_b23_details.items():
if val and val.strip() != "—": parts.append(f"- **{key}:** {val}")
series_questions = form_data.find_all('managementInvestmentQuestion')
if series_questions:
parts.append("\n## Part C: Additional Questions for Management Investment Companies")
for i, s in enumerate(series_questions, 1):
parts.append(f"\n### Management Investment Record: {i} - {get_text(s, 'mgmtInvFundName')}")
parts.append("\n**Item C.1. Background information.**")
c1_details = {
"a. Full Name of the Fund": get_text(s, 'mgmtInvFundName'),
"b. Series identification number, if any": get_text(s, 'mgmtInvSeriesId'),
"c. LEI": get_text(s, 'mgmtInvLei'),
"d. Is this the first filing on this form by the Fund?": format_val(get_text(s, 'isFirstFilingByFund')),
}
for key, val in c1_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
parts.append("\n**Item C.2. Classes of open-end management investment companies.**")
c2_details = {
"a. How many Classes of shares of the Fund (if any) are authorized?": get_text(s, 'numAuthorizedClass'),
"b. How many new Classes of shares of the Fund were added during the reporting period?": get_text(s, 'numAddedClass'),
"c. How many Classes of shares of the Fund were terminated during the reporting period?": get_text(s, 'numTerminatedClass'),
}
for key, val in c2_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
outstanding_classes = s.find_all('sharesOutstanding')
if outstanding_classes:
parts.append("\n**d. For each Class with shares outstanding, provide the information requested below:**")
class_data = []
for j, c in enumerate(outstanding_classes, 1):
class_data.append({
"Shares Outstanding Record": j,
"i. Full name of Class": c.get('sharesOutstandingClassName'),
"ii. Class identification number, if any": c.get('sharesOutstandingClassId'),
"iii. Ticker symbol, if any": c.get('sharesOutstandingTickerSymbol')
})
parts.append(to_compact_markdown(pd.DataFrame(class_data), index=False))
fund_type_tags = s.find_all('fundType')
if fund_type_tags:
parts.append("\n**Item C.3. Type of fund.**")
selected_types = {ft.text.strip() for ft in fund_type_tags}
for type_desc, prefix in FUND_TYPES_MAP:
checkbox = '[x]' if type_desc in selected_types else '[ ]'
indent = " " if prefix and prefix.startswith('i') else ""
prefix_str = f"{prefix} " if prefix else ""
parts.append(f"- {indent}{checkbox} {prefix_str}{type_desc}")
parts.append(f"\n**Item C.4 - Does the Fund seek to operate as a 'non-diversified company'?** {format_val(get_text(s, 'isNonDiversifiedCompany'))}")
parts.append(f"**Item C.5 - Does the fund invest in a controlled foreign corporation?** {format_val(get_text(s, 'isForeignSubsidiary'))}")
parts.append(f"\n**Item C.6. Securities lending.**")
parts.append(f"- **a. Is the Fund authorized to engage in securities lending transactions?** {format_val(get_text(s, 'isFundSecuritiesLending'))}")
fund_lend_securities_node = s.find('fundLendSecurities')
if fund_lend_securities_node:
parts.append(f"- **b. Did the Fund lend any of its securities during the reporting period?** {format_val(fund_lend_securities_node.get('didFundLendSecurities'))}")
parts.append(f" - **i. If yes, during the reporting period, did any borrower fail to return the loaned securities by the contractual deadline with the result that:**")
parts.append(f" - **1. The Fund (or it securities lending agent) liquidated collateral pledged to secure the loaned securities?** {format_val(get_text(fund_lend_securities_node, 'isFundLiquidated'))}")
parts.append(f" - **2. The Fund was otherwise adversely impacted?** {format_val(get_text(fund_lend_securities_node, 'isFundAdverselyImpacted'))}")
security_lending_agents = s.find_all('securityLending')
if security_lending_agents:
parts.append("\n**c. Provide the information requested below about each securities lending agent, if any, retained by the Fund:**")
for k, agent in enumerate(security_lending_agents, 1):
parts.append(f"\n**Securities Lending Record: {k}**")
parts.append(f"- **i. Full name of securities lending agent:** {get_text(agent, 'securitiesAgentName')}")
parts.append(f"- **ii. LEI, if any:** {get_text(agent, 'securitiesAgentLei')}")
parts.append(f"- **iii. Is the securities lending agent an affiliated person...?** {format_val(get_text(agent, 'isSecuritiesAgentAffiliated'))}")
indemnity_node = agent.find('securityAgentIdemnity')
if indemnity_node:
parts.append(f"- **iv. Does the securities lending agent... indemnify the Fund against borrower default?** {format_val(indemnity_node.get('isSecurityAgentIdemnity'))}")
idemnity_providers = indemnity_node.find_all('idemnityProvider')
if idemnity_providers:
parts.append("- **v. If the entity providing the indemnification is not the securities lending agent, provide the following information:**")
for m, provider in enumerate(idemnity_providers, 1):
parts.append(f" **Idemnity Providers Record: {m}**")
parts.append(f" - **1. Name of person providing indemnification:** {get_text(provider, 'idemnityProviderName')}")
parts.append(f" - **2. LEI, if any:** {get_text(provider, 'idemnityProviderLei')}")
parts.append(f"- **vi. Did the Fund exercise its indemnification rights during the reporting period?** {format_val(get_text(indemnity_node, 'didIndemnificationRights'))}")
collateral_managers = s.find_all('collateralManager')
if collateral_managers:
parts.append("\n**d. If a person providing cash collateral management services to the Fund in connection with the Fund's securities lending activities does not also serve as securities lending agent, provide the following information about each cash collateral manager:**")
for k, manager in enumerate(collateral_managers, 1):
parts.append(f"\n**Collateral Managers Record: {k}**")
parts.append(f"- **i. Full name of cash collateral manager:** {manager.get('collateralManagerName', '—')}")
parts.append(f"- **ii. LEI, if any:** {manager.get('collateralManagerLei', '—')}")
parts.append(f"- **iii. Is the cash collateral manager an affiliated person, or an affiliated person of an affiliated person, of a securities lending agent retained by the Fund??** {format_val(manager.get('isCollateralManagerAffliliated'))}")
parts.append(f"- **iv. Is the cash collateral manager an affiliated person of the Fund?** {format_val(manager.get('isCollateralManagerAffliliatedWithFund'))}")
payment_types_node = s.find('paymentToAgentManagers')
if payment_types_node:
parts.append("\n**e. Types of payments made to one or more securities lending agents and cash collateral managers (check all that apply):**")
selected_payments = {p.text.strip() for p in payment_types_node.find_all('paymentToAgentManagerType')}
all_payment_types = [
"Revenue sharing split", "Fee-based revenue split (other than administrative fee)", "Administrative fee",
"Cash collateral reinvestment fee", "Indemnification fee", "Other", "N/A"
]
for p_type in all_payment_types:
checkbox = '[x]' if p_type in selected_payments else '[ ]'
parts.append(f"- {checkbox} {p_type}")
parts.append(f"\n- **f. Provide the monthly average of the value of portfolio securities on loan during the reporting period:** {format_val(get_text(s, 'avgPortfolioSecuritiesValue'), 'dollar')}")
parts.append(f"- **g. Provide the net income from securities lending activities:** {format_val(get_text(s, 'netIncomeSecuritiesLending'), 'dollar')}")
rely_on_rule_node = s.find('relyOnRuleTypes')
if rely_on_rule_node:
parts.append("\n**Item C.7. Reliance on certain statutory exemption and rules.**")
rules = [rule.text.strip() for rule in rely_on_rule_node.find_all('relyOnRuleType')]
if rules:
parts.append("Did the Fund rely on the following rules?")
for rule in rules:
parts.append(f"- {rule}")
else:
parts.append("No reliance on statutory exemptions or rules reported.")
parts.append("\n**Item C.8. Expense limitations.**")
c8_details = {
"a. Did the Fund have an expense limitation arrangement?": format_val(get_text(s, 'isExpenseLimitationInPlace')),
"b. Were any expenses reduced or waived?": format_val(get_text(s, 'isExpenseReducedOrWaived')),
"c. Are the fees waived subject to recoupment?": format_val(get_text(s, 'isFeesWaivedRecoupable')),
"d. Were any expenses previously waived recouped during the period?": format_val(get_text(s, 'isExpenseWaivedRecoupable')),
}
for key, val in c8_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}** {val}")
advisers = s.find_all('investmentAdviser')
parts.append("\n**Item C.9. Investment advisers.**")
if advisers:
for i, adviser in enumerate(advisers, 1):
parts.append(f"\n**Investment Advisers Record: {i}**")
state_country_node = adviser.find('investmentAdviserStateCountry')
adviser_details = {
"i. Full name": get_text(adviser, 'investmentAdviserName'),
"ii. SEC file number": get_text(adviser, 'investmentAdviserFileNo'),
"iii. CRD number": get_text(adviser, 'investmentAdviserCrdNo'),
"iv. LEI, if any": get_text(adviser, 'investmentAdviserLei'),
"v. State, if applicable": (state_country_node.get('investmentAdviserState', '—') if state_country_node else '—').replace('US-', ''),
"vi. Foreign country, if applicable": state_country_node.get('investmentAdviserCountry', '—') if state_country_node else '—',
"vii. Was the investment adviser hired during the reporting period?": format_val(get_text(adviser, 'isInvestmentAdviserHired')),
}
for key, val in adviser_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
else:
parts.append("No Investment Advisers reported.")
sub_advisers = s.find_all('subAdviser')
if sub_advisers:
parts.append("\n**Item C.9.b. Sub-advisers.**")
for i, adviser in enumerate(sub_advisers, 1):
parts.append(f"\n**Sub-adviser Record: {i}**")
sub_adviser_details = {
"i. Full name": get_text(adviser, 'subAdviserName'),
"ii. SEC file number": get_text(adviser, 'subAdviserFileNo'),
"iii. CRD number": get_text(adviser, 'subAdviserCrdNo'),
"iv. LEI, if any": get_text(adviser, 'subAdviserLei'),
"v. Is the sub-adviser an affiliated person?": format_val(get_text(adviser, 'isSubAdviserAffiliated')),
"vi. Foreign country, if applicable": get_text(adviser, 'subAdviserCountry'),
"vii. Was the sub-adviser hired during the reporting period?": format_val(get_text(adviser, 'isSubAdviserHired')),
}
for key, val in sub_adviser_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
transfer_agents = s.find_all('transferAgent')
parts.append("\n**Item C.10. Transfer agents.**")
if transfer_agents:
for i, agent in enumerate(transfer_agents, 1):
parts.append(f"\n**Transfer Agents Record: {i}**")
state_country_node = agent.find('transferAgentStateCountry')
agent_details = {
"i. Full name": get_text(agent, 'transferAgentName'),
"ii. SEC file number": get_text(agent, 'transferAgentFileNo'),
"iii. LEI, if any": get_text(agent, 'transferAgentLei'),
"iv. State, if applicable": (state_country_node.get('transferAgentState', '—') if state_country_node else '—').replace('US-', ''),
"v. Foreign country, if applicable": state_country_node.get('transferAgentCountry', '—') if state_country_node else '—',
"vi. Is the transfer agent an affiliated person of the Fund or its investment adviser(s)?": format_val(get_text(agent, 'isTransferAgentAffiliated')),
"vii. Is the transfer agent a sub-transfer agent?": format_val(get_text(agent, 'isTransferAgentSubAgent')),
}
for key, val in agent_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
parts.append(f"- **b. Has a transfer agent been hired or terminated during the reporting period?** {format_val(get_text(s, 'isTransferAgentHiredOrTerminated'))}")
else:
parts.append("No Transfer Agents reported.")
pricing_services = s.find_all('pricingService')
parts.append("\n**Item C.11. Pricing services.**")
if pricing_services:
for i, service in enumerate(pricing_services, 1):
parts.append(f"\n**Pricing Services Record: {i}**")
state_country_node = service.find('pricingServiceStateCountry')
service_details = {
"i. Full name": get_text(service, 'pricingServiceName'),
"ii. LEI, if any, or provide and describe other identifying number": get_text(service, 'pricingServiceLei'),
"Description of other identifying number": get_text(service, 'pricingServiceIdNumberDesc'),
"iii. State, if applicable": (state_country_node.get('pricingServiceState', '—') if state_country_node else '—').replace('US-', ''),
"iv. Foreign country, if applicable": state_country_node.get('pricingServiceCountry', '—') if state_country_node else '—',
"v. Is the pricing service an affiliated person of the Fund or its investment adviser(s)?": format_val(get_text(service, 'isPricingServiceAffiliated')),
}
for key, val in service_details.items():
if val and val.strip() != "—":
key_formatted = key.replace(" ", " ")
parts.append(f"- **{key_formatted}:** {val}")
parts.append(f"- **b. Was a pricing service hired or terminated during the reporting period?** {format_val(get_text(s, 'isPricingServiceHiredOrTerminated'))}")
else:
parts.append("No Pricing Services reported.")
custodians = s.find_all('custodian')
parts.append("\n**Item C.12. Custodians.**")
if custodians:
parts.append("\n**a. Provide the following information about each person that provided custodial services to the Fund during the reporting period:**")
for i, custodian in enumerate(custodians, 1):
parts.append(f"\n**Custodians Record: {i}**")
state_country_node = custodian.find('custodianStateCountry')
custodian_details = {
"i. Full name": get_text(custodian, 'custodianName'),
"ii. LEI, if any": get_text(custodian, 'custodianLei'),
"iii. State, if applicable": (state_country_node.get('custodianState', '—') if state_country_node else '—').replace('US-', ''),
"iv. Foreign country, if applicable": state_country_node.get('custodianCountry', '—') if state_country_node else '—',
"v. Is the custodian an affiliated person of the Fund or its investment adviser(s)?": format_val(get_text(custodian, 'isCustodianAffiliated')),
"vi. Is the custodian a sub-custodian?": format_val(get_text(custodian, 'isSubCustodian')),
"vii. With respect to the custodian, check below to indicate the type of custody": get_text(custodian, 'custodyType'),
}
for key, val in custodian_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
parts.append(f"\n- **b. Was a custodian hired or terminated during the reporting period?** {format_val(get_text(s, 'isCustodianHiredOrTerminated'))}")
else:
parts.append("No Custodians reported.")
shareholder_agents = s.find_all('shareholderServicingAgent')
parts.append("\n**Item C.13 - Shareholder Servicing Agents**")
if shareholder_agents:
for i, sa in enumerate(shareholder_agents, 1):
parts.append(f"\n**Shareholder Servicing Agents Record: {i}**")
state_country_node = sa.find('shareholderServiceAgentStateCountry')
sa_details = {
"i. Full name": get_text(sa, 'shareholderServiceAgentName'),
"ii. LEI, if any": get_text(sa, 'shareholderServiceAgentLei'),
"iii. State, if applicable": (state_country_node.get('shareholderServiceAgentState', '—') if state_country_node else '—').replace('US-', ''),
"iv. Foreign country, if applicable": state_country_node.get('shareholderServiceAgentCountry', '—') if state_country_node else '—',
"v. Is the shareholder servicing agent an affiliated person?": format_val(get_text(sa, 'isShareholderServiceAgentAffiliated')),
"vi. Is the shareholder servicing agent a sub-shareholder servicing agent?": format_val(get_text(sa, 'isShareholderServiceAgentSubshare')),
}
for key, val in sa_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
parts.append(f"- **b. Has a shareholder servicing agent been hired or terminated during the reporting period?** {format_val(get_text(s, 'isShareholderServiceHiredTerminated'))}")
else:
parts.append("No Shareholder Servicing Agents reported.")
admins = s.find_all('admin')
parts.append("\n**Item C.14. Administrators.**")
if admins:
parts.append("\n**a. Provide the following information about each administrator of the Fund:**")
for i, admin in enumerate(admins, 1):
parts.append(f"\n**Administrators Record: {i}**")
state_country_node = admin.find('adminStateCountry')
admin_details = {
"i. Full name": get_text(admin, 'adminName'),
"ii. LEI, if any, or other identifying number": get_text(admin, 'adminLei'),
"iii. State, if applicable": (state_country_node.get('adminState', '—') if state_country_node else '—').replace('US-', ''),
"iv. Foreign country, if applicable": state_country_node.get('adminCountry', '—') if state_country_node else '—',
"v. Is the administrator an affiliated person of the Fund or its investment adviser(s)?": format_val(get_text(admin, 'isAdminAffiliated')),
"vi. Is the administrator a sub-administrator?": format_val(get_text(admin, 'isAdminSubAdmin')),
}
for key, val in admin_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
parts.append(f"\n- **b. Has a third-party administrator been hired or terminated during the reporting period?** {format_val(get_text(s, 'isAdminHiredOrTerminated'))}")
else:
parts.append("No Administrators reported.")
affiliated_brokers = s.find_all('brokerDealer')
parts.append("\n**Item C.15 - Affiliated broker-dealers.**")
if affiliated_brokers:
for i, ab in enumerate(affiliated_brokers, 1):
parts.append(f"\n**Broker Dealers Record: {i}**")
state_country_node = ab.find('brokerDealerStateCountry')
ab_details = {
"a. Full name": get_text(ab, 'brokerDealerName'),
"b. SEC file number": get_text(ab, 'brokerDealerFileNo'),
"c. CRD number": get_text(ab, 'brokerDealerCrdNo'),
"d. LEI, if any": get_text(ab, 'brokerDealerLei'),
"e. State, if applicable": (state_country_node.get('brokerDealerState', '—') if state_country_node else '—').replace('US-', ''),
"f. Foreign country, if applicable": state_country_node.get('brokerDealerCountry', '—') if state_country_node else '—',
"g. Total commissions paid to the affiliated broker-dealer for the reporting period:": format_val(get_text(ab, 'brokerDealerCommission'), 'dollar'),
}
for key, val in ab_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
else:
parts.append("No Affiliated Broker-Dealers reported.")
brokers = s.find_all('broker')
parts.append("\n**Item C.16. Brokers.**")
if brokers:
parts.append("\n**a. For each of the ten brokers that received the largest dollar amount of brokerage commissions...**")
for i, broker in enumerate(brokers, 1):
parts.append(f"\n**Brokers Record: {i}**")
state_country_node = broker.find('brokerStateCountry')
broker_details = {
"i. Full name of broker": get_text(broker, 'brokerName'),
"ii. SEC file number": get_text(broker, 'brokerFileNo'),
"iii. CRD number": get_text(broker, 'brokerCrdNo'),
"iv. LEI, if any": get_text(broker, 'brokerLei'),
"v. State, if applicable": (state_country_node.get('brokerState', '—') if state_country_node else '—').replace('US-', ''),
"vi. Foreign country, if applicable": state_country_node.get('brokerCountry', '—') if state_country_node else '—',
"vii. Gross commissions paid by the Fund for the reporting period": format_val(get_text(broker, 'grossCommission'), 'dollar'),
}
for key, val in broker_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
aggregate_commission = get_text(s, 'aggregateCommission')
if aggregate_commission and aggregate_commission.strip() != "—":
parts.append(f"\n**Aggregate Commission:** {format_val(aggregate_commission, 'dollar')}")
else:
parts.append("No Brokers reported.")
principal_transactions = s.find_all('principalTransaction')
if principal_transactions:
parts.append("\n**Item C.17.a. Principal transaction counterparties.**")
principal_data = []
for pt in principal_transactions:
state_country_node = pt.find('principalStateCountry')
principal_data.append({
"Name": get_text(pt, 'principalName'),
"SEC file number": get_text(pt, 'principalFileNo'),
"CRD number": get_text(pt, 'principalCrdNo'),
"LEI": get_text(pt, 'principalLei'),
"State": (state_country_node.get('principalState', '—') if state_country_node else '—').replace('US-', ''),
"Country": state_country_node.get('principalCountry', '—') if state_country_node else '—',
"Total Purchase/Sale ($)": format_val(get_text(pt, 'principalTotalPurchaseSale'), 'dollar'),
})
if principal_data:
df = pd.DataFrame(principal_data)
parts.append(to_compact_markdown(df, index=False))
c17_c19_details = {
"Item C.17.b - Aggregate value of principal purchase/sale transactions": format_val(get_text(s, 'principalAggregatePurchase'), 'dollar'),
"Item C.18 - Did the Fund pay commissions for 'brokerage and research services'?": format_val(get_text(s, 'isBrokerageResearchPayment')),
"Item C.19.a - Fund's monthly average net assets": format_val(get_text(s, 'mnthlyAvgNetAssets'), 'dollar'),
"Item C.19.b - Money market fund's daily average net assets": format_val(get_text(s, 'dailyAvgNetAssets'), 'dollar'),
}
for key, val in c17_c19_details.items():
if val and val.strip() != "—":
parts.append(f"- **{key}:** {val}")
parts.append("\n**Item C.20. Lines of credit, interfund lending and interfund borrowing.**")
line_of_credit_node = s.find('lineOfCredit')
if line_of_credit_node:
parts.append(f"- **a. Does the Fund have available a line of credit?** {format_val(line_of_credit_node.get('hasLineOfCredit'))}")
credit_details = line_of_credit_node.find_all('lineOfCreditDetail')
if credit_details:
parts.append("\n**If yes, for each line of credit, provide the information requested below:**")
for j, detail in enumerate(credit_details, 1):
parts.append(f"\n**Line of Credit details Record: {j}**")
parts.append(f"- **i. Is the line of credit a committed or uncommitted line of credit?** {get_text(detail, 'isCreditLineCommitted')}")
parts.append(f"- **ii. What size is the line of credit?** {format_val(get_text(detail, 'lineOfCreditSize'), 'dollar')}")
institutions = detail.find_all('lineOfCreditInstitution')
if institutions:
parts.append("\n- **iii. With which institution(s) is the line of credit?**")
for k, inst in enumerate(institutions, 1):
parts.append(f" - **Line Institutions Record: {k} Name of institution:** {inst.get('creditInstitutionName', '—')}")
shared_credit_node = detail.find('sharedCreditType')
if shared_credit_node:
credit_type = shared_credit_node.get('creditType', '—')
parts.append(f"\n- **iv. Is the line of credit just for the Fund, or is it shared among multiple funds?** {credit_type}")
credit_users = shared_credit_node.find_all('creditUser')
if credit_users:
parts.append("\n - **1. If shared, list the names of other funds that may use the line of credit:**")
user_data = []
for user in credit_users:
user_data.append({
"Name of fund": user.get('fundName', '—'),
"SEC File number": user.get('secFileNo', '—')
})
if user_data:
parts.append(to_compact_markdown(pd.DataFrame(user_data), index=False))
parts.append(f"\n- **v. Did the Fund draw on the line of credit this period?** {format_val(get_text(detail, 'isCreditLineUsed'))}")
parts.append(f"\n- **b. Did the Fund engage in interfund lending?** {format_val(get_text(s, 'isInterfundLending'))}")
parts.append(f"- **c. Did the Fund engage in interfund borrowing?** {format_val(get_text(s, 'isInterfundBorrowing'))}")
swing_pricing_val = get_text(s, 'isSwingPricing')
if swing_pricing_val and swing_pricing_val != "—":
parts.append("\n**Item C.21. Swing pricing.**")
parts.append("- **a. Did the Fund (if not a Money Market Fund, Exchange-Traded Fund, or Exchange-Traded Managed Fund) engage in swing pricing?** " + format_val(swing_pricing_val))
etf_info = form_data.find('exchangeSeriesInfo')
if etf_info:
parts.append("\n## Part E: Additional Questions for ETFs and ETMFs")
for etf in etf_info.find_all('exchangeTradedFund'):
parts.append(f"\n### {get_text(etf, 'fundName')}")
exchange_node = etf.find('securityExchange')
if exchange_node:
parts.append("\n**Item E.1 - Exchange**")
parts.append(f"- **Exchange:** {exchange_node.get('fundExchange', '—')}")
parts.append(f"- **Ticker:** {exchange_node.get('fundsTickerSymbol', '—')}")
auth_parts = etf.find_all('authorizedParticipant')
if auth_parts:
parts.append("\n**Item E.2 - Authorized Participants**")
ap_data = [{"Name": ap.get('authorizedParticipantName', '—'),
"Purchase Value": format_val(ap.get('authorizedParticipantPurchaseValue', '—'), 'dollar'),
"Redeem Value": format_val(ap.get('authorizedParticipantRedeemValue', '—'), 'dollar')} for ap in auth_parts]
parts.append(to_compact_markdown(pd.DataFrame(ap_data), index=False))
parts.append("\n**Item E.3 - Creation Units**")
e3_details = {
"a. Number of Fund shares required to form a creation unit": format_val(get_text(etf, 'creationUnitNumOfShares'), 'number'),
"b.i. Average percentage of value composed of cash (purchased)": format_val(get_text(etf, 'averagePercentagePurchased'), 'percent'),
"c.i. Average percentage of value composed of cash (redeemed)": format_val(get_text(etf, 'averagePercentageRedeemed'), 'percent'),
"d.i.2. Average transaction fee (dollars for one or more units, purchased)": format_val(get_text(etf, 'creationUnitTransactionFeeManyUnits'), 'dollar'),
"d.i.3. Average transaction fee (percentage of value, purchased)": format_val(get_text(etf, 'creationUnitTransactionFeePercentagePerUnit'), 'percent'),
}
for key, val in e3_details.items():
if val and val.strip() != "—": parts.append(f"- **{key}:** {val}")
parts.append(f"- **Item E.5 - Is the Fund an 'In-Kind Exchange-Traded Fund'?** {format_val(get_text(etf, 'isInKindETF'))}")
attachments_node = form_data.find('attachmentsTab')
parts.append("\n## N-CEN: Part G: Attachments")
parts.append("**Item G.1a. Attachments.**")
parts.append("Attachments applicable to all Registrants. All Registrants shall file the following attachments, as applicable, with the current report. Indicate the attachments filed with the current report by checking the applicable items below:")
attachment_map = {
'isLegalProceedings': "i. Legal proceedings",
'isFinancialSupport': "ii. Provision of financial support",
'isIPAReportInternalControl': "iii. Independent public accountant's report on internal control (management investment companies other than small business investment companies only)",
'isChangeAccountPrinciple': "iv. Change in accounting principles and practices",
'isExemptiveOrder': "v. Information required to be filed pursuant to exemptive orders",
'isOther': "vi. Other information required to be included as an attachment pursuant to Commission rules and regulations"
}
for tag_name, text in attachment_map.items():
is_checked = False
if attachments_node:
is_checked = get_text(attachments_node, tag_name).lower() in ['y', 'true']
checkbox = '[x]' if is_checked else '[ ]'
parts.append(f"- {checkbox} {text}")
signature = form_data.find('signature')
if signature:
parts.append("\n## N-CEN: Signature")
parts.append("Pursuant to the requirements of the Investment Company Act of 1940, the Registrant has duly caused this report to a be signed on its behalf by the undersigned hereunto duly authorized.")
sig_details = {
"Registrant": signature.get('registrantSignedName', '—'),
"Date": signature.get('signedDate', '—'),
"Signature": signature.get('signature', '—'),
"Title": signature.get('title', '—'),
}
for key, val in sig_details.items():
if val and val != "—":
parts.append(f"**{key}:** {val}")
return "\n\n".join(parts)
def parse_form_c_xml(xml: BeautifulSoup) -> str:
"""
Parses an XML-based Form C into structured Markdown, accurately
rendering all sections to mimic the visual layout of the original form.
"""
def get_text(node, tag):
    """Return the unescaped, whitespace-stripped text of the first child
    element whose name matches *tag* (with an optional namespace prefix,
    case-insensitively), or an em dash ("—") when the node is falsy, the
    tag is absent, or the matched element has no text."""
    if not node:
        return "—"
    name_pattern = re.compile(f'^(?:\\w+:)?{tag}$', re.I)
    match = node.find(name_pattern)
    if not match or not match.text:
        return "—"
    return html.unescape(match.text.strip())
def safe_format_dollar(value_str: str) -> str:
if not value_str or value_str == "—": return "—"
try:
is_negative = value_str.startswith('(') and value_str.endswith(')')
if is_negative:
value_str = '-' + value_str.strip('()')
val = float(value_str)
return f"${val:,.2f}"
except (ValueError, TypeError):
return value_str
parts = [
"### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
"**Washington, D.C. 20549**\n\n"
"## FORM C\n\n"
"### UNDER THE SECURITIES ACT OF 1933\n"
]
issuer_info = xml.find('issuerInformation')
offering_info = xml.find('offeringInformation')
annual_report_info = xml.find('annualReportDisclosureRequirements')
signature_info = xml.find('signatureInfo')
if issuer_info:
parts.append("### Issuer Information\n")
is_amendment = get_text(issuer_info, 'isAmendment').lower() in ['true', 'y']
if is_amendment:
parts.append(f"**Is this an amendment?** Yes")
nature_of_amendment = get_text(issuer_info, 'natureOfAmendment')
if nature_of_amendment != "—":
parts.append(f"**Nature of Amendment:** {nature_of_amendment}")
address_node = issuer_info.find('issuerAddress')
address_parts = [
get_text(address_node, 'street1'),
get_text(address_node, 'city'),
get_text(address_node, 'stateOrCountry'),
get_text(address_node, 'zipCode')
]
physical_address_str = ", ".join(part for part in address_parts if part and part != "—")
issuer_details = {
"Name of Issuer": get_text(issuer_info.find('issuerInfo'), 'nameOfIssuer'),
"Legal Status": get_text(issuer_info.find('legalStatus'), 'legalStatusForm'),
"Jurisdiction of Incorporation/Organization": get_text(issuer_info.find('legalStatus'), 'jurisdictionOrganization'),
"Date of Organization": get_text(issuer_info.find('legalStatus'), 'dateIncorporation'),
"Physical Address": physical_address_str or "—",
"Issuer Website": get_text(issuer_info.find('issuerInfo'), 'issuerWebsite'),
"Is there a Co-Issuer?": "Yes" if get_text(issuer_info, 'isCoIssuer') == 'Y' else "No",
"Intermediary Name": get_text(issuer_info, 'companyName'),
"Intermediary CIK": get_text(issuer_info, 'commissionCik'),
"Intermediary File Number": get_text(issuer_info, 'commissionFileNumber'),
"Intermediary CRD Number": get_text(issuer_info, 'crdNumber'),
}
for key, val in issuer_details.items():
if val and val.strip() != "—":
parts.append(f"**{key}:** {val}")
if offering_info:
parts.append("\n### Offering Information\n")
offering_details = {
"Compensation to Intermediary": get_text(offering_info, 'compensationAmount'),
"Financial Interest in Issuer": get_text(offering_info, 'financialInterest'),
"Type of Security Offered": get_text(offering_info, 'securityOfferedType'),
"Other Description of Security": get_text(offering_info, 'securityOfferedOtherDesc'),
"Number of Securities Offered": get_text(offering_info, 'noOfSecurityOffered'),
"Price per Security": safe_format_dollar(get_text(offering_info, 'price')),
"Method for Determining Price": get_text(offering_info, 'priceDeterminationMethod'),
"Target Offering Amount": safe_format_dollar(get_text(offering_info, 'offeringAmount')),
"Oversubscription Accepted": "Yes" if get_text(offering_info, 'overSubscriptionAccepted') == 'Y' else "No",
"Oversubscription Allocation Type": get_text(offering_info, 'overSubscriptionAllocationType'),
"Description of Oversubscription": get_text(offering_info, 'descOverSubscription'),
"Maximum Offering Amount": safe_format_dollar(get_text(offering_info, 'maximumOfferingAmount')),
"Deadline to Reach Target Amount": get_text(offering_info, 'deadlineDate'),
}
for key, val in offering_details.items():
if val and val.strip() != "—":
parts.append(f"**{key}:** {val}")
if annual_report_info:
parts.append("\n### Annual Report Disclosure Requirements\n")
financials = {
"Current Number of Employees": get_text(annual_report_info, 'currentEmployees'),
"Total Assets (Most Recent Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'totalAssetMostRecentFiscalYear')),
"Total Assets (Prior Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'totalAssetPriorFiscalYear')),
"Cash & Cash Equivalents (Most Recent Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'cashEquiMostRecentFiscalYear')),
"Cash & Cash Equivalents (Prior Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'cashEquiPriorFiscalYear')),
"Accounts Receivable (Most Recent Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'actReceivedMostRecentFiscalYear')),
"Accounts Receivable (Prior Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'actReceivedPriorFiscalYear')),
"Short-Term Debt (Most Recent Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'shortTermDebtMostRecentFiscalYear')),
"Short-Term Debt (Prior Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'shortTermDebtPriorFiscalYear')),
"Long-Term Debt (Most Recent Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'longTermDebtMostRecentFiscalYear')),
"Long-Term Debt (Prior Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'longTermDebtPriorFiscalYear')),
"Revenues/Sales (Most Recent Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'revenueMostRecentFiscalYear')),
"Revenues/Sales (Prior Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'revenuePriorFiscalYear')),
"Cost of Goods Sold (Most Recent Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'costGoodsSoldMostRecentFiscalYear')),
"Cost of Goods Sold (Prior Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'costGoodsSoldPriorFiscalYear')),
"Taxes Paid (Most Recent Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'taxPaidMostRecentFiscalYear')),
"Taxes Paid (Prior Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'taxPaidPriorFiscalYear')),
"Net Income (Most Recent Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'netIncomeMostRecentFiscalYear')),
"Net Income (Prior Fiscal Year)": safe_format_dollar(get_text(annual_report_info, 'netIncomePriorFiscalYear')),
}
for key, val in financials.items():
if val and val.strip() not in ["—", "$—"]:
parts.append(f"**{key}:** {val}")
jurisdiction_nodes = annual_report_info.find_all('issueJurisdictionSecuritiesOffering')
if jurisdiction_nodes:
jurisdiction_codes = [node.text for node in jurisdiction_nodes]
jurisdiction_names = [SEC_COUNTRY_CODES.get(code, code) for code in jurisdiction_codes]
parts.append("\n**Jurisdictions Offered:**")
parts.append(", ".join(jurisdiction_names))
if signature_info:
parts.append("\n### Signatures\n")
issuer_signature = signature_info.find('issuerSignature')
if issuer_signature:
parts.append(f"**Issuer:** {get_text(issuer_signature, 'issuer')}")
parts.append(f"**Signature:** {get_text(issuer_signature, 'issuerSignature')}")
parts.append(f"**Title:** {get_text(issuer_signature, 'issuerTitle')}")
for person in signature_info.find_all('signaturePerson'):
parts.append("\n---")
parts.append(f"**Signature:** {get_text(person, 'personSignature')}")
parts.append(f"**Title:** {get_text(person, 'personTitle')}")
parts.append(f"**Date:** {get_text(person, 'signatureDate')}")
return "\n\n".join(parts)
def parse_nport_p_xml(xml: BeautifulSoup, class_name_map: dict = None) -> str:
    """
    Parses an XML-based Form NPORT-P into a structured Markdown document,
    capturing detailed fund information, monthly returns, and comprehensive
    data for each portfolio security, including derivatives. This version is
    updated to capture all fields from Part A and other sections.

    Args:
        xml: Parsed NPORT-P filing (BeautifulSoup tree).
        class_name_map: Optional mapping of EDGAR class IDs to class names,
            used to label per-class returns and Part B sections.

    Returns:
        Markdown text; sections absent from the filing are omitted.
    """
    def get_text(node, tag):
        # Case-insensitive child lookup that tolerates an optional namespace
        # prefix. The pattern is anchored at BOTH ends: BeautifulSoup applies
        # re.search to element names, so without '^' a tag like 'name' would
        # also match suffixes such as 'seriesName' or 'regName'. This matches
        # the anchored form used by the other parsers in this file.
        if not node: return "—"
        found = node.find(re.compile(rf'^(?:\w+:)?{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def format_val(value_str: str, type_hint: str = 'string') -> str:
        # Normalize raw XML text: Y/N -> Yes/No; numeric strings are formatted
        # per type_hint; anything unparseable is passed through unchanged.
        if not value_str or value_str.lower() in ('—', 'n/a', 'na'): return "—"
        if value_str.upper() == 'Y': return "Yes"
        if value_str.upper() == 'N': return "No"
        try:
            val_float = float(value_str.replace(',', ''))
            if type_hint == 'dollar': return f"${val_float:.2f}"
            if type_hint == 'percent': return f"{val_float:.2f}%"
            if type_hint == 'shares': return f"{val_float:.4f}"
            if type_hint == 'number': return f"{val_float:.0f}"
        except (ValueError, TypeError):
            pass
        return value_str
    if class_name_map is None:
        class_name_map = {}
    parts = ["## Form NPORT-P: Monthly Portfolio Investments Report"]
    gen_info = xml.find('genInfo')
    if gen_info:
        parts.append("\n### NPORT-P: Part A: General Information")
        parts.append("\n**Item A.1. Information about the Registrant.**")
        registrant_info = {
            "a. Name of Registrant": get_text(gen_info, 'regName'),
            "b. Investment Company Act file number": get_text(gen_info, 'regFileNumber'),
            "c. CIK number of Registrant": get_text(gen_info, 'regCik'),
            "d. LEI of Registrant": get_text(gen_info, 'regLei'),
        }
        for key, val in registrant_info.items():
            if val != "—": parts.append(f"- **{key}:** {val}")
        # State and country live as attributes on <regStateConditional>,
        # not as child elements, hence the direct attribute access below.
        reg_addr_node = gen_info.find('regStateConditional')
        registrant_address = {
            "Street Address 1": get_text(gen_info, 'regStreet1'),
            "City": get_text(gen_info, 'regCity'),
            "State": reg_addr_node['regState'].replace('US-', '') if reg_addr_node and reg_addr_node.has_attr('regState') else '—',
            "Foreign country": reg_addr_node['regCountry'] if reg_addr_node and reg_addr_node.has_attr('regCountry') else '—',
            "Zip / Postal Code": get_text(gen_info, 'regZipOrPostalCode'),
            "Telephone number": get_text(gen_info, 'regPhone'),
        }
        parts.append("- **e. Address and telephone number of Registrant.**")
        for key, val in registrant_address.items():
            if val and val.strip() != "—": parts.append(f"  - **{key}:** {val}")
        parts.append("\n**Item A.2. Information about the Series.**")
        series_info = {
            "a. Name of Series": get_text(gen_info, 'seriesName'),
            "b. EDGAR series identifier (if any)": get_text(gen_info, 'seriesId'),
            "c. LEI of Series": get_text(gen_info, 'seriesLei'),
        }
        for key, val in series_info.items():
            if val != "—": parts.append(f"- **{key}:** {val}")
        parts.append("\n**Item A.3. Reporting period.**")
        reporting_info = {
            "a. Date of fiscal year-end": get_text(gen_info, 'repPdEnd'),
            "b. Date as of which information is reported": get_text(gen_info, 'repPdDate'),
        }
        for key, val in reporting_info.items():
            if val != "—": parts.append(f"- **{key}:** {val}")
        parts.append("\n**Item A.4. Final filing**")
        final_filing = format_val(get_text(gen_info, 'isFinalFiling'))
        parts.append(f"Does the Fund anticipate that this will be its final filing on Form N-PORT? **{final_filing}**")
    fund_info = xml.find('fundInfo')
    if fund_info:
        parts.append("\n### Fund Information")
        fund_data = {
            "Total Assets": format_val(get_text(fund_info, 'totAssets'), 'dollar'),
            "Total Liabilities": format_val(get_text(fund_info, 'totLiabs'), 'dollar'),
            "Net Assets": format_val(get_text(fund_info, 'netAssets'), 'dollar'),
            "Assets Attributable to Miscellaneous Securities": format_val(get_text(fund_info, 'assetsAttrMiscSec'), 'dollar'),
            "Amount of Assets Invested in Other Investment Companies": format_val(get_text(fund_info, 'assetsInvested'), 'dollar'),
            "Delayed Delivery Securities": format_val(get_text(fund_info, 'delayDeliv'), 'dollar'),
            "Stand-by Commitments": format_val(get_text(fund_info, 'standByCommit'), 'dollar'),
            "Cash Not Reported": format_val(get_text(fund_info, 'cshNotRptdInCorD'), 'dollar'),
        }
        for key, val in fund_data.items():
            # Zero balances are suppressed to keep the report compact.
            if val and val != "—" and val != "$0.00":
                parts.append(f"**{key}:** {val}")
        # dv01 risk metrics are attributes on their respective elements.
        if (cur_metric := fund_info.find('curMetric')):
            parts.append("\n**Currency Risk Metrics (dv01):**")
            risk_data = {
                "3-Month": cur_metric.get('period3Mon'), "1-Year": cur_metric.get('period1Yr'),
                "5-Year": cur_metric.get('period5Yr'), "10-Year": cur_metric.get('period10Yr'),
                "30-Year": cur_metric.get('period30Yr')
            }
            parts.append("- " + " | ".join([f"**{k}:** {v}" for k, v in risk_data.items() if v]))
        if (invst_grade := fund_info.find('creditSprdRiskInvstGrade')):
            parts.append("\n**Credit Spread Risk - Investment Grade (dv01):**")
            risk_data = {
                "3-Month": invst_grade.get('period3Mon'), "1-Year": invst_grade.get('period1Yr'),
                "5-Year": invst_grade.get('period5Yr'), "10-Year": invst_grade.get('period10Yr'),
                "30-Year": invst_grade.get('period30Yr')
            }
            parts.append("- " + " | ".join([f"**{k}:** {v}" for k, v in risk_data.items() if v]))
        if (non_invst_grade := fund_info.find('creditSprdRiskNonInvstGrade')):
            parts.append("\n**Credit Spread Risk - Non-Investment Grade (dv01):**")
            risk_data = {
                "3-Month": non_invst_grade.get('period3Mon'), "1-Year": non_invst_grade.get('period1Yr'),
                "5-Year": non_invst_grade.get('period5Yr'), "10-Year": non_invst_grade.get('period10Yr'),
                "30-Year": non_invst_grade.get('period30Yr')
            }
            parts.append("- " + " | ".join([f"**{k}:** {v}" for k, v in risk_data.items() if v]))
        return_info = fund_info.find('returnInfo')
        if return_info:
            parts.append("\n**Monthly Return Information**")
            returns_data = []
            for monthly_return in return_info.find_all('monthlyTotReturn'):
                class_id = monthly_return.get('classId', 'N/A')
                class_name = class_name_map.get(class_id, f"Class ID {class_id}")
                returns_data.append({
                    "Class": class_name,
                    "Month 1 Return (%)": format_val(monthly_return.get('rtn1'), 'percent'),
                    "Month 2 Return (%)": format_val(monthly_return.get('rtn2'), 'percent'),
                    "Month 3 Return (%)": format_val(monthly_return.get('rtn3'), 'percent')
                })
            if returns_data:
                parts.append(to_compact_markdown(pd.DataFrame(returns_data), index=False))
            other_return_data = []
            for i in range(1, 4):
                other_mon_node = return_info.find(f'othMon{i}')
                if other_mon_node:
                    other_return_data.append({ "Period": f"Month {i}", "Net Realized Gain/Loss": format_val(other_mon_node.get('netRealizedGain'), 'dollar'), "Net Unrealized Appreciation/Depreciation": format_val(other_mon_node.get('netUnrealizedAppr'), 'dollar') })
            if other_return_data:
                parts.append("\n**Monthly Gains & Losses**")
                parts.append(to_compact_markdown(pd.DataFrame(other_return_data), index=False))
        var_info = fund_info.find('varInfo')
        if var_info and (designated_info := var_info.find('fundsDesignatedInfo')):
            parts.append("\n**Designated Index Information**")
            parts.append(f"- **Index Name:** {get_text(designated_info, 'nameDesignatedIndex')}")
            parts.append(f"- **Index Identifier:** {get_text(designated_info, 'indexIdentifier')}")
    class_level_nodes = xml.find_all('classLevelInfo')
    if class_level_nodes:
        parts.append("\n### NPORT-P: Part B: Information About the Series")
        for node in class_level_nodes:
            class_id = get_text(node, 'classId')
            class_name = class_name_map.get(class_id, f"Class ID {class_id}")
            parts.append(f"\n#### Class: {class_name} ({class_id})")
            parts.append("\n**Item B.2. Assets and Liabilities**")
            class_assets = {
                "Total Assets": format_val(get_text(node, 'totAssets'), 'dollar'),
                "Total Liabilities": format_val(get_text(node, 'totLiabs'), 'dollar'),
                "Net Assets": format_val(get_text(node, 'netAssets'), 'dollar'),
            }
            for key, val in class_assets.items():
                if val and val != "—":
                    parts.append(f"- **{key}:** {val}")
            nav_per_share = format_val(get_text(node, 'netAssetValuePerShare'), 'shares')
            if nav_per_share != "—":
                parts.append(f"**Item B.3. Net asset value per share:** {nav_per_share}")
            counterparty_nodes = node.find_all('securityLendingCounterparty')
            if counterparty_nodes:
                parts.append("\n**Item B.4. Securities Lending Counterparties**")
                counterparty_data = []
                for cp_node in counterparty_nodes:
                    counterparty_data.append({
                        "Counterparty Name": get_text(cp_node, 'counterpartyName'),
                        "Value of Securities on Loan": format_val(get_text(cp_node, 'valLoaned'), 'dollar')
                    })
                if counterparty_data:
                    parts.append(to_compact_markdown(pd.DataFrame(counterparty_data), index=False))
            parts.append("\n**Item B.5. Monthly Shareholder Flow Activity**")
            flow_data = []
            for i in range(1, 4):
                flow_node = node.find(f'mon{i}Flow')
                if flow_node:
                    flow_data.append({
                        "Period": f"Month {i}",
                        "Sales": format_val(flow_node.get('sales'), 'dollar'),
                        "Reinvestments": format_val(flow_node.get('reinvestment'), 'dollar'),
                        "Redemptions": format_val(flow_node.get('redemption'), 'dollar'),
                    })
            if flow_data:
                parts.append(to_compact_markdown(pd.DataFrame(flow_data), index=False))
            liq_info = node.find('highlyLiquidInvst')
            if liq_info:
                parts.append("\n**Item B.6. Highly Liquid Investment Minimum**")
                parts.append(f"- **Did the Fund meet the 30% minimum for at least one business day?** {format_val(get_text(liq_info, 'isFundMeet30PctDay1LiqAsset'))}")
                parts.append(f"- **Did the Fund meet the 10% minimum for at least one business day?** {format_val(get_text(liq_info, 'isFundMeet10PctWklyLiqAsset'))}")
    # Schedule of investments: one table row per <invstOrSec> element.
    investments = xml.find_all('invstOrSec')
    if investments:
        parts.append("\n### Schedule of Portfolio Investments")
        investment_data = []
        for item in investments:
            ids = []
            if (cusip := get_text(item, 'cusip')) != "—": ids.append(f"CUSIP: {cusip}")
            if (lei := get_text(item, 'lei')) != "—": ids.append(f"LEI: {lei}")
            id_node = item.find('identifiers')
            if id_node:
                if (isin := get_text(id_node, 'isin')) != "—": ids.append(f"ISIN: {isin}")
                if (ticker := get_text(id_node, 'ticker')) != "—": ids.append(f"Ticker: {ticker}")
            id_str = " ".join(ids) if ids else "—"
            lending_info = "—"
            if (lending_node := item.find('securityLending')):
                is_loaned = format_val(get_text(lending_node, 'isLoanByFund'))
                lending_info = f"On Loan: {is_loaned}"
            # Debt-security attributes only exist for fixed-income holdings.
            debt_sec = item.find('debtSec')
            maturity_dt, coupon_kind, annualized_rt = "—", "—", "—"
            if debt_sec:
                maturity_dt = get_text(debt_sec, 'maturityDt')
                coupon_kind = get_text(debt_sec, 'couponKind')
                annualized_rt = format_val(get_text(debt_sec, 'annualizedRt'), 'percent')
            record = {
                "Name": get_text(item, 'name'),
                "Title": get_text(item, 'title'),
                "Identifiers": id_str,
                "Payoff Profile": get_text(item, 'payoffProfile'),
                "Asset Category": get_text(item, 'assetCat'),
                "Issuer Category": get_text(item, 'issuerCat'),
                "Country": get_text(item, 'invCountry'),
                "Balance": format_val(get_text(item, 'balance'), 'number'),
                "Units": get_text(item, 'units'),
                "Value (USD)": format_val(get_text(item, 'valUSD'), 'dollar'),
                "% of Net Assets": format_val(get_text(item, 'pctVal'), 'percent'),
                "Maturity Date": maturity_dt,
                "Coupon Type": coupon_kind,
                "Annualized Rate (%)": annualized_rt,
                "Restricted?": format_val(get_text(item, 'isRestrictedSec')),
                "Fair Value Level": get_text(item, 'fairValLevel'),
                "Lending Status": lending_info,
            }
            investment_data.append(record)
        df = pd.DataFrame(investment_data)
        if not df.empty:
            column_order = [
                "Name", "Title", "Identifiers", "Payoff Profile", "Asset Category", "Issuer Category", "Country",
                "Balance", "Units", "Value (USD)", "% of Net Assets", "Maturity Date", "Coupon Type",
                "Annualized Rate (%)", "Restricted?", "Fair Value Level", "Lending Status"
            ]
            df = df.reindex(columns=column_order, fill_value="—").fillna("—")
            parts.append(to_compact_markdown(df, index=False))
    signature_node = xml.find('signature')
    if signature_node:
        parts.append("\n### Signature")
        signature_data = {
            "Date Signed": get_text(signature_node, 'dateSigned'),
            "Name of Applicant": get_text(signature_node, 'nameOfApplicant'),
            "Signature": get_text(signature_node, 'signature'),
            "Name of Signer": get_text(signature_node, 'signerName'),
            "Title": get_text(signature_node, 'title'),
        }
        for key, val in signature_data.items():
            if val != "—":
                parts.append(f"**{key}:** {val}")
    return "\n\n".join(parts)
def parse_form1a_xml(xml: BeautifulSoup) -> str:
    """
    Parses the XML of a Form 1-A filing into a structured and comprehensive
    Markdown document, mirroring the sections of the official form.

    Args:
        xml: Parsed Form 1-A filing (BeautifulSoup tree).

    Returns:
        Markdown text covering Items 1-6 of the form; if the filing has no
        <formData> element, only the document header is returned.
    """
    def get_text(node, tag):
        # Case-insensitive child lookup that tolerates an optional namespace
        # prefix; "—" is the missing-value sentinel.
        if not node: return "—"
        found = node.find(re.compile(f'^(?:\\w+:)?{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def format_dollar(value_str: str) -> str:
        # "$x.xx" for numeric strings; non-numeric input is passed through.
        if not value_str or value_str == "—": return "—"
        try:
            val = float(value_str)
            return f"${val:.2f}"
        except (ValueError, TypeError):
            return value_str
    def format_number(value_str: str) -> str:
        # Integer with thousands separators. The previous format spec was the
        # empty "{:}" (a no-op), so counts rendered without separators.
        if not value_str or value_str == "—": return "—"
        try:
            return f"{int(float(value_str)):,}"
        except (ValueError, TypeError):
            return value_str
    def format_bool(value_str: str, yes_char='Y', no_char='N') -> str:
        # Maps Y/TRUE -> "Yes", N/FALSE -> "No", anything else -> "—".
        if not value_str or value_str == "—": return "—"
        s = value_str.strip().upper()
        if s == yes_char or s == 'TRUE':
            return "Yes"
        if s == no_char or s == 'FALSE':
            return "No"
        return "—"
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## FORM 1-A\n\n"
        "### REGULATION A OFFERING STATEMENT\n"
        "### UNDER THE SECURITIES ACT OF 1933\n"
    ]
    form_data = xml.find('formData')
    if form_data is None:
        # Defensive: a filing without <formData> previously crashed with
        # AttributeError; emit just the header instead.
        return "\n\n".join(parts)
    parts.append("### Item 1. Issuer Information")
    emp_info = form_data.find('employeesInfo')
    issuer_info = form_data.find('issuerInfo')
    issuer_details_md = [
        f"**Exact name of issuer:** {get_text(emp_info, 'issuerName')}",
        f"**Jurisdiction of Incorporation/Organization:** {get_text(emp_info, 'jurisdictionOrganization')}",
        f"**Year of Incorporation:** {get_text(emp_info, 'yearIncorporation')}",
        f"**CIK:** {get_text(emp_info, 'cik')}",
        f"**I.R.S. Employer Identification Number:** {get_text(emp_info, 'irsNum')}",
        f"**Primary Standard Industrial Classification Code:** {get_text(emp_info, 'sicCode')}",
        f"**Total number of full-time employees:** {format_number(get_text(emp_info, 'fullTimeEmployees'))}",
        f"**Total number of part-time employees:** {format_number(get_text(emp_info, 'partTimeEmployees'))}",
        f"**Address of Principal Executive Offices:** {get_text(issuer_info, 'street1')}, {get_text(issuer_info, 'street2')}, {get_text(issuer_info, 'city')}, {get_text(issuer_info, 'stateOrCountry')} {get_text(issuer_info, 'zipCode')}",
        f"**Company Phone:** {get_text(issuer_info, 'phoneNumber')}",
        f"**Person to contact:** {get_text(issuer_info, 'connectionName')}",
    ]
    parts.extend(issuer_details_md)
    parts.append("\n### Financial Statements")
    financial_data = {
        "Balance Sheet Information": {
            "Cash and Cash Equivalents": format_dollar(get_text(issuer_info, 'cashEquivalents')),
            "Investment Securities": format_dollar(get_text(issuer_info, 'investmentSecurities')),
            "Accounts and Notes Receivable": format_dollar(get_text(issuer_info, 'accountsReceivable')),
            "Property, Plant and Equipment (PP&E)": format_dollar(get_text(issuer_info, 'propertyPlantEquipment')),
            "Total Assets": format_dollar(get_text(issuer_info, 'totalAssets')),
            "Accounts Payable and Accrued Liabilities": format_dollar(get_text(issuer_info, 'accountsPayable')),
            "Long-Term Debt": format_dollar(get_text(issuer_info, 'longTermDebt')),
            "Total Liabilities": format_dollar(get_text(issuer_info, 'totalLiabilities')),
            "Total Stockholders' Equity": format_dollar(get_text(issuer_info, 'totalStockholderEquity')),
            "Total Liabilities and Equity": format_dollar(get_text(issuer_info, 'totalLiabilitiesAndEquity'))
        },
        "Statement of Comprehensive Income Information": {
            "Total Revenues": format_dollar(get_text(issuer_info, 'totalRevenues')),
            "Costs and Expenses Applicable to Revenues": format_dollar(get_text(issuer_info, 'costAndExpensesApplToRevenues')),
            "Depreciation and Amortization": format_dollar(get_text(issuer_info, 'depreciationAndAmortization')),
            "Net Income": format_dollar(get_text(issuer_info, 'netIncome')),
            "Earnings Per Share - Basic": get_text(issuer_info, 'earningsPerShareBasic'),
            "Earnings Per Share - Diluted": get_text(issuer_info, 'earningsPerShareDiluted')
        },
        "Auditor Information": {
            "Name of Auditor": get_text(issuer_info, 'nameAuditor')
        }
    }
    for section_title, data_dict in financial_data.items():
        parts.append(f"\n**{section_title}**\n")
        table_data = {"Metric": list(data_dict.keys()), "Amount": list(data_dict.values())}
        parts.append(to_compact_markdown(pd.DataFrame(table_data), index=False))
    parts.append("\n### Outstanding Securities")
    common = form_data.find('commonEquity')
    preferred = form_data.find('preferredEquity')
    debt = form_data.find('debtSecurities')
    securities_data = [
        {"Class": get_text(common, 'commonEquityClassName'), "Outstanding": format_number(get_text(common, 'outstandingCommonEquity')), "CUSIP": get_text(common, 'commonCusipEquity'), "Publicly Traded": get_text(common, 'publiclyTradedCommonEquity')},
        {"Class": get_text(preferred, 'preferredEquityClassName'), "Outstanding": format_number(get_text(preferred, 'outstandingPreferredEquity')), "CUSIP": get_text(preferred, 'preferredCusipEquity'), "Publicly Traded": get_text(preferred, 'publiclyTradedPreferredEquity')},
        {"Class": get_text(debt, 'debtSecuritiesClassName'), "Outstanding": format_number(get_text(debt, 'outstandingDebtSecurities')), "CUSIP": get_text(debt, 'cusipDebtSecurities'), "Publicly Traded": get_text(debt, 'publiclyTradedDebtSecurities')}
    ]
    # 'None' is the literal class-name value the form uses for an unused
    # security class; rows with the missing-value sentinel "—" are kept.
    securities_df = pd.DataFrame([s for s in securities_data if s['Class'] != 'None'])
    if not securities_df.empty:
        parts.append(to_compact_markdown(securities_df, index=False))
    parts.append("\n### Item 2. Issuer Eligibility\n- [x] The issuer certifies that all of the statements in this part are true.")
    parts.append("\n### Item 3. Application of Rule 262\n- [x] The issuer certifies that it is not disqualified and has not been involved in any disqualifying event.")
    summary_info = form_data.find('summaryInfo')
    parts.append("\n### Item 4. Summary Information Regarding the Offering")
    offering_flags_md = [
        f"**Tier:** {get_text(summary_info, 'indicateTier1Tier2Offering')}",
        f"**Financial Statement Status:** {get_text(summary_info, 'financialStatementAuditStatus')}",
        f"**Type of Securities Offered:** {get_text(summary_info, 'securitiesOfferedTypes')}",
        f"**Is this a delayed or continuous offering?** {format_bool(get_text(summary_info, 'offerDelayedContinuousFlag'))}",
        f"**Was or is the offering to take place within one year after qualification?** {format_bool(get_text(summary_info, 'offeringYearFlag'))}",
        f"**Was or is the offering to commence within two days after qualification?** {format_bool(get_text(summary_info, 'offeringAfterQualifFlag'))}",
        f"**Is this a best efforts offering?** {format_bool(get_text(summary_info, 'offeringBestEffortsFlag'))}",
        f"**Was there any solicitation of interest?** {format_bool(get_text(summary_info, 'solicitationProposedOfferingFlag'))}",
        f"**Are there any resale securities by affiliates of the issuer?** {format_bool(get_text(summary_info, 'resaleSecuritiesAffiliatesFlag'))}"
    ]
    parts.extend(offering_flags_md)
    offering_amounts_data = {
        "Description": ["Number of securities offered", "Number of securities outstanding", "Price per security", "Issuer's aggregate offering price", "Aggregate offering price of securities held by security holders", "Aggregate price of securities offered concurrently", "Total aggregate offering price"],
        "Amount": [
            format_number(get_text(summary_info, 'securitiesOffered')),
            format_number(get_text(summary_info, 'outstandingSecurities')),
            format_dollar(get_text(summary_info, 'pricePerSecurity')),
            format_dollar(get_text(summary_info, 'issuerAggregateOffering')),
            format_dollar(get_text(summary_info, 'securityHolderAggegate')),
            format_dollar(get_text(summary_info, 'qualificationOfferingAggregate')),
            format_dollar(get_text(summary_info, 'totalAggregateOffering'))
        ]
    }
    parts.append("\n**Offering Amounts**")
    parts.append(to_compact_markdown(pd.DataFrame(offering_amounts_data), index=False))
    fees_data = {
        "Service Provider": ["Auditor", "Legal", "Promoters"],
        "Name": [get_text(summary_info, 'auditorServiceProviderName'), get_text(summary_info, 'legalServiceProviderName'), get_text(summary_info, 'promotersServiceProviderName')],
        "Fees": [format_dollar(get_text(summary_info, 'auditorFees')), format_dollar(get_text(summary_info, 'legalFees')), format_dollar(get_text(summary_info, 'promotersFees'))]
    }
    parts.append("\n**Anticipated Fees**")
    parts.append(to_compact_markdown(pd.DataFrame(fees_data), index=False))
    parts.append(f"**Estimated Net Proceeds to the Issuer:** {format_dollar(get_text(summary_info, 'estimatedNetAmount'))}")
    # Element name is misspelled in the EDGAR schema ('juridiction'); keep it.
    jur_info = form_data.find('juridictionSecuritiesOffered')
    if jur_info:
        parts.append("\n### Item 5. Jurisdictions in Which Securities are to be Offered")
        is_none = get_text(jur_info, 'jurisdictionsOfSecOfferedNone').lower() == 'true'
        if is_none:
            parts.append("- All States and Territories")
        else:
            states = [j.text for j in jur_info.find_all('issueJuridicationSecuritiesOffering')]
            parts.append(", ".join(states))
    securities_issued = form_data.find('securitiesIssued')
    unregistered_act = form_data.find('unregisteredSecuritiesAct')
    if securities_issued:
        parts.append("\n### Item 6. Unregistered Securities Issued or Sold Within One Year")
        unreg_details = [
            f"**Name of Such Issuer:** {get_text(securities_issued, 'securitiesIssuerName')}",
            f"**Title of Securities Issued:** {get_text(securities_issued, 'securitiesIssuerTitle')}",
            f"**Total Amount of Securities Issued:** {format_number(get_text(securities_issued, 'securitiesIssuedTotalAmount'))}",
            f"**Amount of such securities sold by principal security holders:** {format_number(get_text(securities_issued, 'securitiesPrincipalHolderAmount'))}",
            f"**Aggregate consideration:** {get_text(securities_issued, 'securitiesIssuedAggregateAmount')}",
            f"**Basis for aggregate consideration:** {get_text(securities_issued, 'aggregateConsiderationBasis')}",
            f"**Securities Act Exemption:** {get_text(unregistered_act, 'securitiesActExcemption')}"
        ]
        parts.extend(unreg_details)
    return "\n\n".join(parts)
def parse_abs_ee_xml(xml: BeautifulSoup) -> str:
    """
    Parses the XML of a Form ABS-EE data file (EX-102) into a structured Markdown table.
    """
    def _text(node, tag):
        # Exact, case-insensitive element-name match; "—" means "not present".
        if not node:
            return "—"
        hit = node.find(re.compile(f'^{tag}$', re.I))
        if hit and hit.text:
            return hit.text.strip()
        return "—"

    def _dollar(raw: str) -> str:
        # "$x.xx" for numeric strings; non-numeric input is returned untouched.
        if not raw or raw == "—":
            return "—"
        try:
            return f"${float(raw):.2f}"
        except (ValueError, TypeError):
            return raw

    asset_nodes = xml.find_all(re.compile(r'^assets$', re.I))
    if not asset_nodes:
        return ""
    rows = []
    for node in asset_nodes:
        if _text(node, 'assetNumber') == "—":
            # A row without an asset number carries no usable data.
            continue
        prop_node = node.find(re.compile(r'^property$', re.I))
        rows.append({
            "Asset Number": _text(node, 'assetNumber'),
            "Originator": _text(node, 'originatorName'),
            "Origination Date": _text(node, 'originationDate'),
            "Original Loan Amount": _dollar(_text(node, 'originalLoanAmount')),
            "Maturity Date": _text(node, 'maturityDate'),
            "Interest Rate (%)": _text(node, 'originalInterestRatePercentage'),
            "Property Name": _text(prop_node, 'propertyName') if prop_node else "—",
            "Property Type": _text(prop_node, 'propertyTypeCode') if prop_node else "—",
            "City": _text(prop_node, 'propertyCity') if prop_node else "—",
            "State": _text(prop_node, 'propertyState') if prop_node else "—",
        })
    sections = ["## Exhibit 102: Asset Data File"]
    if rows:
        sections.append(to_compact_markdown(pd.DataFrame(rows), index=False))
    return "\n\n".join(sections)
def parse_abs_ee_comments_xml(xml: BeautifulSoup) -> str:
    """
    Parses the XML of an ABS-EE Asset Related Document (EX-103) into Markdown.
    """
    from bs4 import Comment
    import textwrap
    # Boilerplate banner comments that should not appear in the output.
    skip_phrases = (
        "Exhibit 103",
        "Ford Credit Auto Lease Trust",
        "Asset Related Document",
        "This asset related document provides narrative",
    )
    section_headings = ("Explanatory Narrative", "General Narrative", "Item-specific Narrative")
    item_pattern = re.compile(r"Item\s+([0-9a-zA-Z\(\)]+)\s*\.\s*(.*)", re.DOTALL)
    comment_nodes = xml.find_all(string=lambda text: isinstance(text, Comment))
    if not comment_nodes:
        return ""
    rendered = ["## Exhibit 103: Asset Related Document"]
    for node in comment_nodes:
        text = node.strip()
        if any(phrase in text for phrase in skip_phrases):
            continue
        if text in section_headings:
            rendered.append(f"### {text}")
            continue
        matched = item_pattern.match(text)
        if matched:
            # "Item 1(a). Some narrative" -> a bullet keyed by the item number.
            rendered.append(f"- **Item {matched.group(1).strip()}:** {matched.group(2).strip()}")
        else:
            rendered.append(textwrap.fill(text, width=100))
    return "\n\n".join(rendered)
def parse_schedule13d_xml(xml: BeautifulSoup) -> str:
    """
    Parses a Schedule 13D or 13D/A filing into structured Markdown,
    creating a detailed cover page table for each reporting person that
    matches the visual layout of the original form.

    Returns an empty string when no <edgarSubmission> element is present.
    The emitted tables embed ##COLSPAN_n##/##ROWSPAN_1## markers which are
    not standard Markdown — presumably a downstream renderer uses them to
    merge cells (TODO confirm against that post-processor).
    """
    def get_text(node, tag):
        # Case-insensitive, exact-name tag lookup; "—" is the sentinel for
        # a missing node or empty text used throughout this parser family.
        if not node: return "—"
        found = node.find(re.compile(f'^{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    submission = xml.find('edgarSubmission')
    if not submission:
        return ""
    # NOTE(review): cover_page is assumed present once <edgarSubmission>
    # exists — a filing without <coverPageHeader> would raise here; confirm.
    cover_page = submission.find('coverPageHeader')
    issuer_info = cover_page.find('issuerInfo')
    form_data = submission.find('formData')
    # Static SEC cover-page banner (single concatenated string literal).
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## SCHEDULE 13D\n\n"
        "### Under the Securities Exchange Act of 1934\n"
    ]
    # Amendment number is only present on 13D/A filings.
    if (amendment_no := get_text(cover_page, 'amendmentNo')) != "—":
        parts.append(f"**(Amendment No. {amendment_no})**\n")
    parts.append(f"**{get_text(issuer_info, 'issuerName')}**")
    parts.append(f"*(Name of Issuer)*\n")
    parts.append(f"**{get_text(cover_page, 'securitiesClassTitle')}**")
    parts.append(f"*(Title of Class of Securities)*\n")
    parts.append(f"**{get_text(issuer_info, 'issuerCusip')}**")
    parts.append(f"*(CUSIP Number)*\n")
    # Contact person authorized to receive notices, with optional address block.
    auth_person_container = cover_page.find('authorizedPersons')
    notification_info = auth_person_container.find('notificationInfo') if auth_person_container else None
    if notification_info:
        contact_block = [f"**{get_text(notification_info, 'personName')}**"]
        # Address tag names vary; match any tag whose name ends in 'personAddress'.
        address_node = notification_info.find(lambda tag: tag.name.endswith('personAddress'))
        if address_node:
            address_lines = []
            if (street1 := get_text(address_node, 'street1')) != "—":
                address_lines.append(street1)
            if (street2 := get_text(address_node, 'street2')) != "—":
                address_lines.append(street2)
            city_state_zip = []
            if (city := get_text(address_node, 'city')) != "—": city_state_zip.append(city)
            if (state := get_text(address_node, 'stateOrCountry')) != "—": city_state_zip.append(state)
            if (zip_code := get_text(address_node, 'zipCode')) != "—": city_state_zip.append(zip_code)
            if city_state_zip:
                address_lines.append(" ".join(city_state_zip))
            contact_block.extend(address_lines)
        if (phone_num := get_text(notification_info, 'personPhoneNum')) != "—":
            contact_block.append(phone_num)
        parts.append(" ".join(contact_block))
        parts.append(f"*(Name, Address and Telephone Number of Person Authorized to Receive Notices and Communications)*\n")
    parts.append(f"**{get_text(cover_page, 'dateOfEvent')}**")
    parts.append(f"*(Date of Event Which Requires Filing of this Statement)*\n")
    # One cover-page table (rows 1-14 of the paper form) per reporting person.
    for i, person in enumerate(form_data.find_all('reportingPersonInfo'), 1):
        cusip = get_text(issuer_info, 'issuerCusip')
        name = get_text(person, 'reportingPersonName')
        # memberOfGroup holds 'a' or 'b'; render the matching checkbox pair.
        is_group_b = get_text(person, 'memberOfGroup') == 'b'
        group_checkboxes = "[ ] (a) [x] (b)" if is_group_b else "[x] (a) [ ] (b)"
        source_of_funds = get_text(person, 'fundType')
        is_legal_proc = get_text(person, 'legalProceedings') == 'Y'
        legal_proc_checkbox = '[x]' if is_legal_proc else '[ ]'
        citizenship_code = get_text(person, 'citizenshipOrOrganization')
        # Expand the SEC country/state code when known; otherwise keep the raw code.
        citizenship = SEC_COUNTRY_CODES.get(citizenship_code, citizenship_code)
        # NOTE(review): int(float(x)) truncates fractional share counts before
        # rendering with two decimal places — confirm this is intentional.
        sole_voting = f"{int(float(get_text(person, 'soleVotingPower'))):.2f}" if get_text(person, 'soleVotingPower') not in ["—", "0.00"] else "0.00"
        shared_voting = f"{int(float(get_text(person, 'sharedVotingPower'))):.2f}" if get_text(person, 'sharedVotingPower') not in ["—", "0.00"] else "0.00"
        sole_dispositive = f"{int(float(get_text(person, 'soleDispositivePower'))):.2f}" if get_text(person, 'soleDispositivePower') not in ["—", "0.00"] else "0.00"
        shared_dispositive = f"{int(float(get_text(person, 'sharedDispositivePower'))):.2f}" if get_text(person, 'sharedDispositivePower') not in ["—", "0.00"] else "0.00"
        aggregate_owned = f"{int(float(get_text(person, 'aggregateAmountOwned'))):.2f}" if get_text(person, 'aggregateAmountOwned') not in ["—", "0.00"] else "0.00"
        is_exclude_shares = get_text(person, 'isAggregateExcludeShares') == 'Y'
        exclude_shares_checkbox = '[x]' if is_exclude_shares else '[ ]'
        percent_of_class = f"{get_text(person, 'percentOfClass')}%"
        person_type = get_text(person, 'typeOfReportingPerson')
        comment = get_text(person, 'commentContent')
        # The ##ROWSPAN_1##/##COLSPAN_n## markers (and the duplicated cell
        # text in the rows below) are presumably consumed by a downstream
        # renderer to merge table cells — verify against the post-processor.
        shares_block_text = f"Number of Shares Beneficially Owned by Each Reporting Person With:##ROWSPAN_1##"
        table_content = {
            'name': f"Name of reporting person **{name}**##COLSPAN_2##",
            'group': f"Check the appropriate box if a member of a Group (See Instructions) {group_checkboxes}##COLSPAN_3##",
            'sec_use': "SEC use only##COLSPAN_4##",
            'source_funds': f"Source of funds (See Instructions) **{source_of_funds}**##COLSPAN_5##",
            'legal': f"Check if disclosure of legal proceedings is required pursuant to Items 2(d) or 2(e) {legal_proc_checkbox}##COLSPAN_6##",
            'citizenship': f"Citizenship or place of organization **{citizenship}**##COLSPAN_7##",
            'agg_owned': f"Aggregate amount beneficially owned by each reporting person **{aggregate_owned}**##COLSPAN_8##",
            'exclude_shares': f"Check if the aggregate amount in Row (11) excludes certain shares (See Instructions) {exclude_shares_checkbox}##COLSPAN_9##",
            'percent_class': f"Percent of class represented by amount in Row (11) **{percent_of_class}**##COLSPAN_10##",
            'person_type': f"Type of Reporting Person (See Instructions) **{person_type}**##COLSPAN_11##"
        }
        cusip_header = f"| **CUSIP No.** | **{cusip}** |"
        table_md = [
            "| | | | |",
            "|:--|:--|:--|:--|",
            f"| 1 | {table_content['name']} | {table_content['name']} | |",
            f"| 2 | {table_content['group']} | {table_content['group']} | |",
            f"| 3 | {table_content['sec_use']} | {table_content['sec_use']} | |",
            f"| 4 | {table_content['source_funds']} | {table_content['source_funds']} | |",
            f"| 5 | {table_content['legal']} | {table_content['legal']} | |",
            f"| 6 | {table_content['citizenship']} | {table_content['citizenship']} | |",
            f"| {shares_block_text} | 7 | Sole Voting Power **{sole_voting}** |",
            f"| {shares_block_text} | 8 | Shared Voting Power **{shared_voting}** |",
            f"| {shares_block_text} | 9 | Sole Dispositive Power **{sole_dispositive}** |",
            f"| {shares_block_text} | 10 | Shared Dispositive Power **{shared_dispositive}** |",
            f"| 11 | {table_content['agg_owned']} | {table_content['agg_owned']} | |",
            f"| 12 | {table_content['exclude_shares']} | {table_content['exclude_shares']} | |",
            f"| 13 | {table_content['percent_class']} | {table_content['percent_class']} | |",
            f"| 14 | {table_content['person_type']} | {table_content['person_type']} | |",
        ]
        parts.append(f"\n{cusip_header}\n---\n" + "\n".join(table_md) + "\n---")
        if comment != "—":
            parts.append(f"\n**Comment for Reporting Person:** {comment}")
    # Narrative disclosures (Items 1-7); only a subset is populated in the XML.
    items = form_data.find('items1To7')
    if items:
        item1 = items.find('item1')
        if item1:
            parts.append("\n**Item 1. Security and Issuer**")
            sec_title = get_text(item1, 'securityTitle')
            if sec_title != "—":
                parts.append(f"**(a) Title of Class of Securities:**\n{sec_title}")
            issuer_name = get_text(item1, 'issuerName')
            if issuer_name != "—":
                parts.append(f"**(b) Name of Issuer:**\n{issuer_name}")
            address_node = item1.find('issuerPrincipalAddress')
            if address_node:
                street1 = get_text(address_node, 'street1')
                street2 = get_text(address_node, 'street2')
                city = get_text(address_node, 'city')
                state = get_text(address_node, 'stateOrCountry')
                zip_code = get_text(address_node, 'zipCode')
                address_parts = [p for p in [street1, street2, city, state, zip_code] if p and p != "—"]
                if address_parts:
                    full_address = ", ".join(address_parts)
                    parts.append(f"**(c) Address of Issuer's Principal Executive Offices:**\n{full_address}")
            if (comment := get_text(item1, 'commentText')) and comment != "—":
                parts.append(f"\n{comment}")
        item4 = items.find('item4')
        if item4 and (purpose := get_text(item4, 'transactionPurpose')) and purpose != "—":
            parts.append(f"\n**Item 4. Purpose of Transaction**\n\n{purpose}")
        item5 = items.find('item5')
        if item5:
            parts.append("\n**Item 5. Interest in Securities of the Issuer**")
            # NOTE(review): get_text never returns "" — these walrus checks are
            # always truthy, so (a)/(b)/(c) render "—" when the tag is missing,
            # unlike the `!= "—"` guards used for Item 1. Confirm intended.
            if (text_a := get_text(item5, 'percentageOfClassSecurities')):
                parts.append(f"\n**(a)**\n{text_a}")
            if (text_b := get_text(item5, 'numberOfShares')):
                parts.append(f"\n**(b)**\n{text_b}")
            if (transaction_text := get_text(item5, 'transactionDesc')):
                parts.append(f"\n**(c)**\n{transaction_text}")
        item6 = items.find('item6')
        if item6 and (contracts := get_text(item6, 'contractDescription')) and contracts != "—":
            parts.append(f"\n**Item 6. Contracts, Arrangements, Understandings or Relationships With Respect to Securities of the Issuer.**\n\n{contracts}")
    # Signature block: one certification per <signaturePerson>.
    parts.append("\n### SIGNATURE\n")
    sig_info = form_data.find('signatureInfo')
    if sig_info:
        for signature in sig_info.find_all('signaturePerson'):
            details = signature.find('signatureDetails')
            parts.append(f"After reasonable inquiry and to the best of my knowledge and belief, I certify that the information set forth in this statement is true, complete and correct.\n")
            parts.append(f"**Reporting Person:** {get_text(signature, 'signatureReportingPerson')}\n")
            parts.append(f"**Signature:** {get_text(details, 'signature')}")
            parts.append(f"**Name/Title:** {get_text(details, 'title')}")
            parts.append(f"**Date:** {get_text(details, 'date')}\n")
    return "\n\n".join(parts)
def parse_form1k_xml(xml: "BeautifulSoup") -> str:
    """
    Parses the XML of a Form 1-K filing into a structured Markdown document.

    Sections emitted: filer information, submission contact information,
    Item 1 (issuer information) and Item 2 (ongoing reporting requirements).
    Returns an empty string when <headerData> or <formData> is missing.
    """
    def get_text(node, tag):
        # Case-insensitive, exact-name tag lookup; "—" is the sentinel for
        # a missing node or empty text used throughout this parser family.
        if not node: return "—"
        found = node.find(re.compile(f'^{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def format_bool(value_str: str, yes_char='Y', no_char='N') -> str:
        # Maps EDGAR Y/N (or TRUE/FALSE) flags to "Yes"/"No"; "—" passes through.
        if not value_str or value_str == "—": return "—"
        s = value_str.strip().upper()
        if s == yes_char or s == 'TRUE':
            return "Yes"
        if s == no_char or s == 'FALSE':
            return "No"
        return "—"
    parts = ["## Form 1-K Filing Summary"]
    header_data = xml.find('headerData')
    form_data = xml.find('formData')
    if not form_data or not header_data:
        return ""
    filer_info_node = header_data.find('filerInfo')
    # Guard every step of the lookup chain: filings without a <filer> element
    # previously raised AttributeError on the chained
    # `.find('filer').find('issuerCredentials')`.
    filer_node = filer_info_node.find('filer') if filer_info_node else None
    filer_creds_node = filer_node.find('issuerCredentials') if filer_node else None
    flags_node = filer_info_node.find('flags') if filer_info_node else None
    parts.append("\n### Filer Information")
    header_details = {
        "Issuer CIK": get_text(filer_creds_node, 'cik'),
        "Issuer CCC": get_text(filer_creds_node, 'ccc'),
        "Is filer a shell company?": format_bool(get_text(flags_node, 'shellCompanyFlag'), no_char='N'),
        "Is this filing by a successor company?": format_bool(get_text(flags_node, 'successorFilingFlag'), no_char='N'),
    }
    for key, val in header_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    parts.append("\n### Submission Contact Information")
    submission_details = {
        "Is this a LIVE or TEST Filing?": get_text(filer_info_node, 'liveTestFlag'),
        "Period": get_text(header_data, 'reportingPeriod'),
    }
    for key, val in submission_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    item1 = form_data.find('item1')
    item1_info = form_data.find('item1Info')
    item2 = form_data.find('item2')
    parts.append("\n### Item 1: Issuer Information (Tab 1 Notification)")
    issuer_details = {
        "Type of Report": get_text(item1, 'formIndication'),
        "Fiscal Year End": get_text(item1, 'fiscalYearEnd'),
        "Exact Name of Issuer": get_text(item1_info, 'issuerName'),
        "CIK": get_text(item1_info, 'cik'),
        "Jurisdiction of Incorporation": get_text(item1_info, 'jurisdictionOrganization'),
        "IRS Number": get_text(item1_info, 'irsNum'),
        "Address": f"{get_text(item1, 'street1')}, {get_text(item1, 'city')}, {get_text(item1, 'stateOrCountry')} {get_text(item1, 'zipCode')}",
        "Issuer Phone Number": get_text(item1, 'phoneNumber'),
        "Title of each class of securities issued pursuant to Regulation A": get_text(item1, 'issuedSecuritiesTitle'),
    }
    # A "—" anywhere in the value (e.g. a partially missing address) drops the row.
    for key, val in issuer_details.items():
        if val and "—" not in val:
            parts.append(f"**{key}:** {val}")
    parts.append("\n### Item 2: Ongoing Reporting Requirements")
    # EDGAR stores this flag as the literal string 'true'/'false'.
    is_compliant = get_text(item2, 'regArule257').lower() == 'true'
    compliance_text = "Yes" if is_compliant else "No"
    parts.append(f"**Is the issuer relying on the relief provided by Rule 257(d) for this filing?** {compliance_text}")
    return "\n\n".join(parts)
def parse_form1z_xml(xml: "BeautifulSoup") -> str:
    """
    Parses the XML of a Form 1-Z (Exit Report) into a structured Markdown document.

    Sections emitted: filer information, Item 1 (issuer information),
    Part II (certification of suspension) and the signature block.
    Returns an empty string when <headerData> or <formData> is missing.
    """
    def get_text(node, tag):
        # Case-insensitive, exact-name tag lookup; "—" is the sentinel for
        # a missing node or empty text used throughout this parser family.
        if not node: return "—"
        found = node.find(re.compile(f'^{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    # Static SEC banner (single concatenated string literal).
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## FORM 1-Z\n\n"
        "### EXIT REPORT UNDER REGULATION A\n"
    ]
    header_data = xml.find('headerData')
    form_data = xml.find('formData')
    if not form_data or not header_data:
        return ""
    filer_info_node = header_data.find('filerInfo')
    # Guard every step of the lookup chain: filings without a <filer> element
    # previously raised AttributeError on the chained
    # `.find('filer').find('issuerCredentials')`.
    filer_node = filer_info_node.find('filer') if filer_info_node else None
    filer_creds_node = filer_node.find('issuerCredentials') if filer_node else None
    parts.append("### Filer Information")
    header_details = {
        "Issuer CIK": get_text(filer_creds_node, 'cik'),
        "Issuer CCC": get_text(filer_creds_node, 'ccc'),
        "Is this a LIVE or TEST Filing?": get_text(filer_info_node, 'liveTestFlag'),
    }
    for key, val in header_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    item1_node = form_data.find('item1')
    parts.append("\n### Item 1: Issuer Information")
    item1_details = {
        "Name of Issuer": get_text(item1_node, 'issuerName'),
        "Address": f"{get_text(item1_node, 'street1')}, {get_text(item1_node, 'city')}, {get_text(item1_node, 'stateOrCountry')} {get_text(item1_node, 'zipCode')}",
        "Telephone Number": get_text(item1_node, 'phone'),
        "Commission File Number": get_text(item1_node, 'commissionFileNumber'),
    }
    # A "—" anywhere in the value (e.g. a partially missing address) drops the row.
    for key, val in item1_details.items():
        if val and "—" not in val:
            parts.append(f"**{key}:** {val}")
    cert_node = form_data.find('certificationSuspension')
    parts.append("\n### Part II: Certification of Suspension of Duty to File Reports")
    cert_details = {
        "Title of each class of securities": get_text(cert_node, 'securitiesClassTitle'),
        "File Number for the Regulation A offering statement": get_text(cert_node, 'certificationFileNumber'),
        "Approximate number of holders of record as of the certification date": get_text(cert_node, 'approxRecordHolders'),
    }
    for key, val in cert_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    sig_node = form_data.find('signatureTab')
    parts.append("\n### Signature")
    parts.append("Pursuant to the requirements of Regulation A, the issuer has duly caused this report to be signed on its behalf by the undersigned, thereunto duly authorized.")
    sig_details = {
        "CIK": get_text(sig_node, 'cik'),
        "Issuer": get_text(sig_node, 'regulationIssuerName1'),
        "By (Signature)": get_text(sig_node, 'signatureBy'),
        "Title": get_text(sig_node, 'title'),
        "Date": get_text(sig_node, 'date'),
    }
    for key, val in sig_details.items():
        if val and val != "—":
            parts.append(f"\n**{key}:** {val}")
    return "\n\n".join(parts)
def parse_form_ta1_xml(xml: BeautifulSoup) -> str:
    """
    Parses an XML-based Form TA-1 or TA-1/A into a structured Markdown document,
    handling variations in the XML schema over time and capturing all conditional details.

    Returns an empty string when <edgarSubmission> or its <registrant>
    section is missing.
    """
    def get_text(node, tag):
        # Case-insensitive lookup tolerating an optional namespace prefix
        # (e.g. <ns:entityName>); "—" marks a missing node or empty text.
        if not node: return "—"
        found = node.find(re.compile(rf'^(?:\w+:)?{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def format_bool(value_str: str) -> str:
        # Maps Y/N or TRUE/FALSE flags to "Yes"/"No"; anything else becomes "—".
        s = value_str.strip().upper()
        if s == 'Y' or s == 'TRUE': return "Yes"
        if s == 'N' or s == 'FALSE': return "No"
        return "—"
    def format_address(addr_node) -> str:
        # Joins the non-missing address components with commas.
        if not addr_node: return "—"
        parts = [
            get_text(addr_node, 'street1'),
            get_text(addr_node, 'street2'),
            get_text(addr_node, 'city'),
            get_text(addr_node, 'stateOrCountry'),
            get_text(addr_node, 'zipCode'),
        ]
        return ", ".join(p for p in parts if p and p != "—")
    # Static SEC banner (single concatenated string literal).
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## FORM TA-1\n\n"
        "### UNIFORM FORM OF APPLICATION FOR REGISTRATION AS A TRANSFER AGENT\n"
    ]
    submission_root = xml.find('edgarSubmission')
    if not submission_root:
        return ""
    # Schema variation: some vintages nest the sections under <formData>,
    # others put them directly under the submission root — fall back accordingly.
    search_context = submission_root.find('formData') or submission_root
    registrant = search_context.find('registrant')
    independent = search_context.find('independentRegistrant')
    disciplinary = search_context.find('disciplinaryHistory') or search_context
    signature = search_context.find('signatureData') or search_context.find('signature')
    if not registrant:
        return ""
    parts.append("### Registrant Information")
    reg_details = {
        "Appropriate regulatory agency": get_text(search_context, 'regulatoryAgency'),
        "Full name of Registrant": get_text(registrant, 'entityName'),
        "FINS Number": get_text(registrant, 'finsNumber'),
        "Address of principal office where transfer agent activities are performed": format_address(registrant.find('principalOfficeAddress')),
    }
    for key, val in reg_details.items():
        if val and "—" not in val:
            parts.append(f"**{key}:** {val}")
    is_mailing_different = format_bool(get_text(registrant, 'differentMailingAddress'))
    parts.append(f"**Is mailing address different from principal office address?:** {is_mailing_different}")
    if is_mailing_different == 'Yes':
        mailing_address = format_address(registrant.find('mailingAddress'))
        if mailing_address != "—":
            parts.append(f"**Mailing Address:** {mailing_address}")
    parts.append(f"**Telephone Number:** {get_text(registrant, 'telephoneNumber')}")
    conducts_other_business = format_bool(get_text(registrant, 'conductBusinessInOtherLocations'))
    parts.append(f"**Does registrant conduct business in other locations?:** {conducts_other_business}")
    if conducts_other_business == 'Yes':
        other_locations = registrant.find_all('otherBusinessLocation')
        if other_locations:
            for i, loc_node in enumerate(other_locations, 1):
                parts.append(f"**Other Business Location Address {i}:** {format_address(loc_node)}")
    remaining_reg_details = {
        "Is registrant a self-transfer agent?": format_bool(get_text(registrant, 'selfTransferAgent')),
        "Does registrant engage a service company to perform any of its transfer agent functions?": format_bool(get_text(registrant, 'engagedServiceCompany')),
    }
    for key, val in remaining_reg_details.items():
        if val and "—" not in val:
            parts.append(f"**{key}:** {val}")
    is_engaged_as_service_co = format_bool(get_text(registrant, 'engagedAsServiceCompany'))
    parts.append(f"**Is registrant engaged as a service company by a named transfer agent?:** {is_engaged_as_service_co}")
    if is_engaged_as_service_co == 'Yes':
        service_co_details = registrant.find_all('asServiceCompany')
        for i, co in enumerate(service_co_details, 1):
            parts.append(f"\n**Service Company Arrangement {i}:**")
            parts.append(f"- **Name:** {get_text(co, 'entityName')}")
            parts.append(f"- **File Number:** {get_text(co, 'fileNumber')}")
            parts.append(f"- **Address:** {format_address(co.find('asServiceCompanyAddress'))}")
    parts.append("\n### Ownership and Control Information")
    # Schema variation: registrant-type/control fields live under
    # <independentRegistrant> in some vintages, elsewhere in others.
    registrant_type_node = independent or registrant
    other_control_node = independent or search_context
    ind_details = {
        "Registrant Type": get_text(registrant_type_node, 'registrantType'),
        "Description (if Other)": get_text(registrant_type_node, 'registrantTypeDescription'),
    }
    for key, val in ind_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    other_control_mgmnt_node = other_control_node.find('otherControlManagement')
    has_other_control = format_bool(get_text(other_control_mgmnt_node, 'otherEntity'))
    parts.append(f"**Does any other person control the management or policies of the applicant?:** {has_other_control}")
    if has_other_control == 'Yes' and other_control_mgmnt_node:
        details_node = other_control_mgmnt_node.find('otherControlManagementDetails')
        if details_node:
            parts.append(f"- **Controlling Entity Name:** {get_text(details_node, 'entityName')}")
            parts.append(f"- **Agreement Description:** {get_text(details_node, 'agreementDescription')}")
    parts.append(f"**Does any other person directly or indirectly finance the applicant?:** {format_bool(get_text(other_control_node.find('otherControlFinance'), 'otherEntity'))}")
    # Sole proprietorships list control persons under a different tag than
    # corporations/partnerships; prefer the sole-proprietorship records.
    control_persons = (independent and independent.find_all('soleProprietorshipOtherData')) or \
                      search_context.find_all('corporationPartnershipData')
    if control_persons:
        parts.append("\n**Control Affiliates Information:**")
        owner_data = []
        for person in control_persons:
            owner_data.append({
                "Entity Name": get_text(person, 'entityName'),
                "Relationship Start Date": get_text(person, 'relationshipStartDate'),
                "Title or Status": get_text(person, 'titleOrStatus'),
                "Ownership Code": get_text(person, 'ownershipCode'),
                "Control Person": format_bool(get_text(person, 'controlPerson')),
                "Authority Description": get_text(person, 'authorityDescription'),
                "Relationship End Date": get_text(person, 'relationshipEndDate')
            })
        df = pd.DataFrame(owner_data)
        # Drop columns that are entirely missing or entirely "—" placeholders
        # so the rendered table only shows populated fields.
        df.dropna(axis=1, how='all', inplace=True)
        df = df.loc[:, (df != '—').any(axis=0)]
        parts.append(to_compact_markdown(df, index=False))
    parts.append("\n### Disciplinary History")
    # Maps schema tag names to the human-readable Form TA-1 disciplinary questions.
    DISCIPLINARY_QUESTIONS = {
        'felonyOrMisdemeanor': "Convicted/plead guilty to any felony or investment-related misdemeanor?",
        'otherFelony': "Convicted/plead guilty to any other felony?",
        'enjoinedInvestmentRelatedActivity': "Enjoined in connection with any investment-related activity?",
        'violationOfInvestmentRelatedRegulation': "Found to have violated any investment-related statute or regulation?",
        'falseStatementOrOmission': "Made a false statement or omission in a filing with the SEC?",
        'violationOfRegulations': "Found to have violated SRO rules or failed to supervise?",
        'authorizationDeniedOrSuspended': "Had authorization to act as a financial professional denied, suspended, or revoked?",
        'registrationDeniedOrSuspended': "Had a registration as a financial professional denied, suspended, or revoked?",
        'fsrFalseStatementOrOmission': "Federal/State agency found a false statement or omission?",
        'fsrViolationOfInvestmentRelatedRegulation': "Federal/State agency found a violation of investment-related regulations?",
        'fsrAuthorizationDeniedOrSuspended': "Federal/State agency denied, suspended, or revoked authorization?",
        'fsrFoundOrderAgainstApplicant': "Federal/State agency entered an order against the applicant?",
        'fsrRegistrationDeniedOrSuspended': "Federal/State agency denied, suspended, or revoked registration?",
        'fsrRevokedSuspendedLicense': "Federal/State agency revoked or suspended a license?",
        'sraFalseStatementOrOmission': "SRO found a false statement or omission?",
        'sraViolationOfRules': "SRO found a violation of its rules?",
        'sraAuthorizationDeniedOrSuspended': "SRO denied, suspended, or revoked authorization?",
        'sraRevokedSuspendedLicense': "SRO revoked or suspended a license?",
        'foreignAgency': "Subject of an order or finding by a foreign financial regulatory authority?",
        'subjectOfProceedings': "Currently the subject of any proceeding that could result in a 'yes' answer to any of the above?",
        'revokedBond': "Had a bond revoked for disorderly conduct, fraud, or dishonesty?",
        'unsatisfiedJudgementsOrLiens': "Have any unsatisfied judgments or liens against them?"
    }
    if disciplinary:
        for tag, question in DISCIPLINARY_QUESTIONS.items():
            # Case-insensitive tag match (schema casing varies across vintages).
            question_node = disciplinary.find(lambda t: t.name.lower() == tag.lower())
            if question_node:
                answer = format_bool(get_text(question_node, 'involved'))
                parts.append(f"\n- **{question}:** {answer}")
                if answer == 'Yes':
                    # Detail records live in a sibling tag named '<questionTag>Details'.
                    details_tag_name = tag + "Details"
                    details_nodes = question_node.find_all(lambda t: t.name.lower() == details_tag_name.lower())
                    for i, detail_node in enumerate(details_nodes, 1):
                        parts.append(f"  - **Details #{i}:**")
                        detail_data = {
                            "Entity Name": get_text(detail_node, 'entityName'),
                            "Action Title": get_text(detail_node, 'actionTitle'),
                            "Action Date": get_text(detail_node, 'actionDate'),
                            "Court/Body Name and Location": get_text(detail_node, 'courtOrBodyNameAndLocation'),
                            "Action Description": get_text(detail_node, 'actionDescription'),
                            "Disposition": get_text(detail_node, 'dispositionOfProceeding')
                        }
                        for key, val in detail_data.items():
                            if val and val != "—":
                                parts.append(f"    - **{key}:** {val}")
    if signature:
        parts.append("\n### Signature")
        sig_details = {
            "Signature": get_text(signature, 'signatureName'),
            "Title": get_text(signature, 'signatureTitle'),
            "Date": get_text(signature, 'signatureDate'),
            "Phone Number": get_text(signature, 'signaturePhoneNumber'),
        }
        for key, val in sig_details.items():
            if val and val != "—":
                parts.append(f"**{key}:** {val}")
    return "\n\n".join(parts)
def parse_form_mai_xml(xml: BeautifulSoup) -> str:
"""
Parses an XML-based Form MA-I or MA-I/A into a structured Markdown document,
capturing all applicant, employment, and disciplinary history details.
"""
def get_text(node, tag):
if not node: return "—"
found = node.find(re.compile(rf'^(?:\w+:)?{tag}$', re.I))
return found.text.strip() if found and found.text else "—"
def format_bool(value_str: str) -> str:
s = value_str.strip().upper()
if s == 'Y' or s == 'TRUE': return "Yes"
if s == 'N' or s == 'FALSE': return "No"
return "—"
def format_name(name_node) -> str:
if not name_node: return "—"
first = get_text(name_node, 'firstName')
middle = get_text(name_node, 'middleName')
last = get_text(name_node, 'lastName')
return " ".join(p for p in [first, middle, last] if p and p != "—")
parts = [
"### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
"**Washington, D.C. 20549**\n\n"
"## FORM MA-I\n\n"
"### APPLICATION FOR MUNICIPAL ADVISOR REGISTRATION OF A NATURAL PERSON\n"
]
header = xml.find('headerData')
form_data = xml.find('formData')
if not form_data or not header:
return ""
filer_info = header.find('filerInfo')
contact = filer_info.find('contact') if filer_info else None
filer_node = header.find(re.compile(r'^(?:\w+:)?filer$', re.I))
parts.append("### Filer and Contact Information")
header_details = {
"Filer CIK": get_text(filer_node, 'filerId'),
"Contact Name": get_text(contact, 'name'),
"Contact Phone": get_text(contact, 'phoneNumber'),
"Contact Email": get_text(filer_info, 'contactEmail'),
"Notification Emails": ", ".join(n.text for n in header.find_all('internetNotificationAddress')),
}
for key, val in header_details.items():
if val and val != "—":
parts.append(f"**{key}:** {val}")
parts.append("\n### Applicant Information")
applicant_details = {
"Is this an amendment?": format_bool(get_text(form_data, 'isAmendment')),
"Is applicant a natural person?": format_bool(get_text(form_data, 'isIndividual')),
"Full Name of Applicant": format_name(form_data.find('applicantName')),
"Applicant CRD Number": get_text(form_data, 'applicantCrdNum'),
"Associated with more than one advisory firm?": format_bool(get_text(form_data, 'hasMoreThanOneAdvisoryFirms')),
"Number of advisory firms": get_text(form_data, 'noOfAdvisoryFirms'),
}
for key, val in applicant_details.items():
if val and val != "—":
parts.append(f"**{key}:** {val}")
offices = form_data.find_all('municipalAdvisorOffice')
if offices:
parts.append("\n### Municipal Advisor Firm Information")
for i, office in enumerate(offices, 1):
firm = office.find('municipalFirm')
parts.append(f"\n**Firm #{i}**")
firm_details = {
"Firm Name": get_text(firm, 'municipalFirmName'),
"Firm CIK": get_text(firm.find('municipalFiler'), 'filerId'),
"Employment Start Date": get_text(firm, 'recentEmploymentCommencedDate'),
"Independent Contractor Relationship?": format_bool(get_text(firm, 'isIndependentRelatioship')),
}
for key, val in firm_details.items():
if val and val != "—":
parts.append(f"- **{key}:** {val}")
reg_info = office.find('maRegistration')
if reg_info:
parts.append("\n **Firm's Registration Information:**")
reg_details = {
"Form MA Filing Date": get_text(reg_info.find('hasFiled'), 'filingDate'),
"EDGAR CIK No.": get_text(reg_info.find('hasFiled'), 'cik'),
}
for key, val in reg_details.items():
if val and val != "—":
parts.append(f" - **{key}:** {val}")
advisor_offices = office.find_all('advisorOffice')
if advisor_offices:
parts.append("\n **Office Location Information:**")
for j, adv_office in enumerate(advisor_offices, 1):
location_types = ", ".join(loc.text for loc in adv_office.find_all('locationInfo'))
parts.append(f"\n - **Office #{j} ({location_types})**")
addr_info_node = adv_office.find('addressInfo')
addr = addr_info_node.find('address') if addr_info_node else None
office_details = {
"Start Date": get_text(adv_office, 'startDate'),
"Address": f"{get_text(addr, 'street1')}, {get_text(addr, 'city')}, {get_text(addr, 'stateOrCountry')} {get_text(addr, 'zipCode')}"
}
for key, val in office_details.items():
if val and val.replace(",", "").replace(" ", "") != "—":
parts.append(f" - **{key}:** {val}")
other_names = form_data.find_all('otherName')
if other_names:
parts.append("\n### Other Names Used")
for name in other_names:
parts.append(f"- {format_name(name)}")
emp_history = form_data.find('employmentHistory')
if emp_history:
parts.append("\n### Employment History")
current = emp_history.find('currentEmployer')
parts.append("\n**Current Employer**")
current_addr_node = current.find('addressInfo') if current else None
addr_parts = [
get_text(current_addr_node, 'city'),
get_text(current_addr_node, 'stateOrCountry'),
get_text(current_addr_node, 'zipCode')
]
current_address = ", ".join(p for p in addr_parts if p and p != "—")
current_emp_details = {
"Start Date": get_text(current, 'startDate'),
"Employer Name": get_text(current, 'name'),
"Address": current_address,
"Position": get_text(current, 'positionDescription'),
"Related to Municipal Advisor business?": format_bool(get_text(current, 'isRelatedToMunicipalAdvisor')),
"Investment Related?": format_bool(get_text(current, 'isRelatedToInvestment')),
}
for key, val in current_emp_details.items():
if val and val != "—":
parts.append(f"- **{key}:** {val}")
priors = emp_history.find_all('priorEmployer')
if priors:
parts.append("\n**Prior Employers**")
for prior in priors:
parts.append(f"\n- **{get_text(prior, 'name')}** ({get_text(prior, 'startDate')} - {get_text(prior, 'endDate')})")
prior_addr_node = prior.find('addressInfo')
prior_addr_parts = [
get_text(prior_addr_node, 'city'),
get_text(prior_addr_node, 'stateOrCountry'),
get_text(prior_addr_node, 'zipCode')
]
prior_address = ", ".join(p for p in prior_addr_parts if p and p != "—")
if prior_address:
parts.append(f" - **Address:** {prior_address}")
parts.append(f" - **Position:** {get_text(prior, 'positionDescription')}")
parts.append(f"\n**Engaged in other business?** {format_bool(get_text(form_data, 'isEngagedInOtherBusiness'))}")
other_businesses = form_data.find_all('otherBusiness')
if other_businesses:
parts.append("\n### Other Business Information")
for i, business in enumerate(other_businesses, 1):
addr_node = business.find('addressInfo')
address_str = ", ".join(p for p in [get_text(addr_node, 'street1'), get_text(addr_node, 'city'), get_text(addr_node, 'stateOrCountry'), get_text(addr_node, 'zipCode')] if p and p != "—")
parts.append(f"\n**Business #{i}**")
business_details = {
"Start Date": get_text(business, 'startDate'),
"Name": get_text(business, 'name'),
"Address": address_str,
"Related to Municipal Advisor business?": format_bool(get_text(business, 'isRelatedToMunicipalAdvisor')),
"Investment Related?": format_bool(get_text(business, 'isRelatedToInvestment')),
"Nature of Business": get_text(business, 'natureOfBusiness'),
"Position": get_text(business, 'positionDescription'),
"Approx. Hours/Month": get_text(business, 'approximateHoursOrMonths'),
"Duties": get_text(business, 'dutiesDescription'),
}
for key, val in business_details.items():
if val and val != "—":
parts.append(f"- **{key}:** {val}")
questions_node = form_data.find('disclosureQuestions')
if questions_node:
parts.append("\n### Disclosure Questions")
DISCLOSURE_QUESTION_MAP = {
"Item 6A: Criminal Disclosure": {
"criminalDisclosure": {
"isConvictedOfFelony": "(1)(a) Has the individual ever been convicted of any felony, or pled guilty or nolo contendere to any charge of a felony in a domestic, foreign, or military court?",
"isChargedWithFelony": "(1)(b) Has the individual ever been charged with any felony?",
"isOrgConvictedOfFelony": "(2)(a) Based upon activities that occurred while the individual exercised control over it, has an organization ever been convicted of any felony or pled guilty or nolo contendere in a domestic or foreign court to any charge of a felony?",
"isOrgChargedWithFelony": "(2)(b) Based upon activities that occurred while the individual exercised control over it, has an organization ever been charged with any felony?"
}
},
"Item 6B: Criminal Disclosure (Misdemeanor)": {
"criminalDisclosure": {
"isConvictedOfMisdemeanor": "(1)(a) Has the individual ever been convicted of any misdemeanor or pled guilty or nolo contendere to any charge of a misdemeanor involving: municipal advisory activities or a municipal advisor-related or investment-related business or any fraud, false statements or omissions, wrongful taking of property, bribery, perjury, forgery, counterfeiting, extortion, or a conspiracy to commit any of these offenses?",
"isChargedWithMisdemeanor": "(1)(b) Has the individual ever been charged with any misdemeanor of the kind described in 6B(1)(a)?",
"isOrgConvictedOfMisdemeanor": "(2)(a) Based upon activities that occurred while the individual exercised control over it, has an organization ever been convicted of any misdemeanor or pled guilty or nolo contendere to any charge of a misdemeanor of the kind specified in 6B(1)(a)?",
"isOrgChargedWithMisdemeanor": "(2)(b) Based upon activities that occurred while the individual exercised control over it, has an organization ever been charged with any misdemeanor of the kind specified in 6B(1)(a)?"
}
},
"Item 6C: Regulatory Action Disclosure (SEC or CFTC)": {
"regulatoryDisclosure": {
"isMadeFalseStatement": "(1) Has the SEC or the CFTC ever found the individual to have made a false statement or omission?",
"isViolatedRegulation": "(2) Has the SEC or the CFTC ever found the individual to have been involved in a violation of any SEC or CFTC regulation or statute?",
"isViolatedSecurityAct": "(3) Has the SEC or the CFTC ever found the individual to have been a cause of a denial, suspension, revocation, or restriction of the authorization of a municipal advisor-related business or investment-related business to operate?",
"isOrderAgainst": "(4) Has the SEC or the CFTC ever entered an order against the individual in connection with municipal advisor-related or investment-related activity?",
"isImposedPenalty": "(5) Has the SEC or the CFTC ever imposed a civil money penalty on the individual, or ordered the individual to cease and desist from any activity?",
"isWillFullyAided": "(6) Has the SEC or the CFTC ever found the individual to have willfully violated any provision of the specified Acts, or any rule or regulation under any of such Acts, or any of the rules of the MSRB, or found the individual to have been unable to comply with any provision of such Acts, rules or regulations?",
"isFailedToSupervise": "(7) Has the SEC or the CFTC ever found the individual to have willfully aided, abetted, counseled, commanded, induced, or procured the violation by any person of any provision of the specified Acts, or any rule or regulation under any of such Acts, or any of the rules of the MSRB?",
"isFailedResonably": "(8) Has the SEC or the CFTC ever found the individual to have failed reasonably to supervise another person subject to his or her supervision, with a view to preventing the violation of any provision of the specified Acts, or any rule or regulation under any of such Acts, or any of the rules of the MSRB?"
}
},
"Item 6G: Investigation Disclosure": {
"investigationDisclosure": {
"isInvestigated": "(1) Has the individual been notified, in writing, that he or she is currently the subject of any regulatory complaint or proceeding that could result in a 'Yes' answer to any part of 6C, D, or E?"
}
},
"Item 6H: Civil Judicial Action Disclosure": {
"civilDisclosure": {
"isEnjoined": "(1)(a) Has any domestic or foreign court ever enjoined the individual in connection with any municipal advisor-related or investment-related activity?",
"isFoundInViolationOfRegulation": "(1)(b) Has any domestic or foreign court ever found that the individual was involved in a violation of any municipal advisor-related or investment-related statute(s) or regulation(s)?",
"isDismissed": "(1)(c) Has any domestic or foreign court ever dismissed, pursuant to a settlement agreement, a municipal advisor-related or investment-related civil action brought against the individual by a domestic jurisdiction or foreign financial regulatory authority?",
"isNamedInCivilProceeding": "(2) Is the individual named in any currently pending civil proceeding that could result in a 'Yes' answer to any part of 6H(1)?"
}
},
"Item 6I: Customer Complaint/Arbitration/Civil Litigation Disclosure": {
"complaintDisclosure": {
"isComplaintSettled": "(1)(a) Has the individual ever been the subject of a municipal advisor-related or investment-related, customer-initiated written or oral complaint that alleged that he or she was involved in fraud, false statements, omissions, theft, embezzlement, wrongful taking of property, bribery, forgery, counterfeiting, extortion, or dishonest, unfair or unethical practices, which was settled?",
"isComplaintPending": "(1)(b) Has the individual ever been the subject of a municipal advisor-related or investment-related, customer-initiated written or oral complaint that alleged that he or she was involved in fraud, false statements, omissions, theft, embezzlement, wrongful taking of property, bribery, forgery, counterfeiting, extortion, or dishonest, unfair or unethical practices, which is still pending?",
"isFraudCasePending": "(2)(a) Has the individual ever been the subject of a municipal advisor-related or investment-related, customer-initiated arbitration or civil litigation that alleged that he or she was involved in fraud, false statements, omissions, theft, embezzlement, wrongful taking of property, bribery, forgery, counterfeiting, extortion, or dishonest, unfair or unethical practices, which is still pending?",
"isFraudCaseResultedAward": "(2)(b) Has the individual ever been the subject of a municipal advisor-related or investment-related, customer-initiated arbitration or civil litigation that alleged that he or she was involved in fraud, false statements, omissions, theft, embezzlement, wrongful taking of property, bribery, forgery, counterfeiting, extortion, or dishonest, unfair or unethical practices, which resulted in an arbitration award or civil judgment against the individual, regardless of amount?",
"isFraudCaseSettled": "(2)(c) Has the individual ever been the subject of a municipal advisor-related or investment-related, customer-initiated arbitration or civil litigation that alleged that he or she was involved in fraud, false statements, omissions, theft, embezzlement, wrongful taking of property, bribery, forgery, counterfeiting, extortion, or dishonest, unfair or unethical practices, which was settled?"
}
},
"Item 6J: Termination Disclosure": {
"terminationDisclosure": {
"isViloatedIndustryStandard": "(1) Has the individual ever voluntarily resigned, been discharged or permitted to resign after allegations were made that accused him or her of violating municipal advisor-related or investment-related statutes, regulations, rules, or industry standards of conduct?",
"isInvolvedInFraud": "(2) Has the individual ever voluntarily resigned, been discharged or permitted to resign after allegations were made that accused him or her of fraud or the wrongful taking of property?",
"isFailedToSupervise": "(3) Has the individual ever voluntarily resigned, been discharged or permitted to resign after allegations were made that accused him or her of failure to supervise in connection with municipal advisor-related or investment-related statutes, regulations, rules or industry standards of conduct?"
}
},
"Item 6K: Financial Disclosure": {
"financialDisclosure": {
"isCompromised": "(1) Within the past 10 years, has the individual made a compromise with creditors, filed a bankruptcy petition or been the subject of an involuntary bankruptcy petition?",
"isBankruptcyPetition": "(2) Based upon events that occurred while the individual exercised control over it, has an organization made a compromise with creditors, filed a bankruptcy petition or been the subject of an involuntary bankruptcy petition?",
"isTrusteeApointed": "(3) Based upon events that occurred while the individual exercised control over it, has a broker or dealer been the subject of an involuntary bankruptcy petition, had a trustee appointed, or had a direct payment procedure initiated under the Securities Investor Protection Act?",
"isBondRevoked": "(4) Has a bonding company ever denied, paid out on, or revoked a bond for the individual?"
}
},
"Item 6M: Judgment/Lien Disclosure": {
"judgmentLienDisclosure": {
"isLienAgainst": "Are there currently any unsatisfied judgments or liens against the individual?"
}
}
}
for item_title, sections in DISCLOSURE_QUESTION_MAP.items():
parts.append(f"\n**{item_title}**")
for section_tag, questions in sections.items():
section_node = questions_node.find(section_tag)
if section_node:
for question_tag, question_text in questions.items():
answer = format_bool(get_text(section_node, question_tag))
parts.append(f"- {question_text} **{answer}**")
sig_info = form_data.find('signatureInfo')
if sig_info:
parts.append("\n### Signature")
sig = sig_info.find('signature')
sig_details = {
"Date Signed": get_text(sig, 'dateSigned'),
"Signature": get_text(sig, 'signature'),
"Title": get_text(sig, 'title'),
}
for key, val in sig_details.items():
if val and val != "—":
parts.append(f"**{key}:** {val}")
return "\n\n".join(parts)
def parse_form_x17a5_xml(xml: BeautifulSoup) -> str:
    """
    Parses an XML-based Form X-17A-5 (FOCUS Report) into a structured Markdown document.

    Returns an empty string when either the headerData or formData section is
    missing from the submission. Missing individual fields render internally as
    "—" and are omitted from the generated Markdown.
    """
    def get_text(node, tag):
        # Case-insensitive exact-tag lookup; "—" is the sentinel for "absent".
        if not node: return "—"
        found = node.find(re.compile(f'^{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def find_child(node, tag):
        # None-safe .find(): avoids AttributeError when a whole parent section
        # (e.g. registrantIdentification) is absent from the filing, letting
        # get_text/format_address fall back to "—".
        return node.find(tag) if node else None
    def format_bool(value_str: str) -> str:
        # Normalizes Y/TRUE and N/FALSE answers; anything else becomes "—".
        s = value_str.strip().upper()
        if s == 'Y' or s == 'TRUE': return "Yes"
        if s == 'N' or s == 'FALSE': return "No"
        return "—"
    def format_address(addr_node) -> str:
        # Comma-joins only the address components that are actually present.
        if not addr_node: return "—"
        parts = [
            get_text(addr_node, 'street1'),
            get_text(addr_node, 'street2'),
            get_text(addr_node, 'city'),
            get_text(addr_node, 'stateOrCountry'),
            get_text(addr_node, 'zipCode'),
        ]
        return ", ".join(p for p in parts if p and p != "—")
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## FORM X-17A-5\n\n"
        "### ANNUAL AUDITED REPORT\n"
    ]
    header_data = xml.find('headerData')
    form_data = xml.find('formData')
    if not form_data or not header_data:
        return ""
    filer_info = header_data.find('filerInfo')
    filer_creds = filer_info.find('filerCredentials') if filer_info else None
    flags = filer_info.find('flags') if filer_info else None
    parts.append("### Filer Information")
    filer_details = {
        "Filer CIK": get_text(filer_creds, 'filerCik'),
        "Filer CCC": get_text(filer_creds, 'filerCcc'),
        "Is this a LIVE or TEST filing?": get_text(filer_info, 'liveTestFlag'),
        "Would you like a Return Copy?": format_bool(get_text(flags, 'returnCopyFlag')),
    }
    for key, val in filer_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    submission_info = form_data.find('submissionInformation')
    parts.append("\n### Submission Information")
    sub_details = {
        "Report Period Begin Date": get_text(submission_info, 'periodBegin'),
        "Report Period End Date": get_text(submission_info, 'periodEnd'),
        "Type of Registrant": get_text(submission_info, 'typeOfRegistrant'),
        "Any material weaknesses identified?": format_bool(get_text(submission_info, 'materialWeakness')),
        "Amendment Description": get_text(submission_info, 'amendmentDescription'),
    }
    for key, val in sub_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    registrant_info = form_data.find('registrantIdentification')
    parts.append("\n### Registrant Identification")
    reg_details = {
        "Name of Broker-Dealer": get_text(registrant_info, 'brokerDealerName'),
        # find_child guards against a missing registrantIdentification section.
        "Business Address": format_address(find_child(registrant_info, 'businessAddress')),
        "Contact Person": get_text(registrant_info, 'contactPersonName'),
        "Contact Phone": get_text(registrant_info, 'contactPersonPhoneNumber'),
    }
    for key, val in reg_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    accountant_info = form_data.find('accountantIdentification')
    parts.append("\n### Independent Public Accountant Identification")
    acc_details = {
        "Accountant Name": get_text(accountant_info, 'accountantName'),
        # find_child guards against a missing accountantIdentification section.
        "Accountant Address": format_address(find_child(accountant_info, 'accountantAddress')),
        "Accountant Type": get_text(accountant_info, 'accountantType'),
    }
    for key, val in acc_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    oath_info = form_data.find('oathSignature')
    parts.append("\n### OATH OR AFFIRMATION")
    parts.append(f"I, **{get_text(oath_info, 'signPersonName')}**, swear (or affirm) that, to the best of my knowledge and belief, the accompanying financial statements and supporting schedules pertaining to the firm of **{get_text(oath_info, 'entityName')}**, as of **{get_text(oath_info, 'signDate')}**, are true and correct.")
    oath_details = {
        "Signature": get_text(oath_info, 'signature'),
        "Title": get_text(oath_info, 'oathTitle'),
        "Notarized": format_bool(get_text(oath_info, 'confirmNotarizedFlag')),
    }
    for key, val in oath_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    return "\n\n".join(parts)
def parse_form_cfportal_xml(xml: BeautifulSoup) -> str:
    """
    Parses an XML-based Form CFPORTAL or CFPORTAL/A into a structured Markdown document.

    Returns an empty string when either the headerData or formData section is
    missing from the submission. Missing individual fields render internally as
    "—" and are generally omitted from the generated Markdown.
    """
    def get_text(node, tag):
        # Case-insensitive exact-tag lookup, tolerant of a namespace prefix.
        if not node: return "—"
        found = node.find(re.compile(rf'^(?:\w+:)?{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def find_child(node, tag):
        # None-safe .find(): avoids AttributeError when a whole parent section
        # (e.g. identifyingInformation) is absent, letting get_text /
        # format_address / format_name fall back to "—".
        return node.find(tag) if node else None
    def format_bool(value_str: str) -> str:
        # Normalizes Y/TRUE and N/FALSE answers; anything else becomes "—".
        s = value_str.strip().upper()
        if s == 'Y' or s == 'TRUE': return "Yes"
        if s == 'N' or s == 'FALSE': return "No"
        return "—"
    def format_address(addr_node) -> str:
        # Comma-joins only the address components that are actually present.
        if not addr_node: return "—"
        parts = [
            get_text(addr_node, 'street1'),
            get_text(addr_node, 'street2'),
            get_text(addr_node, 'city'),
            get_text(addr_node, 'stateOrCountry'),
            get_text(addr_node, 'zipCode'),
        ]
        return ", ".join(p for p in parts if p and p != "—")
    def format_name(name_node) -> str:
        # "First Middle Last", skipping any missing components.
        if not name_node: return "—"
        first = get_text(name_node, 'firstName')
        middle = get_text(name_node, 'middleName')
        last = get_text(name_node, 'lastName')
        return " ".join(p for p in [first, middle, last] if p and p != "—")
    # Code tables for Schedules A/B, expanded to human-readable descriptions.
    OWNERSHIP_CODES = {
        'NA': "NA - less than 5%", 'A': "A - 5% but less than 10%",
        'B': "B - 10% but less than 25%", 'C': "C - 25% but less than 50%",
        'D': "D - 50% but less than 75%", 'E': "E - 75% or more",
        'G': "G - Other (general partner, trustee, or elected member)"
    }
    ENTITY_TYPE_CODES = {
        'DE': "DE (Domestic Entity)",
        'FE': "FE (Foreign Entity)",
        'NP': "NP (Natural Person)"
    }
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## FORM CFPORTAL\n\n"
        "### FUNDING PORTAL REGISTRATION AND REPORTING\n"
    ]
    header = xml.find('headerData')
    form_data = xml.find('formData')
    if not form_data or not header:
        return ""
    filer_info = header.find('filerInfo')
    filer_creds = filer_info.find('filerCredentials') if filer_info else None
    contact_info = filer_info.find('contact') if filer_info else None
    parts.append("### Filer Information")
    filer_details = {
        "Filer CIK": get_text(filer_creds, 'filerCik'),
        "Filer CCC": get_text(filer_creds, 'filerCcc'),
        "Is this a LIVE or TEST Filing?": get_text(filer_info, 'liveTestFlag'),
    }
    for key, val in filer_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    if contact_info:
        parts.append("\n### Submission Contact Information")
        contact_details = {
            "Name": get_text(contact_info, 'name'),
            "Phone Number": get_text(contact_info, 'phoneNumber'),
            "E-mail Address": get_text(filer_info, 'contactEmail'),
        }
        for key, val in contact_details.items():
            if val and val != "—":
                parts.append(f"**{key}:** {val}")
    ident_info = form_data.find('identifyingInformation')
    parts.append("\n### Identifying Information")
    ident_details = {
        "Full Name of Funding Portal": get_text(ident_info, 'nameOfPortal'),
        "Amendment Explanation": get_text(ident_info, 'amendmentExplanation'),
        # find_child guards each nested lookup against a missing parent section.
        "Other Business Name(s)": get_text(find_child(ident_info, 'otherNamesAndWebsiteUrls'), 'otherNamesUsedPortal'),
        "Website URL(s)": get_text(find_child(ident_info, 'otherNamesAndWebsiteUrls'), 'webSiteOfPortal'),
        "Previous Website URL(s)": get_text(find_child(ident_info, 'prevNamesAndWebsiteUrls'), 'prevWebSiteUrls'),
        "IRS Employer ID No.": get_text(ident_info, 'irsEmployerIdNumber'),
        "Portal's Main Street Address": format_address(find_child(ident_info, 'portalAddress')),
    }
    for key, val in ident_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    mailing_address_is_different = get_text(ident_info, 'mailingAddressDifferent').lower() == 'true'
    if not mailing_address_is_different:
        parts.append("**Mailing Address:** Same as main address")
    else:
        parts.append(f"**Mailing Address:** {format_address(find_child(ident_info, 'portalMailingAddress'))}")
    ident_details_part2 = {
        "Contact Telephone": get_text(find_child(ident_info, 'portalContact'), 'portalContactPhone'),
        "Contact E-mail": get_text(find_child(ident_info, 'portalContact'), 'portalContactEmail'),
        "Contact Employee": f"{format_name(find_child(ident_info, 'contactEmployeeName'))}, {get_text(ident_info, 'contactEmployeeTitle')}",
        "Fiscal Year End": get_text(ident_info, 'fiscalYearEnd'),
        "Previously registered with Commission?": format_bool(get_text(ident_info, 'anyPreviousRegistrations')),
        "Registered with a foreign financial authority?": format_bool(get_text(ident_info, 'anyForeignRegistrations')),
    }
    for key, val in ident_details_part2.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    org_info = form_data.find('formOfOrganization')
    parts.append("\n### Form of Organization")
    org_details = {
        "Legal Status": get_text(org_info, 'legalStatusForm'),
        "State/Country of Formation": get_text(org_info, 'jurisdictionOrganization'),
        "Date of Formation": get_text(org_info, 'dateIncorporation'),
    }
    # NOTE: organization details are always emitted, even when "—".
    for key, val in org_details.items():
        parts.append(f"**{key}:** {val}")
    succ_info = form_data.find('successions')
    if succ_info:
        parts.append("\n### Successions")
        parts.append(f"**Is the applicant succeeding to the business of a currently registered funding portal?** {format_bool(get_text(succ_info, 'isSucceedingBusiness'))}")
        if get_text(succ_info, 'isSucceedingBusiness').upper() == 'Y':
            acq_info = succ_info.find('acquiredHistoryDetails')
            acq_details = {
                "Name of Acquired Funding Portal": get_text(acq_info, 'acquiredFundingPortal'),
                "Acquired Portal's SEC File No.": get_text(acq_info, 'acquiredPortalFileNumber'),
                "Brief description of the details of the succession": get_text(acq_info, 'acquiredDesc'),
            }
            for key, val in acq_details.items():
                if val and val != "—":
                    parts.append(f"**{key}:** {val}")
    ctrl_info = form_data.find('controlRelationships')
    if ctrl_info and ctrl_info.find_all('fullLegalNames'):
        parts.append("\n### Control Relationships")
        parts.append("Persons/entities that directly or indirectly control the applicant:")
        for name_node in ctrl_info.find_all('fullLegalNames'):
            parts.append(f"- {get_text(name_node, 'fullLegalName')}")
    disc_info = form_data.find('disclosureAnswers')
    if disc_info:
        parts.append("\n### Disclosure Information")
        DISCLOSURE_MAP = {
            'criminalDisclosure': "Criminal Disclosure", 'regulatoryActionDisclosure': "Regulatory Action Disclosure",
            'civilJudicialActionDisclosure': "Civil Judicial Disclosure", 'financialDisclosure': "Financial Disclosure"
        }
        # A section reports "Yes" when any descendant element answers 'Y'.
        for tag, title in DISCLOSURE_MAP.items():
            section = disc_info.find(tag)
            if section and any(node.text.upper() == 'Y' for node in section.find_all()):
                parts.append(f"**{title}:** Yes")
            else:
                parts.append(f"**{title}:** No")
    parts.append(f"\n**Does the applicant engage in any non-securities related business?** {format_bool(get_text(form_data.find('nonSecuritiesRelatedBusiness'), 'isEngagedInNonSecurities'))}")
    escrow_info = form_data.find('escrowArrangements')
    if escrow_info:
        parts.append("\n### Qualified Third Party Arrangements")
        third_party = escrow_info.find('investorFundsContacts')
        if third_party:
            parts.append(f"**Name of person:** {get_text(third_party, 'investorFundsContactName')}")
            parts.append(f"**Address:** {format_address(third_party.find('investorFundsAddress'))}")
            parts.append(f"**Phone Number:** {get_text(third_party, 'investorFundsContactPhone')}")
        parts.append(f"**Compensation Description:** {get_text(escrow_info, 'compensationDesc')}")
    exec_info = form_data.find('execution')
    parts.append("\n### Execution")
    exec_details = {
        "Date": get_text(exec_info, 'executionDate'),
        "Full Legal Name of Funding Portal": get_text(exec_info, 'fullLegalNameFundingPortal'),
        "By (Signature)": get_text(exec_info, 'personSignature'),
        "Title": get_text(exec_info, 'personTitle'),
    }
    # NOTE: execution details are always emitted, even when "—".
    for key, val in exec_details.items():
        parts.append(f"**{key}:** {val}")
    sched_a = form_data.find('scheduleA')
    if sched_a and sched_a.find_all('entityOrNaturalPerson'):
        parts.append("\n### FORM FUNDING PORTAL SCHEDULE A: Direct Owners and Executive Officers")
        sched_a_data = []
        for person in sched_a.find_all('entityOrNaturalPerson'):
            ownership_code = get_text(person, 'ownershipCode')
            ownership_desc = OWNERSHIP_CODES.get(ownership_code, ownership_code)
            entity_code = get_text(person, 'entityType')
            entity_desc = ENTITY_TYPE_CODES.get(entity_code, entity_code)
            sched_a_data.append({
                "Full Legal Name": get_text(person, 'fullLegalName'),
                "Entity Type": entity_desc,
                "Title or Status": get_text(person, 'titleStatus'),
                "Date Acquired": get_text(person, 'dateOfTitleStatusAcquired'),
                "Ownership Code": ownership_desc,
                "Control Person?": format_bool(get_text(person, 'controlPerson')),
                "CRD No.": get_text(person, 'crdNumber'),
                "IRS Tax No.": get_text(person, 'irsTaxNumber'),
                "IRS Employer ID No.": get_text(person, 'irsEmployerIdNumber'),
            })
        parts.append(to_compact_markdown(pd.DataFrame(sched_a_data), index=False))
    sched_b = form_data.find('scheduleB')
    if sched_b and sched_b.find_all('amendEntityOrNaturalPerson'):
        parts.append("\n### FORM FUNDING PORTAL SCHEDULE B: Amendments to Schedule A")
        sched_b_data = []
        for person in sched_b.find_all('amendEntityOrNaturalPerson'):
            ownership_code = get_text(person, 'ownershipCode')
            ownership_desc = OWNERSHIP_CODES.get(ownership_code, ownership_code)
            entity_code = get_text(person, 'entityType')
            entity_desc = ENTITY_TYPE_CODES.get(entity_code, entity_code)
            sched_b_data.append({
                "Full Legal Name": get_text(person, 'fullLegalName'),
                "Type of Amendment": get_text(person, 'typeOfAmendment'),
                "Entity Type": entity_desc,
                "Title or Status": get_text(person, 'titleStatus'),
                "Date Acquired": get_text(person, 'dateOfTitleStatusAcquired'),
                "Ownership Code": ownership_desc,
                "Control Person?": format_bool(get_text(person, 'controlPerson')),
                "CRD No.": get_text(person, 'crdNumber'),
                "IRS Tax No.": get_text(person, 'irsTaxNumber'),
                "IRS Employer ID No.": get_text(person, 'irsEmployerIdNumber'),
            })
        parts.append(to_compact_markdown(pd.DataFrame(sched_b_data), index=False))
    sched_c = form_data.find('scheduleC')
    if sched_c:
        parts.append("\n### FORM FUNDING PORTAL SCHEDULE C: Non-resident Funding Portals")
        agent = sched_c.find('agentForService')
        if agent:
            parts.append("\n**A. Agent for Service of Process:**")
            agent_details = {
                "Name of U.S. person designated as agent": get_text(agent, 'agentName'),
                "Address of U.S. person designated as agent": format_address(agent.find('agentAddress')),
            }
            for key, val in agent_details.items():
                if val and "—" not in val:
                    parts.append(f"- **{key}:** {val}")
        sig = sched_c.find('executionForNonResident')
        if sig:
            parts.append("\n**Execution for Non-Resident Funding Portals:**")
            sig_details = {
                "Signature": get_text(sig, 'signature'),
                "Printed Name": get_text(sig, 'printedName'),
                "Title": get_text(sig, 'title'),
                "Date": get_text(sig, 'date'),
            }
            for key, val in sig_details.items():
                if val and val != "—":
                    parts.append(f"**{key}:** {val}")
    return "\n\n".join(parts)
def parse_form_ta2_xml(xml: BeautifulSoup) -> str:
    """
    Parses a comprehensive XML-based Form TA-2 into a structured Markdown document,
    mirroring all sections of the official form. Defaults missing compliance
    checkboxes to NA.

    Returns an empty string when the edgarSubmission root element is missing.
    """
    def get_text(node, tag):
        # Case-insensitive exact-tag lookup; "—" is the sentinel for "absent".
        if not node: return "—"
        found = node.find(re.compile(f'^{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def format_number(value_str: str) -> str:
        # Normalizes numeric strings (e.g. "123.0" -> "123"); non-numeric
        # values pass through unchanged.
        if not value_str or value_str == "—": return "—"
        try:
            return f"{int(float(value_str)):}"
        except (ValueError, TypeError):
            return value_str
    def format_dollar(value_str: str) -> str:
        # Renders a numeric string as "$X.XX"; non-numeric values pass through.
        if not value_str or value_str == "—": return "—"
        try:
            return f"${float(value_str):.2f}"
        except (ValueError, TypeError):
            return value_str
    def format_checkbox(value_str: str) -> str:
        # Three-way checkbox rendering; anything unrecognized defaults to NA.
        val = value_str.strip().upper()
        if val in ('Y', 'YES'):
            return "[X] Yes [ ] No [ ] NA"
        if val in ('N', 'NO'):
            return "[ ] Yes [X] No [ ] NA"
        if val == 'NA':
            return "[ ] Yes [ ] No [X] NA"
        return "[ ] Yes [ ] No [X] NA"
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## FORM TA-2\n\n"
        "### FORM FOR REPORTING ACTIVITIES OF TRANSFER AGENTS\n"
    ]
    submission = xml.find('edgarSubmission')
    if not submission:
        return ""
    header_data = submission.find('headerData')
    filer_node = submission.find('filer')
    sc_data = submission.find('serviceCompanyData')
    sig_data = submission.find('signatureData')
    parts.append("### Registrant and Reporting Period Information")
    parts.append(f"**CIK:** {get_text(filer_node, 'cik')}")
    parts.append(f"**SEC File Number:** {get_text(filer_node, 'fileNumber')}")
    parts.append(f"**For the reporting period ended:** {get_text(submission, 'periodOfReport')}")
    parts.append("\n### Item 1. Full Name of Registrant")
    parts.append(get_text(header_data, 'entityName'))
    parts.append("\n### Item 2. Service Company Activities")
    engaged_service_co_node = submission.find('engagedServiceCompany')
    function_val = "NONE"
    if engaged_service_co_node:
        function_val = get_text(engaged_service_co_node, 'serviceCompany').upper()
    # Exactly one of the three boxes is checked (All / Some / None).
    all_box = '[X]' if function_val == 'ALL' else '[ ]'
    some_box = '[X]' if function_val == 'SOME' else '[ ]'
    none_box = '[X]' if function_val == 'NONE' else '[ ]'
    parts.append("**a. During the reporting period, has the Registrant engaged a service company to perform any of its transfer agent functions?**")
    parts.append(f"{all_box} All")
    parts.append(f"{some_box} Some")
    parts.append(f"{none_box} None")
    if function_val in ('ALL', 'SOME') and engaged_service_co_node:
        transfer_agents = engaged_service_co_node.find_all('serviceCompanyTransferAgent')
        if transfer_agents:
            agent_details = []
            for agent in transfer_agents:
                agent_details.append(f"- **Name of Service Company:** {get_text(agent, 'entityName')}")
                agent_details.append(f"  **File Number:** {get_text(agent, 'fileNumber')}")
            if agent_details:
                parts.append("\n" + "\n".join(agent_details))
    engaged_as_service_co_node = submission.find('engagedAsServiceCompany')
    is_engaged_as_sc = get_text(engaged_as_service_co_node, 'registrantEngagedService').lower() == 'y'
    parts.append(f"**c. Is the Registrant engaged as a service company by a named transfer agent?** {'[X] Yes [ ] No' if is_engaged_as_sc else '[ ] Yes [X] No'}")
    parts.append("\n### Item 3. Regulatory and Amendment Information")
    parts.append(f"**a. Registrant's appropriate regulatory agency (ARA):** {get_text(submission, 'regulatoryAgency')}")
    amend_info = submission.find('registrantRegulatoryAgency')
    amend_filed_val = get_text(amend_info, 'amendmentFiled')
    amend_status = "Yes" if amend_filed_val == 'Y' else "No" if amend_filed_val == 'N' else "Not Applicable"
    parts.append(f"**b. During the reporting period, has the Registrant amended Form TA-1?** {amend_status}")
    if sc_data:
        parts.append("\n### Item 4. Transfer Agent Activities During the Reporting Period:")
        parts.append(f"**a. Number of items received for transfer:** {format_number(get_text(sc_data, 'numberItemsReceivedForTransfer'))}")
        parts.append(f"**b. Number of individual securityholder accounts for which the TA maintained master securityholder files:** {format_number(get_text(sc_data, 'numberMasterSecurityHolderFilings'))}")
        parts.append("\n### Item 5. Aggregate number of individual securityholder accounts, including accounts in the Direct Registration System (DRS), dividend reinvestment plans and/or direct purchase plans as of December 31:")
        parts.append(f"**a. Total number of individual securityholder accounts:** {format_number(get_text(sc_data, 'numberIndividualAccounts'))}")
        parts.append(f"**b. Number of individual securityholder dividend reinvestment plan and/or direct purchase plan accounts:** {format_number(get_text(sc_data, 'numberDivReinvDirPurPlanAccounts'))}")
        parts.append(f"**c. Number of individual securityholder DRS accounts:** {format_number(get_text(sc_data, 'numberDirectRegistSystemAccounts'))}")
        sh_accounts = sc_data.find('securityHolderAccounts')
        if sh_accounts:
            parts.append("\n**d. Approximate percentage of accounts by security type:**")
            account_data = {'Corporate Securities - Equity': f"{get_text(sh_accounts, 'equitySecurity')}%", 'Corporate Securities - Debt': f"{get_text(sh_accounts, 'debtSecurity')}%", 'Open-End Investment Company Securities': f"{get_text(sh_accounts, 'openEndInvestmentCompany')}%", 'Limited Partnership Securities': f"{get_text(sh_accounts, 'limitedPartnership')}%", 'Municipal Debt Securities': f"{get_text(sh_accounts, 'municipalDebt')}%", 'Other Securities': f"{get_text(sh_accounts, 'other')}%" }
            account_df = pd.DataFrame([account_data])
            parts.append(to_compact_markdown(account_df, index=False))
        parts.append("\n### Item 6. Number of securities issues for which Registrant acted:")
        sh_data = sc_data.find('securityHolderData')
        if sh_data:
            # One table row per capacity in which the registrant acted.
            role_map = {
                'transMaintainMasterSecHolder': "6(a). Receives items for transfer and maintains the master securityholder files:",
                'transNotMaintMasterSecHolder': "6(b). Receives items for transfer but does not maintain the master securityholder files:",
                'notTransMaintMasterSecHolder': "6(c). Does not receive items for transfer but maintains the master securityholder files:"
            }
            issues_rows = []
            for tag_name, label in role_map.items():
                node = sh_data.find(tag_name)
                if node:
                    row_data = {
                        'label': label,
                        'equity': format_number(get_text(node, 'equitySecurity')),
                        'debt': format_number(get_text(node, 'debtSecurity')),
                        'openEnd': format_number(get_text(node, 'openEndInvestmentCompany')),
                        'limitedPartner': format_number(get_text(node, 'limitedPartnership')),
                        'municipal': format_number(get_text(node, 'municipalDebt')),
                        'other': format_number(get_text(node, 'other'))
                    }
                    issues_rows.append(row_data)
            if issues_rows:
                issues_df = pd.DataFrame(issues_rows)
                # ##COLSPAN_1## markers drive the two-row merged header in
                # md_table_2row_header.
                issues_df.columns = [
                    '',
                    'Corporate Securities##COLSPAN_1## Equity',
                    'Corporate Securities##COLSPAN_1## Debt',
                    'Open-End Investment Company Securities',
                    'Limited Partnership Securities',
                    'Municipal Debt Securities',
                    'Other Securities'
                ]
                parts.append("\n---\n")
                parts.append(md_table_2row_header(issues_df))
                parts.append("\n---\n")
            parts.append("\n### Item 7. Scope of certain additional types of activities performed:")
            parts.append(f"**a. Number of issues for which dividend reinvestment plan and/or direct purchase plan services were provided:** {format_number(get_text(sh_data, 'dividendReinvDirectPurchasePlan'))}")
            parts.append(f"**b. Number of issues for which DRS services were provided:** {format_number(get_text(sh_data, 'directRegistrationSystem'))}")
            dividend_node = sh_data.find('dividendAndInterest')
            parts.append(f"**c(i). Dividend disbursement and interest paying agent activities - Number of issues:** {format_number(get_text(dividend_node, 'numberIssues'))}")
            parts.append(f"**c(ii). Dividend disbursement and interest paying agent activities - Amount (in dollars):** {format_dollar(get_text(dividend_node, 'amountIssues'))}")
            parts.append("\n### Item 8. Aged record differences, existing for more than 30 days:")
            prior_agent_node = sh_data.find('priorAgent')
            current_agent_node = sh_data.find('currentAgent')
            aged_data = {
                'Prior Transfer Agent(s) (If applicable)': [
                    format_number(get_text(prior_agent_node, 'numberIssues')),
                    format_dollar(get_text(prior_agent_node, 'amountIssues'))
                ],
                'Current Transfer Agent': [
                    format_number(get_text(current_agent_node, 'numberIssues')),
                    format_dollar(get_text(current_agent_node, 'amountIssues'))
                ]
            }
            aged_df = pd.DataFrame(aged_data, index=['8(a)(i). Number of issues:', '8(a)(ii). Market value (in dollars):'])
            parts.append(to_compact_markdown(aged_df))
            num_filed = get_text(sh_data, 'numberFiled')
            parts.append(f"**8(b). Number of quarterly reports regarding buy-ins filed:** {format_number(num_filed)}")
            # NOTE(review): zero buy-in reports filed is rendered as a "Yes"
            # compliance answer — confirm against the TA-2 instructions.
            buy_in_compliance_val = "Y" if num_filed == "0" else "N"
            parts.append(f"**8(c). During the reporting period, did the Registrant file all quarterly reports regarding buy-ins?** {format_checkbox(buy_in_compliance_val)}")
            parts.append("\n### Item 9. Turnaround time for routine items:")
            turnaround_compliance_val = get_text(sh_data, 'alwaysCompliant')
            parts.append(f"**a. Has the Registrant always been in compliance with the turnaround time for routine items?** {format_checkbox(turnaround_compliance_val)}")
            if turnaround_compliance_val.upper() == 'N':
                parts.append(f"**Number of months not in compliance:** {format_number(get_text(sh_data, 'monthsNotInCompliance'))}")
            parts.append("\n### Item 10. Open-end investment company securities activities:")
            parts.append(f"**a. Total number of transactions processed:** {format_number(get_text(sh_data, 'total'))}")
            parts.append(f"**b. Number of transactions processed on a date other than date of receipt of order (as ofs):** {format_number(get_text(sh_data, 'totalOtherThanReceiptOrderDate'))}")
        db_search = sc_data.find('databaseSearches')
        if db_search:
            parts.append("\n### Item 11. Lost Securityholder Searches")
            parts.append(f"**a. Date of database search:** {get_text(db_search, 'databaseSearchDate')}")
            parts.append(f"**b. Number of lost securityholder accounts submitted for database search:** {format_number(get_text(db_search, 'numberLostAccountsSearched'))}")
            parts.append(f"**c. Number of addresses obtained from database search:** {format_number(get_text(db_search, 'numberAddressesFromSearch'))}")
        parts.append("\n### Item 12. Accounts Remitted to States")
        parts.append(f"**Number of securityholder accounts that have been remitted to states:** {format_number(get_text(sc_data, 'numberLostAccountsRemittedToStates'))}")
    if sig_data:
        parts.append("\n### SIGNATURE")
        sig_details = {
            "Signature of Official responsible for Form": get_text(sig_data, 'signatureName'),
            "Title of Signing Officer": get_text(sig_data, 'signatureTitle'),
            "Telephone Number": get_text(sig_data, 'signaturePhoneNumber'),
            "Date Signed (Month/Day/Year)": get_text(sig_data, 'signatureDate')
        }
        sig_df = pd.DataFrame(sig_details.items(), columns=['', ''])
        parts.append(to_compact_markdown(sig_df, index=False))
    return "\n\n".join(parts)
def parse_form_taw_xml(xml: "BeautifulSoup") -> str:
    """
    Parses an XML-based Form TA-W (Notice of Withdrawal) into a structured Markdown document.

    Returns an empty string when the <edgarSubmission> root element is absent.
    Missing optional elements are rendered as an em dash ("—") or omitted.
    """
    def get_text(node, tag):
        # Case-insensitive child lookup; tolerates an optional namespace prefix
        # (e.g. <ns1:fileNumber>), matching the convention used by the other
        # XML form parsers in this module.
        if not node: return "—"
        found = node.find(re.compile(rf'^(?:\w+:)?{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def format_address(addr_node) -> str:
        # Comma-join only the address components that are actually present.
        if not addr_node: return "—"
        parts = [
            get_text(addr_node, 'street1'),
            get_text(addr_node, 'street2'),
            get_text(addr_node, 'city'),
            get_text(addr_node, 'stateOrCountry'),
            get_text(addr_node, 'zipCode'),
        ]
        return ", ".join(p for p in parts if p and p != "—")
    def format_bool(value_str: str) -> str:
        # Normalizes Y/N and true/false flags to human-readable Yes/No.
        s = value_str.strip().lower()
        if s in ('y', 'true'): return "Yes"
        if s in ('n', 'false'): return "No"
        return "—"
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## FORM TA-W\n\n"
        "### NOTICE OF WITHDRAWAL FROM REGISTRATION AS TRANSFER AGENT\n"
    ]
    submission = xml.find('edgarSubmission')
    if not submission:
        return ""
    filer_node = submission.find('filer')
    registrant_node = submission.find('registrant')
    taw_details_node = submission.find('tawDetails')
    sig_data = submission.find('signatureData')
    # Guard against a missing <registrant>: .find() on None would raise
    # AttributeError. get_text() itself already tolerates a None node.
    business_address = registrant_node.find('businessAddress') if registrant_node else None
    proceedings_node = registrant_node.find('subjectOfProceedings') if registrant_node else None
    judgements_node = registrant_node.find('unsatisfiedJudgementsOrLiens') if registrant_node else None
    parts.append("### Registrant Information")
    parts.append(f"**SEC File Number:** {get_text(filer_node, 'fileNumber')}")
    parts.append(f"**Full Name of Registrant:** {get_text(registrant_node, 'entityName')}")
    parts.append(f"**Address:** {format_address(business_address)}")
    parts.append(f"**Reason for withdrawal:** {get_text(registrant_node, 'withdrawalDescription')}")
    parts.append(f"**Date ceased transfer agent functions:** {get_text(registrant_node, 'lastActionDate')}")
    parts.append(f"**Does registrant plan to re-register in the future?** {format_bool(get_text(registrant_node, 'futureTransferAgentFunctions'))}")
    parts.append("\n### Legal and Disciplinary History")
    parts.append(f"**Is the registrant subject to any proceedings?** {format_bool(get_text(proceedings_node, 'involved'))}")
    parts.append(f"**Does the registrant have any unsatisfied judgements or liens?** {format_bool(get_text(judgements_node, 'involved'))}")
    if taw_details_node:
        successor_node = taw_details_node.find('tawSuccessorDetails')
        if successor_node:
            parts.append("\n### Successor Transfer Agent")
            parts.append(f"**Name:** {get_text(successor_node, 'entityName')}")
            parts.append(f"**Address:** {format_address(successor_node.find('tawSuccessorAddress'))}")
            parts.append(f"**Is successor registered with the SEC?** {format_bool(get_text(successor_node, 'successorRegistered'))}")
        custodian_node = taw_details_node.find('tawCustodians')
        if custodian_node:
            parts.append("\n### Custodian of Books and Records")
            parts.append(f"**Name:** {get_text(custodian_node, 'entityName')}")
            parts.append(f"**Address:** {format_address(custodian_node.find('tawCustodianAddress'))}")
    if sig_data:
        parts.append("\n### SIGNATURE")
        sig_details = {
            "Signature": get_text(sig_data, 'signatureName'),
            "Title": get_text(sig_data, 'signatureTitle'),
            "Telephone Number": get_text(sig_data, 'signaturePhoneNumber'),
            "Date": get_text(sig_data, 'signatureDate'),
        }
        # Only emit signature rows that were actually populated.
        for key, val in sig_details.items():
            if val != "—":
                parts.append(f"**{key}:** {val}")
    return "\n\n".join(parts)
def parse_form_24f2nt_xml(xml: "BeautifulSoup") -> str:
    """
    Parses an XML-based Form 24F-2NT into a structured Markdown document.

    Missing item sections are tolerated (rendered as "—" or skipped) rather
    than raising AttributeError on absent nodes.
    """
    def get_text(node, tag):
        # Case-insensitive child lookup tolerating an optional namespace prefix.
        if not node: return "—"
        found = node.find(re.compile(rf'^(?:\w+:)?{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def format_dollar(value_str: str) -> str:
        # Renders a numeric string as $X.XX; "(123)" accounting notation is
        # treated as negative. Non-numeric values pass through unchanged.
        if not value_str or value_str == "—": return "—"
        is_negative = value_str.startswith('(') and value_str.endswith(')')
        if is_negative:
            value_str = '-' + value_str.strip('()')
        try:
            val = float(value_str)
            return f"${val:.2f}"
        except (ValueError, TypeError):
            return value_str
    parts = ["## FORM 24F-2NT: Annual Notice of Securities Sold Pursuant to Rule 24f-2"]
    header_data = xml.find('headerData')
    filer_info = header_data.find('filerInfo') if header_data else None
    # Guard each step of the chain: a missing <filer> would otherwise raise.
    filer_node = filer_info.find('filer') if filer_info else None
    filer_creds = filer_node.find('issuerCredentials') if filer_node else None
    parts.append("\n### 24F-2NT: Filer Information")
    filer_details = {
        "Filer CIK": get_text(filer_creds, 'cik'),
        "Is this a LIVE or TEST Filing?": get_text(filer_info, 'liveTestFlag'),
        "Filer Investment Company Type": get_text(filer_info, 'investmentCompanyType'),
    }
    for key, val in filer_details.items():
        if val and val != "—": parts.append(f"**{key}:** {val}")
    for filing_info in xml.find_all('annualFilingInfo'):
        parts.append("\n### 24F-2NT: Annual Filing Information")
        item1 = filing_info.find('item1')
        issuer_addr = item1.find('addressOfIssuer') if item1 else None
        parts.append("**1. Name and address of issuer:**")
        parts.append(f"- **Name of Issuer:** {get_text(item1, 'nameOfIssuer')}")
        if issuer_addr:
            parts.append(f"- **Address:** {get_text(issuer_addr, 'street1')}, {get_text(issuer_addr, 'city')}, {get_text(issuer_addr, 'state')} {get_text(issuer_addr, 'zipCode')}")
        item2 = filing_info.find('item2')
        # <item2> may be absent; .find() on None would raise AttributeError.
        report_class = item2.find('reportClassName') if item2 else None
        all_series_flag = get_text(report_class, 'rptIncludeAllFlag').lower() == 'true'
        parts.append(f"\n**2. The Name and EDGAR Identifier of each series or class of securities for which this Form is filed:**")
        parts.append(f"- [{'x' if all_series_flag else ' '}] Check box if the Form is being filed for all series and classes of the issuer.")
        item3 = filing_info.find('item3')
        parts.append(f"\n**3. Investment Company Act File Number:** {get_text(item3, 'investmentCompActFileNo')}")
        # <item3> may also be absent; find_all on None would raise.
        sec_act_nodes = item3.find_all('securitiesActFileNo') if item3 else []
        sec_act_nums = ", ".join(get_text(n, 'fileNumber') for n in sec_act_nodes)
        parts.append(f"  **Securities Act File Number:** {sec_act_nums}")
        item4 = filing_info.find('item4')
        parts.append(f"\n**4(a). Last day of fiscal year for which this Form is filed:** {get_text(item4, 'lastDayOfFiscalYear')}")
        is_late = get_text(item4, 'isThisFormBeingFiledLate').lower() == 'true'
        is_last_time = get_text(item4, 'isThisTheLastTimeIssuerFilingThisForm').lower() == 'true'
        parts.append(f"**4(b). Check box if this Form is being filed late:** [{'x' if is_late else ' '}]")
        parts.append(f"**4(c). Check box if this is the last time the issuer will be filing this Form:** [{'x' if is_last_time else ' '}]")
        parts.append("\n**5. Calculation of registration fee:**")
        item5 = filing_info.find('item5')
        item6 = filing_info.find('item6')
        item7 = filing_info.find('item7')
        item8 = filing_info.find('item8')
        calc_data = [
            ("(i) Aggregate sale price of securities sold during the fiscal year:", format_dollar(get_text(item5, 'aggregateSalePriceOfSecuritiesSold'))),
            ("(ii) Aggregate price of securities redeemed or repurchased during the fiscal year:", format_dollar(get_text(item5, 'aggregatePriceOfSecuritiesRedeemedOrRepurchasedInFiscalYear'))),
            ("(iii) Aggregate price of securities redeemed or repurchased during any prior fiscal year:", format_dollar(get_text(item5, 'aggregatePriceOfSecuritiesRedeemedOrRepurchasedAnyPrior'))),
            ("(iv) Total available redemption credits [add Items 5(ii) and 5(iii)]:", format_dollar(get_text(item5, 'totalAvailableRedemptionCredits'))),
            ("(v) Net sales:", format_dollar(get_text(item5, 'netSales'))),
            ("(vi) Redemption credits available for use in future years:", format_dollar(get_text(item5, 'redemptionCreditsAvailableForUseInFutureYears'))),
            ("(vii) Multiplier for determining registration fee:", get_text(item5, 'multiplierForDeterminingRegistrationFee')),
            ("(viii) Registration fee due:", format_dollar(get_text(item5, 'registrationFeeDue'))),
            ("6(i). Amount of securities deducted:", format_dollar(get_text(item6, 'amountOfSecuritiesDeducted'))),
            ("6(ii). Number of shares or other units remaining unsold:", get_text(item6, 'numberOfSharesOrOtherUnitsRemainingUnsold')),
            ("7. Interest due -- if this Form is being filed more than 90 days after the end of the issuer's fiscal year:", format_dollar(get_text(item7, 'interestDue'))),
            ("8. Total of the amount of the registration fee due plus any interest due:", format_dollar(get_text(item8, 'totalOfRegistrationFeePlusAnyInterestDue'))),
        ]
        # Emit only the fee-calculation rows that were actually populated.
        for label, value in calc_data:
            if value != "—":
                parts.append(f"- **{label}** {value}")
        notes = get_text(filing_info, 'explanatoryNotes')
        if notes != "—":
            parts.append(f"\n**Explanatory Notes (if any):**\n{notes}")
        signature = filing_info.find('signature')
        if signature:
            parts.append("\n**Signatures**")
            parts.append(f"**Name and Title:** {get_text(signature, 'nameAndTitle')}")
            parts.append(f"**Date:** {get_text(signature, 'signatureDate')}")
            parts.append(f"**Signature:** {get_text(signature, 'signature')}")
    return "\n\n".join(parts)
def parse_form_maw_xml(xml: "BeautifulSoup") -> str:
    """
    Parses an XML-based Form MA-W (Notice of Withdrawal) into a structured Markdown document
    that accurately reflects the official form's layout.

    Returns an empty string when <formData> is absent. A missing <headerData>
    section degrades to "—" placeholders instead of raising.
    """
    def get_text(node, tag):
        # Case-insensitive child lookup tolerating an optional namespace prefix.
        if not node: return "—"
        found = node.find(re.compile(rf'^(?:\w+:)?{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def format_bool(value_str: str) -> str:
        # Normalizes Y/N flags to human-readable Yes/No.
        s = value_str.strip().upper()
        if s == 'Y': return "Yes"
        if s == 'N': return "No"
        return "—"
    def format_name(name_node) -> str:
        # Joins first/middle/last name parts, skipping missing components.
        if not name_node: return "—"
        first = get_text(name_node, 'firstName')
        middle = get_text(name_node, 'middleName')
        last = get_text(name_node, 'lastName')
        return " ".join(p for p in [first, middle, last] if p and p != "—")
    def format_address(addr_node) -> str:
        # Comma-joins the populated address components.
        if not addr_node: return "—"
        parts = [
            get_text(addr_node, 'street1'),
            get_text(addr_node, 'street2'),
            get_text(addr_node, 'city'),
            get_text(addr_node, 'stateOrCountry'),
            get_text(addr_node, 'zipCode'),
        ]
        return ", ".join(p for p in parts if p and p != "—")
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## FORM MA-W\n\n"
        "### NOTICE OF WITHDRAWAL FROM REGISTRATION AS A MUNICIPAL ADVISOR\n"
    ]
    header = xml.find('headerData')
    form_data = xml.find('formData')
    if not form_data:
        return ""
    # Guard against a missing <headerData>: .find() on None would raise.
    # Also hoists the repeated <filer> lookup.
    header_filer = header.find('filer') if header else None
    filer_id = get_text(header_filer, 'filerId')
    filer_ccc = get_text(header_filer, 'filerCcc')
    file_number = get_text(header_filer, 'filerFileNumber')
    notification_email = get_text(header, 'internetNotificationAddress')
    parts.append("### Filer Information")
    parts.append(f"**Filer CIK:** {filer_id}")
    parts.append(f"**Filer CCC:** {filer_ccc}")
    parts.append(f"**File Number:** {file_number}")
    parts.append("\n### Notification Information")
    parts.append(f"**Notification Email Address:** {notification_email}")
    parts.append("\n### Item 1: Identifying Information")
    parts.append(f"**A. Full Legal Name:** {get_text(form_data, 'fullLegalName')}")
    parts.append(f"**B. SEC File Number:** {get_text(form_data, 'fileNumber')}")
    contact_person = form_data.find('contactPersonInfo')
    if contact_person:
        parts.append("\n### Item 2: Contact Person")
        name = format_name(contact_person.find('individualName'))
        address = format_address(contact_person.find('address'))
        parts.append(f"**Name, title, and contact information:**")
        parts.append(f"- **Name:** {name}")
        parts.append(f"- **Title:** {get_text(contact_person, 'title')}")
        parts.append(f"- **Address:** {address}")
        parts.append(f"- **Telephone Number:** {get_text(contact_person, 'phoneNumber')}")
        parts.append(f"- **Email Address:** {get_text(contact_person, 'email')}")
    parts.append("\n### Item 3: Money Owed to Clients")
    parts.append(f"**A. Has the registrant received any pre-paid municipal advisory fees for municipal advisory activities, including pre-paid services and subscription fees for publications, that have not been delivered?** {format_bool(get_text(form_data, 'isReceivedAnyPrepaidFee'))}")
    parts.append(f"**B. Borrowed any money from clients that has not been repaid?** {format_bool(get_text(form_data, 'isBorrowedNotRepaid'))}")
    parts.append("\n### Item 4: Advisory Contract Assignments")
    parts.append(f"**Has the registrant assigned any contracts to another person that engages in municipal advisory activities?** {format_bool(get_text(form_data, 'isAdvisoryContract'))}")
    parts.append("\n### Item 5: Judgments and Liens")
    parts.append(f"**Are there any unsatisfied judgments or liens against the registrant?** {format_bool(get_text(form_data, 'isUnsatisfiedJudgementsOrLiens'))}")
    books_records = form_data.find('booksAndRecords')
    if books_records:
        parts.append("\n### Item 6: Books and Records (from Schedule W1)")
        for location in books_records.find_all('personLocation'):
            person_info = location.find('personInfo')
            location_info = location.find('locationInfo')
            parts.append(f"\n**Person with Custody:**")
            # Either sub-node may be absent; guard before .find().
            parts.append(f"- **Name and business address:** {get_text(person_info, 'name')}, {format_address(person_info.find('address') if person_info else None)}")
            parts.append(f"\n**Location of Books and Records:**")
            # NOTE(review): 'nameAddressPhone.name' is matched by get_text as a
            # single tag name (the '.' is a regex wildcard); if the schema nests
            # <name> inside <nameAddressPhone>, this needs a two-step find — confirm.
            location_name = get_text(location_info, 'nameAddressPhone.name')
            # Fall back to 'Same as above' when no distinct location name was
            # supplied. (The previous `or 'Same as above'` never fired, since
            # get_text returns the truthy "—" placeholder for missing nodes.)
            parts.append(f"- **Name of Location, if any:** {location_name if location_name not in ('', '—') else 'Same as above'}")
            parts.append(f"- **Address:** {format_address(location_info.find('address') if location_info else None)}")
            parts.append(f"- **Briefly describe the books and records kept at this location:** {get_text(location_info, 'description')}")
    execution = form_data.find('execution')
    if execution:
        parts.append("\n### Execution")
        # Sole proprietors and firms sign in different sub-elements.
        signer = execution.find('soleProprietor') or execution.find('municipalAdvisoryFirm')
        if signer:
            parts.append(f"**Signature:** {get_text(signer, 'signature')}")
            parts.append(f"**Date:** {get_text(signer, 'date')}")
            parts.append(f"**Printed Name:** {get_text(signer, 'signerName')}")
            parts.append(f"**Title:** {get_text(signer, 'title')}")
    return "\n\n".join(parts)
def parse_form_ma_xml(xml: BeautifulSoup) -> str:
"""
Parses an XML-based Form MA or MA/A (for municipal advisory firms) into a
structured Markdown document, capturing 100% of available fields.
"""
def get_text(node, tag):
if not node: return "—"
found = node.find(re.compile(rf'^(?:\w+:)?{tag}$', re.I))
return found.text.strip() if found and found.text else "—"
def format_bool(value_str: str) -> str:
s = value_str.strip().upper()
if s == 'Y' or s == 'TRUE': return "Yes"
if s == 'N' or s == 'FALSE': return "No"
return "—"
def format_name(name_node) -> str:
if not name_node: return "—"
first = get_text(name_node, 'firstName')
middle = get_text(name_node, 'middleName')
last = get_text(name_node, 'lastName')
return " ".join(p for p in [first, middle, last] if p and p != "—")
def format_address(addr_node) -> str:
if not addr_node: return "—"
parts = [
get_text(addr_node, 'street1'),
get_text(addr_node, 'street2'),
get_text(addr_node, 'city'),
get_text(addr_node, 'stateOrCountry'),
get_text(addr_node, 'zipCode'),
]
return ", ".join(p for p in parts if p and p.strip() != "—")
OWNERSHIP_CODES = {
'NA': "NA - less than 5%", 'A': "A - 5% but less than 10%",
'B': "B - 10% but less than 25%", 'C': "C - 25% but less than 50%",
'D': "D - 50% but less than 75%", 'E': "E - 75% or more",
'F': "F - Other (general partner, trustee, or elected member)"
}
parts = [
"### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
"**Washington, D.C. 20549**\n\n"
"## FORM MA: UNIFORM APPLICATION FOR MUNICIPAL ADVISOR REGISTRATION\n"
]
header = xml.find('headerData')
form_data = xml.find('formData')
if not form_data or not header:
return ""
filer_node = header.find('filer')
filer_id = get_text(filer_node, 'filerId')
filer_ccc = get_text(filer_node, 'filerCcc')
contact_name = get_text(header.find('contact'), 'name')
contact_phone = get_text(header.find('contact'), 'phoneNumber')
contact_email = get_text(header, 'contactEmail')
submission_type = get_text(header, 'submissionType')
parts.append("### Filer and Contact Information")
parts.append(f"**Filer CIK:** {filer_id}")
parts.append(f"**Filer CCC:** {filer_ccc}")
parts.append(f"**Contact Name:** {contact_name}")
parts.append(f"**Contact Phone:** {contact_phone}")
parts.append(f"**Contact Email:** {contact_email}")
notifications = header.find_all('internetNotificationAddress')
if notifications:
emails = ", ".join(n.text for n in notifications)
parts.append(f"**Notification Emails:** {emails}")
parts.append("\n### Type of Filing")
if submission_type == "MA":
parts.append("**Selected Filing Type:** Initial Application")
elif submission_type == "MA/A":
parts.append("**Selected Filing Type:** Amendment")
elif submission_type == "MA-A":
parts.append("**Selected Filing Type:** Annual Update")
parts.append("\n### Item 1: Identifying Information")
controls_node = form_data.find('controls')
item1_details = {
"A. Full Legal Name of the Firm": get_text(form_data, 'firmName'),
" Organization CRD No.": get_text(form_data, 'firmCrdNumber'),
" Is applicant a Sole Proprietor?": format_bool(get_text(controls_node, 'isSolePropietor')),
" Has the municipal legal name changed since the last filing?": format_bool(get_text(controls_node, 'hasNameChange')),
"B. Doing-Business-As (DBA) Name": get_text(form_data, 'dbaName'),
" Has the applicant had any previous DBA names?": format_bool(get_text(controls_node, 'hasPreviousDBAName')),
" Does the applicant have any additional DBA names?": format_bool(get_text(controls_node, 'hasAdditionalDBANames')),
"C. IRS Employer Identification Number": get_text(form_data, 'irsNum'),
}
for key, val in item1_details.items():
if val and val != "—":
parts.append(f"**{key.replace(' ', ' ')}:** {val}")
regs = form_data.find('registrations')
if regs:
parts.append("\n**D. Registrations:**")
if (mat_reg := regs.find('maTregistration')):
parts.append(f"- **Municipal Advisor (Temporary):** SEC File No: {get_text(mat_reg, 'fileNumber')}")
base_regs = regs.find('baseRegistrations')
if base_regs:
reg_map = {
"Municipal Advisor": base_regs.find('maRegistration'),
"Broker-Dealer": base_regs.find('bdRegistration'),
"SEC-Registered Investment Adviser": base_regs.find('iaRegistration'),
"Other": base_regs.find('anotherRegistration')
}
for name, node in reg_map.items():
if node and node.get_text(strip=True):
file_no = get_text(node, 'fileNumber')
crd_no = get_text(node, 'crdNumber')
desc = get_text(node, 'description')
reg_id = get_text(node, 'registrationId')
details = []
if file_no != "—": details.append(f"SEC File No: {file_no}")
if crd_no != "—": details.append(f"CRD No: {crd_no}")
if desc != "—": details.append(f"Description: {desc}")
if reg_id != "—": details.append(f"ID: {reg_id}")
parts.append(f"- **{name}:** {', '.join(details)}")
additional_regs = form_data.find_all('additionalRegistration')
if additional_regs:
parts.append("\n**Additional Registrations:**")
for reg in additional_regs:
name_reg = reg.find('nameAndRegistration')
if name_reg:
parts.append(f"- **{get_text(name_reg, 'name')}:** {get_text(name_reg, 'registrationId')}")
principal_address = format_address(form_data.find('principalOfficeAddress'))
parts.append(f"\n**E. Principal Office and Place of Business:** {principal_address}")
parts.append(f"**Telephone Number:** {get_text(form_data.find('principalOfficeAddress'), 'phoneNumber')}")
additional_offices = form_data.find_all('additionalOffice')
if additional_offices:
parts.append("\n**Additional Offices of Employment:**")
for i, office in enumerate(additional_offices, 1):
office_info = office.find('officeInfo')
address = format_address(office_info) if office_info else "Address Not Provided"
parts.append(f"- **Office #{i} ({get_text(office, 'addDeleteAmend')}):** {address} | Phone: {get_text(office_info, 'phoneNumber')}")
parts.append(f"\n**Mailing Address is Different from Principal Office:** {format_bool(get_text(form_data, 'mailingAddressDifferent'))}")
if get_text(form_data, 'mailingAddressDifferent').upper() == 'Y':
mailing_address = format_address(form_data.find('mailingAddress'))
parts.append(f"**Mailing Address:** {mailing_address}")
parts.append(f"\n**F. Website:** {get_text(form_data, 'primaryWebAddress')}")
cco = form_data.find('cco')
if cco:
parts.append("\n**G. Chief Compliance Officer (CCO):**")
cco_details = { "Name": format_name(cco.find('name')), "Titles": ", ".join(t.text for t in cco.find_all('title')), "Address": format_address(cco.find('address')), "Phone Number": get_text(cco, 'phoneNumber'), "Email": get_text(cco, 'email') }
for key, val in cco_details.items():
if val and val != "—": parts.append(f"**{key}:** {val}")
affiliates = form_data.find_all('businessAffiliate')
if affiliates:
parts.append("\n**H. Business Affiliates:**")
for aff in affiliates:
parts.append(f"- **Name:** {get_text(aff, 'affiliateName')}")
reg_info = aff.find('applicableRegistration')
if reg_info:
parts.append(f" - **Issuing Agency:** {get_text(reg_info, 'issuingAgencyName')}")
parts.append(f" - **Jurisdiction:** {get_text(reg_info, 'jurisdiction')}")
parts.append(f"**I. Location of Books and Records:** {format_bool(get_text(controls_node, 'hasBooksRecords'))}")
parts.append("\n### Item 2: Form of Organization")
form_org_node = form_data.find('formOfOrganization')
org_type = "—"
if form_org_node:
if form_org_node.find('Corporation'): org_type = "Corporation"
elif form_org_node.find('SoleProprietorship'): org_type = "Sole Proprietorship"
elif form_org_node.find('LLP'): org_type = "Limited Liability Partnership (LLP)"
elif form_org_node.find('Partnership'): org_type = "Partnership"
elif form_org_node.find('LLC'): org_type = "Limited Liability Company (LLC)"
elif form_org_node.find('LP'): org_type = "Limited Partnership (LP)"
elif form_org_node.find('Other'): org_type = f"Other ({get_text(form_org_node, 'otherValue')})"
org_details = {
"A. Applicant's form of organization": org_type,
"B. Month of Applicant's Annual Fiscal Year End": get_text(form_data, 'monthOfFiscalYearEnd'),
"C. State, Other U.S. Jurisdiction, or Foreign Jurisdiction Under Which Applicant is Organized": get_text(form_data.find('organizedJurisdiction'), 'stateOrCountry'),
"D. Date of Organization": get_text(form_data, 'dateOfOrganization'),
"E. Is the applicant a public reporting company?": format_bool(get_text(controls_node, 'isSection12Or15ReportingCompany'))
}
for key, val in org_details.items():
if val and val != "—": parts.append(f"**{key}:** {val}")
parts.append("\n### Item 3: Successions")
is_succeeding = format_bool(get_text(controls_node, 'isSucceedingApplicant'))
parts.append(f"**Is the applicant succeeding to the business of a registered municipal advisor?** {is_succeeding}")
if is_succeeding == "Yes":
successions_node = form_data.find('successions')
if successions_node:
parts.append(f"**Date of Succession:** {get_text(successions_node, 'dateOfSuccession')}")
succ_details = successions_node.find('succeedingApplicantDetails')
if succ_details:
parts.append(f"**Name of Predecessor:** {get_text(succ_details, 'name')}")
parts.append(f"**CRD No. of Predecessor:** {get_text(succ_details, 'crdNumber')}")
parts.append(f"**SEC File No. of Predecessor:** {get_text(succ_details, 'fileNumber')}")
parts.append("\n### Item 4: Information About Applicant's Business")
me_or_op_node = form_data.find('meOrOPCompensationTypes')
solicitation_node = form_data.find('solicitationCompensationTypes')
me_or_op_comp = ", ".join(c.text for c in me_or_op_node.find_all('compensationTypes')) if me_or_op_node else "—"
solicitation_comp = ", ".join(c.text for c in solicitation_node.find_all('compensationTypes')) if solicitation_node else "—"
biz_details = {
"A. Number of Employees": get_text(form_data, 'numberOfEmployees'),
"B. Municipal Advisory Activities - Employees": get_text(form_data, 'employeesEngagedInMAA'),
"C. Registered Representatives - MAA Employees also registered reps of a broker-dealer": get_text(form_data, 'maaEmployeesRegBD'),
" MAA Employees also associated with an investment adviser": get_text(form_data, 'maaRegIA'),
"D. Public Relations Company?": format_bool(get_text(form_data, 'isPrcApplicant')),
"E. Soliciting on Behalf of an Affiliate - Number of firms": get_text(form_data, 'numberOfSolicitingFirms'),
"F. Types of Clients - Number of clients served as municipal advisor": get_text(form_data, 'clientsServedAsMA'),
" Types of Clients": ", ".join(c.text for c in form_data.find_all('clientTypes')),
"G. Solicitation of Municipal Entities and Obligated Persons - Municipal Entities": get_text(form_data, 'numberOfSolicitedME'),
" Obligated Persons": get_text(form_data, 'numberOfSolicitedOP'),
" Total Solicited": get_text(form_data, 'totalNumberOfSolicitedMEAndOP'),
"H. Types of Persons Solicited": ", ".join(p.text for p in form_data.find_all('solicitationPersonTypes')),
"I. Compensation Arrangements (Municipal Advisory)": me_or_op_comp,
"J. Compensation Arrangements (Solicitation)": solicitation_comp,
"K. Does the applicant receive compensation in the context of its municipal advisory business from other than its municipal entity or obligated person clients?": format_bool(get_text(controls_node, 'receiveCompensationForMAAFromOtherClients')),
"L. Applicant Business Relating to Municipal Securities": ", ".join(a.text for a in form_data.find_all('engagedActivityType')),
}
for key, val in biz_details.items():
if val and val != "—": parts.append(f"**{key.replace(' ', ' ')}:** {val}")
parts.append("\n### Item 5: Other Business Activities")
other_activities_node = form_data.find('otherActivities')
if other_activities_node:
activities_map = { "Broker-Dealer": other_activities_node.find('brokerDealers'), "Trust Company": other_activities_node.find('trustCompany'), "Insurance": other_activities_node.find('insurance'), "Investment Advisor": other_activities_node.find('investmentAdvisor'), }
for activity, node in activities_map.items():
if node:
is_engaged = format_bool(get_text(node, 'isActivelyEngaged'))
is_primary = format_bool(get_text(node, 'isPrimaryBusiness'))
parts.append(f"- **{activity}:** Actively Engaged: {is_engaged}, Primary Business: {is_primary}")
parts.append(f"**Is applicant engaged in any other non-municipal advisor business?** {format_bool(get_text(form_data, 'isEngagedInOtherNonMAABusiness'))}")
parts.append("\n### Item 6: Financial Industry and Other Activities of Associated Persons")
parts.append(f"**Types of associated persons:** {', '.join(t.text for t in form_data.find_all('fiaAPTypes'))}")
parts.append(f"**Total Associated Persons:** {get_text(form_data, 'totalFIAAssociatedPersons')}")
parts.append("\n### Item 7: Participation or Interest in Client Transactions")
participation_node = form_data.find('participationInterestMACT')
if participation_node:
participation_map = {
'mactBuySellFromClients': "Buy or sell municipal securities from or to municipal advisory clients for the firm's own account?",
'mactBuySellRecommendToClients': "Buy or sell municipal securities from or to third-parties on behalf of clients?",
'mactEnterDerivativesWithClients': "Enter into derivatives transactions with clients for the firm's own account?",
'mactRecommendOwnedInterestToClients': "Recommend to clients to buy/sell securities in which the firm has a financial interest?",
'mactRecommendToClientsServing': "Recommend to clients products/services of an affiliated person?",
'mactRecommendToClientsHavingOtherSalesInterest': "Recommend to clients securities of an issuer with which the firm has other relationships?",
'mactDiscAuthBuySellAsMAA': "Have discretionary authority to buy/sell municipal securities for clients?",
'mactDiscAuthBuySell': "Have discretionary authority to buy/sell any other securities or investments for clients?",
'mactDiscAuthDetermineBrokerDealer': "Have discretionary authority to determine the broker-dealer to be used for client transactions?",
'mactDiscAuthDetermineCommissionToBrokerDealer': "Have discretionary authority to determine the commission paid to a broker-dealer?",
'mactRecommendBrokerDealerToClient': "Recommend broker-dealers to clients?",
'mactRecommendBrokerDealerToClientAreAP': " If yes, are any of these broker-dealers an associated person of the applicant?",
'mactCompensateForReferrals': "Compensate any person for client referrals?",
'mactReceiveCompensationForReferrals': "Receive compensation from any person for client referrals?",
}
for tag, question in participation_map.items():
parts.append(f"- **{question.replace(' ', ' ')}:** {format_bool(get_text(participation_node, tag))}")
parts.append("\n### Item 8: Owners, Officers, and Other Control Persons")
is_cp_for_policy = get_text(controls_node, 'isCPForApplicantPolicy')
parts.append(f"**A. (2) Does any person not named in Item 1-A or Schedules A, B, or C, directly or indirectly, control the applicant's management or policies?** {format_bool(is_cp_for_policy)}")
is_public_reporting_co = get_text(controls_node, 'isSection12Or15ReportingCompany')
parts.append(f"\n**B. (1) Is any person in Schedule A, B, or C, or in Section 8-A of Schedule D a public reporting company?** {format_bool(is_public_reporting_co)}")
parts.append("\n### Item 9: Disclosure Information")
disclosure_node = form_data.find('disclosureAnswers')
if disclosure_node:
disclosure_map = {
'Criminal': [('isConvictedOfFelony', 'Applicant/Advisory Affiliate Convicted/Pled Guilty to Felony?'), ('isChargedWithFelony', 'Applicant/Advisory Affiliate Charged with Felony?'), ('isOrgConvictedOfFelony', 'Organization Convicted/Pled Guilty to Felony?'), ('isOrgChargedWithFelony', 'Organization Charged with Felony?')],
'Regulatory': [('isMadeFalseStatement', 'SEC/CFTC Found False Statement?'), ('isViolatedRegulation', 'SEC/CFTC Found Violation?'), ('isCauseOfDenial', 'SEC/CFTC Found Cause of Denial/Suspension?'), ('isOrderAgainst', 'SEC/CFTC Entered Order?'), ('isImposedPenalty', 'SEC/CFTC Imposed Civil Penalty?'), ('isUnEthical', 'SRO Found Unethical Conduct?'), ('isFoundInViolationOfRegulation', 'SRO Found Violation?'), ('isFoundInCauseOfDenial', 'SRO Found Cause of Denial/Suspension?'), ('isOrderAgainstActivity', 'SRO Barred/Suspended/Fined > $2,500?'), ('isDeniedLicense', 'SRO Denied/Suspended/Revoked Registration?'), ('isFoundMadeFalseStatement', 'Foreign Authority Found False Statement?'), ('isFoundInViolationOfRules', 'Foreign Authority Found Violation?'), ('isFoundInCauseOfSuspension', 'Foreign Authority Found Cause of Suspension?'), ('isDiscipliend', 'Foreign Authority Disciplined?'), ('isAuthorizedToActAttorney', 'Authorization to Act as Attorney/Accountant Revoked?'), ('isRegulatoryComplaint', 'Subject of a Regulatory Complaint?')],
'Civil': [('isEnjoined', 'Enjoined in Connection with Municipal Advisory Activity?'), ('isFoundInViolationOfRegulation', 'Found to Have Violated Regulations?'), ('isDismissed', 'Civil Proceeding Dismissed Pursuant to Settlement?'), ('isNamedInCivilProceeding', 'Named in Civil Proceeding Alleging Violation?')]
}
for category, questions in disclosure_map.items():
parts.append(f"\n**{category} Disclosure:**")
cat_node = disclosure_node.find(f'{category.lower()}Disclosure')
for tag, question in questions:
parts.append(f"- **{question}:** {format_bool(get_text(cat_node, tag))}")
parts.append("\n### Item 10: Small Businesses")
parts.append(f"**Does the applicant have annual receipts of less than $7,000,000?** {format_bool(get_text(form_data, 'hasAnnualReceiptsLessThan7Million'))}")
parts.append(f"**Is the applicant affiliated with a person that has annual receipts of more than $7,000,000?** {format_bool(get_text(form_data, 'isAffiliatedWithReceiptsMoreThan7Million'))}")
schedule_a = form_data.find('scheduleA')
if schedule_a:
parts.append("\n### Schedule A: Direct Owners and Executive Officers")
owner_data = []
for business in schedule_a.find_all('business'):
info = business.find('baseInformation')
owner_data.append({ "Name": get_text(business, 'name'), "Title/Status": get_text(info, 'titleStatus'), "Date Acquired": get_text(info, 'statusAcquired'), "Ownership Code": OWNERSHIP_CODES.get(get_text(info, 'ownershipCode'), get_text(info, 'ownershipCode')), "Control Person?": format_bool(get_text(info, 'isControPerson')), "IRS Number": get_text(business, 'irsNum') })
for person in schedule_a.find_all('person'):
info = person.find('baseInformation')
owner_data.append({ "Name": format_name(person.find('name')), "Title/Status": get_text(info, 'titleStatus'), "Date Acquired": get_text(info, 'statusAcquired'), "Ownership Code": OWNERSHIP_CODES.get(get_text(info, 'ownershipCode'), get_text(info, 'ownershipCode')), "Control Person?": format_bool(get_text(info, 'isControPerson')), "CRD Number": get_text(info, 'crdNumber') })
if owner_data:
df = pd.DataFrame(owner_data).fillna("—")
parts.append(to_compact_markdown(df, index=False))
schedule_b = form_data.find('scheduleB')
if schedule_b:
parts.append("\n### Schedule B: Indirect Owners")
owner_data = []
for business in schedule_b.find_all('business'):
info = business.find('baseInfo')
base = info.find('baseInformation')
owner_data.append({ "Owning Entity": get_text(business, 'owningEntity'), "Name": get_text(info, 'name'), "Title/Status": get_text(base, 'titleStatus'), "Date Acquired": get_text(base, 'statusAcquired'), "Ownership Code": OWNERSHIP_CODES.get(get_text(base, 'ownershipCode'), get_text(base, 'ownershipCode')), "Control Person?": format_bool(get_text(base, 'isControPerson')), "IRS Number": get_text(info, 'irsNum') })
for person in schedule_b.find_all('person'):
info = person.find('baseInfo')
base = info.find('baseInformation')
owner_data.append({ "Owning Entity": get_text(person, 'owningEntity'), "Name": format_name(info.find('name')), "Title/Status": get_text(base, 'titleStatus'), "Date Acquired": get_text(base, 'statusAcquired'), "Ownership Code": OWNERSHIP_CODES.get(get_text(base, 'ownershipCode'), get_text(base, 'ownershipCode')), "Control Person?": format_bool(get_text(base, 'isControPerson')) })
if owner_data:
df = pd.DataFrame(owner_data).fillna("—")
parts.append(to_compact_markdown(df, index=False))
schedule_c = form_data.find('scheduleC')
if schedule_c:
parts.append("\n### Schedule C: Amendments to Schedules A and B")
amendment_data = []
for business in schedule_c.find_all(['directBusinesses', 'indirectBusinesses']):
for biz_item in business.find_all('business'):
info = biz_item.find('baseInformation')
base_info_container = info.find('baseInfo') or info
base = base_info_container.find('baseInformation')
amendment_data.append({ "Type": get_text(biz_item, 'type'), "Ownership": "Direct" if business.name == 'directBusinesses' else "Indirect", "Owning Entity": get_text(info, 'owningEntity') or "—", "Name": get_text(base_info_container, 'name'), "Title/Status": get_text(base, 'titleStatus'), "Date Acquired": get_text(base, 'statusAcquired'), "Ownership Code": OWNERSHIP_CODES.get(get_text(base, 'ownershipCode'), get_text(base, 'ownershipCode')), "Control Person?": format_bool(get_text(base, 'isControPerson')), "IRS Number": get_text(base_info_container, 'irsNum')})
for person in schedule_c.find_all(['directPersons', 'indirectPersons']):
for person_item in person.find_all('person'):
info = person_item.find('baseInformation')
base_info_container = info.find('baseInfo') or info
base = base_info_container.find('baseInformation')
amendment_data.append({ "Type": get_text(person_item, 'type'), "Ownership": "Direct" if person.name == 'directPersons' else "Indirect", "Owning Entity": get_text(info, 'owningEntity') or "—", "Name": format_name(base_info_container.find('name')), "Title/Status": get_text(base, 'titleStatus'), "Date Acquired": get_text(base, 'statusAcquired'), "Ownership Code": OWNERSHIP_CODES.get(get_text(base, 'ownershipCode'), get_text(base, 'ownershipCode')), "Control Person?": format_bool(get_text(base, 'isControPerson')), "CRD Number": get_text(base, 'crdNumber')})
if amendment_data:
df = pd.DataFrame(amendment_data).fillna("—")
parts.append(to_compact_markdown(df, index=False))
drp_info = form_data.find('drpInfo')
if drp_info:
parts.append("\n### Disclosure Reporting Pages (DRPs)")
for reg_drp in drp_info.find_all('regulatoryDrp'):
drp_for_node = reg_drp.find('drpFor')
drp_for = "Applicant"
if drp_for_node and drp_for_node.find('applicantAndAP'):
drp_for = "Applicant and Associated Person"
elif drp_for_node and drp_for_node.find('associatedPerson'):
drp_for = "Associated Person"
questions = ", ".join(q.text for q in reg_drp.find_all('responseQuestion'))
parts.append(f"\n**Regulatory DRP for: {drp_for} (Responding to Questions: {questions})**")
applicant_info_node = reg_drp.find('applicantInfo')
if applicant_info_node:
filing = applicant_info_node.find('advBDU4Filing')
parts.append(f"- **Filed On (Applicant):** Form ADV/BD/U4 for {get_text(filing, 'name')}")
parts.append(f"- **CRD Number:** {get_text(filing, 'crdNumber')}")
parts.append(f"- **Disclosure Number:** {get_text(filing, 'disclosureNumber')}")
ap_info_node = reg_drp.find('apInfo')
if ap_info_node:
for ap in ap_info_node.find_all('associatedPerson'):
parts.append(f"\n - **Associated Person:** {format_name(ap.find('naturalPersonInfo'))}")
parts.append(f" - **CRD Number:** {get_text(ap, 'crdNumber')}")
ap_filing = ap.find('advBDU4Filing')
if ap_filing:
parts.append(f" - **Filed On (AP):** Form ADV/BD/U4 for {get_text(ap_filing, 'name')}")
parts.append(f" - **Disclosure Number:** {get_text(ap_filing, 'disclosureNumber')}")
if form_data.find('civilDisclosureDrp') or form_data.find('criminalDisclosureDrp'):
parts.append("\n*Additional Civil or Criminal DRPs may be present.*")
exec_page = form_data.find('maExecutionPage')
if exec_page:
parts.append("\n### Execution Page")
sig = exec_page.find('signature')
exec_details = { "Signature": get_text(sig, 'signature'), "Signer Name": get_text(sig, 'signerName'), "Title": get_text(sig, 'title'), "Date": get_text(sig, 'date'), "CRD Number": get_text(exec_page, 'crdNumber') }
for key, val in exec_details.items():
if val and val != "—":
parts.append(f"**{key}:** {val}")
return "\n\n".join(parts)
def parse_legacy_n_mfp_xml(xml: BeautifulSoup, class_name_map: dict = None) -> str:
    """
    Parses an XML-based legacy Form N-MFP (pre-2016 schema) into a
    comprehensive, structured Markdown document.

    Args:
        xml: Parsed filing (BeautifulSoup tree).
        class_name_map: Optional mapping of upper-cased EDGAR class IDs to
            human-readable share-class names; unmapped IDs render as
            "Unknown Class (<id>)".

    Returns:
        Markdown with filer information, series-level data (Part A),
        class-level data (Part B), portfolio securities (Part C), and
        signatures. Sections are emitted only when their nodes exist.
    """
    def get_text(node, tag, strip_ns=True):
        # Missing nodes/values collapse to the em-dash placeholder.
        if not node: return "—"
        # Prefer a direct child, then fall back to a recursive search.
        # The un-anchored suffix regex tolerates namespace prefixes
        # (e.g. "ns1:reportDate"), so it matches any tag ENDING in `tag`.
        found = node.find(re.compile(rf'(?:\w+:)?{tag}$', re.I), recursive=False)
        if not found:
            found = node.find(re.compile(rf'(?:\w+:)?{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def first_text(node, *tags):
        # Return the value of the first tag that actually resolves.
        # NOTE: get_text() returns the *truthy* placeholder "—" for missing
        # tags, so chaining lookups with `or` never reaches the fallback
        # tag name; this helper restores the intended schema fallback
        # (the legacy form appears under two tag-name vintages).
        for tag in tags:
            value = get_text(node, tag)
            if value != "—":
                return value
        return "—"
    def format_val(value_str: str, type_hint: str = 'string') -> str:
        """Robustly formats values based on their intended type."""
        if not value_str or value_str.lower() in ('—', 'n/a', 'na'): return "—"
        try:
            val_float = float(value_str.replace(',', ''))
            if type_hint == 'dollar': return f"${val_float:.2f}"
            # Yields/percentages are assumed to arrive as decimal fractions
            # (0.0123 -> "1.2300%") — TODO confirm against the source schema.
            if type_hint == 'yield': return f"{val_float * 100:.4f}%"
            if type_hint == 'percent': return f"{val_float * 100:.4f}%"
            if type_hint == 'shares': return f"{val_float:.4f}"
            if type_hint == 'number': return f"{val_float:.2f}"
        except (ValueError, TypeError):
            pass
        # Non-numeric flags: map Y/N, otherwise pass the raw text through.
        if value_str.upper() == 'Y': return "Yes"
        if value_str.upper() == 'N': return "No"
        return value_str
    parts = ["# Form N-MFP: Monthly Schedule of Portfolio Holdings"]
    # Some documents wrap everything in <edgarSubmission>; others do not.
    search_context = xml.find('edgarSubmission') or xml
    parts.append("## N-MFP: Filer Information")
    parts.append(f"**Submission Type:** {get_text(search_context, 'submissionType')}")
    parts.append(f"**Live/Test Flag:** {get_text(search_context, 'liveTestFlag')}")
    parts.append(f"**Is Electronic Copy of Paper Format:** {format_val(get_text(search_context, 'isThisElectronicCopyOfPaperFormat'))}")
    parts.append(f"**CIK:** {get_text(search_context, 'EntityCentralIndexKey')}")
    filing_info_section = ["### General Information"]
    gen_data = {
        "Report for (YYYY-MM-DD)": first_text(search_context, 'reportDate', 'DocumentPeriodEndDate'),
        "EDGAR Series Identifier": get_text(search_context, 'seriesId'),
        "Total number of share classes in the series": first_text(search_context, 'totalClassesInSeries', 'totalShareClassesInSeries'),
        "Is this the fund's final filing on Form N-MFP?": format_val(first_text(search_context, 'isThisFinalFiling', 'finalFilingFlag')),
        "Is Fund Liquidating?": format_val(get_text(search_context, 'isFundLiquidating')),
        "Is Fund Merging/Being Acquired?": format_val(first_text(search_context, 'isFundMergingWithOrBeingAcquiredByAnotherFund', 'fundAcqrdOrMrgdWthAnthrFlag')),
        "Has the fund acquired or merged with another fund during the reporting period?": format_val(get_text(search_context, 'hasFundAcquiredOrMergedWithAnotherFundSinceLastFiling')),
    }
    for key, val in gen_data.items():
        filing_info_section.append(f"**{key}:** {val}")
    parts.append("\n\n".join(filing_info_section))
    # ---- Part A: series-level information -------------------------------
    series_info = search_context.find('seriesLevelInfo') or search_context.find('seriesLevelInformation')
    if series_info:
        parts.append("\n## Part A: Series-Level Information about the Fund")
        service_providers = []
        # NOTE(review): when <adviser> itself is matched (rather than a list
        # wrapper), find_all("adviser") searches *inside* it — confirm the
        # schema nests adviser entries this way.
        adviser_node = series_info.find("adviser") or series_info.find("investmentAdviserList")
        if adviser_node:
            for node in adviser_node.find_all("adviser"):
                service_providers.append({"Item": "A.2", "Role": "Investment Adviser", "Details": get_text(node, 'adviserName'), "File/CIK Number": get_text(node, 'adviserFileNumber')})
        sub_adviser_list = series_info.find("subAdviserList")
        if sub_adviser_list:
            for sub_adviser_node in sub_adviser_list.find_all("subAdviser"):
                service_providers.append({"Item": "A.3", "Role": "Sub-Adviser", "Details": get_text(sub_adviser_node, 'adviserName'), "File/CIK Number": get_text(sub_adviser_node, 'adviserFileNumber')})
        admin_node = series_info.find('administrator') or series_info.find('administratorList')
        if admin_node and admin_node.get_text(strip=True):
            # Fall back to the node's raw text when no <administratorName>
            # child exists (the old `or` fallback was unreachable).
            admin_name = get_text(admin_node, 'administratorName')
            if admin_name == "—":
                admin_name = admin_node.text.strip()
            service_providers.append({"Item": "A.5", "Role": "Administrator", "Details": admin_name, "File/CIK Number": "—"})
        accountant_node = series_info.find('indpPubAccountant') or series_info.find('independentPublicAccountant')
        if accountant_node and accountant_node.get_text(strip=True):
            acc_details = f"{get_text(accountant_node, 'name')} City: {get_text(accountant_node, 'city')} State: {first_text(accountant_node, 'stateCountry', 'state')}"
            service_providers.append({"Item": "A.4", "Role": "Independent Public Accountant", "Details": acc_details, "File/CIK Number": "—"})
        # Either a list wrapper (iterated for its children) or a single-node
        # fallback wrapped in a Python list.
        transfer_agent_list = series_info.find('transferAgentList') or [series_info.find('transferAgent')]
        for ta_node in transfer_agent_list:
            # Whitespace-only children of the list wrapper are skipped by
            # the get_text(strip=True) guard.
            if ta_node and ta_node.get_text(strip=True):
                ta_details = f"{get_text(ta_node, 'name')} CIK: {first_text(ta_node, 'cik', 'EntityCentralIndexKey')}"
                service_providers.append({"Item": "A.6", "Role": "Transfer Agent", "Details": ta_details, "File/CIK Number": get_text(ta_node, 'fileNumber')})
        if service_providers:
            parts.append("\n### Service Providers\n" + to_compact_markdown(pd.DataFrame(service_providers), index=False))
        fund_chars = {
            "A.1 - Securities Act File Number": first_text(series_info, 'securitiesActFileNumber', 'ContainedFileInformationFileNumber'),
            "A.7 - Is this a Feeder Fund?": format_val(first_text(series_info, 'feederFundFlag', 'isThisFeederFund')),
            "A.8 - Is this a Master Fund?": format_val(first_text(series_info, 'masterFundFlag', 'isThisMasterFund')),
            "A.9 - Is this series primarily used to fund insurance company separate accounts?": format_val(first_text(series_info, 'seriesFundInsuCmpnySepAccntFlag', 'isThisSeriesPrimarilyUsedToFundInsuranceCompanySeperateAccounts')),
            "A.10 - Money Market Fund Category": first_text(series_info, 'moneyMarketFundCategory', 'InvestmentTypeDomain'),
            "A.11 - WAM": f"{first_text(series_info, 'averagePortfolioMaturity', 'dollarWeightedAveragePortfolioMaturity')} days",
            "A.12 - WAL": f"{first_text(series_info, 'averageLifeMaturity', 'dollarWeightedAverageLifeMaturity')} days",
            "Total Value of Portfolio Securities": format_val(get_text(series_info, 'totalValuePortfolioSecurities'), 'dollar'),
            "Amortized Cost of Portfolio Securities": format_val(first_text(series_info, 'amortizedCostPortfolioSecurities', 'AvailableForSaleSecuritiesAmortizedCost'), 'dollar'),
            "Cash": format_val(get_text(series_info, 'cash'), 'dollar'),
            "Total Other Assets": format_val(first_text(series_info, 'totalValueOtherAssets', 'OtherAssets'), 'dollar'),
            "Total Liabilities": format_val(first_text(series_info, 'totalValueLiabilities', 'Liabilities'), 'dollar'),
            "Net Assets of Series": format_val(first_text(series_info, 'netAssetOfSeries', 'AssetsNet'), 'dollar'),
            "Number of Shares Outstanding (Series)": format_val(get_text(series_info, 'numberOfSharesOutstanding'), 'number'),
            "Stable Price Per Share": format_val(get_text(series_info, 'stablePricePerShare'), 'dollar'),
            "7-Day Gross Yield": format_val(first_text(series_info, 'sevenDayGrossYield', 'MoneyMarketSevenDayYield'), 'yield')
        }
        parts.append("\n### Fund Characteristics & Assets")
        master_fund_node = series_info.find('masterFund')
        # Use the same schema fallback as item A.7 when deciding whether to
        # show master-fund details (previously only 'feederFundFlag' was
        # consulted, silently skipping the alternate-schema documents).
        if first_text(series_info, 'feederFundFlag', 'isThisFeederFund').upper() == 'Y' and master_fund_node:
            parts.append("\n**Master Fund Information:**")
            parts.append(f"- **CIK:** {first_text(master_fund_node, 'cik', 'EntityCentralIndexKey')}")
            parts.append(f"- **Name:** {first_text(master_fund_node, 'entityName', 'EntityRegistrantName')}")
            parts.append(f"- **Series ID:** {first_text(master_fund_node, 'seriesId', 'seriesIdentifier')}")
        for key, val in fund_chars.items():
            # "— days" is the WAM/WAL placeholder; suppress it like the others.
            if val not in ("—", " days", "— days"): parts.append(f"- **{key}:** {val}")
        liquid_data = []
        dla_node = series_info.find('totalValueDailyLiquidAssets')
        wla_node = series_info.find('totalValueWeeklyLiquidAssets')
        pdla_node = series_info.find('percentageDailyLiquidAssets')
        pwla_node = series_info.find('percentageWeeklyLiquidAssets')
        if dla_node or wla_node or pdla_node or pwla_node:
            # The legacy schema reports up to five Friday snapshots per month.
            for i in range(1, 6):
                week_tag = f'fridayWeek{i}'
                if get_text(dla_node, week_tag) != "—" or get_text(wla_node, week_tag) != "—" or \
                   get_text(pdla_node, week_tag) != "—" or get_text(pwla_node, week_tag) != "—":
                    liquid_data.append({
                        "Period": f"Friday, Week {i}",
                        "Daily Liquid Assets ($)": format_val(get_text(dla_node, week_tag), 'dollar'),
                        "Weekly Liquid Assets ($)": format_val(get_text(wla_node, week_tag), 'dollar'),
                        "Daily Liquid Assets (%)": format_val(get_text(pdla_node, week_tag), 'percent'),
                        "Weekly Liquid Assets (%)": format_val(get_text(pwla_node, week_tag), 'percent'),
                    })
        if liquid_data:
            parts.append("\n### Weekly Liquid Assets\n" + to_compact_markdown(pd.DataFrame(liquid_data), index=False))
        series_shadow_price_node = series_info.find('seriesShadowPrice')
        if series_shadow_price_node:
            parts.append("\n**Series Shadow Price:**")
            # The odd 'Fornet' casing below mirrors the legacy tag names;
            # lookups are case-insensitive, so it matches either way.
            parts.append(f"- **NAV Per Share (incl. support):** {format_val(get_text(series_shadow_price_node, 'netValuePerShareIncludingCapitalSupportAgreement'), 'shares')} (as of {get_text(series_shadow_price_node, 'dateCalculatedFornetValuePerShareIncludingCapitalSupportAgreement')})")
            parts.append(f"- **NAV Per Share (excl. support):** {format_val(get_text(series_shadow_price_node, 'netValuePerShareExcludingCapitalSupportAgreement'), 'shares')} (as of {get_text(series_shadow_price_node, 'dateCalculatedFornetValuePerShareExcludingCapitalSupportAgreement')})")
    # ---- Part B: class-level information --------------------------------
    class_level_nodes = search_context.find_all('classLevelInformation') or search_context.find_all('classLevelInfo')
    if class_level_nodes:
        parts.append("\n## Part B: Class-Level Information about the Fund")
        if class_name_map is None:
            class_name_map = {}
        for node in class_level_nodes:
            class_id = first_text(node, 'classId', 'classesId')
            class_name = class_name_map.get(class_id.upper(), f"Unknown Class ({class_id})")
            parts.append(f"\n### Class: {class_name}")
            class_details = {
                "B.2 - Minimum Initial Investment": format_val(get_text(node, 'minInitialInvestment'), 'dollar'),
                "B.3 - Net Assets of Class": format_val(get_text(node, 'netAssetsOfClass'), 'dollar'),
                "B.4 - Shares Outstanding": format_val(get_text(node, 'numberOfSharesOutstanding'), 'number'),
                "B.4 - Net Asset Value Per Share": format_val(get_text(node, 'netAssetValuePerShare'), 'number'),
                "B.7.7 - 7-Day Net Yield": format_val(get_text(node, 'sevenDayNetYield'), 'yield'),
                "Person Paying for Fund Expenses?": format_val(get_text(node, 'personPayForFundFlag')),
                "Expense Reimbursement/Waiver Description": get_text(node, 'nameOfPersonDescExpensePay')
            }
            for key, val in class_details.items():
                if val != "—": parts.append(f"- **{key}:** {val}")
            weekly_flows = []
            # Direct children named fridayWeek1..N carry per-week flows.
            flow_nodes = node.find_all(re.compile(r'^(?:\w+:)?fridayWeek\d+$', re.I), recursive=False)
            for week_node in flow_nodes:
                week_number_match = re.search(r'(\d+)$', week_node.name)
                if not week_number_match:
                    continue
                week_number_str = week_number_match.group(1)
                subs = get_text(week_node, 'weeklyGrossSubscriptions')
                reds = get_text(week_node, 'weeklyGrossRedemptions')
                # Skip weeks where both values are zero or missing ("—") —
                # previously missing values leaked through as placeholder rows.
                if subs not in ("0.00", "—") or reds not in ("0.00", "—"):
                    weekly_flows.append({
                        "Period": f"Week {week_number_str}",
                        "Gross Subscriptions ($)": format_val(subs, 'dollar'),
                        "Gross Redemptions ($)": format_val(reds, 'dollar'),
                    })
            if weekly_flows:
                weekly_flows.sort(key=lambda x: int(re.search(r'\d+', x['Period']).group()))
                parts.append("\n**Weekly Flows:**")
                parts.append(to_compact_markdown(pd.DataFrame(weekly_flows), index=False))
            total_node = node.find("totalForTheMonthReported")
            if total_node:
                parts.append("\n**Monthly Shareholder Flow Activity:**")
                # NOTE(review): subscriptions use 'weeklyGrossSubscriptions'
                # while redemptions use 'grossRedemptions' — confirm this
                # asymmetry matches the legacy schema.
                parts.append(f"- **Gross Subscriptions for month:** {format_val(get_text(total_node, 'weeklyGrossSubscriptions'), 'dollar')}")
                parts.append(f"- **Gross Redemptions for month:** {format_val(get_text(total_node, 'grossRedemptions'), 'dollar')}")
            else:
                parts.append(f"- **Net flow for month:** {format_val(get_text(node, 'netShareholderFlowActivityForMonthEnded'), 'dollar')}")
            class_shadow_price_node = node.find('classShadowPrice')
            if class_shadow_price_node:
                incl_node = class_shadow_price_node.find('netAssetValuePerShareIncludingCapitalSupportAgreement')
                excl_node = class_shadow_price_node.find('netAssetValuePerShareExcludingCapitalSupportAgreement')
                parts.append("\n**Class Shadow Price:**")
                parts.append(f"- **NAV Per Share (incl. support):** {format_val(get_text(incl_node, 'value'), 'shares')} (as of {get_text(incl_node, 'dateAsOfWhichValueWasCalculated')})")
                parts.append(f"- **NAV Per Share (excl. support):** {format_val(get_text(excl_node, 'value'), 'shares')} (as of {get_text(excl_node, 'dateAsOfWhichValueWasCalculated')})")
    # ---- Part C: portfolio securities -----------------------------------
    securities_nodes = search_context.find_all('scheduleOfPortfolioSecuritiesInfo') or search_context.find_all('scheduleOfPortfolioSecurities')
    if securities_nodes:
        parts.append("\n## Part C: Schedule of Portfolio Securities")
        for i, node in enumerate(securities_nodes):
            parts.append(f"\n### Security {i+1}: {first_text(node, 'nameOfIssuer', 'InvestmentIssuer')}")
            security_details = [
                f"**C.1 - Title:** {first_text(node, 'titleOfIssuer', 'InvestmentTitle')}",
                f"**C.6 - Investment Category:** {first_text(node, 'investmentCategory', 'InvestmentTypeDomain')}",
            ]
            id_data = {
                "C.3 - CUSIP": get_text(node, 'CUSIPMember'),
                "C.4 - ISIN": get_text(node, 'ISINId'),
                "C.3 - LEI": get_text(node, 'LEIID'),
                "CIK": get_text(node, 'cik'),
                "C.5 - Other ID": get_text(node, 'otherUniqueId')
            }
            id_str = ", ".join([f"{k}: {v}" for k, v in id_data.items() if v != "—"])
            if id_str: security_details.append(f"**Identifiers:** {id_str}")
            rating_node = node.find('designatedNrsro')
            rating_str = first_text(node, 'securityRated', 'rating')
            if rating_node and get_text(rating_node, 'nameOfDesignatedNRSRO') != 'N/A':
                rating_str += f" ({get_text(rating_node, 'nameOfDesignatedNRSRO')}: {get_text(rating_node, 'creditRatingDesignatedNRSRO')})"
            if rating_str != "—":
                security_details.append(f"**Rating:** {rating_str}")
            security_details.extend([
                f"**C.18 - Value (incl. sponsor support):** {format_val(get_text(node, 'includingValueOfAnySponsorSupport'), 'dollar')}",
                f"**C.18.a - Value (excl. sponsor support):** {format_val(first_text(node, 'excludingValueOfAnySponsorSupport', 'valueOfSecurityExcludingValueOfCapitalSupportAgreement'), 'dollar')}",
                f"**Principal Amount:** {format_val(get_text(node, 'InvestmentOwnedBalancePrincipalAmount'), 'dollar')}",
                f"**Amortized Cost:** {format_val(get_text(node, 'AvailableForSaleSecuritiesAmortizedCost'), 'dollar')}",
                f"**Fair Value:** {format_val(get_text(node, 'InvestmentOwnedAtFairValue'), 'dollar')}",
                f"**C.19 - Percentage of Net Assets:** {format_val(first_text(node, 'percentageOfMoneyMarketFundNetAssets', 'InvestmentOwnedPercentOfNetAssets'), 'percent')}",
                f"**C.17 - Yield as of Reporting Date:** {format_val(get_text(node, 'yieldOfTheSecurityAsOfReportingDate'), 'yield')}",
                f"**C.11 - Maturity Date (WAM):** {first_text(node, 'investmentMaturityDateWAM', 'InvestmentMaturityDate')}",
                f"**C.12 - Maturity Date (WAL):** {first_text(node, 'investmentMaturityDateWAL', 'InvestmentMaturityDate')}",
                f"**C.13 - Final Legal Maturity Date:** {get_text(node, 'finalLegalInvestmentMaturityDate')}",
            ])
            flags = {
                "C.14 - Has Demand Feature?": format_val(first_text(node, 'securityDemandFeatureFlag', 'doesSecurityHaveDemandFeature')),
                "C.15 - Has Guarantee?": format_val(first_text(node, 'securityGuaranteeFlag', 'doesSecurityHaveGuarantee')),
                "C.16 - Has Enhancement?": format_val(first_text(node, 'securityEnhancementsFlag', 'doesSecurityHaveEnhancementsOnWhichFundRelying')),
                "C.22 - Is an Illiquid Security?": format_val(first_text(node, 'illiquidSecurityFlag', 'isThisIlliquidSecurity')),
                "C.20 - Is a Daily Liquid Asset?": format_val(get_text(node, 'dailyLiquidAssetSecurityFlag')),
                "C.21 - Is a Weekly Liquid Asset?": format_val(get_text(node, 'weeklyLiquidAssetSecurityFlag')),
                "C.23 - Categorized at Level 3?": format_val(get_text(node, 'securityCategorizedAtLevel3Flag')),
            }
            flag_str = ", ".join([f"{k.split('-')[0].strip()} {k.split('-')[1].strip()} {v}" for k, v in flags.items() if v != "—"])
            if flag_str: security_details.append(f"**Characteristics:** {flag_str}")
            parts.append("\n".join(f"- {item}" for item in security_details))
            guarantor_node = node.find('guarantor')
            if guarantor_node and guarantor_node.get_text(strip=True):
                parts.append("\n**C.15.a - Guarantor Details:**")
                guarantor_details = {
                    "Identity of Guarantor": get_text(guarantor_node, 'identityOfTheGuarantor'),
                    "Amount Provided": get_text(guarantor_node, 'amountProvidedByGuarantor'),
                }
                for key, val in guarantor_details.items():
                    if val != "—": parts.append(f"- **{key}:** {val}")
                guarantor_rating_node = guarantor_node.find('designatedNRSROGuarantor')
                if guarantor_rating_node:
                    rating_str = f"{get_text(guarantor_rating_node, 'nameOfDesignatedNRSRO')}: {get_text(guarantor_rating_node, 'creditRatingDesignatedNRSRO')}"
                    # Missing values render as "—: —" (the old ":" guard could
                    # never match because get_text never returns "").
                    if rating_str != "—: —": parts.append(f"- **Rating:** {rating_str}")
            enhancement_node = node.find('enhancementProvider')
            if enhancement_node and enhancement_node.get_text(strip=True):
                parts.append("\n**C.16.a - Enhancement Details:**")
                enhancement_details = {
                    "Identity of Provider": get_text(enhancement_node, 'identityOfTheEnhancementProvider'),
                    "Type of Enhancement": get_text(enhancement_node, 'typeOfEnhancement'),
                    "Amount Provided": get_text(enhancement_node, 'amountProvidedByEnhancement'),
                }
                for key, val in enhancement_details.items():
                    if val != "—": parts.append(f"- **{key}:** {val}")
                enhancement_rating_node = enhancement_node.find('designatedNRSROEnhancement')
                if enhancement_rating_node:
                    rating_str = f"{get_text(enhancement_rating_node, 'nameOfDesignatedNRSRO')}: {get_text(enhancement_rating_node, 'creditRatingDesignatedNRSRO')}"
                    # Same placeholder guard fix as the guarantor rating above.
                    if rating_str != "—: —": parts.append(f"- **Rating:** {rating_str}")
    # ---- Signatures ------------------------------------------------------
    sig = search_context.find('signature')
    if sig:
        parts.append("\n## N-MFP: Signatures")
        parts.append(f"**Registrant:** {get_text(sig, 'registrant')}")
        parts.append(f"**Date:** {get_text(sig, 'signatureDate')}")
        parts.append(f"**By:** {get_text(sig, 'signature')}")
        parts.append(f"**Name of Signing Officer:** {get_text(sig, 'nameOfSigningOfficer')}")
        parts.append(f"**Title of Signing Officer:** {get_text(sig, 'titleOfSigningOfficer')}")
    return "\n\n".join(parts)
def parse_sbse_a_xml(xml: BeautifulSoup) -> str:
    """
    Render an XML-based Form SBSE-A (or SBSE-A/A amendment) as a
    structured Markdown document.
    """
    def extract(node, tag):
        # Case-insensitive, namespace-prefix-tolerant tag lookup; any
        # missing node/value collapses to the em-dash placeholder.
        if not node: return "—"
        hit = node.find(re.compile(rf'^(?:\w+:)?{tag}$', re.I))
        if hit and hit.text:
            return hit.text.strip()
        return "—"
    def yes_no(raw: str) -> str:
        # Map the form's Y/N flags to readable answers.
        return {'Y': "Yes", 'N': "No"}.get(raw.strip().upper(), "—")
    def person_name(node) -> str:
        # First/middle/last, skipping any missing components.
        if not node: return "—"
        pieces = (extract(node, t) for t in ('firstName', 'middleName', 'lastName'))
        return " ".join(p for p in pieces if p and p != "—")
    def one_line_address(node) -> str:
        # Comma-joined address, omitting absent fields.
        if not node: return "—"
        fields = ('street1', 'street2', 'city', 'stateOrCountry', 'zipCode')
        values = (extract(node, f) for f in fields)
        return ", ".join(v for v in values if v and v.strip() != "—")
    OWNERSHIP_CODES = {
        'NA': "NA - less than 5%",
        'A': "A - 5% but less than 10%",
        'B': "B - 10% but less than 25%",
        'C': "C - 25% but less than 50%",
        'D': "D - 50% but less than 75%",
        'E': "E - 75% or more",
    }
    output = ["## Form SBSE-A: Registration for Security-Based Swap Dealers"]
    root = xml.find('formData')
    if not root:
        return ""
    applicant = root.find('applicantOne')
    if applicant:
        output.append("\n### Applicant Information")
        identity = {
            "Full Applicant Name": extract(applicant, 'fullApplicantName'),
            "NFA Number": extract(applicant, 'applicantNFANumber'),
            "IRS Employer ID No.": extract(applicant, 'irsEmplIdentNo'),
            "CIK": extract(applicant, 'applicantCik'),
            "UIC": extract(applicant, 'applicantUic'),
            "Main Address": one_line_address(applicant.find('mainAddress')),
            "Mailing Address": one_line_address(applicant.find('mailingAddress')),
            "Business Telephone": extract(applicant, 'businessTelephoneNumber'),
        }
        # Only the identity block suppresses placeholder values.
        output.extend(f"**{label}:** {value}" for label, value in identity.items() if value and value != "—")
        contact = applicant.find('contactEmployee')
        if contact:
            output.append("\n**Contact Employee:**")
            output.append(f"- **Name:** {person_name(contact.find('contactEmployeeName'))}")
            output.append(f"- **Title:** {extract(contact, 'title')}")
            output.append(f"- **Phone:** {extract(contact, 'phone')}")
            output.append(f"- **Email:** {extract(contact, 'emailAddress')}")
        compliance = applicant.find('chiefComplianceOfficer')
        if compliance:
            output.append("\n**Chief Compliance Officer:**")
            output.append(f"- **Name:** {person_name(compliance.find('officerName'))}")
            output.append(f"- **Title:** {extract(compliance, 'title')}")
            output.append(f"- **Phone:** {extract(compliance, 'phone')}")
            output.append(f"- **Email:** {extract(compliance, 'emailAddress')}")
    activities = root.find('applicantTwo')
    if activities:
        output.append("\n### Business and Activities")
        for label, tag in (
            ("Registered as Swap Dealer?", 'isSwapDealer'),
            ("Registered as Swap Participant?", 'isSwapParticipant'),
            ("Uses Mathematical Models?", 'isMathematicalModels'),
            ("Is a Non-Resident Entity?", 'isNonResidentEntity'),
            ("Subject to Prudential Regulator?", 'isSubjectToRegulator'),
            ("Is an Investment Advisor?", 'isInvestmentAdvisor'),
            ("Engaged in Other Business?", 'isEngageInOtherBusiness'),
            ("Holds Customer Funds?", 'isHoldFunds'),
        ):
            output.append(f"**{label}:** {yes_no(extract(activities, tag))}")
        regulators = ", ".join(r.text for r in activities.find_all('prudentialRegulator'))
        if regulators:
            output.append(f"**Prudential Regulators:** {regulators}")
        description = extract(activities, 'descriptionBusiness')
        if description != "—":
            output.append(f"\n**Description of Business:**\n{description}")
    history = root.find('applicantThree')
    if history:
        output.append("\n### Control and History")
        for label, tag in (
            ("Are records kept by another entity?", 'isRecordsKept'),
            ("Does another entity hold funds on behalf of applicant?", 'isOnBehalf'),
            ("Is control exercised through an agreement?", 'isControlThroughAgreement'),
            ("Is applicant financed by another entity?", 'isWhollyOrPartiallyFinance'),
            ("Is applicant succeeding a prior entity?", 'isSucceeding'),
            ("Subject to foreign regulation?", 'isForeignRegulatory'),
        ):
            output.append(f"**{label}:** {yes_no(extract(history, tag))}")
        # The principal count is free text, not a Y/N flag.
        output.append(f"**Number of Principals:** {extract(history, 'numberOfPrincipals')}")
    principals_section = root.find('scheduleA')
    if principals_section:
        output.append("\n### Schedule A: Principals")
        rows = []
        for entry in principals_section.find_all('scheduleAInfo'):
            code = extract(entry, 'ownershipCode')
            rows.append({
                "Name": person_name(entry.find('individualName')),
                "Title or Status": extract(entry, 'titleOrStatus'),
                "Date Acquired": extract(entry, 'dateTitleOrStatusAcquired'),
                "Date Began Working": extract(entry, 'dateBeganWorking'),
                "Ownership": OWNERSHIP_CODES.get(code, code),
                "NFA ID No.": extract(entry, 'nfaIdentificationNo'),
            })
        if rows:
            output.append(to_compact_markdown(pd.DataFrame(rows), index=False))
    explanations = root.find('scheduleB')
    if explanations:
        output.append("\n### Schedule B: Explanations")
        first_section = explanations.find('sectionOne')
        if first_section:
            output.append(f"\n**Description:**\n{extract(first_section, 'description')}")
        second_section = explanations.find('sectionTwo')
        if second_section:
            # Headings precompute the original replace(...)+title() chain,
            # including its "Onbehalf" rendering for onBehalf records.
            for record_tag, heading in (
                ('recordsKept', 'Records Keeper'),
                ('onBehalf', 'Onbehalf'),
                ('controlThroughAgreement', 'Controlling Entity'),
            ):
                matches = second_section.find_all(record_tag)
                if not matches:
                    continue
                output.append(f"\n**{heading}:**")
                for match in matches:
                    output.append(f"- **Name:** {extract(match, 'firmOrOrganizationName')}")
                    output.append(f"  - **Address:** {one_line_address(match.find('firmAddress'))}")
                    output.append(f"  - **Effective Date:** {extract(match, 'firmEffectiveDate')}")
                    output.append(f"  - **Arrangement:** {extract(match, 'descriptionArrangement')}")
    signing = root.find('execution')
    if signing:
        output.append("\n### Execution")
        for label, tag in (
            ("Date", 'date'),
            ("Name of Applicant", 'nameOfApplicant'),
            ("Signature", 'signature'),
            ("Printed Name", 'nameOfPersonSigning'),
            ("Title", 'titleOfPersonSigning'),
        ):
            output.append(f"**{label}:** {extract(signing, tag)}")
    return "\n\n".join(output)
def parse_form_atsn_xml(xml: BeautifulSoup) -> str:
    """
    Parses any XML-based Form ATS-N filing (including /MA, /UA, /OFA, /CA, etc.)
    into a structured Markdown document.

    Args:
        xml: Parsed BeautifulSoup tree of the ATS-N filing.

    Returns:
        A Markdown document covering the cover page and Parts I-III. When the
        filing has no <formData> element, falls back to a short cover-page-only
        summary; returns "" when neither <formData> nor a cover page exists.
    """
    def get_text(node, tag):
        # None-safe, case-insensitive lookup that tolerates a namespace prefix.
        if not node: return "—"
        found = node.find(re.compile(rf'^(?:\w+:)?{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def format_bool(value_str: str) -> str:
        # EDGAR encodes booleans as Y/N or TRUE/FALSE; anything else is unknown.
        s = value_str.strip().upper()
        if s == 'Y' or s == 'TRUE': return "Yes"
        if s == 'N' or s == 'FALSE': return "No"
        return "—"
    def format_address(addr_node) -> str:
        # Joins the non-empty address components, dropping missing ("—") parts.
        if not addr_node: return "—"
        parts = [
            get_text(addr_node, 'street1'), get_text(addr_node, 'street2'),
            get_text(addr_node, 'city'), get_text(addr_node, 'state'),
            get_text(addr_node, 'zip')
        ]
        return ", ".join(p for p in parts if p and p.strip() != "—")
    def get_rb_answer_and_details(parent_node, question_tag, details_tag):
        # Returns (Yes/No/"—", optional free-text details) for a radio-button
        # question whose answer lives in an attribute whose name starts "rb".
        if not parent_node: return "—", None
        question_node = parent_node.find(question_tag)
        if not question_node: return "—", None
        bool_attr = next((attr for attr in question_node.attrs if attr.startswith('rb')), None)
        answer = format_bool(question_node.get(bool_attr, "N"))
        details_text = None
        if answer in ["Yes", "No"]:
            details_node = question_node.find(details_tag)
            if details_node and details_node.text.strip():
                details_text = details_node.text.strip()
        return answer, details_text
    form_data = xml.find('formData')
    if not form_data:
        # Some filings carry only a cover page; emit an abbreviated summary.
        cover = xml.find('cover') or xml.find(re.compile(r'(?:\w+:)?cover$', re.I))
        if cover:
            submission_type = get_text(xml, 'submissionType')
            title = f"Form {submission_type}: NMS Stock Alternative Trading System Report"
            parts = [f"## {title}\n\n### Cover Page"]
            parts.append(f"**NMS Stock ATS Name:** {get_text(cover, 'txNMSStockATSName')}")
            parts.append(f"\n**Statement About Amendment:**\n{get_text(cover, 'taStatementAboutAmendment')}")
            return "\n".join(parts)
        return ""
    submission_type_node = xml.find('submissionType')
    submission_type = submission_type_node.text.strip() if submission_type_node else "ATS-N"
    title = f"Form {submission_type}: NMS Stock Alternative Trading System Report"
    parts = [f"## {title}"]
    # --- Cover page ---
    cover = form_data.find('cover')
    if cover:
        parts.append("\n### Cover Page")
        parts.append(f"**NMS Stock ATS Name:** {get_text(cover, 'txNMSStockATSName')}")
        parts.append(f"**Operates Pursuant to Form ATS?** {format_bool(get_text(cover, 'rbOperatesPursuantToFormATS'))}")
        parts.append(f"\n**Statement About Amendment:**\n{get_text(cover, 'taStatementAboutAmendment')}")
    # --- Part I: basic identifying information ---
    p1 = form_data.find('partOne')
    parts.append("\n### Part I: Basic Information")
    p1_details = {
        "1. Is the ATS operated by a registered broker-dealer?": format_bool(get_text(p1, 'rbPart1Item1IsBd')),
        "2. Name of the NMS Stock ATS": get_text(p1, 'txPart1Item2ATSName'),
        # Guarded: p1 may be absent, and the name is stored as a tag attribute
        # which may itself be missing on a given <atsName> element.
        "3. Name(s) under which business is conducted": ", ".join(n.get('txPart1Item3ATSName', '') for n in p1.find_all('atsName') if n.get('txPart1Item3ATSName')) if p1 else "—",
        "4a. Broker-Dealer SEC File No.": get_text(p1, 'txPart1Item4aBdFileNumber'),
        "4a. Broker-Dealer CRD No.": get_text(p1, 'txPart1Item4aBdCrdNumber'),
        "5a. Self-Regulatory Organization": get_text(p1, 'txPart1Item5aNsaFullName'),
        "5b. Effective Date of Membership": get_text(p1, 'part1Item5bEffectiveMembershipDate'),
        "5c. MPID": get_text(p1, 'txtPart1Item5cNmsStockMPID'),
        "6u. Website": get_text(p1, 'txtPart1Item6uwebsite'),
        "7. Primary Site Address": format_address(p1.find('part1Item7PrimarySite') if p1 else None),
        "7. Secondary Site Address": format_address(p1.find('secondarySiteI7') if p1 else None),
        "8. Is Exhibit 1 (list of subscribers) on a public website?": format_bool(get_text(p1, 'cbPart1Item8Exhibit1atWebsite')),
        "9. Is Exhibit 2 (written standards for access) on a public website?": format_bool(get_text(p1, 'cbPart1Item9Exhibit2atWebsite')),
    }
    for key, val in p1_details.items():
        if val and val != "—":
            parts.append(f"**{key}:** {val}")
    # --- Part II: safeguards/procedures Q&A ---
    p2 = form_data.find('partTwo')
    parts.append("\n### Part II: Written Safeguards and Procedures")
    answer, details = get_rb_answer_and_details(p2, 'part2Item1aArePermittedToEnterInterest', 'taPart2Item1aUnitNamesEnterInterest')
    parts.append(f"\n**1a. Are any business units of the Broker-Dealer Operator permitted to enter interest?** {answer}")
    if details: parts.append(f"  - **Details:** {details}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item1bAreSevicesSametoAllSubscribers', 'taPart2Item2bExplainDiff')
    parts.append(f"**1b. Are the services offered and provided by the ATS to such business units the same?** {answer}")
    if details: parts.append(f"  - **Explanation:** {details}")
    parts.append(f"**1c. Are there any arrangements between the ATS and such business unit?** {format_bool(get_text(p2, 'rbPart2Item1cAreThereArrangements'))}")
    parts.append(f"**1d. Can order and trading interest of the business unit be routed out of the ATS?** {format_bool(get_text(p2, 'rbPart2Item1dCanOATInterestBeRouted'))}")
    answer, details = get_rb_answer_and_details(p2, 'affiliatesPermittedToEnterInterest', 'taPart2Item2aAfflThatEnterInterest')
    parts.append(f"\n**2a. Are any Affiliates of the Broker-Dealer Operator permitted to enter interest?** {answer}")
    if details: parts.append(f"  - **Affiliates:** {details}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item2bAreSevicestoAfflSametoSubscribers', 'taPart2Item2bExplainDiff')
    parts.append(f"**2b. Are the services offered and provided by the ATS to such Affiliates the same?** {answer}")
    if details: parts.append(f"  - **Explanation:** {details}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item2cAnyFrmlInfrmlArrngmnts', 'taPart2Item2cYesAfflteDtls')
    parts.append(f"**2c. Are there any arrangements between the ATS and such Affiliate?** {answer}")
    if details: parts.append(f"  - **Details:** {details}")
    parts.append(f"**2d. Can order and trading interest of the Affiliate be routed out of the ATS?** {format_bool(get_text(p2, 'rbPart2Item2dCanOATIBeRoutedByAffl'))}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item3aCanSubscrOptOutWithOATIOfBD', 'taPart2Item3aExplianOptOut')
    parts.append(f"\n**3a. Can a Subscriber opt-out from interacting with the order and trading interest of the Broker-Dealer Operator?** {answer}")
    if details: parts.append(f"  - **Explanation:** {details}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item3aCanSubscrOptOutWithOATIOfAffl', 'taPart2Item3bExplianOptOut')
    parts.append(f"**3b. Can a Subscriber opt-out from interacting with the order and trading interest of an Affiliate?** {answer}")
    if details: parts.append(f"  - **Explanation:** {details}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item3cAreOptOutSametoAllSubscribers', 'taPart2Item3cExplainDiff')
    parts.append(f"**3c. Are the means to opt-out the same for all Subscribers?** {answer}")
    if details: parts.append(f"  - **Explanation:** {details}")
    parts.append(f"\n**4a. Are there any arrangements between the Broker-Dealer Operator and a trading center?** {format_bool(get_text(p2, 'rbPart2Item4aAreThereArrangementsBtwBDAndTC'))}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item5aDoesOfferProductsAndServices', 'taPart2Item5aProductsAndServices')
    parts.append(f"\n**5a. Does the Broker-Dealer Operator offer any products or services to Subscribers?** {answer}")
    if details: parts.append(f"  - **Products/Services:** {details}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item5bAreSevicesSametoAllSubscribersAndBD', 'taPart2Item5bExplainDiff')
    parts.append(f"**5b. Are the terms and conditions of these products/services the same for all Subscribers?** {answer}")
    if details: parts.append(f"  - **Explanation:** {details}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item5cDoesAfflOfferProductsAndServices', 'taPart2Item5cAfflProvidedProductsAndServices')
    parts.append(f"**5c. Does an Affiliate of the Broker-Dealer Operator offer any products or services to Subscribers?** {answer}")
    if details: parts.append(f"  - **Products/Services:** {details}")
    parts.append(f"**5d. Are the terms and conditions of these products/services offered by the Affiliate the same for all Subscribers?** {format_bool(get_text(p2, 'rbPart2Item5dAreTCOfSevicesSametoAll'))}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item6aDoesEmployeeAccessConfidentialInfo', 'taPart2Item6aUnitAfflEmployeeServices')
    parts.append(f"\n**6a. Do any employees of the Broker-Dealer Operator or its Affiliates access confidential trading information?** {answer}")
    if details: parts.append(f"  - **Details:** {details}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item6bDoesAnyEntitySupportServices', 'taPart2Item6bServiceProvider')
    parts.append(f"**6b. Does any other entity provide services to the ATS?** {answer}")
    if details: parts.append(f"  - **Providers:** {details}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item6cDoesServiceProviderUseATSServices', 'taPart2Item6cProviderAfflAndServicesUsed')
    parts.append(f"**6c. Do any of these service providers also use the services of the ATS?** {answer}")
    if details: parts.append(f"  - **Details:** {details}")
    answer, details = get_rb_answer_and_details(p2, 'part2Item6dAreATSSevicesSametoAll', 'taPart2Item6dExplainDiff')
    parts.append(f"**6d. Are the services of the ATS to such service provider the same as for other similar Subscribers?** {answer}")
    if details: parts.append(f"  - **Explanation:** {details}")
    parts.append(f"\n**7a. Description of Safeguards and Procedures:**\n{get_text(p2, 'taPart2Item7aDescrOfSafeGaurdsAndProcedures')}")
    parts.append(f"**7b. Can a Subscriber consent to the disclosure of its confidential trading information?** {format_bool(get_text(p2, 'rbPart2Item7bCanSubscriberConsentToDisclosure'))}")
    parts.append(f"**7d. Summary of roles of persons with access to confidential trading information:**\n{get_text(p2, 'taPart2Item7dSummaryOfRolesRespOfPersons')}")
    # --- Part III: manner of operations Q&A ---
    p3 = form_data.find('partThree')
    parts.append("\n### Part III: Manner of Operations")
    parts.append(f"\n**1. Types of Subscribers:** {', '.join(t.text for t in p3.find_all('taPart3Item1SubscriberType')) if p3 else '—'}")
    parts.append(f"**2a. Is a Subscriber required to be a registered broker-dealer?** {format_bool(get_text(p3, 'rbPart3Item2aRegisteredBD'))}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item2bSummaryOfConditions', 'taPart3Item2bSummaryOfCndtns')
    parts.append(f"**2b. Are there any other conditions for eligibility to become a Subscriber?** {answer}")
    if details: parts.append(f"  - **Conditions:** {details}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item2cSummaryOfConditions', 'taPart3Item2cSummaryOfDifferences')
    parts.append(f"**2c. Are the conditions for eligibility the same for all persons?** {answer}")
    if details: parts.append(f"  - **Differences:** {details}")
    parts.append(f"**2d. Is there a written agreement required to use the ATS?** {format_bool(get_text(p3, 'rbPart3Item2dIsThereWrittenAgreement'))}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item3aSumryOfExcludngCondtns', 'taPart3Item3aExcludngSumryDtls')
    parts.append(f"\n**3a. Are there any conditions under which a Subscriber may be excluded?** {answer}")
    if details: parts.append(f"  - **Conditions:** {details}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item3bSummaryOfConditions', 'taPart3Item3bSummaryOfDifferences')
    parts.append(f"**3b. Are these conditions the same for all Subscribers?** {answer}")
    if details: parts.append(f"  - **Differences:** {details}")
    parts.append(f"\n**4a. Hours of Operation:**\n{get_text(p3, 'taPart3Item4aHrsOfOperation')}")
    parts.append(f"**4b. Are the hours of operation the same for all Subscribers?** {format_bool(get_text(p3, 'rbPart3Item4bIsHrsOfOperationsame'))}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item5aProtocolDetails', 'taPart3Item5aProtocolused')
    parts.append(f"\n**5a. Are Subscribers permitted to enter orders and other messages by electronic means?** {answer}")
    if details: parts.append(f"  - **Protocols:** {details}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item5bProtocolDetails', 'taPart3Item5aProtocolSumryDtls')
    parts.append(f"**5b. Are these protocols the same for all Subscribers?** {answer}")
    if details: parts.append(f"  - **Differences:** {details}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item5cOthrDtls', 'taPart3Item5cOthrMeansDtls')
    parts.append(f"**5c. Are there any other means to enter orders?** {answer}")
    if details: parts.append(f"  - **Details:** {details}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item5dTnCDetails', 'taPart3Item5dTnCSumryDtls')
    parts.append(f"**5d. Are the terms and conditions for other means the same for all Subscribers?** {answer}")
    if details: parts.append(f"  - **Differences:** {details}")
    parts.append(f"\n**6a. Are co-location services offered?** {format_bool(get_text(p3, 'rbPart3Item6aIsCoLocRltdSrvcsOfrd'))}")
    parts.append(f"**6c. Are any other means offered that reduce the latency of communications?** {format_bool(get_text(p3, 'rbPart3Item6cIsAnyOtherMeans'))}")
    parts.append(f"**6e. Are any other means offered that reduce the latency of communications between the ATS and its Subscribers?** {format_bool(get_text(p3, 'rbPart3Item6eIsAnyRducdSpOfCom'))}")
    parts.append(f"\n**7a. Order Types and Attributes:**\n{get_text(p3, 'taPart3Item7AOrdrTypExplain')}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item7bTnCDetails', 'taPart3Item7bTnCSumryDtls')
    parts.append(f"**7b. Are the order types, attributes, and instructions the same for all Subscribers?** {answer}")
    if details: parts.append(f"  - **Differences:** {details}")
    parts.append(f"\n**8a. Does the ATS require a minimum or maximum order size?** {format_bool(get_text(p3, 'rbPart3Item8aIsMinOrMaxSizeReqd'))}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item8cOddltOrdrReqs', 'taPart3Item8cOddLtOrdrReqsnProcdurs')
    parts.append(f"**8c. Are odd-lot orders accepted and executed?** {answer}")
    if details: parts.append(f"  - **Procedures:** {details}")
    parts.append(f"**8d. Are odd-lot procedures the same for all Subscribers?** {format_bool(get_text(p3, 'rbPart3Item8dIsReqsProcdurSameForAll'))}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item8eMixltOrdrDetails', 'taPart3Item8eMixltOrdrReqsProcDtls')
    parts.append(f"**8e. Are mixed-lot orders accepted and executed?** {answer}")
    if details: parts.append(f"  - **Procedures:** {details}")
    parts.append(f"**8f. Are mixed-lot procedures the same for all Subscribers?** {format_bool(get_text(p3, 'rbPart3Item8fIsRecProcSameForAll'))}")
    parts.append(f"\n**9a. Does the ATS send any messages to indicate trading interest?** {format_bool(get_text(p3, 'rbPart3Item9aIsAnyMsgToIndicTI'))}")
    parts.append(f"\n**10a. Opening/Re-opening/Closing Procedures:**\n{get_text(p3, 'taPart3Item10aOpenReOpenDtls')}")
    parts.append(f"**10b. Are these procedures the same for all Subscribers?** {format_bool(get_text(p3, 'rbPart3Item10bIsOpnReopnSameForAll'))}")
    parts.append(f"**10c. Unexecuted Orders Procedures:**\n{get_text(p3, 'taPart3Item10cUnexeOrdrTIDtls')}")
    parts.append(f"**10d. Is there any difference in execution procedures during trading hours?** {format_bool(get_text(p3, 'rbPart3Item10dIsAnyDifBtwnExeProcTrdHrs'))}")
    parts.append(f"**10e. Is there any difference in pre-opening or execution procedures following a stoppage?** {format_bool(get_text(p3, 'rbPart3Item10eIsAnyDifBtwnPreOpExecFlwngStpg'))}")
    parts.append(f"\n**11a. Structure of the NMS Stock ATS:**\n{get_text(p3, 'taPart3Item11aStrucOfNmsStk')}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item11bMeansFeciltsDtls', 'taPart3Item11bMeansFeciltsDtls')
    parts.append(f"**11b. Are the means that facilitate access the same for all Subscribers?** {answer}")
    if details: parts.append(f"  - **Differences:** {details}")
    parts.append(f"**11c. Rules and procedures of the NMS Stock ATS:**\n{get_text(p3, 'taPart3Item11cRulsProcsOfNmsStk')}")
    parts.append(f"**11d. Are these rules and procedures the same for all Subscribers?** {format_bool(get_text(p3, 'rbPart3Item11dIsProcsRulsSameForAll'))}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item12aArngmntDtls', 'taPart3Item12aArngmntTCDtls')
    parts.append(f"\n**12a. Are there any arrangements to provide liquidity?** {answer}")
    if details: parts.append(f"  - **Details:** {details}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item13aSegmntDtls', 'taPart3Item13aSegProcdurDtls')
    parts.append(f"\n**13a. Is order or trading interest segmented?** {answer}")
    if details: parts.append(f"  - **Procedures:** {details}")
    parts.append(f"**13b. Is the segmentation the same for all Subscribers?** {format_bool(get_text(p3, 'rbPart3Item13bIsSegmntatnSameForAll'))}")
    parts.append(f"**13c. Does segmentation depend on whether the order is from a customer?** {format_bool(get_text(p3, 'rbPart3Item13cIsCustmrOrdr'))}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item13dDsclrContntDtls', 'taPart3Item13dDsclosrContntDtls')
    parts.append(f"**13d. Are segmentation categories disclosed to Subscribers?** {answer}")
    if details: parts.append(f"  - **Content:** {details}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item13eArngmntDtls', 'taPart3Item13eDsclosrDiffDtls')
    parts.append(f"**13e. Is the disclosure the same for all Subscribers?** {answer}")
    if details: parts.append(f"  - **Differences:** {details}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item14aCntrPrtySelectnDtls', 'taPart3Item14aCntrPrtyDtls')
    parts.append(f"\n**14a. Is a Subscriber designated to interact with specific trading interest?** {answer}")
    if details: parts.append(f"  - **Details:** {details}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item14bSelectDtls', 'taPart3Item14bSelectnDiffDtls')
    parts.append(f"**14b. Is the counter-party selection the same for all Subscribers?** {answer}")
    if details: parts.append(f"  - **Differences:** {details}")
    parts.append(f"\n**15a. Does the ATS use electronic communications to display order and trading interest?** {format_bool(get_text(p3, 'rbPart3Item15aIsElectrncCommu'))}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item15bSubSctbDtls', 'taPart3Item15bSubscrBndDtls')
    parts.append(f"**15b. Is order and trading interest displayed to anyone other than Subscribers?** {answer}")
    if details: parts.append(f"  - **Details:** {details}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item15cDsplyProcDtls', 'taPart3Item5cDsplyProcDiffDtls')
    parts.append(f"**15c. Are the display procedures the same for all Subscribers?** {answer}")
    if details: parts.append(f"  - **Differences:** {details}")
    parts.append(f"\n**16a. Are orders or other messages routed out of the ATS?** {format_bool(get_text(p3, 'rbPart3Item16aIsInstRoutd'))}")
    parts.append(f"\n**17a. Is there any difference between the treatment of order and trading interest based on source?** {format_bool(get_text(p3, 'rbPart3Item17aIsDiffBtwnOrdTITrtmnt'))}")
    parts.append(f"**17b. Is the treatment the same for all Subscribers?** {format_bool(get_text(p3, 'rbPart3Item17bIsTrtmntSameForAll'))}")
    parts.append(f"\n**18a. Does the ATS execute trades outside of its regular trading hours?** {format_bool(get_text(p3, 'rbPart3Item18aIsOutsdeTrdingHrs'))}")
    parts.append(f"\n**19a. Fees:**\n{get_text(p3, 'taPart3Item19aSrvcUsgFees')}")
    parts.append(f"**19b. Bundled Services/Fees:**\n{get_text(p3, 'taPart3Item19bBundldSrvcUsgFees')}")
    parts.append(f"**19c. Rebates and Discounts:**\n{get_text(p3, 'taPart3Item19cRbtDiscOfFees')}")
    parts.append(f"\n**20a. Suspension of Trading Procedures:**\n{get_text(p3, 'taPart3Item20aSuspndProcdur')}")
    parts.append(f"**20b. Are these procedures the same for all Subscribers?** {format_bool(get_text(p3, 'rbPart3Item20bIsSuspndProcdurSameFrAll'))}")
    parts.append(f"\n**21a. Trade Reporting Arrangements:**\n{get_text(p3, 'taPart3Item21aMtrlArngmntDtls')}")
    parts.append(f"**21b. Are these arrangements the same for all Subscribers?** {format_bool(get_text(p3, 'rbPart3Item21bIsMtrlArngmtSameFrAll'))}")
    parts.append(f"\n**22a. Clearance and Settlement Arrangements:**\n{get_text(p3, 'taPart3Item22aMtrlArngmntDtls')}")
    answer, details = get_rb_answer_and_details(p3, 'part3Item22bMtrlArngmntDiffDtls', 'taPart3Item22bDiffDtls')
    parts.append(f"**22b. Are these arrangements the same for all Subscribers?** {answer}")
    if details: parts.append(f"  - **Differences:** {details}")
    parts.append(f"\n**23a. Market Data Sources:**\n{get_text(p3, 'taPart3Item23aMrktDatSrc')}")
    parts.append(f"**23b. Are these sources the same for all Subscribers?** {format_bool(get_text(p3, 'rbPart3Item23bIsSrcSameFrAll'))}")
    parts.append(f"\n**24a. Does the ATS aggregate Subscriber order and trading interest with that of other trading centers?** {format_bool(get_text(p3, 'rbPart3Item24aIsSubScrbrOrdr'))}")
    parts.append(f"\n**25a. Did the ATS exceed the volume thresholds of Regulation ATS?** {format_bool(get_text(p3, 'rbPart3Item25aIsAvgDlyTradinVolExcd'))}")
    parts.append(f"\n**26. Are order flow and execution statistics published?** {format_bool(get_text(p3, 'rbPart3Item26IsOrdrFloExecStatsPublshd'))}")
    return "\n\n".join(parts)
def parse_form_n_mfp3_xml(xml: BeautifulSoup, class_name_map: dict = None) -> str:
    """
    Parses an XML-based Form N-MFP3 (Monthly Schedule of Portfolio Holdings
    of Money Market Funds, 2024 schema) into a comprehensive Markdown document.

    Args:
        xml: Parsed BeautifulSoup tree of the N-MFP3 filing.
        class_name_map: Optional mapping of EDGAR class IDs to human-readable
            class names, used as a fallback when a class omits <classFullName>.

    Returns:
        A Markdown document covering filer info, Parts A-C, and signatures;
        header-only output when the filing has no <formData> element.
    """
    def get_text(node, tag, strip_ns=True):
        # None-safe, case-insensitive lookup tolerating a namespace prefix.
        if not node: return "—"
        found = node.find(re.compile(f'^(?:\\w+:)?{tag}$', re.I))
        return found.text.strip() if found and found.text else "—"
    def format_val(value_str: str, type_hint: str = 'string') -> str:
        # Normalizes raw XML text: typed numeric formatting when parseable,
        # Y/N -> Yes/No, everything else passed through unchanged.
        if not value_str or value_str.lower() in ('—', 'n/a', 'na'): return "—"
        try:
            val_float = float(value_str.replace(',', ''))
            if type_hint == 'dollar': return f"${val_float:.2f}"
            if type_hint == 'percent': return f"{val_float * 100:.4f}%"
            if type_hint == 'shares': return f"{val_float:.4f}"
            if type_hint == 'yield': return f"{val_float * 100:.4f}%"
            if type_hint == 'number': return f"{val_float:.4f}"
        except (ValueError, TypeError):
            pass
        if value_str.upper() == 'Y': return "Yes"
        if value_str.upper() == 'N': return "No"
        return value_str
    parts = ["# Form N-MFP3: Monthly Schedule of Portfolio Holdings"]
    filer_info_section = []
    header_data = xml.find('headerData')
    if header_data:
        submission_type = get_text(header_data, 'submissionType')
        filer_info_section.append(f"**Submission Type:** {submission_type}")
        filer_creds = header_data.find('filerCredentials')
        if filer_creds:
            filer_info_section.append(f"**CIK:** {get_text(filer_creds, 'cik')}")
        parts.append("## N-MFP: Filer Information\n" + "\n".join(filer_info_section))
    form_data = xml.find('formData')
    if not form_data:
        # Nothing beyond the header can be extracted; return what we have
        # instead of crashing on form_data.find() below.
        return "\n\n".join(parts)
    gen_info = form_data.find('generalInfo')
    filing_info_section = ["### General Information"]
    gen_data = {
        "Report for (YYYY-MM-DD)": get_text(gen_info, 'reportDate'),
        "Registrant Full Name": get_text(gen_info, 'registrantFullName'),
        "CIK Number of Registrant": get_text(gen_info, 'cik'),
        "LEI of Registrant": get_text(gen_info, 'registrantLEIId'),
        "Name of Series": get_text(gen_info, 'nameOfSeries'),
        "LEI of Series": get_text(gen_info, 'leiOfSeries'),
        "EDGAR Series Identifier": get_text(gen_info, 'seriesId'),
        "Total number of share classes in the series": get_text(gen_info, 'totalShareClassesInSeries'),
        "Is this the fund's final filing on Form N-MFP?": format_val(get_text(gen_info, 'finalFilingFlag')),
        "Has the fund acquired or merged with another fund?": format_val(get_text(gen_info, 'fundAcqrdOrMrgdWthAnthrFlag')),
    }
    for key, val in gen_data.items():
        filing_info_section.append(f"**{key}:** {val}")
    parts.append("\n\n".join(filing_info_section))
    # --- Part A: series-level information ---
    series_info = form_data.find('seriesLevelInfo')
    parts.append("\n## Part A: Series-Level Information about the Fund")
    service_providers = []
    # Guard each walrus with series_info: the element may be absent and
    # .find() on None would raise.
    if series_info and (adviser_node := series_info.find("adviser")):
        service_providers.append({"Role": "Investment Adviser", "Details": get_text(adviser_node, 'adviserName'), "File/CIK Number": get_text(adviser_node, 'adviserFileNumber')})
    if series_info and (accountant_node := series_info.find('indpPubAccountant')):
        acc_details = f"{get_text(accountant_node, 'name')} City: {get_text(accountant_node, 'city')} State: {get_text(accountant_node, 'stateCountry')}"
        service_providers.append({"Role": "Independent Public Accountant", "Details": acc_details, "File/CIK Number": "—"})
    if series_info and (admin_node := series_info.find('administrator')):
        service_providers.append({"Role": "Administrator", "Details": get_text(admin_node, 'administratorName'), "File/CIK Number": "—"})
    if series_info and (transfer_agent_node := series_info.find('transferAgent')):
        ta_details = f"{get_text(transfer_agent_node, 'name')} CIK: {get_text(transfer_agent_node, 'cik')}"
        service_providers.append({"Role": "Transfer Agent", "Details": ta_details, "File/CIK Number": get_text(transfer_agent_node, 'fileNumber')})
    if service_providers:
        parts.append("\n### Service Providers\n" + to_compact_markdown(pd.DataFrame(service_providers), index=False))
    fund_chars = {
        "Securities Act File Number": get_text(series_info, 'securitiesActFileNumber'),
        "Is this a Feeder Fund?": format_val(get_text(series_info, 'feederFundFlag')),
        "Is this a Master Fund?": format_val(get_text(series_info, 'masterFundFlag')),
        "Is this series for insurance company separate accounts?": format_val(get_text(series_info, 'seriesFundInsuCmpnySepAccntFlag')),
        "Money Market Fund Category": get_text(series_info, 'moneyMarketFundCategory'),
        "Is this a Retail Money Market Fund?": format_val(get_text(series_info, 'fundRetailMoneyMarketFlag')),
        "Is this a Government Money Market Fund?": format_val(get_text(series_info, 'govMoneyMrktFundFlag')),
        "WAM": f"{get_text(series_info, 'averagePortfolioMaturity')} days",
        "WAL": f"{get_text(series_info, 'averageLifeMaturity')} days",
        "Total Value of Portfolio Securities": format_val(get_text(series_info, 'totalValuePortfolioSecurities'), 'dollar'),
        "Amortized Cost of Portfolio Securities": format_val(get_text(series_info, 'amortizedCostPortfolioSecurities'), 'dollar'),
        "Cash": format_val(get_text(series_info, 'cash'), 'dollar'),
        "Total Other Assets": format_val(get_text(series_info, 'totalValueOtherAssets'), 'dollar'),
        "Total Liabilities": format_val(get_text(series_info, 'totalValueLiabilities'), 'dollar'),
        "Net Assets of Series": format_val(get_text(series_info, 'netAssetOfSeries'), 'dollar'),
        "Number of Shares Outstanding (Series)": format_val(get_text(series_info, 'numberOfSharesOutstanding'), 'number'),
        "Does the fund seek to maintain a stable price per share?": format_val(get_text(series_info, 'seekStablePricePerShare')),
        "Stable Price Per Share": format_val(get_text(series_info, 'stablePricePerShare'), 'dollar'),
        "Is cash management vehicle an affiliated fund?": format_val(get_text(series_info, 'cashMgmtVehicleAffliatedFundFlag')),
        "Does the fund apply liquidity fees?": format_val(get_text(series_info, 'liquidityFeeFundApplyFlag')),
    }
    parts.append("\n### Fund Characteristics & Assets")
    for key, val in fund_chars.items():
        # " days" alone means the WAM/WAL tag was missing ("—" stripped).
        if val not in ("—", " days"): parts.append(f"- **{key}:** {val}")
    liquid_asset_details = series_info.find_all('liquidAssetsDetails') if series_info else []
    if liquid_asset_details:
        liquid_data = [{
            "Date": get_text(d, 'totalLiquidAssetsNearPercentDate'),
            "Daily Liquid Assets ($)": format_val(get_text(d, 'totalValueDailyLiquidAssets'), 'dollar'),
            "Weekly Liquid Assets ($)": format_val(get_text(d, 'totalValueWeeklyLiquidAssets'), 'dollar'),
            "Daily Liquid Assets (%)": format_val(get_text(d, 'percentageDailyLiquidAssets'), 'percent'),
            "Weekly Liquid Assets (%)": format_val(get_text(d, 'percentageWeeklyLiquidAssets'), 'percent'),
        } for d in liquid_asset_details]
        parts.append("\n### Daily & Weekly Liquid Assets\n" + to_compact_markdown(pd.DataFrame(liquid_data), index=False))
    seven_day_yields = series_info.find_all('sevenDayGrossYield') if series_info else []
    if seven_day_yields:
        yield_data = [{
            "Date": get_text(y, 'sevenDayGrossYieldDate'),
            "7-Day Gross Yield": format_val(get_text(y, 'sevenDayGrossYieldValue'), 'yield'),
        } for y in seven_day_yields]
        parts.append("\n### 7-Day Gross Yield\n" + to_compact_markdown(pd.DataFrame(yield_data), index=False))
    daily_navs = series_info.find_all('dailyNetAssetValuePerShareSeries') if series_info else []
    if daily_navs:
        nav_data = [{
            "Date": get_text(n, 'dailyNetAssetValuePerShareDateSeries'),
            "Net Asset Value per Share": format_val(get_text(n, 'dailyNetAssetValuePerShareSeries'), 'shares'),
        } for n in daily_navs]
        parts.append("\n### Daily Net Asset Value per Share (Series)\n" + to_compact_markdown(pd.DataFrame(nav_data), index=False))
    # --- Part B: class-level information ---
    class_level_nodes = form_data.find_all('classLevelInfo')
    if class_level_nodes:
        parts.append("\n## Part B: Class-Level Information about the Fund")
        if class_name_map is None: class_name_map = {}
        for node in class_level_nodes:
            class_id = get_text(node, 'classesId')
            class_name = get_text(node, 'classFullName')
            if class_name == "—":
                # get_text() returns the truthy sentinel "—" for missing tags,
                # so a plain `or` fallback would never fire; test explicitly.
                class_name = class_name_map.get(class_id, f"Unknown Class ({class_id})")
            parts.append(f"\n### Class: {class_name}")
            class_details = {
                "Minimum Initial Investment": format_val(get_text(node, 'minInitialInvestment'), 'dollar'),
                "Net Assets of Class": format_val(get_text(node, 'netAssetsOfClass'), 'dollar'),
                "Number of Shares Outstanding": format_val(get_text(node, 'numberOfSharesOutstanding'), 'number'),
                "Expense Reimbursement/Waiver": get_text(node, 'nameOfPersonDescExpensePay'),
            }
            for key, val in class_details.items():
                if val not in ("—", "0.00"): parts.append(f"- **{key}:** {val}")
            class_daily_navs = node.find_all('dailyNetAssetValuePerShareClass')
            if class_daily_navs:
                nav_data = [{"Date": get_text(n, 'dailyNetAssetValuePerShareDateClass'), "NAV per Share": format_val(get_text(n, 'dailyNetAssetValuePerShareClass'), 'shares')} for n in class_daily_navs]
                parts.append("\n**Daily Net Asset Value per Share (Class)**\n" + to_compact_markdown(pd.DataFrame(nav_data), index=False))
            # NOTE(review): 'dialy' appears to mirror a schema-side tag
            # spelling; confirm against the EDGAR N-MFP spec before "fixing".
            daily_flows = node.find_all('dialyShareholderFlowReported')
            if daily_flows:
                flow_data = [{"Date": get_text(f, 'dailyShareHolderFlowDate'), "Gross Subscriptions ($)": format_val(get_text(f, 'dailyGrossSubscriptions'), 'dollar'), "Gross Redemptions ($)": format_val(get_text(f, 'dailyGrossRedemptions'), 'dollar')} for f in daily_flows]
                parts.append("\n**Daily Shareholder Flows**\n" + to_compact_markdown(pd.DataFrame(flow_data), index=False))
            monthly_flow = node.find('monthlyShareholderFlowReported')
            if monthly_flow:
                parts.append(f"**Total Gross Subscriptions (Month):** {format_val(get_text(monthly_flow, 'totalGrossSubscriptions'), 'dollar')}")
                parts.append(f"**Total Gross Redemptions (Month):** {format_val(get_text(monthly_flow, 'totalGrossRedemptions'), 'dollar')}")
            class_yields = node.find_all('sevenDayNetYield')
            if class_yields:
                yield_data = [{"Date": get_text(y, 'sevenDayNetYieldDate'), "7-Day Net Yield": format_val(get_text(y, 'sevenDayNetYieldValue'), 'yield')} for y in class_yields]
                parts.append("\n**7-Day Net Yield (Class)**\n" + to_compact_markdown(pd.DataFrame(yield_data), index=False))
            owner_cats = node.find_all('beneficialRecordOwnerCategory')
            if owner_cats:
                owner_data = []
                for cat in owner_cats:
                    owner_data.append({
                        "Category": get_text(cat, 'beneficialRecordOwnerCategoryType'),
                        "Other Category": get_text(cat, 'otherInvestorCategory'),
                        "Record Owner %": format_val(get_text(cat, 'percentOutstandingSharesRecord'), 'percent'),
                        "Beneficial Owner %": format_val(get_text(cat, 'percentOutstandingSharesBeneficial'), 'percent'),
                    })
                parts.append("\n**Beneficial/Record Owner Categories**\n" + to_compact_markdown(pd.DataFrame(owner_data), index=False))
    # --- Part C: per-security schedule ---
    securities_nodes = form_data.find_all('scheduleOfPortfolioSecuritiesInfo')
    if securities_nodes:
        parts.append("\n## Part C: Schedule of Portfolio Securities")
        for i, node in enumerate(securities_nodes):
            parts.append(f"\n### Security {i+1}: {get_text(node, 'nameOfIssuer')}")
            security_details = [
                f"**C.1 - Title:** {get_text(node, 'titleOfIssuer')}",
                f"**C.6 - Investment Category:** {get_text(node, 'investmentCategory')}",
            ]
            id_data = {"C.3 - CUSIP": get_text(node, 'CUSIPMember'), "C.4 - ISIN": get_text(node, 'ISINId'), "C.3 - LEI": get_text(node, 'LEIID'), "C.5 - Other ID": get_text(node, 'otherUniqueId')}
            id_str = ", ".join([f"{k}: {v}" for k, v in id_data.items() if v != "—"])
            if id_str: security_details.append(f"**Identifiers:** {id_str}")
            security_details.extend([
                f"**C.18 - Value (incl. sponsor support):** {format_val(get_text(node, 'includingValueOfAnySponsorSupport'), 'dollar')}",
                f"**C.18.a - Value (excl. sponsor support):** {format_val(get_text(node, 'excludingValueOfAnySponsorSupport'), 'dollar')}",
                f"**C.19 - Percentage of Net Assets:** {format_val(get_text(node, 'percentageOfMoneyMarketFundNetAssets'), 'percent')}",
                f"**C.17 - Yield as of Reporting Date:** {format_val(get_text(node, 'yieldOfTheSecurityAsOfReportingDate'), 'yield')}",
                f"**C.11 - Maturity Date (WAM):** {get_text(node, 'investmentMaturityDateWAM')}",
                f"**C.12 - Maturity Date (WAL):** {get_text(node, 'investmentMaturityDateWAL')}",
                f"**C.13 - Final Legal Maturity Date:** {get_text(node, 'finalLegalInvestmentMaturityDate')}",
            ])
            ratings = [f"{get_text(n, 'nameOfNRSRO')}: {get_text(n, 'rating')}" for n in node.find_all('assigningNRSRORating')]
            if ratings: security_details.append(f"**C.10 - Ratings:** {'; '.join(ratings)}")
            flags = {
                "C.9 - Eligible Security?": format_val(get_text(node, 'securityEligibilityFlag')),
                "C.14 - Has Demand Feature?": format_val(get_text(node, 'securityDemandFeatureFlag')),
                "C.15 - Has Guarantee?": format_val(get_text(node, 'securityGuaranteeFlag')),
                "C.16 - Has Enhancement?": format_val(get_text(node, 'securityEnhancementsFlag')),
                "C.22 - Is an Illiquid Security?": format_val(get_text(node, 'illiquidSecurityFlag')),
                "C.20 - Is a Daily Liquid Asset?": format_val(get_text(node, 'dailyLiquidAssetSecurityFlag')),
                "C.21 - Is a Weekly Liquid Asset?": format_val(get_text(node, 'weeklyLiquidAssetSecurityFlag')),
                "C.23 - Categorized at Level 3?": format_val(get_text(node, 'securityCategorizedAtLevel3Flag')),
            }
            flag_str = ", ".join([f"{k.split('-')[0].strip()} {k.split('-')[1].strip()} {v}" for k, v in flags.items() if v != "—"])
            if flag_str: security_details.append(f"**Characteristics:** {flag_str}")
            parts.append("\n".join(f"- {item}" for item in security_details))
            repo_node = node.find('repurchaseAgreement')
            if repo_node and repo_node.get_text(strip=True):
                parts.append("\n**C.8 - Repurchase Agreement Details:**")
                repo_details = {
                    "Is Open?": format_val(get_text(repo_node, 'repurchaseAgreementOpenFlag')),
                    "Is Cleared?": format_val(get_text(repo_node, 'repurchaseAgreementClearedFlag')),
                    "Name of CCP": get_text(repo_node, 'nameOfCCP'),
                    "Is Tri-party?": format_val(get_text(repo_node, 'repurchaseAgreementTripartyFlag')),
                }
                for key, val in repo_details.items():
                    if val != "—": parts.append(f"- **{key}:** {val}")
                collateral_issuers = repo_node.find_all('collateralIssuers')
                if collateral_issuers:
                    collateral_data = []
                    for issuer in collateral_issuers:
                        coupon_str = get_text(issuer, 'coupon')
                        yield_str = get_text(issuer, 'yield')
                        # Coupon/yield are stored as plain percentages here
                        # (no *100 scaling, unlike 'percent'/'yield' hints).
                        try:
                            coupon_formatted = f"{float(coupon_str):.4f}%" if coupon_str != "—" else "—"
                        except (ValueError, TypeError):
                            coupon_formatted = coupon_str
                        try:
                            yield_formatted = f"{float(yield_str):.4f}%" if yield_str != "—" else "—"
                        except (ValueError, TypeError):
                            yield_formatted = yield_str
                        collateral_data.append({
                            "Issuer Name": get_text(issuer, 'nameOfCollateralIssuer'),
                            "Maturity Date": get_text(issuer.find('maturityDate'), 'date'),
                            "Coupon": coupon_formatted,
                            "Yield": yield_formatted,
                            "Principal Amount": format_val(get_text(issuer, 'principalAmountToTheNearestCent'), 'dollar'),
                            "Collateral Value": format_val(get_text(issuer, 'valueOfCollateralToTheNearestCent'), 'dollar'),
                            "Category": get_text(issuer, 'ctgryInvestmentsRprsntsCollateral'),
                        })
                    parts.append("\n**Collateral:**\n" + to_compact_markdown(pd.DataFrame(collateral_data), index=False))
    # --- Signatures ---
    sig = form_data.find('signature')
    if sig:
        parts.append("\n## N-MFP: Signatures")
        parts.append(f"**Registrant:** {get_text(sig, 'registrant')}")
        parts.append(f"**Date:** {get_text(sig, 'signatureDate')}")
        parts.append(f"**By:** {get_text(sig, 'signature')}")
        parts.append(f"**Name of Signing Officer:** {get_text(sig, 'nameOfSigningOfficer')}")
        parts.append(f"**Title of Signing Officer:** {get_text(sig, 'titleOfSigningOfficer')}")
    return "\n\n".join(parts)
def parse_form_sbsef_xml(xml: BeautifulSoup) -> str:
    """
    Render an XML-based Form SBSEF or SBSEF/A filing as a structured
    Markdown document (filer credentials plus principal information).
    Returns "" when no <edgarSubmission> root is present.
    """
    def _text(node, tag):
        # Case-insensitive exact-name lookup; "—" is the sentinel for "absent".
        if not node:
            return "—"
        hit = node.find(re.compile(f'^{tag}$', re.I))
        if hit and hit.text:
            return hit.text.strip()
        return "—"

    def _address(node) -> str:
        # Comma-join whichever address components are actually present.
        if not node:
            return "—"
        pieces = (_text(node, t) for t in ('street1', 'street2', 'city', 'stateOrCountry', 'zipCode'))
        return ", ".join(p for p in pieces if p and p != "—")

    submission = xml.find('edgarSubmission')
    if not submission:
        return ""
    header = submission.find('headerData')
    form_data = submission.find('formData')
    principal = form_data.find('principalInfo') if form_data else None
    sub_type = _text(header, 'submissionType')
    if sub_type == "SBSEF":
        heading = "FORM SBSEF: Registration as a Security-Based Swap Execution Facility"
    else:
        heading = "FORM SBSEF/A: Amendment to Registration as a Security-Based Swap Execution Facility"
    out = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        f"## {heading}\n"
    ]
    creds = header.find('filerCredentials') if header else None
    out.append("### Filer Information")
    out.append(f"**Submission Type:** {sub_type}")
    out.append(f"**CIK:** {_text(creds, 'cik')}")
    if principal:
        out.append("\n### Principal Information")
        for label, value in (
            ("Full Name of Applicant", _text(principal, 'applicantName')),
            ("Principal Place of Business Address", _address(principal)),
        ):
            if value and value != "—":
                out.append(f"**{label}:** {value}")
        amended = _text(principal, 'amendedItemsList')
        if amended and amended != "—":
            out.append("\n**Amended Items in this Filing:**")
            # The list arrives as one comma-separated string.
            out.extend(f"- {entry.strip()}" for entry in amended.split(',') if entry.strip())
    return "\n\n".join(out)
def parse_any_xml(xml_contents, pdf_docs=None, class_name_map=None) -> str:
    """
    Route to the correct form-specific parser based on the form's unique XML structure.
    Accepts a list of XML content strings.

    Args:
        xml_contents: Raw XML strings, one per filing attachment.
        pdf_docs: Optional PDF attachments, rendered first via parse_pdf_attachments.
        class_name_map: Optional mapping passed through to the N-MFP parsers.

    Returns:
        Concatenated Markdown for every part that parsed successfully,
        or "" when nothing was recognized.
    """
    if not xml_contents and not pdf_docs:
        return ""
    all_parts = []
    if pdf_docs:
        pdf_md = parse_pdf_attachments(pdf_docs)
        if pdf_md:
            all_parts.append(pdf_md)
    for xml_content in xml_contents:
        if not xml_content.strip():
            continue
        # Re-join hard-wrapped lines (wrap artifacts continue with a lowercase letter).
        xml_content = re.sub(r'\n(?=[a-z])', '', xml_content)
        # Escape bare ampersands so the XML parser doesn't choke; the negative
        # lookahead leaves well-formed entities (&amp; &#160; ...) untouched.
        # (The previous replacement string was '&' itself — a no-op.)
        xml_content = re.sub(r'&(?![a-zA-Z0-9#]{2,};)', '&amp;', xml_content)
        soup = BeautifulSoup(xml_content, "lxml-xml")
        parsed_part = ""
        if soup.find(re.compile(r'^ownershipDocument$', re.I)):
            # Section 16 ownership forms (3/4/5) share one document root.
            doc_type_tag = soup.find(re.compile(r'^documentType$', re.I))
            doc_type = doc_type_tag.text.strip() if doc_type_tag else ''
            if doc_type == '3':
                parsed_part = parse_form3_xml(soup)
            else:
                parsed_part = parse_form4_xml(soup, doc_type or "4")
        elif edgar_submission := soup.find(re.compile(r'(?:\w+:)?edgarSubmission$', re.I)):
            form_type_tag = edgar_submission.find(re.compile(r'(?:\w+:)?(submissionType|formtype)$', re.I))
            form_type = form_type_tag.text.strip().upper() if form_type_tag else ""
            # NOTE: branch order matters — more specific prefixes must be
            # tested before their shorter prefixes ("CFPORTAL" before "C",
            # "MA-I"/"MA-W" before "MA").
            if form_type.startswith("SBSEF"):
                parsed_part = parse_form_sbsef_xml(soup)
            elif form_type.startswith("ATS-N"):
                parsed_part = parse_form_atsn_xml(soup)
            elif form_type.startswith("SBSE-A"):
                parsed_part = parse_sbse_a_xml(soup)
            elif form_type.startswith("X-17A-5"):
                parsed_part = parse_form_x17a5_xml(soup)
            elif form_type.startswith("24F-2NT"):
                parsed_part = parse_form_24f2nt_xml(soup)
            elif form_type.startswith("CFPORTAL"):
                parsed_part = parse_form_cfportal_xml(soup)
            elif form_type.startswith("TA-1"):
                parsed_part = parse_form_ta1_xml(soup)
            elif form_type.startswith("TA-W"):
                parsed_part = parse_form_taw_xml(soup)
            elif form_type.startswith("TA-2"):
                parsed_part = parse_form_ta2_xml(soup)
            elif form_type.startswith("MA-I"):
                parsed_part = parse_form_mai_xml(soup)
            elif form_type.startswith("MA-W"):
                parsed_part = parse_form_maw_xml(soup)
            elif form_type.startswith("MA"):
                parsed_part = parse_form_ma_xml(soup)
            elif form_type.startswith("1-A") or form_type.startswith("DOS"):
                parsed_part = parse_form1a_xml(soup)
            elif form_type.startswith("1-K"):
                parsed_part = parse_form1k_xml(soup)
            elif form_type.startswith("1-Z"):
                parsed_part = parse_form1z_xml(soup)
            elif form_type.startswith("SCHEDULE 13G"):
                parsed_part = parse_schedule13g_xml(soup)
            elif form_type.startswith("SCHEDULE 13D"):
                parsed_part = parse_schedule13d_xml(soup)
            elif form_type.startswith("C"):
                parsed_part = parse_form_c_xml(soup)
            elif form_type == "D" or form_type == "D/A":
                parsed_part = parse_form_d_xml(soup)
            elif form_type in ("EFFECT", "QUALIF"):
                parsed_part = parse_effect_xml(soup)
            elif form_type.startswith("13F-"):
                # 13F parsers consume the whole attachment set at once.
                parsed_part = parse_form13f_hr_xml(xml_contents)
                if parsed_part: all_parts.append(parsed_part)
                break
            elif form_type == "N-PX":
                # N-PX is two-part; hand over every attachment and stop.
                parsed_part = parse_form_npx_xml(xml_contents)
                if parsed_part: all_parts.append(parsed_part)
                break
            elif form_type.startswith("N-MFP"):
                if form_type == "N-MFP3":
                    parsed_part = parse_form_n_mfp3_xml(soup, class_name_map=class_name_map)
                elif soup.find(re.compile(r'(?:\w+:)?seriesLevelInfo$', re.I)):
                    # Legacy (pre-N-MFP2) schema.
                    parsed_part = parse_legacy_n_mfp_xml(soup, class_name_map=class_name_map)
                elif soup.find('formData'):
                    parsed_part = parse_form_n_mfp2_xml(soup, class_name_map=class_name_map)
                else:
                    parsed_part = parse_legacy_n_mfp_xml(soup, class_name_map=class_name_map)
            elif form_type.startswith("NPORT-P"):
                parsed_part = parse_nport_p_xml(soup)
            elif form_type.startswith("144"):
                parsed_part = parse_form144_xml(soup, form_type)
            elif form_type.startswith("N-CEN"):
                parsed_part = parse_form_n_cen_xml(soup)
        elif soup.find(re.compile(r'^notificationOfRemoval$', re.I)):
            parsed_part = parse_form25_xml(soup)
        elif asset_data_tag := soup.find(re.compile(r'^(?:ns\d+:)?assetData$', re.I)):
            # ABS-EE: a full asset data file has <assets>; otherwise it is
            # the comments-only variant.
            if asset_data_tag.find(re.compile(r'^(?:ns\d+:)?assets$', re.I)):
                parsed_part = parse_abs_ee_xml(soup)
            else:
                parsed_part = parse_abs_ee_comments_xml(soup)
        elif soup.find(re.compile(r'^comments$', re.I)):
            parsed_part = parse_abs_ee_comments_xml(soup)
        if parsed_part:
            all_parts.append(parsed_part)
    return "\n\n".join(all_parts)
def parse_effect_xml(xml: BeautifulSoup) -> str:
    """
    Render an XML Notice of Effectiveness (EFFECT) or Notice of Qualification
    (QUALIF) as structured Markdown, tolerating schema variations.
    Returns "" when no <effectiveData> element exists.
    """
    def _lookup(node, tag):
        # Case-insensitive exact-name lookup with a "—" placeholder.
        if not node:
            return "—"
        match = node.find(re.compile(f'^{tag}$', re.I))
        return match.text.strip() if match else "—"

    parts = [
        "### UNITED STATES\n"
        "### SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
    ]
    overall_type = _lookup(xml.find('edgarSubmission'), 'submissionType')
    parts.append("## Notice of Qualification" if overall_type == 'QUALIF' else "## Notice of Effectiveness")
    effective = xml.find(re.compile(r'^effectiveData$', re.I))
    if not effective:
        return ""
    raw_date = _lookup(effective, 'finalEffectivenessDispDate')
    disp_time = _lookup(effective, 'finalEffectivenessDispTime')
    display_date = raw_date
    if raw_date != "—":
        try:
            # Prettify ISO dates as e.g. "March 5, 2024"; keep the raw
            # string when it isn't ISO-formatted.
            parsed = datetime.datetime.strptime(raw_date, '%Y-%m-%d')
            display_date = f"{parsed.strftime('%B')} {parsed.day}, {parsed.strftime('%Y')}"
        except ValueError:
            display_date = raw_date
    detail_lines = [f"**Date:** {display_date}"]
    if disp_time != "—":
        detail_lines.append(f"**Time:** {disp_time}")
    accession = _lookup(effective, 'accessionNumber')
    if accession != "—":
        detail_lines.append(f"**Accession Number:** {accession}")
    # Some schema variants carry the form type under <form> instead of
    # <submissionType>.
    form_kind = _lookup(effective, 'submissionType')
    if form_kind == "—":
        form_kind = _lookup(effective, 'form')
    if form_kind != "—":
        detail_lines.append(f"**Form Type:** {form_kind}")
    parts.append("\n\n".join(detail_lines))
    for filer in effective.find_all(re.compile(r'^filer$', re.I)):
        parts.append("\n---")
        parts.append(f"**CIK:** {_lookup(filer, 'cik')}")
        parts.append(f"**Company Name:** {_lookup(filer, 'entityName')}")
        parts.append(f"**File Number:** {_lookup(filer, 'fileNumber')}")
    return "\n\n".join(parts)
def parse_form13f_hr_xml(xml_contents: list) -> str:
    """
    Parses a 13F-HR, 13F-NT, or 13F-HR/A filing from its XML components into a
    comprehensive Markdown document. This version formats the main table with
    fully merged headers for a professional appearance.

    Args:
        xml_contents: Raw XML strings — one should contain the
            <edgarSubmission> cover/summary data, another (optional) the
            <informationTable> holdings.

    Returns:
        The full Markdown rendering, or "" when no submission document is
        found or the XML cannot be parsed at all.
    """
    if not xml_contents:
        return ""
    try:
        soups = [BeautifulSoup(xml, "lxml-xml") for xml in xml_contents]
        submission_soup = next((s for s in soups if s.find(re.compile(r'(?:\w+:)?edgarSubmission$', re.I))), None)
        infotable_soup = next((s for s in soups if s.find(re.compile(r'(?:\w+:)?informationTable$', re.I))), None)
    except Exception:
        # Unparseable XML anywhere in the set: give up on the whole filing.
        return ""
    if not submission_soup:
        return ""
    def get_text(node, tag):
        # Namespace-tolerant lookup; commas are stripped so numeric fields
        # can be re-parsed with int() downstream.
        if not node: return "—"
        found = node.find(re.compile(rf'(?:\w+:)?{tag}$', re.I))
        text = found.text.strip() if found and found.text and found.text.strip() else "—"
        return html.unescape(text).replace(',', '')
    parts = [
        "### FORM 13F COVER PAGE\n"
    ]
    cover_page = submission_soup.find(re.compile(r'(?:\w+:)?coverPage$', re.I))
    manager = cover_page.find(re.compile(r'(?:\w+:)?filingManager$', re.I)) if cover_page else None
    manager_addr = manager.find(re.compile(r'(?:\w+:)?address$', re.I)) if manager else None
    parts.append(f"**Report for the Calendar Year or Quarter Ended:** {get_text(cover_page, 'reportCalendarOrQuarter')}")
    is_amendment = get_text(cover_page, 'isAmendment').lower() == 'true'
    am_info = cover_page.find(re.compile(r'(?:\w+:)?amendmentInfo$', re.I)) if is_amendment else None
    am_type = get_text(am_info, 'amendmentType') if am_info else ''
    amendment_no_text = get_text(cover_page, 'amendmentNo') if is_amendment else ''
    parts.append(f"**Check here if Amendment** [{'x' if is_amendment else ' '}] **Amendment Number:** {amendment_no_text}")
    parts.append(f"**This Amendment (Check only one.):** [{'x' if 'RESTATEMENT' in am_type.upper() else ' '}] is a restatement.")
    parts.append(f"[{'x' if 'NEW HOLDINGS' in am_type.upper() else ' '}] adds new holdings entries.")
    parts.append("\n**Institutional Investment Manager Filing this Report:**\n")
    filer_name = get_text(manager, 'name')
    addr_parts = {
        'street1': get_text(manager_addr, 'street1'),
        'street2': get_text(manager_addr, 'street2'),
        'city': get_text(manager_addr, 'city'),
        'state': get_text(manager_addr, 'stateOrCountry'),
        'zip': get_text(manager_addr, 'zipCode')
    }
    address_lines = []
    if addr_parts['street1'] != "—":
        address_lines.append(addr_parts['street1'])
    if addr_parts['street2'] != "—":
        address_lines.append(addr_parts['street2'])
    # Only emit the "City, State Zip" line when at least one component is
    # present. (The previous collapse-and-compare check could never fire,
    # because get_text yields "—" — not "" — per missing component, so an
    # all-missing address still printed "—, — —".)
    if any(addr_parts[k] != "—" for k in ('city', 'state', 'zip')):
        address_lines.append(f"{addr_parts['city']}, {addr_parts['state']} {addr_parts['zip']}")
    filer_addr = " ".join(address_lines)
    parts.append(f"**Name:** {filer_name} **Address:** {filer_addr}")
    parts.append(f"\n**Form 13F File Number:** {get_text(cover_page, 'form13FFileNumber')}")
    crd_num = get_text(cover_page, 'crdNumber')
    sec_num = get_text(cover_page, 'secFileNumber')
    if crd_num != "—":
        parts.append(f"**CRD Number (if applicable):** {crd_num}")
    if sec_num != "—":
        parts.append(f"**SEC File Number (if applicable):** {sec_num}\n")
    parts.append("The institutional investment manager filing this report and the person by whom it is signed hereby represent that the person signing the report is authorized to submit it, that all information contained herein is true, correct and complete, and that it is understood that all required items, statements, schedules, lists, and tables, are considered integral parts of this form.\n")
    sig_block = submission_soup.find(re.compile(r'(?:\w+:)?signatureBlock$', re.I))
    parts.append("**Person Signing this Report on Behalf of Reporting Manager:**\n")
    parts.append(f"**Name:** {get_text(sig_block, 'name')} **Title:** {get_text(sig_block, 'title')} **Phone:** {get_text(sig_block, 'phone')}")
    parts.append("\n**Signature, Place, and Date of Signing:**\n")
    signature_line = f"{get_text(sig_block, 'signature')} {get_text(sig_block, 'city')}, {get_text(sig_block, 'stateOrCountry')} {get_text(sig_block, 'signatureDate')}"
    placeholders_line = "[Signature] [City, State] [Date]"
    parts.append(f"{signature_line} {placeholders_line}\n")
    report_type_str = get_text(cover_page, 'reportType').upper()
    parts.append("**Report Type (Check only one.):**")
    parts.append(f"[{'x' if 'HOLDINGS' in report_type_str else ' '}] **13F HOLDINGS REPORT.** (Check here if all holdings of this reporting manager are reported in this report.)")
    parts.append(f"[{'x' if 'NOTICE' in report_type_str else ' '}] **13F NOTICE.** (Check here if no holdings are reported in this report, and all holdings are reported by other reporting manager(s).)")
    parts.append(f"[{'x' if 'COMBINATION' in report_type_str else ' '}] **13F COMBINATION REPORT.** (Check here if a portion of the holdings for this reporting manager are reported in this report and a portion are reported by other reporting manager(s).)\n")
    parts.append("### Form 13F Summary Page\n")
    parts.append("**Report Summary:**\n")
    summary = submission_soup.find(re.compile(r'(?:\w+:)?summaryPage$', re.I))
    other_managers_info = cover_page.find(re.compile(r'(?:\w+:)?otherManagersInfo$', re.I)) if cover_page else None
    notice_managers = other_managers_info.find_all(re.compile(r'(?:\w+:)?otherManager$', re.I)) if other_managers_info else []
    if 'NOTICE' in report_type_str:
        # A pure notice reports no holdings of its own.
        parts.append("**Number of Other Included Managers:** 0")
        parts.append("**Form 13F Information Table Entry Total:** 0")
        parts.append("**Form 13F Information Table Value Total:** $0")
    else:
        value_total_raw = get_text(summary, 'tableValueTotal')
        # NOTE(review): no thousands separator is applied (the empty format
        # spec in the original is equivalent to str()); confirm whether
        # "{:,}" grouping was intended before changing the output text.
        value_total_formatted = str(int(value_total_raw)) if value_total_raw.isdigit() else '—'
        parts.append(f"**Number of Other Included Managers:** {get_text(summary, 'otherIncludedManagersCount')}")
        parts.append(f"**Form 13F Information Table Entry Total:** {get_text(summary, 'tableEntryTotal')}")
        parts.append(f"**Form 13F Information Table Value Total:** ${value_total_formatted}")
        parts.append(" (round to nearest dollar)\n")
    parts.append("**List of Other Included Managers:**")
    parts.append("Provide a numbered list of the name(s) and Form 13F file number(s) of all institutional investment managers with respect to which this report is filed, other than the manager filing this report.")
    parts.append("[If there are no entries in this list, state “NONE” and omit the column headings and list entries.]")
    if notice_managers:
        manager_data = []
        for i, manager in enumerate(notice_managers, 1):
            manager_data.append({
                'No.': str(i),
                'Name': get_text(manager, 'name'),
                'Form 13F File Number': get_text(manager, 'form13FFileNumber'),
                'CRD Number': get_text(manager, 'crdNumber'),
                'SEC File Number': get_text(manager, 'secFileNumber'),
            })
        column_order = ['No.', 'Name', 'Form 13F File Number', 'CRD Number', 'SEC File Number']
        manager_df = pd.DataFrame(manager_data).reindex(columns=column_order).fillna("—")
        parts.append(to_compact_markdown(manager_df, index=False))
    else:
        summary_managers = summary.find_all(re.compile(r'(?:\w+:)?manager$', re.I)) if summary else []
        if summary_managers:
            manager_data = []
            for manager in summary_managers:
                manager_data.append({
                    'No.': get_text(manager, 'managerSequenceNumber'),
                    'Name': get_text(manager, 'name'),
                    'Form 13F File Number': get_text(manager, 'form13FFileNumber'),
                })
            column_order = ['No.', 'Name', 'Form 13F File Number']
            manager_df = pd.DataFrame(manager_data).reindex(columns=column_order).fillna("—")
            parts.append(to_compact_markdown(manager_df, index=False))
        else:
            parts.append("**NONE**")
    # ##ROWSPAN_n##/##COLSPAN_n## markers are consumed by md_table_2row_header
    # to merge header cells; they must match the row_data keys exactly.
    final_columns = [
        'NAME OF ISSUER##ROWSPAN_1## NAME OF ISSUER##ROWSPAN_1##',
        'TITLE OF CLASS##ROWSPAN_2## TITLE OF CLASS##ROWSPAN_2##',
        'CUSIP##ROWSPAN_3## CUSIP##ROWSPAN_3##',
        'FIGI##ROWSPAN_4## FIGI##ROWSPAN_4##',
        'VALUE (x$1000)##ROWSPAN_5## VALUE (x$1000)##ROWSPAN_5##',
        'SHRS OR PRN AMT##ROWSPAN_6## SHRS OR PRN AMT##ROWSPAN_6##',
        'SH/PRN##ROWSPAN_7## SH/PRN##ROWSPAN_7##',
        'PUT/CALL##ROWSPAN_8## PUT/CALL##ROWSPAN_8##',
        'INVESTMENT DISCRETION##ROWSPAN_9## INVESTMENT DISCRETION##ROWSPAN_9##',
        'OTHER MANAGER##ROWSPAN_10## OTHER MANAGER##ROWSPAN_10##',
        'VOTING AUTHORITY##COLSPAN_1## SOLE',
        'VOTING AUTHORITY##COLSPAN_1## SHARED',
        'VOTING AUTHORITY##COLSPAN_1## NONE'
    ]
    def format_numeric(val_str):
        # Render a non-negative integer string; "0" stays "0" and anything
        # non-numeric collapses to the "—" placeholder. Hoisted out of the
        # holdings loop (it was redefined on every iteration).
        if val_str.isdigit():
            num = int(val_str)
            return str(num) if num > 0 else "0"
        return '—'
    if infotable_soup:
        rows = []
        for item in infotable_soup.find_all(re.compile(r'(?:\w+:)?infoTable$', re.I)):
            shrs_prn = item.find(re.compile(r'(?:\w+:)?shrsOrPrnAmt$', re.I))
            voting = item.find(re.compile(r'(?:\w+:)?votingAuthority$', re.I))
            value_in_thousands = get_text(item, 'value')
            shares_amt = get_text(shrs_prn, 'sshPrnamt')
            vote_sole = get_text(voting, 'Sole')
            vote_shared = get_text(voting, 'Shared')
            vote_none = get_text(voting, 'None')
            row_data = {
                'NAME OF ISSUER##ROWSPAN_1## NAME OF ISSUER##ROWSPAN_1##': get_text(item, 'nameOfIssuer'),
                'TITLE OF CLASS##ROWSPAN_2## TITLE OF CLASS##ROWSPAN_2##': get_text(item, 'titleOfClass'),
                'CUSIP##ROWSPAN_3## CUSIP##ROWSPAN_3##': get_text(item, 'cusip'),
                'FIGI##ROWSPAN_4## FIGI##ROWSPAN_4##': get_text(item, 'figi'),
                'VALUE (x$1000)##ROWSPAN_5## VALUE (x$1000)##ROWSPAN_5##': format_numeric(value_in_thousands),
                'SHRS OR PRN AMT##ROWSPAN_6## SHRS OR PRN AMT##ROWSPAN_6##': format_numeric(shares_amt),
                'SH/PRN##ROWSPAN_7## SH/PRN##ROWSPAN_7##': get_text(shrs_prn, 'sshPrnamtType'),
                'PUT/CALL##ROWSPAN_8## PUT/CALL##ROWSPAN_8##': get_text(item, 'putCall'),
                'INVESTMENT DISCRETION##ROWSPAN_9## INVESTMENT DISCRETION##ROWSPAN_9##': get_text(item, 'investmentDiscretion'),
                'OTHER MANAGER##ROWSPAN_10## OTHER MANAGER##ROWSPAN_10##': get_text(item, 'otherManager'),
                'VOTING AUTHORITY##COLSPAN_1## SOLE': format_numeric(vote_sole),
                'VOTING AUTHORITY##COLSPAN_1## SHARED': format_numeric(vote_shared),
                'VOTING AUTHORITY##COLSPAN_1## NONE': format_numeric(vote_none),
            }
            rows.append(row_data)
        if rows:
            df = pd.DataFrame(rows)
            df = df.reindex(columns=final_columns, fill_value='—')
            parts.append("\n### FORM 13F INFORMATION TABLE")
            table_md = md_table_2row_header(df)
            parts.append(f"\n---\n{table_md}\n---")
    return "\n\n".join(parts)
def parse_form_npx_xml(xml_contents) -> str:
    """
    Parses a two-part Form N-PX filing into a single, comprehensive Markdown document.

    Args:
        xml_contents: Raw XML strings; [0] is the primary submission and an
            optional [1] carries the <proxyVoteTable> voting record.

    Returns:
        The Markdown rendering, or "" when no content is supplied.
    """
    if not xml_contents:
        return ""
    main_xml = BeautifulSoup(xml_contents[0], 'lxml-xml')
    def get_text(node, tag):
        # Case-insensitive exact-name lookup with a "—" placeholder.
        if not node or not (found := node.find(re.compile(f'^{tag}$', re.I))):
            return "—"
        return found.text.strip()
    header = main_xml.find(re.compile(r'^headerData$', re.I))
    filer_info = header.find(re.compile(r'^filerInfo$', re.I)) if header else None
    filer_creds = filer_info.find(re.compile(r'^filer$', re.I)) if filer_info else None
    flags = filer_info.find(re.compile(r'^flags$', re.I)) if filer_info else None
    form_data = main_xml.find(re.compile(r'^formData$', re.I))
    cover_page = form_data.find(re.compile(r'^coverPage$', re.I)) if form_data else None
    reporting_person = cover_page.find(re.compile(r'^reportingPerson$', re.I)) if cover_page else None
    rp_addr = reporting_person.find(re.compile(r'address', re.I)) if reporting_person else None
    agent = cover_page.find(re.compile(r'^agentForService$', re.I)) if cover_page else None
    agent_addr = agent.find(re.compile(r'address', re.I)) if agent else None
    summary_page = form_data.find(re.compile(r'^summaryPage$', re.I)) if form_data else None
    sig_page = form_data.find(re.compile(r'^signaturePage$', re.I)) if form_data else None
    parts = [
        "## FORM N-PX\n"
        "### ANNUAL REPORT OF PROXY VOTING RECORD\n",
        "## N-PX: Filer Information",
        f"**Filer CIK:** {get_text(filer_creds, 'cik')}",
        f"**Date of Report:** {get_text(header, 'periodOfReport')}",
        f"**Are you a Registered Management Investment Company or an Institutional Manager?:** {'Institutional Manager' if get_text(filer_info, 'registrantType') == 'IM' else 'Registered Management Investment Company'}",
        f"**Is this a LIVE or TEST Filing?:** {get_text(filer_info, 'liveTestFlag')}",
        f"**Is this an electronic copy of an official filing submitted in paper format?:** [{'x' if get_text(flags, 'confirmingCopyFlag') == 'true' else ' '}]",
    ]
    # Contact details live under either <submissionContact> or <contactInfo>,
    # depending on schema vintage.
    sub_contact = None
    for tag in ('submissionContact', 'contactInfo'):
        sub_contact = header.find(re.compile(f'^{tag}$', re.I)) if header else None
        if sub_contact:
            break
    if sub_contact and sub_contact.get_text(strip=True):
        parts.append("\n### Submission Contact Information")
        sc_fields = {
            "Name": get_text(sub_contact, 'name'),
            "Title": get_text(sub_contact, 'title'),
            "Phone": get_text(sub_contact, 'phoneNumber'),
            "Email": get_text(sub_contact, 'emailAddress'),
        }
        for label, val in sc_fields.items():
            if val != "—":
                parts.append(f"**{label}:** {val}")
    else:
        parts.append("\n### Submission Contact Information\n_Not provided in this filing._")
    parts.extend([
        f"\n### Notification Information\n**Notify via Filing Website only?:** [{'x' if get_text(flags, 'overrideInternetFlag') == 'true' else ' '}]",
        "\n## N-PX: Cover Page",
        f"**Name of reporting person:** {get_text(reporting_person, 'name')}",
        f"**Address:** {get_text(rp_addr, 'street1')}, {get_text(rp_addr, 'city')}, {get_text(rp_addr, 'stateOrCountry')} {get_text(rp_addr, 'zipCode')}",
        f"**Telephone number:** {get_text(reporting_person, 'phoneNumber')}",
        f"**Name of agent for service:** {get_text(agent, 'name')}",
        f"**Agent Address:** {get_text(agent_addr, 'street1')}, {get_text(agent_addr, 'city')}, {get_text(agent_addr, 'stateOrCountry')} {get_text(agent_addr, 'zipCode')}",
        f"**Reporting Period:** Report for the year ended {get_text(header, 'periodOfReport')}",
        f"**SEC File Number:** {get_text(cover_page, 'fileNumber')}",
        "**CRD Number (if any):** —",
        "**Other SEC File Number (if any):** —",
        f"**LEI (if any):** {get_text(cover_page, 'leiNumber')}",
    ])
    # Guard cover_page before .find(): every other node access is None-safe,
    # and a missing <coverPage> previously raised AttributeError here.
    report_info = cover_page.find('reportInfo') if cover_page else None
    report_type = get_text(report_info, 'reportType')
    report_options = {
        "Institutional Manager.": [
            "Institutional Manager Voting Report",
            "Institutional Manager Notice Report",
            "Institutional Manager Combination Report"
        ],
        "Registered Management Investment Company.": [
            "Fund Voting Report",
            "Fund Notice Report"
        ]
    }
    parts.append("\n**Report Type (check only one):**")
    for category, options in report_options.items():
        parts.append(f"\n{category}")
        for option in options:
            parts.append(f"- [{'x' if option.upper() in report_type.upper() else ' '}] {option}")
    explanatory_node = cover_page.find('explanatoryInformation') if cover_page else None
    exp_choice = get_text(explanatory_node, 'explanatoryChoice')
    parts.extend([
        "\n**Do you wish to provide explanatory information pursuant to Special Instruction B.4?:**",
        f"- [{' ' if exp_choice == 'Y' else 'x'}] No",
        f"- [{'x' if exp_choice == 'Y' else ' '}] Yes",
        "\n**Additional information:** —"
    ])
    # get_text's "—" sentinel is truthy, so a trailing `or 'NONE'` could
    # never fire; apply the intended default explicitly.
    included_managers = get_text(summary_page, 'includedManagers')
    if included_managers == "—":
        included_managers = "NONE"
    parts.extend([
        "\n## N-PX: Summary - Included Managers",
        f"**Number of Included Institutional Managers:** {get_text(summary_page, 'otherIncludedManagersCount')}",
        f"**Included Institutional Managers:** {included_managers}"
    ])
    if len(xml_contents) > 1:
        proxy_xml = BeautifulSoup(xml_contents[1], 'lxml-xml')
        if (proxy_vote_table := proxy_xml.find(re.compile(r'^proxyVoteTable$', re.I))):
            parts.append("\n## FORM N-PX PROXY VOTING RECORD\n")
            vote_records = []
            for item in proxy_vote_table.find_all(re.compile(r'^proxyTable$', re.I)):
                vote_node = item.find(re.compile(r'^vote$', re.I))
                how_voted, sv_ford, mgmt_rec = "—", "—", "—"
                if vote_node and (record := vote_node.find(re.compile(r'^voteRecord$', re.I))):
                    how_voted = get_text(record, 'howVoted')
                    sv_ford = get_text(record, 'sharesVoted')
                    mgmt_rec = get_text(record, 'managementRecommendation')
                category_text = "; ".join(c.text for c in item.select('categoryType'))
                vote_records.append({
                    'NAME OF ISSUER': get_text(item, 'issuerName'),
                    'CUSIP': get_text(item, 'cusip'),
                    'MEETING DATE': get_text(item, 'meetingDate'),
                    'VOTE DESCRIPTION': _collapse_newlines(get_text(item, 'voteDescription')),
                    'VOTE CATEGORY': category_text,
                    'SHARES VOTED': get_text(item, 'sharesVoted'),
                    'SHARES ON LOAN': get_text(item, 'sharesOnLoan'),
                    'HOW VOTED': how_voted,
                    'SHARES VOTED FOR OR AGAINST MANAGEMENT': sv_ford,
                    'FOR OR AGAINST MANAGEMENT': mgmt_rec,
                    'OTHER INFO': _collapse_newlines(get_text(item, 'voteOtherInfo'))
                })
            df = pd.DataFrame(vote_records).replace('—', '')
            parts.append(to_compact_markdown(df, index=False))
    parts.extend([
        "\n## N-PX: Signature Block",
        f"**Reporting Person:** {get_text(sig_page, 'reportingPerson')}",
        f"**By (Signature):** {get_text(sig_page, 'txSignature')}",
        f"**By (Printed Signature):** {get_text(sig_page, 'txPrintedSignature')}",
        f"**By (Title):** {get_text(sig_page, 'txTitle')}",
        f"**Date:** {get_text(sig_page, 'txAsOfDate')}"
    ])
    return "\n\n".join(parts)
def parse_form25_xml(xml: BeautifulSoup) -> str:
    """
    Parses the XML of a Form 25 filing into structured Markdown,
    including all standard boilerplate text for full context.

    Returns "" when no <notificationOfRemoval> root is present.
    """
    root = xml.find(re.compile(r'^notificationOfRemoval$', re.I))
    if not root:
        return ""
    def get_text(node, tag):
        # Case-insensitive exact-name lookup with a "—" placeholder.
        if not node or not (found := node.find(re.compile(f'^{tag}$', re.I))): return "—"
        return found.text.strip()
    issuer_node = root.find(re.compile(r'^issuer$', re.I))
    exchange_node = root.find(re.compile(r'^exchange$', re.I))
    sig_node = root.find(re.compile(r'^signatureData$', re.I))
    issuer_name = get_text(issuer_node, 'entityName')
    file_number = get_text(issuer_node, 'fileNumber')
    exchange_name = get_text(exchange_node, 'entityName')
    # Guard issuer_node before .find(): a filing without an <issuer> block
    # previously raised AttributeError here while every other access was
    # None-safe via get_text.
    address_node = issuer_node.find(re.compile(r'^address$', re.I)) if issuer_node else None
    address = "—"
    if address_node:
        addr_parts = [get_text(address_node, p) for p in ['street1', 'city', 'stateOrCountry', 'zipCode']]
        address = ", ".join(p for p in addr_parts if p and p != "—")
    tel_num = get_text(issuer_node, 'telephoneNumber')
    security_desc = get_text(root, 'descriptionClassSecurity')
    rule_cited = get_text(root, 'ruleProvision')
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        "## FORM 25\n\n"
        "### NOTIFICATION OF REMOVAL FROM LISTING AND/OR REGISTRATION "
        "UNDER SECTION 12(b) OF THE SECURITIES EXCHANGE ACT OF 1934.\n",
        f"**Commission File Number:** {file_number}\n"
    ]
    details_data = {
        'Issuer:': issuer_name,
        'Exchange:': exchange_name,
        '(Exact name of Issuer as specified in its charter, and name of Exchange where security is listed and/or registered)': '',
        'Address:': address,
        'Telephone number:': tel_num,
        "(Address, including zip code, and telephone number, including area code, of Issuer's principal executive offices)": '',
        '(Description of class of securities)': security_desc,
    }
    details_df = pd.DataFrame(details_data.items(), columns=['', ''])
    parts.append(to_compact_markdown(details_df, index=False))
    parts.append("\n---\n\nPlease place an X in the box to designate the rule provision relied upon to strike the class of securities from listing and registration:\n")
    possible_rules = [ "17 CFR 240.12d2-2(a)(1)", "17 CFR 240.12d2-2(a)(2)", "17 CFR 240.12d2-2(a)(3)", "17 CFR 240.12d2-2(a)(4)", "Pursuant to 17 CFR 240.12d2-2(b), the Exchange has complied with its rules to strike the class of securities from listing and/or withdraw registration on the Exchange.", "Pursuant to 17 CFR 240.12d2-2(c), the Issuer has complied with its rules of the Exchange and the requirements of 17 CFR 240.12d2-2(c) governing the voluntary withdrawal of the class of securities from listing and registration on the Exchange."]
    for rule in possible_rules:
        # Substring match: the cited provision text is expected to appear
        # inside exactly one boilerplate option. ("—" matches none.)
        parts.append(f"- [{'x' if rule_cited in rule else ' '}] {rule}")
    certification_text = f"Pursuant to the requirements of the Securities Exchange Act of 1934, {exchange_name} certifies that it has reasonable grounds to believe that it meets all of the requirements for filing the Form 25 and has caused this notification to be signed on its behalf by the undersigned duly authorized person."
    parts.extend(["\n---\n", textwrap.fill(certification_text, width=90), "\n"])
    sig_df = pd.DataFrame([{'Date': get_text(sig_node, 'signatureDate'), 'By': '', 'Name': get_text(sig_node, 'signatureName'), 'Title': get_text(sig_node, 'signatureTitle')}])
    parts.append(to_compact_markdown(sig_df, index=False))
    footer_text1 = "Form 25 and attached Notice will be considered compliance with the provisions of 17 CFR 240.19d-1 as applicable. See General Instructions."
    footer_text2 = "Persons who respond to the collection of information contained in this form are not required to respond unless the form displays a currently valid OMB Number."
    parts.extend(["\n\n" + textwrap.fill(footer_text1, width=90), "\n\n" + textwrap.fill(footer_text2, width=90)])
    # NOTE: joined with a single newline (not "\n\n") — intentional per the
    # original layout of this form.
    return "\n".join(parts)
def parse_form144_xml(xml: BeautifulSoup, form_type: str) -> str:
    """
    Parses XML for Form 144 and 144/A filings into structured Markdown,
    including standard boilerplate text for full context.

    Args:
        xml: Parsed XML document for the filing.
        form_type: Form label used in the section headings (e.g. "144", "144/A").

    Returns:
        Markdown text for the filing, or "" when no edgarSubmission root
        element is found.
    """
    # All tag lookups use '^(?:\w+:)?tag$' so they match the tag name with or
    # without an XML namespace prefix.
    submission = xml.find(re.compile(r'^(?:\w+:)?edgarSubmission$', re.I))
    if not submission: return ""
    header_data = submission.find(re.compile(r'^(?:\w+:)?headerData$', re.I))
    # NOTE(review): form_data is dereferenced unconditionally below (e.g.
    # form_data.find(...)); a filing without <formData> would raise
    # AttributeError — presumably upstream guarantees its presence. Confirm.
    form_data = submission.find(re.compile(r'^(?:\w+:)?formData$', re.I))
    def get_text(node, tag):
        # Em dash ("—") is the sentinel for a missing value throughout.
        if not node: return "—"
        found = node.find(re.compile(rf'^(?:\w+:)?{tag}$', re.I))
        return found.text.strip() if found else "—"
    filer_info_node = header_data.find(re.compile(r'^(?:\w+:)?filerInfo$', re.I)) if header_data else None
    filer_creds_node = filer_info_node.find(re.compile(r'^(?:\w+:)?filer$', re.I)) if filer_info_node else None
    issuer_info = form_data.find(re.compile(r'^(?:\w+:)?issuerInfo$', re.I))
    issuer_address_node = issuer_info.find(re.compile(r'^(?:\w+:)?issuerAddress$', re.I)) if issuer_info else None
    issuer_address = "—"
    if issuer_address_node:
        # Join only the address parts that are present (drop "—" sentinels).
        addr_parts = [ get_text(issuer_address_node, t) for t in ['street1', 'city', 'stateOrCountry', 'zipCode'] ]
        issuer_address = ", ".join(p for p in addr_parts if p and p != "—")
    relationships_node = issuer_info.find(re.compile(r'^(?:\w+:)?relationshipsToIssuer$', re.I)) if issuer_info else None
    relationships = [rel.text.strip() for rel in relationships_node.find_all(re.compile(r'^(?:\w+:)?relationshipToIssuer$', re.I))] if relationships_node else []
    full_relationship = ", ".join(relationships) if relationships else "—"
    # Static SEC form masthead (adjacent string literals concatenate).
    parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        f"## FORM {form_type}\n\n"
        "### NOTICE OF PROPOSED SALE OF SECURITIES\n"
        "### PURSUANT TO RULE 144 UNDER THE SECURITIES ACT OF 1933\n"
    ]
    parts.extend([f"### {form_type}: Filer Information", f"**Filer CIK:** {get_text(filer_creds_node, 'cik')}"])
    # Amendments (144/A) carry the accession number of the filing they amend.
    if (prev_accession := get_text(header_data, 'previousAccessionNumber')) != "—":
        parts.append(f"**Previous Accession Number Of The Filing:** {prev_accession}")
    parts.append(f"**Is this a LIVE or TEST Filing?:** {get_text(header_data, 'liveTestFlag')}")
    sub_contact = header_data.find(re.compile(r'^(?:\w+:)?submissionContact$', re.I)) if header_data else None
    if sub_contact and sub_contact.get_text(strip=True):
        parts.append("\n### Submission Contact Information")
        sc_fields = {
            "Name": get_text(sub_contact, 'name'),
            "Phone": get_text(sub_contact, 'phone'),
            "Email": get_text(sub_contact, 'email'),
        }
        for label, val in sc_fields.items():
            if val != "—":
                parts.append(f"**{label}:** {val}")
    else:
        parts.append("\n### Submission Contact Information\n_Not provided in this filing._")
    parts.extend([
        f"\n### {form_type}: Issuer Information",
        f"**Name of Issuer:** {get_text(issuer_info, 'issuerName')}",
        f"**SEC File Number:** {get_text(issuer_info, 'secFileNumber')}",
        f"**Address of Issuer:** {issuer_address}",
        f"**Phone:** {get_text(issuer_info, 'issuerContactPhone')}",
        f"**Name of Person for Whose Account the Securities are to Be Sold:** {get_text(issuer_info, 'nameOfPersonForWhoseAccountTheSecuritiesAreToBeSold')}",
        f"**Relationship to Issuer:** {full_relationship}",
        "\n" + textwrap.fill("See the definition of \"person\" in paragraph (a) of Rule 144. Information is to be given not only as to the person for whose account the securities are to be sold but also as to all other persons included in that definition. In addition, information shall be given as to sales by all persons whose sales are required by paragraph (e) of Rule 144 to be aggregated with sales for the account of the person filing this notice.")
    ])
    sec_info = form_data.find(re.compile(r'^(?:\w+:)?securitiesInformation$', re.I))
    broker = sec_info.find(re.compile(r'^(?:\w+:)?brokerOrMarketmakerDetails$', re.I)) if sec_info else None
    # Broker is rendered as "Name street1 street2 city state zip" on one line.
    full_broker_info = get_text(broker, 'name')
    if broker and (broker_addr_node := broker.find(re.compile(r'^(?:\w+:)?address$', re.I))):
        addr_lines = [get_text(broker_addr_node, t) for t in ['street1', 'street2']]
        city_state_zip = " ".join(p for p in [get_text(broker_addr_node, t) for t in ['city', 'stateOrCountry', 'zipCode']] if p and p != "—")
        if city_state_zip: addr_lines.append(city_state_zip)
        if valid_lines := [line for line in addr_lines if line and line != "—"]:
            full_broker_info += " " + " ".join(valid_lines)
    df_proposed_data = {
        'Title of the Class of Securities To Be Sold': get_text(sec_info, 'securitiesClassTitle'),
        'Name and Address of the Broker': full_broker_info,
        'Number of Shares or Other Units To Be Sold': get_text(sec_info, 'noOfUnitsSold'),
        'Aggregate Market Value': get_text(sec_info, 'aggregateMarketValue'),
        'Number of Shares or Other Units Outstanding': get_text(sec_info, 'noOfUnitsOutstanding'),
        'Approximate Date of Sale': get_text(sec_info, 'approxSaleDate'),
        'Name the Securities Exchange': get_text(sec_info, 'securitiesExchangeName')
    }
    df_proposed = pd.DataFrame([df_proposed_data])
    parts.extend([f"\n### {form_type}: Securities Information", df_to_markdown(df_proposed, is_clean=True, disable_numparse=True), "\n" + textwrap.fill("Furnish the following information with respect to the acquisition of the securities to be sold and with respect to the payment of all or any part of the purchase price or other consideration therefor:")])
    # One row per <securitiesToBeSold> acquisition entry. Note 'donarAcquiredDate'
    # is the tag name as it appears in the EDGAR schema (sic).
    acq_data = [{'Title of the Class': get_text(item, "securitiesClassTitle"), 'Date you Acquired': get_text(item, "acquiredDate"), 'Nature of Acquisition Transaction': get_text(item, "natureOfAcquisitionTransaction"), 'Name of Person from Whom Acquired': get_text(item, "nameOfPersonfromWhomAcquired"), 'Is this a Gift?': "Yes" if get_text(item, "isGiftTransaction") == 'Y' else "No", 'Date Donor Acquired': get_text(item, "donarAcquiredDate"), 'Amount of Securities Acquired': get_text(item, "amountOfSecuritiesAcquired"), 'Date of Payment': get_text(item, "paymentDate"), 'Nature of Payment *': get_text(item, "natureOfPayment")} for item in form_data.find_all(re.compile(r'^(?:\w+:)?securitiesToBeSold$', re.I))]
    if acq_data:
        df_acq = pd.DataFrame(acq_data)
        parts.extend([f"\n### {form_type}: Securities To Be Sold", df_to_markdown(df_acq, is_clean=True, disable_numparse=True), "\n" + textwrap.fill("* If the securities were purchased and full payment therefor was not made in cash at the time of purchase, explain in the table or in a note thereto the nature of the consideration given. If the consideration consisted of any note or other obligation, or if payment was made in installments describe the arrangement and state when the note or other obligation was discharged in full or the last installment paid."), "\n" + textwrap.fill("Furnish the following information as to all securities of the issuer sold during the past 3 months by the person for whose account the securities are to be sold.")])
    parts.append(f"\n### {form_type}: Securities Sold During The Past 3 Months")
    if get_text(form_data, 'nothingToReportFlagOnSecuritiesSoldInPast3Months') == 'Y':
        parts.append("Nothing to Report")
    else:
        past_sales_data = []
        for s in form_data.find_all(re.compile(r'^(?:\w+:)?securitiesSoldInPast3Months$', re.I)):
            seller_details_node = s.find(re.compile(r'^(?:\w+:)?sellerDetails$', re.I))
            seller_name = get_text(seller_details_node, "name")
            # Seller rendered as "Name street1 street2 city state zip", same
            # flattening as the broker address above.
            full_seller_info = seller_name
            if seller_details_node and (seller_addr_node := seller_details_node.find(re.compile(r'^(?:\w+:)?address$', re.I))):
                addr_lines = [get_text(seller_addr_node, t) for t in ['street1', 'street2']]
                city_state_zip = " ".join(p for p in [get_text(seller_addr_node, t) for t in ['city', 'stateOrCountry', 'zipCode']] if p and p != "—")
                if city_state_zip: addr_lines.append(city_state_zip)
                if valid_lines := [line for line in addr_lines if line and line != "—"]:
                    full_seller_info += " " + " ".join(valid_lines)
            past_sales_data.append({
                'Name and Address of Seller': full_seller_info,
                'Title of Securities Sold': get_text(s, "securitiesClassTitle"),
                'Date of Sale': get_text(s, "saleDate"),
                'Amount of Securities Sold': get_text(s, "amountOfSecuritiesSold"),
                'Gross Proceeds': get_text(s, 'grossProceeds')
            })
        if past_sales_data:
            parts.append(df_to_markdown(pd.DataFrame(past_sales_data), is_clean=True, disable_numparse=True))
    parts.append(f"\n### {form_type}: Remarks and Signature")
    if (remarks := form_data.find(re.compile(r'^(?:\w+:)?remarks$', re.I))) and remarks.text.strip():
        parts.append(f"**Remarks:** {textwrap.fill(remarks.text.strip())}")
    signature_node = form_data.find(re.compile(r'^(?:\w+:)?noticeSignature$', re.I))
    plan_adoption_dates_node = signature_node.find(re.compile(r'^(?:\w+:)?planAdoptionDates$', re.I)) if signature_node else None
    plan_date = get_text(plan_adoption_dates_node, "planAdoptionDate")
    parts.append(f"**Date of Notice:** {get_text(signature_node, 'noticeDate')}")
    if plan_date != "—":
        parts.append(f"**Date of Plan Adoption or Giving of Instruction, If Relying on Rule 10b5-1:** {plan_date}")
    parts.append("\nATTENTION:\n\n" + textwrap.fill("The person for whose account the securities to which this notice relates are to be sold hereby represents by signing this notice that he does not know any material adverse information in regard to the current and prospective operations of the Issuer of the securities to be sold which has not been publicly disclosed. If such person has adopted a written trading plan or given trading instructions to satisfy Rule 10b5-1 under the Exchange Act, by signing the form and indicating the date that the plan was adopted or the instruction given, that person makes such representation as of the plan adoption or instruction date."))
    parts.append(f"\n**Signature:** {get_text(signature_node, 'signature')}")
    return "\n\n".join(parts)
def fix_inverted_bold_paragraphs(soup: BeautifulSoup):
    """
    Normalize inverted bold/paragraph nesting.

    Some documents nest a paragraph inside a bold tag (b > p) instead of the
    standard form (p > b). When the bold wrapper's visible text is exactly the
    inner paragraph's text, rebuild the pair in the standard orientation so
    downstream logic can process it correctly.
    """
    for wrapper in soup.find_all(['b', 'strong']):
        inner_p = wrapper.find('p')
        if inner_p is None:
            continue
        if wrapper.get_text(strip=True) != inner_p.get_text(strip=True):
            continue
        merged_text = inner_p.get_text(separator=' ', strip=True)
        replacement_p = soup.new_tag('p')
        replacement_b = soup.new_tag('b')
        replacement_b.string = merged_text
        replacement_p.append(replacement_b)
        wrapper.replace_with(replacement_p)
def promote_bold_subheads(soup, max_words=15, max_len=120):
    """
    Promote short, fully-bold <p>/<div> blocks to <h4> subheadings.

    A block qualifies when its only child element is a <b>/<strong> tag that
    carries all of the block's visible text, and that text is short (between
    2 and max_len characters, at most max_words words). The text is collapsed
    to a single line to avoid downstream parsing errors.
    """
    for block in soup.find_all(['p', 'div']):
        children = block.find_all(True, recursive=False)
        if len(children) != 1:
            continue
        bold = children[0]
        if bold.name not in ['b', 'strong']:
            continue
        if block.get_text(strip=True) != bold.get_text(strip=True):
            continue
        sample = bold.get_text(strip=True)
        short_enough = 1 < len(sample) <= max_len
        few_words = len(sample.split()) <= max_words
        if short_enough and few_words:
            heading = soup.new_tag('h4')
            heading.string = bold.get_text(separator=' ', strip=True)
            block.replace_with(heading)
# Leading/trailing whitespace matcher that also covers NBSP (U+00A0) and the
# invisible separator (U+2063). Not referenced elsewhere in this chunk.
WS_RE = re.compile(r'^[\s\u00A0\u2063]+|[\s\u00A0\u2063]+$')
def process_inline_tags(soup, tags, placeholder_prefix):
    """
    Wrap every matching inline tag in unique text placeholders without
    destroying nested markup.

    Tags with no visible text are dropped. Inside each surviving tag, <br>
    elements become '##NEWLINE##' when the tag sits in a table (preserving
    cell line structure) or a plain space otherwise, and literal newlines in
    text nodes are flattened to spaces. The tag is then unwrapped, leaving
    '##PREFIX_START_i##' / '##PREFIX_END_i##' markers around its content.

    Args:
        soup: The BeautifulSoup object to modify in place.
        tags: Tag names to process (e.g., ['b', 'strong']).
        placeholder_prefix: Placeholder label (e.g., "BOLD", "ITALIC").
    """
    for index, element in enumerate(list(soup.find_all(tags))):
        # Skip tags already detached by earlier processing.
        if not element.parent:
            continue
        if not element.get_text(strip=True):
            element.decompose()
            continue
        inside_table = element.find_parent('table')
        for node in list(element.descendants):
            if node.name == 'br':
                marker = '##NEWLINE##' if inside_table else ' '
                node.replace_with(NavigableString(marker))
            elif isinstance(node, NavigableString):
                flattened = str(node).replace('\n', ' ')
                node.replace_with(NavigableString(flattened))
        element.insert_before(NavigableString(f"##{placeholder_prefix}_START_{index}##"))
        element.insert_after(NavigableString(f"##{placeholder_prefix}_END_{index}##"))
        element.unwrap()
def process_anchor_tags(soup):
    """
    Preserve anchor href targets in text form so they survive read_html/table parsing.
    Anchors are restored to markdown links in _post_process_text_cleanup.
    """
    found_tags = list(soup.find_all('a'))
    # A <base href> in the document, when present, overrides the module-level
    # CURRENT_SOURCE_DOCUMENT_URL (presumably the URL of the document being
    # processed — defined elsewhere in this module) for resolving relative links.
    base_tag = soup.find('base', href=True)
    base_href = (base_tag.get('href') or '').strip() if base_tag else ''
    base_url = base_href or CURRENT_SOURCE_DOCUMENT_URL
    for i, tag in enumerate(found_tags):
        # Skip anchors already detached by earlier processing.
        if not tag.parent:
            continue
        href = (tag.get('href') or '').strip()
        # Resolve relative hrefs (anything without a URI scheme) against base_url.
        if (
            href
            and base_url
            and not re.match(r'^[a-z][a-z0-9+.-]*:', href, flags=re.IGNORECASE)
        ):
            href = urljoin(base_url, href)
        # Anchors with no visible text are dropped entirely.
        if not tag.get_text(strip=True):
            tag.decompose()
            continue
        # Keep the text but drop the link for empty or javascript: targets.
        if not href or href.lower().startswith('javascript:'):
            tag.unwrap()
            continue
        is_in_table = tag.find_parent('table')
        # Flatten line structure inside the anchor: <br> becomes '##NEWLINE##'
        # in tables (so cells keep their breaks) or a space elsewhere; raw
        # newlines in text nodes become spaces.
        for descendant in list(tag.descendants):
            if descendant.name == 'br':
                if is_in_table:
                    descendant.replace_with(NavigableString('##NEWLINE##'))
                else:
                    descendant.replace_with(NavigableString(' '))
            elif isinstance(descendant, NavigableString):
                cleaned_text = str(descendant).replace('\n', ' ')
                descendant.replace_with(NavigableString(cleaned_text))
        # Percent-encode the href into the placeholder so it cannot contain
        # '#' and break the ##...## delimiter format; decoded by
        # _restore_markdown_links.
        encoded_href = quote(href, safe="/:?&=%._-")
        start_placeholder = f"##LINK_START_{i}__{encoded_href}##"
        end_placeholder = f"##LINK_END_{i}##"
        tag.insert_before(NavigableString(start_placeholder))
        tag.insert_after(NavigableString(end_placeholder))
        tag.unwrap()
def _restore_markdown_links(text: str) -> str:
"""
Restore anchor placeholders to markdown links after inline formatting placeholders
have already been converted back into markdown/HTML markup.
"""
link_pattern = re.compile(
r'##LINK_START_(\d+)__([^#]+)##(.*?)##LINK_END_\1##',
re.DOTALL,
)
def _replace(match: re.Match) -> str:
href = unquote(match.group(2))
label = match.group(3).strip()
if not label:
return ''
label = label.replace('\\', '\\\\').replace('[', r'\[').replace(']', r'\]')
return f'[{label}]({href})'
previous = None
while text != previous:
previous = text
text = link_pattern.sub(_replace, text)
return text
def _unsplit_numbers(text: str) -> str:
"""
Collapse separators inside a number.
'1 234' → '1234' '1,234' → '1234' '($ 1 234)' → '($1234)'
"""
month_pattern = re.compile(
r'\b(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|'
r'May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|'
r'Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\b',
re.IGNORECASE
)
def contains_month(s: str) -> bool:
return bool(month_pattern.search(s))
if isinstance(text, str) and contains_month(text):
return text
if isinstance(text, str):
return re.sub(r'(?<=\d)[\s,]+(?=\d)', '', text)
return text
def handle_list_like_table_with_indentation(table_element, output_list: list) -> bool:
    """
    Identifies tables used for indented lists. It uses a hybrid approach:
    1. Calculates precise indentation from absolute CSS widths (pt, px) first.
    2. If absolute widths are not found, it checks for a percentage-based 'width'
       attribute and applies a heuristic (level = width / 2) as a fallback.

    Returns True (and appends the rendered list to output_list) only when every
    non-empty row's first non-empty cell is a recognized list marker; otherwise
    returns False and leaves output_list untouched.
    """
    # One logical indent level corresponds to 18pt (0.25 inch).
    STANDARD_INDENT_PT = 18.0
    # NOTE(review): inside the bullet character class, the sequence '‡-○'
    # forms a codepoint range (U+2021–U+25CB), which also matches many
    # non-bullet glyphs. If a literal '-' marker was intended it should be
    # placed first or last in the class — confirm intent before changing.
    LIST_MARKER_RE_TABLE = re.compile(
        r"""^\s*
        (?:
            [o□☒⌧♦⧫†‡-○•●·◦➢▪]          # Bullet characters
        |
            \((?:[a-z0-9ivxlcdm]+)\)     # Markers in parentheses, e.g., (a), (i), (1)
        |
            [a-z][\.\)]                  # Markers with a period or parenthesis, e.g., a., a)
        |
            \d+\.                        # Numeric markers, e.g., 1.
        |
            [ivxlcdm]+\.                 # Roman numeral markers, e.g., i., iv.
        |
            \d+\.\d[\d\.]*               # Multi-level numeric markers like 1.1 or 1.2.3
        )
        \s*$""",
        re.IGNORECASE | re.VERBOSE
    )
    rows = table_element.find_all('tr', recursive=False)
    if not rows:
        return False
    processed_items = []
    is_consistent_list_table = True
    for row in rows:
        if not row.get_text(strip=True):
            continue
        cells = row.find_all(['td', 'th'], recursive=False)
        # Locate the first cell with visible text; it must hold the list marker.
        first_content_cell = None
        first_content_cell_index = -1
        for i, cell in enumerate(cells):
            if cell.get_text(strip=True):
                first_content_cell = cell
                first_content_cell_index = i
                break
        if not first_content_cell:
            continue
        raw_cell_text = first_content_cell.get_text(strip=True)
        # Strip formatting placeholders (##BOLD_START_n## etc.) injected by
        # process_inline_tags before testing for a marker.
        normalized_text = re.sub(r'##((?:BOLD|ITALIC|U)_(?:START|END)_\d+|(?:COLSPAN)_\d+)##', '', raw_cell_text)
        if not LIST_MARKER_RE_TABLE.fullmatch(normalized_text):
            # One non-marker row disqualifies the whole table.
            is_consistent_list_table = False
            break
        marker_cell = first_content_cell
        marker_cell_index = first_content_cell_index
        # Everything after the marker cell becomes the item's content text.
        content_fragments = []
        for i in range(marker_cell_index + 1, len(cells)):
            cell_text = cells[i].get_text(separator=' ', strip=True)
            if cell_text:
                content_fragments.append(cell_text)
        if not content_fragments:
            continue
        content_text = ' '.join(content_fragments)
        total_indent_pt = 0.0
        percentage_level = 0
        # Unit-to-points conversion factors (px assumes 96dpi: 1px = 0.75pt).
        conversions = {'in': 72.0, 'pt': 1.0, 'px': 0.75}
        # Sum the widths of empty spacer cells to the left of the marker.
        for i in range(marker_cell_index):
            spacer_cell = cells[i]
            if spacer_cell.get_text(strip=True):
                continue
            style_attr = spacer_cell.get('style', '')
            width_attr = spacer_cell.get('width', '')
            found_absolute_width = False
            for attr_str in [style_attr, width_attr]:
                abs_match = re.search(r'width\s*[:=]?\s*"?([\d\.]+)(in|pt|px)"?', attr_str, re.I)
                if abs_match:
                    try:
                        value = float(abs_match.group(1))
                        unit = (abs_match.group(2) or 'pt').lower()
                        total_indent_pt += value * conversions.get(unit, 1.0)
                        found_absolute_width = True
                        break
                    except (ValueError, TypeError):
                        pass
            if found_absolute_width:
                continue
            # Fallback: percentage-based width heuristic (level = width / 2).
            # NOTE(review): pct_value retains only the LAST matched spacer's
            # percentage; the half-level mapping below relies on it — confirm
            # that multi-spacer percentage rows are not expected.
            if spacer_cell.has_attr('width'):
                pct_match = re.search(r'([\d\.]+)%', spacer_cell['width'])
                if pct_match:
                    try:
                        pct_value = float(pct_match.group(1))
                        percentage_level += int(pct_value / 2)
                    except (ValueError, TypeError):
                        pass
        # Add the marker cell's own padding/margin indent (helper defined
        # elsewhere in this module; returns a dict with an 'indent' key in pt).
        marker_indent_info = _calculate_effective_indent(marker_cell)
        total_indent_pt += marker_indent_info['indent']
        marker_width_attr = marker_cell.get('width', '') + marker_cell.get('style', '')
        marker_width_match = re.search(r'width\s*[:=]?\s*"?([\d\.]+)(pt|px)"?', marker_width_attr, re.I)
        if marker_width_match:
            try:
                value = float(marker_width_match.group(1))
                unit = (marker_width_match.group(2) or 'pt').lower()
                total_indent_pt += value * conversions.get(unit, 1.0)
            except (ValueError, TypeError):
                pass
        level = int(round(total_indent_pt / STANDARD_INDENT_PT))
        # Percentage fallback maps selected widths to (possibly half) levels.
        if level == 0 and percentage_level > 0:
            if pct_value >= 8:
                level = 3
            elif pct_value == 5:
                level = 2.5
            elif pct_value >= 4:
                level = 2
            elif pct_value == 3:
                level = 1.5
            elif pct_value > 0:
                level = 1
        marker_text = marker_cell.get_text(strip=True)
        # Half levels (x.5) render as N full '##INDENT##' markers plus a space.
        n = max(0, math.floor(level))
        indent_prefix = "##INDENT##" * n + (' ' if math.isclose(level - n, 0.5, abs_tol=1e-9) else '')
        full_line = f"{indent_prefix}{marker_text} {content_text}"
        processed_items.append(full_line)
    if is_consistent_list_table and processed_items:
        output_list.append("\n\n".join(processed_items) + "\n\n")
        return True
    return False
def defragment_bolds(soup: BeautifulSoup):
    """
    Finds and merges adjacent <b>/<strong> tags to fix fragmentation, e.g.
    two consecutive bold runs separated only by whitespace become one bold
    run (with a single space preserved between them).
    """
    for b_tag in soup.find_all(['b', 'strong']):
        # Keep absorbing following bold siblings into b_tag until the next
        # (non-whitespace) sibling is not bold.
        while True:
            next_tag = b_tag.next_sibling
            # Look through a purely-whitespace text node to the node after it.
            if isinstance(next_tag, NavigableString) and next_tag.strip() == '':
                real_next_tag = next_tag.next_sibling
            else:
                real_next_tag = next_tag
            if real_next_tag and real_next_tag.name in ['b', 'strong']:
                # Preserve the word gap the skipped whitespace represented.
                if isinstance(next_tag, NavigableString) and next_tag.strip() == '':
                    b_tag.append(" ")
                # Move the sibling's children into b_tag, then drop the shell.
                b_tag.extend(real_next_tag.contents)
                real_next_tag.decompose()
                # Remove the now-redundant whitespace node between the two.
                if isinstance(next_tag, NavigableString):
                    next_tag.extract()
            else:
                break
# CSS values that render a border invisible: 'none'/'hidden', zero widths
# ('0', '0px', '0pt', ...), 'transparent', and fully transparent
# rgba()/hsla() colors (alpha 0).
_INVISIBLE = re.compile(
    r'\b(?:none|hidden|0(?:px|pt|em|rem)?)\b|'
    r'\btransparent\b|rgba?\([^)]*,\s*0(?:\.0+)?\)|hsla?\([^)]*,\s*0(?:\.0+)?\)',
    re.I
)


def has_visible_border(style: str, side: str) -> bool:
    """
    Return True when the inline CSS in *style* draws a visible border on the
    given side ('top', 'bottom', 'left', or 'right').

    Precedence: the side-specific shorthand ('border-top: ...') wins; then any
    explicitly invisible side-specific longhand forces False; finally the
    all-sides 'border' shorthand is consulted. With no border declarations at
    all, the answer is False.
    """
    css = (style or '').lower()

    # 1) Side-specific shorthand, e.g. 'border-bottom: 1px solid black'.
    shorthand = re.search(rf'border-{side}\s*:\s*([^;]+)', css)
    if shorthand:
        return not _INVISIBLE.search(shorthand.group(1))

    # 2) An explicitly invisible side-specific longhand wins over the
    #    all-sides shorthand below.
    for longhand in (f'border-{side}-width', f'border-{side}-style', f'border-{side}-color'):
        declared = re.search(rf'{longhand}\s*:\s*([^;]+)', css)
        if declared and _INVISIBLE.search(declared.group(1)):
            return False

    # 3) Fall back to the all-sides 'border' shorthand.
    generic = re.search(r'border\s*:\s*([^;]+)', css)
    if generic:
        return not _INVISIBLE.search(generic.group(1))
    return False
def tag_border_cells(table_element, soup):
    """
    Finds cells with top or bottom borders and tags them with distinct
    sentinels. It now also correctly handles <hr> tags by replacing
    them with line breaks.

    NOTE(review): each cell.append('') below appends an EMPTY string, yet the
    docstring promises "distinct sentinels". The sentinel text (likely
    markup-like tokens) appears to have been lost — confirm against version
    history before relying on these appends.
    """
    for cell in table_element.find_all(['td', 'th']):
        style = cell.get('style', '').lower()
        hr_tags = cell.find_all('hr')
        if hr_tags:
            cell.append('')
            # An <hr> inside a cell is treated as an intra-cell line break.
            for hr in hr_tags:
                hr.replace_with(soup.new_tag('br'))
        # 'medium none' explicitly declares no bottom border; skip the cell.
        if 'border-bottom: medium none' in style:
            continue
        # An <hr> already marked the cell, so skip the bottom-border tag then.
        if has_visible_border(style, 'bottom') and not hr_tags:
            cell.append('')
        if has_visible_border(style, 'top'):
            cell.append('')
# Bullet glyphs recognized when merging a bullet-only block with the
# following bold paragraph (used by merge_bullet_head_fragments).
BULLET_CHARS = {'○', '•', '●', '·', '◦', '➢', '▪'}
def merge_bullet_head_fragments(soup: BeautifulSoup) -> None:
    """
    Merge a bullet-only block with the following bold-led block.

    Some documents render one list item as two sibling blocks: a <p>/<div>
    holding only the bullet glyph, and the next block holding the (bold-led)
    text. When the next non-empty <p>/<div> sibling starts with a <b>/<strong>
    element, its entire content is moved into the bullet block (bullet, space,
    then the text) and the emptied block is removed.
    """
    for bullet_blk in soup.find_all(['p', 'div']):
        txt = bullet_blk.get_text(strip=True)
        if txt not in BULLET_CHARS:
            continue
        # Next sibling block that actually has visible text.
        nxt = bullet_blk.find_next_sibling(lambda t: (
            t.name in {'p', 'div'} and t.get_text(strip=True)
        ))
        if not nxt:
            continue
        first_child = nxt.find(True, recursive=False)
        if first_child and first_child.name in {'b', 'strong'}:
            # Bug fix: new_string() is a factory on the BeautifulSoup object,
            # not on Tag — the original bullet_blk.new_string(txt) raised
            # AttributeError whenever the bullet block had no single .string.
            bullet_blk.string = bullet_blk.string or soup.new_string(txt)
            bullet_blk.append(" ")
            for node in list(nxt.contents):
                bullet_blk.append(node.extract())
            nxt.decompose()
def convert_styled_superscripts_to_placeholders(soup: BeautifulSoup):
    """
    Replace elements visually styled as superscripts with text placeholders
    of the form '##SUP##...##/SUP##'.

    Only small inline-ish tags are considered (span/font/p/i/b/strong/em/u),
    and only when their inline style combines 'position:relative' with a
    vertical offset that lifts the text (positive 'bottom' or negative 'top').
    Content of 50+ characters is left alone to avoid converting containers.
    """
    offset_re = re.compile(
        r'(bottom|top)\s*:\s*(-?[\d.]+)(?:pt|px|em)',
        re.IGNORECASE
    )
    candidates = ['span', 'font', 'p', 'i', 'b', 'strong', 'em', 'u']
    for element in soup.find_all(style=True):
        if element.name not in candidates:
            continue
        css = element.get('style', '').lower().replace(' ', '')
        if 'position:relative' not in css:
            continue
        offset = offset_re.search(css)
        if offset is None:
            continue
        side, raw_amount = offset.groups()
        amount = float(raw_amount)
        lifted = (side == 'bottom' and amount > 0) or (side == 'top' and amount < 0)
        if not lifted:
            continue
        content = element.get_text(strip=False)
        if content and len(content) < 50:
            element.replace_with(NavigableString(f"##SUP##{content}##/SUP##"))
def promote_styled_headings(soup: BeautifulSoup):
    """
    Convert <div>/<p> blocks that are visually styled as headings into real
    <h2>-<h4> tags.

    The level comes from inline CSS: 20pt+ (or >16pt uppercase) maps to h2,
    14pt+ bold to h3, 12pt+ bold uppercase to h4, and any short bold text
    (under 25 words) to h4. Blocks inside table cells and blocks containing
    tables are never promoted.
    """
    size_re = re.compile(r'font-size\s*:\s*([\d\.]+)pt', re.IGNORECASE)
    for block in soup.find_all(['div', 'p']):
        if block.find_parent(['td', 'th']):
            continue
        css = block.get('style', '').replace(' ', '').lower()
        if not css:
            continue
        size_match = size_re.search(css)
        size = float(size_match.group(1)) if size_match else 0
        bold = 'font-weight:bold' in css or 'font-weight:700' in css
        uppercase = 'text-transform:uppercase' in css

        level = 0
        if size >= 20:
            level = 2
        elif size > 16 and uppercase:
            level = 2
        elif size >= 14 and bold:
            level = 3
        elif size >= 12 and bold and uppercase:
            level = 4
        elif bold:
            sample = block.get_text(strip=True)
            if sample and len(sample.split()) < 25:
                level = 4

        if level == 0 or block.find('table'):
            continue
        text = block.get_text(separator=' ', strip=True)
        if text:
            heading = soup.new_tag(f'h{level}')
            heading.string = text
            block.replace_with(heading)
def handle_sentence_fragment_table(table_element, output_list: list) -> bool:
    """
    Identifies tables used primarily for laying out sentence fragments (e.g.,
    fill-in-the-blank forms) and converts them into a single, flowing paragraph
    of Markdown instead of a multi-column table.

    Args:
        table_element: The BeautifulSoup object for the table.
        output_list: The list where Markdown chunks are being appended.

    Returns:
        True if the table was handled as a sentence fragment, False otherwise.
    """
    rows = table_element.find_all('tr', recursive=False)
    # Sentence-fragment layouts are short: at most 3 rows.
    if not rows or len(rows) > 3:
        return False
    # A single "(N) | text" row is an enumerated item, not a sentence fragment.
    if len(rows) == 1:
        cells = rows[0].find_all(['td', 'th'], recursive=False)
        if len(cells) == 2:
            first_cell_text = cells[0].get_text(strip=True)
            if re.fullmatch(r'\(\s*\d+\s*\)', first_cell_text):
                return False
    # Header cells imply a real data table.
    if table_element.find('th'):
        return False
    total_text = table_element.get_text(strip=True)
    # Bail out when the text smells like financial/tabular data or already
    # carries formatting placeholders/images.
    # NOTE(review): the '�' inside the character class is a replacement
    # character — some original symbol (possibly '€' or '¥') was mangled by a
    # bad encoding round-trip. Recover the intended character from history.
    financial_indicators_re = re.compile(r'[$£�%]|Amount|Total|Percent|Instruction|Vote|/s/|Abstained|pence|Name of Witness|owned by|\b\([a-z]\)\b|\!\[|##BOLD_START', re.IGNORECASE)
    if financial_indicators_re.search(total_text):
        return False
    # Fragments are short prose: between 10 and 300 characters.
    if len(total_text) < 10 or len(total_text) > 300:
        return False
    # A one-row, one-cell table carries no fragment structure worth merging.
    if len(rows) == 1 and len(rows[0].find_all(['td', 'th'], recursive=False)) <= 1:
        return False
    output_lines = []
    for row in rows:
        row_fragments = []
        for cell in row.find_all(['td', 'th']):
            cell_text = cell.get_text(separator='', strip=False)
            if not cell_text:
                continue
            style = cell.get('style', '').lower()
            # A bottom border marks a fill-in blank; keep a trailing space so
            # the blank stays visually separated from the next fragment.
            if 'border-bottom' in style:
                row_fragments.append(f"{cell_text} ")
            else:
                row_fragments.append(cell_text)
        line_text = ' '.join(row_fragments)
        if line_text:
            output_lines.append(line_text)
    if output_lines:
        # Collapse everything into one normalized paragraph.
        full_paragraph = ' '.join(output_lines)
        full_paragraph = re.sub(r'\s+', ' ', full_paragraph).strip()
        output_list.append(full_paragraph + "\n\n")
        return True
    return False
def parse_sec_header(raw_text: str) -> str:
    """
    Parses the SEC-HEADER block of a filing into structured Markdown.
    This version correctly handles multi-line, indented address blocks.

    Top-level "KEY: value" lines become bold key/value pairs, indented
    "KEY: value" lines become list items, and bare non-indented lines become
    "###" section headings. Inside an address section (any key containing
    "ADDRESS"), indented keys with empty values are still emitted as list
    items so multi-line address blocks stay together.

    Args:
        raw_text: Raw text of the SEC-HEADER block (may be empty).

    Returns:
        Markdown text, or "" for empty input.
    """
    if not raw_text:
        return ""
    output = ["## Filing Summary"]
    raw_text = raw_text.replace('\r\n', '\n').strip()
    # Strip any <SEC-HEADER ...> / </SEC-HEADER> wrapper tags.
    raw_text = re.sub(r'<\/?SEC-HEADER.*?>', '', raw_text).strip()
    lines = raw_text.split('\n')
    current_section = ""
    in_address_block = False
    for line in lines:
        line = line.rstrip()
        # Skip blanks and residual markup lines.
        if not line.strip() or line.strip().startswith('<'):
            continue
        if not line.startswith('\t') and not line.startswith(' ') and ':' in line:
            # Top-level "KEY: value" pair; it also starts a new section.
            in_address_block = False
            key, value = [s.strip() for s in line.split(':', 1)]
            output.append(f"**{key}**: {value}\n")
            current_section = key
            if "ADDRESS" in current_section.upper():
                in_address_block = True
        elif line.startswith('\t') or line.startswith(' '):
            # Indented detail line. Bug fix: lines without a colon used to
            # raise ValueError on the two-element unpack; emit them verbatim.
            if ':' not in line:
                output.append(line.strip())
                continue
            key, value = [s.strip() for s in line.split(':', 1)]
            if in_address_block:
                # Inside an address block, keep even empty values as items so
                # the block stays contiguous.
                output.append(f"- **{key}:** {value}")
            else:
                if value:
                    output.append(f"- **{key}:** {value}")
                else:
                    # A bare indented key introduces a sub-group.
                    output.append(f"\n**{key}:**")
        elif not line.startswith('\t') and ':' not in line:
            # Bare non-indented line: treat as a section title.
            in_address_block = False
            section_title = line.strip()
            if section_title:
                output.append(f"\n### {section_title.title()}")
            current_section = section_title
            if "ADDRESS" in current_section.upper():
                in_address_block = True
    return "\n".join(output)
def parse_ims_header(raw_text: str) -> str:
    """
    Parses the IMS-HEADER block from a legacy filing into structured Markdown.

    NOTE(review): the extraction regex below, r"(.*?) ", looks truncated —
    the literal start/end delimiters (presumably the IMS-HEADER open/close
    tags) appear to have been stripped from the pattern, so as written it
    lazily matches up to the first space. Recover the original pattern from
    version history before relying on this function.
    """
    header_match = re.search(r"(.*?) ", raw_text, re.S | re.I)
    if not header_match:
        return ""
    output = ["## Filing Summary"]
    content = header_match.group(1).strip()
    lines = content.split('\n')
    # NOTE(review): in_block is written below but never read — dead state.
    in_block = False
    for line in lines:
        line = line.rstrip()
        if not line.strip():
            continue
        if not line.startswith('\t') and ':' in line:
            # Top-level "KEY: value" pair; empty value means a section title.
            key, value = [s.strip() for s in line.split(':', 1)]
            if value:
                output.append(f"**{key}**: {value}")
            else:
                output.append(f"\n### {key.title()}")
            in_block = False
        elif line.startswith('\t') and ':' in line:
            # Indented "KEY: value" detail line.
            key, value = [s.strip() for s in line.split(':', 1)]
            output.append(f"- **{key}:** {value}")
        elif not line.startswith('\t') and line.strip().endswith(':'):
            # NOTE(review): unreachable — a non-indented line ending in ':'
            # necessarily contains ':' and is caught by the first branch.
            output.append(f"\n### {line.strip().title()}")
            in_block = True
    return "\n\n".join(output)
def parse_nsar_b_txt(raw_text: str) -> str:
"""
Parses both legacy single-series and multi-series plain text Form NSAR-B
filings into structured Markdown. This robust version decodes the answer key
on each line to correctly categorize all data.
"""
NSAR_MAP = {
'001A': "Registrant Name", '001B': "SEC File Number", '001C': "Telephone Number",
'002A': "Street", '002B': "City", '002C': "State", '002D01': "Zip Code",
'003': "Is Registrant a Small Business Investment Company?",
'004': "Is Registrant a Unit Investment Trust?", '005': "Is Registrant a Separate Account?",
'006': "Is Registrant a Non-diversified Company?", '007A': "Is Registrant a Series Company?",
'019A': "Is registrant a series company?", '019B': "Number of series", '019C': "Family of investment companies name",
'021': "Total Broker Commissions Paid ($000)", '024': "Is registrant a diversified investment company?",
'071A': "Total income ($000)", '071B': "Total expenses ($000)", '071C': "Net investment income ($000)",
'071D': "Net gains or (losses) ($000)", '074F': "Total Investments ($000)", '074N': "Total liabilities ($000)",
'074T': "Net assets ($000)", '075A': "Number of shares outstanding", '075B': "Net asset value per share",
'080C': "Fidelity Bond Coverage Amount ($)",
'081A': "Fidelity bond in effect?", '081B': "Fidelity bond coverage amount ($000)",
'082A': "Were any claims filed under fidelity bond?", '082B': "Amount of claims ($)",
'083A': "Any uncollectible advisory fees?", '084A': "Any uncollectible underwriting commissions?",
'085A': "Has registrant acquired another investment company?", '085B': "Has registrant been acquired by another?"
}
TABLE_SPECS = {
'007': {"name": "Series Information", "cols": {'C01': "Series Number", 'C02': "Series Name", 'C03': "Is this the last filing for this series?"}},
'008': {"name": "Investment Advisers", "cols": {'A': "Name", 'B': "Type", 'C': "File No.", 'D01': "City", 'D02': "State"}},
'010': {"name": "Custodians", "cols": {'A': "Name", 'B': "File No.", 'C01': "City", 'C02': "State"}},
'011': {"name": "Principal Underwriters", "cols": {'A': "Name", 'B': "File No.", 'C01': "City", 'C02': "State"}},
'012': {"name": "Transfer Agents", "cols": {'A': "Name", 'B': "File No.", 'C01': "City", 'C02': "State"}},
'013': {"name": "Independent Public Accountants", "cols": {'A': "Name", 'B01': "City", 'B02': "State"}},
'014': {"name": "Brokers", "cols": {'A': "Name", 'B': "File No."}},
'015': {"name": "Sub-Custodians", "cols": {'A': "Name", 'B': "Type", 'C01': "City", 'C02': "State", 'E01': "Holds Assets?"}},
'020': {"name": "Top 10 Brokers by Commission", "cols": {'A': "Broker Name", 'B': "IRS No.", 'C': "Commissions Paid ($000)"}},
'022': {"name": "Securities Depositories", "cols": {'A': "Depository Name", 'B': "IRS No.", 'C': "Value of Securities ($000)", 'D': "Amount of Deposits ($000)"}},
}
def _format_nsar_value(label: str, value: object) -> str:
if pd.isna(value) or str(value).strip() in ("N/A", "", "—", "nan"): return "—"
clean = str(value).strip().replace(",", "")
if not clean: return "—"
if "Telephone Number" in label and re.fullmatch(r"\d{10,11}", clean):
digits = clean[-10:]
return f"{digits[:3]}-{digits[3:6]}-{digits[6:]}"
if "SEC File Number" in label and re.fullmatch(r"811-\d{1,5}", clean):
prefix, num = clean.split("-")
return f"{prefix}-{int(num):05d}"
if "Net asset value per share" in label:
try: return f"${float(clean):.2f}"
except (ValueError, TypeError): return clean
if "($000)" in label:
try: return f"${int(float(clean)):}"
except (ValueError, TypeError): return clean
if "($)" in label:
try: return f"${int(float(clean)):}"
except (ValueError, TypeError): return clean
if clean.upper() in ("Y", "N", "X"): return "Yes" if clean.upper() in ("Y", "X") else "No"
if re.match(r'^-?[\d.]+$', clean):
try: return f"{int(float(clean)):}"
except (ValueError, TypeError): return clean
return str(value).strip()
pem_pattern = r'-----BEGIN PRIVACY-ENHANCED MESSAGE-----(.*?)-----END PRIVACY-ENHANCED MESSAGE-----'
text_inside_pem = re.search(pem_pattern, raw_text, re.DOTALL)
if not text_inside_pem:
content = re.sub(r'.*? |.*? ', '', raw_text, flags=re.DOTALL).strip()
else:
content = text_inside_pem.group(1)
content = re.sub(r'.*? ', '', content, flags=re.DOTALL)
content = re.sub(r'.*?\n| ', '', content, flags=re.DOTALL)
lines = [line for line in content.strip().splitlines() if line.strip() and not line.strip().startswith(('', 'SIGNATURE', 'TITLE'))]
if not lines: return ""
registrant_data = {}
series_data = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
answer_key_re = re.compile(
r"^(?P- \d{3})"
r"(?P
[A-Z ]{2})"
r"(?P\d{2})"
r"(?P\d{2})"
r"(?P\d{2})\s+"
r"(?P.*\S)?$"
)
first_series_id = None
for line in lines:
match = answer_key_re.match(line)
if not match: continue
parts = match.groupdict()
item, letter, series_id, rep_id, value = \
parts['item'], parts['letter'].strip(), parts['series'], int(parts['rep']), parts.get('value')
full_letter = letter + parts['sub'] if parts['sub'] != "00" else letter
if item == '007' and letter == 'C' and first_series_id is None:
first_series_id = parts['sub']
if rep_id > 0:
series_data[series_id][item][rep_id][full_letter] = value
elif series_id == '00':
FINANCIAL_ITEMS = {'024', '071', '072', '073', '074', '075', '076', '077'}
if item in FINANCIAL_ITEMS and first_series_id:
series_data[first_series_id][item][1][full_letter] = value
else:
registrant_data[item + full_letter] = value
else:
series_data[series_id][item][1][full_letter] = value
md_parts = ["## Form NSAR-B: Semi-Annual Report for Registered Investment Companies"]
md_parts.append("\n### General Information")
for key in ['001A', '001B', '001C', '019A', '019B', '019C']:
if key in registrant_data:
md_parts.append(f"**{NSAR_MAP[key]}**: {_format_nsar_value(NSAR_MAP[key], registrant_data.get(key))}")
addr_parts = [registrant_data.get(k) for k in ['002A', '002B', '002C', '002D01']]
if any(p for p in addr_parts if p and _format_nsar_value('', p) != '—'):
md_parts.append(f"**Address:** {', '.join(p for p in addr_parts if p and _format_nsar_value('', p) != '—')}")
for key in ['003', '004', '005', '006', '007A']:
if key in registrant_data: md_parts.append(f"**{NSAR_MAP[key]}**: {_format_nsar_value(NSAR_MAP[key], registrant_data[key])}")
registrant_tables = series_data.get('00', {})
for item_num, spec in TABLE_SPECS.items():
if item_num in registrant_tables:
table_rows = [row for rep_id, row in sorted(registrant_tables[item_num].items())]
df_table = pd.DataFrame(table_rows).rename(columns=spec['cols'])
expected_cols = list(spec['cols'].values())
df_table = df_table.reindex(columns=expected_cols, fill_value="—")
for col in df_table.columns: df_table[col] = df_table[col].apply(lambda x: _format_nsar_value(col, x))
if not df_table.empty:
md_parts.append(f"\n### {spec['name']}\n\n{to_compact_markdown(df_table, index=False)}")
for series_id_str in sorted(series_data.keys()):
if series_id_str == '00': continue
header_sgml_match = re.search(rf"S\d+?{series_id_str} \s*([^<]+)", raw_text, re.I)
series_name = header_sgml_match.group(1).strip() if header_sgml_match else f"Series {series_id_str}"
series_items = series_data[series_id_str]
if any(item in series_items for item in ['024', '071', '074', '075']):
md_parts.append(f"\n### {series_name}")
md_parts.append("\n**Financial Highlights**")
for item_code in sorted(series_items.keys()):
if item_code in NSAR_MAP:
value_dict = series_items[item_code].get(1, {})
value = next(iter(value_dict.values()), "—")
md_parts.append(f"**{NSAR_MAP[item_code]}**: {_format_nsar_value(NSAR_MAP[item_code], value)}")
md_parts.append("\n### Registrant Totals & Other Information")
for key in ['021', '080C', '081A', '081B', '082A', '082B', '083A', '084A', '085A', '085B']:
if key in registrant_data: md_parts.append(f"**{NSAR_MAP.get(key, key)}**: {_format_nsar_value(NSAR_MAP.get(key, key), registrant_data[key])}")
sig_match = re.search(r"SIGNATURE\s+([^\n]+)\nTITLE\s+([^\n]+)", content, re.S)
if not sig_match: sig_match = re.search(r"SIGNATURE\s+(.*?)\s+TITLE\s+(.*)", content, re.S)
if sig_match:
signature, title = sig_match.groups()
md_parts.append(f"\n---\n\n**Signature**: {signature.strip()} \n**Title**: {title.strip()}")
for exhibit_match in re.finditer(r".*?(EX-[^<\n]+)(.*?) ", raw_text, re.S | re.I):
ex_type, ex_content_full = exhibit_match.groups()
ex_type = ex_type.strip()
text_match = re.search(r"(.*)", ex_content_full, re.S | re.I)
text = text_match.group(1).strip() if text_match else ex_content_full.strip()
if "" in text and "" in text:
md_parts.append(f"\n## {ex_type}\n\n```text\n{text}\n```")
else:
md_parts.append(f"\n## {ex_type}\n\n{text}")
return "\n\n".join(p for p in md_parts if p and p.strip()).replace("", "")
def tag_spans_in_table_soup(table_soup: BeautifulSoup):
    """
    Append unique text placeholders (##ROWSPAN_n## / ##COLSPAN_n##, where
    n is the span size) to cells that span multiple rows or columns, so
    merged-cell information survives conversion to plain text and can be
    re-applied later. Modifies the soup in place.
    """
    for cell in table_soup.find_all(['td', 'th']):
        rowspan = cell.get('rowspan')
        if rowspan:
            try:
                span_val = int(rowspan)
                if span_val > 1:
                    placeholder = table_soup.new_string(f"##ROWSPAN_{span_val}##")
                    cell.append(placeholder)
            except (ValueError, TypeError):
                # Bug fix: this used to `continue`, which skipped the
                # colspan handling below whenever the rowspan attribute
                # was non-numeric. Fall through instead (the sibling
                # colspan_rowspan_tag already uses `pass` here).
                pass
        colspan = cell.get('colspan')
        if colspan:
            try:
                span_val = int(colspan)
                if span_val > 1:
                    placeholder = table_soup.new_string(f"##COLSPAN_{span_val}##")
                    cell.append(placeholder)
            except (ValueError, TypeError):
                pass
def colspan_rowspan_tag(table_soup):
    """
    Tag cells that declare a rowspan/colspan greater than 1 by appending
    ##ROWSPAN_<idx>## / ##COLSPAN_<idx>## sentinels, where <idx> is the
    cell's position in document order (NOT the span size — compare
    tag_spans_in_table_soup, which encodes the span value instead).
    Non-numeric span attributes are ignored. Modifies the soup in place.
    """
    for idx, cell in enumerate(table_soup.find_all(['td', 'th'])):
        colspan_tag = cell.get('colspan')
        rowspan_tag = cell.get('rowspan')
        if rowspan_tag:
            try:
                if int(rowspan_tag) > 1:
                    cell.append(f'##ROWSPAN_{idx}##')
            except (TypeError, ValueError):
                pass
        if colspan_tag:
            try:
                if int(colspan_tag) > 1:
                    cell.append(f'##COLSPAN_{idx}##')
            except (TypeError, ValueError):
                pass
def protect_special_chars_in_tables(soup: BeautifulSoup):
    """
    Replace asterisk runs (***, **, *) in every text node of the document
    with placeholder tokens so they cannot later be misread as Markdown
    emphasis. Despite the name, this walks ALL text nodes — tables,
    paragraphs, font tags, everything — and edits them in place.
    """
    # Longest run first, so *** is not consumed as ** + *.
    replacements = (
        ('***', '##TRIPLE_ASTERISK##'),
        ('**', '##DOUBLE_ASTERISK##'),
        ('*', '##SINGLE_ASTERISK##'),
    )
    for node in soup.find_all(string=True):
        original = str(node)
        protected = original
        for needle, token in replacements:
            protected = protected.replace(needle, token)
        if protected != original:
            node.replace_with(protected)
def protect_numeric_list_items(html_content: str) -> str:
    """
    Wrap table cells whose entire content is a bare numbered marker
    (e.g. "1.") in a ##PROTECT_...## placeholder so the number is not
    later mistaken for a Markdown ordered-list item. Returns the
    re-serialized HTML string.
    """
    try:
        soup = BeautifulSoup(html_content, "lxml")
    except ValueError as e:
        # lxml can crash on severely malformed attributes; retry with the
        # forgiving built-in parser for that specific failure only.
        if "not enough values to unpack" not in str(e):
            raise
        print(f"[Warning] lxml parser crashed on malformed attributes. Falling back to html.parser.")
        soup = BeautifulSoup(html_content, "html.parser")
    for cell in soup.find_all(['td', 'th']):
        cell_text = cell.get_text(strip=True)
        if re.fullmatch(r'\s*\d+\.\s*', cell_text):
            cell.string = f"##PROTECT_{cell_text}##"
    return str(soup)
def _calculate_effective_indent(tag) -> dict:
"""
Parses the style attribute of a tag to calculate the effective left indentation
and returns it along with the font size for normalization. This version
approximates percentage-based indents.
"""
style = tag.get('style', '').lower().strip()
if not style:
return {'indent': 0.0, 'font_size': None}
prop_re = re.compile(r'([\w-]+)\s*:\s*([^;]+)')
declarations = prop_re.findall(style)
DEFAULT_CONTAINER_WIDTH_PT = 612.0
conversions = {'in': 72.0, 'pt': 1.0, 'px': 0.75, 'em': 12.0}
margin_left_pt = 0.0
padding_left_pt = 0.0
text_indent_pt = 0.0
font_size_pt = None
def parse_value(v_str):
match = re.search(r'(-?\d*\.?\d+)(in|pt|px|em|%)?', v_str)
if match:
try:
num = float(match.group(1))
unit = match.group(2) if match.group(2) else 'pt'
if unit == '%':
return (num / 100.0) * DEFAULT_CONTAINER_WIDTH_PT
return num * conversions.get(unit, 1.0)
except (ValueError, TypeError):
return 0.0
return 0.0
for prop, val_str in declarations:
if prop == 'font-size':
font_size_pt = parse_value(val_str)
elif prop == 'margin-left':
margin_left_pt = parse_value(val_str)
elif prop == 'padding-left':
padding_left_pt = parse_value(val_str)
elif prop == 'text-indent':
text_indent_pt = parse_value(val_str)
elif prop == 'margin':
values = val_str.split()
if len(values) == 1: margin_left_pt = parse_value(values[0])
elif len(values) == 2: margin_left_pt = parse_value(values[1])
elif len(values) >= 4: margin_left_pt = parse_value(values[3])
elif prop == 'padding':
values = val_str.split()
if len(values) == 1: padding_left_pt = parse_value(values[0])
elif len(values) == 2: padding_left_pt = parse_value(values[1])
elif len(values) >= 4: padding_left_pt = parse_value(values[3])
return {
'indent': margin_left_pt + padding_left_pt + text_indent_pt,
'font_size': font_size_pt
}
def _normalize_list_indentation(soup: BeautifulSoup):
    """
    Normalize visually indented list items.

    Any block element whose text begins with a list marker (bullet glyph,
    "1.", "a)", "(iv)", ...) has its CSS left indentation converted into
    repeated ##INDENT## text placeholders, after which the element is
    renamed to 'p' and stripped of its attributes. Also merges fragmented
    bullets where the bullet glyph and its text sit in two adjacent
    font/span children of the same block.
    """
    LIST_MARKER_RE = re.compile(
        r"""(?ix) # Use case-insensitive and verbose flags
        ^ \s* (?: # Start of line, optional space, and main non-capturing group
        # Case 1: Simple bullet characters that can be immediately followed by non-space characters.
        [○•●·◦➢▪]
        | # OR
        # Case 2: More complex markers that MUST be followed by a separator.
        (?: # Group for the complex markers themselves
        \d+ \. \d [\d\.]* # Multi-level numbers like 1.2.3
        | \d+ \. (?!\d) # Numbers with a dot, NOT followed by another digit (e.g., "1.")
        | [a-z] [\.\)] # Letters like a. or a)
        | [ivxlcdm]+ \. # Roman numerals like i.
        | \( [a-z0-9]+ \) # Parenthesized markers like (a) or (1)
        )
        # The required separator for complex markers (space, placeholder, or end-of-line)
        (?: \s+ | \#\# | $ )
        ) # End of the main non-capturing group
        """,
        re.I | re.VERBOSE
    )
    # Assumed body-text size when no font-size is declared, and the em
    # width treated as one indentation level.
    DEFAULT_FONT_SIZE_PT = 10.0
    STANDARD_INDENT_EM = 1.2
    potential_list_items = soup.find_all(['p', 'div', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6'])
    for tag in potential_list_items:
        # Merge "bullet glyph in one child, text in the next" fragments by
        # inserting a space between them so get_text() yields "• text".
        children = tag.find_all(['font', 'span'], recursive=False)
        if len(children) > 1:
            first_child, second_child = children[0], children[1]
            is_bullet_fragment = (
                re.fullmatch(r'\s*[•●·◦➢▪]\s*', first_child.get_text()) and
                second_child.get_text(strip=True)
            )
            if is_bullet_fragment:
                style = second_child.get('style', '')
                if 'padding-left' in style:
                    first_child.insert_after(NavigableString(" "))
        text = tag.get_text(separator=' ', strip=True)
        if not LIST_MARKER_RE.match(text):
            continue
        # Convert the measured CSS indent (points) into whole ##INDENT##
        # levels, normalized by the element's font size (i.e. em units).
        indent_info = _calculate_effective_indent(tag)
        indent_pt = indent_info['indent']
        font_size_pt = indent_info.get('font_size') or DEFAULT_FONT_SIZE_PT
        if font_size_pt > 0:
            indent_em = indent_pt / font_size_pt
            level = int(max(0, round(indent_em / STANDARD_INDENT_EM)))
        else:
            level = 0
        if level > 0:
            indent_prefix = soup.new_string('##INDENT##' * (level))
            tag.insert(0, indent_prefix)
        tag.name = 'p'
        tag.attrs = {}
def is_cell_truly_empty(cell_value):
    """
    True when a table cell holds no visible content once all internal
    placeholder tokens, non-printing characters and dash glyphs have been
    stripped away.
    """
    if pd.isna(cell_value):
        return True
    text = re.sub(
        r'##(SUP|/SUP|SUB|/SUB|BOLD_START_\d+|BOLD_END_\d+|U_START_\d+|U_END_\d+|ITALIC_START_\d+|ITALIC_END_\d+|ROWSPAN_\d+|COLSPAN_\d+|LINK_START_\d+__[^#]+|LINK_END_\d+)##',
        '', str(cell_value))
    # Strip remaining invisible/decorative content: literal newline
    # placeholders, NBSP, invisible separator, and the dash family.
    for invisible in ('##NEWLINE##', '\u00A0', '\u2063', '—', '–', '-'):
        text = text.replace(invisible, '')
    return text == '' or not text.strip()
def is_numeric_like(s: str) -> bool:
    """
    True when the string can be read as a number after removing internal
    placeholder tokens, residual HTML tags and common financial
    decoration ($, commas, parentheses, a trailing %). Bare dashes
    (em/en/hyphen) and the empty string also count, since tables use them
    as numeric nulls. Non-strings are never numeric-like.
    """
    if not isinstance(s, str):
        return False
    stripped = re.sub(
        r'##(SUP|/SUP|SUB|/SUB|BOLD_START_\d+|BOLD_END_\d+|U_START_\d+|U_END_\d+|ITALIC_START_\d+|ITALIC_END_\d+|ROWSPAN_\d+|COLSPAN_\d+|LINK_START_\d+__[^#]+|LINK_END_\d+)##',
        '', s)
    stripped = stripped.replace('##NEWLINE##', '').replace('\u2063', '')
    stripped = stripped.replace('\u00A0', ' ')          # NBSP -> plain space
    stripped = re.sub(r'<.*?>', '', stripped)           # drop residual HTML tags
    for decoration in ('$', ',', '(', ')', ' '):
        stripped = stripped.replace(decoration, '')
    stripped = re.sub(r'(?<=\d)%', '', stripped.strip())  # "12%" -> "12"
    stripped = stripped.strip()
    if stripped in ('—', '–', '-', ''):
        return True
    try:
        float(stripped)
        return True
    except (ValueError, TypeError):
        return False
def pre_fix_document_structure(soup: BeautifulSoup):
    """
    Repair gross structural problems before the main parse: header cells
    sitting directly under a table (no row wrapper) are gathered into a
    new first row, and stray line-break tags that appear as direct
    children of ordered/unordered lists are removed. Returns the same
    soup, modified in place.
    """
    for table in soup.find_all("table"):
        orphan_headers = table.find_all('th', recursive=False)
        if not orphan_headers:
            continue
        header_row = soup.new_tag("tr")
        for header_cell in orphan_headers:
            header_row.append(header_cell.extract())
        table.insert(0, header_row)
    for list_element in soup.find_all(['ol', 'ul']):
        for stray_break in list_element.find_all('br', recursive=False):
            stray_break.decompose()
    return soup
def convert_styled_inline_divs_to_spans(soup: BeautifulSoup):
    """
    Rename every div whose inline style declares display:inline to a span,
    so later passes treat it as inline content rather than a block-level
    element, while its style attribute is kept intact.
    """
    inline_display_re = re.compile(r'display\s*:\s*inline', re.IGNORECASE)
    for div in soup.find_all('div', style=True):
        if inline_display_re.search(div.get('style', '')):
            div.name = 'span'
def convert_vertical_align_superscripts(soup: BeautifulSoup):
    """
    Replace elements that simulate superscripts via CSS
    (`vertical-align: super|top` together with an explicit font-size
    declaration) with ##SUP##...##/SUP## text placeholders for uniform
    downstream handling. Note: only the *presence* of a font-size
    declaration is checked, not that the size is actually reduced.
    """
    superscript_re = re.compile(r'vertical-align\s*:\s*(super|top)', re.IGNORECASE)
    for element in soup.find_all(['div', 'span', 'font'], style=True):
        css = element.get('style', '')
        if not superscript_re.search(css) or 'font-size' not in css.lower():
            continue
        inner_text = element.get_text(strip=True)
        if inner_text:
            element.replace_with(NavigableString(f"##SUP##{inner_text}##/SUP##"))
def is_positioned_container(tag: Tag) -> bool:
    """
    Heuristic: does *tag* contain a very large number of absolutely or
    relatively positioned div-like elements (and no tables), indicating a
    visually laid-out "island" that should be OCR'd rather than parsed
    semantically?
    """
    # Containers holding a real table are parseable semantically.
    if tag.find('table'):
        return False
    POSITIONED_DIV_THRESHOLD = 1000
    # NOTE(review): this pattern looks truncated by an earlier text
    # extraction (it begins with "]+style" — presumably '<div[^>]+style'
    # originally). Left byte-identical; confirm against version control.
    div_pattern = r']+style\s*=\s*".*?position\s*:\s*(?:absolute|relative).*?left\s*:.*?"'
    matches = re.findall(div_pattern, str(tag), re.IGNORECASE | re.DOTALL)
    return len(matches) > POSITIONED_DIV_THRESHOLD
def is_document_layout_positioned(soup: BeautifulSoup) -> bool:
    """
    Decide whether the document uses a positioned (visual) layout instead
    of semantic markup. Fast path: more than three divs whose ids match
    the known page-container pattern 'pf...'. Slow path: over a thousand
    position:absolute elements combined with relatively few tables.
    """
    # Fast path: page-container divs are a strong positioned-layout signal.
    if len(soup.find_all('div', id=re.compile(r'^pf\w+$'))) > 3:
        return True
    POSITIONED_ELEMENT_THRESHOLD = 1000
    MAX_TABLES_THRESHOLD = 20
    absolute_re = re.compile(r'position\s*:\s*absolute', re.IGNORECASE)
    positioned_count = len(soup.find_all(style=absolute_re))
    table_count = len(soup.find_all('table'))
    return positioned_count > POSITIONED_ELEMENT_THRESHOLD and table_count < MAX_TABLES_THRESHOLD
def handle_width_indented_list_table(table_element, output_list: list) -> bool:
    """
    Deterministically flatten a 3-column "spacer | bullet | text" table
    into Markdown-ready list lines. The width of the spacer cell in the
    first content row decides (once, for the whole table) whether every
    item gets an ##INDENT## prefix. Appends the rendered block to
    *output_list* and returns True when the table matched; returns False
    (leaving output_list untouched) otherwise.
    """
    if not any(bullet in table_element.get_text(strip=True) for bullet in ('●', '○')):
        return False
    rows = table_element.find_all('tr', recursive=False)
    if not rows:
        return False

    INDENTATION_THRESHOLD_PX = 100
    indented = False
    first_content_row = next((r for r in rows if r.get_text(strip=True)), None)
    if first_content_row is not None:
        probe_cells = first_content_row.find_all('td', recursive=False)
        if len(probe_cells) == 3 and not probe_cells[0].get_text(strip=True):
            # The width may be declared in the style or the width attribute.
            width_source = probe_cells[0].get('style', '') + probe_cells[0].get('width', '')
            width_match = re.search(r'width\s*:\s*([\d\.]+)', width_source)
            if width_match:
                try:
                    indented = float(width_match.group(1)) > INDENTATION_THRESHOLD_PX
                except ValueError:
                    pass

    prefix = "##INDENT##" if indented else ""
    items = []
    for row in rows:
        if not row.get_text(strip=True):
            continue
        cells = row.find_all('td', recursive=False)
        # Only "empty-spacer | marker | content" rows are list items.
        if len(cells) != 3 or cells[0].get_text(strip=True):
            continue
        marker = cells[1].get_text(strip=True)
        body = re.sub(r'\s+', ' ', cells[2].get_text(separator=' ', strip=True)).strip()
        if marker or body:
            items.append(f"{prefix}{marker} {body}")
    if not items:
        return False
    output_list.append("\n\n".join(items) + "\n\n")
    return True
def fix_inverted_bold_paragraphs(soup: BeautifulSoup):
    """
    Correct inverted nesting where a bold tag (b/strong) wraps a
    paragraph carrying all of its text: rebuild the structure as a
    paragraph containing a bold tag with the same text — the shape
    downstream logic can process.
    """
    for bold in soup.find_all(['b', 'strong']):
        inner_p = bold.find('p')
        if inner_p is None:
            continue
        # Only rewrite when the paragraph holds ALL of the bold tag's text.
        if bold.get_text(strip=True) != inner_p.get_text(strip=True):
            continue
        text = inner_p.get_text(separator=' ', strip=True)
        replacement_p = soup.new_tag('p')
        replacement_b = soup.new_tag('b')
        replacement_b.string = text
        replacement_p.append(replacement_b)
        bold.replace_with(replacement_p)
def fix_malformed_inline_paragraphs(html_content: str) -> str:
    """
    Iteratively swap mis-nested inline/paragraph tag pairs in the raw HTML
    string (a paragraph tag incorrectly nested just inside an inline
    formatting tag such as b/i/u/strong/em) until a fixed point is
    reached, so BeautifulSoup later sees a valid structure. Runs on the
    raw string BEFORE parsing.
    """
    # NOTE(review): both patterns below appear truncated by an earlier
    # text-extraction step (the paragraph-tag halves of the alternations
    # are missing and the literals now span lines oddly). Kept
    # byte-identical; verify against version control before editing.
    open_pattern = re.compile(
        r'(<(?:b|i|u|strong|em)(?: [^>]*)?>\s*)(
]*)?>)',
        re.IGNORECASE
    )
    close_pattern = re.compile(
        r'(
\s*)((?:b|i|u|strong|em)>)',
        re.IGNORECASE
    )
    # Repeat the swap until the document stops changing (a fixed point).
    while True:
        new_content = open_pattern.sub(r'\2\1', html_content)
        new_content = close_pattern.sub(r'\2\1', new_content)
        if new_content == html_content:
            break
        html_content = new_content
    return html_content
def _fix_escaped_malformed_font_tag(html_string: str) -> str:
    """
    Targeted repair for a known artifact where a malformed '< FONT' tag is
    incorrectly escaped by BeautifulSoup during the initial parse.
    """
    # NOTE(review): the re.sub call below looks truncated by an earlier
    # text-extraction step (the replacement string and input argument are
    # missing/split across lines). Kept byte-identical; restore from
    # version control before relying on this function.
    fixed_string = re.sub(r'<\s+FONT', '
')
    return fixed_string
def defragment_adjacent_tags(soup: BeautifulSoup, tags_to_merge: list):
    """
    Merge runs of adjacent sibling tags whose names appear in
    *tags_to_merge*, optionally separated by a single whitespace-only text
    node (preserved as one literal space inside the merged tag). Uses an
    index-based while loop so the child list can be mutated safely, and
    .extract() so moved nodes are detached rather than duplicated.
    """
    for parent in soup.find_all(True):
        i = 0
        while i < len(parent.contents) - 1:
            current_node = parent.contents[i]
            if getattr(current_node, 'name', None) not in tags_to_merge:
                i += 1
                continue
            next_node = parent.contents[i + 1]
            whitespace_node = None
            real_next_node = next_node
            # Look through a single whitespace-only text node separating
            # the two candidate tags.
            if isinstance(next_node, NavigableString) and not next_node.strip():
                whitespace_node = next_node
                if i + 2 < len(parent.contents):
                    real_next_node = parent.contents[i + 2]
                else:
                    i += 1
                    continue
            if getattr(real_next_node, 'name', None) in tags_to_merge:
                if whitespace_node:
                    current_node.append(NavigableString(" "))
                for child in list(real_next_node.contents):
                    current_node.append(child.extract())
                real_next_node.extract()
                if whitespace_node:
                    whitespace_node.extract()
                # i is deliberately NOT advanced here, so the merged tag can
                # absorb further adjacent siblings on the next iteration.
            else:
                i += 1
def dedupe_adjacent_containers(lst):
    """Collapse each run of equal consecutive items into a single item."""
    deduped = []
    for item in lst:
        if deduped and item == deduped[-1]:
            continue  # same as the previous kept item: skip the duplicate
        deduped.append(item)
    return deduped
def parse_positioned_html_islands_via_ocr(soup: BeautifulSoup):
    """
    Finds "islands" of positioned HTML, renders each to a PDF with a
    headless browser, runs the PDF through the OCR pipeline, and returns
    (markdown, handled) — markdown is None and handled False when there
    is nothing to process or no API keys are configured. Nested islands
    are dropped so each page is processed only once. Also updates the
    module-global LAST_POSITIONED_HTML_OCR_PAGE_COUNT.
    """
    global LAST_POSITIONED_HTML_OCR_PAGE_COUNT
    LAST_POSITIONED_HTML_OCR_PAGE_COUNT = 0
    all_containers = [div for div in soup.find_all("div") if is_positioned_container(div)]
    if not all_containers:
        return None, False
    # Keep only outermost islands: drop any container nested inside another.
    top_level_containers = [c for c in all_containers if not any(p in all_containers for p in c.parents)]
    if not top_level_containers:
        return None, False
    print(f"--> Identified {len(top_level_containers)} unique positioned HTML island(s) to process via PDF conversion.")
    style_tags = soup.head.find_all('style') if soup.head else soup.find_all('style')
    css_styles = "\n".join(style.string for style in style_tags if style.string)
    if not _has_mistral_api_keys():
        print(f"[Error] {_mistral_no_keys_message()}")
        return None, False
    start_time = time.time()
    TIME_LIMIT_SECONDS = Config.PDF_TIMEOUT_LIMIT * 60
    md_parts = []
    parsed_page_count_total = 0
    with sync_playwright() as p:
        browser = p.chromium.launch()
        page = browser.new_page()
        for i, container_div in enumerate(top_level_containers):
            print(f"--> Processing island {i + 1} of {len(top_level_containers)} (ID: {container_div.get('id')})...")
            try:
                # NOTE(review): this wrapper template looks truncated by an
                # earlier text-extraction step (no html/style scaffolding,
                # and css_styles collected above is never interpolated).
                # Kept byte-identical; verify against version control.
                temp_html = f"""
                {str(container_div)}
                """
                page.set_content(temp_html)
                pdf_bytes = page.pdf(format='A4')
                page_results, timed_out, _parsed_page_count = _process_pdf_bytes_with_fallback(
                    pdf_bytes=pdf_bytes,
                    file_name=f"html_island_{i+1}.pdf",
                    batch_size=Config.PDF_BATCH_SIZE,
                    mistral_api_key=None,
                    per_table_sleep_s=Config.PER_TABLE_SLEEP_SECONDS,
                    start_time=start_time,
                    time_limit_s=TIME_LIMIT_SECONDS
                )
                parsed_page_count_total += int(_parsed_page_count or 0)
                parsed_markdown = "\n\n".join(res.get('content', '') for res in page_results if res.get('content'))
                md_parts.append(parsed_markdown)
                print(f"--> Island {i + 1} successfully parsed.")
            except Exception as e:
                error_msg = f"Failed to process positioned HTML island {i + 1}: {e}"
                print(f"[Error] {error_msg}")
                traceback.print_exc()
                # NOTE(review): appends an empty f-string — the original
                # likely embedded error_msg in an HTML comment; confirm.
                md_parts.append(f"")
        browser.close()
    final_markdown = "\n\n------\n\n".join(md_parts)
    LAST_POSITIONED_HTML_OCR_PAGE_COUNT = parsed_page_count_total
    return final_markdown, True
def parse_html_via_pdf_render(html_content: str, file_name_for_logging: str) -> "tuple[str, bool]":
    """
    High-quality OCR-based parser for any HTML document identified as
    having a positioned layout. Renders the whole document to a PDF in
    memory (headless Chromium) and extracts text/tables via the OCR
    pipeline.

    Returns a (markdown, handled) pair — every code path returns a
    2-tuple, so the previous `-> str` annotation was incorrect.
    Also updates the module-global LAST_POSITIONED_HTML_OCR_PAGE_COUNT.
    """
    global LAST_POSITIONED_HTML_OCR_PAGE_COUNT
    LAST_POSITIONED_HTML_OCR_PAGE_COUNT = 0
    _load_sec_parser_env()
    if not _has_mistral_api_keys():
        print(f"{_mistral_no_keys_message()} Skipping OCR processing.")
        return "", True
    start_time = time.time()
    TIME_LIMIT_SECONDS = Config.PDF_TIMEOUT_LIMIT * 60
    print(f"--> Rendering full document '{file_name_for_logging}' to PDF for OCR processing...")
    try:
        with sync_playwright() as p:
            browser = p.chromium.launch()
            page = browser.new_page()
            page.set_content(html_content)
            pdf_bytes = page.pdf(format='A4', print_background=True)
            browser.close()
        page_results, timed_out, _parsed_page_count = _process_pdf_bytes_with_fallback(
            pdf_bytes=pdf_bytes,
            file_name=file_name_for_logging,
            batch_size=Config.PDF_BATCH_SIZE,
            mistral_api_key=None,
            per_table_sleep_s=Config.PER_TABLE_SLEEP_SECONDS,
            start_time=start_time,
            time_limit_s=TIME_LIMIT_SECONDS
        )
        LAST_POSITIONED_HTML_OCR_PAGE_COUNT = int(_parsed_page_count or 0)
        if timed_out:
            print(f"[timeout] Processing for '{file_name_for_logging}' timed out.")
        parsed_markdown = "\n\n".join(res.get('content', '') for res in page_results if res.get('content'))
        return parsed_markdown, True
    except Exception as e:
        error_msg = f"Failed to render or process positioned HTML file '{file_name_for_logging}': {e}"
        print(f"[Error] {error_msg}")
        traceback.print_exc()
        # NOTE(review): returns an empty f-string — the original probably
        # embedded error_msg in an HTML comment; confirm in version control.
        return f"", True
def convert_margin_layout_to_table(soup: BeautifulSoup):
    """
    Finds consecutive float:left divs that simulate a two-column layout
    via a large margin-left on the second font/span child, and converts
    each such run into a single two-column table. Handles documents that
    build multi-column text out of CSS margins instead of table markup.
    Modifies the soup in place.
    """
    potential_blocks = soup.find_all('div', style=re.compile(r'float:left'))
    i = 0
    while i < len(potential_blocks):
        start_block = potential_blocks[i]
        # Collect the run of consecutive sibling float:left divs.
        row_divs = [start_block]
        current = start_block
        while True:
            next_sibling = current.find_next_sibling('div')
            if next_sibling and next_sibling in potential_blocks:
                row_divs.append(next_sibling)
                current = next_sibling
            else:
                break
        table_rows_data = []
        is_target_pattern = True
        for row_div in row_divs:
            inner_div = row_div.find('div', recursive=False)
            if not inner_div:
                is_target_pattern = False
                break
            children = inner_div.find_all(['font', 'span'], recursive=False)
            if len(children) != 2:
                is_target_pattern = False
                break
            style = children[1].get('style', '')
            # A >=100pt left margin on the second column is the signature
            # of the two-column margin layout this pass targets.
            margin_match = re.search(r'margin-left\s*:\s*([\d\.]+)\s*pt', style, re.I)
            if not margin_match or float(margin_match.group(1)) < 100:
                is_target_pattern = False
                break
            col1_content = children[0].decode_contents()
            col2_content = children[1].decode_contents()
            table_rows_data.append((col1_content, col2_content))
        if is_target_pattern and table_rows_data:
            new_table = soup.new_tag('table')
            for col1_html, col2_html in table_rows_data:
                tr = soup.new_tag('tr')
                td1 = soup.new_tag('td')
                td2 = soup.new_tag('td')
                # Re-parse the captured inner HTML so it becomes tag nodes.
                td1.extend(BeautifulSoup(col1_html, 'lxml').body.contents)
                td2.extend(BeautifulSoup(col2_html, 'lxml').body.contents)
                tr.append(td1)
                tr.append(td2)
                new_table.append(tr)
            start_block.replace_with(new_table)
            for old_div in row_divs[1:]:
                old_div.decompose()
            # Skip past every div consumed by this table.
            i += len(row_divs)
        else:
            i += 1
def to_compact_markdown(df: pd.DataFrame, **kwargs) -> str:
    """
    Render *df* as token-efficient Markdown: the header separator row is
    shrunk to minimal ':---' / '---:' / ':---:' cells while keeping the
    alignment pandas emitted; separator cells without explicit alignment
    become left-aligned ':---'. Extra keyword arguments are forwarded to
    DataFrame.to_markdown.
    """
    rendered = df.to_markdown(**kwargs)
    if not rendered:
        return ""
    lines = rendered.splitlines()
    if len(lines) < 2:
        return rendered  # no separator row to compact
    compact_cells = []
    for cell in lines[1].strip('|').split('|'):
        cell = cell.strip()
        has_left_colon = cell.startswith(':')
        has_right_colon = cell.endswith(':')
        if has_left_colon and has_right_colon:
            compact_cells.append(':---:')
        elif has_right_colon:
            compact_cells.append('---:')
        else:
            compact_cells.append(':---')
    lines[1] = '|' + '|'.join(compact_cells) + '|'
    return "\n".join(lines)
def _flatten_redundant_nesting(soup: BeautifulSoup):
    """
    Iteratively simplifies deeply nested structures where a tag's only
    significant child is another tag of the same type, hoisting the inner
    tag's children into the outer tag. Repeats until a full pass makes no
    change. Nested font tags are NOT flattened when doing so would lose
    the inner tag's 'face' attribute.
    """
    while True:
        changed = False
        for tag in soup.find_all(['div', 'span', 'font', 'b', 'strong', 'i', 'em']):
            # A "significant" child is any tag, or a non-whitespace text node.
            significant_children = [
                child for child in tag.contents
                if (isinstance(child, NavigableString) and child.strip()) or isinstance(child, Tag)
            ]
            if len(significant_children) == 1:
                inner_child = significant_children[0]
                if isinstance(inner_child, Tag) and tag.name == inner_child.name:
                    if tag.name == 'font':
                        # Flattening would discard inner_child's attributes;
                        # keep the nesting when only the inner tag has 'face'.
                        if inner_child.has_attr('face') and not tag.has_attr('face'):
                            continue
                    tag.clear()
                    for grandchild in list(inner_child.contents):
                        tag.append(grandchild.extract())
                    changed = True
        if not changed:
            break
def convert_div_table_to_html(div_table_soup: Tag) -> str:
    """Converts a div-based table structure into a standard HTML table string."""
    # NOTE(review): the string literals below look truncated by an earlier
    # text-extraction step — the table/tr/td tag text is missing, so the
    # function currently concatenates only cell contents (and the final
    # literal spans two lines). Kept byte-identical; restore the markup
    # from version control before relying on this function.
    html_str = ""
    rows = div_table_soup.find_all(lambda tag: tag.name == 'div' and 'display: table-row' in tag.get('style', ''))
    for row_div in rows:
        html_str += ""
        cells = row_div.find_all(lambda tag: tag.name == 'div' and 'display: table-cell' in tag.get('style', ''))
        for cell_div in cells:
            colspan = cell_div.get('colspan', '1')
            rowspan = cell_div.get('rowspan', '1')
            html_str += f"{cell_div.decode_contents()} "
        html_str += " "
    html_str += "
"
    return html_str
def convert_nested_div_table_to_placeholders(soup: BeautifulSoup):
    """
    Finds a div-based (display:table) table nested inside a table cell,
    parses it into a pandas DataFrame, renders it as compact Markdown,
    collapses the result to a single line (newlines -> ##MD_NEWLINE##,
    pipes stripped), and replaces the original div with that string.
    Unparseable div-tables are removed with a warning.
    """
    for div_table in soup.select('td > div[style*="display: table"]'):
        try:
            div_html = str(div_table)
            temp_soup = BeautifulSoup(div_html, 'lxml')
            # Rename the display:table div hierarchy into real table tags
            # so pandas.read_html can parse it.
            if (table_tag := temp_soup.find('div', style=re.compile(r'display:\s*table'))):
                table_tag.name = 'table'
            for row in temp_soup.find_all('div', style=re.compile(r'display:\s*table-row')):
                row.name = 'tr'
            for cell in temp_soup.find_all('div', style=re.compile(r'display:\s*table-cell')):
                cell.name = 'td'
            df_list = pd.read_html(io.StringIO(str(temp_soup)), flavor="lxml", keep_default_na=False, header=0)
            if not df_list:
                continue
            df = df_list[0]
            df.dropna(how='all', axis=1, inplace=True)
            df.dropna(how='all', axis=0, inplace=True)
            df = df.reset_index(drop=True)
            if df.empty:
                continue
            md_table_string = to_compact_markdown(df, index=False)
            # NOTE(review): pipes are replaced with '' here — the
            # replacement token was probably lost in an earlier
            # text-extraction step; confirm against version control.
            placeholder_string = md_table_string.replace('\n', '##MD_NEWLINE##').replace('|', '')
            div_table.replace_with(NavigableString(placeholder_string))
        except Exception as e:
            print(f"[Warning] Could not process nested div-table. Removing it. Error: {e}")
            div_table.decompose()
def parse_html_filing(html_content: str, form_type: str = "", file_path: Optional[pathlib.Path] = None) -> str:
    """
    Parses HTML content into Markdown using an intelligent text assembly method.

    Pipeline overview:
      1. Regex pre-cleaning of the raw HTML (strip R/ix tags, protect
         sup/sub/pipe/newline characters as ##...## placeholders).
      2. Soup-level structural normalization (div-tables, positioned
         layouts routed to OCR, style→tag conversion for bold/italic/underline).
      3. A single walk over ``body.descendants`` that assembles Markdown
         sections: headings, lists, horizontal rules, and tables (tables go
         through pandas.read_html and a financial-table heuristic).
      4. Final regex cleanup of the joined Markdown.

    NOTE(review): return shapes are inconsistent — most paths return a
    ``(markdown, bool)`` tuple while the OCR / XML delegation paths return
    whatever the delegate returns; the annotated ``-> str`` does not match.
    Confirm against callers before changing.
    """
    global LAST_POSITIONED_HTML_OCR_PAGE_COUNT
    LAST_POSITIONED_HTML_OCR_PAGE_COUNT = 0
    start_time = time.time()
    # Config value is minutes; convert to seconds for the checks below.
    time_limit_s = Config.HTML_TIMEOUT_LIMIT * 60
    timed_out = False
    count = 0
    # Strip stray <R> pseudo-tags (EDGAR redline markers).
    r_tag_pattern = re.compile(
        r'(?:<|\\>)\s*/?\s*\bR\b\s*(?:>|\\<|<)',
        re.IGNORECASE
    )
    html_content = r_tag_pattern.sub('', html_content)
    # Remove inline iXBRL tags. NOTE(review): pattern starts with a bare '?'
    # (likely a stripped '<' from '</?ix:'); confirm against the original file.
    html_content = re.sub(r'?ix:\w+.*?>', '', html_content, flags=re.IGNORECASE)
    # Move leading whitespace out of bold/italic/underline openers so the
    # indentation survives tag processing.
    indent_preservation_pattern = re.compile(
        r'((?:&nbsp;|\s)+)(<(?:b|strong|i|em|u)\b[^>]*>)',
        re.IGNORECASE
    )
    html_content = indent_preservation_pattern.sub(r'\2\1', html_content)
    # Protect superscript/subscript spans as placeholders that survive the
    # text-flattening passes; restored much later in post-processing.
    html_content = re.sub(r']*>', '##SUP##', html_content, flags=re.IGNORECASE)
    html_content = re.sub(r' ', '##/SUP##', html_content, flags=re.IGNORECASE)
    html_content = re.sub(r']*>', '##SUB##', html_content, flags=re.IGNORECASE)
    html_content = re.sub(r' ', '##/SUB##', html_content, flags=re.IGNORECASE)
    # Escape literal pipes (Markdown table delimiter) and protect newlines.
    html_content = html_content.replace("|", r"\|").replace(" ", "##NEWLINE##").replace(" ", "##NEWLINE##")
    html_content = re.sub(r'', '', html_content, flags=re.DOTALL)
    html_content = fix_malformed_inline_paragraphs(html_content)
    # Preserve a single significant space inside <u> / <i> wrappers.
    u_tag_whitespace_pattern = re.compile(
        r'(]*>)(\s* \s*)( )',
        re.IGNORECASE
    )
    html_content = u_tag_whitespace_pattern.sub(r'\1##SPACE##\3', html_content)
    i_tag_whitespace_pattern = re.compile(
        r'(]*>)(\s* \s*)( )',
        re.IGNORECASE
    )
    html_content = i_tag_whitespace_pattern.sub(r'\1##I_SPACE##\3', html_content)
    try:
        soup = BeautifulSoup(html_content, "lxml")
    except ValueError as e:
        # lxml can crash on certain malformed attribute constructs; fall
        # back to the slower but more tolerant pure-Python parser.
        if "not enough values to unpack" in str(e):
            print(f"[Warning] lxml parser crashed on malformed attributes. Falling back to html.parser.")
            soup = BeautifulSoup(html_content, "html.parser")
        else:
            raise
    # --- Soup-level structural normalization ---
    convert_nested_div_table_to_placeholders(soup)
    _flatten_redundant_nesting(soup)
    convert_margin_layout_to_table(soup)
    fix_inverted_bold_paragraphs(soup)
    # Absolutely-positioned documents cannot be linearized; render to PDF
    # and OCR instead.
    if is_document_layout_positioned(soup):
        file_name = file_path.name if file_path else "unknown_file.html"
        print(f"--> Detected positioned layout for '{file_name}'. Routing to OCR-based parser.")
        return parse_html_via_pdf_render(html_content, file_name)
    final_markdown_from_ocr, is_fully_processed = parse_positioned_html_islands_via_ocr(soup)
    if is_fully_processed:
        print("--> Document fully processed by island parser. Bypassing standard HTML parsing.")
        return final_markdown_from_ocr, True
    convert_wingdings_boxes(soup)
    convert_vertical_align_superscripts(soup)
    convert_styled_inline_divs_to_spans(soup)
    soup = pre_fix_document_structure(soup)
    unwrap_fragmenting_tags(soup)
    # Convert leading whitespace of paragraphs into ##INDENT## placeholders
    # (nbsp counts as half an indent, em-space as two).
    for block_tag in soup.find_all(['div', 'p']):
        first_text_node = block_tag.find(string=True)
        if first_text_node and block_tag.get_text(strip=True).startswith(first_text_node.strip()):
            text = str(first_text_node)
            leading_ws_match = re.match(r'^([\s\u00A0\u2003]+)', text)
            if leading_ws_match:
                ws_string = leading_ws_match.group(1)
                indent_level = 0
                for char in ws_string:
                    if char in ['\u00A0', ' ']:
                        indent_level += 0.5
                    elif char == '\u2003':
                        indent_level += 2
                final_indent_level = int(indent_level)
                if final_indent_level > 0:
                    indent_prefix = '##INDENT##' * final_indent_level
                    block_tag.insert(0, NavigableString(indent_prefix))
                    first_text_node.replace_with(text.lstrip(' \u00A0\u2003'))
    _normalize_list_indentation(soup)
    convert_styled_superscripts_to_placeholders(soup)
    promote_styled_headings(soup)
    _debug_print("→ stage 0 (raw):", len(soup.find_all("table")))
    timeout_check = check_timeout(start_time, time_limit_s, "HTML pre-processing Stage 0-1")
    if timeout_check is not None:
        return timeout_check, True
    # Normalize nbsp to space and strip zero-width characters everywhere.
    for text_node in soup.find_all(string=True):
        s = str(text_node)
        s = s.replace('\u00A0', ' ')
        for z in ['\u200B', '\u200C', '\u200D', '\u2060', '\u2063', '\uFEFF']:
            s = s.replace(z, '')
        text_node.replace_with(s)
    # Heuristics to detect SEC Form 4 variants (affects table rendering).
    title_tag = soup.find('title')
    title_text = title_tag.text if title_tag else ''
    is_form4 = 'Form 4' in title_text or 'form 4' in html_content[:1000].lower()
    is_legacy = bool(soup.find(string=re.compile(r'statement of changes in beneficial ownership', re.I)))
    is_modern_xml = bool(soup.find('ownershipDocument'))
    def _style_declares_bold(style: str) -> bool:
        # True when an inline style declares bold via font-weight or the
        # shorthand 'font:' declaration.
        if not style:
            return False
        style_lc = style.lower()
        style_compact = style_lc.replace(' ', '')
        if any(token in style_compact for token in (
            'font-weight:bold',
            'font-weight:700',
            'font-weight:800',
            'font-weight:900',
        )):
            return True
        font_decl_match = re.search(r'font\s*:\s*([^;]+)', style_lc)
        if font_decl_match and re.search(r'(^|[\s/])(?:bold|700|800|900)(?=$|[\s/])', font_decl_match.group(1)):
            return True
        return False
    # Push row-level bold/italic/underline styles down onto direct cells that
    # don't already declare their own value for that property.
    for tr in soup.find_all('tr', style=True):
        row_style = tr.get('style', '')
        row_style_lc = row_style.lower().replace(' ', '')
        inherited_bits = []
        if _style_declares_bold(row_style):
            inherited_bits.append('font-weight:bold')
        if 'font-style:italic' in row_style_lc:
            inherited_bits.append('font-style:italic')
        if 'text-decoration:underline' in row_style_lc:
            inherited_bits.append('text-decoration:underline')
        if not inherited_bits:
            continue
        for cell in tr.find_all(['td', 'th'], recursive=False):
            cell_style = cell.get('style', '')
            cell_style_lc = cell_style.lower().replace(' ', '')
            additions = []
            for bit in inherited_bits:
                if bit.startswith('font-weight:'):
                    if 'font-weight:' not in cell_style_lc:
                        additions.append(bit)
                elif bit.startswith('font-style:'):
                    if 'font-style:' not in cell_style_lc:
                        additions.append(bit)
                elif bit.startswith('text-decoration:'):
                    if 'text-decoration:' not in cell_style_lc:
                        additions.append(bit)
            if additions:
                merged_style = cell_style.rstrip().rstrip(';')
                if merged_style:
                    merged_style += '; '
                merged_style += '; '.join(additions)
                cell['style'] = merged_style
    # Convert CSS-styled emphasis into real <b>/<i>/<u> tags (nesting order:
    # b outermost, then i, then u).
    styled_tags = soup.find_all(['span', 'font', 'p', 'div', 'td', 'th'], style=True)
    for tag in styled_tags:
        raw_style_str = tag.get('style', '')
        style_str = raw_style_str.lower().replace(' ', '')
        is_bold = _style_declares_bold(raw_style_str)
        is_italic = 'font-style:italic' in style_str
        is_underline = 'text-decoration:underline' in style_str
        if not (is_bold or is_italic or is_underline):
            continue
        # Avoid double-bolding when an ancestor already carries <b>/<strong>.
        if is_bold and tag.find_parent(['b', 'strong']):
            is_bold = False
        if not (is_bold or is_italic or is_underline):
            if tag.name in ['span', 'font']:
                tag.unwrap()
            continue
        inner_content_holder = soup.new_tag('div')
        for child in list(tag.contents):
            inner_content_holder.append(child.extract())
        if is_underline:
            new_u_tag = soup.new_tag('u')
            new_u_tag.extend(inner_content_holder.contents)
            inner_content_holder.clear()
            inner_content_holder.append(new_u_tag)
        if is_italic:
            new_i_tag = soup.new_tag('i')
            new_i_tag.extend(inner_content_holder.contents)
            inner_content_holder.clear()
            inner_content_holder.append(new_i_tag)
        if is_bold:
            new_b_tag = soup.new_tag('b')
            new_b_tag.extend(inner_content_holder.contents)
            inner_content_holder.clear()
            inner_content_holder.append(new_b_tag)
        tag.clear()
        tag.extend(inner_content_holder.contents)
        if tag.name in ['span', 'font']:
            tag.unwrap()
    merge_whitespace_tags(soup)
    remove_empty_bold_tags(soup)
    defragment_adjacent_tags(soup, ['b', 'strong'])
    defragment_adjacent_tags(soup, ['i', 'em'])
    defragment_adjacent_tags(soup, ['font'])
    # Replace emphasis tags with numbered ##BOLD/ITALIC/U_START_n## markers.
    process_inline_tags(soup, ['b', 'strong'], "BOLD")
    process_inline_tags(soup, ['i', 'em'], "ITALIC")
    process_inline_tags(soup, ['u'], "U")
    process_anchor_tags(soup)
    colspan_rowspan_tag(soup)
    _debug_print("→ stage 1 (after bold cleanup):", len(soup.find_all("table")))
    timeout_check = check_timeout(start_time, time_limit_s, "HTML pre-processing Stage 1-2")
    if timeout_check is not None:
        return timeout_check, True
    # If the document is really an XML payload with no HTML body text,
    # delegate entirely to the XML parser.
    xml_tags = soup.find_all(re.compile(r'^xml$', re.I))
    if xml_tags:
        if soup.body is None or not soup.body.get_text(strip=True):
            return parse_any_xml([t.decode_contents() for t in xml_tags])
    _debug_print("→ stage 2 (after wingdings):", len(soup.find_all("table")))
    timeout_check = check_timeout(start_time, time_limit_s, "HTML pre-processing Stage 2-3")
    if timeout_check is not None:
        return timeout_check, True
    normalize_dl_lists(soup)
    protect_special_chars_in_tables(soup)
    _debug_print("→ stage 3 (after list-table→li):", len(soup.find_all("table")))
    timeout_check = check_timeout(start_time, time_limit_s, "HTML pre-processing Stage 3-4")
    if timeout_check is not None:
        return timeout_check, True
    # Drop non-content containers.
    for tag in soup.find_all(["xml", "script", "style", "ix:header", "ix:resources"]):
        tag.decompose()
    _debug_print("→ stage 4 (after xml/script/style/ix):", len(soup.find_all("table")))
    timeout_check = check_timeout(start_time, time_limit_s, "HTML pre-processing Stage 4-5")
    if timeout_check is not None:
        return timeout_check, True
    for tag in soup.find_all(attrs={"style": re.compile(r'display:\s*none', re.I)}):
        tag.decompose()
    _debug_print("→ stage 5 (after display:none):", len(soup.find_all("table")))
    timeout_check = check_timeout(start_time, time_limit_s, "HTML pre-processing Stage (final)")
    if timeout_check is not None:
        return timeout_check, True
    total_tables = len(soup.find_all("table"))
    next_milestone_pct = 10
    # Replace images with a text marker (marker text appears stripped here —
    # the f-string literal is empty; confirm against the original file).
    for img in soup.find_all("img"):
        src = img.get("src", "").strip()
        if not src:
            img.decompose()
            continue
        alt = img.get("alt", "").strip()
        img.replace_with(NavigableString(f""))
    # --- Markdown assembly state ---
    sections_md, text_buf = [], []
    BLOCK_TAGS = {"p", "div"}
    HEADING_TAGS = {"h1", "h2", "h3", "h4", "h5", "h6"}
    pending: List[str] = []
    last_emitted = None
    def _emit_pending():
        # Flush queued headings, skipping consecutive duplicates.
        nonlocal last_emitted
        if pending:
            unique_pending = [p for p in pending if p.lstrip().rstrip() != last_emitted]
            sections_md.extend(unique_pending)
            if unique_pending: last_emitted = unique_pending[-1].lstrip().rstrip()
            pending.clear()
    def flush(prefix: str = ""):
        # Emit the accumulated inline text as one paragraph.
        raw_text = "".join(text_buf)
        txt = re.sub(r'\s+', ' ', raw_text).strip()
        if txt:
            _emit_pending()
            final_text = re.sub(r'##SUP##(.*?)##/SUP##', r'\1 ', txt)
            # NOTE(review): both branches append the same string — the URL
            # special-case is currently a no-op.
            if final_text.startswith(('http://', 'https://')):
                sections_md.append(prefix + final_text + "\n\n")
            else:
                sections_md.append(prefix + final_text + "\n\n")
        text_buf.clear()
    def queue(level: int, cand: str, el):
        # Queue a heading for emission; dedupe identical consecutive queues.
        tag = f"\n{'#' * level} {cand}\n"
        if not pending or pending[-1] != tag:
            pending.append(tag)
        el.clear()
    body_tag = soup.body
    body = body_tag
    if body_tag and body_tag.find(True):
        body = body_tag
    else:
        # No usable <body>; walk the whole soup instead.
        body = soup
    is_13f_filing = "13F" in form_type.upper()
    # --- Single pass over the document tree ---
    for elem in body.descendants:
        if time.time() - start_time > time_limit_s:
            print(f"[timeout] HTML parsing exceeded {time_limit_s // 60} minutes. Stopping.")
            timed_out = True
            break
        # Empty bordered <p> acts as a horizontal rule / page break.
        if elem.name == 'p' and elem.has_attr('style'):
            style = elem.get('style', '').lower()
            if 'border-bottom' in style and not elem.get_text(strip=True):
                flush()
                sections_md.append("\n\n------\n\n")
                elem.clear()
                continue
        if elem.name == 'div':
            style = elem.get('style', '').lower()
            if 'page-break-after: always' in style and not elem.get_text(strip=True):
                flush()
                sections_md.append("\n\n------\n\n")
                elem.clear()
                continue
        if elem.name in HEADING_TAGS:
            flush()
            raw_cand = elem.get_text(separator=' ', strip=True)
            cand = re.sub(r'\s+', ' ', raw_cand).strip()
            if cand and cand != last_emitted:
                lvl = int(elem.name[1])
                tag = f"\n{'#' * lvl} {cand}\n"
                sections_md.append(tag)
                last_emitted = cand
            elem.clear()
            continue
        if elem.name == "li":
            flush()
            li_text = elem.get_text()
            if li_text:
                # Keep an existing bullet glyph; otherwise add a Markdown one.
                if li_text.lstrip().startswith(tuple(BULLET_CHARS)):
                    sections_md.append("\n" + li_text)
                else:
                    sections_md.append("* " + li_text)
                sections_md.append("\n\n")
            elem.clear()
            continue
        if elem.name in BLOCK_TAGS or elem.name == "br":
            flush()
            continue
        if elem.name == "hr":
            flush()
            sections_md.append("\n\n------\n\n")
            elem.clear()
            continue
        if elem.name == "pre":
            flush()
            pre_text = elem.get_text()
            if pre_text:
                sections_md.append(pre_text)
            elem.clear()
            continue
        if elem.name == "table":
            count += 1
            current_pct = (count / total_tables) * 100
            if current_pct >= next_milestone_pct and total_tables > 10:
                milestone_to_print = int(next_milestone_pct)
                print(f"-> Processing tables... {milestone_to_print}% complete ({count} of {total_tables})")
                next_milestone_pct += 10.0
            flush()
            # Tables that are really lists/sentences are handled specially.
            if handle_width_indented_list_table(elem, sections_md):
                elem.clear()
                continue
            if handle_list_like_table_with_indentation(elem, sections_md):
                elem.clear()
                continue
            if handle_sentence_fragment_table(elem, sections_md):
                elem.clear()
                continue
            tag_border_cells(elem, soup)
            # Convert CSS indents to ##INDENT## prefixes: assume 10pt text and
            # 1.2em per indent step, quantized to quarter steps.
            DEFAULT_FONT_SIZE_PT = 10.0
            STANDARD_INDENT_EM = 1.2
            for cell in elem.find_all(['td', 'th']):
                elements_to_check = [cell] + cell.find_all(['p', 'div', 'font'])
                max_indent_pt = 0.0
                font_size_pt = None
                for el in elements_to_check:
                    indent_info = _calculate_effective_indent(el)
                    if indent_info['indent'] > max_indent_pt:
                        max_indent_pt = indent_info['indent']
                    if indent_info['font_size']:
                        font_size_pt = indent_info['font_size']
                if font_size_pt is None:
                    for el in elements_to_check:
                        indent_info = _calculate_effective_indent(el)
                        if indent_info['font_size']:
                            font_size_pt = indent_info['font_size']
                            break
                effective_font_size = font_size_pt or DEFAULT_FONT_SIZE_PT
                if max_indent_pt > 0 and effective_font_size > 0:
                    indent_em = max_indent_pt / effective_font_size
                    ratio = indent_em / STANDARD_INDENT_EM
                    quantized_level = round(ratio * 4) / 4
                    full_indents = int(quantized_level)
                    remainder = quantized_level - full_indents
                    indent_prefix = ""
                    if full_indents > 0:
                        indent_prefix += '##INDENT##' * full_indents
                    if remainder >= 0.75:
                        indent_prefix += ' '
                    elif remainder >= 0.5:
                        indent_prefix += ' '
                    if indent_prefix:
                        cell.insert(0, NavigableString(indent_prefix))
            table_text = elem.get_text(separator=' ', strip=True)
            # "Item N." style tables become level-3 headings.
            if ITEM_HEADING.match(table_text):
                queue(3, table_text, elem)
                continue
            # Flatten each cell to text, preserving indentation hints.
            for cell in elem.find_all(['td', 'th']):
                indent_level = 0
                text_indent_level = 0
                elements_to_check = [cell] + cell.find_all(['div', 'p'], recursive=False)
                for el in elements_to_check:
                    style = el.get('style', '')
                    if not style: continue
                    pad_match = re.search(r'padding-left\s*:\s*([\d\.]+)(pt|px|em)', style)
                    margin_match = re.search(r'margin-left\s*:\s*([\d\.]+)(pt|px|em)', style)
                    total_offset_pt = 0.0
                    if pad_match:
                        val, unit = float(pad_match.group(1)), pad_match.group(2)
                        if unit == 'em': total_offset_pt += val * 10.0
                        elif unit == 'px': total_offset_pt += val * 0.75
                        else: total_offset_pt += val
                    if margin_match:
                        val, unit = float(margin_match.group(1)), margin_match.group(2)
                        if unit == 'em': total_offset_pt += val * 10.0
                        elif unit == 'px': total_offset_pt += val * 0.75
                        else: total_offset_pt += val
                    if total_offset_pt > 0:
                        # ~5pt per indent level.
                        level = int(round(total_offset_pt / 5.0))
                        if level > 0:
                            indent_level = level
                            break
                first_visible_text = None
                for descendant in cell.descendants:
                    if not isinstance(descendant, NavigableString):
                        continue
                    descendant_text = str(descendant)
                    if descendant_text.strip(' \t\r\n\u00A0\u2003'):
                        first_visible_text = descendant_text
                        break
                if first_visible_text:
                    leading_ws_match = re.match(r'^([\s\u00A0\u2003]+)', first_visible_text)
                    if leading_ws_match:
                        ws_string = leading_ws_match.group(1)
                        ws_string = re.sub(r'[\r\n]+[ \t\f\v]*', '', ws_string)
                        indent_units = 0.0
                        for char in ws_string:
                            if char in ['\u00A0', ' ']:
                                indent_units += 0.5
                            elif char == '\u2003':
                                indent_units += 2.0
                        text_indent_level = int(indent_units)
                        indent_level = max(indent_level, text_indent_level)
                for br in cell.find_all('br'):
                    br.replace_with('##NEWLINE##')
                for p in cell.find_all(['p', 'div']):
                    p.append('##NEWLINE##')
                cell_text = re.sub(r'(?<=[A-Za-z0-9])-\s+', '- ', cell.get_text(strip=False).replace("**", "").replace("** ", ""))
                if text_indent_level > 0:
                    cell_text = re.sub(r'^[\s\u00A0\u2003]+', '', cell_text)
                if cell_text.strip() == "##NEWLINE##":
                    cell_text = ""
                # U+2063 (invisible separator) encodes CSS-derived indent.
                IND = "\u2063"
                if text_indent_level > 0 and not re.match(r'^(?:##INDENT##| )+', cell_text):
                    cell_text = ('##INDENT##' * text_indent_level) + cell_text
                elif indent_level > 0:
                    cell_text = IND * indent_level + cell_text
                cell.clear()
                cell.string = cell_text
            # Absurd colspans (or unparseable ones) are dropped entirely.
            for cell in elem.find_all(['td', 'th']):
                if cell.get('colspan'):
                    try:
                        if int(cell['colspan']) > 500:
                            del cell['colspan']
                    except (ValueError, TypeError):
                        del cell['colspan']
            for tr in elem.find_all('tr'):
                if not tr.find(['td', 'th']):
                    tr.decompose()
            table_html = str(elem)
            table_html = protect_numeric_list_items(table_html)
            table_html = _fix_escaped_malformed_font_tag(table_html)
            try:
                df_from_html = pd.read_html(io.StringIO(table_html), flavor="lxml", keep_default_na=False, na_values=[""])[0]
                df_from_html.replace(to_replace=r'##PROTECT_(.*?)##', value=r'\1', regex=True, inplace=True)
                df_from_html = df_from_html.replace({'##VISUAL_BORDER##': ''}, regex=False)
                # Skip leading junk/spacer rows.
                first_real_row_idx = 0
                for i, row in df_from_html.iterrows():
                    is_junk = all(
                        str(cell).strip() in ('', 'nan', '', '', 'NaN') or 'spacer.gif' in str(cell)
                        for cell in row
                    )
                    if not is_junk:
                        first_real_row_idx = i
                        break
                raw_df = df_from_html.iloc[first_real_row_idx:].reset_index(drop=True)
                raw_df = raw_df.replace(r'^\s*(?: )?\s*$', np.nan, regex=True)
                raw_df = (raw_df
                          .dropna(how='all')
                          .dropna(how='all', axis=1)
                          .reset_index(drop=True))
                # NOTE(review): the marker tested here is an empty string
                # (always True for str cells) — looks like a stripped literal;
                # confirm against the original file.
                if not raw_df.empty:
                    for r in range(1, len(raw_df)):
                        for c in range(len(raw_df.columns)):
                            if isinstance(raw_df.iat[r, c], str) and '' in raw_df.iat[r, c]:
                                above_cell = raw_df.iat[r - 1, c]
                                if pd.isna(above_cell):
                                    raw_df.iat[r - 1, c] = ''
                                else:
                                    raw_df.iat[r - 1, c] = str(above_cell) + ''
                raw_df = raw_df.replace({r'': '', r'': ''}, regex=True)
                sup_replacer = lambda x: re.sub(r'##SUP##(.*?)##/SUP##', r'\1 ', str(x)) if '##SUP##' in str(x) else x
                raw_df = raw_df.applymap(sup_replacer)
                raw_df = drop_tag_only_rows_cols(raw_df).reset_index(drop=True)
                raw_df = (
                    raw_df
                    .replace(r' -(?=[A-Za-z])', ' - ', regex=True)
                    .replace(r'\s{2,}', ' ', regex=True)
                )
                def normalize_for_comparison(val):
                    # Strip invisible indent/nbsp chars and squeeze whitespace.
                    if isinstance(val, str):
                        text = val.replace('\u2063', '').replace('\u00A0', '')
                        return re.sub(r'\s+', ' ', text).strip()
                    return val
                if not raw_df.empty and raw_df.shape[1] > 1:
                    cols_to_clean = raw_df.columns[1:]
                    raw_df[cols_to_clean] = raw_df[cols_to_clean].applymap(normalize_for_comparison)
                # Heuristic: does this look like a financial table?
                table_text = re.sub(r'##(BOLD_START_\d+|BOLD_END_\d+|U_START_\d+|U_END_\d+|ITALIC_START_\d+|ITALIC_END_\d+|ROWSPAN_\d+|COLSPAN_\d+|LINK_START_\d+__[^#]+|LINK_END_\d+)##', '', table_text)
                table_text = table_text.replace('##NEWLINE##', '').replace(' ', '').strip()
                positives = (
                    (('$' in table_text or '£' in table_text or '�' in table_text or " ) " in table_text) and re.search(r'\d', table_text)) or
                    ('%' in table_text or re.search(r'\([\d,]+\)', table_text)) or
                    (len(table_text) > 300 and re.search(r'\d', table_text) and
                     "Part I" not in table_text and
                     "Name of each exchange on which registered" not in table_text and
                     "ITEM 1" not in table_text) or
                    any(k in table_text for k in ["Common stock", "Total", "By:", "Earnings", "##SUP", "##SUB", "marketing", "Period Ended", "Months Ended", "For Against"]) or
                    ((raw_df == ')').any().any())
                )
                exclusions = any(k in table_text for k in ["Emerging growth company", "Smaller reporting company", "[One-month LIBOR +] __%", "⌧"])
                is_financial_table = positives and not exclusions
                if is_financial_table and not is_13f_filing and not (is_form4 and is_legacy):
                    df_to_render = clean_financial_df(raw_df)
                else:
                    df_to_render = raw_df
                if "OO" in table_text and "CHECK" in table_text:
                    df_to_render = drop_active_colspan_empty_cols(df_to_render)
                md = df_to_markdown(df_to_render, disable_numparse=True, is_legacy_form4_table1=((is_form4 and is_legacy and "Table I" in df_to_render.to_string())), is_legacy_form4_table2=(is_form4 and is_legacy and "Table II" in df_to_render.to_string()))
                if md and not md.isspace():
                    _emit_pending()
                    # Tables with alignment rows get '---' fences around them.
                    if "|:-" in md:
                        sections_md.append(f"\n---\n\n{md}\n\n---\n")
                    else:
                        sections_md.append(f"{md}\n\n")
            except (ValueError, IndexError):
                # pandas couldn't parse the table; fall back to plain text.
                fallback_text = elem.get_text(separator=' ', strip=False)
                if fallback_text:
                    _emit_pending()
                    sections_md.append(textwrap.fill(fallback_text) + "\n")
            elem.clear()
            continue
        # Plain text outside special containers accumulates into the buffer.
        if isinstance(elem, NavigableString):
            if not elem.find_parent(HEADING_TAGS.union({"li", "table", "script", "style"})):
                text_buf.append(str(elem))
    flush()
    pending.clear()
    md = "".join(sections_md)
    # Strip Wdesk boilerplate banners.
    md = re.sub(
        r"EX-[\d\.]+\s+\d+\s+[\w\.]+\.htm\s+EX-[\d\.]+\s+Document\s+"
        r"created\s+using\s+Wdesk.*?Document",
        "",
        md,
        flags=re.I,
    )
    md = re.sub(r'\n{3,}', '\n\n', md).strip()
    if timed_out:
        return md + "\n\n", False
    else:
        return md, False
def clean_phone_numbers(text: str) -> str:
    """
    Re-joins phone numbers that were broken across a newline and drops any
    Markdown bold markers wrapped around the number components, e.g.
    "(123)\\n456-7890" -> "(123) 456-7890". Non-string input is returned
    unchanged.
    """
    if not isinstance(text, str):
        return text
    # Area code in parentheses (optionally bolded), a newline, then the
    # 7-digit remainder (optionally bolded, optional dash).
    splitter = re.compile(r"""
    \*{0,2}
    (
    \(\s*\d{3}\s*\)
    )
    \*{0,2}
    \s*\n\s*
    \*{0,2}
    (
    \d{3}\s*[-]?\s*\d{4}
    )
    \*{0,2}
    """, re.VERBOSE)
    return splitter.sub(r"\1 \2", text)
BULLETS = "○•●·◦➢▪"
BOLD_SPLIT_RE = re.compile(r'\*\* +')
def _fix_paragraph_bold_runs(txt: str) -> str:
out, last = [], 0
for m in BOLD_SPLIT_RE.finditer(txt):
i = m.start()
window = txt[max(0, i-150):i]
if window.count("**") >= 2:
continue
if re.search(rf"[{BULLETS}]\s*\*\*\s*$", window):
continue
out.append(txt[last:i].rstrip())
out.append("\n\n**")
last = m.end()
out.append(txt[last:])
return "".join(out)
def _convert_bullet_tables_to_lists(markdown_content: str) -> str:
"""
Finds and converts two-column Markdown tables that are used to format
bulleted lists into proper list items.
e.g., | • | Some text... | -> • Some text...
"""
table_pattern = re.compile(r'\n---\n\n(.*?)\n\n---\n', re.S)
bullet_chars = {'○', '•', '●', '*', '·', '◦', '➢', '▪'}
def replacer(match):
md_table_str = match.group(1)
lines = md_table_str.strip().split('\n')
if len(lines) < 3:
return match.group(0)
header_cells = [cell.strip() for cell in lines[0].strip('|').split('|')]
if any(header_cells):
return match.group(0)
separator_cells = [cell.strip() for cell in lines[1].strip('|').split('|')]
if len(separator_cells) != 2:
return match.group(0)
list_items = []
is_bullet_table = True
for line in lines[2:]:
data_cells = [cell.strip() for cell in line.strip('|').split('|')]
if len(data_cells) != 2:
is_bullet_table = False
break
bullet_part = data_cells[0]
text_part = data_cells[1]
if bullet_part not in bullet_chars:
is_bullet_table = False
break
list_items.append(f"{bullet_part} {text_part}")
if is_bullet_table and list_items:
return "\n".join(list_items)
return match.group(0)
return table_pattern.sub(replacer, markdown_content)
def _format_footnote_lists(markdown_content: str) -> str:
"""
Finds and formats footnotes that appear either as two-column tables
or as simple numbered lines, converting the number into a superscript.
"""
table_pattern = re.compile(r'\n---\n\n(.*?)\n\n---\n', re.S)
def table_replacer(match):
md_table_str = match.group(1)
lines = md_table_str.strip().split('\n')
if len(lines) != 3:
return match.group(0)
if any(cell.strip() for cell in lines[0].strip('|').split('|')):
return match.group(0)
if len(lines[1].strip('|').split('|')) != 2:
return match.group(0)
data_cells = [cell.strip() for cell in lines[2].strip('|').split('|')]
if len(data_cells) != 2:
return match.group(0)
number_part, text_part = data_cells
num_match = re.fullmatch(r'(?:)?\s*(\d{1,2})\s*(?: )?', number_part)
if num_match and text_part:
num_str = num_match.group(1)
return f"{num_str} {text_part}"
return match.group(0)
content = table_pattern.sub(table_replacer, markdown_content)
footnote_line_pattern = re.compile(r'^(\d{1,2})\.\s+(?=[A-Z])', re.MULTILINE)
content = footnote_line_pattern.sub(r'\1 ', content)
return content
def _remove_page_numbers(markdown_content: str) -> str:
"""
Removes page numbers from the document by targeting three patterns:
1. Standalone integers on a line by themselves (e.g., "1", "A-1").
2. Standalone integers surrounded by hyphens (e.g., "-1-").
3. The above patterns when they are the sole content of a single-cell Markdown table.
The number must be less than 500 to be considered a page number.
This version also collapses the extra newlines left by the removal.
"""
pattern_standalone = re.compile(
r"(\n{2,}|^)"
r"(?: )?"
r"[ \t]*"
r"(?:"
r"(?:[A-Z]+-)?(\d{1,3})(?:\.)?"
r"|"
r"-\s*(\d{1,3})\s*-"
r"|"
r"page\s+(\d{1,3})(?:\.)?"
r")"
r"[ \t]*"
r"(\n{2,}|$)",
re.MULTILINE | re.IGNORECASE
)
def replacer_standalone(m):
try:
num_str = m.group(2) or m.group(3) or m.group(4)
page_num = int(num_str)
if page_num < 500:
return "\n\n"
except (ValueError, TypeError, IndexError):
pass
return m.group(0)
content = pattern_standalone.sub(replacer_standalone, markdown_content)
pattern_table = re.compile(r'\n---\n\n(.*?)\n\n---\n', re.S)
def replacer_table(match):
"""Callback to check if a table block is just a page number."""
table_content = match.group(1).strip()
lines = table_content.split('\n')
if len(lines) == 3:
data_row = lines[2].strip()
cell_match = re.fullmatch(
r'\|\s*(?:(?:[A-Z]+-)?(\d{1,3})(?:\.)?|-\s*(\d{1,3})\s*-)\s*\|',
data_row
)
if cell_match:
try:
num_str = cell_match.group(1) or cell_match.group(2)
page_num = int(num_str)
if page_num < 500:
return "\n\n"
except (ValueError, TypeError, IndexError):
pass
return match.group(0)
content = pattern_table.sub(replacer_table, content)
return re.sub(r'\n{3,}', '\n\n', content)
BULLET_BOLD_SPLIT_RE = re.compile(
rf"""(?x)
(^[^\n]{{0,150}}) # 1) a look-back window ≤150 chars, captured
\n+ # 2) the offending newline(s)
(\*\*[A-Za-z]) # 3) "**S" (opening bold + letter)
""",
re.M,
)
def merge_adjacent_italics(s: str) -> str:
    """
    Collapses back-to-back italic placeholder runs: wherever
    "##ITALIC_END_a####ITALIC_START_b##" appears, the two runs are merged
    into one carried by id ``b``, inserting a single space when the join
    would otherwise fuse two non-space characters.
    """
    pair = re.compile(r"##ITALIC_END_(\d+)####ITALIC_START_(\d+)##")
    while (m := pair.search(s)) is not None:
        first_id, second_id = m.group(1), m.group(2)
        opener = f"##ITALIC_START_{first_id}##"
        open_pos = s.rfind(opener, 0, m.start())
        head, tail = s[:m.start()], s[m.end():]
        if open_pos != -1:
            # Re-label the earlier run's opener with the surviving id.
            head = head[:open_pos] + f"##ITALIC_START_{second_id}##" + head[open_pos + len(opener):]
        if head and tail and not head[-1].isspace() and not tail[0].isspace():
            s = head + " " + tail
        else:
            s = head + tail
    return s
def merge_bracket_fragmented_underlines(s: str) -> str:
    """
    Repairs underline placeholder runs that split bracketed text into
    adjacent fragments, e.g.:
    ##U_START_a##[##U_END_a####U_START_b##Reserved##U_END_b##
    -> ##U_START_b##[Reserved##U_END_b##
    and the symmetric closing-bracket case. Repeats until stable.
    """
    open_bracket = re.compile(
        r'##U_START_(\d+)##\[\s*##U_END_\1##\s*##U_START_(\d+)##(.*?)##U_END_\2##'
    )
    close_bracket = re.compile(
        r'##U_START_(\d+)##(.*?)##U_END_\1##\s*##U_START_(\d+)##\]\s*##U_END_\3##'
    )
    previous = None
    while previous != s:
        previous = s
        # Leading '[' fragment folds into the following run.
        s = open_bracket.sub(r'##U_START_\2##[\3##U_END_\2##', s)
        # Trailing ']' fragment folds into the preceding run.
        s = close_bracket.sub(r'##U_START_\1##\2]##U_END_\1##', s)
    return s
def collapse_redundant_bold_placeholders(s: str) -> str:
    """
    Collapses duplicated bold placeholder wrappers before they are restored
    to literal markdown asterisks: an outer bold pair that only wraps a
    complete inner pair (plus other placeholders/whitespace) is removed, as
    are stacked consecutive openers and closers. Iterates to a fixpoint.
    This runs at the placeholder stage so escaped literal asterisks in the
    source document are never touched.
    """
    outer_wrapper = re.compile(
        r"##BOLD_START_\d+##"
        r"((?:##(?:BOLD|ITALIC|U)_(?:START|END)_\d+##|\s)*)"
        r"(##BOLD_START_(\d+)##.*?##BOLD_END_\3##)"
        r"((?:##(?:BOLD|ITALIC|U)_(?:START|END)_\d+##|\s)*)"
        r"##BOLD_END_\d+##",
        re.DOTALL,
    )
    stacked_open = re.compile(
        r"##BOLD_START_\d+##(?=(?:\s|##NEWLINE##)*##BOLD_START_\d+##)"
    )
    stacked_close = re.compile(
        r"(##BOLD_END_\d+##)(?:\s|##NEWLINE##)*##BOLD_END_\d+##"
    )
    previous = None
    while previous != s:
        previous = s
        s = outer_wrapper.sub(r"\1\2\4", s)
        s = stacked_open.sub("", s)
        s = stacked_close.sub(r"\1", s)
    return s
def _post_process_text_cleanup(markdown_text: str, legacy_form4 = False) -> str:
"""
Final-stage clean-up for Markdown pulled from iXBRL/EDGAR filings.
Returns a tidy Markdown string with:
• mojibake fixed
• hidden metadata lines removed
• common word-splits repaired
• normalised spacing & punctuation
"""
if not markdown_text:
return ""
pattern = re.compile(r'(##BOLD_START_\d+##)(\s+)##BOLD_START_(\d+)##\((##BOLD_END_\3##)')
replacement = r'\2\1('
markdown_text = pattern.sub(replacement, markdown_text)
pattern = re.compile(
r'(##BOLD_START_(\d+)##)'
r'##BOLD_START_(\d+)##'
r'(\)?%|\))'
r'##BOLD_END_\3##'
r'##BOLD_END_\2##'
)
replacement = r'\1\4##BOLD_END_\2##'
markdown_text = pattern.sub(replacement, markdown_text)
markdown_text = markdown_text.replace(" ", "").replace("##SUP####/SUP##", "").replace("syste m,", "system,")
pattern = r"(##BOLD_START_\d+##)\n\n"
replacement = r"\n\n\1"
markdown_text = re.sub(pattern, replacement, markdown_text)
pattern = re.compile(r'\s+(##(?:BOLD|ITALIC|U)_END_\d+##)\s+([,.:;!?])')
markdown_text = pattern.sub(r'\1\2', markdown_text)
markdown_text = re.sub(
r"##BOLD_START_\d+##(?:##(?:BOLD|ITALIC)_(?:START|END)_\d+##)*(##BOLD_START_(\d+)##.*?##BOLD_END_\2##)(?:##(?:BOLD|ITALIC)_(?:START|END)_\d+##)*##BOLD_END_\d+##",
r"\1", markdown_text, flags=re.DOTALL)
markdown_text = re.sub(
r"##ITALIC_START_\d+##(?:##(?:BOLD|ITALIC)_(?:START|END)_\d+##)*(##ITALIC_START_(\d+)##.*?##ITALIC_END_\2##)(?:##(?:BOLD|ITALIC)_(?:START|END)_\d+##)*##ITALIC_END_\d+##",
r"\1", markdown_text, flags=re.DOTALL)
markdown_text = re.sub(r"##BOLD_START_\d+##(##BOLD_START_\d+##)", r"\1", markdown_text)
markdown_text = re.sub(r"(##BOLD_END_\d+##)##BOLD_END_\d+##", r"\1", markdown_text)
markdown_text = re.sub(r"##ITALIC_START_\d+##(##ITALIC_START_\d+##)", r"\1", markdown_text)
markdown_text = re.sub(r"(##ITALIC_END_\d+##)##ITALIC_END_\d+##", r"\1", markdown_text)
markdown_text = markdown_text.replace(" ", " ").replace(" ", " ").replace(" ", " ")
markdown_text = re.sub(
r'##NEWLINE##(?=(##BOLD_START_\d+##\)##BOLD_END_\d+##))',
r'',
markdown_text
)
markdown_text = merge_adjacent_italics(markdown_text)
markdown_text = re.sub(r"##ITALIC_START_\d+##([○•●·◦➢])##ITALIC_END_\d+####ITALIC_START_(\d+)##", r"##ITALIC_START_\2##\1", markdown_text)
pattern = r"(##BOLD_START_(\d+)####NEWLINE##)"
replacement = r"##NEWLINE####BOLD_START_\2##"
markdown_text = markdown_text.replace("##NEWLINE##", "").replace("##NEWLINE## ", " ")
_SWAP = re.compile(r'##NEWLINE##\s*((?:##(?:ITALIC|BOLD|U)_END_\d+##\s*)+)')
markdown_text = _SWAP.sub(r'\1##NEWLINE## ', markdown_text)
wrap_start = r'(?:##(?:ITALIC|BOLD)_START_\d+##)*'
wrap_end = r'(?:##(?:ITALIC|BOLD)_END_\d+##)*'
roman_dot = r'(?i:[ivxlcdm]+)\.'
indent = r'(?:##INDENT##)*'
pair_paren = r'\([a-zA-Z]\)\([a-zA-Z]\)'
marker_core = rf'(?:{pair_paren}|\*|\•|\d+\.\d[\d\.]*|\d+\.(?!\d)|\([a-zA-Z]\)|\((?i:[ivxlcdm]+)\)|\(\d+\)|{roman_dot}|[a-zA-Z]\.)'
marker = rf'{indent}{marker_core}'
pattern = re.compile(
rf'(?m)^({wrap_start}{marker}{wrap_end})[ \t]*\r?\n(?:[ \t]*\r?\n)*(?=\S)'
)
markdown_text = pattern.sub(r'\1 ', markdown_text)
pattern = re.compile(
r'##BOLD_END_(\d+)##(?: )?'
r'##BOLD_START_\d+##'
r'(.*?)'
r'##BOLD_END_\d+## ',
re.DOTALL
)
markdown_text = pattern.sub(r'\2 ##BOLD_END_\1##', markdown_text)
pattern = re.compile(
r'##BOLD_END_(\d+)##(?: )?'
r'##SUB####BOLD_START_\d+##'
r'(.*?)'
r'##BOLD_END_\d+####/SUB##',
re.DOTALL
)
markdown_text = pattern.sub(r'##SUB##\2##/SUB####BOLD_END_\1##', markdown_text)
markdown_text = re.sub(r'(##BOLD_START_\d+##•##BOLD_END_\d+##) (##BOLD_START_\d+##•##BOLD_END_\d+##)(\s?)', r'\1\n\n\2\3', markdown_text)
markdown_text = re.sub(
r'##BOLD_START_(\d+)##\s*\(\s*##BOLD_END_\1##\s*##BOLD_START_(\d+)##(.*?)##BOLD_END_\2##',
r'##BOLD_START_\2##(\3##BOLD_END_\2##',
markdown_text
)
markdown_text = re.sub(
r'##BOLD_END_(\d+)##(?:##NEWLINE##\s*)?\s*##BOLD_START_\d+##(\s?(?:\)%|\)|%|,))(\s*)##BOLD_END_\d+##',
r'\2\3##BOLD_END_\1##',
markdown_text,
)
markdown_text = re.sub(r' (##(?:BOLD|U|ITALIC)_END_\d+##)', r'\1 ', markdown_text)
markdown_text = re.sub(r'(?m)^•(?=\S)', '• ', markdown_text)
markdown_text = re.sub(
r'\.##BOLD_END_(\d+)####U_START',
r'.##BOLD_END_\1## ##U_START',
markdown_text
)
markdown_text = re.sub(r'##ITALIC_START_(\d+)####I_SPACE####ITALIC_END_\1##', ' ', markdown_text)
markdown_text = re.sub(r'(##ITALIC_START_\d+##)(##I_SPACE##)', r'\2\1', markdown_text)
markdown_text = re.sub(r'(##I_SPACE##)(##ITALIC_END_\d+##)', r'\2\1', markdown_text)
markdown_text = re.sub(r'(\d{1,2}\.\d+[A-Z]?\.?)(##ITALIC_START_\d+##)', r'\1 \2', markdown_text)
markdown_text = re.sub(
r'((?:##(?:BOLD|ITALIC|U)_END_\d+##[^\S\r\n]*)+)(?=\S)',
lambda m: re.sub(r'[^\S\r\n]+', '', m.group(1)) + (' ' if re.search(r'[^\S\r\n]', m.group(1)) else ''),
markdown_text
)
markdown_text = re.sub(
r'(##(?:BOLD|ITALIC|U)_START_\d+##)\s',
r' \1',
markdown_text
)
markdown_text = markdown_text.replace("\u00a0", " ")
mojibake = {
"â\x80\x94": "—",
"â\x80\x93": "–",
"â\x80\x99": "'",
"â\x80\x98": "'",
"â\x80\x9c": '"',
"â\x80\x9d": '"',
"â\x80¦": "...",
"�": "'",
"�": '"',
"â�d": '"',
"â� ": '"',
"â�”": "—",
"â�“": "–",
"�": " ",
"�": "...",
"”": '"',
"“": '"',
"’": "'",
"‘": "'",
}
for bad, good in mojibake.items():
markdown_text = markdown_text.replace(bad, good)
markdown_text = clean_phone_numbers(markdown_text)
junk_patterns = [
re.compile(r'^\s*(?:<\??\s*)?xml\s+version\s*=\s*[\'"]\s*1\.0\s*[\'"].*$', re.IGNORECASE | re.MULTILINE),
re.compile(
r"^\s*\*{0,2}000\d{7}[^\n]*(?:Q[1-4]|FY|10-K|10-Q)[^\n]*false\*{0,2}\s*$",
re.IGNORECASE | re.MULTILINE,
),
re.compile(r"^.*XBRL Document Created with.*$", re.MULTILINE),
re.compile(r'^.*(?:Created by|Powered by|Unique Code|Generated At).*$', re.IGNORECASE | re.MULTILINE),
re.compile(r"^false\d{4}FY\d+.*http://fasb\.org.*$", re.IGNORECASE | re.MULTILINE),
re.compile(
r"^(?!\s*\|).*\[(?:Member|Axis|Domain|Line Items|Abstract|Table|Text Block|"
r"Policy Text Block|Extensible Enumeration|Flag|Roll Forward)\].*$",
re.IGNORECASE | re.MULTILINE,
),
]
for pat in junk_patterns:
markdown_text = pat.sub("", markdown_text)
split_fixes = {
r"##NEWLINE##": " "
}
for bad_re, good in split_fixes.items():
markdown_text = re.sub(bad_re, good, markdown_text, flags=re.IGNORECASE)
markdown_text = re.sub(r'( [ \t]*){2,}', ' ', markdown_text, flags=re.IGNORECASE)
table_pattern = re.compile(r'\n---\n\n(.*?)\n\n---\n', re.S)
def remove_empty_tables_replacer(match):
table_content = match.group(1)
content_check = re.sub(r'[|\-:\s—–]', '', table_content)
if not content_check:
return ''
return match.group(0)
markdown_text = table_pattern.sub(remove_empty_tables_replacer, markdown_text)
markdown_text = re.sub(
r'(##BOLD_END_\d+##)'
r'((?: |##NEWLINE##|##COLSPAN_\d+##|\s)*)'
r'(##BOLD_START_\d+##)'
r'(\)?%|\))'
r'(##BOLD_END_\d+##)',
r' \4\1\2',
markdown_text
)
markdown_text = re.sub(r"[ \t]+", " ", markdown_text)
markdown_text = re.sub(r"\n{3,}", "\n\n", markdown_text)
markdown_text = re.sub(r"(?)", ")").replace(" %", "%").replace("$ ", "$").replace("##BOL D", "##BOLD")
link_pattern = re.compile(r'^\s*link\d+\s+".*?"\s*\n?', re.MULTILINE)
markdown_text = link_pattern.sub("", markdown_text)
markdown_text = markdown_text.replace("• ", "• ").replace("● ", "● ").replace("· ", "· ").replace("◦ ", "◦ ").replace("➢ ", "➢ ")
BULLET_RUN_RE = re.compile(
r'((?:^|(?:##INDENT##)+)\s*[○•●·◦➢▪])'
r'(?:\s*(?: \s*|\n\s*)){2,}',
re.MULTILINE
)
markdown_text = BULLET_RUN_RE.sub(r'\1 ', markdown_text)
markdown_text = _fix_paragraph_bold_runs(markdown_text)
CHECK_RUN_RE = re.compile(
r'([✓✔✘])'
r'(?:\s* \s*|\s*\n\s*)+'
)
markdown_text = CHECK_RUN_RE.sub(r'\1 ', markdown_text)
markdown_text = re.sub(
r'(##BOLD_START_\d+##TABLE OF CONTENTS##BOLD_END_\d+##)|\bTABLE OF CONTENTS\b',
lambda m: m.group(1) or '**TABLE OF CONTENTS**',
markdown_text,
)
markdown_text = markdown_text.replace("##SUP##", "").replace("##/SUP##", " ").replace("##SUP##", "").replace("##/SUP##", "")
markdown_text = markdown_text.replace("##SUB##", "").replace("##/SUB##", " ").replace("##SUB##", "").replace("##/SUB##", "")
markdown_text = re.sub(r'##BOLD_START_\d+##(?=\s* ##BOLD_START)', '', markdown_text)
markdown_text = re.sub(r'(##BOLD_END_\d+##)(##BOLD_START_\d+##)', r'\1 \2', markdown_text)
markdown_text = re.sub(r'(\d+)\s+(%##BOLD_END_)', r'\1\2', markdown_text)
markdown_text = re.sub(r'(##BOLD_START_\d+##)\s+', r' \1', markdown_text)
markdown_text = re.sub(r'\s+(##BOLD_END_\d+##)', r'\1', markdown_text)
markdown_text = collapse_redundant_bold_placeholders(markdown_text)
markdown_text = re.sub(r'(?<=\S)##BOLD_START_\d+##', r'**', markdown_text)
markdown_text = re.sub(r'##BOLD_START_\d+##', r'**', markdown_text)
markdown_text = re.sub(r'##BOLD_END_\d+##(?=\S)', r'**', markdown_text)
markdown_text = re.sub(r'##BOLD_END_\d+##', r'**', markdown_text)
markdown_text = re.sub(r'(##ITALIC_START_\d+##)\s+', r'\1', markdown_text)
markdown_text = re.sub(r'\s+(##ITALIC_END_\d+##)', r'\1', markdown_text)
markdown_text = re.sub(r'(?<=\S)##ITALIC_START_\d+##', r'*', markdown_text)
markdown_text = re.sub(r'##ITALIC_START_\d+##', r'*', markdown_text)
markdown_text = re.sub(r'##ITALIC_END_\d+##(?=\S)', r'*', markdown_text)
markdown_text = re.sub(r'##ITALIC_END_\d+##', r'*', markdown_text)
markdown_text = merge_bracket_fragmented_underlines(markdown_text)
markdown_text = re.sub(r'(##U_START_\d+##)\s+', r'\1', markdown_text)
markdown_text = re.sub(r'\s+(##U_END_\d+##)', r'\1', markdown_text)
markdown_text = re.sub(r'(?<=\S)##U_START_\d+##', r'', markdown_text)
markdown_text = re.sub(r'##U_START_\d+##', r'', markdown_text)
markdown_text = re.sub(r'##U_END_\d+##(?=\S)', r' ', markdown_text)
markdown_text = re.sub(r'##U_END_\d+##', r' ', markdown_text)
markdown_text = _restore_markdown_links(markdown_text)
markdown_text = markdown_text.replace("$ **", "**$").replace("** **)**", ")**")
pattern = r'^(Table of Contents)\s*•\s*(.+)$'
replacement = r'\1\n\n• \2'
markdown_text = re.sub(pattern,
replacement,
markdown_text,
flags=re.IGNORECASE | re.MULTILINE)
markdown_text = re.sub(r'(?", "_").replace(" _", "_")
pattern = r'Unnamed:\s*\d+(?:_level_\d+)?\s'
markdown_text = re.sub(pattern, '', markdown_text)
markdown_text = re.sub(r"\.\d+", "", markdown_text)
markdown_text = re.sub(r" \.\d+(?![%\d])", "", markdown_text)
markdown_text = markdown_text.replace("", "")
if legacy_form4:
bad_table2_row2 = "| 1. Title of Derivative Security (Instr. 3) | 2. Conver- sion or Exercise Price of Deri- vative Security | 3. Transaction Date (Month/ Day/ Year) | 3A. Deemed Execution Date, if any (Month/ Day/ Year) | Code | V A D DE ED Title Amount or Number of Shares | 8. Price of Derivative Security (Instr.5) | 9. Number of Derivative Securities Beneficially Owned Following Reported Transaction(s) (Instr.4) | 10. Owner- ship Form of Deriv- ative Securities: Direct (D) or Indirect (I) (Instr.4) | 11. Nature of Indirect Beneficial Ownership (Instr.4) | | | | | |"
fixed_table2_row2 = "| 1. Title of Derivative Security (Instr. 3) | 2. Conver- sion or Exercise Price of Deri- vative Security | 3. Transaction Date (Month/ Day/ Year) | 3A. Deemed Execution Date, if any (Month/ Day/ Year) | Code | V | A | D | DE | ED | Title | Amount or Number of Shares | 8. Price of Derivative Security (Instr.5) | 9. Number of Derivative Securities Beneficially Owned Following Reported Transaction(s) (Instr.4) | 10. Owner- ship Form of Deriv- ative Securities: Direct (D) or Indirect (I) (Instr.4) | 11. Nature of Indirect Beneficial Ownership (Instr.4) |"
markdown_text = markdown_text.replace(bad_table2_row2, fixed_table2_row2)
markdown_text = _convert_bullet_tables_to_lists(markdown_text)
footnote_spacing_pattern = re.compile(
r'(?<=[^\n])'
r'\n'
r'(\d+ .*)',
re.MULTILINE
)
markdown_text = footnote_spacing_pattern.sub(r'\n\n\1', markdown_text)
markdown_text = markdown_text.replace("**\n•", "**\n\n•").replace("** 1. ", "**\n\n1. ")
markdown_text = markdown_text.replace(" ##COLSPAN", "##COLSPAN").replace(" ##ROWSPAN", "##ROWSPAN")
markdown_text = markdown_text.replace("p>", " ").replace(" ", "").replace(" ", " ").replace("0.%", "0.0%").replace("ER>", "")
markdown_text = re.sub(r'SPAN##[1-9](?!\d)', 'SPAN##', markdown_text)
markdown_text = re.sub(
r'(?m)^QuickLinks(?: -- Click here to rapidly navigate through this document)?\r?\n\n',
'',
markdown_text
)
markdown_text = _remove_page_numbers(markdown_text)
markdown_text = re.sub(r'(?m)^[ \t]*(?:100|[1-9]?\d) \s+QuickLinks[ \t]*\r?$', '', markdown_text)
markdown_text = markdown_text.replace(' " *', ' "*').replace('* " ', '*" ').replace(' (" *', ' ()"*').replace('* ") ', '*") ').replace("**(** ", "**(**")
markdown_text = markdown_text.replace("##TRIPLE_ASTERISK##", "\\*\\*\\*")
markdown_text = markdown_text.replace("##DOUBLE_ASTERISK##", "\\*\\*")
markdown_text = markdown_text.replace("##SINGLE_ASTERISK##", "\\*")
markdown_text = re.sub(r'(?m)^((?:[A-Za-z]\))|(?:[1-9]\d?\.))([A-Za-z])', r'\1 \2', markdown_text)
markdown_text = re.sub(r'(\s*\d+\s* )\s* \s+(?=[A-Za-z0-9])', r'\1 ', markdown_text, flags=re.I)
markdown_text = re.sub(
r'\s*(\((?:\d+|[A-Za-z]+))\s* \s* \s*\s*(\))\s* ',
r'\1\2 ',
markdown_text,
flags=re.I,
)
pattern = r'(?m)(^[○•●·◦➢])(?=\S)'
markdown_text = re.sub(pattern, r'\1 ', markdown_text)
markdown_text = re.sub(r'(?:\n\n------){2,}', '\n\n------', markdown_text)
pattern = r'([○•●·◦➢])\s* \s*'
markdown_text = re.sub(pattern, r'\1 ', markdown_text)
markdown_text = markdown_text.replace(" ", " ").replace(" ", "")
markdown_text = re.sub(r"##COLSPAN_\d+## ", "", markdown_text)
pattern = r'(?m)^(\(\d+\) )(?!\s)(\S)'
markdown_text = re.sub(pattern, r'\1 \2', markdown_text)
markdown_text = apply_markdown_hardcodes(markdown_text)
return markdown_text.strip()
# Pattern used to locate preformatted blocks so they can be stashed before
# the generic cleanup passes run.  NOTE(review): the literal tag text in this
# pattern appears to have been stripped during extraction (the inline (?is)
# flags survive but the tag names do not) -- confirm against the original
# source before relying on the exact pattern.
_pre_tag_re = re.compile(r'(?is).*? ')
def _extract_pre_blocks(text: str, stash,
                        ph_fmt="__PRE_BLOCK_{:03d}__") -> str:
    """Replace every matched pre-block with a unique placeholder and store the block.

    The matched text is appended to *stash* (caller-owned list) and a
    placeholder formatted with *ph_fmt* is substituted in its place, so the
    original block can be re-inserted verbatim after other text transforms.
    """
    def _sub(m):
        # The placeholder index is the current stash length, so placeholders
        # are numbered 000, 001, ... in document order.
        idx = len(stash)
        stash.append(m.group(0))
        return ph_fmt.format(idx)
    return _pre_tag_re.sub(_sub, text)
def parse_legacy_13f_hr_txt(raw_text: str) -> str:
    """
    Route a legacy 13F-HR plain-text filing to the parser that matches
    its layout.

    Fixed-width tables (ruled with dashes and carrying a "NAME OF ISSUER"
    header) go to ``parse_plaintext_filing``; anything that instead shows
    the OCR artifact "x x" goes to the free-form parser.
    """
    has_ruled_table = "----------------" in raw_text and "NAME OF ISSUER" in raw_text
    looks_free_form = "x x" in raw_text
    if has_ruled_table or not looks_free_form:
        print("--> Detected fixed-width format.")
        return parse_plaintext_filing(raw_text)
    print("--> Detected free-form (OCR-style) format.")
    return _parse_free_form_13f_hr(raw_text)
def _parse_free_form_13f_hr(raw_text: str) -> str:
    """
    Parse a legacy 13F-HR filing that is in a free-form, OCR-like text format.

    The text is split at the "Form 13F Information Table" heading into a
    cover section and a holdings blob.  One holding row is recovered per
    CUSIP-looking token, and the result is rendered as a cover plus a
    Markdown table via ``to_compact_markdown``.
    """
    import re, pandas as pd
    # A CUSIP: 8 alphanumeric characters plus a trailing digit (check digit).
    CUSIP_RE = re.compile(r'\b([0-9A-Za-z]{8}[0-9])\b')
    # Comma-grouped integers such as "1,234,567".
    NUM_RE = re.compile(r'(\d{1,3}(?:,\d{3})*)')
    # Security-title strings that may follow the issuer name.
    TITLE_CANDIDATES = [
        "Common Stock","Common","ADR","SPON ADR","Spon ADR","Spons ADR",
        "Preferred","PRFD","Convertible","Convert","Convert Bond","ConvertBond",
        "Debenture","Notes"
    ]
    # Canonical output column order for the holdings table.
    HEADERS = [
        'NAME OF ISSUER','TITLE OF CLASS','CUSIP','VALUE (x$1000)',
        'SHRS OR PRN AMT','SH/PRN','PUT/CALL','INVESTMENT DISCRETION','OTHER MANAGER',
        'VOTING AUTHORITY (SOLE)','VOTING AUTHORITY (SHARED)','VOTING AUTHORITY (NONE)'
    ]
    def smart_title_case(text: str) -> str:
        # Title-case words but preserve existing all-caps tokens (e.g. "IBM").
        words = text.split()
        title_cased_words = []
        for word in words:
            if word.isupper() and len(word) > 1:
                title_cased_words.append(word)
            else:
                title_cased_words.append(word.capitalize())
        return ' '.join(title_cased_words)
    def _normalize_number_token(s: str) -> str:
        # Drop thousands separators so the token can be parsed with int().
        return s.replace(',', '')
    def _pick_title(text: str) -> str:
        # Choose the candidate whose last occurrence sits right-most in the
        # text; fall back to "Common" when no candidate matches.
        up = text.upper()
        best = None
        for t in TITLE_CANDIDATES:
            if t.upper() in up:
                if not best or up.rfind(t.upper()) > up.rfind(best.upper()):
                    best = t
        return best or "Common"
    table_start = re.search(r'Form 13F Information Table', raw_text, re.I | re.S)
    if not table_start:
        # Without the table heading there is nothing we can parse.
        return ""
    # --- Cover section: everything before the information table ---
    cover = raw_text[:table_start.start()]
    cover = "\n".join(l.strip() for l in cover.splitlines() if l.strip())
    cover = re.sub(r'FORM 13F\s+FORM 13F', '## FORM 13F (Some records may not parsed due to OCR errors in the original filing)', cover, flags=re.I)
    cover = re.sub(r'COVER PAGE', '\n\n### COVER PAGE', cover, flags=re.I)
    cover = re.sub(r'SUMMARY PAGE', '\n\n### SUMMARY PAGE', cover, flags=re.I)
    cover = re.sub(r'Name:', '\n\n**Name:**', cover, flags=re.I)
    cover = re.sub(r'Address:', '\n\n**Address:**', cover, flags=re.I)
    cover = re.sub(r'Person signing this report on behalf of Reporting Manager:',
                   '\n\n**Person signing this report on behalf of Reporting Manager:**',
                   cover, flags=re.I)
    # --- Holdings blob: everything after the information table heading ---
    blob = raw_text[table_start.end():]
    # NOTE(review): the pattern below looks truncated by tag-stripping
    # (likely originally matched literal </?TABLE> / </?C> markup) -- as
    # written, a leading '?' is not a valid regex; confirm against the
    # original source.
    blob = re.sub(r'?TABLE>|?C>', ' ', blob, flags=re.I)
    blob = re.sub(r'\s+', ' ', blob).strip()
    # Re-join numbers that OCR split around thousands separators.
    blob = re.sub(r'(\d),\s+(\d{3})', r'\1,\2', blob)
    blob = re.sub(r'(\d{1,3}),\s+(\d)\s+(\d{3}\b)', r'\1 \2,\3', blob)
    matches = list(CUSIP_RE.finditer(blob))
    holdings = []
    for i, m in enumerate(matches):
        # Each record spans from this CUSIP up to the next one (or blob end).
        start = m.start()
        end = matches[i+1].start() if i+1 < len(matches) else len(blob)
        rec = blob[start:end].strip()
        cusip = m.group(1).upper()
        rest = rec[len(m.group(1)):].strip()
        # First two numeric tokens are taken as value and share count.
        nums = NUM_RE.findall(rest)
        value_tok = nums[0] if len(nums) >= 1 else '0'
        shares_tok = nums[1] if len(nums) >= 2 else '0'
        first_num_pos = rest.find(value_tok) if nums else -1
        issuer_title_text = rest
        if first_num_pos != -1:
            issuer_title_text = rest[:first_num_pos].strip()
        else:
            # No numbers found: cut at the OCR "x" discretion markers instead.
            if ' x' in rest.lower():
                issuer_title_text = rest.lower().split(' x')[0].strip()
        value_raw = _normalize_number_token(value_tok)
        shares_raw = _normalize_number_token(shares_tok)
        title = _pick_title(issuer_title_text)
        # Remove the title (and stray "Spon ADR" fragments) from the issuer.
        issuer_text = re.sub(re.escape(title) + r'$', '', issuer_title_text, flags=re.I).strip()
        issuer_text = re.sub(r'\bSpons?\b\s*A(?:DR)?\b$', '', issuer_text, flags=re.I).strip()
        issuer_text = re.sub(r'\s{2,}', ' ', issuer_text)
        issuer = smart_title_case(issuer_text)
        # Standalone "x" marks are treated as discretion/voting indicators.
        x_count = len(re.findall(r'\bx\b', rec, flags=re.I))
        inv_disc = "SOLE" if x_count >= 1 else "—"
        voting_sole = int(shares_raw) if (x_count >= 1 and shares_raw.isdigit() and int(shares_raw) > 0) else 0
        is_prn = any(k in title.upper() for k in ("CONVERT", "DEBENT", "NOTES"))
        row = {
            'NAME OF ISSUER': issuer or '—',
            'TITLE OF CLASS': 'ConvertBond' if 'CONVERT' in title.upper() else (title or 'Common'),
            'CUSIP': cusip,
            'VALUE (x$1000)': f"{int(value_raw):}" if value_raw.isdigit() and int(value_raw) > 0 else '—',
            'SHRS OR PRN AMT': f"{int(shares_raw):}" if shares_raw.isdigit() and int(shares_raw) > 0 else '—',
            'SH/PRN': 'PRN' if is_prn else 'SH',
            'PUT/CALL': '—',
            'INVESTMENT DISCRETION': inv_disc,
            'OTHER MANAGER': '—',
            'VOTING AUTHORITY (SOLE)': f"{voting_sole:,}" if voting_sole > 0 else '—',
            'VOTING AUTHORITY (SHARED)': '—',
            'VOTING AUTHORITY (NONE)': '—',
        }
        holdings.append(row)
    if not holdings:
        # NOTE(review): this f-string carries no message after the cover --
        # possibly truncated during extraction; confirm against the original.
        return f"{cover}\n\n"
    df = pd.DataFrame(holdings).reindex(columns=HEADERS).fillna('—')
    return f"{cover}\n\n{to_compact_markdown(df, index=False)}"
def _process_image_bytes_with_mistral(
    image_bytes: bytes,
    file_name: str,
    page_no: int,
    *,
    per_table_sleep_s: float,
    mistral_api_key: Optional[str] = None,
):
    """
    Process a single rendered page image using Mistral OCR and return the
    extracted page text (or an empty/placeholder string on failure).

    The image is uploaded via the Mistral files API, a signed URL is
    obtained, and the OCR endpoint is called with retry/backoff on
    transient HTTP statuses.  Key rotation and usage accounting are
    delegated to ``_run_with_mistral_key_rotation`` /
    ``_record_mistral_key_success``.

    NOTE(review): *per_table_sleep_s* is accepted but never read in this
    body -- presumably kept for interface parity with the PDF path; confirm.
    """
    try:
        def _run_page_ocr(*, client: Mistral, api_key: str, key_spec: Dict[str, Any]):
            # Upload the page image so OCR can reference it by signed URL.
            up = client.files.upload(file={"file_name": f"page_{page_no}_{file_name}", "content": image_bytes}, purpose="ocr")
            if not up or not up.id:
                raise Exception("Image upload failed to return a valid ID.")
            signed_url = get_signed_url_with_retry(client, file_id=up.id)
            headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
            payload = _build_mistral_ocr_payload(signed_url)
            # Inline retry loop with exponential backoff + jitter on
            # transient statuses.  NOTE(review): if the FINAL attempt also
            # returns a transient status, the loop falls through and
            # response.json() is called on the error response; the shared
            # _post_mistral_ocr_with_retry helper handles this case --
            # consider using it here.
            max_ocr_retries = 4
            ocr_delay = 2.0
            for attempt in range(max_ocr_retries):
                response = requests.post(OCR_API_URL, headers=headers, json=payload, timeout=600)
                if response.status_code in [429, 500, 502, 503, 504]:
                    if attempt < max_ocr_retries - 1:
                        print(f"OCR API error {response.status_code}. Retrying in {ocr_delay:.2f}s... (Attempt {attempt + 1}/{max_ocr_retries})")
                        time.sleep(ocr_delay)
                        ocr_delay *= 2
                        ocr_delay += random.uniform(0, 0.5)
                        continue
                response.raise_for_status()
                break
            ocr_data = response.json()
            usage = _summarize_ocr_usage(ocr_data, response.headers)
            if not ocr_data.get("pages"):
                print(f"[page {page_no}] OCR returned no content.")
                return "", usage, key_spec["env_name"]
            # Single image upload -> single OCR page in the response.
            page_obj = ocr_data["pages"][0]
            return _pick_text(page_obj), usage, key_spec["env_name"]
        page_text, usage, used_env_name = _run_with_mistral_key_rotation(
            f"rendered page {page_no} for {file_name}",
            _run_page_ocr,
            explicit_api_key=mistral_api_key,
        )
        _record_mistral_key_success(used_env_name, usage=usage, explicit_api_key=mistral_api_key)
        return page_text
    except Exception as e:
        # Best-effort: a failed page is logged and skipped, not fatal.
        print(f"API processing for page {page_no} failed and was skipped. Error: {e}")
        # NOTE(review): empty f-string -- likely a placeholder comment/marker
        # was stripped during extraction; confirm against the original source.
        return f""
def parse_html_via_ocr(filepath: pathlib.Path) -> str:
    """
    High-quality OCR-based parser for positioned HTML.

    Renders each page container of the HTML document to a PNG image with
    imgkit and sends it through Mistral OCR to extract text and tables.
    Pages are joined with a Markdown horizontal-rule separator.  Returns
    "" when no Mistral API keys are configured.
    """
    _load_sec_parser_env()
    if not _has_mistral_api_keys():
        print(f"{_mistral_no_keys_message()} Skipping OCR processing.")
        return ""
    _log_current_filing_ocr("html_image_ocr")
    html_content = filepath.read_text(encoding='utf-8', errors='replace')
    try:
        soup = BeautifulSoup(html_content, "lxml")
    except ValueError as e:
        # lxml can crash on malformed attributes; fall back to the slower
        # but more tolerant built-in parser in that specific case only.
        if "not enough values to unpack" in str(e):
            print(f"[Warning] lxml parser crashed on malformed attributes. Falling back to html.parser.")
            soup = BeautifulSoup(html_content, "html.parser")
        else:
            raise
    # Positioned-HTML exports mark page containers with ids like pf1, pf2, ...
    pages = soup.find_all('div', id=re.compile(r'^pf\w+$'))
    if not pages:
        # NOTE(review): the example markup inside this message appears to
        # have been stripped during extraction; confirm original wording.
        print("Could not find page containers (e.g., ). Treating document as a single page.")
        pages = [soup]
    md_parts = []
    # imgkit/wkhtmltoimage rendering options for each page snapshot.
    options = {
        'format': 'png',
        'quality': '100',
        'width': '1224',
        'disable-smart-width': ''
    }
    print(f"Found {len(pages)} pages to process via OCR.")
    for i, page_soup in enumerate(pages, 1):
        print(f"--> Rendering and processing page {i}...")
        try:
            # False as output path => imgkit returns the PNG bytes directly.
            image_bytes = imgkit.from_string(str(page_soup), False, options=options)
            page_content = _process_image_bytes_with_mistral(
                image_bytes=image_bytes,
                file_name=filepath.name,
                page_no=i,
                per_table_sleep_s=Config.PER_TABLE_SLEEP_SECONDS,
                mistral_api_key=None,
            )
            md_parts.append(page_content)
        except Exception as e:
            # A failing page should not abort the whole document.
            error_msg = f"Failed to render or process page {i}: {e}"
            print(error_msg)
            # NOTE(review): empty f-string -- a placeholder/error marker was
            # likely stripped during extraction; confirm against original.
            md_parts.append(f"")
    return "\n\n------\n\n".join(md_parts)
def parse_series_and_classes_sgml(header_content: str) -> str:
    """
    Parse the series-and-classes-contracts-data SGML block from a filing
    header into structured Markdown: one "###" heading per series with a
    table of its class contracts (ID, name, ticker).

    Returns "" when the block or any series entries are absent.

    NOTE(review): every regex literal below appears to have had its SGML
    tag text stripped during extraction (e.g. the series/class tag names
    are missing), so the patterns as shown cannot match real headers --
    confirm each pattern against the original source.
    """
    sgml_match = re.search(
        r"(.*?) ",
        header_content,
        re.S | re.I
    )
    if not sgml_match:
        return ""
    sgml_content = sgml_match.group(1)
    md_parts = ["## Series and Classes Contracts Data"]
    # Split into one chunk per series; [0] is the preamble before the first
    # series marker, hence the [1:].
    series_blocks = re.split(r'', sgml_content, flags=re.I)[1:]
    if not series_blocks:
        return ""
    for series_block in series_blocks:
        series_name_match = re.search(r'\s*([^\n<]+)', series_block, re.I)
        series_id_match = re.search(r'\s*([^\n<]+)', series_block, re.I)
        series_name = series_name_match.group(1).strip() if series_name_match else "—"
        series_id = series_id_match.group(1).strip() if series_id_match else "—"
        md_parts.append(f"\n### {series_name} (Series ID: {series_id})")
        class_records = []
        class_contract_blocks = re.findall(
            r'(.*?)(?=||$)',
            series_block,
            re.S | re.I
        )
        for class_block in class_contract_blocks:
            id_match = re.search(r'\s*([^\n<]+)', class_block, re.I)
            name_match = re.search(r'\s*([^\n<]+)', class_block, re.I)
            ticker_match = re.search(r'\s*([^\n<]+)', class_block, re.I)
            # Missing fields render as an em dash placeholder.
            record = {
                'Class ID': id_match.group(1).strip() if id_match else "—",
                'Class Name': name_match.group(1).strip() if name_match else "—",
                'Ticker Symbol': ticker_match.group(1).strip() if ticker_match else "—",
            }
            class_records.append(record)
        if class_records:
            df = pd.DataFrame(class_records)
            md_parts.append(to_compact_markdown(df, index=False))
    return "\n\n".join(md_parts)
def parse_legacy_paper_filing(raw_text: str, form_type: str) -> str:
    """
    Parse legacy plain-text "paper" filings (ADV series, MSDW, etc.) into
    Markdown.

    Output layout: a fixed SEC title banner, the header key/value pairs
    rendered as bold "**key:** value" lines (with known section names
    promoted to "###" headings), and any document note rendered as a
    blockquote.

    NOTE(review): the header/document regex literals below look truncated
    by tag-stripping (e.g. the closing header tag is missing its "</"),
    and one f-string is empty -- confirm each against the original source.
    """
    # Map of known form codes to their human-readable titles.
    form_titles = {
        "ADV": "FORM ADV: Uniform Application for Investment Adviser Registration",
        "ADV/A": "FORM ADV/A: Amendment to Form ADV",
        "ADV-E": "FORM ADV-E: Certificate of Accounting of Client Securities and Funds",
        "ADV-H-T": "FORM ADV-H-T: Application for a Temporary Hardship Exemption",
        "ADV-H-C": "FORM ADV-H-C: Application for a Continuing Hardship Exemption",
        "ADV-NR": "FORM ADV-NR: Appointment of Agent for Service of Process by Non-Resident Adviser",
        "ADVW": "FORM ADVW: Notice of Withdrawal from Registration as Investment Adviser",
        "ADVCO": "FORM ADVCO: Correction to an ADV Filing",
        "MSDW": "FORM MSDW: Notice of Withdrawal from Registration as a Municipal Securities Dealer"
    }
    title = form_titles.get(form_type, f"Form {form_type}")
    md_parts = [
        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
        "**Washington, D.C. 20549**\n\n"
        f"## {title}\n"
    ]
    header_match = re.search(r"<(?:SEC|IMS)-HEADER>(.*?)(?:SEC|IMS)-HEADER>", raw_text, re.S | re.I)
    if not header_match:
        md_parts.append(f"")
    else:
        header_content = header_match.group(1).strip()
        for line in header_content.splitlines():
            line = line.strip()
            if not line:
                continue
            # A non-indented line ending in ':' marks a header section.
            if not line.startswith('\t') and line.endswith(':'):
                section_name = line.rstrip(':').strip()
                if section_name in ["FILER", "COMPANY DATA", "FILING VALUES", "BUSINESS ADDRESS", "MAIL ADDRESS", "FORMER COMPANY"]:
                    md_parts.append(f"\n### {section_name.title()}")
                continue
            # Remaining "key: value" lines become bold key/value rows.
            if ':' in line:
                key, value = line.split(':', 1)
                key = key.strip()
                value = value.strip()
                if key and value:
                    md_parts.append(f"**{key}:** {value}")
    doc_match = re.search(r"(.*?) ", raw_text, re.S | re.I)
    if doc_match:
        doc_content = doc_match.group(1)
        text_match = re.search(r"(.*?) ", doc_content, re.S | re.I)
        if text_match:
            doc_text = text_match.group(1).strip()
            if doc_text:
                md_parts.append("\n### Document Note")
                blockquote_lines = [f"> {line.strip()}" for line in doc_text.splitlines() if line.strip()]
                md_parts.append("\n".join(blockquote_lines))
    return "\n".join(md_parts)
def _extract_class_name_map_from_header_content(header_content: str) -> dict:
    """
    Build a {class-contract-id: class-contract-name} mapping from the
    series-and-classes-contracts-data section of an SGML filing header.

    Returns an empty dict when the header is empty or the section is
    missing.  Falls back from lxml to html.parser when lxml crashes on
    malformed attributes.
    """
    if not header_content:
        return {}
    try:
        soup = BeautifulSoup(header_content, 'lxml')
    except ValueError as exc:
        # Only the known lxml attribute-parsing crash triggers the fallback.
        if "not enough values to unpack" not in str(exc):
            raise
        print("[Warning] lxml crashed while parsing header contract data. Falling back to html.parser.")
        soup = BeautifulSoup(header_content, 'html.parser')
    mapping = {}
    container = soup.find('series-and-classes-contracts-data')
    if container is None:
        return mapping
    for series in container.find_all('series'):
        for contract in series.find_all('class-contract'):
            id_tag = contract.find('class-contract-id')
            name_tag = contract.find('class-contract-name')
            if not (id_tag and name_tag):
                continue
            # Only the tag's direct text is taken, ignoring nested children.
            raw_id = id_tag.find(string=True, recursive=False)
            raw_name = name_tag.find(string=True, recursive=False)
            if raw_id and raw_name:
                contract_id = raw_id.strip()
                if contract_id:
                    mapping[contract_id] = raw_name.strip()
    return mapping
def _iter_document_blocks(raw_bytes: bytes):
    # Yield the body of each document block found in *raw_bytes*.
    # NOTE(review): the pattern literal below appears truncated by
    # tag-stripping (the DOCUMENT start/end markers are missing) -- confirm
    # against the original source.
    for match in re.finditer(rb"(.*?) ", raw_bytes, re.S | re.I):
        yield match.group(1)
def _iter_document_blocks_from_file(filepath: pathlib.Path, chunk_size: int = 8 * 1024 * 1024):
    """
    Stream document blocks from *filepath* without loading the whole file
    into memory, yielding each block body as bytes.

    NOTE(review): the start/end marker patterns below look truncated by
    tag-stripping (likely the DOCUMENT open/close tags) -- confirm against
    the original source.  Also note a document left unterminated at EOF is
    silently dropped (the while loop breaks without yielding doc_parts).
    """
    start_re = re.compile(rb"", re.I)
    end_re = re.compile(rb" ", re.I)
    # Bytes retained across reads so a marker split by a chunk boundary is
    # still found on the next pass.
    overlap = 64
    buffer = b""
    in_document = False
    doc_parts: List[bytes] = []
    with filepath.open("rb") as handle:
        while True:
            chunk = handle.read(chunk_size)
            if not chunk:
                break
            buffer += chunk
            # Inner loop: consume as many complete blocks as the current
            # buffer contains before reading more.
            while True:
                if not in_document:
                    start_match = start_re.search(buffer)
                    if not start_match:
                        # Keep only the overlap tail; nothing to emit yet.
                        buffer = buffer[-overlap:]
                        break
                    buffer = buffer[start_match.end():]
                    doc_parts = []
                    in_document = True
                end_match = end_re.search(buffer)
                if end_match:
                    # Complete block: emit accumulated parts + final piece.
                    doc_parts.append(buffer[:end_match.start()])
                    yield b"".join(doc_parts)
                    buffer = buffer[end_match.end():]
                    doc_parts = []
                    in_document = False
                    continue
                if len(buffer) > overlap:
                    # Still inside a block: bank all but the overlap tail.
                    doc_parts.append(buffer[:-overlap])
                    buffer = buffer[-overlap:]
                break
def _first_document_block_from_file(filepath: pathlib.Path) -> Optional[bytes]:
    """Return the first block yielded by ``_iter_document_blocks_from_file``,
    or None when the file contains no blocks."""
    for block in _iter_document_blocks_from_file(filepath):
        return block
    return None
def _read_prefix_until_any(
filepath: pathlib.Path,
markers: List[bytes],
chunk_size: int = 1024 * 1024,
max_bytes: int = 64 * 1024 * 1024,
) -> bytes:
buffer = b""
marker_res = [re.compile(re.escape(marker), re.I) for marker in markers if marker]
with filepath.open("rb") as handle:
while True:
chunk = handle.read(chunk_size)
if not chunk:
return buffer
buffer += chunk
earliest_end: Optional[int] = None
for marker_re in marker_res:
match = marker_re.search(buffer)
if match and (earliest_end is None or match.end() < earliest_end):
earliest_end = match.end()
if earliest_end is not None:
return buffer[:earliest_end]
if len(buffer) >= max_bytes:
return buffer
def _file_contains_bytes(filepath: pathlib.Path, needle: bytes, chunk_size: int = 4 * 1024 * 1024) -> bool:
if not needle:
return False
needle_lower = needle.lower()
overlap = max(0, len(needle) - 1)
tail = b""
with filepath.open("rb") as handle:
while True:
chunk = handle.read(chunk_size)
if not chunk:
return False
haystack = tail + chunk
if needle_lower in haystack.lower():
return True
tail = haystack[-overlap:] if overlap else b""
def _extract_xml_blobs_from_body_bytes(body_bytes: bytes) -> List[str]:
    # Extract every embedded XML sub-document from a document body and run
    # each through normalize_text_markup.
    # NOTE(review): the pattern literal below appears truncated by
    # tag-stripping (the XML open/close markers are missing) -- confirm
    # against the original source.
    return [
        normalize_text_markup(match.group(1))
        for match in re.finditer(rb"(.*?) ", body_bytes, re.S | re.I)
    ]
def _body_bytes_without_xml(body_bytes: bytes) -> bytes:
    # Intended to return the document body with embedded XML sections
    # removed (used by the main loop before plaintext/HTML parsing).
    # NOTE(review): the line below is visibly mangled/truncated -- an `if`
    # with no body and a stray b"" argument.  The original presumably
    # short-circuited when no XML marker was present and otherwise
    # re.sub-ed the XML sections to b"".  Recover from the original source
    # before shipping; left byte-identical here.
    if not re.search(rb".*? ", b"", body_bytes, flags=re.S | re.I)
def _iter_pdf_attachment_texts_from_file(
    filepath: pathlib.Path,
    skip_types: set[bytes],
    xbrl_ex_re: re.Pattern,
):
    """
    Yield the latin-1-decoded bodies of document blocks that contain an
    embedded PDF, skipping types in *skip_types* and XBRL exhibit types
    matching *xbrl_ex_re*.

    NOTE(review): the byte-regex literals below look truncated by
    tag-stripping (TYPE/TEXT/DESCRIPTION/PDF markers missing) -- confirm
    each against the original source.
    """
    for doc_bytes in _iter_document_blocks_from_file(filepath):
        m = re.search(rb"\s*([^\s<]+)", doc_bytes, re.I)
        doc_type_bytes = m.group(1).upper() if m else b""
        if doc_type_bytes in skip_types or xbrl_ex_re.match(doc_type_bytes):
            continue
        text_match = re.search(rb"(.*?) ", doc_bytes, re.S | re.I)
        body_bytes = text_match.group(1) if text_match else doc_bytes
        if not body_bytes.strip():
            # Empty body: fall back to the description field, if any.
            desc_match = re.search(rb"\s*(.*?)\s*<", doc_bytes, re.S | re.I)
            if desc_match:
                body_bytes = desc_match.group(1)
        if re.search(rb"", body_bytes, re.I):
            yield body_bytes.decode('latin-1', errors='replace')
def process_local_xbrl(filepath: pathlib.Path) -> str:
    """
    Read an EDGAR filing (HTML/HTM/TXT) and return a clean Markdown version.

    Documents are processed sequentially to preserve the original filing
    order.  The routine: parses the SEC/IMS header, short-circuits to
    dedicated parsers for legacy paper filings and 13F/N-PX XML, then
    walks each document block routing it to NSAR/HTML/plaintext parsers,
    defers PDF attachments and XML blobs to batch parsers at the end, and
    finally re-inserts stashed pre-blocks.  Parse statistics are recorded
    into the module-global LAST_PARSE_STATS via finish().

    NOTE(review): many byte-regex literals in this function appear
    truncated by tag-stripping (header/TEXT/TYPE/DESCRIPTION/PDF/PAPER
    markers missing) -- confirm each against the original source.
    """
    global LAST_PARSE_STATS
    parse_stats = _new_parse_stats(filepath)
    def record(source_format: str, text: str, label: str = "") -> None:
        # Accumulate one parsed part into the stats object.
        _record_parse_stats_part(parse_stats, source_format, text, label)
    def record_pdf(text: str, label: str, page_count: int) -> None:
        # Like record(), but also tracks the number of OCR'd PDF pages.
        _record_parse_stats_part(parse_stats, "pdf", text, label)
        _record_parse_stats_pdf_pages(parse_stats, page_count)
    def finish(markdown_text: str, form_type: str = "") -> str:
        # Single exit point: finalize stats and return the stripped text.
        global LAST_PARSE_STATS
        final_text = (markdown_text or "").strip()
        LAST_PARSE_STATS = _finalize_parse_stats(parse_stats, final_text, form_type or main_form_type)
        return final_text
    # Read only the file prefix up to the first header/terminator marker.
    prefix_bytes = _read_prefix_until_any(
        filepath,
        [b" ", b"", b""],
    )
    first_doc_bytes = _first_document_block_from_file(filepath)
    main_form_type = ""
    legacy_paper_forms = {"MSDW", "MSDCO", "MSD", "MSD/A", "8-M", "9-M"}
    class_name_map = {}
    def is_html_content(content_str):
        # Heuristic: the presence of html/div/p tags marks HTML content.
        return re.search(r"<\s*(html|div|p)\b", content_str, re.I)
    header_match_bytes = re.search(rb"(.*?) ", prefix_bytes, re.S | re.I)
    ims_header_match_bytes = re.search(rb"(.*?) ", prefix_bytes, re.S | re.I)
    header_part = ""
    sgml_header_part = ""
    if header_match_bytes:
        # --- SEC-style header path ---
        header_bytes = header_match_bytes.group(1)
        header_content = normalize_text_markup(header_bytes)
        class_name_map = _extract_class_name_map_from_header_content(header_content)
        sgml_header_part = parse_series_and_classes_sgml(header_content)
        if (m := re.search(r"CONFORMED SUBMISSION TYPE:\s*([^\s]+)", header_content, re.I)):
            main_form_type = m.group(1).strip().upper()
        # Legacy paper filings get routed to the dedicated paper parser.
        if (main_form_type.startswith("ADV") or main_form_type in legacy_paper_forms) and _file_contains_bytes(filepath, b""):
            print(f"--> Detected legacy paper filing: {main_form_type}. Routing to dedicated paper parser.")
            full_text_decoded = filepath.read_bytes().decode('latin-1', 'replace')
            legacy_md = parse_legacy_paper_filing(full_text_decoded, main_form_type)
            record("text", legacy_md, "legacy_paper")
            return finish(legacy_md, main_form_type)
        if main_form_type in ("497", "24F-2NT"):
            header_part = parse_form497_file(header_content)
        else:
            header_part = parse_sec_header(header_content)
        record("sgml", header_part, "sec_header")
        record("sgml", sgml_header_part, "series_classes_sgml")
    elif ims_header_match_bytes:
        # --- IMS-style header path (requires full-file decode) ---
        full_text_decoded = filepath.read_bytes().decode('latin-1', 'replace')
        header_part = parse_ims_header(full_text_decoded)
        if (m := re.search(r"CONFORMED SUBMISSION TYPE:\s*([^\s]+)", full_text_decoded, re.I)):
            main_form_type = m.group(1).strip().upper()
        if (main_form_type.startswith("ADV") or main_form_type in legacy_paper_forms) and _file_contains_bytes(filepath, b""):
            print(f"--> Detected legacy paper filing: {main_form_type}. Routing to dedicated paper parser.")
            legacy_md = parse_legacy_paper_filing(full_text_decoded, main_form_type)
            record("text", legacy_md, "legacy_paper")
            return finish(legacy_md, main_form_type)
        record("sgml", header_part, "ims_header")
    else:
        header_part = ""
        record("sgml", header_part, "missing_header_placeholder")
    if first_doc_bytes is None:
        # No document blocks: treat the whole file body as one document.
        raw_bytes = filepath.read_bytes()
        body_content_bytes_match = re.search(rb"(.*)", raw_bytes, re.S | re.I)
        if not body_content_bytes_match:
            header_end = header_match_bytes.end() if header_match_bytes else 0
            body_content_bytes_match = raw_bytes[header_end:]
        else:
            body_content_bytes_match = body_content_bytes_match.group(1)
        if re.search(rb"", body_content_bytes_match, re.I):
            # Embedded PDF in the raw body.
            pdf_md, pdf_page_count = parse_pdf_attachments([body_content_bytes_match.decode('latin-1', 'replace')])
            record_pdf(pdf_md, "embedded_pdf", pdf_page_count)
            return finish(f"{header_part}\n\n{pdf_md}", main_form_type)
        body_content = normalize_text_markup(body_content_bytes_match)
        if is_html_content(body_content):
            md, positioned = parse_html_filing(body_content, form_type="", file_path=filepath)
            if positioned:
                # Positioned HTML was OCR'd page-by-page; count as pdf pages.
                body_md = md
                record_pdf(body_md, "positioned_html_ocr", LAST_POSITIONED_HTML_OCR_PAGE_COUNT)
            else:
                body_md = _post_process_text_cleanup(md)
                record("html", body_md, "body_html")
        else:
            body_md = parse_plaintext_filing(body_content)
            record("text", body_md, "body_text")
        return finish(f"{header_part}\n\n{body_md}", main_form_type)
    parts: List[str] = [header_part] if header_part else []
    if sgml_header_part:
        parts.append(sgml_header_part)
    pre_stash: List[str] = []
    xml_blobs: List[str] = []
    pdf_blobs: List[str] = []
    if main_form_type.startswith(('13F-', 'N-PX')):
        # --- 13F / N-PX fast path: gather XML or fall back to legacy text ---
        all_xml_contents = []
        legacy_text_content = ""
        for doc_bytes in _iter_document_blocks_from_file(filepath):
            text_match = re.search(rb"(.*?) ", doc_bytes, re.S | re.I)
            body_bytes = text_match.group(1) if text_match else doc_bytes
            if not body_bytes.strip(): continue
            xmls_in_doc = _extract_xml_blobs_from_body_bytes(body_bytes)
            if xmls_in_doc:
                all_xml_contents.extend(xmls_in_doc)
            else:
                doc_content = normalize_text_markup(body_bytes)
                if doc_content.strip() and "13F" in main_form_type:
                    # First non-XML text document wins for legacy parsing.
                    legacy_text_content = doc_content
                    break
        if all_xml_contents:
            parts = [header_part] if header_part else []
            xml_md = parse_any_xml(all_xml_contents)
            record("xml", xml_md, "13f_npx_xml")
            parts.append(xml_md)
            final_md = "\n\n".join(p for p in parts if p.strip())
            return finish(final_md, main_form_type)
        elif legacy_text_content:
            legacy_md = parse_legacy_13f_hr_txt(legacy_text_content)
            record("text", legacy_md, "legacy_13f_text")
            return finish(f"{header_part}\n\n{legacy_md}", main_form_type)
    # Attachment types that are never rendered into the Markdown output.
    skip_types = {b"EXCEL", b"XML", b"XBRLSUMMARY", b"JSON", b"ZIP", b"PAPER", b"GRAPHIC"}
    xbrl_ex_re = re.compile(rb"^EX-101\.(INS|SCH|CAL|DEF|LAB|PRE)$", re.I)
    if not main_form_type and first_doc_bytes is not None:
        # Header gave no form type: infer it from the first document block.
        if (m := re.search(rb"\s*([^\s<]+)", first_doc_bytes, re.I)):
            main_form_type = m.group(1).upper().decode('ascii', 'ignore')
    saw_pdf_blobs = False
    # --- Main sequential pass over every document block ---
    for idx, doc_bytes in enumerate(_iter_document_blocks_from_file(filepath), start=1):
        _debug_print(f"Document {idx} is being processed")
        m = re.search(rb"\s*([^\s<]+)", doc_bytes, re.I)
        doc_type_bytes = m.group(1).upper() if m else b""
        doc_type = doc_type_bytes.decode('ascii', 'ignore')
        if doc_type_bytes in skip_types or xbrl_ex_re.match(doc_type_bytes):
            continue
        text_match = re.search(rb"(.*?) ", doc_bytes, re.S | re.I)
        body_bytes = text_match.group(1) if text_match else doc_bytes
        if not body_bytes.strip():
            # Empty body: fall back to the description field.
            desc_match = re.search(rb"\s*(.*?)\s*<", doc_bytes, re.S | re.I)
            if desc_match:
                body_bytes = desc_match.group(1)
        if re.search(rb"", body_bytes, re.I):
            # PDFs are batched and parsed after the loop.
            saw_pdf_blobs = True
            continue
        xmls_in_doc = _extract_xml_blobs_from_body_bytes(body_bytes)
        if xmls_in_doc:
            # XML blobs are batched for parse_any_xml after the loop.
            xml_blobs.extend(xmls_in_doc)
            doc_content = normalize_text_markup(_body_bytes_without_xml(body_bytes))
        else:
            doc_content = normalize_text_markup(body_bytes)
        body_wo_xml = doc_content
        # Stash pre-blocks so later cleanup passes cannot touch them.
        body_wo_xml = _extract_pre_blocks(body_wo_xml, pre_stash)
        # A Form 4 without XML gets the legacy Form 4 cleanup path.
        is_legacy_form4_doc = (not xmls_in_doc) and (doc_type == "4")
        parsed_part = ""
        if doc_type.startswith("NSAR-B") and not is_html_content(body_wo_xml):
            parsed_part = parse_nsar_b_txt(body_wo_xml)
            parsed_source_format = "text"
        elif is_html_content(body_wo_xml) or xmls_in_doc:
            html_part, positioned = parse_html_filing(body_wo_xml, form_type=main_form_type, file_path=filepath)
            if positioned:
                parsed_part = html_part
                parsed_source_format = "pdf"
            else:
                parsed_part = _post_process_text_cleanup(html_part, legacy_form4=is_legacy_form4_doc)
                parsed_source_format = "html"
        else:
            parsed_part = parse_plaintext_filing(body_wo_xml)
            parsed_source_format = "text"
        if parsed_part.strip():
            if parsed_source_format == "pdf":
                record_pdf(parsed_part, doc_type or "positioned_html_ocr", LAST_POSITIONED_HTML_OCR_PAGE_COUNT)
            else:
                record(parsed_source_format, parsed_part, doc_type or parsed_source_format)
            # Exhibits get a "## Exhibit N.M" heading; other non-primary
            # documents get a heading from their description/type.
            if (ex := re.match(r"EX[-\s]?(\d+\.\d+)", doc_type, re.I)) and not xmls_in_doc:
                parts.append(f"\n## Exhibit {ex.group(1)}\n")
            elif doc_type and doc_type not in {main_form_type, ""} and not xmls_in_doc:
                desc_match = re.search(r"\s*(.*?)\s*<", doc_content, re.I)
                parts.append(f"\n## {(desc_match.group(1) if desc_match else doc_type).title()}\n")
            parts.append(parsed_part)
    if saw_pdf_blobs:
        # Parse all deferred PDF attachments in one pass.
        pdf_md, pdf_page_count = parse_pdf_attachments(
            _iter_pdf_attachment_texts_from_file(filepath, skip_types, xbrl_ex_re)
        )
        record_pdf(pdf_md, "pdf_attachments", pdf_page_count)
        parts.append(pdf_md)
    if xml_blobs:
        # Parse all deferred XML blobs in one pass.
        xml_md = parse_any_xml(xml_blobs, pdf_docs=None, class_name_map=class_name_map)
        record("xml", xml_md, "xml_documents")
        parts.append(xml_md)
    final_md = "\n\n".join(p for p in parts if p.strip())
    # Re-insert the stashed pre-blocks (rendered as plaintext) in place of
    # their numbered placeholders.
    rendered_pre = [parse_plaintext_filing(b) for b in pre_stash]
    if rendered_pre:
        record("text", "\n\n".join(rendered_pre), "pre_blocks")
    for i, block in enumerate(rendered_pre):
        final_md = final_md.replace(f"__PRE_BLOCK_{i:03d}__", block)
    return finish(final_md, main_form_type)
def conditional_delete(match_object):
    """Regex replacement callback for uuencoded `begin 644` lines.

    Keeps the full matched line when the captured portion after
    'begin 644 ' is longer than 50 characters (treated as real content);
    otherwise the match is removed by returning an empty string.
    """
    return match_object.group(0) if len(match_object.group(1)) > 50 else ""
def main_one(path: pathlib.Path, to_mmd: bool = False, source_document_url: Optional[str] = None) -> None:
    """Convert one local SEC filing to Markdown and write it next to the input.

    Runs `process_local_xbrl` on *path*, applies a long, strictly
    order-dependent chain of literal/regex Markdown cleanups, writes the
    result to ``path.with_suffix(".md")``, then emits parse-stats outputs.
    Any failure is logged with a traceback and reported to stderr instead of
    being raised to the caller.

    Args:
        path: Filing container file (HTML/HTM/TXT) to convert.
        to_mmd: When True, tables are converted to MultiMarkdown via
            `convert_all_tables_to_mmd` and MMD-specific cleanups run.
        source_document_url: Optional absolute source-document URL, stashed
            in a module global for fragment-link resolution downstream.

    NOTE(review): the original indentation of this block was lost during
    extraction; the grouping below is a best-effort reconstruction — confirm
    against the original file. Several literals also appear garbled (flagged
    inline).
    """
    # Per-file context lives in module globals so deeply nested helpers can
    # read it without threading parameters through every call.
    global CURRENT_PROCESSING_FILE, CURRENT_SOURCE_DOCUMENT_URL
    CURRENT_PROCESSING_FILE = str(path.resolve())
    CURRENT_SOURCE_DOCUMENT_URL = (source_document_url or '').strip() or None
    try:
        doc = process_local_xbrl(path)
        if not doc:
            raise ValueError("empty output")
        # Blank out table cells that contain only a dash/em-dash placeholder.
        # NOTE(review): the repeated identical .replace() calls are no-ops
        # after the first; the originals may have used distinct Unicode dash
        # variants that were normalized during extraction — confirm.
        doc = doc.replace('| — |', '| |').replace('| — |', '| |').replace('| — |', '| |').replace('| — |', '| |').replace('| - |', '| |').replace('| - |', '| |')
        # NOTE(review): this line is garbled in the extracted source
        # (unbalanced quotes; likely '<' characters were stripped). Restore
        # from the original file; as written it cannot compile.
        before_pattern = re.compile(r'(?---\n", "\n---\n")
        # Ensure a horizontal-rule line followed by non-rule text gets a
        # blank line between them so Markdown parses the rule correctly.
        delimiter_fix_pattern = re.compile(r'(^---)([ \t]*[^-].*)', re.MULTILINE)
        doc = delimiter_fix_pattern.sub(r'\1\n\n\2', doc)
        # Plain-Markdown branch (non-MMD cleanups).
        # NOTE(review): several replace/sub arguments below are empty
        # strings — the original pattern text appears lost in extraction.
        if to_mmd == False:
            doc = re.sub(r'(?<=[^\s-])', ' ', doc)
            doc = doc.replace('', r'\|')
            doc = doc.replace('##MD_NEWLINE##', ' ')
        # MultiMarkdown branch: convert tables, then restore super/subscript
        # markers (^ / ~) and escaped pipes.
        if to_mmd:
            doc = convert_all_tables_to_mmd(doc)
            doc = apply_markdown_hardcodes(doc)
            # NOTE(review): empty-string search arguments below look garbled
            # (probably superscript/subscript tag placeholders) — confirm.
            doc = doc.replace("", "^").replace(" ", "^")
            doc = doc.replace("", "~").replace(" ", "~")
            doc = doc.replace(r"\ |", r"\|")
            doc = re.sub(r'(?<=[^\s-])', ' ', doc)
            doc = doc.replace('', r'\|')
            doc = doc.replace('##MD_NEWLINE##', ' ')
        # Strip legacy EDGAR pagination markers when the escaped >R< / >PAGE<
        # tokens are all present (their escaped forms after processing).
        if (r"\>/R\<" in doc and r"\>R\<" in doc and r"\>PAGE\<" in doc):
            doc = doc.replace(r"\>/R\<", "").replace(r"\>R\<", "").replace(r"\>R\/R\<", "").replace(r"\>R\<", "").replace(r"\>R\\", "").replace(r"#### \>PAGE\<", "")
            doc = re.sub(r"\n\\>PAGE\\< ?\r?\n", "\n", doc)
            # Remove whole lines that consist solely of a marker token.
            MARKERS = re.compile(
                r'(?m)^[ \t]*(?:\*\*\\?>R\\?\*\*|\d+[ \t]+\\?>PAGE\\?<)[ \t]*(?:\r?\n|$)'
            )
            doc = MARKERS.sub('', doc)
        # Drop leftover rowspan/colspan placeholders from table flattening.
        doc = re.sub(r'##(ROWSPAN_\d+|COLSPAN_\d+)##', '', doc)
        # Tighten spacing before closing brackets / percent signs.
        doc = doc.replace(" )", ")").replace(" ]", "]").replace(" %", "%").replace(" )%", ")%")
        # Remove stray footnote-style " .N" fragments (single digit only).
        doc = re.sub(r' \.[1-9](?!\d)', '', doc, flags=re.I)
        # Em-dash-only cells (with padding) become blank cells.
        doc = re.sub(r'(?<=\| )—(?= +\|)', ' ', doc)
        # Join "$ 123" / "$ (" into "$123" / "$(".
        doc = re.sub(r'\$ (?=[0-9(])', '$', doc)
        # NOTE(review): replacing these asterisk placeholders with '' looks
        # garbled — the names suggest they should restore *, **, *** — confirm.
        doc = doc.replace('##SINGLE_ASTERISK##', '').replace('##DOUBLE_ASTERISK##', '').replace('##TRIPLE_ASTERISK##', '')
        # Indent bullet "o" items inside table cells via a temp marker.
        doc = re.sub(r'(?m)^\| o (?=.)', r'| ##INDENT##o ', doc)
        doc = doc.replace('\u2063', '').replace('##INDENT##', '  ')
        doc = doc.replace("\n\n\n\n  ", "")
        doc = doc.replace("| **%****%** |", "| **%** |")
        # Collapse runs of 3+ asterisks before ".**" or ")%**" to a single *.
        doc = re.sub(r'(\*{3,})(\.\*\*|\)?%\*\*)', r'*\2', doc)
        # Truncate the document at the first uuencoded attachment, if any.
        m = re.search(r'^begin 644.*(?:\r?\n|$)', doc, flags=re.M)
        doc = doc.replace("| nan |", "| |").replace("| nan |", "| |")
        doc = doc[:m.start()] if m else doc
        doc = doc.replace("\n## Excel\n\n\n", "")
        doc = doc.replace('##SPACE##', ' ').replace('##I_SPACE##', ' ')
        doc = doc.replace("  |", " |")
        doc = re.sub(r"\n * *\n", "\n", doc)
        # "12 % |" -> "12% |" inside table cells.
        doc = re.sub(r"(\d+)\s%( \|)", r"\1%\2", doc)
        doc = doc.replace("| %** |", "| **%** |")
        # "**12** %**" style fragments -> "**12%**".
        doc = re.sub(r"\*\*(\d+(?:\.\d+)?)\*\*\s*%?\s*\*\*(?=\s|$)", r"**\1%**", doc)
        # Strip a stray leading space before headings; then force headings
        # onto their own paragraph.
        doc = re.sub(r"(^|\n) (?=#+)", r"\1", doc)
        doc = re.sub(r" (?=#+)", r"\n\n", doc)
        doc = doc.replace(" ------", "------").replace("  ", " ").replace(" **)**", "**)**")
        # Escape a bare "# # #" separator so it is not parsed as headings.
        doc = doc.replace("\n# # #\n", "\n\\# \\# \\#\n")
        doc = doc.replace("\n ---\n\n|", "\n---\n\n|")
        # A "------" rule glued to preceding text gets its own paragraph.
        pattern = r'(?<=[^\s-])------(?=\r?\n)'
        replacement = r'\n\n------'
        doc = re.sub(pattern, replacement, doc)
        doc = doc.replace(" % |", "% |").replace("* *%* |", "%* |").replace("*** *%***", "%***").replace("* *%* |", "%* |")
        # Ensure a space after "Item 1." style headings glued to text.
        item_heading_pattern = re.compile(
            r'(^\s*(?:\*\*)?\s*(?:item\s+)?\d+[A-Z]?\.)'
            r'(?=[A-Z])',
            re.IGNORECASE | re.MULTILINE
        )
        doc = item_heading_pattern.sub(r'\1 ', doc)
        # Cells containing only (possibly bolded) whitespace become blank.
        pattern = r"\|\s\**(?: | )+\**\s\|"
        doc = doc.replace(") ** |", ")** |")
        # Re-join bold negative amounts split across "**(123 **" / "**)**".
        doc = re.sub(
            r"\*\*\(\s*(\$?[+-]?\d[\d,]*(?:\.\d+)?)\s*\*\*\s*(?: \s*)?\s*\*\*\)\s*\*\*",
            r"**(\1)**",
            doc,
            flags=re.IGNORECASE,
        )
        # Merge "**123** %**" fragments into a single bold token, keeping an
        # explicit % only when the original fragment lacked one.
        doc = re.sub(
            r"\*\*([+-]?\d[\d,]*(?:\.\d+)?)\*\*(\s*(?: )?\s*(%?)\s*)\*\*(?=\s|$)",
            lambda m: f"**{m.group(1)}{'%' if not m.group(3) else m.group(2)}**",
            doc,
            flags=re.IGNORECASE,
        )
        # Applied twice on purpose: blanking one cell can create a new
        # adjacent match that the first pass missed.
        doc = re.sub(pattern, "| |", doc)
        doc = re.sub(pattern, "| |", doc)
        # "**2** **nd" -> "**2nd" (split ordinal suffix).
        doc = re.sub(r'(?m)^\*\*2\*\*\s+\*\*nd\b', r'**2nd', doc)
        doc = doc.replace("** ---\n\n|", "**\n\n---\n\n|")
        doc = doc.replace("| $**$** | $%** |", "| **$** | **%** |").replace('| — |', '| |').replace('| — |', '| |').replace("| **$Change** |", "| **$ Change** |").replace("| **%Change** |", "| **% Change** |").replace(" months |", " months |")
        # "**1. **" -> "**1.**" (bold numbered-list markers).
        pattern = r"(\n\n\*\*\d+)\. \*\*"
        replacement = r"\1.**"
        doc = re.sub(pattern, replacement, doc)
        doc = doc.replace("**• ** ", "**•** ")
        # Delete short uuencoded 'begin 644' lines via conditional_delete;
        # long ones (captured content > 50 chars) are kept.
        pattern = re.compile(r"^(begin 644 (.*)(\r?\n|$))", re.MULTILINE)
        doc = pattern.sub(conditional_delete, doc)
        # Normalize superscript footnote markers like "^(1) ^".
        doc = re.sub(r'\^\((\d+)\)((?: )+)\^', r'^(\1)^\2', doc)
        # Hard-coded fixes for specific split headings seen in real filings.
        doc = doc.replace("**6** **.** **ACCOUNTS RECEIVABLE**", "**6.** **ACCOUNTS RECEIVABLE**").replace("**1** **2.** **Long-term debt**", "**12.** **Long-term debt**").replace("**1** **3.** **Employee future benefits**", "**13.** **Employee future benefits**")
        # Write the finished Markdown next to the input file.
        out = path.with_suffix(".md")
        out.write_text(doc, encoding="utf-8")
        # Parse-stats reporting is best-effort: failures warn, never abort.
        parse_stats = _complete_parse_stats_for_output(
            LAST_PARSE_STATS,
            output_path=out,
            final_markdown=doc,
            to_mmd=to_mmd,
        )
        try:
            _write_parse_stats_outputs(parse_stats, out)
            _print_parse_stats_summary(parse_stats)
        except Exception as stats_exc:
            print(f"[parse-stats warning] Could not write parse stats: {stats_exc}")
        print(f"Successful! Output written to {out}")
    except Exception as e:
        # Log full traceback to the module log; surface a short message on
        # stderr so batch runs can continue past a bad file.
        logging.error(
            f"FILE: {path.name}\n"
            f"ERROR: {e}\n"
            f"TRACEBACK:\n{traceback.format_exc()}"
        )
        print(f"[ERROR] {path}: {e}. Details logged to {log_file_path.name}", file=sys.stderr)
if __name__ == "__main__":
    # CLI entry point: build the parser (argument order preserved — it
    # controls --help output), handle the key-status maintenance flags,
    # resolve the input path, then run the conversion.
    cli = argparse.ArgumentParser(
        description="A command-line tool to parse an SEC filing in HTML, HTM, or TXT format and convert it to Markdown.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    cli.add_argument("path", nargs="?", help="Path to the SEC filing (e.g., your_file.html).")
    cli.add_argument(
        "--to_mmd",
        action="store_true",
        help="Convert all tables in the final output to MultiMarkdown format.",
    )
    cli.add_argument(
        "--source-document-url",
        help="Optional absolute source document URL used to resolve fragment links like #a_001.",
    )
    cli.add_argument(
        "--mistral-key-status",
        action="store_true",
        help="Print the shared Mistral key rotation/usage monitor JSON and exit.",
    )
    cli.add_argument(
        "--reset-mistral-key-status",
        action="store_true",
        help="Reset the shared Mistral key rotation/usage monitor JSON and exit.",
    )
    opts = cli.parse_args()

    # Maintenance flags each print a JSON snapshot and exit immediately;
    # reset takes precedence when both are given.
    if opts.reset_mistral_key_status:
        print(json.dumps(reset_mistral_key_status(), indent=2, sort_keys=True))
        sys.exit(0)
    if opts.mistral_key_status:
        print(json.dumps(get_mistral_key_status_snapshot(), indent=2, sort_keys=True))
        sys.exit(0)

    # `path` is optional only so the maintenance flags can run without one.
    if not opts.path:
        cli.error("the following arguments are required: path")

    target = pathlib.Path(opts.path)
    # Convenience fallback: tolerate paths mistakenly prefixed with the
    # `sec_parser` package directory.
    if not target.is_file() and target.parts and target.parts[0] == "sec_parser":
        stripped = pathlib.Path(*target.parts[1:])
        if stripped.is_file():
            print(f"[info] Using '{stripped}' instead of '{opts.path}'.")
            target = stripped
    if not target.is_file():
        print(f"Error: File not found at {opts.path}", file=sys.stderr)
        sys.exit(1)
    main_one(target, to_mmd=opts.to_mmd, source_document_url=opts.source_document_url)