# Source: crisisnet-dataset / scripts / 06_download_supply_chain.py
# (upload by Sashank-810, commit f5e9144 — "Clean upload without parquet files")
#!/usr/bin/env python3
"""Extract major customer disclosures from 10-K filings for supply chain graph. -> Module_3"""
import os
import json
import re
import csv
from datetime import datetime
# --- Paths -------------------------------------------------------------
# BASE_DIR is the repository root: two directory levels above this script.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
FILINGS_DIR = os.path.join(BASE_DIR, "Module_2", "10k_extracted")  # input: extracted 10-K JSON
OUTPUT_DIR = os.path.join(BASE_DIR, "Module_3")                    # output: supply-chain CSVs
LOG_FILE = os.path.join(BASE_DIR, "logs", "06_supply_chain.log")

os.makedirs(OUTPUT_DIR, exist_ok=True)
# Bug fix: the logs/ directory was never created, so open(LOG_FILE, 'w')
# raised FileNotFoundError on a fresh checkout. Create it explicitly.
os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True)

# NOTE: the log handle is deliberately kept open for the whole script run
# and is closed in the epilogue at the bottom of the file.
log = open(LOG_FILE, 'w')
log.write(f"Supply chain extraction started: {datetime.now()}\n\n")
# Regex patterns that flag customer/supplier concentration language in 10-K
# narrative sections. All are case-insensitive via the inline (?i) flag.
# Matched pattern strings are written verbatim into the output CSV.
PATTERNS = [
# Direct "important customer" phrasings.
r'(?i)major\s+customer',
r'(?i)significant\s+customer',
r'(?i)largest\s+customer',
r'(?i)principal\s+customer',
# SEC concentration-disclosure boilerplate: "10% (or more) of (our) (total) revenue/sales".
r'(?i)10\s*%\s*(?:or more\s+)?of\s+(?:our\s+)?(?:total\s+)?(?:revenue|sales|net sales)',
# "accounted for (approximately) NN%" revenue-share statements.
r'(?i)accounted\s+for\s+(?:approximately\s+)?\d+\s*%',
# Supplier-side relationships.
r'(?i)(?:key|primary|major)\s+(?:supplier|vendor)',
r'(?i)supply\s+agreement',
# Explicit concentration-risk headings.
r'(?i)customer\s+concentration',
r'(?i)revenue\s+concentration',
]
# --- Scan every extracted 10-K JSON for disclosure language -------------
# Each hit is recorded as a dict row: file, section, pattern, matched text,
# and ±500 characters of surrounding context for manual review.
results = []
print("Scanning extracted 10-K filings for customer/supplier disclosures...")
if os.path.exists(FILINGS_DIR):
    # Walk subdirectories (crawler saves to 10k_extracted/10-K/)
    all_json_files = []
    for root, _dirs, files in os.walk(FILINGS_DIR):
        for fname in sorted(files):
            if fname.endswith('.json'):
                all_json_files.append((fname, os.path.join(root, fname)))
    print(f" Found {len(all_json_files)} JSON files to scan")

    # Compile each pattern once instead of re-parsing it for every
    # (file, section) pair; keep the raw string for the output row.
    compiled = [(p, re.compile(p)) for p in PATTERNS]

    for filename, filepath in all_json_files:
        try:
            # Explicit encoding: SEC-derived JSON is UTF-8; without it the
            # platform default could mis-decode on Windows.
            with open(filepath, encoding='utf-8') as f:
                filing = json.load(f)
            # Business (1), Risk Factors (1A) and MD&A (7) are where
            # concentration disclosures appear.
            for item_key in ['item_1', 'item_1a', 'item_7']:
                text = filing.get(item_key, '')
                if not text:
                    continue
                for raw_pattern, regex in compiled:
                    for match in regex.finditer(text):
                        # Keep ±500 chars of context, flattened to one line.
                        start = max(0, match.start() - 500)
                        end = min(len(text), match.end() + 500)
                        context = text[start:end].replace('\n', ' ').strip()
                        results.append({
                            'file': filename,
                            'section': item_key,
                            'pattern_matched': raw_pattern,
                            'match_text': match.group(),
                            'context': context,
                        })
            # Bug fix: these f-strings had no placeholder, so every log line
            # read literally "(unknown)" instead of the file being scanned.
            log.write(f" {filename}: scanned\n")
        except Exception as e:
            # Best-effort scan: one corrupt filing must not abort the run.
            log.write(f" {filename}: ERROR — {e}\n")
else:
    print(f" WARNING: {FILINGS_DIR} does not exist yet.")
    print(f" Run script 04 (10-K download) first, then re-run this script.")
    log.write("FILINGS_DIR not found. Run edgar-crawler first.\n")
# Persist the raw disclosure mentions (if any) for manual curation.
if not results:
    print("\n No results found. Normal if 10-K extraction hasn't run yet.")
else:
    out_path = os.path.join(OUTPUT_DIR, "customer_disclosures_raw.csv")
    with open(out_path, 'w', newline='', encoding='utf-8') as fh:
        # Column order is taken from the first result row.
        dict_writer = csv.DictWriter(fh, fieldnames=results[0].keys())
        dict_writer.writeheader()
        dict_writer.writerows(results)
    print(f"\n Found {len(results)} customer/supplier disclosure mentions")
    print(f" Saved to {out_path}")
# Seed a hand-curated edge list so the supply-chain graph stage has data
# even before automated extraction has produced anything. Never overwrite
# an existing template — it may carry manual edits.
template_file = os.path.join(OUTPUT_DIR, "edges_template.csv")
if not os.path.exists(template_file):
    # (source_ticker, target_ticker, relationship_type, description)
    known_edges = [
        ("SLB", "XOM", "service_provider", "oilfield services"),
        ("SLB", "CVX", "service_provider", "oilfield services"),
        ("SLB", "COP", "service_provider", "oilfield services"),
        ("HAL", "XOM", "service_provider", "oilfield services"),
        ("HAL", "CVX", "service_provider", "oilfield services"),
        ("HAL", "EOG", "service_provider", "oilfield services"),
        ("BKR", "XOM", "service_provider", "oilfield services"),
        ("BKR", "COP", "service_provider", "oilfield services"),
        ("NOV", "SLB", "equipment_supplier", "drilling equipment"),
        ("NOV", "HAL", "equipment_supplier", "drilling equipment"),
        ("FTI", "XOM", "service_provider", "subsea engineering"),
        ("FTI", "CVX", "service_provider", "subsea engineering"),
        ("EOG", "EPD", "shipper", "crude/NGL transport"),
        ("DVN", "TRGP", "shipper", "gas processing"),
        ("FANG", "EPD", "shipper", "crude transport"),
        ("EQT", "WMB", "shipper", "gas transport"),
        ("AR", "AM", "shipper", "gas gathering (subsidiary)"),
        ("CTRA", "WMB", "shipper", "gas transport"),
        ("RRC", "ET", "shipper", "gas transport"),
        ("SWN", "ET", "shipper", "gas transport"),
        ("KMI", "VLO", "pipeline_supplier", "crude delivery"),
        ("KMI", "MPC", "pipeline_supplier", "crude delivery"),
        ("EPD", "VLO", "pipeline_supplier", "NGL supply"),
        ("EPD", "PSX", "pipeline_supplier", "NGL supply"),
        ("ET", "PSX", "pipeline_supplier", "crude/NGL"),
        ("XOM", "VLO", "crude_supplier", "crude oil"),
        ("CVX", "MPC", "crude_supplier", "crude oil"),
        ("OXY", "VLO", "crude_supplier", "crude oil"),
        ("CHK", "LNG", "gas_supplier", "natural gas for LNG"),
        ("EQT", "LNG", "gas_supplier", "natural gas for LNG"),
    ]
    with open(template_file, 'w', newline='') as fh:
        edge_writer = csv.writer(fh)
        edge_writer.writerow(["source", "target", "relationship_type", "description"])
        edge_writer.writerows(known_edges)
    print(f"\n Created starter edges template with {len(known_edges)} known relationships")
    print(f" File: {template_file}")
# --- Epilogue: stamp completion time, release the log handle, report paths.
log.write(f"\nFinished: {datetime.now()}\n")
log.close()
print(f"Log saved to {LOG_FILE}")
print(f"Data saved to: {OUTPUT_DIR}")