sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
davila7/claude-code-templates:cli-tool/components/skills/scientific/clinical-decision-support/scripts/validate_cds_document.py | #!/usr/bin/env python3
"""
Validate Clinical Decision Support Documents for Quality and Completeness
Checks for:
- Evidence citations for all recommendations
- Statistical reporting completeness
- Biomarker nomenclature consistency
- Required sections present
- HIPAA de-identification
- GRADE recommendation format
Dependencies: None (pure Python)
"""
import re
import argparse
from pathlib import Path
from collections import defaultdict
class CDSValidator:
    """Validator for clinical decision support documents.

    Runs heuristic quality checks over the document text and collects
    findings into three buckets:

      * ``errors``   -- blocking problems (validation fails)
      * ``warnings`` -- issues that merit manual review
      * ``info``     -- checks that passed
    """

    def __init__(self, filepath):
        """Load the document text.

        Args:
            filepath: Path to the document (str or pathlib.Path). It is
                normalized to a Path because check_biomarker_nomenclature
                inspects the file suffix; the previous code called
                ``.suffix`` on a plain string, which raised
                AttributeError for every caller passing a str path.
        """
        self.filepath = Path(filepath)
        with open(self.filepath, 'r', encoding='utf-8', errors='ignore') as f:
            self.content = f.read()
        self.errors = []    # blocking problems
        self.warnings = []  # review recommended
        self.info = []      # passed checks

    def validate_all(self):
        """Run every validation check and return the overall pass/fail."""
        print(f"Validating: {self.filepath}")
        print("="*70)
        self.check_required_sections()
        self.check_evidence_citations()
        self.check_recommendation_grading()
        self.check_statistical_reporting()
        self.check_hipaa_identifiers()
        self.check_biomarker_nomenclature()
        return self.generate_report()

    def check_required_sections(self):
        """Check that document-type-specific sections are present.

        Document type is inferred from keywords anywhere in the text
        ('cohort' and/or 'recommendation'). Missing cohort sections are
        warnings; missing recommendation sections are errors.
        """
        # Cohort analysis required sections
        cohort_sections = [
            'cohort characteristics',
            'biomarker',
            'outcomes',
            'statistical analysis',
            'clinical implications',
            'references'
        ]
        # Treatment recommendation required sections
        rec_sections = [
            'evidence',
            'recommendation',
            'monitoring',
            'references'
        ]
        content_lower = self.content.lower()
        # A document may be both types; each set is checked independently.
        is_cohort = 'cohort' in content_lower
        is_recommendation = 'recommendation' in content_lower
        if is_cohort:
            missing = [sec for sec in cohort_sections if sec not in content_lower]
            if missing:
                self.warnings.append(f"Cohort analysis may be missing sections: {', '.join(missing)}")
            else:
                self.info.append("All cohort analysis sections present")
        if is_recommendation:
            missing = [sec for sec in rec_sections if sec not in content_lower]
            if missing:
                self.errors.append(f"Recommendation document missing required sections: {', '.join(missing)}")
            else:
                self.info.append("All recommendation sections present")

    def check_evidence_citations(self):
        """Check that recommendation statements carry a citation.

        A 'recommendation' is any span introduced by a directive verb and
        running to the next blank line; a citation is a numbered
        reference, an author-year parenthetical, 'et al.', or a guideline
        body acronym.
        """
        rec_pattern = r'(recommend|should|prefer|suggest|consider)(.*?)(?:\n\n|\Z)'
        recommendations = re.findall(rec_pattern, self.content, re.IGNORECASE | re.DOTALL)
        citation_patterns = [
            r'\[\d+\]',         # Numbered citations [1]
            r'\(.*?\d{4}\)',    # Author year (Smith 2020)
            r'et al\.',         # Et al citations
            r'NCCN|ASCO|ESMO',  # Guideline references
        ]
        uncited_recommendations = []
        # findall returns (verb, text) tuples; only the text is examined.
        for _, rec_text in recommendations:
            has_citation = any(re.search(pattern, rec_text) for pattern in citation_patterns)
            if not has_citation:
                uncited_recommendations.append(rec_text[:60].strip() + '...')
        if uncited_recommendations:
            self.warnings.append(f"Found {len(uncited_recommendations)} recommendations without citations")
            for rec in uncited_recommendations[:3]:  # Show first 3
                self.warnings.append(f" - {rec}")
        else:
            self.info.append(f"All {len(recommendations)} recommendations have citations")

    def check_recommendation_grading(self):
        """Check for GRADE-style strength notation and wording."""
        # GRADE notation such as 1A, 1B, 2A, 2B, 2C (optionally prefixed).
        grade_pattern = r'GRADE\s*[12][A-C]|Grade\s*[12][A-C]|\(?\s*[12][A-C]\s*\)?'
        grades = re.findall(grade_pattern, self.content, re.IGNORECASE)
        # Strong vs conditional recommendation language.
        strong_pattern = r'(strong|we recommend|should)'
        conditional_pattern = r'(conditional|weak|we suggest|may consider|could consider)'
        strong_count = len(re.findall(strong_pattern, self.content, re.IGNORECASE))
        conditional_count = len(re.findall(conditional_pattern, self.content, re.IGNORECASE))
        if grades:
            self.info.append(f"Found {len(grades)} GRADE-style recommendations")
        else:
            self.warnings.append("No GRADE-style recommendation grading found (1A, 1B, 2A, etc.)")
        if strong_count > 0 or conditional_count > 0:
            self.info.append(f"Recommendation language: {strong_count} strong, {conditional_count} conditional")
        else:
            self.warnings.append("No clear recommendation strength language (strong/conditional) found")

    def check_statistical_reporting(self):
        """Check that statistics are reported completely (p, CI, HR, n)."""
        p_values = re.findall(r'p\s*[=<>]\s*[\d.]+', self.content, re.IGNORECASE)
        cis = re.findall(r'95%\s*CI|confidence interval', self.content, re.IGNORECASE)
        # HR is matched case-sensitively to avoid 'hr' in ordinary prose.
        hrs = re.findall(r'HR\s*[=:]\s*[\d.]+', self.content)
        sample_sizes = re.findall(r'n\s*=\s*\d+', self.content, re.IGNORECASE)
        if not p_values:
            self.warnings.append("No p-values found - statistical significance not reported")
        else:
            self.info.append(f"Found {len(p_values)} p-values")
        if hrs and not cis:
            self.warnings.append("Hazard ratios reported without confidence intervals")
        if not sample_sizes:
            self.warnings.append("Sample sizes (n=X) not clearly reported")
        # p-values cannot be exactly zero; p<0.001 is the accepted form.
        if 'p=0.00' in self.content or 'p = 0.00' in self.content:
            self.warnings.append("Found p=0.00 (should report as p<0.001 instead)")

    def check_hipaa_identifiers(self):
        """Scan for a subset of the 18 HIPAA identifiers (basic regexes only)."""
        identifiers = {
            'Names': r'Dr\.\s+[A-Z][a-z]+|Patient:\s*[A-Z][a-z]+',
            'Specific dates': r'\d{1,2}/\d{1,2}/\d{4}',  # MM/DD/YYYY
            'Phone numbers': r'\d{3}[-.]?\d{3}[-.]?\d{4}',
            'Email addresses': r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}',
            'SSN': r'\d{3}-\d{2}-\d{4}',
            'MRN': r'MRN\s*:?\s*\d+',
        }
        found_identifiers = []
        for identifier_type, pattern in identifiers.items():
            matches = re.findall(pattern, self.content)
            if matches:
                found_identifiers.append(f"{identifier_type}: {len(matches)} instance(s)")
        if found_identifiers:
            # Any hit is an error: the document must not ship with PHI.
            self.errors.append("Potential HIPAA identifiers detected:")
            for identifier in found_identifiers:
                self.errors.append(f" - {identifier}")
            self.errors.append(" ** Ensure proper de-identification before distribution **")
        else:
            self.info.append("No obvious HIPAA identifiers detected (basic check only)")

    def check_biomarker_nomenclature(self):
        """Check biomarker/gene naming conventions."""
        issues = []
        # Gene symbols should be italicized in LaTeX output.
        gene_names = ['EGFR', 'ALK', 'ROS1', 'BRAF', 'KRAS', 'HER2', 'TP53', 'BRCA1', 'BRCA2']
        # Fixed: filepath is now a Path, so .suffix is valid; previously
        # this line crashed with AttributeError on str inputs.
        is_latex = self.filepath.suffix == '.tex'
        for gene in gene_names:
            if gene in self.content and is_latex:
                if f'\\textit{{{gene}}}' not in self.content and f'\\emph{{{gene}}}' not in self.content:
                    issues.append(f"{gene} should be italicized in LaTeX (\\textit{{{gene}}})")
        # HGVS protein-level nomenclature, e.g. p.L858R.
        hgvs_pattern = r'p\.[A-Z]\d+[A-Z]'
        hgvs_mutations = re.findall(hgvs_pattern, self.content)
        if hgvs_mutations:
            self.info.append(f"Found {len(hgvs_mutations)} HGVS protein nomenclature (e.g., p.L858R)")
        # EGFR mutations should name the specific exon/variant.
        if 'EGFR mutation' in self.content and 'exon' not in self.content.lower():
            self.warnings.append("EGFR mutation mentioned - specify exon/variant (e.g., exon 19 deletion)")
        if issues:
            self.warnings.extend(issues)

    def generate_report(self):
        """Print the validation summary and return True unless errors exist."""
        print("\n" + "="*70)
        print("VALIDATION REPORT")
        print("="*70)
        if self.errors:
            print(f"\nβ ERRORS ({len(self.errors)}):")
            for error in self.errors:
                print(f" {error}")
        if self.warnings:
            print(f"\nβ οΈ WARNINGS ({len(self.warnings)}):")
            for warning in self.warnings:
                print(f" {warning}")
        if self.info:
            print(f"\nβ PASSED CHECKS ({len(self.info)}):")
            for info in self.info:
                print(f" {info}")
        # Overall status: errors fail; warnings pass with a caveat.
        print("\n" + "="*70)
        if self.errors:
            print("STATUS: β VALIDATION FAILED - Address errors before distribution")
            return False
        elif self.warnings:
            print("STATUS: β οΈ VALIDATION PASSED WITH WARNINGS - Review recommended")
            return True
        else:
            print("STATUS: β VALIDATION PASSED - Document meets quality standards")
            return True

    def save_report(self, output_file):
        """Write a plain-text copy of the findings to *output_file*."""
        with open(output_file, 'w') as f:
            f.write("CLINICAL DECISION SUPPORT DOCUMENT VALIDATION REPORT\n")
            f.write("="*70 + "\n")
            f.write(f"Document: {self.filepath}\n")
            # NOTE(review): this records the working directory, not a
            # timestamp -- presumably a timestamp was intended; confirm.
            f.write(f"Validated: {Path.cwd()}\n\n")
            if self.errors:
                f.write(f"ERRORS ({len(self.errors)}):\n")
                for error in self.errors:
                    f.write(f" - {error}\n")
                f.write("\n")
            if self.warnings:
                f.write(f"WARNINGS ({len(self.warnings)}):\n")
                for warning in self.warnings:
                    f.write(f" - {warning}\n")
                f.write("\n")
            if self.info:
                f.write(f"PASSED CHECKS ({len(self.info)}):\n")
                for info in self.info:
                    f.write(f" - {info}\n")
        print(f"\nValidation report saved to: {output_file}")
def main():
    """CLI entry point: validate a document and set the process exit code."""
    parser = argparse.ArgumentParser(description='Validate clinical decision support documents')
    parser.add_argument('input_file', type=str, help='Document to validate (.tex, .md, .txt)')
    parser.add_argument('-o', '--output', type=str, default=None,
                        help='Save validation report to file')
    parser.add_argument('--strict', action='store_true',
                        help='Treat warnings as errors')
    args = parser.parse_args()

    # Run every check; findings accumulate on the validator.
    validator = CDSValidator(args.input_file)
    validator.validate_all()

    if args.output:
        validator.save_report(args.output)

    # Non-zero exit on errors, and (under --strict) on warnings too.
    failed = bool(validator.errors) or (args.strict and bool(validator.warnings))
    exit(1 if failed else 0)


if __name__ == '__main__':
    main()
# Example usage:
# python validate_cds_document.py cohort_analysis.tex
# python validate_cds_document.py treatment_recommendations.tex -o validation_report.txt
# python validate_cds_document.py document.tex --strict # Warnings cause failure
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/clinical-decision-support/scripts/validate_cds_document.py",
"license": "MIT License",
"lines": 260,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/clinical-reports/scripts/check_deidentification.py | #!/usr/bin/env python3
"""
Check clinical reports for HIPAA identifiers that need removal.
Scans text for 18 HIPAA identifiers and flags potential privacy violations.
Usage:
python check_deidentification.py <input_file>
python check_deidentification.py <input_file> --output violations.json
"""
import argparse
import json
import re
from pathlib import Path
from typing import Dict, List
# 18 HIPAA Identifiers patterns
# Each entry maps an identifier category to its human-readable description,
# the regex patterns used to detect it, and a triage severity
# (CRITICAL > HIGH > MEDIUM) consumed by generate_report()'s status logic.
HIPAA_IDENTIFIERS = {
    "1_names": {
        "description": "Names (patient, family, providers)",
        "patterns": [
            r"\b(Dr\.|Mr\.|Mrs\.|Ms\.)\s+[A-Z][a-z]+",
            r"\b[A-Z][a-z]+,\s+[A-Z][a-z]+\b",  # Last, First
        ],
        "severity": "HIGH"
    },
    "2_geographic": {
        "description": "Geographic subdivisions smaller than state",
        "patterns": [
            r"\b\d+\s+[A-Z][a-z]+\s+(Street|St|Avenue|Ave|Road|Rd|Boulevard|Blvd|Lane|Ln|Drive|Dr)\b",
            r"\b[A-Z][a-z]+,\s+[A-Z]{2}\s+\d{5}\b",  # City, ST ZIP
        ],
        "severity": "HIGH"
    },
    "3_dates": {
        "description": "Dates (except year)",
        "patterns": [
            r"\b(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/\d{4}\b",
            r"\b(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[a-z]*\s+\d{1,2},\s+\d{4}\b",
            r"\b\d{1,2}\s+(January|February|March|April|May|June|July|August|September|October|November|December)\s+\d{4}\b",
        ],
        "severity": "HIGH"
    },
    "4_telephone": {
        "description": "Telephone numbers",
        "patterns": [
            r"\b\(?\d{3}\)?[-.\s]?\d{3}[-.\s]?\d{4}\b",
            r"\b1-\d{3}-\d{3}-\d{4}\b",
        ],
        "severity": "HIGH"
    },
    "5_fax": {
        "description": "Fax numbers",
        "patterns": [
            r"(?i)fax[:]\s*\(?\d{3}\)?[-.\s]?\d{3}[-.\s]?\d{4}",
        ],
        "severity": "HIGH"
    },
    "6_email": {
        "description": "Email addresses",
        "patterns": [
            r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b",
        ],
        "severity": "HIGH"
    },
    "7_ssn": {
        "description": "Social Security numbers",
        "patterns": [
            # NOTE: the bare 9-digit pattern can also match other numbers
            # (e.g. some record IDs), producing false positives.
            r"\b\d{3}-\d{2}-\d{4}\b",
            r"\b\d{9}\b",
        ],
        "severity": "CRITICAL"
    },
    "8_mrn": {
        "description": "Medical record numbers",
        "patterns": [
            r"(?i)(mrn|medical\s+record\s+(number|#))[:]\s*\d+",
            r"(?i)patient\s+id[:]\s*\d+",
        ],
        "severity": "HIGH"
    },
    "9_health_plan": {
        "description": "Health plan beneficiary numbers",
        "patterns": [
            r"(?i)(insurance|policy)\s+(number|#|id)[:]\s*[A-Z0-9]+",
        ],
        "severity": "HIGH"
    },
    "10_account": {
        "description": "Account numbers",
        "patterns": [
            r"(?i)account\s+(number|#)[:]\s*\d+",
        ],
        "severity": "MEDIUM"
    },
    "11_license": {
        "description": "Certificate/license numbers",
        "patterns": [
            r"(?i)(driver[']?s\s+license|DL)[:]\s*[A-Z0-9]+",
        ],
        "severity": "MEDIUM"
    },
    "12_vehicle": {
        "description": "Vehicle identifiers",
        "patterns": [
            r"(?i)(license\s+plate|VIN)[:]\s*[A-Z0-9]+",
        ],
        "severity": "MEDIUM"
    },
    "13_device": {
        "description": "Device identifiers and serial numbers",
        "patterns": [
            r"(?i)(serial|device)\s+(number|#)[:]\s*[A-Z0-9-]+",
        ],
        "severity": "MEDIUM"
    },
    "14_url": {
        "description": "Web URLs",
        "patterns": [
            r"https?://[^\s]+",
            r"www\.[^\s]+",
        ],
        "severity": "MEDIUM"
    },
    "15_ip": {
        "description": "IP addresses",
        "patterns": [
            r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b",
        ],
        "severity": "HIGH"
    },
    "16_biometric": {
        "description": "Biometric identifiers",
        "patterns": [
            # Matches mentions of biometric data, not the data itself.
            r"(?i)(fingerprint|voiceprint|retinal\s+scan)",
        ],
        "severity": "CRITICAL"
    },
    "17_photos": {
        "description": "Full-face photographs",
        "patterns": [
            r"(?i)(photograph|photo|image).*face",
            r"\.(jpg|jpeg|png|gif)\b",
        ],
        "severity": "HIGH"
    },
    "18_unique": {
        "description": "Any other unique identifying characteristic",
        "patterns": [
            r"(?i)(tattoo|birthmark|scar).*unique",
        ],
        "severity": "MEDIUM"
    },
}
def check_identifiers(text: str) -> Dict:
    """Scan *text* for the 18 HIPAA identifier categories.

    Returns a dict with the number of violated categories
    ("total_violations"), the total match count ("total_instances"),
    and per-category details: description, severity, count, and up to
    five example matches.
    """
    violations = {}
    total_issues = 0
    for identifier_id, config in HIPAA_IDENTIFIERS.items():
        matches = []
        for pattern in config["patterns"]:
            # Use finditer/group(0) so examples show the full matched
            # text. The previous findall() call returned only the first
            # capture group (e.g. "Dr." instead of "Dr. Smith") and
            # tuples for multi-group patterns.
            matches.extend(m.group(0) for m in re.finditer(pattern, text, re.IGNORECASE))
        if matches:
            # De-duplicate while preserving first-seen order; iterating
            # a set() gives an unstable example order across runs.
            unique_matches = list(dict.fromkeys(matches))[:5]
            violations[identifier_id] = {
                "description": config["description"],
                "severity": config["severity"],
                "count": len(matches),
                "examples": unique_matches
            }
            total_issues += len(matches)
    return {
        "total_violations": len(violations),
        "total_instances": total_issues,
        "violations": violations
    }
def check_age_compliance(text: str) -> Dict:
    """Check that ages over 89 are aggregated (HIPAA Safe Harbor).

    Matches both spaced and hyphenated forms ("92 years old" and
    "92-year-old"); the original pattern only allowed whitespace
    between the number and "year", so the common hyphenated form was
    silently missed.

    Returns the count of ages >89, up to five examples, and a
    "compliant" flag.
    """
    age_pattern = r"\b(\d{2,3})[\s-]*(?:year|yr)s?[\s-]?old\b"
    ages = [int(age) for age in re.findall(age_pattern, text, re.IGNORECASE)]
    violations = [age for age in ages if age > 89]
    return {
        "ages_over_89": len(violations),
        "examples": violations[:5] if violations else [],
        "compliant": len(violations) == 0
    }
def generate_report(filename: str) -> Dict:
    """Generate the de-identification compliance report for *filename*.

    Raises:
        FileNotFoundError: if the input file does not exist.
    """
    filepath = Path(filename)
    if not filepath.exists():
        # Fixed: the original message contained no placeholder, so the
        # offending path was never shown to the user.
        raise FileNotFoundError(f"File not found: {filepath}")
    with open(filepath, 'r', encoding='utf-8') as f:
        text = f.read()
    identifier_check = check_identifiers(text)
    age_check = check_age_compliance(text)
    # Status policy: any CRITICAL finding (SSN, biometrics) or 3+ HIGH
    # findings fail outright; any HIGH finding or an age issue needs review.
    critical_violations = sum(
        1 for v in identifier_check["violations"].values()
        if v["severity"] == "CRITICAL"
    )
    high_violations = sum(
        1 for v in identifier_check["violations"].values()
        if v["severity"] == "HIGH"
    )
    if critical_violations > 0 or high_violations >= 3:
        status = "NON_COMPLIANT"
    elif high_violations > 0 or not age_check["compliant"]:
        status = "NEEDS_REVIEW"
    else:
        status = "COMPLIANT"
    report = {
        "filename": str(filename),
        "status": status,
        "identifier_violations": identifier_check,
        "age_compliance": age_check,
        "recommendation": get_recommendation(status, identifier_check, age_check)
    }
    return report
def get_recommendation(status: str, identifiers: Dict, ages: Dict) -> str:
    """Build a human-readable remediation recommendation.

    Args:
        status: Overall status string from generate_report().
        identifiers: Result dict from check_identifiers().
        ages: Result dict from check_age_compliance().
    """
    if status == "COMPLIANT":
        return ("Document appears compliant. "
                "Perform final manual review before publication.")
    steps = []
    if identifiers["total_violations"] > 0:
        steps.append(
            f"Remove or redact {identifiers['total_instances']} identified HIPAA identifiers."
        )
    if not ages["compliant"]:
        steps.append(
            f"Aggregate {ages['ages_over_89']} age(s) >89 years to '90 or older' or '>89 years'."
        )
    return " ".join(steps)
def print_report(report: Dict):
    """Print human-readable report.

    Layout: header, overall status, per-category violations sorted by
    severity (CRITICAL first), age-compliance summary, then the final
    recommendation text.

    Args:
        report: Dict produced by generate_report().
    """
    print("=" * 70)
    print("HIPAA DE-IDENTIFICATION CHECK")
    print(f"File: {report['filename']}")
    print("=" * 70)
    print()
    print(f"Overall Status: {report['status']}")
    print()
    if report["identifier_violations"]["total_violations"] == 0:
        print("β No HIPAA identifiers detected")
    else:
        print(f"β Found {report['identifier_violations']['total_violations']} types of violations")
        print(f" Total instances: {report['identifier_violations']['total_instances']}")
        print()
        print("Violations by type:")
        print("-" * 70)
        # Sort so CRITICAL categories appear before HIGH, then MEDIUM.
        for id_type, details in sorted(
            report["identifier_violations"]["violations"].items(),
            key=lambda x: {"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2}[x[1]["severity"]]
        ):
            severity_symbol = "β β β " if details["severity"] == "CRITICAL" else "β β " if details["severity"] == "HIGH" else "β "
            print(f"{severity_symbol} [{details['severity']:8}] {details['description']}")
            print(f" Count: {details['count']}")
            print(f" Examples:")
            for example in details["examples"]:
                print(f" - {example}")
            print()
    age_check = report["age_compliance"]
    if age_check["compliant"]:
        print("β Age reporting compliant (no ages >89 or properly aggregated)")
    else:
        print(f"β Age compliance issue: {age_check['ages_over_89']} age(s) >89 detected")
        print(f" Ages must be aggregated to '90 or older' or '>89 years'")
        print(f" Ages found: {age_check['examples']}")
    print()
    print("Recommendation:")
    print(report["recommendation"])
    print("=" * 70)
def main():
    """Main entry point.

    Parses CLI arguments, runs the de-identification check, emits the
    report (human-readable or JSON), and returns a shell exit code:
    0 only when the document is COMPLIANT.
    """
    parser = argparse.ArgumentParser(
        description="Check clinical reports for HIPAA identifiers"
    )
    parser.add_argument("input_file", help="Path to clinical report file")
    parser.add_argument("--output", "-o", help="Output JSON report to file")
    parser.add_argument("--json", action="store_true", help="Output JSON to stdout")
    args = parser.parse_args()
    try:
        report = generate_report(args.input_file)
        if args.json:
            print(json.dumps(report, indent=2))
        else:
            print_report(report)
        if args.output:
            with open(args.output, 'w') as fh:
                json.dump(report, fh, indent=2)
            print(f"\nJSON report saved to: {args.output}")
        # Non-zero exit when any violation was found.
        return 0 if report["status"] == "COMPLIANT" else 1
    except Exception as e:
        print(f"Error: {e}")
        return 1


if __name__ == "__main__":
    import sys
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/clinical-reports/scripts/check_deidentification.py",
"license": "MIT License",
"lines": 298,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/clinical-reports/scripts/compliance_checker.py | #!/usr/bin/env python3
"""
Check clinical reports for regulatory compliance (HIPAA, GCP, FDA).
Usage:
python compliance_checker.py <report_file>
"""
import argparse
import json
import re
# Regulation -> {check name: regex}. A check passes when its pattern
# matches anywhere in the report text (see check_compliance()).
COMPLIANCE_CHECKS = {
    "hipaa": {
        "consent_statement": r"(?i)(informed\s+consent|written\s+consent).*obtained",
        "deidentification": r"(?i)(de-identif|anonymi[sz])",  # covers -ise/-ize spellings
    },
    "gcp": {
        "irb_approval": r"(?i)(IRB|IEC|ethics\s+committee).*approv",
        "protocol_compliance": r"(?i)protocol",
        "informed_consent": r"(?i)informed\s+consent",
    },
    "fda": {
        "study_id": r"(?i)(IND|IDE|protocol)\s+(number|#)[:]\s*\S+",
        "safety_reporting": r"(?i)(adverse\s+event|SAE)",
    }
}
def check_compliance(filename: str) -> dict:
    """Evaluate every COMPLIANCE_CHECKS pattern against the report text.

    Returns {"filename": ..., "compliance": {regulation: {check: bool}}}
    where each bool records whether the pattern matched anywhere.
    """
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
    results = {
        regulation: {
            check_name: bool(re.search(pattern, content))
            for check_name, pattern in checks.items()
        }
        for regulation, checks in COMPLIANCE_CHECKS.items()
    }
    return {"filename": filename, "compliance": results}
def main():
    """Main entry point: run the compliance check and print results."""
    parser = argparse.ArgumentParser(description="Check regulatory compliance")
    parser.add_argument("input_file", help="Path to clinical report")
    parser.add_argument("--json", action="store_true")
    args = parser.parse_args()
    try:
        report = check_compliance(args.input_file)
        if args.json:
            print(json.dumps(report, indent=2))
        else:
            print("\nRegulatory Compliance Check:\n")
            for regulation, checks in report["compliance"].items():
                print(f"{regulation.upper()}:")
                for check, passed in checks.items():
                    symbol = "β" if passed else "β"
                    print(f" {symbol} {check}")
                print()
        return 0
    except Exception as e:
        print(f"Error: {e}")
        return 1


if __name__ == "__main__":
    import sys
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/clinical-reports/scripts/compliance_checker.py",
"license": "MIT License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/clinical-reports/scripts/extract_clinical_data.py | #!/usr/bin/env python3
"""
Extract structured clinical data from reports.
Usage:
python extract_clinical_data.py <report_file>
"""
import argparse
import json
import re
def extract_vital_signs(content: str) -> dict:
    """Extract vital signs (temperature, BP, HR, RR, SpO2) from free text.

    Only the vitals actually found are returned; values are the captured
    strings, not numbers. Each pattern matches the first occurrence.
    """
    patterns = {
        "temperature": r"(?i)temp(?:erature)?[:]\s*([\d.]+)\s*Β°?F",
        "bp": r"(?i)BP[:]\s*(\d+/\d+)",
        "hr": r"(?i)HR[:]\s*(\d+)",
        "rr": r"(?i)RR[:]\s*(\d+)",
        "spo2": r"(?i)SpO2[:]\s*([\d.]+)%",
    }
    found = {}
    for name, pattern in patterns.items():
        hit = re.search(pattern, content)
        if hit:
            found[name] = hit.group(1)
    return found
def extract_demographics(content: str) -> dict:
    """Extract patient demographics (age, sex) from free text.

    The sex pattern is anchored with word boundaries; the original
    unanchored pattern matched the first letter 'm' or 'f' anywhere in
    the text (e.g. the 'f' in "found"), yielding bogus values.
    NOTE(review): a standalone "F" (e.g. a temperature unit) can still
    be picked up as a sex code -- confirm acceptable for the inputs.
    """
    demographics = {}
    patterns = {
        "age": r"(?i)(\d+)[\s-]year[\s-]old",
        "sex": r"(?i)\b(male|female|M|F)\b",
    }
    for demo, pattern in patterns.items():
        match = re.search(pattern, content)
        if match:
            demographics[demo] = match.group(1)
    return demographics
def extract_medications(content: str) -> list:
    """Extract medications written as "<drug> <dose> <route> <frequency>".

    Recognizes mg doses, PO/IV/SC routes, and daily/BID/TID/QID
    frequencies (case-insensitive). Returns one dict per match.
    """
    pattern = r"(?i)(\w+)\s+(\d+\s*mg)\s+(PO|IV|SC)\s+(daily|BID|TID|QID)"
    return [
        {"drug": drug, "dose": dose, "route": route, "frequency": freq}
        for drug, dose, route, freq in re.findall(pattern, content)
    ]
def main():
    """Main entry point: extract structured data and emit it as JSON."""
    parser = argparse.ArgumentParser(description="Extract clinical data")
    parser.add_argument("input_file", help="Path to clinical report")
    parser.add_argument("--output", "-o", help="Output JSON file")
    args = parser.parse_args()
    try:
        with open(args.input_file, 'r', encoding='utf-8') as f:
            content = f.read()
        results = {
            "demographics": extract_demographics(content),
            "vital_signs": extract_vital_signs(content),
            "medications": extract_medications(content),
        }
        if args.output:
            with open(args.output, 'w') as out:
                json.dump(results, out, indent=2)
            print(f"β Data extracted to: {args.output}")
        else:
            print(json.dumps(results, indent=2))
        return 0
    except Exception as e:
        print(f"Error: {e}")
        return 1


if __name__ == "__main__":
    import sys
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/clinical-reports/scripts/extract_clinical_data.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/clinical-reports/scripts/format_adverse_events.py | #!/usr/bin/env python3
"""
Format adverse event data into tables for clinical trial reports.
Converts CSV or structured data into formatted AE summary tables.
Usage:
python format_adverse_events.py <ae_data.csv>
"""
import argparse
import csv
from collections import defaultdict
from pathlib import Path
def format_ae_summary_table(data: list) -> str:
    """Generate an adverse-event summary table in markdown format.

    Tallies per-arm totals and yes/no AE flags, then renders a markdown
    table with counts and percentages (percent of arm N; the Total N
    row shows the raw count only).

    Args:
        data: List of row dicts (e.g. from csv.DictReader) with keys
            treatment_arm, any_ae, related, serious, fatal,
            discontinuation.
    """
    stats = defaultdict(lambda: {
        'total': 0,
        'any_ae': 0,
        'related_ae': 0,
        'sae': 0,
        'deaths': 0,
        'discontinuations': 0
    })
    # CSV flag column -> counter key.
    flag_fields = (
        ('any_ae', 'any_ae'),
        ('related', 'related_ae'),
        ('serious', 'sae'),
        ('fatal', 'deaths'),
        ('discontinuation', 'discontinuations'),
    )
    for record in data:
        counters = stats[record.get('treatment_arm', 'Unknown')]
        counters['total'] += 1
        for column, key in flag_fields:
            if record.get(column, '').lower() == 'yes':
                counters[key] += 1

    # Render header, separator, then one row per category.
    lines = ["| Category | " + " | ".join(stats.keys()) + " |"]
    lines.append("|----------|" + "|".join(["--------"] * len(stats)) + "|")
    categories = [
        ('Total N', 'total'),
        ('Any AE', 'any_ae'),
        ('Treatment-related AE', 'related_ae'),
        ('Serious AE', 'sae'),
        ('Deaths', 'deaths'),
        ('Discontinuation due to AE', 'discontinuations')
    ]
    for label, key in categories:
        cells = [label]
        for counters in stats.values():
            count = counters[key]
            total = counters['total']
            if key == 'total':
                cells.append(f"{count}")
            else:
                pct = (count / total * 100) if total > 0 else 0
                cells.append(f"{count} ({pct:.1f}%)")
        lines.append("| " + " | ".join(cells) + " |")
    return "\n".join(lines) + "\n"
def main():
    """Main entry point: read the AE CSV and emit the summary table."""
    parser = argparse.ArgumentParser(description="Format AE data into tables")
    parser.add_argument("input_file", help="Path to AE data CSV")
    parser.add_argument("--output", "-o", help="Output markdown file")
    args = parser.parse_args()
    try:
        with open(args.input_file, 'r') as src:
            rows = list(csv.DictReader(src))
        table = format_ae_summary_table(rows)
        if args.output:
            with open(args.output, 'w') as dst:
                dst.write(table)
            print(f"β Table saved to: {args.output}")
        else:
            print("\nAdverse Events Summary Table:\n")
            print(table)
        return 0
    except Exception as e:
        print(f"Error: {e}")
        return 1


if __name__ == "__main__":
    import sys
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/clinical-reports/scripts/format_adverse_events.py",
"license": "MIT License",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/clinical-reports/scripts/generate_report_template.py | #!/usr/bin/env python3
"""
Interactive template generator for clinical reports.
Helps users select and generate appropriate clinical report templates.
Usage:
python generate_report_template.py
python generate_report_template.py --type case_report --output my_case_report.md
"""
import argparse
import shutil
from pathlib import Path
# Template key -> asset filename (resolved relative to ../assets by
# get_template_dir()). Keys must stay in sync with DESCRIPTIONS below.
TEMPLATES = {
    "case_report": "case_report_template.md",
    "soap_note": "soap_note_template.md",
    "h_and_p": "history_physical_template.md",
    "discharge_summary": "discharge_summary_template.md",
    "consult_note": "consult_note_template.md",
    "radiology": "radiology_report_template.md",
    "pathology": "pathology_report_template.md",
    "lab": "lab_report_template.md",
    "sae": "clinical_trial_sae_template.md",
    "csr": "clinical_trial_csr_template.md",
}
# Template key -> human-readable description shown in CLI menus.
DESCRIPTIONS = {
    "case_report": "Clinical Case Report (CARE guidelines)",
    "soap_note": "SOAP Progress Note",
    "h_and_p": "History and Physical Examination",
    "discharge_summary": "Hospital Discharge Summary",
    "consult_note": "Consultation Note",
    "radiology": "Radiology/Imaging Report",
    "pathology": "Surgical Pathology Report",
    "lab": "Laboratory Report",
    "sae": "Serious Adverse Event Report",
    "csr": "Clinical Study Report (ICH-E3)",
}
def get_template_dir() -> Path:
    """Return the skill's assets/ directory (sibling of this scripts/ dir)."""
    return Path(__file__).parent.parent / "assets"
def list_templates():
    """Print the numbered menu of available templates."""
    print("\nAvailable Clinical Report Templates:")
    print("=" * 60)
    for number, (key, desc) in enumerate(DESCRIPTIONS.items(), start=1):
        print(f"{number:2}. {key:20} - {desc}")
    print("=" * 60)
def generate_template(template_type: str, output_file: str = None):
    """Copy the requested template into the working directory.

    Args:
        template_type: Key into TEMPLATES.
        output_file: Destination filename; defaults to
            "new_<template filename>".

    Returns:
        The destination filename.

    Raises:
        ValueError: for an unknown template type.
        FileNotFoundError: when the template asset is missing.
    """
    if template_type not in TEMPLATES:
        raise ValueError(f"Invalid template type: {template_type}")
    source_name = TEMPLATES[template_type]
    source_path = get_template_dir() / source_name
    if not source_path.exists():
        raise FileNotFoundError(f"Template not found: {source_path}")
    destination = output_file if output_file is not None else f"new_{source_name}"
    shutil.copy(source_path, destination)
    print(f"β Template created: {destination}")
    print(f" Type: {DESCRIPTIONS[template_type]}")
    print(f" Source: {source_name}")
    return destination
def interactive_mode():
    """Prompt the user to pick and generate templates until they quit."""
    list_templates()
    print()
    template_types = list(TEMPLATES.keys())
    while True:
        choice = input("Select template number (or 'q' to quit): ").strip()
        if choice.lower() == 'q':
            print("Goodbye!")
            return
        try:
            idx = int(choice) - 1
        except ValueError:
            print("Invalid input. Please enter a number or 'q' to quit.")
            continue
        if not (0 <= idx < len(template_types)):
            print("Invalid selection. Please try again.")
            continue
        template_type = template_types[idx]
        # An empty filename falls back to generate_template's default.
        output_file = input(f"Output filename (default: new_{TEMPLATES[template_type]}): ").strip()
        generate_template(template_type, output_file or None)
        another = input("\nGenerate another template? (y/n): ").strip().lower()
        if another != 'y':
            print("Goodbye!")
            return
        print()
        list_templates()
        print()
def main():
    """Main entry point: list, generate, or run the interactive menu."""
    parser = argparse.ArgumentParser(
        description="Generate clinical report templates"
    )
    parser.add_argument(
        "--type",
        choices=list(TEMPLATES.keys()),
        help="Template type to generate"
    )
    parser.add_argument(
        "--output",
        "-o",
        help="Output filename"
    )
    parser.add_argument(
        "--list",
        action="store_true",
        help="List available templates"
    )
    args = parser.parse_args()
    try:
        if args.list:
            list_templates()
        elif args.type:
            generate_template(args.type, args.output)
        else:
            # No flags: fall back to the interactive menu.
            interactive_mode()
        return 0
    except Exception as e:
        print(f"Error: {e}")
        return 1


if __name__ == "__main__":
    import sys
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/clinical-reports/scripts/generate_report_template.py",
"license": "MIT License",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/clinical-reports/scripts/terminology_validator.py | #!/usr/bin/env python3
"""
Validate medical terminology and coding in clinical reports.
Usage:
python terminology_validator.py <report_file>
"""
import argparse
import json
import re
# Common medical abbreviations that should be avoided (JCAHO "Do Not Use" list)
# Maps each prohibited abbreviation to the wording that should replace it;
# consumed by check_do_not_use_abbreviations().
DO_NOT_USE = {
    "U": "Unit",
    "IU": "International Unit",
    "QD": "daily",
    "QOD": "every other day",
    "MS": "morphine sulfate or magnesium sulfate",  # ambiguous between the two
    "MSO4": "morphine sulfate",
    "MgSO4": "magnesium sulfate",
}
# Common abbreviations with potential ambiguity
# (volume/time codes plus ear/eye laterality codes AS/AD/AU/OS/OD/OU);
# consumed by check_ambiguous_abbreviations().
AMBIGUOUS = ["cc", "hs", "TIW", "SC", "SQ", "D/C", "AS", "AD", "AU", "OS", "OD", "OU"]
def check_do_not_use_abbreviations(content: str) -> dict:
"""Check for prohibited abbreviations."""
violations = {}
for abbrev, meaning in DO_NOT_USE.items():
# Word boundary pattern to avoid false positives
pattern = rf"\b{re.escape(abbrev)}\b"
matches = re.findall(pattern, content)
if matches:
violations[abbrev] = {
"count": len(matches),
"should_use": meaning,
"severity": "HIGH"
}
return violations
def check_ambiguous_abbreviations(content: str) -> dict:
"""Check for ambiguous abbreviations."""
found = {}
for abbrev in AMBIGUOUS:
pattern = rf"\b{re.escape(abbrev)}\b"
matches = re.findall(pattern, content, re.IGNORECASE)
if matches:
found[abbrev] = {
"count": len(matches),
"severity": "MEDIUM"
}
return found
def validate_icd10_format(content: str) -> list:
    """Extract strings shaped like ICD-10 codes from *content*.

    A match is one uppercase letter, two digits, an optional decimal point,
    and up to four more digits (e.g. "J45.909", "E11").

    Returns:
        Sorted list of unique matches. Sorting fixes the previous
        ``list(set(...))`` return, whose ordering varied between runs
        because of string hash randomization.
    """
    # ICD-10 format: Letter + 2 digits + optional decimal + 0-4 more digits
    pattern = r"\b[A-Z]\d{2}\.?\d{0,4}\b"
    return sorted(set(re.findall(pattern, content)))
def main():
    """CLI entry point: validate one report file for terminology issues.

    Reads the input file, runs the three checks (do-not-use abbreviations,
    ambiguous abbreviations, ICD-10 code extraction), then prints either a
    JSON report or a human-readable summary.

    Returns:
        0 if no prohibited abbreviations were found, 1 otherwise or on error.
    """
    parser = argparse.ArgumentParser(description="Validate medical terminology")
    parser.add_argument("input_file", help="Path to clinical report")
    parser.add_argument("--json", action="store_true")
    args = parser.parse_args()
    try:
        with open(args.input_file, 'r', encoding='utf-8') as f:
            content = f.read()
        do_not_use = check_do_not_use_abbreviations(content)
        ambiguous = check_ambiguous_abbreviations(content)
        icd10_codes = validate_icd10_format(content)
        # Aggregate all findings; "total_issues" counts distinct flagged
        # abbreviations (not occurrences), and ICD-10 codes are informational.
        report = {
            "filename": args.input_file,
            "do_not_use_violations": do_not_use,
            "ambiguous_abbreviations": ambiguous,
            "icd10_codes_found": icd10_codes,
            "total_issues": len(do_not_use) + len(ambiguous)
        }
        if args.json:
            print(json.dumps(report, indent=2))
        else:
            print("\nTerminology Validation Report:\n")
            if do_not_use:
                print("β DO NOT USE Abbreviations Found:")
                for abbrev, details in do_not_use.items():
                    print(f" {abbrev}: {details['count']} occurrence(s)")
                    print(f" β Use '{details['should_use']}' instead")
                print()
            else:
                print("β No prohibited abbreviations found\n")
            if ambiguous:
                print("β Ambiguous Abbreviations Found:")
                for abbrev, details in ambiguous.items():
                    print(f" {abbrev}: {details['count']} occurrence(s)")
                print(" Consider spelling out for clarity\n")
            if icd10_codes:
                # Show at most the first five codes to keep output short.
                print(f"βΉ ICD-10 codes detected: {len(icd10_codes)}")
                for code in icd10_codes[:5]:
                    print(f" - {code}")
                if len(icd10_codes) > 5:
                    print(f" ... and {len(icd10_codes) - 5} more")
                print()
        # Exit status reflects only hard violations, not ambiguous ones.
        return 0 if not do_not_use else 1
    except Exception as e:
        print(f"Error: {e}")
        return 1
if __name__ == "__main__":
    import sys
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/clinical-reports/scripts/terminology_validator.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/clinical-reports/scripts/validate_case_report.py | #!/usr/bin/env python3
"""
Validate case reports against CARE (CAse REport) guidelines.
This script checks a clinical case report for compliance with CARE guidelines
and provides a checklist of required elements.
Usage:
python validate_case_report.py <input_file.md|.txt>
python validate_case_report.py <input_file> --output report.json
"""
import argparse
import json
import re
from pathlib import Path
from typing import Dict, List, Tuple
class CareValidator:
    """Validator for CARE guideline compliance.

    The input file is read once at construction. Each ``check_*`` /
    ``validate_*`` method caches its findings in ``self.results``;
    ``generate_report()`` runs any missing checks and combines them
    into a single summary dictionary.
    """
    # CARE checklist items with regex patterns.
    # Each entry: human-readable name, case-insensitive detection regex,
    # the CARE section it maps to, and whether the item is mandatory.
    CARE_REQUIREMENTS = {
        "title": {
            "name": "Title contains 'case report'",
            "pattern": r"(?i)(case\s+report|case\s+study)",
            "section": "Title",
            "required": True
        },
        "keywords": {
            "name": "Keywords provided (2-5)",
            "pattern": r"(?i)keywords?[:]\s*(.+)",
            "section": "Keywords",
            "required": True
        },
        "abstract": {
            "name": "Abstract present",
            "pattern": r"(?i)##?\s*abstract",
            "section": "Abstract",
            "required": True
        },
        "introduction": {
            "name": "Introduction explaining novelty",
            "pattern": r"(?i)##?\s*introduction",
            "section": "Introduction",
            "required": True
        },
        "patient_info": {
            "name": "Patient demographics present",
            "pattern": r"(?i)(patient\s+information|demographics?)",
            "section": "Patient Information",
            "required": True
        },
        "clinical_findings": {
            "name": "Clinical findings documented",
            "pattern": r"(?i)(clinical\s+findings?|physical\s+exam)",
            "section": "Clinical Findings",
            "required": True
        },
        "timeline": {
            "name": "Timeline of events",
            "pattern": r"(?i)(timeline|chronology)",
            "section": "Timeline",
            "required": True
        },
        "diagnostic": {
            "name": "Diagnostic assessment",
            "pattern": r"(?i)diagnostic\s+(assessment|evaluation|workup)",
            "section": "Diagnostic Assessment",
            "required": True
        },
        "therapeutic": {
            "name": "Therapeutic interventions",
            "pattern": r"(?i)(therapeutic\s+intervention|treatment)",
            "section": "Therapeutic Interventions",
            "required": True
        },
        "followup": {
            "name": "Follow-up and outcomes",
            "pattern": r"(?i)(follow[\-\s]?up|outcomes?)",
            "section": "Follow-up and Outcomes",
            "required": True
        },
        "discussion": {
            "name": "Discussion with literature review",
            "pattern": r"(?i)##?\s*discussion",
            "section": "Discussion",
            "required": True
        },
        "consent": {
            "name": "Informed consent statement",
            "pattern": r"(?i)(informed\s+consent|written\s+consent|consent.*obtained)",
            "section": "Informed Consent",
            "required": True
        },
    }
    # HIPAA identifiers to check for. These regexes are heuristic screens,
    # not an exhaustive de-identification check.
    HIPAA_PATTERNS = {
        "dates": r"\b(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/\d{4}\b",
        "phone": r"\b\d{3}[-.]?\d{3}[-.]?\d{4}\b",
        "email": r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b",
        "ssn": r"\b\d{3}-\d{2}-\d{4}\b",
        "mrn": r"(?i)(mrn|medical\s+record)[:]\s*\d+",
        "zip_full": r"\b\d{5}-\d{4}\b",
    }
    def __init__(self, filename: str):
        """Initialize validator and eagerly read the input file."""
        self.filename = Path(filename)
        self.content = self._read_file()
        self.results = {}
    def _read_file(self) -> str:
        """Read input file content as UTF-8 text.

        Raises:
            FileNotFoundError: if the path does not exist.
            Exception: any other read error, re-raised with context.
        """
        try:
            with open(self.filename, 'r', encoding='utf-8') as f:
                return f.read()
        except FileNotFoundError:
            raise FileNotFoundError(f"File not found: {self.filename}")
        except Exception as e:
            # NOTE(review): re-raising as bare Exception loses the original
            # error type; callers can only catch Exception here.
            raise Exception(f"Error reading file: {e}")
    def validate_care_compliance(self) -> Dict[str, Dict]:
        """Validate compliance with CARE guidelines.

        Returns:
            Dict keyed by checklist id with name/section/required/found/status;
            also cached under ``self.results["care_compliance"]``.
        """
        results = {}
        for key, item in self.CARE_REQUIREMENTS.items():
            pattern = item["pattern"]
            found = bool(re.search(pattern, self.content))
            results[key] = {
                "name": item["name"],
                "section": item["section"],
                "required": item["required"],
                "found": found,
                # Missing optional items degrade to WARNING instead of FAIL.
                "status": "PASS" if found else "FAIL" if item["required"] else "WARNING"
            }
        self.results["care_compliance"] = results
        return results
    def check_deidentification(self) -> Dict[str, List[str]]:
        """Check for potential HIPAA identifier violations.

        Returns:
            Mapping of identifier kind -> up to five example matches.
        """
        violations = {}
        for identifier, pattern in self.HIPAA_PATTERNS.items():
            matches = re.findall(pattern, self.content)
            if matches:
                violations[identifier] = matches[:5]  # Limit to first 5 examples
        self.results["hipaa_violations"] = violations
        return violations
    def check_word_count(self) -> Dict[str, int]:
        """Check word count and provide limits guidance."""
        words = len(re.findall(r'\b\w+\b', self.content))
        word_count = {
            "total_words": words,
            "typical_min": 1500,
            "typical_max": 3000,
            # NOTE(review): the ACCEPTABLE window tops out at 3500 while
            # typical_max above says 3000 — confirm which bound is intended.
            "status": "ACCEPTABLE" if 1500 <= words <= 3500 else "CHECK"
        }
        self.results["word_count"] = word_count
        return word_count
    def check_references(self) -> Dict[str, any]:
        # NOTE(review): 'any' in this annotation is the builtin, not
        # typing.Any — it happens to work at runtime but is likely a typo.
        """Check for presence of references.

        Citation count is estimated from bracketed markers like ``[12]``.
        """
        ref_patterns = [
            r"##?\s*references",
            r"\[\d+\]",
            r"\d+\.\s+[A-Z][a-z]+.*\d{4}", # Numbered references
        ]
        has_refs = any(re.search(p, self.content, re.IGNORECASE) for p in ref_patterns)
        ref_count = len(re.findall(r"\[\d+\]", self.content))
        references = {
            "has_references": has_refs,
            "estimated_count": ref_count,
            "recommended_min": 10,
            "status": "ACCEPTABLE" if ref_count >= 10 else "LOW"
        }
        self.results["references"] = references
        return references
    def generate_report(self) -> Dict:
        """Generate comprehensive validation report.

        Runs all checks if none have been run yet, then combines them.
        Overall PASS requires >=90% required-section compliance AND zero
        detected HIPAA identifiers.
        """
        if not self.results:
            self.validate_care_compliance()
            self.check_deidentification()
            self.check_word_count()
            self.check_references()
        # Calculate overall compliance
        care = self.results["care_compliance"]
        total_required = sum(1 for v in care.values() if v["required"])
        passed = sum(1 for v in care.values() if v["required"] and v["found"])
        compliance_rate = (passed / total_required * 100) if total_required > 0 else 0
        report = {
            "filename": str(self.filename),
            "compliance_rate": round(compliance_rate, 1),
            "care_compliance": care,
            "hipaa_violations": self.results["hipaa_violations"],
            "word_count": self.results["word_count"],
            "references": self.results["references"],
            "overall_status": "PASS" if compliance_rate >= 90 and not self.results["hipaa_violations"] else "NEEDS_REVISION"
        }
        return report
    def print_report(self):
        """Print human-readable validation report to stdout."""
        report = self.generate_report()
        print("=" * 70)
        print(f"CARE Guideline Validation Report")
        print(f"File: {report['filename']}")
        print("=" * 70)
        print()
        print(f"Overall Compliance: {report['compliance_rate']}%")
        print(f"Status: {report['overall_status']}")
        print()
        print("CARE Checklist:")
        print("-" * 70)
        for key, item in report["care_compliance"].items():
            status_symbol = "β" if item["found"] else "β"
            print(f"{status_symbol} [{item['status']:8}] {item['name']}")
        print()
        if report["hipaa_violations"]:
            print("HIPAA DE-IDENTIFICATION WARNINGS:")
            print("-" * 70)
            for identifier, examples in report["hipaa_violations"].items():
                print(f"β {identifier.upper()}: {len(examples)} instance(s) found")
                # Show at most three examples per identifier kind.
                for ex in examples[:3]:
                    print(f" Example: {ex}")
            print()
        else:
            print("β No obvious HIPAA identifiers detected")
            print()
        wc = report["word_count"]
        print(f"Word Count: {wc['total_words']} words")
        print(f" Typical range: {wc['typical_min']}-{wc['typical_max']} words")
        print(f" Status: {wc['status']}")
        print()
        refs = report["references"]
        print(f"References: {refs['estimated_count']} citation(s) detected")
        print(f" Recommended minimum: {refs['recommended_min']}")
        print(f" Status: {refs['status']}")
        print()
        print("=" * 70)
        # Recommendations
        issues = []
        if report['compliance_rate'] < 100:
            missing = [v["name"] for v in report["care_compliance"].values() if v["required"] and not v["found"]]
            issues.append(f"Missing required sections: {', '.join(missing)}")
        if report["hipaa_violations"]:
            issues.append("HIPAA identifiers detected - review de-identification")
        if refs["status"] == "LOW":
            issues.append("Low reference count - consider adding more citations")
        if issues:
            print("RECOMMENDATIONS:")
            for i, issue in enumerate(issues, 1):
                print(f"{i}. {issue}")
        else:
            print("β Case report meets CARE guidelines!")
        print("=" * 70)
def main():
    """CLI entry point for CARE-guideline validation.

    Parses arguments, runs :class:`CareValidator` on the input file, and
    prints either a human-readable report or JSON; optionally writes the
    JSON report to --output.

    Returns:
        0 if the report's overall_status is PASS, 1 otherwise or on error.
    """
    # Bind sys locally: the module-level `import sys` only happens inside the
    # __main__ guard, so `sys.stderr` below would raise NameError when main()
    # is invoked as a library function.
    import sys
    parser = argparse.ArgumentParser(
        description="Validate clinical case reports against CARE guidelines"
    )
    parser.add_argument(
        "input_file",
        help="Path to case report file (Markdown or text)"
    )
    parser.add_argument(
        "--output",
        "-o",
        help="Output JSON report to file"
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Output JSON to stdout instead of human-readable report"
    )
    args = parser.parse_args()
    try:
        validator = CareValidator(args.input_file)
        report = validator.generate_report()
        if args.json:
            print(json.dumps(report, indent=2))
        else:
            validator.print_report()
        if args.output:
            with open(args.output, 'w') as f:
                # BUG FIX: was json.dumps(report, f, indent=2) — dumps() takes
                # a single positional argument, so this raised TypeError.
                # json.dump writes the report to the open file as intended.
                json.dump(report, f, indent=2)
            print(f"\nJSON report saved to: {args.output}")
        # Exit with non-zero if validation failed
        exit_code = 0 if report["overall_status"] == "PASS" else 1
        return exit_code
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1
if __name__ == "__main__":
    import sys
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/clinical-reports/scripts/validate_case_report.py",
"license": "MIT License",
"lines": 283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/clinical-reports/scripts/validate_trial_report.py | #!/usr/bin/env python3
"""
Validate clinical trial reports against ICH-E3 structure.
Checks Clinical Study Reports (CSR) for ICH-E3 compliance.
Usage:
python validate_trial_report.py <csr_file.md>
"""
import argparse
import json
import re
from pathlib import Path
ICH_E3_SECTIONS = {
    "title_page": "Title Page",
    "synopsis": "Synopsis (2)",
    "toc": "Table of Contents (3)",
    "abbreviations": "List of Abbreviations (4)",
    "ethics": "Ethics (Section 2)",
    "investigators": "Investigators and Study Administrative Structure (Section 3)",
    "introduction": "Introduction (Section 4)",
    "objectives": "Study Objectives and Plan (Section 5)",
    "study_patients": "Study Patients (Section 6)",
    "efficacy": "Efficacy Evaluation (Section 7)",
    "safety": "Safety Evaluation (Section 8)",
    "discussion": "Discussion and Overall Conclusions (Section 9)",
    "tables_figures": "Tables, Figures, and Graphs (Section 10)",
    "references": "References (Section 11)",
    "appendices": "Appendices (Section 12-14)",
}
def validate_ich_e3(filename: str) -> dict:
    """Score a Clinical Study Report file against the ICH-E3 checklist.

    Each expected section is detected by a case-insensitive Markdown-header
    regex built from the section label (text before any parenthesis).

    Returns:
        Dict with filename, compliance_rate (percent, 1 decimal), per-section
        found flags, and status ("PASS" at >=90% compliance).
    """
    with open(filename, 'r', encoding='utf-8') as fh:
        text = fh.read()
    sections = {}
    for key, label in ICH_E3_SECTIONS.items():
        # Strip the parenthesized section number before building the pattern.
        header = label.split('(')[0].strip()
        present = re.search(rf"(?i)##?\s*{re.escape(header)}", text) is not None
        sections[key] = {"name": label, "found": present}
    hits = sum(1 for entry in sections.values() if entry["found"])
    rate = hits / len(sections) * 100
    return {
        "filename": filename,
        "compliance_rate": round(rate, 1),
        "sections": sections,
        "status": "PASS" if rate >= 90 else "NEEDS_REVISION",
    }
def main():
    """CLI entry point: validate one CSR file against ICH-E3.

    Returns:
        0 when the report status is PASS, 1 otherwise or on error.
    """
    parser = argparse.ArgumentParser(description="Validate CSR against ICH-E3")
    parser.add_argument("input_file", help="Path to CSR file")
    parser.add_argument("--json", action="store_true", help="Output JSON")
    args = parser.parse_args()
    try:
        report = validate_ich_e3(args.input_file)
        if args.json:
            print(json.dumps(report, indent=2))
        else:
            print(f"\nICH-E3 Compliance: {report['compliance_rate']}%")
            print(f"Status: {report['status']}\n")
            print("Section Checklist:")
            for details in report["sections"].values():
                marker = "β" if details["found"] else "β"
                print(f"{marker} {details['name']}")
        return 0 if report["status"] == "PASS" else 1
    except Exception as e:
        print(f"Error: {e}")
        return 1
if __name__ == "__main__":
    import sys
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/clinical-reports/scripts/validate_trial_report.py",
"license": "MIT License",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/clinicaltrials-database/scripts/query_clinicaltrials.py | #!/usr/bin/env python3
"""
ClinicalTrials.gov API Query Helper
A comprehensive Python script for querying the ClinicalTrials.gov API v2.
Provides convenient functions for common query patterns including searching
by condition, intervention, location, sponsor, and retrieving specific trials.
API Documentation: https://clinicaltrials.gov/data-api/api
Rate Limit: ~50 requests per minute per IP address
"""
import requests
import json
from typing import Dict, List, Optional, Union
from urllib.parse import urlencode
BASE_URL = "https://clinicaltrials.gov/api/v2"
def search_studies(
    condition: Optional[str] = None,
    intervention: Optional[str] = None,
    location: Optional[str] = None,
    sponsor: Optional[str] = None,
    status: Optional[Union[str, List[str]]] = None,
    nct_ids: Optional[List[str]] = None,
    sort: str = "LastUpdatePostDate:desc",
    page_size: int = 10,
    page_token: Optional[str] = None,
    format: str = "json"
) -> Dict:
    """Search ClinicalTrials.gov v2 for studies matching the given filters.

    Args:
        condition: Disease or condition (e.g., "lung cancer").
        intervention: Treatment or intervention (e.g., "Pembrolizumab").
        location: Geographic location (e.g., "New York").
        sponsor: Sponsor or collaborator name.
        status: One status string or a list of them (RECRUITING, COMPLETED, ...).
        nct_ids: Restrict to these NCT IDs.
        sort: Sort expression (e.g., "LastUpdatePostDate:desc").
        page_size: Results per page (API max: 1000).
        page_token: Pagination token from a previous response.
        format: "json" (parsed dict) or "csv" (raw text).

    Returns:
        Parsed JSON dict, or raw response text for non-JSON formats.

    Raises:
        requests.HTTPError: on non-2xx responses.
    """
    params = {}
    # Map the simple search filters to their API parameter names.
    for key, value in (
        ('query.cond', condition),
        ('query.intr', intervention),
        ('query.locn', location),
        ('query.spons', sponsor),
    ):
        if value:
            params[key] = value
    # The status filter accepts either a single value or a comma-joined list.
    if status:
        params['filter.overallStatus'] = ','.join(status) if isinstance(status, list) else status
    if nct_ids:
        params['filter.ids'] = ','.join(nct_ids)
    params['sort'] = sort
    params['pageSize'] = page_size
    if page_token:
        params['pageToken'] = page_token
    params['format'] = format
    response = requests.get(f"{BASE_URL}/studies", params=params)
    response.raise_for_status()
    return response.json() if format == "json" else response.text
def get_study_details(nct_id: str, format: str = "json") -> Dict:
    """Fetch the full record for one trial by its NCT ID.

    Args:
        nct_id: Trial identifier (e.g., "NCT04852770").
        format: "json" (parsed dict) or "csv" (raw text).

    Returns:
        Parsed JSON dict, or raw response text for non-JSON formats.

    Raises:
        requests.HTTPError: on non-2xx responses.
    """
    response = requests.get(f"{BASE_URL}/studies/{nct_id}", params={'format': format})
    response.raise_for_status()
    return response.json() if format == "json" else response.text
def search_with_all_results(
    condition: Optional[str] = None,
    intervention: Optional[str] = None,
    location: Optional[str] = None,
    sponsor: Optional[str] = None,
    status: Optional[Union[str, List[str]]] = None,
    max_results: Optional[int] = None
) -> List[Dict]:
    """Search and follow pagination tokens until results are exhausted.

    Args:
        condition: Disease or condition filter.
        intervention: Treatment filter.
        location: Geographic filter.
        sponsor: Sponsor/collaborator filter.
        status: Status filter (string or list).
        max_results: Stop after this many studies (None fetches everything).

    Returns:
        List of all matching study dicts (truncated to max_results if set).
    """
    collected: List[Dict] = []
    token = None
    while True:
        # Request the maximum page size to minimize the number of round trips.
        page = search_studies(
            condition=condition,
            intervention=intervention,
            location=location,
            sponsor=sponsor,
            status=status,
            page_size=1000,
            page_token=token,
        )
        collected.extend(page.get('studies', []))
        if max_results and len(collected) >= max_results:
            return collected[:max_results]
        token = page.get('nextPageToken')
        if not token:
            return collected
def extract_study_summary(study: Dict) -> Dict:
    """Condense one API study record into its most commonly used fields.

    Args:
        study: A study dict as returned by the /studies endpoints.

    Returns:
        Dict with nct_id, title (official title, falling back to brief
        title), status, phase list, enrollment count, brief summary, and
        last update date; missing fields come back as None (or [] for phase).
    """
    protocol = study.get('protocolSection', {})
    ident = protocol.get('identificationModule', {})
    status_mod = protocol.get('statusModule', {})
    design = protocol.get('designModule', {})
    return {
        'nct_id': ident.get('nctId'),
        'title': ident.get('officialTitle') or ident.get('briefTitle'),
        'status': status_mod.get('overallStatus'),
        'phase': design.get('phases', []),
        'enrollment': design.get('enrollmentInfo', {}).get('count'),
        'brief_summary': protocol.get('descriptionModule', {}).get('briefSummary'),
        'last_update': status_mod.get('lastUpdatePostDateStruct', {}).get('date'),
    }
# Example usage
# NOTE: this block performs live HTTP calls to ClinicalTrials.gov and
# therefore requires network access.
if __name__ == "__main__":
    # Example 1: Search for recruiting lung cancer trials
    print("Example 1: Searching for recruiting lung cancer trials...")
    results = search_studies(
        condition="lung cancer",
        status="RECRUITING",
        page_size=5
    )
    print(f"Found {results.get('totalCount', 0)} total trials")
    print(f"Showing first {len(results.get('studies', []))} trials\n")
    # Example 2: Get details for a specific trial
    if results.get('studies'):
        first_study = results['studies'][0]
        # Pull the NCT ID out of the first search hit, then re-fetch it
        # individually and print a condensed summary.
        nct_id = first_study['protocolSection']['identificationModule']['nctId']
        print(f"Example 2: Getting details for {nct_id}...")
        details = get_study_details(nct_id)
        summary = extract_study_summary(details)
        print(json.dumps(summary, indent=2))
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/clinicaltrials-database/scripts/query_clinicaltrials.py",
"license": "MIT License",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/clinpgx-database/scripts/query_clinpgx.py | #!/usr/bin/env python3
"""
ClinPGx API Query Helper Script
Provides ready-to-use functions for querying the ClinPGx database API.
Includes rate limiting, error handling, and caching functionality.
ClinPGx API: https://api.clinpgx.org/
Rate limit: 2 requests per second
License: Creative Commons Attribution-ShareAlike 4.0 International
"""
import requests
import time
import json
from pathlib import Path
from typing import Dict, List, Optional, Any
# API Configuration
BASE_URL = "https://api.clinpgx.org/v1/"
RATE_LIMIT_DELAY = 0.5  # 500ms between requests keeps us at 2 req/sec
def rate_limited_request(url: str, params: Optional[Dict] = None, delay: float = RATE_LIMIT_DELAY) -> requests.Response:
    """Issue a GET request, then pause to respect the API's rate limit.

    Args:
        url: API endpoint URL.
        params: Optional query parameters.
        delay: Seconds to sleep after the request (default 0.5s = 2 req/sec).

    Returns:
        The raw requests.Response object.
    """
    resp = requests.get(url, params=params)
    time.sleep(delay)
    return resp
def safe_api_call(url: str, params: Optional[Dict] = None, max_retries: int = 3) -> Optional[Dict]:
    """
    Make API call with error handling and exponential backoff retry.

    Success (200) sleeps RATE_LIMIT_DELAY before returning to keep callers
    under the rate limit; 429 backs off 1s/2s/4s; 404 returns None
    immediately; other HTTP errors are raised and then retried via the
    RequestException handler below.

    Args:
        url: API endpoint URL
        params: Query parameters
        max_retries: Maximum number of retry attempts

    Returns:
        JSON response data or None on failure
    """
    for attempt in range(max_retries):
        try:
            response = requests.get(url, params=params, timeout=10)
            if response.status_code == 200:
                time.sleep(RATE_LIMIT_DELAY)
                return response.json()
            elif response.status_code == 429:
                # Rate limit exceeded
                wait_time = 2 ** attempt # Exponential backoff: 1s, 2s, 4s
                print(f"Rate limit exceeded. Waiting {wait_time}s before retry...")
                time.sleep(wait_time)
            elif response.status_code == 404:
                print(f"Resource not found: {url}")
                return None
            else:
                # raise_for_status() raises HTTPError, a RequestException
                # subclass, so other HTTP errors fall into the retry path.
                response.raise_for_status()
        except requests.exceptions.RequestException as e:
            print(f"Attempt {attempt + 1}/{max_retries} failed: {e}")
            if attempt == max_retries - 1:
                print(f"Failed after {max_retries} attempts")
                return None
            time.sleep(1)
    # All attempts consumed by 429 backoffs without a success.
    return None
def cached_query(cache_file: str, query_func, *args, **kwargs) -> Any:
    """Return cached JSON results when available, otherwise query and cache.

    Args:
        cache_file: Path of the JSON cache file.
        query_func: Callable invoked on a cache miss.
        *args, **kwargs: Forwarded to query_func.

    Returns:
        The cached data, or the fresh query result (which is written to the
        cache only when it is not None).
    """
    path = Path(cache_file)
    if path.exists():
        print(f"Loading from cache: {cache_file}")
        with open(path) as fh:
            return json.load(fh)
    print(f"Cache miss. Querying API...")
    data = query_func(*args, **kwargs)
    if data is not None:
        # Create parent directories lazily so callers can use nested paths.
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, 'w') as fh:
            json.dump(data, fh, indent=2)
        print(f"Cached to: {cache_file}")
    return data
# Core Query Functions
def get_gene_info(gene_symbol: str) -> Optional[Dict]:
    """Fetch the ClinPGx record for a single pharmacogene.

    Args:
        gene_symbol: Gene symbol (e.g., "CYP2D6", "TPMT").

    Returns:
        Gene information dictionary, or None if the lookup fails.
    """
    return safe_api_call(f"{BASE_URL}gene/{gene_symbol}")
def get_drug_info(drug_name: str) -> Optional[List[Dict]]:
    """Search ClinPGx chemicals by drug name.

    Args:
        drug_name: Drug name (e.g., "warfarin", "codeine").

    Returns:
        List of matching drug records, or None on failure.
    """
    return safe_api_call(f"{BASE_URL}chemical", {"name": drug_name})
def get_gene_drug_pairs(gene: Optional[str] = None, drug: Optional[str] = None) -> Optional[List[Dict]]:
    """Query gene-drug interaction pairs, optionally filtered.

    Args:
        gene: Gene symbol filter (optional).
        drug: Drug name filter (optional).

    Returns:
        List of gene-drug pairs with clinical annotations, or None on failure.
    """
    # Only include filters that were actually provided.
    filters = {key: val for key, val in (("gene", gene), ("drug", drug)) if val}
    return safe_api_call(f"{BASE_URL}geneDrugPair", filters)
def get_cpic_guidelines(gene: Optional[str] = None, drug: Optional[str] = None) -> Optional[List[Dict]]:
    """Retrieve CPIC clinical practice guidelines, optionally filtered.

    Args:
        gene: Gene symbol filter (optional).
        drug: Drug name filter (optional).

    Returns:
        List of CPIC guideline records, or None on failure.
    """
    # The source is always pinned to CPIC; gene/drug filters are additive.
    params = {"source": "CPIC"}
    params.update({key: val for key, val in (("gene", gene), ("drug", drug)) if val})
    return safe_api_call(f"{BASE_URL}guideline", params)
def get_alleles(gene: str) -> Optional[List[Dict]]:
    """List every allele known for a pharmacogene.

    Args:
        gene: Gene symbol (e.g., "CYP2D6").

    Returns:
        List of alleles with functional annotations and population
        frequencies, or None on failure.
    """
    return safe_api_call(f"{BASE_URL}allele", {"gene": gene})
def get_allele_info(allele_name: str) -> Optional[Dict]:
    """Fetch the record for one specific allele.

    Args:
        allele_name: Star-allele name (e.g., "CYP2D6*4").

    Returns:
        Allele information dictionary, or None on failure.
    """
    return safe_api_call(f"{BASE_URL}allele/{allele_name}")
def get_clinical_annotations(
    gene: Optional[str] = None,
    drug: Optional[str] = None,
    evidence_level: Optional[str] = None
) -> Optional[List[Dict]]:
    """Retrieve curated literature annotations for gene-drug interactions.

    Args:
        gene: Gene symbol filter (optional).
        drug: Drug name filter (optional).
        evidence_level: Evidence-level filter (1A, 1B, 2A, 2B, 3, 4).

    Returns:
        List of clinical annotations, or None on failure.
    """
    # Build the query from whichever filters the caller supplied.
    filters = {key: val for key, val in (
        ("gene", gene),
        ("drug", drug),
        ("evidenceLevel", evidence_level),
    ) if val}
    return safe_api_call(f"{BASE_URL}clinicalAnnotation", filters)
def get_drug_labels(drug: str, source: Optional[str] = None) -> Optional[List[Dict]]:
    """Retrieve pharmacogenomic drug-label information.

    Args:
        drug: Drug name (required).
        source: Regulatory source filter (e.g., "FDA", "EMA").

    Returns:
        List of drug labels with PGx information, or None on failure.
    """
    params = {"drug": drug} if not source else {"drug": drug, "source": source}
    return safe_api_call(f"{BASE_URL}drugLabel", params)
def search_variants(rsid: Optional[str] = None, chromosome: Optional[str] = None,
                    position: Optional[str] = None) -> Optional[List[Dict]]:
    """Search genetic variants by rsID or by genomic position.

    Args:
        rsid: dbSNP rsID (e.g., "rs4244285"); takes precedence when given.
        chromosome: Chromosome number (used only without rsid).
        position: Genomic position (used only without rsid).

    Returns:
        Matching variant data, or None on failure.
    """
    # An rsID addresses a single variant resource directly.
    if rsid:
        return safe_api_call(f"{BASE_URL}variant/{rsid}")
    filters = {key: val for key, val in (
        ("chromosome", chromosome),
        ("position", position),
    ) if val}
    return safe_api_call(f"{BASE_URL}variant", filters)
def get_pathway_info(pathway_id: Optional[str] = None, drug: Optional[str] = None) -> Optional[Any]:
    """Retrieve PK/PD pathway information by ID or by drug.

    Args:
        pathway_id: ClinPGx pathway ID; takes precedence when given.
        drug: Drug name used to list pathways (optional).

    Returns:
        A single pathway record, a list of pathways, or None on failure.
    """
    # A pathway ID addresses one resource directly.
    if pathway_id:
        return safe_api_call(f"{BASE_URL}pathway/{pathway_id}")
    filters = {"drug": drug} if drug else {}
    return safe_api_call(f"{BASE_URL}pathway", filters)
# Utility Functions
def export_to_dataframe(data: List[Dict], output_file: Optional[str] = None):
    """Build a pandas DataFrame from API results, optionally saving a CSV.

    Args:
        data: List of record dicts from the API.
        output_file: Optional CSV path; written (without index) when given.

    Returns:
        The DataFrame, or None when pandas is not installed.
    """
    try:
        import pandas as pd
    except ImportError:
        print("pandas not installed. Install with: pip install pandas")
        return None
    frame = pd.DataFrame(data)
    if output_file:
        frame.to_csv(output_file, index=False)
        print(f"Data exported to: {output_file}")
    return frame
def batch_gene_query(gene_list: List[str], delay: float = 0.5) -> Dict[str, Dict]:
    """Fetch several genes sequentially with a rate-limiting pause.

    Args:
        gene_list: Gene symbols to fetch.
        delay: Seconds to sleep between requests (default 0.5s).

    Returns:
        Mapping of gene symbol -> gene data for every successful lookup;
        failed lookups are simply omitted.
    """
    found: Dict[str, Dict] = {}
    print(f"Querying {len(gene_list)} genes with {delay}s delay between requests...")
    for symbol in gene_list:
        print(f"Fetching: {symbol}")
        record = get_gene_info(symbol)
        if record:
            found[symbol] = record
        time.sleep(delay)
    print(f"Completed: {len(found)}/{len(gene_list)} successful")
    return found
def find_actionable_gene_drug_pairs(cpic_level: str = "A") -> Optional[List[Dict]]:
    """
    Find all clinically actionable gene-drug pairs with CPIC guidelines.

    Args:
        cpic_level: CPIC recommendation level (A, B, C, D)

    Returns:
        List of actionable gene-drug pairs (None on API failure)

    Example:
        >>> # Get all Level A recommendations
        >>> actionable = find_actionable_gene_drug_pairs(cpic_level="A")
        >>> for pair in actionable:
        >>>     print(f"{pair['gene']} - {pair['drug']}")
    """
    return safe_api_call(f"{BASE_URL}geneDrugPair", {"cpicLevel": cpic_level})
# Example Usage
if __name__ == "__main__":
    # Smoke-test the client against the live ClinPGx API.
    # NOTE(review): every example performs a real network call via the
    # module's query helpers (get_gene_info, get_drug_info, ...).
    print("ClinPGx API Query Examples\n")
    # Example 1: Get gene information
    print("=" * 60)
    print("Example 1: Get CYP2D6 gene information")
    print("=" * 60)
    cyp2d6 = get_gene_info("CYP2D6")
    if cyp2d6:
        print(f"Gene: {cyp2d6.get('symbol')}")
        print(f"Name: {cyp2d6.get('name')}")
    print()
    # Example 2: Search for a drug
    print("=" * 60)
    print("Example 2: Search for warfarin")
    print("=" * 60)
    warfarin = get_drug_info("warfarin")
    if warfarin:
        for drug in warfarin[:1]:  # Show first result
            print(f"Drug: {drug.get('name')}")
            print(f"ID: {drug.get('id')}")
    print()
    # Example 3: Get gene-drug pairs
    print("=" * 60)
    print("Example 3: Get CYP2C19-clopidogrel pair")
    print("=" * 60)
    pair = get_gene_drug_pairs(gene="CYP2C19", drug="clopidogrel")
    if pair:
        print(f"Found {len(pair)} gene-drug pair(s)")
        if len(pair) > 0:
            print(f"Annotations: {pair[0].get('sources', [])}")
    print()
    # Example 4: Get CPIC guidelines
    print("=" * 60)
    print("Example 4: Get CPIC guidelines for CYP2C19")
    print("=" * 60)
    guidelines = get_cpic_guidelines(gene="CYP2C19")
    if guidelines:
        print(f"Found {len(guidelines)} guideline(s)")
        for g in guidelines[:2]:  # Show first 2
            print(f"  - {g.get('name')}")
    print()
    # Example 5: Get alleles for a gene
    print("=" * 60)
    print("Example 5: Get CYP2D6 alleles")
    print("=" * 60)
    alleles = get_alleles("CYP2D6")
    if alleles:
        print(f"Found {len(alleles)} allele(s)")
        for allele in alleles[:3]:  # Show first 3
            print(f"  - {allele.get('name')}: {allele.get('function')}")
    print()
    print("=" * 60)
    print("Examples completed!")
    print("=" * 60)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/clinpgx-database/scripts/query_clinpgx.py",
"license": "MIT License",
"lines": 405,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/scientific/cosmic-database/scripts/download_cosmic.py | #!/usr/bin/env python3
"""
COSMIC Data Download Utility
This script provides functions to download data from the COSMIC database
(Catalogue of Somatic Mutations in Cancer).
Usage:
from download_cosmic import download_cosmic_file, list_available_files
# Download a specific file
download_cosmic_file(
email="user@example.com",
password="password",
filepath="GRCh38/cosmic/latest/CosmicMutantExport.tsv.gz",
output_filename="mutations.tsv.gz"
)
Requirements:
- requests library: pip install requests
- Valid COSMIC account credentials (register at cancer.sanger.ac.uk/cosmic)
"""
import requests
import sys
import os
from typing import Optional
def download_cosmic_file(
    email: str,
    password: str,
    filepath: str,
    output_filename: Optional[str] = None,
    genome_assembly: str = "GRCh38"
) -> bool:
    """
    Download a file from COSMIC database.

    COSMIC uses a two-step scheme: an authenticated request returns a
    short-lived signed URL, and the actual payload is fetched from that URL.

    Args:
        email: COSMIC account email
        password: COSMIC account password
        filepath: Relative path to file (e.g., "GRCh38/cosmic/latest/CosmicMutantExport.tsv.gz")
        output_filename: Optional custom output filename (default: last part of filepath)
        genome_assembly: Genome assembly version (GRCh37 or GRCh38, default: GRCh38).
            NOTE(review): currently unused in the body — the assembly is encoded
            in ``filepath`` itself; parameter kept for interface compatibility.

    Returns:
        True if download successful, False otherwise

    Example:
        download_cosmic_file(
            "user@email.com",
            "pass123",
            "GRCh38/cosmic/latest/CosmicMutantExport.tsv.gz"
        )
    """
    base_url = "https://cancer.sanger.ac.uk/cosmic/file_download/"
    # Determine output filename (basename of the remote path by default)
    if output_filename is None:
        output_filename = os.path.basename(filepath)
    try:
        # Step 1: Get the download URL (HTTP Basic auth against the COSMIC API)
        print(f"Requesting download URL for: {filepath}")
        r = requests.get(
            base_url + filepath,
            auth=(email, password),
            timeout=30
        )
        if r.status_code == 401:
            print("ERROR: Authentication failed. Check email and password.")
            return False
        elif r.status_code == 404:
            print(f"ERROR: File not found: {filepath}")
            return False
        elif r.status_code != 200:
            print(f"ERROR: Request failed with status code {r.status_code}")
            print(f"Response: {r.text}")
            return False
        # Parse response to get the signed download URL
        response_data = r.json()
        download_url = response_data.get("url")
        if not download_url:
            print("ERROR: No download URL in response")
            return False
        # Step 2: Download the file (streamed; longer timeout for large exports)
        print(f"Downloading file from: {download_url}")
        file_response = requests.get(download_url, stream=True, timeout=300)
        if file_response.status_code != 200:
            print(f"ERROR: Download failed with status code {file_response.status_code}")
            return False
        # Step 3: Write to disk
        print(f"Saving to: {output_filename}")
        # content-length may be absent (chunked encoding); 0 disables the progress display
        total_size = int(file_response.headers.get('content-length', 0))
        with open(output_filename, 'wb') as f:
            if total_size == 0:
                # Unknown size: no progress bar, write the whole body at once
                f.write(file_response.content)
            else:
                downloaded = 0
                for chunk in file_response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
                        downloaded += len(chunk)
                        # Show progress (carriage return keeps it on one line)
                        progress = (downloaded / total_size) * 100
                        print(f"\rProgress: {progress:.1f}%", end='', flush=True)
                print()  # New line after progress
        print(f"β Successfully downloaded: {output_filename}")
        return True
    except requests.exceptions.Timeout:
        print("ERROR: Request timed out")
        return False
    except requests.exceptions.RequestException as e:
        print(f"ERROR: Request failed: {e}")
        return False
    except Exception as e:
        # Catch-all so CLI callers get a False return rather than a traceback.
        print(f"ERROR: Unexpected error: {e}")
        return False
def get_common_file_path(
    data_type: str,
    genome_assembly: str = "GRCh38",
    version: str = "latest"
) -> Optional[str]:
    """
    Get the filepath for common COSMIC data files.

    Args:
        data_type: Type of data (e.g., 'mutations', 'gene_census', 'signatures')
        genome_assembly: GRCh37 or GRCh38
        version: COSMIC version (use 'latest' for most recent)

    Returns:
        Filepath string or None if type unknown
    """
    # All assembly-specific files share the same directory prefix;
    # mutational signatures live in an assembly-independent location.
    prefix = f"{genome_assembly}/cosmic/{version}"
    catalogue = {
        'mutations': f'{prefix}/CosmicMutantExport.tsv.gz',
        'mutations_vcf': f'{prefix}/VCF/CosmicCodingMuts.vcf.gz',
        'gene_census': f'{prefix}/cancer_gene_census.csv',
        'resistance_mutations': f'{prefix}/CosmicResistanceMutations.tsv.gz',
        'structural_variants': f'{prefix}/CosmicStructExport.tsv.gz',
        'gene_expression': f'{prefix}/CosmicCompleteGeneExpression.tsv.gz',
        'copy_number': f'{prefix}/CosmicCompleteCNA.tsv.gz',
        'fusion_genes': f'{prefix}/CosmicFusionExport.tsv.gz',
        'signatures': 'signatures/signatures.tsv',
        'sample_info': f'{prefix}/CosmicSample.tsv.gz',
    }
    return catalogue.get(data_type)
def main():
    """Command-line interface for downloading COSMIC files.

    Returns a shell-style exit status: 0 on success, 1 on failure.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='Download files from COSMIC database',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Download mutations file
  %(prog)s user@email.com --filepath GRCh38/cosmic/latest/CosmicMutantExport.tsv.gz
  # Download using shorthand
  %(prog)s user@email.com --data-type mutations
  # Download for GRCh37
  %(prog)s user@email.com --data-type gene_census --assembly GRCh37
"""
    )
    parser.add_argument('email', help='COSMIC account email')
    parser.add_argument('--password', help='COSMIC account password (will prompt if not provided)')
    parser.add_argument('--filepath', help='Full filepath to download')
    parser.add_argument('--data-type',
                        choices=['mutations', 'mutations_vcf', 'gene_census', 'resistance_mutations',
                                 'structural_variants', 'gene_expression', 'copy_number',
                                 'fusion_genes', 'signatures', 'sample_info'],
                        help='Common data type shorthand')
    parser.add_argument('--assembly', default='GRCh38',
                        choices=['GRCh37', 'GRCh38'],
                        help='Genome assembly (default: GRCh38)')
    parser.add_argument('--version', default='latest',
                        help='COSMIC version (default: latest)')
    parser.add_argument('-o', '--output', help='Output filename')
    args = parser.parse_args()
    # Get password if not provided — interactive prompt keeps the secret
    # out of shell history and process listings.
    if not args.password:
        import getpass
        args.password = getpass.getpass('COSMIC password: ')
    # Determine filepath: an explicit --filepath wins over --data-type shorthand
    if args.filepath:
        filepath = args.filepath
    elif args.data_type:
        filepath = get_common_file_path(args.data_type, args.assembly, args.version)
        if not filepath:
            print(f"ERROR: Unknown data type: {args.data_type}")
            return 1
    else:
        print("ERROR: Must provide either --filepath or --data-type")
        parser.print_help()
        return 1
    # Download the file
    success = download_cosmic_file(
        email=args.email,
        password=args.password,
        filepath=filepath,
        output_filename=args.output,
        genome_assembly=args.assembly
    )
    return 0 if success else 1
if __name__ == '__main__':
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/cosmic-database/scripts/download_cosmic.py",
"license": "MIT License",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/deepchem/scripts/graph_neural_network.py | #!/usr/bin/env python3
"""
Graph Neural Network Training Script
This script demonstrates training Graph Convolutional Networks (GCNs) and other
graph-based models for molecular property prediction.
Usage:
python graph_neural_network.py --dataset tox21 --model gcn
python graph_neural_network.py --dataset bbbp --model attentivefp
python graph_neural_network.py --data custom.csv --task-type regression
"""
import argparse
import deepchem as dc
import sys
# Supported graph-model architectures: CLI key -> human-readable name.
AVAILABLE_MODELS = {
    'gcn': 'Graph Convolutional Network',
    'gat': 'Graph Attention Network',
    'attentivefp': 'Attentive Fingerprint',
    'mpnn': 'Message Passing Neural Network',
    'dmpnn': 'Directed Message Passing Neural Network'
}
# MoleculeNet benchmarks: dataset key -> (task type, default task count).
MOLNET_DATASETS = {
    'tox21': ('classification', 12),
    'bbbp': ('classification', 1),
    'bace': ('classification', 1),
    'hiv': ('classification', 1),
    'delaney': ('regression', 1),
    'freesolv': ('regression', 1),
    'lipo': ('regression', 1)
}
def create_model(model_type, n_tasks, mode='classification'):
    """
    Create a graph neural network model.

    Args:
        model_type: Type of model ('gcn', 'gat', 'attentivefp', 'mpnn', 'dmpnn')
        n_tasks: Number of prediction tasks
        mode: 'classification' or 'regression'

    Returns:
        DeepChem model

    Raises:
        ValueError: If model_type is not one of the supported kinds.
    """
    # Hyperparameters common to every architecture.
    shared = dict(
        n_tasks=n_tasks,
        mode=mode,
        batch_size=128,
        learning_rate=0.001,
    )
    if model_type == 'gcn':
        # GCN is the only model that takes an explicit dropout argument here.
        return dc.models.GCNModel(dropout=0.0, **shared)
    if model_type == 'gat':
        return dc.models.GATModel(**shared)
    if model_type == 'attentivefp':
        return dc.models.AttentiveFPModel(**shared)
    if model_type == 'mpnn':
        return dc.models.MPNNModel(**shared)
    if model_type == 'dmpnn':
        return dc.models.DMPNNModel(**shared)
    raise ValueError(f"Unknown model type: {model_type}")
def train_on_molnet(dataset_name, model_type, n_epochs=50):
    """
    Train a graph neural network on a MoleculeNet benchmark dataset.

    Args:
        dataset_name: Name of MoleculeNet dataset (a key of MOLNET_DATASETS)
        model_type: Type of model to train (a key of AVAILABLE_MODELS)
        n_epochs: Number of training epochs

    Returns:
        Trained model and dict of per-split metric scores
    """
    print("=" * 70)
    print(f"Training {AVAILABLE_MODELS[model_type]} on {dataset_name.upper()}")
    print("=" * 70)
    # Get dataset info (task type drives the metric choice below)
    task_type, n_tasks_default = MOLNET_DATASETS[dataset_name]
    # Load dataset with graph featurization
    print(f"\nLoading {dataset_name} dataset with GraphConv featurizer...")
    # Resolve the loader dynamically, e.g. 'tox21' -> dc.molnet.load_tox21
    load_func = getattr(dc.molnet, f'load_{dataset_name}')
    tasks, datasets, transformers = load_func(
        featurizer='GraphConv',
        splitter='scaffold'
    )
    train, valid, test = datasets
    # Actual task count comes from the loader; n_tasks_default is informational only
    n_tasks = len(tasks)
    print(f"\nDataset Information:")
    print(f"  Task type: {task_type}")
    print(f"  Number of tasks: {n_tasks}")
    print(f"  Training samples: {len(train)}")
    print(f"  Validation samples: {len(valid)}")
    print(f"  Test samples: {len(test)}")
    # Create model
    print(f"\nCreating {AVAILABLE_MODELS[model_type]} model...")
    model = create_model(model_type, n_tasks, mode=task_type)
    # Train
    print(f"\nTraining for {n_epochs} epochs...")
    model.fit(train, nb_epoch=n_epochs)
    print("Training complete!")
    # Evaluate on all three splits with task-appropriate metrics
    print("\n" + "=" * 70)
    print("Model Evaluation")
    print("=" * 70)
    if task_type == 'classification':
        metrics = [
            dc.metrics.Metric(dc.metrics.roc_auc_score, name='ROC-AUC'),
            dc.metrics.Metric(dc.metrics.accuracy_score, name='Accuracy'),
            dc.metrics.Metric(dc.metrics.f1_score, name='F1'),
        ]
    else:
        metrics = [
            dc.metrics.Metric(dc.metrics.r2_score, name='RΒ²'),
            dc.metrics.Metric(dc.metrics.mean_absolute_error, name='MAE'),
            dc.metrics.Metric(dc.metrics.root_mean_squared_error, name='RMSE'),
        ]
    results = {}
    for dataset_name_eval, dataset in [('Train', train), ('Valid', valid), ('Test', test)]:
        print(f"\n{dataset_name_eval} Set:")
        scores = model.evaluate(dataset, metrics)
        results[dataset_name_eval] = scores
        for metric_name, score in scores.items():
            print(f"  {metric_name}: {score:.4f}")
    return model, results
def train_on_custom_data(data_path, model_type, task_type, target_cols, smiles_col='smiles', n_epochs=50):
    """
    Train a graph neural network on custom CSV data.

    Args:
        data_path: Path to CSV file
        model_type: Type of model to train (a key of AVAILABLE_MODELS)
        task_type: 'classification' or 'regression'
        target_cols: List of target column names
        smiles_col: Name of SMILES column
        n_epochs: Number of training epochs

    Returns:
        Trained model and test dataset
    """
    print("=" * 70)
    print(f"Training {AVAILABLE_MODELS[model_type]} on Custom Data")
    print("=" * 70)
    # Load and featurize data (molecular-graph features for GNN models)
    print(f"\nLoading data from {data_path}...")
    featurizer = dc.feat.MolGraphConvFeaturizer()
    loader = dc.data.CSVLoader(
        tasks=target_cols,
        feature_field=smiles_col,
        featurizer=featurizer
    )
    dataset = loader.create_dataset(data_path)
    print(f"Loaded {len(dataset)} molecules")
    # Split data — scaffold splitting keeps structurally similar molecules
    # in the same split, giving a harder and more realistic test set.
    print("\nSplitting data with scaffold splitter...")
    splitter = dc.splits.ScaffoldSplitter()
    train, valid, test = splitter.train_valid_test_split(
        dataset,
        frac_train=0.8,
        frac_valid=0.1,
        frac_test=0.1
    )
    print(f"  Training: {len(train)}")
    print(f"  Validation: {len(valid)}")
    print(f"  Test: {len(test)}")
    # Create model (one output head per target column)
    print(f"\nCreating {AVAILABLE_MODELS[model_type]} model...")
    n_tasks = len(target_cols)
    model = create_model(model_type, n_tasks, mode=task_type)
    # Train
    print(f"\nTraining for {n_epochs} epochs...")
    model.fit(train, nb_epoch=n_epochs)
    print("Training complete!")
    # Evaluate on all three splits with task-appropriate metrics
    print("\n" + "=" * 70)
    print("Model Evaluation")
    print("=" * 70)
    if task_type == 'classification':
        metrics = [
            dc.metrics.Metric(dc.metrics.roc_auc_score, name='ROC-AUC'),
            dc.metrics.Metric(dc.metrics.accuracy_score, name='Accuracy'),
        ]
    else:
        metrics = [
            dc.metrics.Metric(dc.metrics.r2_score, name='RΒ²'),
            dc.metrics.Metric(dc.metrics.mean_absolute_error, name='MAE'),
        ]
    for dataset_name, dataset in [('Train', train), ('Valid', valid), ('Test', test)]:
        print(f"\n{dataset_name} Set:")
        scores = model.evaluate(dataset, metrics)
        for metric_name, score in scores.items():
            print(f"  {metric_name}: {score:.4f}")
    return model, test
def main():
    """CLI entry point: parse arguments, validate the data source, and
    dispatch to the MoleculeNet or custom-CSV trainer.

    Returns a shell-style exit status (0 success, 1 failure).
    """
    parser = argparse.ArgumentParser(
        description='Train graph neural networks for molecular property prediction'
    )
    parser.add_argument(
        '--model',
        type=str,
        choices=list(AVAILABLE_MODELS.keys()),
        default='gcn',
        help='Type of graph neural network model'
    )
    parser.add_argument(
        '--dataset',
        type=str,
        choices=list(MOLNET_DATASETS.keys()),
        default=None,
        help='MoleculeNet dataset to use'
    )
    parser.add_argument(
        '--data',
        type=str,
        default=None,
        help='Path to custom CSV file'
    )
    parser.add_argument(
        '--task-type',
        type=str,
        choices=['classification', 'regression'],
        default='classification',
        help='Type of prediction task (for custom data)'
    )
    parser.add_argument(
        '--targets',
        nargs='+',
        default=['target'],
        help='Names of target columns (for custom data)'
    )
    parser.add_argument(
        '--smiles-col',
        type=str,
        default='smiles',
        help='Name of SMILES column'
    )
    parser.add_argument(
        '--epochs',
        type=int,
        default=50,
        help='Number of training epochs'
    )
    args = parser.parse_args()
    # Validate arguments: exactly one data source must be given
    if args.dataset is None and args.data is None:
        print("Error: Must specify either --dataset (MoleculeNet) or --data (custom CSV)",
              file=sys.stderr)
        return 1
    if args.dataset and args.data:
        print("Error: Cannot specify both --dataset and --data",
              file=sys.stderr)
        return 1
    # Train model
    try:
        if args.dataset:
            model, results = train_on_molnet(
                args.dataset,
                args.model,
                n_epochs=args.epochs
            )
        else:
            model, test_set = train_on_custom_data(
                args.data,
                args.model,
                args.task_type,
                args.targets,
                smiles_col=args.smiles_col,
                n_epochs=args.epochs
            )
        print("\n" + "=" * 70)
        print("Training Complete!")
        print("=" * 70)
        return 0
    except Exception as e:
        # Broad catch: surface any training failure as exit code 1 with a traceback.
        print(f"\nError: {e}", file=sys.stderr)
        import traceback
        traceback.print_exc()
        return 1
if __name__ == '__main__':
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/deepchem/scripts/graph_neural_network.py",
"license": "MIT License",
"lines": 291,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/deepchem/scripts/predict_solubility.py | #!/usr/bin/env python3
"""
Molecular Solubility Prediction Script
This script trains a model to predict aqueous solubility from SMILES strings
using the Delaney (ESOL) dataset as an example. Can be adapted for custom datasets.
Usage:
python predict_solubility.py --data custom_data.csv --smiles-col smiles --target-col solubility
python predict_solubility.py # Uses Delaney dataset by default
"""
import argparse
import deepchem as dc
import numpy as np
import sys
def train_solubility_model(data_path=None, smiles_col='smiles', target_col='measured log solubility in mols per litre'):
    """
    Train a solubility prediction model.

    Args:
        data_path: Path to CSV file with SMILES and solubility data. If None, uses Delaney dataset.
        smiles_col: Name of column containing SMILES strings
        target_col: Name of column containing solubility values.
            The default matches the Delaney (ESOL) column name.

    Returns:
        Trained model, test dataset, and transformers
    """
    print("=" * 60)
    print("DeepChem Solubility Prediction")
    print("=" * 60)
    # Load data: either the ESOL benchmark (pre-split, pre-normalized by the
    # loader) or a custom CSV that we featurize/split/normalize ourselves.
    if data_path is None:
        print("\nUsing Delaney (ESOL) benchmark dataset...")
        tasks, datasets, transformers = dc.molnet.load_delaney(
            featurizer='ECFP',
            splitter='scaffold'
        )
        train, valid, test = datasets
    else:
        print(f"\nLoading custom data from {data_path}...")
        # ECFP4 fingerprints (radius=2), 2048 bits — must match the model's
        # n_features below.
        featurizer = dc.feat.CircularFingerprint(radius=2, size=2048)
        loader = dc.data.CSVLoader(
            tasks=[target_col],
            feature_field=smiles_col,
            featurizer=featurizer
        )
        dataset = loader.create_dataset(data_path)
        # Split data — scaffold split keeps similar structures together
        print("Splitting data with scaffold splitter...")
        splitter = dc.splits.ScaffoldSplitter()
        train, valid, test = splitter.train_valid_test_split(
            dataset,
            frac_train=0.8,
            frac_valid=0.1,
            frac_test=0.1
        )
        # Normalize targets; the transformer is fitted on the training split only
        print("Normalizing features and targets...")
        transformers = [
            dc.trans.NormalizationTransformer(
                transform_y=True,
                dataset=train
            )
        ]
        for transformer in transformers:
            train = transformer.transform(train)
            valid = transformer.transform(valid)
            test = transformer.transform(test)
        tasks = [target_col]
    print(f"\nDataset sizes:")
    print(f"  Training: {len(train)} molecules")
    print(f"  Validation: {len(valid)} molecules")
    print(f"  Test: {len(test)} molecules")
    # Create model
    print("\nCreating multitask regressor...")
    model = dc.models.MultitaskRegressor(
        n_tasks=len(tasks),
        n_features=2048,  # ECFP fingerprint size
        layer_sizes=[1000, 500],
        dropouts=0.25,
        learning_rate=0.001,
        batch_size=50
    )
    # Train model
    print("\nTraining model...")
    model.fit(train, nb_epoch=50)
    print("Training complete!")
    # Evaluate model on all three splits
    print("\n" + "=" * 60)
    print("Model Evaluation")
    print("=" * 60)
    metrics = [
        dc.metrics.Metric(dc.metrics.r2_score, name='RΒ²'),
        dc.metrics.Metric(dc.metrics.mean_absolute_error, name='MAE'),
        dc.metrics.Metric(dc.metrics.root_mean_squared_error, name='RMSE'),
    ]
    for dataset_name, dataset in [('Train', train), ('Valid', valid), ('Test', test)]:
        print(f"\n{dataset_name} Set:")
        scores = model.evaluate(dataset, metrics)
        for metric_name, score in scores.items():
            print(f"  {metric_name}: {score:.4f}")
    return model, test, transformers
def predict_new_molecules(model, smiles_list, transformers=None):
    """
    Predict solubility for new molecules.

    Args:
        model: Trained DeepChem model
        smiles_list: List of SMILES strings
        transformers: List of data transformers that were applied during
            training. They are handed to ``model.predict`` so the raw model
            outputs are *un*-transformed back to the original target units.

    Returns:
        Array of predictions in original (un-normalized) units
    """
    print("\n" + "=" * 60)
    print("Predicting New Molecules")
    print("=" * 60)
    # Featurize with the same fingerprint settings used at training time
    # (ECFP4, 2048 bits) so feature dimensions match the model.
    featurizer = dc.feat.CircularFingerprint(radius=2, size=2048)
    features = featurizer.featurize(smiles_list)
    # Create dataset (no labels — inference only)
    new_dataset = dc.data.NumpyDataset(X=features)
    # BUG FIX: the previous code re-applied the training transformers to the
    # prediction dataset. A NormalizationTransformer(transform_y=True) scales
    # labels, not features, so that did not un-scale the model's outputs.
    # DeepChem's Model.predict accepts the training transformers and applies
    # undo_transforms to the raw predictions, returning original units.
    predictions = model.predict(new_dataset, transformers=transformers or [])
    # Display results
    print("\nPredictions:")
    for smiles, pred in zip(smiles_list, predictions):
        print(f"  {smiles:30s} -> {pred[0]:.3f} log(mol/L)")
    return predictions
def main():
    """CLI entry point: train the solubility model, then predict either the
    user-supplied SMILES or a small built-in example set.

    Returns a shell-style exit status (0 success, 1 failure).
    """
    parser = argparse.ArgumentParser(
        description='Train a molecular solubility prediction model'
    )
    parser.add_argument(
        '--data',
        type=str,
        default=None,
        help='Path to CSV file with molecular data'
    )
    parser.add_argument(
        '--smiles-col',
        type=str,
        default='smiles',
        help='Name of column containing SMILES strings'
    )
    parser.add_argument(
        '--target-col',
        type=str,
        default='solubility',
        help='Name of column containing target values'
    )
    parser.add_argument(
        '--predict',
        nargs='+',
        default=None,
        help='SMILES strings to predict after training'
    )
    args = parser.parse_args()
    # Train model (Delaney benchmark when no --data is given)
    try:
        model, test_set, transformers = train_solubility_model(
            data_path=args.data,
            smiles_col=args.smiles_col,
            target_col=args.target_col
        )
    except Exception as e:
        print(f"\nError during training: {e}", file=sys.stderr)
        return 1
    # Make predictions on new molecules if provided
    if args.predict:
        try:
            predict_new_molecules(model, args.predict, transformers)
        except Exception as e:
            print(f"\nError during prediction: {e}", file=sys.stderr)
            return 1
    else:
        # Example predictions on a few well-known small molecules
        example_smiles = [
            'CCO',  # Ethanol
            'CC(=O)O',  # Acetic acid
            'c1ccccc1',  # Benzene
            'CN1C=NC2=C1C(=O)N(C(=O)N2C)C',  # Caffeine
        ]
        predict_new_molecules(model, example_smiles, transformers)
    print("\n" + "=" * 60)
    print("Complete!")
    print("=" * 60)
    return 0
if __name__ == '__main__':
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/deepchem/scripts/predict_solubility.py",
"license": "MIT License",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/deepchem/scripts/transfer_learning.py | #!/usr/bin/env python3
"""
Transfer Learning Script for DeepChem
Use pretrained models (ChemBERTa, GROVER, MolFormer) for molecular property prediction
with transfer learning. Particularly useful for small datasets.
Usage:
python transfer_learning.py --model chemberta --data my_data.csv --target activity
python transfer_learning.py --model grover --dataset bbbp
"""
import argparse
import deepchem as dc
import sys
# Registry of supported pretrained backbones. 'model_id' is the
# HuggingFace Hub identifier where applicable (None when the model
# has its own loading mechanism).
PRETRAINED_MODELS = {
    'chemberta': {
        'name': 'ChemBERTa',
        'description': 'BERT pretrained on 77M molecules from ZINC15',
        'model_id': 'seyonec/ChemBERTa-zinc-base-v1'
    },
    'grover': {
        'name': 'GROVER',
        'description': 'Graph transformer pretrained on 10M molecules',
        'model_id': None  # GROVER uses its own loading mechanism
    },
    'molformer': {
        'name': 'MolFormer',
        'description': 'Transformer pretrained on molecular structures',
        'model_id': 'ibm/MoLFormer-XL-both-10pct'
    }
}
def train_chemberta(train_dataset, valid_dataset, test_dataset, task_type='classification', n_tasks=1, n_epochs=10):
    """
    Fine-tune ChemBERTa on a dataset.

    Args:
        train_dataset: Training dataset
        valid_dataset: Validation dataset
        test_dataset: Test dataset
        task_type: 'classification' or 'regression'
        n_tasks: Number of prediction tasks
        n_epochs: Number of fine-tuning epochs

    Returns:
        Trained model and dict of per-split evaluation results
    """
    print("=" * 70)
    print("Fine-tuning ChemBERTa")
    print("=" * 70)
    print("\nChemBERTa is a BERT model pretrained on 77M molecules from ZINC15.")
    print("It uses SMILES strings as input and has learned rich molecular")
    print("representations that transfer well to downstream tasks.")
    print(f"\nLoading pretrained ChemBERTa model...")
    model = dc.models.HuggingFaceModel(
        model=PRETRAINED_MODELS['chemberta']['model_id'],
        task=task_type,
        n_tasks=n_tasks,
        batch_size=32,
        learning_rate=2e-5  # Lower LR for fine-tuning (standard for BERT-style models)
    )
    print(f"\nFine-tuning for {n_epochs} epochs...")
    print("(This may take a while on the first run as the model is downloaded)")
    model.fit(train_dataset, nb_epoch=n_epochs)
    print("Fine-tuning complete!")
    # Evaluate on all three splits with task-appropriate metrics
    print("\n" + "=" * 70)
    print("Model Evaluation")
    print("=" * 70)
    if task_type == 'classification':
        metrics = [
            dc.metrics.Metric(dc.metrics.roc_auc_score, name='ROC-AUC'),
            dc.metrics.Metric(dc.metrics.accuracy_score, name='Accuracy'),
        ]
    else:
        metrics = [
            dc.metrics.Metric(dc.metrics.r2_score, name='RΒ²'),
            dc.metrics.Metric(dc.metrics.mean_absolute_error, name='MAE'),
        ]
    results = {}
    for name, dataset in [('Train', train_dataset), ('Valid', valid_dataset), ('Test', test_dataset)]:
        print(f"\n{name} Set:")
        scores = model.evaluate(dataset, metrics)
        results[name] = scores
        for metric_name, score in scores.items():
            print(f"  {metric_name}: {score:.4f}")
    return model, results
def train_grover(train_dataset, test_dataset, task_type='classification', n_tasks=1, n_epochs=20):
    """
    Fine-tune GROVER on a dataset.

    NOTE(review): unlike train_chemberta, this helper takes no validation
    dataset and only evaluates on train/test.

    Args:
        train_dataset: Training dataset
        test_dataset: Test dataset
        task_type: 'classification' or 'regression'
        n_tasks: Number of prediction tasks
        n_epochs: Number of fine-tuning epochs

    Returns:
        Trained model and dict of per-split evaluation results
    """
    print("=" * 70)
    print("Fine-tuning GROVER")
    print("=" * 70)
    print("\nGROVER is a graph transformer pretrained on 10M molecules using")
    print("self-supervised learning. It learns both node and graph-level")
    print("representations through masked atom/bond prediction tasks.")
    print(f"\nCreating GROVER model...")
    model = dc.models.GroverModel(
        task=task_type,
        n_tasks=n_tasks,
        model_dir='./grover_pretrained'  # checkpoints/weights are stored here
    )
    print(f"\nFine-tuning for {n_epochs} epochs...")
    model.fit(train_dataset, nb_epoch=n_epochs)
    print("Fine-tuning complete!")
    # Evaluate with task-appropriate metrics
    print("\n" + "=" * 70)
    print("Model Evaluation")
    print("=" * 70)
    if task_type == 'classification':
        metrics = [
            dc.metrics.Metric(dc.metrics.roc_auc_score, name='ROC-AUC'),
            dc.metrics.Metric(dc.metrics.accuracy_score, name='Accuracy'),
        ]
    else:
        metrics = [
            dc.metrics.Metric(dc.metrics.r2_score, name='RΒ²'),
            dc.metrics.Metric(dc.metrics.mean_absolute_error, name='MAE'),
        ]
    results = {}
    for name, dataset in [('Train', train_dataset), ('Test', test_dataset)]:
        print(f"\n{name} Set:")
        scores = model.evaluate(dataset, metrics)
        results[name] = scores
        for metric_name, score in scores.items():
            print(f"  {metric_name}: {score:.4f}")
    return model, results
def load_molnet_dataset(dataset_name, model_type):
    """
    Load a MoleculeNet dataset, picking a featurization that matches the model.

    Args:
        dataset_name: Name of MoleculeNet dataset
        model_type: Type of pretrained model being used

    Returns:
        tasks, train/valid/test datasets, transformers

    Raises:
        ValueError: If dataset_name is not a known MoleculeNet dataset.
    """
    # Map of MoleculeNet datasets to their loader functions
    loaders = {
        'tox21': dc.molnet.load_tox21,
        'bbbp': dc.molnet.load_bbbp,
        'bace': dc.molnet.load_bace_classification,
        'hiv': dc.molnet.load_hiv,
        'delaney': dc.molnet.load_delaney,
        'freesolv': dc.molnet.load_freesolv,
        'lipo': dc.molnet.load_lipo
    }
    if dataset_name not in loaders:
        raise ValueError(f"Unknown dataset: {dataset_name}")
    # Featurization depends on the backbone:
    #   - SMILES-transformer models consume raw strings
    #   - GROVER needs molecular-graph features
    #   - anything else falls back to circular fingerprints
    if model_type in ('chemberta', 'molformer'):
        featurizer = 'Raw'
    elif model_type == 'grover':
        featurizer = 'GraphConv'
    else:
        featurizer = 'ECFP'
    print(f"\nLoading {dataset_name} dataset...")
    tasks, datasets, transformers = loaders[dataset_name](
        featurizer=featurizer,
        splitter='scaffold'
    )
    return tasks, datasets, transformers
def load_custom_dataset(data_path, target_cols, smiles_col, model_type):
    """
    Load a custom CSV dataset.

    Args:
        data_path: Path to CSV file
        target_cols: List of target column names
        smiles_col: Name of SMILES column
        model_type: Type of pretrained model being used (drives featurization)

    Returns:
        train, valid, test datasets
    """
    print(f"\nLoading custom data from {data_path}...")
    # Choose featurizer based on model: SMILES-transformers tokenize raw
    # strings internally, GROVER needs graph features, others use fingerprints.
    if model_type in ['chemberta', 'molformer']:
        featurizer = dc.feat.DummyFeaturizer()  # Models handle featurization
    elif model_type == 'grover':
        featurizer = dc.feat.MolGraphConvFeaturizer()
    else:
        featurizer = dc.feat.CircularFingerprint()
    loader = dc.data.CSVLoader(
        tasks=target_cols,
        feature_field=smiles_col,
        featurizer=featurizer
    )
    dataset = loader.create_dataset(data_path)
    print(f"Loaded {len(dataset)} molecules")
    # Split data — scaffold split avoids structural leakage between splits
    print("Splitting data with scaffold splitter...")
    splitter = dc.splits.ScaffoldSplitter()
    train, valid, test = splitter.train_valid_test_split(
        dataset,
        frac_train=0.8,
        frac_valid=0.1,
        frac_test=0.1
    )
    print(f"  Training: {len(train)}")
    print(f"  Validation: {len(valid)}")
    print(f"  Test: {len(test)}")
    return train, valid, test
def main():
parser = argparse.ArgumentParser(
description='Transfer learning for molecular property prediction'
)
parser.add_argument(
'--model',
type=str,
choices=list(PRETRAINED_MODELS.keys()),
required=True,
help='Pretrained model to use'
)
parser.add_argument(
'--dataset',
type=str,
choices=['tox21', 'bbbp', 'bace', 'hiv', 'delaney', 'freesolv', 'lipo'],
default=None,
help='MoleculeNet dataset to use'
)
parser.add_argument(
'--data',
type=str,
default=None,
help='Path to custom CSV file'
)
parser.add_argument(
'--target',
nargs='+',
default=['target'],
help='Target column name(s) for custom data'
)
parser.add_argument(
'--smiles-col',
type=str,
default='smiles',
help='SMILES column name for custom data'
)
parser.add_argument(
'--task-type',
type=str,
choices=['classification', 'regression'],
default='classification',
help='Type of prediction task'
)
parser.add_argument(
'--epochs',
type=int,
default=10,
help='Number of fine-tuning epochs'
)
args = parser.parse_args()
# Validate arguments
if args.dataset is None and args.data is None:
print("Error: Must specify either --dataset or --data", file=sys.stderr)
return 1
if args.dataset and args.data:
print("Error: Cannot specify both --dataset and --data", file=sys.stderr)
return 1
# Print model info
model_info = PRETRAINED_MODELS[args.model]
print("\n" + "=" * 70)
print(f"Transfer Learning with {model_info['name']}")
print("=" * 70)
print(f"\n{model_info['description']}")
try:
# Load dataset
if args.dataset:
tasks, datasets, transformers = load_molnet_dataset(args.dataset, args.model)
train, valid, test = datasets
task_type = 'classification' if args.dataset in ['tox21', 'bbbp', 'bace', 'hiv'] else 'regression'
n_tasks = len(tasks)
else:
train, valid, test = load_custom_dataset(
args.data,
args.target,
args.smiles_col,
args.model
)
task_type = args.task_type
n_tasks = len(args.target)
# Train model
if args.model == 'chemberta':
model, results = train_chemberta(
train, valid, test,
task_type=task_type,
n_tasks=n_tasks,
n_epochs=args.epochs
)
elif args.model == 'grover':
model, results = train_grover(
train, test,
task_type=task_type,
n_tasks=n_tasks,
n_epochs=args.epochs
)
else:
print(f"Error: Model {args.model} not yet implemented", file=sys.stderr)
return 1
print("\n" + "=" * 70)
print("Transfer Learning Complete!")
print("=" * 70)
print("\nTip: Pretrained models often work best with:")
print(" - Small datasets (< 1000 samples)")
print(" - Lower learning rates (1e-5 to 5e-5)")
print(" - Fewer epochs (5-20)")
print(" - Avoiding overfitting through early stopping")
return 0
except Exception as e:
print(f"\nError: {e}", file=sys.stderr)
import traceback
traceback.print_exc()
return 1
# Standard CLI entry point: propagate main()'s integer return code to the shell.
if __name__ == '__main__':
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/deepchem/scripts/transfer_learning.py",
"license": "MIT License",
"lines": 319,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/deeptools/scripts/validate_files.py | #!/usr/bin/env python3
"""
deepTools File Validation Script
Validates BAM, bigWig, and BED files for deepTools analysis.
Checks for file existence, proper indexing, and basic format requirements.
"""
import os
import sys
import argparse
from pathlib import Path
def check_file_exists(filepath):
    """Return (ok, message) indicating whether *filepath* exists and is readable."""
    exists = os.path.exists(filepath)
    if not exists:
        return False, f"File not found: {filepath}"
    # Existence is not enough: the analysis tools also need read permission.
    readable = os.access(filepath, os.R_OK)
    if not readable:
        return False, f"File not readable: {filepath}"
    return True, f"β File exists: {filepath}"
def check_bam_index(bam_file):
    """Check if a BAM file has an index (.bam.bai or .bai).

    Both index naming conventions are accepted:
      * ``<name>.bam.bai`` - samtools default (".bai" appended to the path)
      * ``<name>.bai``     - trailing ".bam" extension swapped for ".bai"

    Returns:
        (bool, str): success flag and a human-readable message.
    """
    bai_file1 = bam_file + ".bai"
    # Swap only the final extension.  The previous str.replace(".bam", ".bai")
    # also rewrote ".bam" occurring earlier in the path (e.g. "run.bam.d/x.bam").
    root, ext = os.path.splitext(bam_file)
    bai_file2 = root + ".bai" if ext == ".bam" else bai_file1
    if os.path.exists(bai_file1):
        return True, f"β BAM index found: {bai_file1}"
    elif os.path.exists(bai_file2):
        return True, f"β BAM index found: {bai_file2}"
    else:
        return False, f"β BAM index missing for: {bam_file}\n   Run: samtools index {bam_file}"
def check_bigwig_file(bw_file):
    """Basic sanity check for a bigWig file based on its on-disk size."""
    # Real bigWig files carry binary headers and indexes, so a file this
    # small is almost certainly truncated or corrupt.
    size_bytes = os.path.getsize(bw_file)
    if size_bytes < 100:
        return False, f"β bigWig file suspiciously small: {bw_file} ({size_bytes} bytes)"
    return True, f"β bigWig file appears valid: {bw_file} ({size_bytes} bytes)"
def check_bed_file(bed_file):
    """Basic validation of BED file format.

    Checks that the file contains at least one data line, that each of the
    first few data lines has >= 3 tab-separated columns, and that the
    start/end coordinates are integers with start < end.  Comment (``#``)
    lines and UCSC-style ``track``/``browser`` header lines are ignored.

    Returns:
        (bool, str): success flag and a human-readable message.
    """
    try:
        with open(bed_file, 'r') as f:
            lines = [
                line.strip() for line in f
                if line.strip()
                and not line.startswith('#')
                # "track"/"browser" header lines are legal in BED files but
                # do not follow the columnar format, so skip rather than fail.
                and not line.startswith(('track', 'browser'))
            ]
        if len(lines) == 0:
            return False, f"β BED file is empty: {bed_file}"
        # Spot-check only the first few data lines to keep validation fast.
        for i, line in enumerate(lines[:10], 1):
            fields = line.split('\t')
            if len(fields) < 3:
                return False, f"β BED file format error at line {i}: expected at least 3 columns\n   Line: {line}"
            try:
                start = int(fields[1])
                end = int(fields[2])
                if start >= end:
                    return False, f"β BED file error at line {i}: start >= end ({start} >= {end})"
            except ValueError:
                return False, f"β BED file format error at line {i}: start and end must be integers\n   Line: {line}"
        return True, f"β BED file format appears valid: {bed_file} ({len(lines)} regions)"
    except Exception as e:
        return False, f"β Error reading BED file: {bed_file}\n   Error: {str(e)}"
def _validate_group(header, paths, format_check, messages):
    """Validate one group of files, appending human-readable messages.

    Each file is first checked for existence/readability; only files that
    pass get the group-specific *format_check* (index or format validation).

    Args:
        header: Section title, e.g. "BAM Files".
        paths: Iterable of file paths in this group.
        format_check: Callable path -> (bool, str) run after the existence check.
        messages: List that result messages are appended to (mutated in place).

    Returns:
        bool: True when every file in the group passed both checks.
    """
    messages.append(f"\n=== Validating {header} ===")
    group_ok = True
    for path in paths:
        exists, msg = check_file_exists(path)
        messages.append(msg)
        if not exists:
            group_ok = False
            continue  # no point format-checking a missing file
        passed, msg = format_check(path)
        messages.append(msg)
        if not passed:
            group_ok = False
    return group_ok


def validate_files(bam_files=None, bigwig_files=None, bed_files=None):
    """
    Validate all provided files.

    Args:
        bam_files: List of BAM file paths
        bigwig_files: List of bigWig file paths
        bed_files: List of BED file paths

    Returns:
        Tuple of (success: bool, messages: list)
    """
    messages = []
    all_success = True
    # The three groups share the same existence-then-format pattern; only the
    # section header and the format checker differ (previously three
    # copy-pasted loops).
    if bam_files:
        all_success = _validate_group("BAM Files", bam_files, check_bam_index, messages) and all_success
    if bigwig_files:
        all_success = _validate_group("bigWig Files", bigwig_files, check_bigwig_file, messages) and all_success
    if bed_files:
        all_success = _validate_group("BED Files", bed_files, check_bed_file, messages) and all_success
    return all_success, messages
def main():
    """CLI entry point: parse the file lists, validate them, and exit 0/1."""
    parser = argparse.ArgumentParser(
        description="Validate files for deepTools analysis",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Validate BAM files
  python validate_files.py --bam sample1.bam sample2.bam
  # Validate all file types
  python validate_files.py --bam input.bam chip.bam --bed peaks.bed --bigwig signal.bw
  # Validate from a directory
  python validate_files.py --bam *.bam --bed *.bed
        """
    )
    parser.add_argument('--bam', nargs='+', help='BAM files to validate')
    parser.add_argument('--bigwig', '--bw', nargs='+', help='bigWig files to validate')
    parser.add_argument('--bed', nargs='+', help='BED files to validate')
    args = parser.parse_args()
    # Require at least one file group; otherwise show usage and exit non-zero.
    if not any([args.bam, args.bigwig, args.bed]):
        parser.print_help()
        sys.exit(1)
    # Run validation over every provided group.
    success, messages = validate_files(
        bam_files=args.bam,
        bigwig_files=args.bigwig,
        bed_files=args.bed
    )
    # Print per-file results.
    for msg in messages:
        print(msg)
    # Summary banner; exit code mirrors the overall success flag.
    print("\n" + "="*50)
    if success:
        print("β All validations passed!")
        sys.exit(0)
    else:
        print("β Some validations failed. Please fix the issues above.")
        sys.exit(1)
# Script entry point; main() calls sys.exit() itself.
if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/deeptools/scripts/validate_files.py",
"license": "MIT License",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/diffdock/scripts/analyze_results.py | #!/usr/bin/env python3
"""
DiffDock Results Analysis Script
This script analyzes DiffDock prediction results, extracting confidence scores,
ranking predictions, and generating summary reports.
Usage:
python analyze_results.py results/output_dir/
python analyze_results.py results/ --top 50 --threshold 0.0
python analyze_results.py results/ --export summary.csv
"""
import argparse
import os
import sys
import json
from pathlib import Path
from collections import defaultdict
import re
def parse_confidence_scores(results_dir):
    """
    Parse confidence scores from a DiffDock output directory.

    Args:
        results_dir: Path to DiffDock results directory

    Returns:
        dict: Mapping of complex names to their predictions and scores
    """
    results_path = Path(results_dir)
    parsed = {}
    if list(results_path.glob("*.sdf")):
        # SDF files directly in the directory -> single-complex output.
        parsed['single_complex'] = parse_single_complex(results_path)
    else:
        # Batch layout: one subdirectory per complex.
        for entry in results_path.iterdir():
            if not entry.is_dir():
                continue
            complex_data = parse_single_complex(entry)
            if complex_data:
                parsed[entry.name] = complex_data
    return parsed
def parse_single_complex(complex_dir):
    """Parse ranked SDF predictions for one complex directory.

    Returns a {'predictions': [...]} dict sorted by rank, or None when no
    ranked SDF files are present.
    """
    predictions = []
    for sdf_path in complex_dir.glob("*.sdf"):
        # Filenames look like "rank_1.sdf" or "index_0_rank_1.sdf".
        match = re.search(r'rank_(\d+)', sdf_path.name)
        if not match:
            continue
        predictions.append({
            'rank': int(match.group(1)),
            'file': sdf_path.name,
            'path': str(sdf_path),
            'confidence': extract_confidence_score(sdf_path, complex_dir),
        })
    if not predictions:
        return None
    predictions.sort(key=lambda entry: entry['rank'])
    return {'predictions': predictions}
def extract_confidence_score(sdf_file, complex_dir):
    """
    Extract the confidence score for a single predicted pose.

    Tries multiple methods, in order:
    1. Read from a confidence_scores.txt file (one score per rank, 1-based)
    2. Parse a "confidence: <float>" property from the SDF file itself
    3. Extract from the filename if present (e.g. "rank_1_conf_0.95.sdf")

    Returns:
        float | None: the score, or None when no method succeeds.
    """
    # Method 1: confidence_scores.txt
    confidence_file = complex_dir / "confidence_scores.txt"
    if confidence_file.exists():
        try:
            with open(confidence_file) as f:
                lines = f.readlines()
            rank_match = re.search(r'rank_(\d+)', sdf_file.name)
            if rank_match:
                rank = int(rank_match.group(1))
                # Ranks are 1-based; guard the lower bound too, otherwise a
                # "rank_0" file would index lines[-1] and silently return the
                # LAST pose's score.
                if 1 <= rank <= len(lines):
                    return float(lines[rank - 1].strip())
        except Exception:
            pass
    # Method 2: parse a confidence property out of the SDF text.
    try:
        with open(sdf_file) as f:
            content = f.read()
        conf_match = re.search(r'confidence[:\s]+(-?\d+\.?\d*)', content, re.IGNORECASE)
        if conf_match:
            return float(conf_match.group(1))
    except Exception:
        pass
    # Method 3: score embedded in the filename.
    conf_match = re.search(r'conf_(-?\d+\.?\d*)', sdf_file.name)
    if conf_match:
        return float(conf_match.group(1))
    return None
def classify_confidence(score):
    """Map a DiffDock confidence score onto a qualitative category.

    Buckets: High (> 0), Moderate (-1.5, 0], Low (<= -1.5), Unknown (None).
    """
    if score is None:
        return "Unknown"
    if score > 0:
        return "High"
    if score > -1.5:
        return "Moderate"
    return "Low"
def print_summary(results, top_n=None, min_confidence=None):
    """Print a formatted per-complex summary plus overall statistics.

    Args:
        results: Mapping of complex name -> {'predictions': [...]} as produced
            by parse_confidence_scores().
        top_n: When given, show at most this many predictions per complex.
        min_confidence: When given, hide predictions whose confidence is
            missing or below this threshold.

    Note: the overall statistics at the bottom are computed from the
    *filtered* predictions only, so they reflect the same view that is
    printed above them.
    """
    print("\n" + "="*80)
    print("DiffDock Results Summary")
    print("="*80)
    # Pool of (complex, rank, confidence) tuples for the overall statistics.
    all_predictions = []
    for complex_name, data in results.items():
        predictions = data.get('predictions', [])
        print(f"\n{complex_name}")
        print("-" * 80)
        if not predictions:
            print("  No predictions found")
            continue
        # Filter by confidence if specified (this also drops score-less poses).
        filtered_predictions = predictions
        if min_confidence is not None:
            filtered_predictions = [p for p in predictions if p['confidence'] is not None and p['confidence'] >= min_confidence]
        # Limit to top N if specified (predictions arrive sorted by rank).
        if top_n is not None:
            filtered_predictions = filtered_predictions[:top_n]
        for pred in filtered_predictions:
            confidence = pred['confidence']
            confidence_class = classify_confidence(confidence)
            conf_str = f"{confidence:>7.3f}" if confidence is not None else "    N/A"
            print(f"  Rank {pred['rank']:2d}: Confidence = {conf_str} ({confidence_class:8s}) | {pred['file']}")
            # Feed the pool used for overall statistics (scored poses only).
            if confidence is not None:
                all_predictions.append((complex_name, pred['rank'], confidence))
        # Per-complex statistics, only when at least one pose has a score.
        if filtered_predictions and any(p['confidence'] is not None for p in filtered_predictions):
            confidences = [p['confidence'] for p in filtered_predictions if p['confidence'] is not None]
            print(f"\n  Statistics: {len(filtered_predictions)} predictions")
            print(f"    Mean confidence: {sum(confidences)/len(confidences):.3f}")
            print(f"    Max confidence: {max(confidences):.3f}")
            print(f"    Min confidence: {min(confidences):.3f}")
    # Overall statistics across every complex.
    if all_predictions:
        print("\n" + "="*80)
        print("Overall Statistics")
        print("="*80)
        confidences = [conf for _, _, conf in all_predictions]
        print(f"  Total predictions: {len(all_predictions)}")
        print(f"  Total complexes: {len(results)}")
        print(f"  Mean confidence: {sum(confidences)/len(confidences):.3f}")
        print(f"  Max confidence: {max(confidences):.3f}")
        print(f"  Min confidence: {min(confidences):.3f}")
        # Buckets match classify_confidence(): High > 0, Moderate (-1.5, 0],
        # Low <= -1.5.  NOTE: the "Low (< -1.5)" label is slightly off — the
        # bucket includes exactly -1.5.
        high = sum(1 for c in confidences if c > 0)
        moderate = sum(1 for c in confidences if -1.5 < c <= 0)
        low = sum(1 for c in confidences if c <= -1.5)
        print(f"\n  Confidence distribution:")
        print(f"    High (> 0): {high:4d} ({100*high/len(confidences):5.1f}%)")
        print(f"    Moderate (-1.5 to 0): {moderate:4d} ({100*moderate/len(confidences):5.1f}%)")
        print(f"    Low (< -1.5): {low:4d} ({100*low/len(confidences):5.1f}%)")
    print("\n" + "="*80)
def export_to_csv(results, output_path):
    """Write every prediction to a CSV file, one row per predicted pose."""
    import csv
    header = ['complex_name', 'rank', 'confidence', 'confidence_class', 'file_path']
    with open(output_path, 'w', newline='') as handle:
        writer = csv.writer(handle)
        writer.writerow(header)
        for complex_name, data in results.items():
            for pred in data.get('predictions', []):
                score = pred['confidence']
                writer.writerow([
                    complex_name,
                    pred['rank'],
                    # Missing scores are emitted as empty cells.
                    score if score is not None else '',
                    classify_confidence(score),
                    pred['path'],
                ])
    print(f"β Exported results to: {output_path}")
def get_top_predictions(results, n=10, sort_by='confidence'):
    """Collect the *n* highest-confidence predictions across all complexes.

    Predictions without a confidence score are skipped.  Each returned dict
    is the original prediction plus a 'complex' key naming its source.
    """
    pooled = [
        {'complex': name, **pred}
        for name, data in results.items()
        for pred in data.get('predictions', [])
        if pred['confidence'] is not None
    ]
    # Highest confidence first.
    pooled.sort(key=lambda entry: entry['confidence'], reverse=True)
    return pooled[:n]
def print_top_predictions(results, n=10):
    """Print the *n* best-scoring predictions pooled over every complex."""
    ranked = get_top_predictions(results, n)
    print("\n" + "="*80)
    print(f"Top {n} Predictions Across All Complexes")
    print("="*80)
    for position, pred in enumerate(ranked, 1):
        label = classify_confidence(pred['confidence'])
        print(f"{position:2d}. {pred['complex']:30s} | Rank {pred['rank']:2d} | "
              f"Confidence: {pred['confidence']:7.3f} ({label})")
    print("="*80)
def main():
    """CLI entry point: parse arguments, analyze results, optionally export.

    Returns 0 on success, 1 when the directory is missing or holds no results.
    """
    parser = argparse.ArgumentParser(
        description='Analyze DiffDock prediction results',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Analyze all results in directory
  python analyze_results.py results/output_dir/
  # Show only top 5 predictions per complex
  python analyze_results.py results/ --top 5
  # Filter by confidence threshold
  python analyze_results.py results/ --threshold 0.0
  # Export to CSV
  python analyze_results.py results/ --export summary.csv
  # Show top 20 predictions across all complexes
  python analyze_results.py results/ --best 20
        """
    )
    parser.add_argument('results_dir', help='Path to DiffDock results directory')
    parser.add_argument('--top', '-t', type=int,
                        help='Show only top N predictions per complex')
    parser.add_argument('--threshold', type=float,
                        help='Minimum confidence threshold')
    parser.add_argument('--export', '-e', metavar='FILE',
                        help='Export results to CSV file')
    parser.add_argument('--best', '-b', type=int, metavar='N',
                        help='Show top N predictions across all complexes')
    args = parser.parse_args()
    # Validate the results directory before doing any parsing work.
    if not os.path.exists(args.results_dir):
        print(f"Error: Results directory not found: {args.results_dir}")
        return 1
    # Parse all per-complex results.
    print(f"Analyzing results in: {args.results_dir}")
    results = parse_confidence_scores(args.results_dir)
    if not results:
        print("No DiffDock results found in directory")
        return 1
    # Per-complex summary (optionally truncated/filtered).
    print_summary(results, top_n=args.top, min_confidence=args.threshold)
    # Pooled ranking across complexes, only when requested.
    if args.best:
        print_top_predictions(results, args.best)
    # CSV export is optional.
    if args.export:
        export_to_csv(results, args.export)
    return 0
# Standard CLI entry point: propagate main()'s integer return code to the shell.
if __name__ == '__main__':
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/diffdock/scripts/analyze_results.py",
"license": "MIT License",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/diffdock/scripts/prepare_batch_csv.py | #!/usr/bin/env python3
"""
DiffDock Batch CSV Preparation and Validation Script
This script helps prepare and validate CSV files for DiffDock batch processing.
It checks for required columns, validates file paths, and ensures SMILES strings
are properly formatted.
Usage:
python prepare_batch_csv.py input.csv --validate
python prepare_batch_csv.py --create --output batch_input.csv
"""
import argparse
import os
import sys
import pandas as pd
from pathlib import Path
# RDKit is an optional dependency: when it cannot be imported, SMILES strings
# are accepted without structural validation (see validate_smiles below).
try:
    from rdkit import Chem
    from rdkit import RDLogger
    # Silence RDKit's verbose per-molecule logging.
    RDLogger.DisableLog('rdApp.*')
    RDKIT_AVAILABLE = True
except ImportError:
    RDKIT_AVAILABLE = False
    print("Warning: RDKit not available. SMILES validation will be skipped.")
def validate_smiles(smiles_string):
    """Validate a SMILES string using RDKit.

    Returns:
        (bool, str): validity flag and an explanatory message.  When RDKit is
        unavailable the string is accepted unchecked.
    """
    if not RDKIT_AVAILABLE:
        return True, "RDKit not available for validation"
    try:
        parsed = Chem.MolFromSmiles(smiles_string)
    except Exception as exc:
        return False, str(exc)
    # MolFromSmiles signals a parse failure by returning None, not raising.
    if parsed is None:
        return False, "Invalid SMILES structure"
    return True, "Valid SMILES"
def validate_file_path(file_path, base_dir=None):
    """Check that *file_path* points at an existing file.

    Empty/NaN paths are accepted (DiffDock falls back to the
    protein_sequence column).  Relative paths are resolved against
    *base_dir* when one is given.
    """
    if pd.isna(file_path) or file_path == "":
        return True, "Empty (will use protein_sequence)"
    resolved = Path(base_dir) / file_path if base_dir else Path(file_path)
    if resolved.exists():
        return True, f"File exists: {resolved}"
    return False, f"File not found: {resolved}"
def validate_csv(csv_path, base_dir=None):
    """
    Validate a DiffDock batch input CSV file.

    Args:
        csv_path: Path to CSV file
        base_dir: Base directory for relative paths (default: CSV directory)

    Returns:
        bool: True if validation passes
        list: List of validation messages
    """
    messages = []
    valid = True
    # Read CSV
    try:
        df = pd.read_csv(csv_path)
        messages.append(f"β Successfully read CSV with {len(df)} rows")
    except Exception as e:
        messages.append(f"β Error reading CSV: {e}")
        return False, messages
    # Check required columns
    required_cols = ['complex_name', 'protein_path', 'ligand_description', 'protein_sequence']
    missing_cols = [col for col in required_cols if col not in df.columns]
    if missing_cols:
        messages.append(f"β Missing required columns: {', '.join(missing_cols)}")
        # Row validation below indexes these columns; bail out early instead
        # of raising KeyError mid-loop.
        return False, messages
    messages.append("β All required columns present")
    # Resolve relative paths against the CSV's own directory by default.
    if base_dir is None:
        base_dir = Path(csv_path).parent
    # Extensions DiffDock accepts for ligand structure files.
    ligand_file_exts = ('.sdf', '.mol2', '.mol', '.pdb')
    # Validate each row
    for idx, row in df.iterrows():
        row_msgs = []
        # Check complex name
        if pd.isna(row['complex_name']) or row['complex_name'] == "":
            row_msgs.append("Missing complex_name")
            valid = False
        # Exactly one protein source is expected per row.
        has_protein_path = not pd.isna(row['protein_path']) and row['protein_path'] != ""
        has_protein_seq = not pd.isna(row['protein_sequence']) and row['protein_sequence'] != ""
        if not has_protein_path and not has_protein_seq:
            row_msgs.append("Must provide either protein_path or protein_sequence")
            valid = False
        elif has_protein_path and has_protein_seq:
            row_msgs.append("Warning: Both protein_path and protein_sequence provided, will use protein_path")
        # Validate protein path if provided
        if has_protein_path:
            file_valid, msg = validate_file_path(row['protein_path'], base_dir)
            if not file_valid:
                row_msgs.append(f"Protein file issue: {msg}")
                valid = False
        # Validate ligand description
        if pd.isna(row['ligand_description']) or row['ligand_description'] == "":
            row_msgs.append("Missing ligand_description")
            valid = False
        else:
            ligand_desc = row['ligand_description']
            # Decide whether this is a structure file or a SMILES string.
            # Do NOT key on "/": stereo SMILES such as "C/C=C/C" legitimately
            # contain slashes.  Treat it as a file only when it exists on disk,
            # looks like a Windows path, or has a known structure extension.
            looks_like_file = (
                os.path.exists(ligand_desc)
                or "\\" in ligand_desc
                or str(ligand_desc).lower().endswith(ligand_file_exts)
            )
            if looks_like_file:
                file_valid, msg = validate_file_path(ligand_desc, base_dir)
                if not file_valid:
                    row_msgs.append(f"Ligand file issue: {msg}")
                    valid = False
            else:
                smiles_valid, msg = validate_smiles(ligand_desc)
                if not smiles_valid:
                    row_msgs.append(f"SMILES issue: {msg}")
                    valid = False
        if row_msgs:
            messages.append(f"\nRow {idx + 1} ({row.get('complex_name', 'unnamed')}):")
            for msg in row_msgs:
                messages.append(f"  - {msg}")
    # Summary
    messages.append(f"\n{'='*60}")
    if valid:
        messages.append("β CSV validation PASSED - ready for DiffDock")
    else:
        messages.append("β CSV validation FAILED - please fix issues above")
    return valid, messages
def create_template_csv(output_path, num_examples=3):
    """Write a template batch CSV with built-in example rows.

    Args:
        output_path: Destination path for the CSV file.
        num_examples: How many of the example rows to include (up to 3).

    Returns:
        pandas.DataFrame: The template that was written.
    """
    # GFP protein sequence used to demonstrate the sequence-only workflow.
    gfp_sequence = 'MSKGEELFTGVVPILVELDGDVNGHKFSVSGEGEGDATYGKLTLKFICTTGKLPVPWPTLVTTFSYGVQCFSRYPDHMKQHDFFKSAMPEGYVQERTIFFKDDGNYKTRAEVKFEGDTLVNRIELKGIDFKEDGNILGHKLEYNYNSHNVYIMADKQKNGIKVNFKIRHNIEDGSVQLADHYQQNTPIGDGPVLLPDNHYLSTQSALSKDPNEKRDHMVLLEFVTAAGITHGMDELYK'
    columns = {
        'complex_name': ['example1', 'example2', 'example3'],
        'protein_path': ['protein1.pdb', '', 'protein3.pdb'],
        'ligand_description': [
            'CC(=O)Oc1ccccc1C(=O)O',  # Aspirin SMILES
            'COc1ccc(C#N)cc1',        # Example SMILES
            'ligand.sdf',             # Example ligand file path
        ],
        'protein_sequence': ['', gfp_sequence, ''],
    }
    template = pd.DataFrame(
        {name: values[:num_examples] for name, values in columns.items()}
    )
    template.to_csv(output_path, index=False)
    return template
def main():
    """CLI entry point: create a template CSV or validate an existing one.

    Returns 0 on success; 1 on validation failure or missing arguments.
    """
    parser = argparse.ArgumentParser(
        description='Prepare and validate DiffDock batch CSV files',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Validate existing CSV
  python prepare_batch_csv.py input.csv --validate
  # Create template CSV
  python prepare_batch_csv.py --create --output batch_template.csv
  # Create template with 5 example rows
  python prepare_batch_csv.py --create --output template.csv --num-examples 5
  # Validate with custom base directory for relative paths
  python prepare_batch_csv.py input.csv --validate --base-dir /path/to/data/
        """
    )
    parser.add_argument('csv_file', nargs='?', help='CSV file to validate')
    parser.add_argument('--validate', action='store_true',
                        help='Validate the CSV file')
    parser.add_argument('--create', action='store_true',
                        help='Create a template CSV file')
    parser.add_argument('--output', '-o', help='Output path for template CSV')
    parser.add_argument('--num-examples', type=int, default=3,
                        help='Number of example rows in template (default: 3)')
    parser.add_argument('--base-dir', help='Base directory for relative file paths')
    args = parser.parse_args()
    # Mode 1: create a template CSV and show how to use it.
    if args.create:
        output_path = args.output or 'diffdock_batch_template.csv'
        df = create_template_csv(output_path, args.num_examples)
        print(f"β Created template CSV: {output_path}")
        print(f"\nTemplate contents:")
        print(df.to_string(index=False))
        print(f"\nEdit this file with your protein-ligand pairs and run with:")
        print(f"  python -m inference --config default_inference_args.yaml \\")
        print(f"    --protein_ligand_csv {output_path} --out_dir results/")
        return 0
    # Mode 2: validate (also triggered by a bare positional csv_file argument).
    if args.validate or args.csv_file:
        if not args.csv_file:
            print("Error: CSV file required for validation")
            parser.print_help()
            return 1
        if not os.path.exists(args.csv_file):
            print(f"Error: CSV file not found: {args.csv_file}")
            return 1
        print(f"Validating: {args.csv_file}")
        print("="*60)
        valid, messages = validate_csv(args.csv_file, args.base_dir)
        for msg in messages:
            print(msg)
        return 0 if valid else 1
    # No action specified: show usage and fail.
    parser.print_help()
    return 1
# Standard CLI entry point: propagate main()'s integer return code to the shell.
if __name__ == '__main__':
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/diffdock/scripts/prepare_batch_csv.py",
"license": "MIT License",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/diffdock/scripts/setup_check.py | #!/usr/bin/env python3
"""
DiffDock Environment Setup Checker
This script verifies that the DiffDock environment is properly configured
and all dependencies are available.
Usage:
python setup_check.py
python setup_check.py --verbose
"""
import argparse
import sys
import os
from pathlib import Path
def check_python_version():
    """Verify the interpreter is Python 3.8+ and print the result."""
    import sys
    major, minor, micro = sys.version_info[:3]
    print("Checking Python version...")
    meets_minimum = major == 3 and minor >= 8
    if meets_minimum:
        print(f"  β Python {major}.{minor}.{micro}")
    else:
        print(f"  β Python {major}.{minor}.{micro} "
              f"(requires Python 3.8 or higher)")
    return meets_minimum
def check_package(package_name, import_name=None, version_attr='__version__'):
    """Check if a Python package is installed and report its version.

    Args:
        package_name: Display name used in the printed report line.
        import_name: Module to import (defaults to package_name).
        version_attr: Attribute holding the version string.  May be a dotted
            path (e.g. "rdBase.__version__") resolved attribute by attribute.

    Returns:
        bool: True when the module imports successfully.
    """
    import importlib
    if import_name is None:
        import_name = package_name
    try:
        # importlib.import_module returns the leaf module even for dotted
        # names, unlike bare __import__ which returns the top-level package.
        module = importlib.import_module(import_name)
    except ImportError:
        print(f"  β {package_name:20s} (not installed)")
        return False
    # getattr() does not understand dotted names, so walk the path one
    # attribute at a time (fixes e.g. rdkit's "rdBase.__version__", which the
    # previous single getattr call always reported as 'unknown').
    version = module
    for attr in version_attr.split('.'):
        version = getattr(version, attr, None)
        if version is None:
            version = 'unknown'
            break
    print(f"  β {package_name:20s} (version: {version})")
    return True
def check_pytorch():
    """Report PyTorch installation and CUDA availability.

    Returns:
        (bool, bool): (pytorch_installed, cuda_available).
    """
    print("\nChecking PyTorch...")
    try:
        import torch
    except ImportError:
        print(f"  β PyTorch not installed")
        return False, False
    print(f"  β PyTorch version: {torch.__version__}")
    if not torch.cuda.is_available():
        print(f"  β CUDA not available (will run on CPU)")
        return True, False
    print(f"  β CUDA available: {torch.cuda.get_device_name(0)}")
    print(f"    - CUDA version: {torch.version.cuda}")
    print(f"    - Number of GPUs: {torch.cuda.device_count()}")
    return True, True
def check_pytorch_geometric():
    """Check the PyTorch Geometric family of packages, one line each."""
    print("\nChecking PyTorch Geometric...")
    required = [
        ('torch-geometric', 'torch_geometric'),
        ('torch-scatter', 'torch_scatter'),
        ('torch-sparse', 'torch_sparse'),
        ('torch-cluster', 'torch_cluster'),
    ]
    # Materialize the list so EVERY package is checked (and printed) even
    # after the first failure; all() on a generator would short-circuit.
    statuses = [check_package(pkg, mod) for pkg, mod in required]
    return all(statuses)
def check_core_dependencies():
    """Check core DiffDock dependencies, printing one report line each."""
    print("\nChecking core dependencies...")
    dependencies = [
        ('numpy', 'numpy'),
        ('scipy', 'scipy'),
        ('pandas', 'pandas'),
        ('rdkit', 'rdkit', 'rdBase.__version__'),
        ('biopython', 'Bio', '__version__'),
        ('pytorch-lightning', 'pytorch_lightning'),
        ('PyYAML', 'yaml'),
    ]
    ok = True
    for spec in dependencies:
        name, module = spec[0], spec[1]
        # A third tuple element, when present, overrides the version attribute.
        attr = spec[2] if len(spec) > 2 else '__version__'
        if not check_package(name, module, attr):
            ok = False
    return ok
def check_esm():
    """Check ESM (protein language model) installation."""
    print("\nChecking ESM (for protein sequence folding)...")
    try:
        import esm
    except ImportError:
        print(f"  β ESM not installed (needed for protein sequence folding)")
        print(f"    Install with: pip install fair-esm")
        return False
    # fair-esm does not always expose __version__, so fall back gracefully.
    version = esm.__version__ if hasattr(esm, '__version__') else 'unknown'
    print(f"  β ESM installed (version: {version})")
    return True
def check_diffdock_installation():
    """Check whether the DiffDock repository files are present in the CWD.

    Returns True when at least one of the repository's key files is found.
    """
    print("\nChecking DiffDock installation...")
    key_files = ['inference.py', 'default_inference_args.yaml', 'environment.yml']
    found_files = [name for name in key_files if os.path.exists(name)]
    if found_files:
        print(f"  β Found DiffDock files in current directory:")
        for name in found_files:
            print(f"    - {name}")
    else:
        print(f"  β DiffDock files not found in current directory")
        print(f"    Current directory: {os.getcwd()}")
        print(f"    Make sure you're in the DiffDock repository root")
    # Model checkpoints are optional — DiffDock downloads them on first run —
    # so their absence does not affect the return value.
    score_dir = Path('./workdir/v1.1/score_model')
    confidence_dir = Path('./workdir/v1.1/confidence_model')
    if score_dir.exists() and confidence_dir.exists():
        print(f"  β Model checkpoints found")
    else:
        print(f"  β Model checkpoints not found in ./workdir/v1.1/")
        print(f"    Models will be downloaded on first run")
    return len(found_files) > 0
def print_installation_instructions():
    """Print installation instructions if setup is incomplete."""
    print("\n" + "="*80)
    print("Installation Instructions")
    print("="*80)
    # The whole message is one triple-quoted literal so the shell commands
    # keep their own line breaks.
    print("""
If DiffDock is not installed, follow these steps:
1. Clone the repository:
   git clone https://github.com/gcorso/DiffDock.git
   cd DiffDock
2. Create conda environment:
   conda env create --file environment.yml
   conda activate diffdock
3. Verify installation:
   python setup_check.py
For Docker installation:
   docker pull rbgcsail/diffdock
   docker run -it --gpus all --entrypoint /bin/bash rbgcsail/diffdock
   micromamba activate diffdock
For more information, visit: https://github.com/gcorso/DiffDock
""")
def print_performance_notes(has_cuda):
    """Print performance notes based on available hardware.

    Args:
        has_cuda: Whether check_pytorch() reported an available CUDA device.
    """
    print("\n" + "="*80)
    print("Performance Notes")
    print("="*80)
    if has_cuda:
        print("""
β GPU detected - DiffDock will run efficiently
Expected performance:
  - First run: ~2-5 minutes (pre-computing SO(2)/SO(3) tables)
  - Subsequent runs: ~10-60 seconds per complex (depending on settings)
  - Batch processing: Highly efficient with GPU
""")
    else:
        print("""
β No GPU detected - DiffDock will run on CPU
Expected performance:
  - CPU inference is SIGNIFICANTLY slower than GPU
  - Single complex: Several minutes to hours
  - Batch processing: Not recommended on CPU
Recommendation: Use GPU for practical applications
  - Cloud options: Google Colab, AWS, or other cloud GPU services
  - Local: Install CUDA-capable GPU
""")
def main():
    """Run every environment check, print a summary, and return an exit code."""
    parser = argparse.ArgumentParser(
        description='Check DiffDock environment setup',
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Show detailed version information')
    # Parsed for --help/flag validation; no flag currently alters behavior.
    parser.parse_args()
    print("="*80)
    print("DiffDock Environment Setup Checker")
    print("="*80)
    # Each entry: (display name, passed?).  Order matches the printed output.
    results = [("Python version", check_python_version())]
    pytorch_ok, has_cuda = check_pytorch()
    results.append(("PyTorch", pytorch_ok))
    results.append(("PyTorch Geometric", check_pytorch_geometric()))
    results.append(("Core dependencies", check_core_dependencies()))
    results.append(("ESM", check_esm()))
    results.append(("DiffDock files", check_diffdock_installation()))
    print("\n" + "="*80)
    print("Summary")
    print("="*80)
    for check_name, passed in results:
        marker = "β PASS" if passed else "β FAIL"
        print(f"  {marker:8s} - {check_name}")
    if all(passed for _, passed in results):
        print("\nβ All checks passed! DiffDock is ready to use.")
        print_performance_notes(has_cuda)
        return 0
    print("\nβ Some checks failed. Please install missing dependencies.")
    print_installation_instructions()
    return 1
# Standard CLI entry point: propagate main()'s integer return code to the shell.
if __name__ == '__main__':
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/diffdock/scripts/setup_check.py",
"license": "MIT License",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/document-skills/docx/ooxml/scripts/validation/redlining.py | """
Validator for tracked changes in Word documents.
"""
import subprocess
import tempfile
import zipfile
from pathlib import Path
class RedliningValidator:
"""Validator for tracked changes in Word documents."""
    def __init__(self, unpacked_dir, original_docx, verbose=False):
        """Store paths and the WordprocessingML namespace map.

        Args:
            unpacked_dir: Directory holding the unpacked (edited) docx tree.
            original_docx: Path to the pristine .docx used as the baseline.
            verbose: When True, print a message on successful validation.
        """
        self.unpacked_dir = Path(unpacked_dir)
        self.original_docx = Path(original_docx)
        self.verbose = verbose
        # Prefix map for ElementTree queries against WordprocessingML.
        self.namespaces = {
            "w": "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
        }
    def validate(self):
        """Main validation method that returns True if valid, False otherwise.

        Strategy: strip every tracked change authored by "Scientific-Writer"
        from both the edited document and the pristine original, then compare
        the remaining visible text.  If the texts differ, some edit was made
        outside of a properly attributed tracked change.
        """
        # Verify the unpacked directory exists and has the docx structure.
        modified_file = self.unpacked_dir / "word" / "document.xml"
        if not modified_file.exists():
            print(f"FAILED - Modified document.xml not found at {modified_file}")
            return False
        # Fast path: when Scientific-Writer authored no <w:ins>/<w:del> at
        # all, there is nothing to validate.
        try:
            import xml.etree.ElementTree as ET
            tree = ET.parse(modified_file)
            root = tree.getroot()
            # Collect all tracked deletions and insertions in the document.
            del_elements = root.findall(".//w:del", self.namespaces)
            ins_elements = root.findall(".//w:ins", self.namespaces)
            # Keep only the changes attributed to Scientific-Writer.
            sw_del_elements = [
                elem
                for elem in del_elements
                if elem.get(f"{{{self.namespaces['w']}}}author") == "Scientific-Writer"
            ]
            sw_ins_elements = [
                elem
                for elem in ins_elements
                if elem.get(f"{{{self.namespaces['w']}}}author") == "Scientific-Writer"
            ]
            # Redlining validation is only needed if tracked changes by
            # Scientific-Writer have been used.
            if not sw_del_elements and not sw_ins_elements:
                if self.verbose:
                    print("PASSED - No tracked changes by Scientific-Writer found.")
                return True
        except Exception:
            # If the XML cannot be parsed here, fall through to the full
            # comparison below, which reports parse errors explicitly.
            pass
        # Unpack the pristine original docx into a throwaway directory.
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            try:
                with zipfile.ZipFile(self.original_docx, "r") as zip_ref:
                    zip_ref.extractall(temp_path)
            except Exception as e:
                print(f"FAILED - Error unpacking original docx: {e}")
                return False
            original_file = temp_path / "word" / "document.xml"
            if not original_file.exists():
                print(
                    f"FAILED - Original document.xml not found in {self.original_docx}"
                )
                return False
            # Parse both XML files for the redlining comparison.
            try:
                import xml.etree.ElementTree as ET
                modified_tree = ET.parse(modified_file)
                modified_root = modified_tree.getroot()
                original_tree = ET.parse(original_file)
                original_root = original_tree.getroot()
            except ET.ParseError as e:
                print(f"FAILED - Error parsing XML files: {e}")
                return False
            # Remove Scientific-Writer's tracked changes from both trees so
            # any remaining textual difference must be an untracked edit.
            self._remove_sw_tracked_changes(original_root)
            self._remove_sw_tracked_changes(modified_root)
            # Extract and compare the visible text content.
            modified_text = self._extract_text_content(modified_root)
            original_text = self._extract_text_content(original_root)
            if modified_text != original_text:
                # Report detailed differences to help locate the bad edit.
                error_message = self._generate_detailed_diff(
                    original_text, modified_text
                )
                print(error_message)
                return False
            if self.verbose:
                print("PASSED - All changes by Scientific-Writer are properly tracked")
            return True
def _generate_detailed_diff(self, original_text, modified_text):
"""Generate detailed word-level differences using git word diff."""
error_parts = [
"FAILED - Document text doesn't match after removing Scientific-Writer's tracked changes",
"",
"Likely causes:",
" 1. Modified text inside another author's <w:ins> or <w:del> tags",
" 2. Made edits without proper tracked changes",
" 3. Didn't nest <w:del> inside <w:ins> when deleting another's insertion",
"",
"For pre-redlined documents, use correct patterns:",
" - To reject another's INSERTION: Nest <w:del> inside their <w:ins>",
" - To restore another's DELETION: Add new <w:ins> AFTER their <w:del>",
"",
]
# Show git word diff
git_diff = self._get_git_word_diff(original_text, modified_text)
if git_diff:
error_parts.extend(["Differences:", "============", git_diff])
else:
error_parts.append("Unable to generate word diff (git not available)")
return "\n".join(error_parts)
def _get_git_word_diff(self, original_text, modified_text):
"""Generate word diff using git with character-level precision."""
try:
with tempfile.TemporaryDirectory() as temp_dir:
temp_path = Path(temp_dir)
# Create two files
original_file = temp_path / "original.txt"
modified_file = temp_path / "modified.txt"
original_file.write_text(original_text, encoding="utf-8")
modified_file.write_text(modified_text, encoding="utf-8")
# Try character-level diff first for precise differences
result = subprocess.run(
[
"git",
"diff",
"--word-diff=plain",
"--word-diff-regex=.", # Character-by-character diff
"-U0", # Zero lines of context - show only changed lines
"--no-index",
str(original_file),
str(modified_file),
],
capture_output=True,
text=True,
)
if result.stdout.strip():
# Clean up the output - remove git diff header lines
lines = result.stdout.split("\n")
# Skip the header lines (diff --git, index, +++, ---, @@)
content_lines = []
in_content = False
for line in lines:
if line.startswith("@@"):
in_content = True
continue
if in_content and line.strip():
content_lines.append(line)
if content_lines:
return "\n".join(content_lines)
# Fallback to word-level diff if character-level is too verbose
result = subprocess.run(
[
"git",
"diff",
"--word-diff=plain",
"-U0", # Zero lines of context
"--no-index",
str(original_file),
str(modified_file),
],
capture_output=True,
text=True,
)
if result.stdout.strip():
lines = result.stdout.split("\n")
content_lines = []
in_content = False
for line in lines:
if line.startswith("@@"):
in_content = True
continue
if in_content and line.strip():
content_lines.append(line)
return "\n".join(content_lines)
except (subprocess.CalledProcessError, FileNotFoundError, Exception):
# Git not available or other error, return None to use fallback
pass
return None
    def _remove_sw_tracked_changes(self, root):
        """Remove tracked changes authored by Scientific-Writer from the XML root.

        Two passes over the tree (in-place mutation):
        1. w:ins elements by Scientific-Writer are deleted outright — their
           content was an addition and should disappear entirely.
        2. w:del elements by Scientific-Writer are unwrapped — their children
           are restored to the parent at the deletion's position, with
           w:delText renamed back to w:t.
        """
        ins_tag = f"{{{self.namespaces['w']}}}ins"
        del_tag = f"{{{self.namespaces['w']}}}del"
        author_attr = f"{{{self.namespaces['w']}}}author"
        # Remove w:ins elements
        for parent in root.iter():
            # Collect first, then remove: mutating a parent while iterating
            # its children would skip siblings.
            to_remove = []
            for child in parent:
                if child.tag == ins_tag and child.get(author_attr) == "Scientific-Writer":
                    to_remove.append(child)
            for elem in to_remove:
                parent.remove(elem)
        # Unwrap content in w:del elements where author is "Scientific-Writer"
        deltext_tag = f"{{{self.namespaces['w']}}}delText"
        t_tag = f"{{{self.namespaces['w']}}}t"
        for parent in root.iter():
            to_process = []
            for child in parent:
                if child.tag == del_tag and child.get(author_attr) == "Scientific-Writer":
                    to_process.append((child, list(parent).index(child)))
            # Process in reverse order to maintain indices
            for del_elem, del_index in reversed(to_process):
                # Convert w:delText to w:t before moving
                for elem in del_elem.iter():
                    if elem.tag == deltext_tag:
                        elem.tag = t_tag
                # Move all children of w:del to its parent before removing w:del
                # (reversed + insert at a fixed index preserves child order).
                for child in reversed(list(del_elem)):
                    parent.insert(del_index, child)
                parent.remove(del_elem)
def _extract_text_content(self, root):
"""Extract text content from Word XML, preserving paragraph structure.
Empty paragraphs are skipped to avoid false positives when tracked
insertions add only structural elements without text content.
"""
p_tag = f"{{{self.namespaces['w']}}}p"
t_tag = f"{{{self.namespaces['w']}}}t"
paragraphs = []
for p_elem in root.findall(f".//{p_tag}"):
# Get all text elements within this paragraph
text_parts = []
for t_elem in p_elem.findall(f".//{t_tag}"):
if t_elem.text:
text_parts.append(t_elem.text)
paragraph_text = "".join(text_parts)
# Skip empty paragraphs - they don't affect content validation
if paragraph_text:
paragraphs.append(paragraph_text)
return "\n".join(paragraphs)
# Library module: it is imported by the validation pipeline and has no CLI.
if __name__ == "__main__":
    raise RuntimeError("This module should not be run directly.")
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/document-skills/docx/ooxml/scripts/validation/redlining.py",
"license": "MIT License",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
#!/usr/bin/env python3
# Source: davila7/claude-code-templates — cli-tool/components/skills/scientific/document-skills/docx/scripts/document.py
"""
Library for working with Word documents: comments, tracked changes, and editing.
Usage:
from skills.docx.scripts.document import Document
# Initialize
doc = Document('workspace/unpacked')
doc = Document('workspace/unpacked', author="John Doe", initials="JD")
# Find nodes
node = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "1"})
node = doc["word/document.xml"].get_node(tag="w:p", line_number=10)
# Add comments
doc.add_comment(start=node, end=node, text="Comment text")
doc.reply_to_comment(parent_comment_id=0, text="Reply text")
# Suggest tracked changes
doc["word/document.xml"].suggest_deletion(node) # Delete content
doc["word/document.xml"].revert_insertion(ins_node) # Reject insertion
doc["word/document.xml"].revert_deletion(del_node) # Reject deletion
# Save
doc.save()
"""
import html
import random
import shutil
import tempfile
from datetime import datetime, timezone
from pathlib import Path
from defusedxml import minidom
from ooxml.scripts.pack import pack_document
from ooxml.scripts.validation.docx import DOCXSchemaValidator
from ooxml.scripts.validation.redlining import RedliningValidator
from .utilities import XMLEditor
# Path to template files
TEMPLATE_DIR = Path(__file__).parent / "templates"
class DocxXMLEditor(XMLEditor):
    """XMLEditor that automatically applies RSID, author, and date to new elements.

    Automatically adds attributes to elements that support them when inserting new content:
    - w:rsidR, w:rsidRDefault, w:rsidP (for w:p and w:r elements)
    - w:author and w:date (for w:ins, w:del, w:comment elements)
    - w:id (for w:ins and w:del elements)

    Attributes:
        dom (defusedxml.minidom.Document): The DOM document for direct manipulation
    """

    def __init__(
        self, xml_path, rsid: str, author: str = "Scientific-Writer", initials: str = "SW"
    ):
        """Initialize with required RSID and optional author.

        Args:
            xml_path: Path to XML file to edit
            rsid: RSID to automatically apply to new elements
            author: Author name for tracked changes and comments (default: "Scientific-Writer")
            initials: Author initials (default: "SW")
        """
        super().__init__(xml_path)
        self.rsid = rsid
        self.author = author
        self.initials = initials

    def _get_next_change_id(self):
        """Get the next available change ID by checking all tracked change elements."""
        max_id = -1
        for tag in ("w:ins", "w:del"):
            elements = self.dom.getElementsByTagName(tag)
            for elem in elements:
                change_id = elem.getAttribute("w:id")
                if change_id:
                    try:
                        max_id = max(max_id, int(change_id))
                    except ValueError:
                        # Non-numeric IDs are ignored when computing the max.
                        pass
        return max_id + 1

    def _ensure_w16du_namespace(self):
        """Ensure w16du namespace is declared on the root element."""
        root = self.dom.documentElement
        if not root.hasAttribute("xmlns:w16du"):  # type: ignore
            root.setAttribute(  # type: ignore
                "xmlns:w16du",
                "http://schemas.microsoft.com/office/word/2023/wordml/word16du",
            )

    def _ensure_w16cex_namespace(self):
        """Ensure w16cex namespace is declared on the root element."""
        root = self.dom.documentElement
        if not root.hasAttribute("xmlns:w16cex"):  # type: ignore
            root.setAttribute(  # type: ignore
                "xmlns:w16cex",
                "http://schemas.microsoft.com/office/word/2018/wordml/cex",
            )

    def _ensure_w14_namespace(self):
        """Ensure w14 namespace is declared on the root element."""
        root = self.dom.documentElement
        if not root.hasAttribute("xmlns:w14"):  # type: ignore
            root.setAttribute(  # type: ignore
                "xmlns:w14",
                "http://schemas.microsoft.com/office/word/2010/wordml",
            )

    def _inject_attributes_to_nodes(self, nodes):
        """Inject RSID, author, and date attributes into DOM nodes where applicable.

        Adds attributes to elements that support them:
        - w:r: gets w:rsidR (or w:rsidDel if inside w:del)
        - w:p: gets w:rsidR, w:rsidRDefault, w:rsidP, w14:paraId, w14:textId
        - w:t: gets xml:space="preserve" if text has leading/trailing whitespace
        - w:ins, w:del: get w:id, w:author, w:date, w16du:dateUtc
        - w:comment: gets w:author, w:date, w:initials
        - w16cex:commentExtensible: gets w16cex:dateUtc

        Existing attributes are never overwritten.

        Args:
            nodes: List of DOM nodes to process
        """
        from datetime import datetime, timezone

        timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

        def is_inside_deletion(elem):
            """Check if element is inside a w:del element."""
            parent = elem.parentNode
            while parent:
                if parent.nodeType == parent.ELEMENT_NODE and parent.tagName == "w:del":
                    return True
                parent = parent.parentNode
            return False

        def add_rsid_to_p(elem):
            # Paragraph revision-save IDs plus Word 2010 paragraph/text IDs.
            if not elem.hasAttribute("w:rsidR"):
                elem.setAttribute("w:rsidR", self.rsid)
            if not elem.hasAttribute("w:rsidRDefault"):
                elem.setAttribute("w:rsidRDefault", self.rsid)
            if not elem.hasAttribute("w:rsidP"):
                elem.setAttribute("w:rsidP", self.rsid)
            # Add w14:paraId and w14:textId if not present
            if not elem.hasAttribute("w14:paraId"):
                self._ensure_w14_namespace()
                elem.setAttribute("w14:paraId", _generate_hex_id())
            if not elem.hasAttribute("w14:textId"):
                self._ensure_w14_namespace()
                elem.setAttribute("w14:textId", _generate_hex_id())

        def add_rsid_to_r(elem):
            # Use w:rsidDel for <w:r> inside <w:del>, otherwise w:rsidR
            if is_inside_deletion(elem):
                if not elem.hasAttribute("w:rsidDel"):
                    elem.setAttribute("w:rsidDel", self.rsid)
            else:
                if not elem.hasAttribute("w:rsidR"):
                    elem.setAttribute("w:rsidR", self.rsid)

        def add_tracked_change_attrs(elem):
            # Auto-assign w:id if not present
            if not elem.hasAttribute("w:id"):
                elem.setAttribute("w:id", str(self._get_next_change_id()))
            if not elem.hasAttribute("w:author"):
                elem.setAttribute("w:author", self.author)
            if not elem.hasAttribute("w:date"):
                elem.setAttribute("w:date", timestamp)
            # Add w16du:dateUtc for tracked changes (same as w:date since we generate UTC timestamps)
            if elem.tagName in ("w:ins", "w:del") and not elem.hasAttribute(
                "w16du:dateUtc"
            ):
                self._ensure_w16du_namespace()
                elem.setAttribute("w16du:dateUtc", timestamp)

        def add_comment_attrs(elem):
            if not elem.hasAttribute("w:author"):
                elem.setAttribute("w:author", self.author)
            if not elem.hasAttribute("w:date"):
                elem.setAttribute("w:date", timestamp)
            if not elem.hasAttribute("w:initials"):
                elem.setAttribute("w:initials", self.initials)

        def add_comment_extensible_date(elem):
            # Add w16cex:dateUtc for comment extensible elements
            if not elem.hasAttribute("w16cex:dateUtc"):
                self._ensure_w16cex_namespace()
                elem.setAttribute("w16cex:dateUtc", timestamp)

        def add_xml_space_to_t(elem):
            # Add xml:space="preserve" to w:t if text has leading/trailing whitespace
            if (
                elem.firstChild
                and elem.firstChild.nodeType == elem.firstChild.TEXT_NODE
            ):
                text = elem.firstChild.data
                if text and (text[0].isspace() or text[-1].isspace()):
                    if not elem.hasAttribute("xml:space"):
                        elem.setAttribute("xml:space", "preserve")

        for node in nodes:
            if node.nodeType != node.ELEMENT_NODE:
                continue
            # Handle the node itself
            if node.tagName == "w:p":
                add_rsid_to_p(node)
            elif node.tagName == "w:r":
                add_rsid_to_r(node)
            elif node.tagName == "w:t":
                add_xml_space_to_t(node)
            elif node.tagName in ("w:ins", "w:del"):
                add_tracked_change_attrs(node)
            elif node.tagName == "w:comment":
                add_comment_attrs(node)
            elif node.tagName == "w16cex:commentExtensible":
                add_comment_extensible_date(node)
            # Process descendants (getElementsByTagName doesn't return the element itself)
            for elem in node.getElementsByTagName("w:p"):
                add_rsid_to_p(elem)
            for elem in node.getElementsByTagName("w:r"):
                add_rsid_to_r(elem)
            for elem in node.getElementsByTagName("w:t"):
                add_xml_space_to_t(elem)
            for tag in ("w:ins", "w:del"):
                for elem in node.getElementsByTagName(tag):
                    add_tracked_change_attrs(elem)
            for elem in node.getElementsByTagName("w:comment"):
                add_comment_attrs(elem)
            for elem in node.getElementsByTagName("w16cex:commentExtensible"):
                add_comment_extensible_date(elem)

    def replace_node(self, elem, new_content):
        """Replace node with automatic attribute injection."""
        nodes = super().replace_node(elem, new_content)
        self._inject_attributes_to_nodes(nodes)
        return nodes

    def insert_after(self, elem, xml_content):
        """Insert after with automatic attribute injection."""
        nodes = super().insert_after(elem, xml_content)
        self._inject_attributes_to_nodes(nodes)
        return nodes

    def insert_before(self, elem, xml_content):
        """Insert before with automatic attribute injection."""
        nodes = super().insert_before(elem, xml_content)
        self._inject_attributes_to_nodes(nodes)
        return nodes

    def append_to(self, elem, xml_content):
        """Append to with automatic attribute injection."""
        nodes = super().append_to(elem, xml_content)
        self._inject_attributes_to_nodes(nodes)
        return nodes

    def revert_insertion(self, elem):
        """Reject an insertion by wrapping its content in a deletion.

        Wraps all runs inside w:ins in w:del, converting w:t to w:delText.
        Can process a single w:ins element or a container element with multiple w:ins.

        Args:
            elem: Element to process (w:ins, w:p, w:body, etc.)

        Returns:
            list: List containing the processed element(s)

        Raises:
            ValueError: If the element contains no w:ins elements

        Example:
            # Reject a single insertion
            ins = doc["word/document.xml"].get_node(tag="w:ins", attrs={"w:id": "5"})
            doc["word/document.xml"].revert_insertion(ins)
            # Reject all insertions in a paragraph
            para = doc["word/document.xml"].get_node(tag="w:p", line_number=42)
            doc["word/document.xml"].revert_insertion(para)
        """
        # Collect insertions
        ins_elements = []
        if elem.tagName == "w:ins":
            ins_elements.append(elem)
        else:
            ins_elements.extend(elem.getElementsByTagName("w:ins"))
        # Validate that there are insertions to reject
        if not ins_elements:
            raise ValueError(
                f"revert_insertion requires w:ins elements. "
                f"The provided element <{elem.tagName}> contains no insertions. "
            )
        # Process all insertions - wrap all children in w:del
        for ins_elem in ins_elements:
            runs = list(ins_elem.getElementsByTagName("w:r"))
            if not runs:
                continue
            # Create deletion wrapper
            del_wrapper = self.dom.createElement("w:del")
            # Process each run
            for run in runs:
                # Convert w:t -> w:delText and w:rsidR -> w:rsidDel
                if run.hasAttribute("w:rsidR"):
                    run.setAttribute("w:rsidDel", run.getAttribute("w:rsidR"))
                    run.removeAttribute("w:rsidR")
                elif not run.hasAttribute("w:rsidDel"):
                    run.setAttribute("w:rsidDel", self.rsid)
                for t_elem in list(run.getElementsByTagName("w:t")):
                    del_text = self.dom.createElement("w:delText")
                    # Copy ALL child nodes (not just firstChild) to handle entities
                    while t_elem.firstChild:
                        del_text.appendChild(t_elem.firstChild)
                    for i in range(t_elem.attributes.length):
                        attr = t_elem.attributes.item(i)
                        del_text.setAttribute(attr.name, attr.value)
                    t_elem.parentNode.replaceChild(del_text, t_elem)
            # Move all children from ins to del wrapper
            while ins_elem.firstChild:
                del_wrapper.appendChild(ins_elem.firstChild)
            # Add del wrapper back to ins (nested w:del-inside-w:ins marks
            # the insertion as rejected while keeping both change records)
            ins_elem.appendChild(del_wrapper)
            # Inject attributes to the deletion wrapper
            self._inject_attributes_to_nodes([del_wrapper])
        return [elem]

    def revert_deletion(self, elem):
        """Reject a deletion by re-inserting the deleted content.

        Creates w:ins elements after each w:del, copying deleted content and
        converting w:delText back to w:t.
        Can process a single w:del element or a container element with multiple w:del.

        Args:
            elem: Element to process (w:del, w:p, w:body, etc.)

        Returns:
            list: If elem is w:del, returns [elem, new_ins]. Otherwise returns [elem].

        Raises:
            ValueError: If the element contains no w:del elements

        Example:
            # Reject a single deletion - returns [w:del, w:ins]
            del_elem = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "3"})
            nodes = doc["word/document.xml"].revert_deletion(del_elem)
            # Reject all deletions in a paragraph - returns [para]
            para = doc["word/document.xml"].get_node(tag="w:p", line_number=42)
            nodes = doc["word/document.xml"].revert_deletion(para)
        """
        # Collect deletions FIRST - before we modify the DOM
        del_elements = []
        is_single_del = elem.tagName == "w:del"
        if is_single_del:
            del_elements.append(elem)
        else:
            del_elements.extend(elem.getElementsByTagName("w:del"))
        # Validate that there are deletions to reject
        if not del_elements:
            raise ValueError(
                f"revert_deletion requires w:del elements. "
                f"The provided element <{elem.tagName}> contains no deletions. "
            )
        # Track created insertion (only relevant if elem is a single w:del)
        created_insertion = None
        # Process all deletions - create insertions that copy the deleted content
        for del_elem in del_elements:
            # Clone the deleted runs and convert them to insertions
            runs = list(del_elem.getElementsByTagName("w:r"))
            if not runs:
                continue
            # Create insertion wrapper
            ins_elem = self.dom.createElement("w:ins")
            for run in runs:
                # Clone the run
                new_run = run.cloneNode(True)
                # Convert w:delText -> w:t
                for del_text in list(new_run.getElementsByTagName("w:delText")):
                    t_elem = self.dom.createElement("w:t")
                    # Copy ALL child nodes (not just firstChild) to handle entities
                    while del_text.firstChild:
                        t_elem.appendChild(del_text.firstChild)
                    for i in range(del_text.attributes.length):
                        attr = del_text.attributes.item(i)
                        t_elem.setAttribute(attr.name, attr.value)
                    del_text.parentNode.replaceChild(t_elem, del_text)
                # Update run attributes: w:rsidDel -> w:rsidR
                if new_run.hasAttribute("w:rsidDel"):
                    new_run.setAttribute("w:rsidR", new_run.getAttribute("w:rsidDel"))
                    new_run.removeAttribute("w:rsidDel")
                elif not new_run.hasAttribute("w:rsidR"):
                    new_run.setAttribute("w:rsidR", self.rsid)
                ins_elem.appendChild(new_run)
            # Insert the new insertion after the deletion
            nodes = self.insert_after(del_elem, ins_elem.toxml())
            # If processing a single w:del, track the created insertion
            if is_single_del and nodes:
                created_insertion = nodes[0]
        # Return based on input type
        if is_single_del and created_insertion:
            return [elem, created_insertion]
        else:
            return [elem]

    @staticmethod
    def suggest_paragraph(xml_content: str) -> str:
        """Transform paragraph XML to add tracked change wrapping for insertion.

        Wraps runs in <w:ins> and adds <w:ins/> to w:rPr in w:pPr for numbered lists.

        Args:
            xml_content: XML string containing a <w:p> element

        Returns:
            str: Transformed XML with tracked change wrapping
        """
        wrapper = f'<root xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">{xml_content}</root>'
        doc = minidom.parseString(wrapper)
        para = doc.getElementsByTagName("w:p")[0]
        # Ensure w:pPr exists
        pPr_list = para.getElementsByTagName("w:pPr")
        if not pPr_list:
            pPr = doc.createElement("w:pPr")
            para.insertBefore(
                pPr, para.firstChild
            ) if para.firstChild else para.appendChild(pPr)
        else:
            pPr = pPr_list[0]
        # Ensure w:rPr exists in w:pPr
        rPr_list = pPr.getElementsByTagName("w:rPr")
        if not rPr_list:
            rPr = doc.createElement("w:rPr")
            pPr.appendChild(rPr)
        else:
            rPr = rPr_list[0]
        # Add <w:ins/> to w:rPr (marks the paragraph mark itself as inserted)
        ins_marker = doc.createElement("w:ins")
        rPr.insertBefore(
            ins_marker, rPr.firstChild
        ) if rPr.firstChild else rPr.appendChild(ins_marker)
        # Wrap all non-pPr children in <w:ins>
        ins_wrapper = doc.createElement("w:ins")
        for child in [c for c in para.childNodes if c.nodeName != "w:pPr"]:
            para.removeChild(child)
            ins_wrapper.appendChild(child)
        para.appendChild(ins_wrapper)
        return para.toxml()

    def suggest_deletion(self, elem):
        """Mark a w:r or w:p element as deleted with tracked changes (in-place DOM manipulation).

        For w:r: wraps in <w:del>, converts <w:t> to <w:delText>, preserves w:rPr
        For w:p (regular): wraps content in <w:del>, converts <w:t> to <w:delText>
        For w:p (numbered list): adds <w:del/> to w:rPr in w:pPr, wraps content in <w:del>

        Args:
            elem: A w:r or w:p DOM element without existing tracked changes

        Returns:
            Element: The modified element

        Raises:
            ValueError: If element has existing tracked changes or invalid structure
        """
        if elem.nodeName == "w:r":
            # Check for existing w:delText
            if elem.getElementsByTagName("w:delText"):
                raise ValueError("w:r element already contains w:delText")
            # Convert w:t -> w:delText
            for t_elem in list(elem.getElementsByTagName("w:t")):
                del_text = self.dom.createElement("w:delText")
                # Copy ALL child nodes (not just firstChild) to handle entities
                while t_elem.firstChild:
                    del_text.appendChild(t_elem.firstChild)
                # Preserve attributes like xml:space
                for i in range(t_elem.attributes.length):
                    attr = t_elem.attributes.item(i)
                    del_text.setAttribute(attr.name, attr.value)
                t_elem.parentNode.replaceChild(del_text, t_elem)
            # Update run attributes: w:rsidR -> w:rsidDel
            if elem.hasAttribute("w:rsidR"):
                elem.setAttribute("w:rsidDel", elem.getAttribute("w:rsidR"))
                elem.removeAttribute("w:rsidR")
            elif not elem.hasAttribute("w:rsidDel"):
                elem.setAttribute("w:rsidDel", self.rsid)
            # Wrap in w:del
            del_wrapper = self.dom.createElement("w:del")
            parent = elem.parentNode
            parent.insertBefore(del_wrapper, elem)
            parent.removeChild(elem)
            del_wrapper.appendChild(elem)
            # Inject attributes to the deletion wrapper
            self._inject_attributes_to_nodes([del_wrapper])
            return del_wrapper
        elif elem.nodeName == "w:p":
            # Check for existing tracked changes
            if elem.getElementsByTagName("w:ins") or elem.getElementsByTagName("w:del"):
                raise ValueError("w:p element already contains tracked changes")
            # Check if it's a numbered list item
            pPr_list = elem.getElementsByTagName("w:pPr")
            is_numbered = pPr_list and pPr_list[0].getElementsByTagName("w:numPr")
            if is_numbered:
                # Add <w:del/> to w:rPr in w:pPr (marks the paragraph mark deleted)
                pPr = pPr_list[0]
                rPr_list = pPr.getElementsByTagName("w:rPr")
                if not rPr_list:
                    rPr = self.dom.createElement("w:rPr")
                    pPr.appendChild(rPr)
                else:
                    rPr = rPr_list[0]
                # Add <w:del/> marker
                del_marker = self.dom.createElement("w:del")
                rPr.insertBefore(
                    del_marker, rPr.firstChild
                ) if rPr.firstChild else rPr.appendChild(del_marker)
            # Convert w:t -> w:delText in all runs
            for t_elem in list(elem.getElementsByTagName("w:t")):
                del_text = self.dom.createElement("w:delText")
                # Copy ALL child nodes (not just firstChild) to handle entities
                while t_elem.firstChild:
                    del_text.appendChild(t_elem.firstChild)
                # Preserve attributes like xml:space
                for i in range(t_elem.attributes.length):
                    attr = t_elem.attributes.item(i)
                    del_text.setAttribute(attr.name, attr.value)
                t_elem.parentNode.replaceChild(del_text, t_elem)
            # Update run attributes: w:rsidR -> w:rsidDel
            for run in elem.getElementsByTagName("w:r"):
                if run.hasAttribute("w:rsidR"):
                    run.setAttribute("w:rsidDel", run.getAttribute("w:rsidR"))
                    run.removeAttribute("w:rsidR")
                elif not run.hasAttribute("w:rsidDel"):
                    run.setAttribute("w:rsidDel", self.rsid)
            # Wrap all non-pPr children in <w:del>
            del_wrapper = self.dom.createElement("w:del")
            for child in [c for c in elem.childNodes if c.nodeName != "w:pPr"]:
                elem.removeChild(child)
                del_wrapper.appendChild(child)
            elem.appendChild(del_wrapper)
            # Inject attributes to the deletion wrapper
            self._inject_attributes_to_nodes([del_wrapper])
            return elem
        else:
            raise ValueError(f"Element must be w:r or w:p, got {elem.nodeName}")
def _generate_hex_id() -> str:
"""Generate random 8-character hex ID for para/durable IDs.
Values are constrained to be less than 0x7FFFFFFF per OOXML spec:
- paraId must be < 0x80000000
- durableId must be < 0x7FFFFFFF
We use the stricter constraint (0x7FFFFFFF) for both.
"""
return f"{random.randint(1, 0x7FFFFFFE):08X}"
def _generate_rsid() -> str:
"""Generate random 8-character hex RSID."""
return "".join(random.choices("0123456789ABCDEF", k=8))
class Document:
"""Manages comments in unpacked Word documents."""
    def __init__(
        self,
        unpacked_dir,
        rsid=None,
        track_revisions=False,
        author="Scientific-Writer",
        initials="SW",
    ):
        """
        Initialize with path to unpacked Word document directory.

        Copies the unpacked document into a private temp directory and packs
        a baseline .docx from the original for later validation. All edits
        happen in the temp copy until save().
        Automatically sets up comment infrastructure (people.xml, RSIDs).

        Args:
            unpacked_dir: Path to unpacked DOCX directory (must contain word/ subdirectory)
            rsid: Optional RSID to use for all comment elements. If not provided, one will be generated.
            track_revisions: If True, enables track revisions in settings.xml (default: False)
            author: Default author name for comments (default: "Scientific-Writer")
            initials: Default author initials for comments (default: "SW")

        Raises:
            ValueError: If unpacked_dir does not exist or is not a directory.
        """
        self.original_path = Path(unpacked_dir)
        if not self.original_path.exists() or not self.original_path.is_dir():
            raise ValueError(f"Directory not found: {unpacked_dir}")
        # Create temporary directory with subdirectories for unpacked content and baseline
        self.temp_dir = tempfile.mkdtemp(prefix="docx_")
        self.unpacked_path = Path(self.temp_dir) / "unpacked"
        shutil.copytree(self.original_path, self.unpacked_path)
        # Pack original directory into temporary .docx for validation baseline (outside unpacked dir)
        self.original_docx = Path(self.temp_dir) / "original.docx"
        pack_document(self.original_path, self.original_docx, validate=False)
        self.word_path = self.unpacked_path / "word"
        # Generate RSID if not provided
        self.rsid = rsid if rsid else _generate_rsid()
        print(f"Using RSID: {self.rsid}")
        # Set default author and initials
        self.author = author
        self.initials = initials
        # Cache for lazy-loaded editors
        self._editors = {}
        # Comment file paths
        self.comments_path = self.word_path / "comments.xml"
        self.comments_extended_path = self.word_path / "commentsExtended.xml"
        self.comments_ids_path = self.word_path / "commentsIds.xml"
        self.comments_extensible_path = self.word_path / "commentsExtensible.xml"
        # Load existing comments and determine next ID (before setup modifies files)
        self.existing_comments = self._load_existing_comments()
        self.next_comment_id = self._get_next_comment_id()
        # Convenient access to document.xml editor (semi-private)
        self._document = self["word/document.xml"]
        # Setup tracked changes infrastructure
        self._setup_tracking(track_revisions=track_revisions)
        # Add author to people.xml
        self._add_author_to_people(author)
def __getitem__(self, xml_path: str) -> DocxXMLEditor:
"""
Get or create a DocxXMLEditor for the specified XML file.
Enables lazy-loaded editors with bracket notation:
node = doc["word/document.xml"].get_node(tag="w:p", line_number=42)
Args:
xml_path: Relative path to XML file (e.g., "word/document.xml", "word/comments.xml")
Returns:
DocxXMLEditor instance for the specified file
Raises:
ValueError: If the file does not exist
Example:
# Get node from document.xml
node = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "1"})
# Get node from comments.xml
comment = doc["word/comments.xml"].get_node(tag="w:comment", attrs={"w:id": "0"})
"""
if xml_path not in self._editors:
file_path = self.unpacked_path / xml_path
if not file_path.exists():
raise ValueError(f"XML file not found: {xml_path}")
# Use DocxXMLEditor with RSID, author, and initials for all editors
self._editors[xml_path] = DocxXMLEditor(
file_path, rsid=self.rsid, author=self.author, initials=self.initials
)
return self._editors[xml_path]
    def add_comment(self, start, end, text: str) -> int:
        """
        Add a comment spanning from one element to another.

        Writes all four comment part files (comments.xml, commentsExtended.xml,
        commentsIds.xml, commentsExtensible.xml) plus the range markers in
        document.xml in one call, so the document stays internally consistent.

        Args:
            start: DOM element for the starting point
            end: DOM element for the ending point
            text: Comment content

        Returns:
            The comment ID that was created

        Example:
            start_node = cm.get_document_node(tag="w:del", id="1")
            end_node = cm.get_document_node(tag="w:ins", id="2")
            cm.add_comment(start=start_node, end=end_node, text="Explanation")
        """
        comment_id = self.next_comment_id
        para_id = _generate_hex_id()
        durable_id = _generate_hex_id()
        timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
        # Add comment ranges to document.xml immediately
        self._document.insert_before(start, self._comment_range_start_xml(comment_id))
        # If end node is a paragraph, append comment markup inside it
        # Otherwise insert after it (for run-level anchors)
        if end.tagName == "w:p":
            self._document.append_to(end, self._comment_range_end_xml(comment_id))
        else:
            self._document.insert_after(end, self._comment_range_end_xml(comment_id))
        # Add to comments.xml immediately
        self._add_to_comments_xml(
            comment_id, para_id, text, self.author, self.initials, timestamp
        )
        # Add to commentsExtended.xml immediately (parent_para_id=None: top-level comment)
        self._add_to_comments_extended_xml(para_id, parent_para_id=None)
        # Add to commentsIds.xml immediately
        self._add_to_comments_ids_xml(para_id, durable_id)
        # Add to commentsExtensible.xml immediately
        self._add_to_comments_extensible_xml(durable_id)
        # Update existing_comments so replies work
        self.existing_comments[comment_id] = {"para_id": para_id}
        self.next_comment_id += 1
        return comment_id
    def reply_to_comment(
        self,
        parent_comment_id: int,
        text: str,
    ) -> int:
        """
        Add a reply to an existing comment.

        The reply's range markers are anchored around the parent comment's
        range in document.xml, and the parent/child relation is recorded via
        para IDs in commentsExtended.xml.

        Args:
            parent_comment_id: The w:id of the parent comment to reply to
            text: Reply text

        Returns:
            The comment ID that was created for the reply

        Raises:
            ValueError: If no comment with parent_comment_id exists.

        Example:
            cm.reply_to_comment(parent_comment_id=0, text="I agree with this change")
        """
        if parent_comment_id not in self.existing_comments:
            raise ValueError(f"Parent comment with id={parent_comment_id} not found")
        parent_info = self.existing_comments[parent_comment_id]
        comment_id = self.next_comment_id
        para_id = _generate_hex_id()
        durable_id = _generate_hex_id()
        timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
        # Add comment ranges to document.xml immediately
        parent_start_elem = self._document.get_node(
            tag="w:commentRangeStart", attrs={"w:id": str(parent_comment_id)}
        )
        parent_ref_elem = self._document.get_node(
            tag="w:commentReference", attrs={"w:id": str(parent_comment_id)}
        )
        self._document.insert_after(
            parent_start_elem, self._comment_range_start_xml(comment_id)
        )
        # Both markers are inserted after the parent's reference run; the
        # second insert lands between the run and the range end, keeping
        # reference order: parent ref, reply ref, reply range end.
        parent_ref_run = parent_ref_elem.parentNode
        self._document.insert_after(
            parent_ref_run, f'<w:commentRangeEnd w:id="{comment_id}"/>'
        )
        self._document.insert_after(
            parent_ref_run, self._comment_ref_run_xml(comment_id)
        )
        # Add to comments.xml immediately
        self._add_to_comments_xml(
            comment_id, para_id, text, self.author, self.initials, timestamp
        )
        # Add to commentsExtended.xml immediately (with parent)
        self._add_to_comments_extended_xml(
            para_id, parent_para_id=parent_info["para_id"]
        )
        # Add to commentsIds.xml immediately
        self._add_to_comments_ids_xml(para_id, durable_id)
        # Add to commentsExtensible.xml immediately
        self._add_to_comments_extensible_xml(durable_id)
        # Update existing_comments so replies work
        self.existing_comments[comment_id] = {"para_id": para_id}
        self.next_comment_id += 1
        return comment_id
def __del__(self):
"""Clean up temporary directory on deletion."""
if hasattr(self, "temp_dir") and Path(self.temp_dir).exists():
shutil.rmtree(self.temp_dir)
def validate(self) -> None:
    """
    Validate the document against the XSD schema and the redlining rules.

    Raises:
        ValueError: If either validation step fails.
    """
    # Build both validators against the current on-disk state, then run them
    # in order; the first failure aborts with its specific message.
    checks = (
        (
            DOCXSchemaValidator(self.unpacked_path, self.original_docx, verbose=False),
            "Schema validation failed",
        ),
        (
            RedliningValidator(self.unpacked_path, self.original_docx, verbose=False),
            "Redlining validation failed",
        ),
    )
    for validator, failure_message in checks:
        if not validator.validate():
            raise ValueError(failure_message)
def save(self, destination=None, validate=True) -> None:
    """
    Persist all modified XML parts and copy the document tree to its destination.

    This writes out every change made via add_comment() and reply_to_comment().

    Args:
        destination: Optional path to save to. If None, saves back to the
            original directory.
        validate: If True (default), validate the document before saving.
    """
    # Comment bookkeeping (relationships and content types) is only needed
    # when comment parts were actually created.
    if self.comments_path.exists():
        self._ensure_comment_relationships()
        self._ensure_comment_content_types()

    # Flush every open XML editor into the temp working tree.
    for editor in self._editors.values():
        editor.save()

    if validate:
        self.validate()

    # Mirror the temp tree onto the requested target (or the original path).
    target = Path(destination) if destination else self.original_path
    shutil.copytree(self.unpacked_path, target, dirs_exist_ok=True)
# ==================== Private: Initialization ====================
def _get_next_comment_id(self):
    """Return one past the highest w:id found in comments.xml (0 when absent)."""
    if not self.comments_path.exists():
        return 0
    editor = self["word/comments.xml"]
    numeric_ids = []
    for node in editor.dom.getElementsByTagName("w:comment"):
        raw = node.getAttribute("w:id")
        if not raw:
            continue
        try:
            numeric_ids.append(int(raw))
        except ValueError:
            # Non-numeric ids are ignored, matching Word's numbering scheme.
            continue
    return max(numeric_ids, default=-1) + 1
def _load_existing_comments(self):
    """Map each existing comment's integer w:id to its paragraph id.

    The paragraph id (w14:paraId) is what replies reference, so comments
    without one are skipped.
    """
    if not self.comments_path.exists():
        return {}
    editor = self["word/comments.xml"]
    found = {}
    for comment in editor.dom.getElementsByTagName("w:comment"):
        cid = comment.getAttribute("w:id")
        if not cid:
            continue
        # Take the first non-empty w14:paraId among the comment's paragraphs.
        para_id = next(
            (
                p.getAttribute("w14:paraId")
                for p in comment.getElementsByTagName("w:p")
                if p.getAttribute("w14:paraId")
            ),
            None,
        )
        if para_id:
            found[int(cid)] = {"para_id": para_id}
    return found
# ==================== Private: Setup Methods ====================
def _setup_tracking(self, track_revisions=False):
    """Install comment/tracking infrastructure into the unpacked document.

    Args:
        track_revisions: If True, enables track revisions in settings.xml.
    """
    # Ensure word/people.xml exists (copied from the template when missing).
    self._update_people_xml(self.word_path / "people.xml")

    # Register people.xml in the package metadata files.
    self._add_content_type_for_people(self.unpacked_path / "[Content_Types].xml")
    self._add_relationship_for_people(self.word_path / "_rels" / "document.xml.rels")

    # Always record our RSID; optionally turn on <w:trackRevisions/>.
    self._update_settings(self.word_path / "settings.xml", track_revisions=track_revisions)
def _update_people_xml(self, path):
    """Copy the template people.xml into place unless one already exists."""
    if path.exists():
        return
    shutil.copy(TEMPLATE_DIR / "people.xml", path)
def _add_content_type_for_people(self, path):
    """Register people.xml in [Content_Types].xml (idempotent).

    NOTE: ``path`` is accepted for interface compatibility but unused; the
    editor is looked up by its package-relative name.
    """
    editor = self["[Content_Types].xml"]
    if self._has_override(editor, "/word/people.xml"):
        return  # already registered
    editor.append_to(
        editor.dom.documentElement,
        '<Override PartName="/word/people.xml" ContentType="application/vnd.openxmlformats-officedocument.wordprocessingml.people+xml"/>',
    )
def _add_relationship_for_people(self, path):
    """Add the people.xml relationship to document.xml.rels (idempotent).

    NOTE: ``path`` is accepted for interface compatibility but unused; the
    editor is looked up by its package-relative name.
    """
    editor = self["word/_rels/document.xml.rels"]
    if self._has_relationship(editor, "people.xml"):
        return  # already present
    root = editor.dom.documentElement
    tag = root.tagName  # type: ignore
    # Preserve whatever namespace prefix the relationships root uses.
    ns_prefix = f"{tag.split(':')[0]}:" if ":" in tag else ""
    rid = editor.get_next_rid()
    editor.append_to(
        root,
        f'<{ns_prefix}Relationship Id="{rid}" Type="http://schemas.microsoft.com/office/2011/relationships/people" Target="people.xml"/>',
    )
def _update_settings(self, path, track_revisions=False):
    """Add RSID and optionally enable track revisions in settings.xml.

    Args:
        path: Path to settings.xml (note: unused here — the editor is looked
            up by its package-relative name instead).
        track_revisions: If True, adds trackRevisions element.

    Places elements per OOXML schema order:
        - trackRevisions: early (before defaultTabStop)
        - rsids: late (after compat)
    """
    editor = self["word/settings.xml"]
    root = editor.get_node(tag="w:settings")
    # Derive the namespace prefix from the root tag (normally "w").
    prefix = root.tagName.split(":")[0] if ":" in root.tagName else "w"
    # Conditionally add trackRevisions if requested
    if track_revisions:
        # Only add the element when no trackRevisions element exists yet.
        track_revisions_exists = any(
            elem.tagName == f"{prefix}:trackRevisions"
            for elem in editor.dom.getElementsByTagName(f"{prefix}:trackRevisions")
        )
        if not track_revisions_exists:
            track_rev_xml = f"<{prefix}:trackRevisions/>"
            # Try to insert before documentProtection, defaultTabStop, or at start
            # (these anchors keep the element in schema-valid position).
            inserted = False
            for tag in [f"{prefix}:documentProtection", f"{prefix}:defaultTabStop"]:
                elements = editor.dom.getElementsByTagName(tag)
                if elements:
                    editor.insert_before(elements[0], track_rev_xml)
                    inserted = True
                    break
            if not inserted:
                # Insert as first child of settings
                if root.firstChild:
                    editor.insert_before(root.firstChild, track_rev_xml)
                else:
                    editor.append_to(root, track_rev_xml)
    # Always check if rsids section exists
    rsids_elements = editor.dom.getElementsByTagName(f"{prefix}:rsids")
    if not rsids_elements:
        # Add new rsids section with our RSID as both root and entry.
        rsids_xml = f'''<{prefix}:rsids>
<{prefix}:rsidRoot {prefix}:val="{self.rsid}"/>
<{prefix}:rsid {prefix}:val="{self.rsid}"/>
</{prefix}:rsids>'''
        # Try to insert after compat, before clrSchemeMapping, or before closing tag
        inserted = False
        compat_elements = editor.dom.getElementsByTagName(f"{prefix}:compat")
        if compat_elements:
            editor.insert_after(compat_elements[0], rsids_xml)
            inserted = True
        if not inserted:
            clr_elements = editor.dom.getElementsByTagName(
                f"{prefix}:clrSchemeMapping"
            )
            if clr_elements:
                editor.insert_before(clr_elements[0], rsids_xml)
                inserted = True
        if not inserted:
            editor.append_to(root, rsids_xml)
    else:
        # Section exists: only append our RSID if it is not recorded yet.
        rsids_elem = rsids_elements[0]
        rsid_exists = any(
            elem.getAttribute(f"{prefix}:val") == self.rsid
            for elem in rsids_elem.getElementsByTagName(f"{prefix}:rsid")
        )
        if not rsid_exists:
            rsid_xml = f'<{prefix}:rsid {prefix}:val="{self.rsid}"/>'
            editor.append_to(rsids_elem, rsid_xml)
# ==================== Private: XML File Creation ====================
def _add_to_comments_xml(
    self, comment_id, para_id, text, author, initials, timestamp
):
    """Append a single <w:comment> entry to word/comments.xml.

    Creates comments.xml from the bundled template on first use.

    Args:
        comment_id: Numeric w:id assigned to the new comment.
        para_id: w14:paraId hex string for the comment's paragraph.
        text: Plain-text body of the comment.
        author, initials, timestamp: Accepted for interface compatibility;
            author metadata is injected automatically by DocxXMLEditor.
    """
    if not self.comments_path.exists():
        shutil.copy(TEMPLATE_DIR / "comments.xml", self.comments_path)
    editor = self["word/comments.xml"]
    root = editor.get_node(tag="w:comments")
    # BUGFIX: the previous code replaced '&', '<', '>' with themselves — a
    # no-op — so comment text containing markup characters produced invalid
    # XML. Escape for XML text content, replacing '&' first so the entity
    # ampersands introduced by the later replacements are not double-escaped.
    escaped_text = (
        text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
    )
    # Note: w:rsidR, w:rsidRDefault, w:rsidP on w:p, w:rsidR on w:r,
    # and w:author, w:date, w:initials on w:comment are automatically added by DocxXMLEditor
    comment_xml = f'''<w:comment w:id="{comment_id}">
<w:p w14:paraId="{para_id}" w14:textId="77777777">
<w:r><w:rPr><w:rStyle w:val="CommentReference"/></w:rPr><w:annotationRef/></w:r>
<w:r><w:rPr><w:color w:val="000000"/><w:sz w:val="20"/><w:szCs w:val="20"/></w:rPr><w:t>{escaped_text}</w:t></w:r>
</w:p>
</w:comment>'''
    editor.append_to(root, comment_xml)
def _add_to_comments_extended_xml(self, para_id, parent_para_id):
    """Append a <w15:commentEx> entry; copies the template file on first use."""
    if not self.comments_extended_path.exists():
        shutil.copy(
            TEMPLATE_DIR / "commentsExtended.xml", self.comments_extended_path
        )
    editor = self["word/commentsExtended.xml"]
    root = editor.get_node(tag="w15:commentsEx")
    # A reply carries a paraIdParent attribute pointing at its parent comment.
    parent_attr = f' w15:paraIdParent="{parent_para_id}"' if parent_para_id else ""
    editor.append_to(
        root,
        f'<w15:commentEx w15:paraId="{para_id}"{parent_attr} w15:done="0"/>',
    )
def _add_to_comments_ids_xml(self, para_id, durable_id):
    """Append a <w16cid:commentId> mapping paraId to durableId; copies the template on first use."""
    if not self.comments_ids_path.exists():
        shutil.copy(TEMPLATE_DIR / "commentsIds.xml", self.comments_ids_path)
    editor = self["word/commentsIds.xml"]
    editor.append_to(
        editor.get_node(tag="w16cid:commentsIds"),
        f'<w16cid:commentId w16cid:paraId="{para_id}" w16cid:durableId="{durable_id}"/>',
    )
def _add_to_comments_extensible_xml(self, durable_id):
    """Append a <w16cex:commentExtensible> entry; copies the template on first use."""
    if not self.comments_extensible_path.exists():
        shutil.copy(
            TEMPLATE_DIR / "commentsExtensible.xml", self.comments_extensible_path
        )
    editor = self["word/commentsExtensible.xml"]
    editor.append_to(
        editor.get_node(tag="w16cex:commentsExtensible"),
        f'<w16cex:commentExtensible w16cex:durableId="{durable_id}"/>',
    )
# ==================== Private: XML Fragments ====================
def _comment_range_start_xml(self, comment_id):
    """Generate XML for comment range start.

    The <w:commentRangeStart> marker is placed in document.xml at the
    beginning of the text span the comment refers to.
    """
    return f'<w:commentRangeStart w:id="{comment_id}"/>'
def _comment_range_end_xml(self, comment_id):
    """Generate XML for comment range end with reference run.

    Emits both the <w:commentRangeEnd> marker and the run carrying the
    <w:commentReference> that anchors the comment to the range.

    Note: w:rsidR is automatically added by DocxXMLEditor.
    """
    return f'''<w:commentRangeEnd w:id="{comment_id}"/>
<w:r>
<w:rPr><w:rStyle w:val="CommentReference"/></w:rPr>
<w:commentReference w:id="{comment_id}"/>
</w:r>'''
def _comment_ref_run_xml(self, comment_id):
    """Generate XML for comment reference run.

    Used on its own (without a range-end marker) when appending a reply
    next to an existing comment's reference run.

    Note: w:rsidR is automatically added by DocxXMLEditor.
    """
    return f'''<w:r>
<w:rPr><w:rStyle w:val="CommentReference"/></w:rPr>
<w:commentReference w:id="{comment_id}"/>
</w:r>'''
# ==================== Private: Metadata Updates ====================
def _has_relationship(self, editor, target):
    """Return True if the relationships file already references this Target."""
    return any(
        rel.getAttribute("Target") == target
        for rel in editor.dom.getElementsByTagName("Relationship")
    )
def _has_override(self, editor, part_name):
    """Return True if an Override with the given PartName already exists."""
    return any(
        node.getAttribute("PartName") == part_name
        for node in editor.dom.getElementsByTagName("Override")
    )
def _has_author(self, editor, author):
    """Return True if the author is already listed in people.xml."""
    return any(
        person.getAttribute("w15:author") == author
        for person in editor.dom.getElementsByTagName("w15:person")
    )
def _add_author_to_people(self, author):
    """Register *author* in word/people.xml (called during initialization)."""
    people_path = self.word_path / "people.xml"
    # _setup_tracking is responsible for creating the file beforehand.
    if not people_path.exists():
        raise ValueError("people.xml should exist after _setup_tracking")
    editor = self["word/people.xml"]
    root = editor.get_node(tag="w15:people")
    if self._has_author(editor, author):
        return  # author already registered
    # Escape the name so it cannot inject XML via the attribute value.
    safe_author = html.escape(author, quote=True)
    editor.append_to(
        root,
        f'''<w15:person w15:author="{safe_author}">
<w15:presenceInfo w15:providerId="None" w15:userId="{safe_author}"/>
</w15:person>''',
    )
def _ensure_comment_relationships(self):
    """Ensure word/_rels/document.xml.rels references all four comment parts."""
    editor = self["word/_rels/document.xml.rels"]
    if self._has_relationship(editor, "comments.xml"):
        return  # comment relationships were added on a previous save
    root = editor.dom.documentElement
    tag = root.tagName  # type: ignore
    ns_prefix = f"{tag.split(':')[0]}:" if ":" in tag else ""
    # Allocate consecutive rIds starting at the next free one.
    base = int(editor.get_next_rid()[3:])  # strip the "rId" prefix
    targets = [
        (
            "http://schemas.openxmlformats.org/officeDocument/2006/relationships/comments",
            "comments.xml",
        ),
        (
            "http://schemas.microsoft.com/office/2011/relationships/commentsExtended",
            "commentsExtended.xml",
        ),
        (
            "http://schemas.microsoft.com/office/2016/09/relationships/commentsIds",
            "commentsIds.xml",
        ),
        (
            "http://schemas.microsoft.com/office/2018/08/relationships/commentsExtensible",
            "commentsExtensible.xml",
        ),
    ]
    for offset, (rel_type, target) in enumerate(targets):
        editor.append_to(
            root,
            f'<{ns_prefix}Relationship Id="rId{base + offset}" Type="{rel_type}" Target="{target}"/>',
        )
def _ensure_comment_content_types(self):
    """Ensure [Content_Types].xml declares all four comment part types."""
    editor = self["[Content_Types].xml"]
    if self._has_override(editor, "/word/comments.xml"):
        return  # overrides were added on a previous save
    root = editor.dom.documentElement
    # All four parts share the same content-type prefix.
    ct_base = "application/vnd.openxmlformats-officedocument.wordprocessingml"
    for part_name, suffix in (
        ("/word/comments.xml", "comments+xml"),
        ("/word/commentsExtended.xml", "commentsExtended+xml"),
        ("/word/commentsIds.xml", "commentsIds+xml"),
        ("/word/commentsExtensible.xml", "commentsExtensible+xml"),
    ):
        editor.append_to(
            root,
            f'<Override PartName="{part_name}" ContentType="{ct_base}.{suffix}"/>',
        )
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/document-skills/docx/scripts/document.py",
"license": "MIT License",
"lines": 1038,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/drugbank-database/scripts/drugbank_helper.py | #!/usr/bin/env python3
"""
DrugBank Helper Functions
Utility functions for common DrugBank operations including:
- Drug information extraction
- Interaction analysis
- Target identification
- Chemical property extraction
Usage:
from drugbank_helper import DrugBankHelper
db = DrugBankHelper()
drug_info = db.get_drug_info('DB00001')
interactions = db.get_interactions('DB00001')
"""
from typing import Dict, List, Optional, Any
import xml.etree.ElementTree as ET
class DrugBankHelper:
    """Helper class for DrugBank data access and analysis.

    Wraps a parsed DrugBank XML export and provides lookups for drug
    metadata, drug-drug interactions, targets, and chemical properties.
    """

    # Namespace map used for every XPath query against the DrugBank XML.
    NAMESPACE = {'db': 'http://www.drugbank.ca'}

    def __init__(self, root=None):
        """
        Initialize DrugBankHelper.

        Args:
            root: Pre-loaded XML root element. If None, the database is
                loaded lazily from drugbank-downloader on first access.
        """
        self.root = root
        # Cache drugbank_id -> <drug> element to avoid rescanning the tree.
        self._drug_cache = {}

    def _get_root(self):
        """Lazily load and memoize the DrugBank root element."""
        if self.root is None:
            # Local import: the downloader is only required when no
            # pre-parsed root was supplied by the caller.
            from drugbank_downloader import get_drugbank_root
            self.root = get_drugbank_root()
        return self.root

    def _get_text_safe(self, element) -> Optional[str]:
        """Return element.text, or None when the element is missing."""
        return element.text if element is not None else None

    def find_drug(self, drugbank_id: str):
        """
        Find drug element by DrugBank ID.

        Args:
            drugbank_id: DrugBank ID (e.g., 'DB00001')

        Returns:
            XML element for the drug or None if not found
        """
        if drugbank_id in self._drug_cache:
            return self._drug_cache[drugbank_id]
        root = self._get_root()
        for drug in root.findall('db:drug', self.NAMESPACE):
            primary_id = drug.find('db:drugbank-id[@primary="true"]', self.NAMESPACE)
            if primary_id is not None and primary_id.text == drugbank_id:
                self._drug_cache[drugbank_id] = drug
                return drug
        return None

    def get_drug_info(self, drugbank_id: str) -> Dict[str, Any]:
        """
        Get comprehensive drug information.

        Args:
            drugbank_id: DrugBank ID

        Returns:
            Dictionary with name, type, description, CAS number, indication,
            pharmacodynamics, and mechanism of action. Empty dict if the ID
            is unknown.
        """
        drug = self.find_drug(drugbank_id)
        if drug is None:
            return {}
        return {
            'drugbank_id': drugbank_id,
            'name': self._get_text_safe(drug.find('db:name', self.NAMESPACE)),
            'type': drug.get('type'),
            'description': self._get_text_safe(drug.find('db:description', self.NAMESPACE)),
            'cas_number': self._get_text_safe(drug.find('db:cas-number', self.NAMESPACE)),
            'indication': self._get_text_safe(drug.find('db:indication', self.NAMESPACE)),
            'pharmacodynamics': self._get_text_safe(drug.find('db:pharmacodynamics', self.NAMESPACE)),
            'mechanism_of_action': self._get_text_safe(drug.find('db:mechanism-of-action', self.NAMESPACE)),
        }

    def get_interactions(self, drugbank_id: str) -> List[Dict[str, str]]:
        """
        Get all drug-drug interactions.

        Args:
            drugbank_id: DrugBank ID

        Returns:
            List of interaction dictionaries with partner_id, partner_name,
            and description keys. Empty list for unknown drugs.
        """
        drug = self.find_drug(drugbank_id)
        if drug is None:
            return []
        interactions = []
        ddi_elem = drug.find('db:drug-interactions', self.NAMESPACE)
        if ddi_elem is not None:
            for interaction in ddi_elem.findall('db:drug-interaction', self.NAMESPACE):
                interactions.append({
                    'partner_id': self._get_text_safe(interaction.find('db:drugbank-id', self.NAMESPACE)),
                    'partner_name': self._get_text_safe(interaction.find('db:name', self.NAMESPACE)),
                    'description': self._get_text_safe(interaction.find('db:description', self.NAMESPACE)),
                })
        return interactions

    def get_targets(self, drugbank_id: str) -> List[Dict[str, Any]]:
        """
        Get drug targets.

        Args:
            drugbank_id: DrugBank ID

        Returns:
            List of target dictionaries (id, name, organism, known_action,
            plus actions / uniprot_id / gene_name when available).
        """
        drug = self.find_drug(drugbank_id)
        if drug is None:
            return []
        targets = []
        targets_elem = drug.find('db:targets', self.NAMESPACE)
        if targets_elem is not None:
            for target in targets_elem.findall('db:target', self.NAMESPACE):
                target_data = {
                    'id': self._get_text_safe(target.find('db:id', self.NAMESPACE)),
                    'name': self._get_text_safe(target.find('db:name', self.NAMESPACE)),
                    'organism': self._get_text_safe(target.find('db:organism', self.NAMESPACE)),
                    'known_action': self._get_text_safe(target.find('db:known-action', self.NAMESPACE)),
                }
                # Extract pharmacological actions (inhibitor, agonist, ...).
                actions_elem = target.find('db:actions', self.NAMESPACE)
                if actions_elem is not None:
                    target_data['actions'] = [
                        action.text for action in actions_elem.findall('db:action', self.NAMESPACE)
                    ]
                # Extract polypeptide info (UniProt accession + gene symbol).
                polypeptide = target.find('db:polypeptide', self.NAMESPACE)
                if polypeptide is not None:
                    target_data['uniprot_id'] = polypeptide.get('id')
                    target_data['gene_name'] = self._get_text_safe(
                        polypeptide.find('db:gene-name', self.NAMESPACE)
                    )
                targets.append(target_data)
        return targets

    def get_properties(self, drugbank_id: str) -> Dict[str, Dict[str, Any]]:
        """
        Get chemical properties.

        Args:
            drugbank_id: DrugBank ID

        Returns:
            Dictionary with 'calculated' and 'experimental' property
            dictionaries (kind -> value).
        """
        drug = self.find_drug(drugbank_id)
        properties = {'calculated': {}, 'experimental': {}}
        if drug is None:
            return properties
        # Both sections share the same <property><kind/><value/> layout.
        for section, tag in (('calculated', 'db:calculated-properties'),
                             ('experimental', 'db:experimental-properties')):
            section_elem = drug.find(tag, self.NAMESPACE)
            if section_elem is None:
                continue
            for prop in section_elem.findall('db:property', self.NAMESPACE):
                kind = self._get_text_safe(prop.find('db:kind', self.NAMESPACE))
                value = self._get_text_safe(prop.find('db:value', self.NAMESPACE))
                if kind and value:
                    properties[section][kind] = value
        return properties

    def check_interaction(self, drug1_id: str, drug2_id: str) -> Optional[Dict[str, str]]:
        """
        Check if two drugs interact.

        Both directions are checked, since DrugBank records interactions on
        each partner separately.

        Args:
            drug1_id: First drug DrugBank ID
            drug2_id: Second drug DrugBank ID

        Returns:
            Interaction dictionary if an interaction exists, None otherwise
        """
        for interaction in self.get_interactions(drug1_id):
            if interaction['partner_id'] == drug2_id:
                return interaction
        for interaction in self.get_interactions(drug2_id):
            if interaction['partner_id'] == drug1_id:
                return interaction
        return None

    def check_polypharmacy(self, drug_ids: List[str]) -> List[Dict[str, Any]]:
        """
        Check interactions in a drug regimen.

        Args:
            drug_ids: List of DrugBank IDs

        Returns:
            List of all pairwise interactions found, each annotated with
            'drug1' and 'drug2' keys identifying the pair.
        """
        all_interactions = []
        for i, drug1 in enumerate(drug_ids):
            for drug2 in drug_ids[i + 1:]:
                interaction = self.check_interaction(drug1, drug2)
                if interaction:
                    interaction['drug1'] = drug1
                    interaction['drug2'] = drug2
                    all_interactions.append(interaction)
        return all_interactions

    def get_smiles(self, drugbank_id: str) -> Optional[str]:
        """
        Get SMILES structure for a drug.

        Args:
            drugbank_id: DrugBank ID

        Returns:
            SMILES string or None
        """
        props = self.get_properties(drugbank_id)
        return props.get('calculated', {}).get('SMILES')

    def get_inchi(self, drugbank_id: str) -> Optional[str]:
        """
        Get InChI structure for a drug.

        Args:
            drugbank_id: DrugBank ID

        Returns:
            InChI string or None
        """
        props = self.get_properties(drugbank_id)
        return props.get('calculated', {}).get('InChI')

    def search_by_name(self, name: str, exact: bool = False) -> List[Dict[str, str]]:
        """
        Search drugs by name.

        Args:
            name: Drug name to search for
            exact: If True, require exact match (case-insensitive)

        Returns:
            List of matching drugs with id and name
        """
        root = self._get_root()
        results = []
        search_term = name.lower()
        for drug in root.findall('db:drug', self.NAMESPACE):
            # BUGFIX: entries without a primary drugbank-id element used to
            # raise AttributeError (None.text); skip them instead.
            id_elem = drug.find('db:drugbank-id[@primary="true"]', self.NAMESPACE)
            if id_elem is None:
                continue
            drug_name = self._get_text_safe(drug.find('db:name', self.NAMESPACE))
            if not drug_name:
                continue
            if exact:
                matched = drug_name.lower() == search_term
            else:
                matched = search_term in drug_name.lower()
            if matched:
                results.append({'id': id_elem.text, 'name': drug_name})
        return results
# Example usage — demonstrates the helper against a real DrugBank dataset.
# NOTE: with no pre-loaded root, the first query triggers drugbank_downloader,
# which requires a licensed DrugBank download to be available locally.
if __name__ == "__main__":
    # Initialize helper
    db = DrugBankHelper()
    # Example: Get drug information
    print("Example 1: Get drug information")
    drug_info = db.get_drug_info('DB00001')
    print(f"Drug: {drug_info.get('name')}")
    print(f"Type: {drug_info.get('type')}")
    # NOTE(review): assumes 'indication' is a string when present; a None
    # value would make the slice below fail — confirm against the dataset.
    print(f"Indication: {drug_info.get('indication', 'N/A')[:100]}...")
    print()
    # Example: Get interactions
    print("Example 2: Get drug interactions")
    interactions = db.get_interactions('DB00001')
    print(f"Found {len(interactions)} interactions")
    if interactions:
        print(f"First interaction: {interactions[0]['partner_name']}")
    print()
    # Example: Get targets
    print("Example 3: Get drug targets")
    targets = db.get_targets('DB00001')
    print(f"Found {len(targets)} targets")
    if targets:
        print(f"First target: {targets[0]['name']}")
    print()
    # Example: Check drug pair interaction (both directions are checked)
    print("Example 4: Check specific drug pair")
    interaction = db.check_interaction('DB00001', 'DB00002')
    if interaction:
        print("Interaction found!")
        print(f"Description: {interaction['description'][:100]}...")
    else:
        print("No interaction found")
    print()
    # Example: Search by name (exact, case-insensitive match)
    print("Example 5: Search drugs by name")
    results = db.search_by_name('aspirin', exact=True)
    if results:
        print(f"Found: {results[0]['id']} - {results[0]['name']}")
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/drugbank-database/scripts/drugbank_helper.py",
"license": "MIT License",
"lines": 279,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/ensembl-database/scripts/ensembl_query.py | #!/usr/bin/env python3
"""
Ensembl REST API Query Script
Reusable functions for common Ensembl database queries with built-in rate limiting and error handling.
Usage:
python ensembl_query.py --gene BRCA2 --species human
python ensembl_query.py --variant rs699 --species human
python ensembl_query.py --region "7:140424943-140624564" --species human
"""
import requests
import time
import json
import argparse
from typing import Dict, List, Optional, Any
class EnsemblAPIClient:
    """Client for querying the Ensembl REST API with rate limiting and error handling."""

    def __init__(self, server: str = "https://rest.ensembl.org", rate_limit: int = 15):
        """
        Initialize the Ensembl API client.

        Args:
            server: Base URL for the Ensembl REST API
            rate_limit: Maximum requests per second (default 15 for anonymous users)
        """
        self.server = server
        self.rate_limit = rate_limit
        # Requests issued in the current one-second window.
        self.request_count = 0
        # time.time() of the start of the current window (0 = never).
        self.last_request_time = 0

    def _rate_limit_check(self):
        """Enforce rate limiting before making requests.

        If the per-second budget is exhausted, sleeps out the remainder of
        the current window; otherwise starts a fresh window when more than
        a second has passed since the last request.
        """
        current_time = time.time()
        time_since_last = current_time - self.last_request_time
        if time_since_last < 1.0:
            if self.request_count >= self.rate_limit:
                sleep_time = 1.0 - time_since_last
                time.sleep(sleep_time)
                self.request_count = 0
                self.last_request_time = time.time()
        else:
            self.request_count = 0
            self.last_request_time = current_time

    def _make_request(
        self,
        endpoint: str,
        params: Optional[Dict] = None,
        max_retries: int = 3,
        method: str = "GET",
        data: Optional[Dict] = None
    ) -> Any:
        """
        Make an API request with error handling and retries.

        Args:
            endpoint: API endpoint path
            params: Query parameters
            max_retries: Maximum number of retry attempts
            method: HTTP method (GET or POST)
            data: JSON data for POST requests

        Returns:
            JSON response data

        Raises:
            Exception: If request fails after max retries, or the resource
                does not exist (HTTP 404).
        """
        headers = {"Content-Type": "application/json"}
        url = f"{self.server}{endpoint}"

        for attempt in range(max_retries):
            self._rate_limit_check()
            self.request_count += 1
            try:
                if method == "POST":
                    response = requests.post(url, headers=headers, json=data)
                else:
                    response = requests.get(url, headers=headers, params=params)

                if response.status_code == 200:
                    return response.json()
                elif response.status_code == 429:
                    # Rate limited by the server - honor Retry-After and retry.
                    retry_after = int(response.headers.get('Retry-After', 1))
                    print(f"Rate limited. Waiting {retry_after} seconds...")
                    time.sleep(retry_after)
                elif response.status_code == 404:
                    raise Exception(f"Resource not found: {endpoint}")
                else:
                    # Turn other HTTP errors into RequestException for the
                    # retry/backoff path below.
                    response.raise_for_status()
            except requests.exceptions.RequestException as e:
                if attempt == max_retries - 1:
                    raise Exception(f"Request failed after {max_retries} attempts: {e}")
                time.sleep(2 ** attempt)  # Exponential backoff

        raise Exception(f"Failed after {max_retries} attempts")

    def _fetch_fasta(self, endpoint: str, params: Optional[Dict] = None) -> str:
        """Fetch an endpoint as FASTA text.

        IMPROVED: previously the FASTA branches bypassed rate limiting and
        returned HTTP error bodies as if they were sequence data; this
        helper counts against the rate budget and raises on HTTP errors.
        """
        self._rate_limit_check()
        self.request_count += 1
        response = requests.get(
            f"{self.server}{endpoint}",
            headers={"Content-Type": "text/x-fasta"},
            params=params,
        )
        response.raise_for_status()
        return response.text

    def lookup_gene_by_symbol(self, species: str, symbol: str, expand: bool = True) -> Dict:
        """
        Look up gene information by symbol.

        Args:
            species: Species name (e.g., 'human', 'mouse')
            symbol: Gene symbol (e.g., 'BRCA2', 'TP53')
            expand: Include transcript information

        Returns:
            Gene information dictionary
        """
        endpoint = f"/lookup/symbol/{species}/{symbol}"
        params = {"expand": 1} if expand else {}
        return self._make_request(endpoint, params=params)

    def lookup_by_id(self, ensembl_id: str, expand: bool = False) -> Dict:
        """
        Look up object by Ensembl ID.

        Args:
            ensembl_id: Ensembl identifier (e.g., 'ENSG00000139618')
            expand: Include child objects

        Returns:
            Object information dictionary
        """
        endpoint = f"/lookup/id/{ensembl_id}"
        params = {"expand": 1} if expand else {}
        return self._make_request(endpoint, params=params)

    def get_sequence(
        self,
        ensembl_id: str,
        seq_type: str = "genomic",
        format: str = "json"
    ) -> Any:
        """
        Retrieve sequence by Ensembl ID.

        Args:
            ensembl_id: Ensembl identifier
            seq_type: Sequence type ('genomic', 'cds', 'cdna', 'protein')
            format: Output format ('json', 'fasta', 'text')

        Returns:
            Sequence data (dict for JSON, str for FASTA)
        """
        endpoint = f"/sequence/id/{ensembl_id}"
        params = {"type": seq_type}
        if format == "fasta":
            return self._fetch_fasta(endpoint, params)
        return self._make_request(endpoint, params=params)

    def get_region_sequence(
        self,
        species: str,
        region: str,
        format: str = "json"
    ) -> Any:
        """
        Get genomic sequence for a region.

        Args:
            species: Species name
            region: Region string (e.g., '7:140424943-140624564')
            format: Output format ('json', 'fasta', 'text')

        Returns:
            Sequence data (dict for JSON, str for FASTA)
        """
        endpoint = f"/sequence/region/{species}/{region}"
        if format == "fasta":
            return self._fetch_fasta(endpoint)
        return self._make_request(endpoint)

    def get_variant(self, species: str, variant_id: str, include_pops: bool = True) -> Dict:
        """
        Get variant information by ID.

        Args:
            species: Species name
            variant_id: Variant identifier (e.g., 'rs699')
            include_pops: Include population frequencies

        Returns:
            Variant information dictionary
        """
        endpoint = f"/variation/{species}/{variant_id}"
        params = {"pops": 1} if include_pops else {}
        return self._make_request(endpoint, params=params)

    def predict_variant_effect(
        self,
        species: str,
        hgvs_notation: str
    ) -> List[Dict]:
        """
        Predict variant consequences using VEP.

        Args:
            species: Species name
            hgvs_notation: HGVS notation (e.g., 'ENST00000288602:c.803C>T')

        Returns:
            List of predicted consequences
        """
        endpoint = f"/vep/{species}/hgvs/{hgvs_notation}"
        return self._make_request(endpoint)

    def find_orthologs(
        self,
        ensembl_id: str,
        target_species: Optional[str] = None
    ) -> Dict:
        """
        Find orthologs for a gene.

        Args:
            ensembl_id: Source gene Ensembl ID
            target_species: Target species (optional, returns all if not specified)

        Returns:
            Homology information dictionary
        """
        endpoint = f"/homology/id/{ensembl_id}"
        params = {}
        if target_species:
            params["target_species"] = target_species
        return self._make_request(endpoint, params=params)

    def get_region_features(
        self,
        species: str,
        region: str,
        feature_type: str = "gene"
    ) -> List[Dict]:
        """
        Get genomic features in a region.

        Args:
            species: Species name
            region: Region string (e.g., '7:140424943-140624564')
            feature_type: Feature type ('gene', 'transcript', 'variation', etc.)

        Returns:
            List of features
        """
        endpoint = f"/overlap/region/{species}/{region}"
        params = {"feature": feature_type}
        return self._make_request(endpoint, params=params)

    def get_species_info(self) -> List[Dict]:
        """
        Get information about all available species.

        Returns:
            List of species information dictionaries
        """
        endpoint = "/info/species"
        result = self._make_request(endpoint)
        return result.get("species", [])

    def get_assembly_info(self, species: str) -> Dict:
        """
        Get assembly information for a species.

        Args:
            species: Species name

        Returns:
            Assembly information dictionary
        """
        endpoint = f"/info/assembly/{species}"
        return self._make_request(endpoint)

    def map_coordinates(
        self,
        species: str,
        asm_from: str,
        region: str,
        asm_to: str
    ) -> Dict:
        """
        Map coordinates between genome assemblies.

        Args:
            species: Species name
            asm_from: Source assembly (e.g., 'GRCh37')
            region: Region string (e.g., '7:140453136-140453136')
            asm_to: Target assembly (e.g., 'GRCh38')

        Returns:
            Mapped coordinates
        """
        endpoint = f"/map/{species}/{asm_from}/{region}/{asm_to}"
        return self._make_request(endpoint)
def main():
    """Command-line interface for common Ensembl queries."""
    parser = argparse.ArgumentParser(
        description="Query the Ensembl database via REST API"
    )
    # Query modes -- checked in order below (gene, ensembl-id, variant,
    # region, orthologs); the first one supplied wins.
    parser.add_argument("--gene", help="Gene symbol to look up")
    parser.add_argument("--ensembl-id", help="Ensembl ID to look up")
    parser.add_argument("--variant", help="Variant ID (e.g., rs699)")
    parser.add_argument("--region", help="Genomic region (chr:start-end)")
    parser.add_argument(
        "--species",
        default="human",
        help="Species name (default: human)"
    )
    parser.add_argument(
        "--orthologs",
        help="Find orthologs for gene (provide Ensembl ID)"
    )
    parser.add_argument(
        "--target-species",
        help="Target species for ortholog search"
    )
    parser.add_argument(
        "--sequence",
        action="store_true",
        help="Retrieve sequence (requires --gene or --ensembl-id or --region)"
    )
    parser.add_argument(
        "--format",
        choices=["json", "fasta"],
        default="json",
        help="Output format (default: json)"
    )
    parser.add_argument(
        "--assembly",
        default="GRCh37",
        help="For GRCh37, use grch37.rest.ensembl.org server"
    )
    args = parser.parse_args()
    # Select appropriate server: GRCh37 data lives on a dedicated legacy
    # host; any other assembly value falls through to the main server.
    server = "https://rest.ensembl.org"
    if args.assembly.lower() == "grch37":
        server = "https://grch37.rest.ensembl.org"
    client = EnsemblAPIClient(server=server)
    try:
        if args.gene:
            print(f"Looking up gene: {args.gene}")
            result = client.lookup_gene_by_symbol(args.species, args.gene)
            if args.sequence:
                # Sequence retrieval needs the stable ID resolved above.
                print(f"\nRetrieving sequence for {result['id']}...")
                seq_result = client.get_sequence(
                    result['id'],
                    format=args.format
                )
                # FASTA output is plain text; JSON is pretty-printed.
                print(json.dumps(seq_result, indent=2) if args.format == "json" else seq_result)
            else:
                print(json.dumps(result, indent=2))
        elif args.ensembl_id:
            print(f"Looking up ID: {args.ensembl_id}")
            result = client.lookup_by_id(args.ensembl_id, expand=True)
            if args.sequence:
                print(f"\nRetrieving sequence...")
                seq_result = client.get_sequence(
                    args.ensembl_id,
                    format=args.format
                )
                print(json.dumps(seq_result, indent=2) if args.format == "json" else seq_result)
            else:
                print(json.dumps(result, indent=2))
        elif args.variant:
            print(f"Looking up variant: {args.variant}")
            result = client.get_variant(args.species, args.variant)
            print(json.dumps(result, indent=2))
        elif args.region:
            if args.sequence:
                print(f"Retrieving sequence for region: {args.region}")
                result = client.get_region_sequence(
                    args.species,
                    args.region,
                    format=args.format
                )
                print(json.dumps(result, indent=2) if args.format == "json" else result)
            else:
                # Without --sequence, a region query lists overlapping features.
                print(f"Finding features in region: {args.region}")
                result = client.get_region_features(args.species, args.region)
                print(json.dumps(result, indent=2))
        elif args.orthologs:
            print(f"Finding orthologs for: {args.orthologs}")
            result = client.find_orthologs(
                args.orthologs,
                target_species=args.target_species
            )
            print(json.dumps(result, indent=2))
        else:
            # No query option given: show usage instead of erroring out.
            parser.print_help()
    except Exception as e:
        # NOTE(review): error text goes to stdout, not stderr -- confirm intended.
        print(f"Error: {e}")
        return 1
    return 0
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/ensembl-database/scripts/ensembl_query.py",
"license": "MIT License",
"lines": 360,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/etetoolkit/scripts/quick_visualize.py | #!/usr/bin/env python3
"""
Quick tree visualization script with common customization options.
Provides command-line interface for rapid tree visualization with
customizable styles, layouts, and output formats.
"""
import argparse
import sys
from pathlib import Path
try:
from ete3 import Tree, TreeStyle, NodeStyle
except ImportError:
print("Error: ete3 not installed. Install with: pip install ete3")
sys.exit(1)
def create_tree_style(args):
    """Build a TreeStyle object from parsed command-line arguments."""
    style = TreeStyle()

    # What to display alongside the tree.
    style.show_leaf_name = args.show_names
    style.show_branch_length = args.show_lengths
    style.show_branch_support = args.show_support
    style.show_scale = args.show_scale

    # Overall layout.
    style.mode = args.mode
    style.rotation = args.rotation
    if args.mode == "c":
        # Arc settings only apply to circular layouts.
        style.arc_start = args.arc_start
        style.arc_span = args.arc_span

    # Spacing and branch-length scaling.
    style.branch_vertical_margin = args.vertical_margin
    if args.scale_factor:
        style.scale = args.scale_factor

    # Optional title face at the top of the image.
    if args.title:
        from ete3 import TextFace
        style.title.add_face(TextFace(args.title, fsize=16, bold=True), column=0)

    return style
def apply_node_styling(tree, args):
    """Attach a NodeStyle to every node, optionally colored by support."""
    for node in tree.traverse():
        style = NodeStyle()

        # Base look depends on whether the node is a leaf.
        if node.is_leaf():
            style["fgcolor"] = args.leaf_color
            style["size"] = args.leaf_size
        else:
            style["fgcolor"] = args.internal_color
            style["size"] = args.internal_size

        # Traffic-light coloring by support value, when requested;
        # overrides the base color for any node carrying a support value.
        if args.color_by_support and hasattr(node, 'support') and node.support:
            if node.support >= 0.9:
                style["fgcolor"] = "darkgreen"
            elif node.support >= 0.7:
                style["fgcolor"] = "orange"
            else:
                style["fgcolor"] = "red"

        node.set_style(style)
def visualize_tree(tree_file, output, args):
    """Load a Newick tree, style it, and render it to an image file.

    Exits the process with status 1 on load or render failure.
    """
    try:
        tree = Tree(str(tree_file), format=args.format)
    except Exception as e:
        print(f"Error loading tree: {e}")
        sys.exit(1)

    apply_node_styling(tree, args)
    ts = create_tree_style(args)

    # Collect only the render options the user actually set.
    render_args = {"tree_style": ts}
    for key, value in (("w", args.width), ("h", args.height),
                       ("units", args.units), ("dpi", args.dpi)):
        if value:
            render_args[key] = value

    try:
        tree.render(str(output), **render_args)
        print(f"Tree rendered successfully to: {output}")
    except Exception as e:
        print(f"Error rendering tree: {e}")
        sys.exit(1)
def main():
    """Parse CLI arguments, validate the output path, and render the tree."""
    parser = argparse.ArgumentParser(
        description="Quick tree visualization with ETE toolkit",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Basic visualization
%(prog)s tree.nw output.pdf
# Circular tree
%(prog)s tree.nw output.pdf --mode c
# Large tree with custom sizing
%(prog)s tree.nw output.png --width 1200 --height 800 --units px --dpi 300
# Hide names, show support, color by support
%(prog)s tree.nw output.pdf --no-names --show-support --color-by-support
# Custom title
%(prog)s tree.nw output.pdf --title "Phylogenetic Tree of Species"
# Semicircular layout
%(prog)s tree.nw output.pdf --mode c --arc-start -90 --arc-span 180
"""
    )
    parser.add_argument("input", help="Input tree file (Newick format)")
    parser.add_argument("output", help="Output image file (png, pdf, or svg)")
    # Tree format
    parser.add_argument("--format", type=int, default=0,
                        help="Newick format number (default: 0)")
    # Display options
    display = parser.add_argument_group("Display options")
    # --no-names stores False into show_names (names shown by default).
    display.add_argument("--no-names", dest="show_names", action="store_false",
                         help="Don't show leaf names")
    display.add_argument("--show-lengths", action="store_true",
                         help="Show branch lengths")
    display.add_argument("--show-support", action="store_true",
                         help="Show support values")
    display.add_argument("--show-scale", action="store_true",
                         help="Show scale bar")
    # Layout options
    layout = parser.add_argument_group("Layout options")
    layout.add_argument("--mode", choices=["r", "c"], default="r",
                        help="Tree mode: r=rectangular, c=circular (default: r)")
    layout.add_argument("--rotation", type=int, default=0,
                        help="Tree rotation in degrees (default: 0)")
    layout.add_argument("--arc-start", type=int, default=0,
                        help="Circular tree start angle (default: 0)")
    layout.add_argument("--arc-span", type=int, default=360,
                        help="Circular tree arc span (default: 360)")
    # Styling options
    styling = parser.add_argument_group("Styling options")
    styling.add_argument("--leaf-color", default="blue",
                         help="Leaf node color (default: blue)")
    styling.add_argument("--leaf-size", type=int, default=6,
                         help="Leaf node size (default: 6)")
    styling.add_argument("--internal-color", default="gray",
                         help="Internal node color (default: gray)")
    styling.add_argument("--internal-size", type=int, default=4,
                         help="Internal node size (default: 4)")
    styling.add_argument("--color-by-support", action="store_true",
                         help="Color internal nodes by support value")
    # Size and spacing
    size = parser.add_argument_group("Size and spacing")
    size.add_argument("--width", type=int, help="Output width")
    size.add_argument("--height", type=int, help="Output height")
    size.add_argument("--units", choices=["px", "mm", "in"],
                      help="Size units (px, mm, in)")
    size.add_argument("--dpi", type=int, help="DPI for raster output")
    size.add_argument("--scale-factor", type=int,
                      help="Branch length scale factor (pixels per unit)")
    size.add_argument("--vertical-margin", type=int, default=10,
                      help="Vertical margin between branches (default: 10)")
    # Other options
    parser.add_argument("--title", help="Tree title")
    args = parser.parse_args()
    # Validate output format: ete3 can only render these three formats.
    output_path = Path(args.output)
    valid_extensions = {".png", ".pdf", ".svg"}
    if output_path.suffix.lower() not in valid_extensions:
        print(f"Error: Output must be PNG, PDF, or SVG file")
        sys.exit(1)
    # Visualize
    visualize_tree(args.input, args.output, args)
if __name__ == "__main__":
    # Script entry point.
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/etetoolkit/scripts/quick_visualize.py",
"license": "MIT License",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/etetoolkit/scripts/tree_operations.py | #!/usr/bin/env python3
"""
Tree operations helper script for common ETE toolkit tasks.
Provides command-line interface for basic tree operations like:
- Format conversion
- Rooting (outgroup, midpoint)
- Pruning
- Basic statistics
- ASCII visualization
"""
import argparse
import sys
from pathlib import Path
try:
from ete3 import Tree
except ImportError:
print("Error: ete3 not installed. Install with: pip install ete3")
sys.exit(1)
def load_tree(tree_file, format_num=0):
    """Parse *tree_file* as Newick; abort the program if parsing fails."""
    path = str(tree_file)
    try:
        tree = Tree(path, format=format_num)
    except Exception as e:
        print(f"Error loading tree: {e}")
        sys.exit(1)
    return tree
def convert_format(tree_file, output, in_format=0, out_format=1):
    """Rewrite a Newick tree from one format number to another."""
    tree = load_tree(tree_file, in_format)
    tree.write(outfile=str(output), format=out_format)
    # NOTE(review): "β" below looks like a mojibake arrow -- confirm intended text.
    print(f"Converted {tree_file} (format {in_format}) β {output} (format {out_format})")
def reroot_tree(tree_file, output, outgroup=None, midpoint=False, format_num=0):
    """Reroot a tree at its midpoint or on a named outgroup, then save it.

    Exits with status 1 when neither rooting method is requested or the
    outgroup taxon cannot be found.
    """
    tree = load_tree(tree_file, format_num)

    if midpoint:
        tree.set_outgroup(tree.get_midpoint_outgroup())
        print(f"Rerooted tree using midpoint method")
    elif outgroup:
        try:
            # `tree & name` looks the node up by name; raises when absent.
            tree.set_outgroup(tree & outgroup)
            print(f"Rerooted tree using outgroup: {outgroup}")
        except Exception as e:
            print(f"Error: Could not find outgroup '{outgroup}': {e}")
            sys.exit(1)
    else:
        print("Error: Must specify either --outgroup or --midpoint")
        sys.exit(1)

    tree.write(outfile=str(output), format=format_num)
    print(f"Saved rerooted tree to: {output}")
def prune_tree(tree_file, output, keep_taxa, preserve_length=True, format_num=0):
    """Reduce the tree to a given set of taxa and save the result.

    *keep_taxa* may be a path to a file with one taxon per line, or a
    comma-separated list of names.
    """
    tree = load_tree(tree_file, format_num)

    # A readable file path takes precedence over an inline list.
    source = Path(keep_taxa)
    if source.exists():
        with open(source) as handle:
            taxa = [entry.strip() for entry in handle if entry.strip()]
    else:
        taxa = [entry.strip() for entry in keep_taxa.split(",")]

    print(f"Pruning tree to {len(taxa)} taxa")
    try:
        tree.prune(taxa, preserve_branch_length=preserve_length)
        tree.write(outfile=str(output), format=format_num)
        print(f"Pruned tree saved to: {output}")
        print(f"Retained {len(tree)} leaves")
    except Exception as e:
        print(f"Error pruning tree: {e}")
        sys.exit(1)
def tree_stats(tree_file, format_num=0):
    """Print leaf/node counts, depth, and branch/support summaries."""
    tree = load_tree(tree_file, format_num)
    leaf, depth = tree.get_farthest_leaf()

    print(f"\n=== Tree Statistics ===")
    print(f"File: {tree_file}")
    print(f"Number of leaves: {len(tree)}")
    print(f"Total nodes: {len(list(tree.traverse()))}")
    print(f"Tree depth: {depth:.4f}")
    print(f"Farthest leaf: {leaf.name}")

    # Branch length statistics (root excluded: its dist is meaningless).
    lengths = [n.dist for n in tree.traverse() if not n.is_root()]
    if lengths:
        print(f"\nBranch length statistics:")
        print(f"  Mean: {sum(lengths)/len(lengths):.4f}")
        print(f"  Min: {min(lengths):.4f}")
        print(f"  Max: {max(lengths):.4f}")

    # Support values are only meaningful on internal nodes.
    supports = [n.support for n in tree.traverse()
                if not n.is_leaf() and hasattr(n, 'support')]
    if supports:
        print(f"\nSupport value statistics:")
        print(f"  Mean: {sum(supports)/len(supports):.2f}")
        print(f"  Min: {min(supports):.2f}")
        print(f"  Max: {max(supports):.2f}")
    print()
def show_ascii(tree_file, format_num=0, show_internal=True):
    """Print an ASCII-art rendering of the tree."""
    print(load_tree(tree_file, format_num).get_ascii(show_internal=show_internal))
def list_leaves(tree_file, format_num=0):
    """Print each leaf name on its own line."""
    for node in load_tree(tree_file, format_num):
        print(node.name)
def main():
    """Dispatch one of the tree-operation subcommands from the CLI."""
    parser = argparse.ArgumentParser(
        description="ETE toolkit tree operations helper",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Convert format
%(prog)s convert input.nw output.nw --in-format 0 --out-format 1
# Midpoint root
%(prog)s reroot input.nw output.nw --midpoint
# Reroot with outgroup
%(prog)s reroot input.nw output.nw --outgroup "Outgroup_species"
# Prune tree
%(prog)s prune input.nw output.nw --keep-taxa "speciesA,speciesB,speciesC"
# Show statistics
%(prog)s stats input.nw
# Display as ASCII
%(prog)s ascii input.nw
# List all leaves
%(prog)s leaves input.nw
"""
    )
    subparsers = parser.add_subparsers(dest="command", help="Command to execute")
    # Convert command
    convert_parser = subparsers.add_parser("convert", help="Convert tree format")
    convert_parser.add_argument("input", help="Input tree file")
    convert_parser.add_argument("output", help="Output tree file")
    convert_parser.add_argument("--in-format", type=int, default=0, help="Input format (default: 0)")
    convert_parser.add_argument("--out-format", type=int, default=1, help="Output format (default: 1)")
    # Reroot command
    reroot_parser = subparsers.add_parser("reroot", help="Reroot tree")
    reroot_parser.add_argument("input", help="Input tree file")
    reroot_parser.add_argument("output", help="Output tree file")
    reroot_parser.add_argument("--outgroup", help="Outgroup taxon name")
    reroot_parser.add_argument("--midpoint", action="store_true", help="Use midpoint rooting")
    reroot_parser.add_argument("--format", type=int, default=0, help="Newick format (default: 0)")
    # Prune command
    prune_parser = subparsers.add_parser("prune", help="Prune tree to specified taxa")
    prune_parser.add_argument("input", help="Input tree file")
    prune_parser.add_argument("output", help="Output tree file")
    prune_parser.add_argument("--keep-taxa", required=True,
                              help="Taxa to keep (comma-separated or file path)")
    prune_parser.add_argument("--no-preserve-length", action="store_true",
                              help="Don't preserve branch lengths")
    prune_parser.add_argument("--format", type=int, default=0, help="Newick format (default: 0)")
    # Stats command
    stats_parser = subparsers.add_parser("stats", help="Display tree statistics")
    stats_parser.add_argument("input", help="Input tree file")
    stats_parser.add_argument("--format", type=int, default=0, help="Newick format (default: 0)")
    # ASCII command
    ascii_parser = subparsers.add_parser("ascii", help="Display tree as ASCII art")
    ascii_parser.add_argument("input", help="Input tree file")
    ascii_parser.add_argument("--format", type=int, default=0, help="Newick format (default: 0)")
    ascii_parser.add_argument("--no-internal", action="store_true",
                              help="Don't show internal node names")
    # Leaves command
    leaves_parser = subparsers.add_parser("leaves", help="List all leaf names")
    leaves_parser.add_argument("input", help="Input tree file")
    leaves_parser.add_argument("--format", type=int, default=0, help="Newick format (default: 0)")
    args = parser.parse_args()
    if not args.command:
        # Subcommand is mandatory; show usage and fail.
        parser.print_help()
        sys.exit(1)
    # Execute command -- note the two negated flags are inverted here so
    # the worker functions receive positive booleans.
    if args.command == "convert":
        convert_format(args.input, args.output, args.in_format, args.out_format)
    elif args.command == "reroot":
        reroot_tree(args.input, args.output, args.outgroup, args.midpoint, args.format)
    elif args.command == "prune":
        prune_tree(args.input, args.output, args.keep_taxa,
                   not args.no_preserve_length, args.format)
    elif args.command == "stats":
        tree_stats(args.input, args.format)
    elif args.command == "ascii":
        show_ascii(args.input, args.format, not args.no_internal)
    elif args.command == "leaves":
        list_leaves(args.input, args.format)
if __name__ == "__main__":
    # Script entry point.
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/etetoolkit/scripts/tree_operations.py",
"license": "MIT License",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/exploratory-data-analysis/scripts/eda_analyzer.py | #!/usr/bin/env python3
"""
Exploratory Data Analysis Analyzer
Analyzes scientific data files and generates comprehensive markdown reports
"""
import os
import sys
from pathlib import Path
from datetime import datetime
import json
def detect_file_type(filepath):
    """
    Detect the file type based on its extension.

    Only the path suffix is inspected; the file need not exist.

    Args:
        filepath: Path to the file (str or Path).

    Returns:
        tuple: (extension, category, description) where `extension` is the
        lowercase suffix without the leading dot, `category` is one of the
        known analysis categories (or 'unknown'), and `description` is a
        human-readable format name.
    """
    # Fix: the previous version computed an unused lowercase `name` local
    # and documented a wrong third return element ("reference_file").
    extension = Path(filepath).suffix.lower()
    # Map extensions to (category, human-readable description)
    extension_map = {
        # Chemistry/Molecular
        'pdb': ('chemistry_molecular', 'Protein Data Bank'),
        'cif': ('chemistry_molecular', 'Crystallographic Information File'),
        'mol': ('chemistry_molecular', 'MDL Molfile'),
        'mol2': ('chemistry_molecular', 'Tripos Mol2'),
        'sdf': ('chemistry_molecular', 'Structure Data File'),
        'xyz': ('chemistry_molecular', 'XYZ Coordinates'),
        'smi': ('chemistry_molecular', 'SMILES String'),
        'smiles': ('chemistry_molecular', 'SMILES String'),
        'pdbqt': ('chemistry_molecular', 'AutoDock PDBQT'),
        'mae': ('chemistry_molecular', 'Maestro Format'),
        'gro': ('chemistry_molecular', 'GROMACS Coordinate File'),
        'log': ('chemistry_molecular', 'Gaussian Log File'),
        'out': ('chemistry_molecular', 'Quantum Chemistry Output'),
        'wfn': ('chemistry_molecular', 'Wavefunction Files'),
        'wfx': ('chemistry_molecular', 'Wavefunction Files'),
        'fchk': ('chemistry_molecular', 'Gaussian Formatted Checkpoint'),
        'cube': ('chemistry_molecular', 'Gaussian Cube File'),
        'dcd': ('chemistry_molecular', 'Binary Trajectory'),
        'xtc': ('chemistry_molecular', 'Compressed Trajectory'),
        'trr': ('chemistry_molecular', 'GROMACS Trajectory'),
        'nc': ('chemistry_molecular', 'Amber NetCDF Trajectory'),
        'netcdf': ('chemistry_molecular', 'Amber NetCDF Trajectory'),
        # Bioinformatics/Genomics
        'fasta': ('bioinformatics_genomics', 'FASTA Format'),
        'fa': ('bioinformatics_genomics', 'FASTA Format'),
        'fna': ('bioinformatics_genomics', 'FASTA Format'),
        'fastq': ('bioinformatics_genomics', 'FASTQ Format'),
        'fq': ('bioinformatics_genomics', 'FASTQ Format'),
        'sam': ('bioinformatics_genomics', 'Sequence Alignment/Map'),
        'bam': ('bioinformatics_genomics', 'Binary Alignment/Map'),
        'cram': ('bioinformatics_genomics', 'CRAM Format'),
        'bed': ('bioinformatics_genomics', 'Browser Extensible Data'),
        'bedgraph': ('bioinformatics_genomics', 'BED with Graph Data'),
        'bigwig': ('bioinformatics_genomics', 'Binary BigWig'),
        'bw': ('bioinformatics_genomics', 'Binary BigWig'),
        'bigbed': ('bioinformatics_genomics', 'Binary BigBed'),
        'bb': ('bioinformatics_genomics', 'Binary BigBed'),
        'gff': ('bioinformatics_genomics', 'General Feature Format'),
        'gff3': ('bioinformatics_genomics', 'General Feature Format'),
        'gtf': ('bioinformatics_genomics', 'Gene Transfer Format'),
        'vcf': ('bioinformatics_genomics', 'Variant Call Format'),
        'bcf': ('bioinformatics_genomics', 'Binary VCF'),
        'gvcf': ('bioinformatics_genomics', 'Genomic VCF'),
        # Microscopy/Imaging
        'tif': ('microscopy_imaging', 'Tagged Image File Format'),
        'tiff': ('microscopy_imaging', 'Tagged Image File Format'),
        'nd2': ('microscopy_imaging', 'Nikon NIS-Elements'),
        'lif': ('microscopy_imaging', 'Leica Image Format'),
        'czi': ('microscopy_imaging', 'Carl Zeiss Image'),
        'oib': ('microscopy_imaging', 'Olympus Image Format'),
        'oif': ('microscopy_imaging', 'Olympus Image Format'),
        'vsi': ('microscopy_imaging', 'Olympus VSI'),
        'ims': ('microscopy_imaging', 'Imaris Format'),
        'lsm': ('microscopy_imaging', 'Zeiss LSM'),
        'stk': ('microscopy_imaging', 'MetaMorph Stack'),
        'dv': ('microscopy_imaging', 'DeltaVision'),
        'mrc': ('microscopy_imaging', 'Medical Research Council'),
        'dm3': ('microscopy_imaging', 'Gatan Digital Micrograph'),
        'dm4': ('microscopy_imaging', 'Gatan Digital Micrograph'),
        'dcm': ('microscopy_imaging', 'DICOM'),
        'nii': ('microscopy_imaging', 'NIfTI'),
        'nrrd': ('microscopy_imaging', 'Nearly Raw Raster Data'),
        # Spectroscopy/Analytical
        'fid': ('spectroscopy_analytical', 'NMR Free Induction Decay'),
        'mzml': ('spectroscopy_analytical', 'Mass Spectrometry Markup Language'),
        'mzxml': ('spectroscopy_analytical', 'Mass Spectrometry XML'),
        'raw': ('spectroscopy_analytical', 'Vendor Raw Files'),
        'd': ('spectroscopy_analytical', 'Agilent Data Directory'),
        'mgf': ('spectroscopy_analytical', 'Mascot Generic Format'),
        'spc': ('spectroscopy_analytical', 'Galactic SPC'),
        'jdx': ('spectroscopy_analytical', 'JCAMP-DX'),
        'jcamp': ('spectroscopy_analytical', 'JCAMP-DX'),
        # Proteomics/Metabolomics
        'pepxml': ('proteomics_metabolomics', 'Trans-Proteomic Pipeline Peptide XML'),
        'protxml': ('proteomics_metabolomics', 'Protein Inference Results'),
        'mzid': ('proteomics_metabolomics', 'Peptide Identification Format'),
        'mztab': ('proteomics_metabolomics', 'Proteomics/Metabolomics Tabular Format'),
        # General Scientific
        'npy': ('general_scientific', 'NumPy Array'),
        'npz': ('general_scientific', 'Compressed NumPy Archive'),
        'csv': ('general_scientific', 'Comma-Separated Values'),
        'tsv': ('general_scientific', 'Tab-Separated Values'),
        'xlsx': ('general_scientific', 'Excel Spreadsheets'),
        'xls': ('general_scientific', 'Excel Spreadsheets'),
        'json': ('general_scientific', 'JavaScript Object Notation'),
        'xml': ('general_scientific', 'Extensible Markup Language'),
        'hdf5': ('general_scientific', 'Hierarchical Data Format 5'),
        'h5': ('general_scientific', 'Hierarchical Data Format 5'),
        'h5ad': ('bioinformatics_genomics', 'Anndata Format'),
        'zarr': ('general_scientific', 'Chunked Array Storage'),
        'parquet': ('general_scientific', 'Apache Parquet'),
        'mat': ('general_scientific', 'MATLAB Data'),
        'fits': ('general_scientific', 'Flexible Image Transport System'),
    }
    ext_clean = extension.lstrip('.')
    if ext_clean in extension_map:
        category, description = extension_map[ext_clean]
        return ext_clean, category, description
    return ext_clean, 'unknown', 'Unknown Format'
def get_file_basic_info(filepath):
    """Collect filesystem-level metadata for *filepath* as a dict."""
    path = Path(filepath)
    info = path.stat()
    return {
        'filename': path.name,
        'path': str(path.absolute()),
        'size_bytes': info.st_size,
        'size_human': format_bytes(info.st_size),
        'modified': datetime.fromtimestamp(info.st_mtime).isoformat(),
        'extension': path.suffix.lower(),
    }
def format_bytes(size):
    """Render a byte count as a human-readable string (B through PB)."""
    labels = ('B', 'KB', 'MB', 'GB', 'TB')
    magnitude = 0
    # Divide down until the value fits its unit or we run out of labels.
    while magnitude < len(labels) and size >= 1024.0:
        size /= 1024.0
        magnitude += 1
    if magnitude < len(labels):
        return f"{size:.2f} {labels[magnitude]}"
    return f"{size:.2f} PB"
def load_reference_info(category, extension):
    """
    Load the reference-markdown section describing this file type.

    Args:
        category: File category (e.g., 'chemistry_molecular')
        extension: File extension (no leading dot)

    Returns:
        dict with 'raw_section' and 'reference_file' keys, or None when no
        reference file or matching section is available.
    """
    import re

    # Map categories to reference files shipped next to this skill.
    category_files = {
        'chemistry_molecular': 'chemistry_molecular_formats.md',
        'bioinformatics_genomics': 'bioinformatics_genomics_formats.md',
        'microscopy_imaging': 'microscopy_imaging_formats.md',
        'spectroscopy_analytical': 'spectroscopy_analytical_formats.md',
        'proteomics_metabolomics': 'proteomics_metabolomics_formats.md',
        'general_scientific': 'general_scientific_formats.md',
    }
    ref_name = category_files.get(category)
    if ref_name is None:
        return None

    # References live in ../references relative to this script.
    ref_file = Path(__file__).parent.parent / 'references' / ref_name
    if not ref_file.exists():
        return None

    try:
        content = ref_file.read_text()
        # Grab the "### .<ext>" heading and everything up to the next heading.
        match = re.search(rf'### \.{extension}[^#]*?(?=###|\Z)', content,
                          re.IGNORECASE | re.DOTALL)
        if match:
            return {
                'raw_section': match.group(0),
                'reference_file': ref_name
            }
    except Exception as e:
        print(f"Error loading reference: {e}", file=sys.stderr)
    return None
def analyze_file(filepath):
    """
    Top-level analysis entry point: gathers file metadata, classifies the
    format, loads reference notes, and runs a category-specific analyzer.

    Returns:
        dict: Analysis results
    """
    extension, category, description = detect_file_type(filepath)
    report = {
        'basic_info': get_file_basic_info(filepath),
        'file_type': {
            'extension': extension,
            'category': category,
            'description': description
        },
        'reference_info': load_reference_info(category, extension),
        'data_analysis': {}
    }

    # Categories with a dedicated deep-dive analyzer.
    analyzers = {
        'general_scientific': analyze_general_scientific,
        'bioinformatics_genomics': analyze_bioinformatics,
        'microscopy_imaging': analyze_imaging,
    }
    handler = analyzers.get(category)
    if handler is not None:
        try:
            report['data_analysis'] = handler(filepath, extension)
        except Exception as e:
            # Keep the report usable even when the deep analysis blows up.
            report['data_analysis']['error'] = str(e)
    return report
def analyze_general_scientific(filepath, extension):
    """Inspect general scientific data files (NumPy, CSV/TSV, JSON, HDF5).

    Returns an empty dict for unhandled extensions; errors are reported
    under an 'error' key instead of raising.
    """
    results = {}
    try:
        if extension in ['npy']:
            import numpy as np
            arr = np.load(filepath)
            numeric = np.issubdtype(arr.dtype, np.number)
            results = {
                'shape': arr.shape,
                'dtype': str(arr.dtype),
                'size': arr.size,
                'ndim': arr.ndim,
                'statistics': {
                    'min': float(np.min(arr)) if numeric else None,
                    'max': float(np.max(arr)) if numeric else None,
                    'mean': float(np.mean(arr)) if numeric else None,
                    'std': float(np.std(arr)) if numeric else None,
                }
            }
        elif extension in ['npz']:
            import numpy as np
            archive = np.load(filepath)
            names = list(archive.files)
            results = {
                'arrays': names,
                'array_count': len(names),
                'array_shapes': {name: archive[name].shape for name in names}
            }
        elif extension in ['csv', 'tsv']:
            import pandas as pd
            delimiter = '\t' if extension == 'tsv' else ','
            # Sample first 10k rows to keep huge files cheap to inspect.
            frame = pd.read_csv(filepath, sep=delimiter, nrows=10000)
            has_numeric = len(frame.select_dtypes(include='number').columns) > 0
            results = {
                'shape': frame.shape,
                'columns': list(frame.columns),
                'dtypes': {col: str(dtype) for col, dtype in frame.dtypes.items()},
                'missing_values': frame.isnull().sum().to_dict(),
                'summary_statistics': frame.describe().to_dict() if has_numeric else {}
            }
        elif extension in ['json']:
            with open(filepath, 'r') as handle:
                payload = json.load(handle)
            results = {
                'type': type(payload).__name__,
                'keys': list(payload.keys()) if isinstance(payload, dict) else None,
                'length': len(payload) if isinstance(payload, (list, dict)) else None
            }
        elif extension in ['h5', 'hdf5']:
            import h5py
            with h5py.File(filepath, 'r') as handle:
                def walk(group, prefix=''):
                    """Recursively map group paths to dataset/group summaries."""
                    found = {}
                    for key in group.keys():
                        path = f"{prefix}/{key}"
                        node = group[key]
                        if isinstance(node, h5py.Dataset):
                            found[path] = {
                                'type': 'dataset',
                                'shape': node.shape,
                                'dtype': str(node.dtype)
                            }
                        elif isinstance(node, h5py.Group):
                            found[path] = {'type': 'group'}
                            found.update(walk(node, path))
                    return found
                results = {
                    'structure': walk(handle),
                    'attributes': dict(handle.attrs)
                }
    except ImportError as e:
        results['error'] = f"Required library not installed: {e}"
    except Exception as e:
        results['error'] = f"Analysis error: {e}"
    return results
def analyze_bioinformatics(filepath, extension):
    """Inspect FASTA/FASTQ sequence files via Biopython.

    Returns an empty dict for unhandled extensions; errors (including a
    missing Biopython install) are reported under an 'error' key.
    """
    results = {}
    try:
        if extension in ['fasta', 'fa', 'fna']:
            from Bio import SeqIO
            records = list(SeqIO.parse(filepath, 'fasta'))
            lengths = [len(rec) for rec in records]
            results = {
                'sequence_count': len(records),
                'total_length': sum(lengths),
                'mean_length': sum(lengths) / len(lengths) if lengths else 0,
                'min_length': min(lengths) if lengths else 0,
                'max_length': max(lengths) if lengths else 0,
                'sequence_ids': [rec.id for rec in records[:10]]  # first 10 only
            }
        elif extension in ['fastq', 'fq']:
            from Bio import SeqIO
            records = []
            for i, rec in enumerate(SeqIO.parse(filepath, 'fastq')):
                records.append(rec)
                if i >= 9999:  # sample first 10k reads
                    break
            lengths = [len(rec) for rec in records]
            qualities = [sum(rec.letter_annotations['phred_quality']) / len(rec)
                         for rec in records]
            results = {
                'read_count_sampled': len(records),
                'mean_length': sum(lengths) / len(lengths) if lengths else 0,
                'mean_quality': sum(qualities) / len(qualities) if qualities else 0,
                'min_length': min(lengths) if lengths else 0,
                'max_length': max(lengths) if lengths else 0,
            }
    except ImportError as e:
        results['error'] = f"Required library not installed (try: pip install biopython): {e}"
    except Exception as e:
        results['error'] = f"Analysis error: {e}"
    return results
def analyze_imaging(filepath, extension):
    """Inspect raster image files via Pillow (TIFF/PNG/JPEG).

    Returns an empty dict for unhandled extensions; errors (including a
    missing Pillow install) are reported under an 'error' key.
    """
    results = {}
    try:
        if extension in ['tif', 'tiff', 'png', 'jpg', 'jpeg']:
            from PIL import Image
            import numpy as np
            image = Image.open(filepath)
            pixels = np.array(image)
            results = {
                'size': image.size,
                'mode': image.mode,
                'format': image.format,
                'shape': pixels.shape,
                'dtype': str(pixels.dtype),
                'value_range': [int(pixels.min()), int(pixels.max())],
                'mean_intensity': float(pixels.mean()),
            }
            if extension in ['tif', 'tiff']:
                # Count pages of a multi-page TIFF by seeking until EOFError.
                try:
                    pages = 0
                    while True:
                        image.seek(pages)
                        pages += 1
                except EOFError:
                    results['page_count'] = pages
    except ImportError as e:
        results['error'] = f"Required library not installed (try: pip install pillow): {e}"
    except Exception as e:
        results['error'] = f"Analysis error: {e}"
    return results
def generate_markdown_report(analysis, output_path=None):
    """
    Generate a comprehensive markdown report from analysis results.

    Args:
        analysis: Analysis results dictionary; must contain 'basic_info'
            and 'file_type' entries, with optional 'reference_info' and
            'data_analysis' sections.
        output_path: Path to save the report (if None, prints to stdout)

    Returns:
        The rendered markdown report as a single string.
    """
    lines = []
    # Title — include the analyzed file's name so reports are identifiable.
    # (Bug fix: `filename` was computed but never used; the title printed a
    # hardcoded placeholder instead of the actual file name.)
    filename = analysis['basic_info']['filename']
    lines.append(f"# Exploratory Data Analysis Report: {filename}\n")
    lines.append(f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
    lines.append("---\n")
    # Basic Information
    lines.append("## Basic Information\n")
    basic = analysis['basic_info']
    lines.append(f"- **Filename:** `{basic['filename']}`")
    lines.append(f"- **Full Path:** `{basic['path']}`")
    lines.append(f"- **File Size:** {basic['size_human']} ({basic['size_bytes']:,} bytes)")
    lines.append(f"- **Last Modified:** {basic['modified']}")
    lines.append(f"- **Extension:** `.{analysis['file_type']['extension']}`\n")
    # File Type Information
    lines.append("## File Type\n")
    ft = analysis['file_type']
    lines.append(f"- **Category:** {ft['category'].replace('_', ' ').title()}")
    lines.append(f"- **Description:** {ft['description']}\n")
    # Reference Information (optional section from a format-reference lookup)
    if analysis.get('reference_info'):
        lines.append("## Format Reference\n")
        ref = analysis['reference_info']
        if 'raw_section' in ref:
            lines.append(ref['raw_section'])
        lines.append(f"\n*Reference: {ref['reference_file']}*\n")
    # Data Analysis (optional; may carry an 'error' key instead of stats)
    if analysis.get('data_analysis'):
        lines.append("## Data Analysis\n")
        data = analysis['data_analysis']
        if 'error' in data:
            lines.append(f"β οΈ **Analysis Error:** {data['error']}\n")
        else:
            # Format the data analysis based on what's present
            lines.append("### Summary Statistics\n")
            lines.append("```json")
            lines.append(json.dumps(data, indent=2, default=str))
            lines.append("```\n")
    # Recommendations
    lines.append("## Recommendations for Further Analysis\n")
    lines.append(f"Based on the file type (`.{analysis['file_type']['extension']}`), consider the following analyses:\n")
    # Add specific recommendations based on category
    category = analysis['file_type']['category']
    if category == 'general_scientific':
        lines.append("- Statistical distribution analysis")
        lines.append("- Missing value imputation strategies")
        lines.append("- Correlation analysis between variables")
        lines.append("- Outlier detection and handling")
        lines.append("- Dimensionality reduction (PCA, t-SNE)")
    elif category == 'bioinformatics_genomics':
        lines.append("- Sequence quality control and filtering")
        lines.append("- GC content analysis")
        lines.append("- Read alignment and mapping statistics")
        lines.append("- Variant calling and annotation")
        lines.append("- Differential expression analysis")
    elif category == 'microscopy_imaging':
        lines.append("- Image quality assessment")
        lines.append("- Background correction and normalization")
        lines.append("- Segmentation and object detection")
        lines.append("- Colocalization analysis")
        lines.append("- Intensity measurements and quantification")
    lines.append("")
    # Footer
    lines.append("---")
    lines.append("*This report was generated by the exploratory-data-analysis skill.*")
    report = '\n'.join(lines)
    if output_path:
        with open(output_path, 'w') as f:
            f.write(report)
        print(f"Report saved to: {output_path}")
    else:
        print(report)
    return report
def main():
    """Main CLI interface.

    Usage: ``python eda_analyzer.py <filepath> [output.md]``

    Analyzes the given file and writes a markdown EDA report; when no
    output path is given, the report is placed next to the input file.
    """
    if len(sys.argv) < 2:
        print("Usage: python eda_analyzer.py <filepath> [output.md]")
        print("  filepath: Path to the data file to analyze")
        print("  output.md: Optional output path for markdown report")
        sys.exit(1)
    filepath = sys.argv[1]
    output_path = sys.argv[2] if len(sys.argv) > 2 else None
    if not os.path.exists(filepath):
        print(f"Error: File not found: {filepath}")
        sys.exit(1)
    # If no output path specified, use the input filename
    # (e.g. data.csv -> data_eda_report.md in the same directory).
    if output_path is None:
        input_path = Path(filepath)
        output_path = input_path.parent / f"{input_path.stem}_eda_report.md"
    print(f"Analyzing: {filepath}")
    analysis = analyze_file(filepath)
    print(f"\nGenerating report...")
    generate_markdown_report(analysis, output_path)
    print(f"\nβ Analysis complete!")


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/exploratory-data-analysis/scripts/eda_analyzer.py",
"license": "MIT License",
"lines": 458,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/fda-database/scripts/fda_examples.py | #!/usr/bin/env python3
"""
FDA API Usage Examples
Demonstrates common use cases for querying FDA databases.
Usage:
python fda_examples.py
"""
import os
from fda_query import FDAQuery
def example_drug_safety_profile(fda, drug_name):
    """
    Create a comprehensive safety profile for a drug.

    Prints, in order:
    - Total adverse events
    - Most common reactions
    - Serious events
    - Recent recalls

    Args:
        fda: FDAQuery client used for all API calls.
        drug_name: Drug name, matched as a wildcard against
            ``patient.drug.medicinalproduct``.
    """
    print(f"\n{'='*60}")
    print(f"DRUG SAFETY PROFILE: {drug_name}")
    print(f"{'='*60}\n")
    # 1. Count total adverse events
    # limit=1 because only the `meta.results.total` count is needed here.
    events = fda.query_drug_events(drug_name, limit=1)
    if "meta" in events and "results" in events["meta"]:
        total = events["meta"]["results"].get("total", 0)
        print(f"Total Adverse Event Reports: {total:,}")
    # 2. Most common reactions
    print(f"\nMost Common Adverse Reactions:")
    reactions = fda.count_by_field(
        "drug", "event",
        search=f"patient.drug.medicinalproduct:*{drug_name}*",
        field="patient.reaction.reactionmeddrapt",
        exact=True
    )
    if "results" in reactions:
        # Show the top 10 reaction terms by report count.
        for i, item in enumerate(reactions["results"][:10], 1):
            print(f"  {i}. {item['term']}: {item['count']:,} reports")
    # 3. Serious events
    serious_events = fda.query(
        "drug", "event",
        search=f"patient.drug.medicinalproduct:*{drug_name}*+AND+serious:1",
        limit=1
    )
    if "meta" in serious_events and "results" in serious_events["meta"]:
        serious_total = serious_events["meta"]["results"].get("total", 0)
        print(f"\nSerious Adverse Events: {serious_total:,}")
    # 4. Check for recent recalls
    recalls = fda.query_drug_recalls(drug_name=drug_name)
    if "results" in recalls and len(recalls["results"]) > 0:
        print(f"\nRecent Recalls: {len(recalls['results'])}")
        for recall in recalls["results"][:3]:
            print(f"  - {recall.get('reason_for_recall', 'Unknown')} "
                  f"(Class {recall.get('classification', 'Unknown')})")
    else:
        print(f"\nRecent Recalls: None found")
def example_device_surveillance(fda, device_name):
    """
    Monitor medical device safety.

    Prints a short surveillance report covering:
    - Total adverse events
    - Event types (death, injury, malfunction)
    - A sample of recent events
    """
    banner = '=' * 60
    print(f"\n{banner}")
    print(f"DEVICE SURVEILLANCE: {device_name}")
    print(f"{banner}\n")

    # 1. Overall adverse-event count (only the total is needed).
    overview = fda.query_device_events(device_name, limit=1)
    if "meta" in overview and "results" in overview["meta"]:
        report_total = overview["meta"]["results"].get("total", 0)
        print(f"Total Adverse Event Reports: {report_total:,}")

    # 2. How events break down by type.
    print(f"\nEvent Type Distribution:")
    distribution = fda.count_by_field(
        "device", "event",
        search=f"device.brand_name:*{device_name}*",
        field="event_type",
        exact=False
    )
    if "results" in distribution:
        for bucket in distribution["results"]:
            print(f"  {bucket['term']}: {bucket['count']:,}")

    # 3. Show a few of the most recent events.
    recent = fda.query_device_events(device_name, limit=5)
    if "results" in recent and len(recent["results"]) > 0:
        print(f"\nRecent Events (sample):")
        for idx, record in enumerate(recent["results"][:3], 1):
            kind = record.get("event_type", "Unknown")
            received = record.get("date_received", "Unknown")
            print(f"  {idx}. Type: {kind}, Date: {received}")
def example_food_recall_monitoring(fda, allergen):
    """
    Monitor food recalls for specific allergen.

    Args:
        fda: FDAQuery instance
        allergen: Allergen to monitor (e.g., "peanut", "milk", "soy")
    """
    divider = '=' * 60
    print(f"\n{divider}")
    print(f"ALLERGEN RECALL MONITORING: {allergen}")
    print(f"{divider}\n")

    # Search enforcement reports whose recall reason mentions the allergen.
    recalls = fda.query_food_recalls(reason=allergen)
    if "results" in recalls and len(recalls["results"]) > 0:
        matches = recalls["results"]
        print(f"Found {len(matches)} recalls mentioning '{allergen}':\n")
        for entry in matches[:10]:
            # Enforcement records may omit fields; fall back to "Unknown".
            print(f"Product: {entry.get('product_description', 'Unknown product')}")
            print(f"  Classification: {entry.get('classification', 'Unknown')}")
            print(f"  Reason: {entry.get('reason_for_recall', 'Unknown')}")
            print(f"  Date: {entry.get('recall_initiation_date', 'Unknown')}")
            print(f"  Status: {entry.get('status', 'Unknown')}")
            print()
    else:
        print(f"No recent recalls found for allergen: {allergen}")
def example_substance_lookup(fda, substance_name):
    """
    Look up substance information.

    Prints for each of the top matches:
    - UNII code
    - CAS numbers
    - Chemical structure
    - Related substances

    Args:
        fda: FDAQuery client used for the lookup.
        substance_name: Name matched (wildcard) against the substance
            name index.
    """
    print(f"\n{'='*60}")
    print(f"SUBSTANCE INFORMATION: {substance_name}")
    print(f"{'='*60}\n")
    substances = fda.query_substance_by_name(substance_name)
    if "results" in substances and len(substances["results"]) > 0:
        # Only the first three matches are shown.
        for i, substance in enumerate(substances["results"][:3], 1):
            print(f"Match {i}:")
            # Names
            names = substance.get("names", [])
            if names:
                # Prefer the name flagged as preferred; otherwise first listed.
                preferred = next((n["name"] for n in names if n.get("preferred")), names[0].get("name"))
                print(f"  Name: {preferred}")
            # UNII
            unii = substance.get("approvalID")
            if unii:
                print(f"  UNII: {unii}")
            # CAS numbers
            codes = substance.get("codes", [])
            cas_numbers = [c["code"] for c in codes if "CAS" in c.get("codeSystem", "")]
            if cas_numbers:
                print(f"  CAS: {', '.join(cas_numbers)}")
            # Structure
            if "structure" in substance:
                structure = substance["structure"]
                formula = structure.get("formula")
                mol_weight = structure.get("molecularWeight")
                if formula:
                    print(f"  Formula: {formula}")
                if mol_weight:
                    print(f"  Molecular Weight: {mol_weight}")
            # Substance class
            substance_class = substance.get("substanceClass")
            if substance_class:
                print(f"  Class: {substance_class}")
            print()
    else:
        print(f"No substances found matching: {substance_name}")
def example_comparative_drug_analysis(fda, drug_list):
    """
    Compare safety profiles of multiple drugs.

    Args:
        fda: FDAQuery instance
        drug_list: List of drug names to compare
    """
    def total_from(response):
        # Pull the overall hit count out of an openFDA response envelope.
        if "meta" in response and "results" in response["meta"]:
            return response["meta"]["results"].get("total", 0)
        return 0

    bar = '=' * 60
    print(f"\n{bar}")
    print(f"COMPARATIVE DRUG ANALYSIS")
    print(f"{bar}\n")
    print(f"Comparing: {', '.join(drug_list)}\n")

    comparison = {}
    for name in drug_list:
        all_events = total_from(fda.query_drug_events(name, limit=1))
        serious_events = total_from(fda.query(
            "drug", "event",
            search=f"patient.drug.medicinalproduct:*{name}*+AND+serious:1",
            limit=1
        ))
        pct_serious = (serious_events / all_events * 100) if all_events > 0 else 0
        comparison[name] = {
            "total_events": all_events,
            "serious_events": serious_events,
            "serious_rate": pct_serious
        }

    # Render a fixed-width comparison table.
    print(f"{'Drug':<20} {'Total Events':>15} {'Serious Events':>15} {'Serious %':>12}")
    print("-" * 65)
    for name, stats in comparison.items():
        print(f"{name:<20} {stats['total_events']:>15,} "
              f"{stats['serious_events']:>15,} {stats['serious_rate']:>11.2f}%")
def example_veterinary_analysis(fda, species, drug_name):
    """
    Analyze veterinary drug adverse events by species.

    Prints the report count, the share of serious events, and the most
    common VeDDRA reaction terms.

    Args:
        fda: FDAQuery instance
        species: Animal species (e.g., "Dog", "Cat", "Horse")
        drug_name: Veterinary drug name
    """
    print(f"\n{'='*60}")
    print(f"VETERINARY DRUG ANALYSIS: {drug_name} in {species}")
    print(f"{'='*60}\n")
    events = fda.query_animal_events(species=species, drug_name=drug_name)
    if "results" in events and len(events["results"]) > 0:
        print(f"Found {len(events['results'])} adverse event reports\n")
        # Collect reactions
        reactions = []
        serious_count = 0
        for event in events["results"]:
            # serious_ae is compared against the string "true" here, i.e.
            # the field is treated as a string flag rather than a boolean.
            if event.get("serious_ae") == "true":
                serious_count += 1
            if "reaction" in event:
                for reaction in event["reaction"]:
                    if "veddra_term_name" in reaction:
                        reactions.append(reaction["veddra_term_name"])
        print(f"Serious Events: {serious_count} ({serious_count/len(events['results'])*100:.1f}%)")
        # Count reactions
        from collections import Counter
        reaction_counts = Counter(reactions)
        print(f"\nMost Common Reactions:")
        for reaction, count in reaction_counts.most_common(10):
            print(f"  {reaction}: {count}")
    else:
        print(f"No adverse events found")
def main():
    """Run example analyses.

    Reads an optional FDA_API_KEY from the environment, then runs each
    demo in sequence. Any exception ends the demo run with a message.
    """
    # Get API key from environment
    api_key = os.environ.get("FDA_API_KEY")
    if not api_key:
        print("Warning: No FDA_API_KEY found in environment.")
        print("You can still use the API but with lower rate limits.")
        print("Set FDA_API_KEY environment variable for better performance.\n")
    # Initialize FDA query client
    fda = FDAQuery(api_key=api_key)
    # Run examples
    # Demo-level guard: a single broad handler so one failed example
    # (rate limit, connectivity) stops the run with a friendly message.
    try:
        # Example 1: Drug safety profile
        example_drug_safety_profile(fda, "aspirin")
        # Example 2: Device surveillance
        example_device_surveillance(fda, "pacemaker")
        # Example 3: Food recall monitoring
        example_food_recall_monitoring(fda, "undeclared peanut")
        # Example 4: Substance lookup
        example_substance_lookup(fda, "ibuprofen")
        # Example 5: Comparative analysis
        example_comparative_drug_analysis(fda, ["aspirin", "ibuprofen", "naproxen"])
        # Example 6: Veterinary analysis
        example_veterinary_analysis(fda, "Dog", "flea collar")
    except Exception as e:
        print(f"\nError running examples: {e}")
        print("This may be due to API rate limits or connectivity issues.")


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/fda-database/scripts/fda_examples.py",
"license": "MIT License",
"lines": 265,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/fda-database/scripts/fda_query.py | #!/usr/bin/env python3
"""
FDA API Query Helper
Comprehensive utility for querying FDA databases through openFDA API.
Includes error handling, rate limiting, caching, and common query patterns.
Usage:
from fda_query import FDAQuery
fda = FDAQuery(api_key="YOUR_API_KEY")
results = fda.query_drug_events(drug_name="aspirin", limit=100)
"""
import requests
import time
import json
import hashlib
from pathlib import Path
from datetime import datetime, timedelta
from collections import deque, Counter
from typing import Dict, List, Optional, Any
class RateLimiter:
    """Throttle outgoing requests to stay within a per-minute quota."""

    def __init__(self, max_per_minute: int = 240):
        self.max_per_minute = max_per_minute
        # Timestamps of requests made within the sliding 60-second window.
        self.requests = deque()

    def wait_if_needed(self):
        """Block just long enough to keep the request rate under the quota."""
        window = self.requests
        now = time.time()
        # Drop timestamps that have aged out of the 60-second window.
        while window and now - window[0] > 60:
            window.popleft()
        # At capacity: sleep until the oldest request leaves the window.
        if len(window) >= self.max_per_minute:
            pause = 60 - (now - window[0]) + 0.1
            if pause > 0:
                print(f"Rate limit approaching. Waiting {pause:.1f} seconds...")
                time.sleep(pause)
                window.popleft()
        window.append(time.time())
class FDACache:
    """Simple file-based cache for FDA API responses.

    Responses are stored as one JSON file per (url, params) pair under
    ``cache_dir`` and are considered stale after ``ttl`` seconds.
    """

    def __init__(self, cache_dir: str = "fda_cache", ttl: int = 3600):
        self.cache_dir = Path(cache_dir)
        self.cache_dir.mkdir(exist_ok=True)
        self.ttl = ttl

    def _get_cache_key(self, url: str, params: Dict) -> str:
        """Generate cache key from URL and params."""
        # sort_keys makes the key independent of param insertion order.
        fingerprint = f"{url}_{json.dumps(params, sort_keys=True)}"
        return hashlib.md5(fingerprint.encode()).hexdigest()

    def get(self, url: str, params: Dict) -> Optional[Dict]:
        """Return the cached response, or None if absent or expired."""
        entry = self.cache_dir / f"{self._get_cache_key(url, params)}.json"
        if not entry.exists():
            return None
        if time.time() - entry.stat().st_mtime >= self.ttl:
            return None
        with open(entry, 'r') as f:
            return json.load(f)

    def set(self, url: str, params: Dict, data: Dict):
        """Cache response data."""
        entry = self.cache_dir / f"{self._get_cache_key(url, params)}.json"
        with open(entry, 'w') as f:
            json.dump(data, f)
class FDAQuery:
    """Main class for querying FDA databases.

    Wraps the openFDA REST API (https://api.fda.gov) with per-minute
    rate limiting, optional on-disk response caching, and convenience
    methods for the drug, device, food, animal/veterinary, and
    substance endpoints.
    """

    BASE_URL = "https://api.fda.gov"

    def __init__(self, api_key: Optional[str] = None, use_cache: bool = True,
                 cache_ttl: int = 3600, rate_limit: int = 240):
        """
        Initialize FDA query client.

        Args:
            api_key: FDA API key (optional but recommended)
            use_cache: Whether to use response caching
            cache_ttl: Cache time-to-live in seconds
            rate_limit: Requests per minute limit
        """
        self.api_key = api_key
        self.rate_limiter = RateLimiter(max_per_minute=rate_limit)
        self.cache = FDACache(ttl=cache_ttl) if use_cache else None

    def _build_url(self, category: str, endpoint: str) -> str:
        """Build full API endpoint URL."""
        return f"{self.BASE_URL}/{category}/{endpoint}.json"

    def _make_request(self, url: str, params: Dict, use_cache: bool = True) -> Dict:
        """
        Make API request with error handling, rate limiting, and caching.

        Args:
            url: Full API endpoint URL
            params: Query parameters (mutated in place: the api_key is
                added here, before the cache lookup, so cache keys
                include the key)
            use_cache: Whether to use cache for this request

        Returns:
            API response as dictionary; error conditions are reported
            via an "error" key rather than raised.
        """
        # Add API key if available
        if self.api_key:
            params["api_key"] = self.api_key
        # Check cache
        if use_cache and self.cache:
            cached = self.cache.get(url, params)
            if cached:
                return cached
        # Rate limiting
        self.rate_limiter.wait_if_needed()
        # Make request
        try:
            response = requests.get(url, params=params, timeout=30)
            response.raise_for_status()
            data = response.json()
            # Cache successful response
            if use_cache and self.cache:
                self.cache.set(url, params, data)
            return data
        except requests.exceptions.HTTPError as e:
            if response.status_code == 404:
                return {"error": "No results found", "results": []}
            elif response.status_code == 429:
                # Rate limit exceeded, wait and retry once
                # NOTE(review): the retry recurses with no retry cap, so
                # repeated 429 responses keep waiting/retrying indefinitely.
                print("Rate limit exceeded. Waiting 60 seconds...")
                time.sleep(60)
                return self._make_request(url, params, use_cache=False)
            elif response.status_code == 400:
                return {"error": f"Invalid query: {response.text}"}
            else:
                return {"error": f"HTTP error {response.status_code}: {e}"}
        except requests.exceptions.RequestException as e:
            return {"error": f"Request error: {e}"}

    def query(self, category: str, endpoint: str, search: Optional[str] = None,
              limit: int = 100, skip: int = 0, count: Optional[str] = None,
              sort: Optional[str] = None) -> Dict:
        """
        Generic query method for any FDA endpoint.

        Args:
            category: API category (drug, device, food, animalandveterinary, other)
            endpoint: Specific endpoint (event, label, enforcement, etc.)
            search: Search query string
            limit: Maximum results to return (1-1000)
            skip: Number of results to skip (for pagination)
            count: Field to count/aggregate by
            sort: Field to sort by (e.g., "receivedate:desc")

        Returns:
            API response dictionary
        """
        url = self._build_url(category, endpoint)
        params = {}
        if search:
            params["search"] = search
        # NOTE(review): truthiness checks mean limit=0 / skip=0 omit the
        # parameter entirely rather than sending a zero value.
        if limit:
            params["limit"] = min(limit, 1000)
        if skip:
            params["skip"] = skip
        if count:
            params["count"] = count
        if sort:
            params["sort"] = sort
        return self._make_request(url, params)

    def query_all(self, category: str, endpoint: str, search: str,
                  max_results: int = 5000, batch_size: int = 100) -> List[Dict]:
        """
        Query and retrieve all results with automatic pagination.

        Args:
            category: API category
            endpoint: Specific endpoint
            search: Search query string
            max_results: Maximum total results to retrieve
            batch_size: Results per request

        Returns:
            List of all result records
        """
        all_results = []
        skip = 0
        while len(all_results) < max_results:
            data = self.query(
                category=category,
                endpoint=endpoint,
                search=search,
                limit=batch_size,
                skip=skip
            )
            # Stop on error or when the response carries no result list.
            if "error" in data or "results" not in data:
                break
            results = data["results"]
            if not results:
                break
            all_results.extend(results)
            # A short page means the server has no further results.
            if len(results) < batch_size:
                break
            skip += batch_size
        return all_results[:max_results]

    # Drug-specific methods
    def query_drug_events(self, drug_name: str, limit: int = 100) -> Dict:
        """Query drug adverse events (wildcard match on product name)."""
        search = f"patient.drug.medicinalproduct:*{drug_name}*"
        return self.query("drug", "event", search=search, limit=limit)

    def query_drug_label(self, drug_name: str, brand: bool = True) -> Dict:
        """Query drug labeling information by brand or generic name."""
        field = "openfda.brand_name" if brand else "openfda.generic_name"
        search = f"{field}:{drug_name}"
        return self.query("drug", "label", search=search, limit=1)

    def query_drug_ndc(self, ndc: Optional[str] = None,
                       manufacturer: Optional[str] = None) -> Dict:
        """Query National Drug Code directory.

        Raises:
            ValueError: If neither ndc nor manufacturer is provided.
        """
        if ndc:
            search = f"product_ndc:{ndc}"
        elif manufacturer:
            search = f"labeler_name:*{manufacturer}*"
        else:
            raise ValueError("Must provide either ndc or manufacturer")
        return self.query("drug", "ndc", search=search, limit=100)

    def query_drug_recalls(self, drug_name: Optional[str] = None,
                           classification: Optional[str] = None) -> Dict:
        """Query drug recalls, most recent first."""
        search_parts = []
        if drug_name:
            search_parts.append(f"product_description:*{drug_name}*")
        if classification:
            search_parts.append(f"classification:Class+{classification}")
        search = "+AND+".join(search_parts) if search_parts else None
        return self.query("drug", "enforcement", search=search, limit=100,
                          sort="report_date:desc")

    # Device-specific methods
    def query_device_events(self, device_name: str, limit: int = 100) -> Dict:
        """Query device adverse events (wildcard match on brand name)."""
        search = f"device.brand_name:*{device_name}*"
        return self.query("device", "event", search=search, limit=limit)

    def query_device_510k(self, applicant: Optional[str] = None,
                          device_name: Optional[str] = None) -> Dict:
        """Query 510(k) clearances.

        Raises:
            ValueError: If neither applicant nor device_name is provided.
        """
        if applicant:
            search = f"applicant:*{applicant}*"
        elif device_name:
            search = f"device_name:*{device_name}*"
        else:
            raise ValueError("Must provide either applicant or device_name")
        return self.query("device", "510k", search=search, limit=100)

    def query_device_classification(self, product_code: str) -> Dict:
        """Query device classification by product code."""
        search = f"product_code:{product_code}"
        return self.query("device", "classification", search=search, limit=1)

    # Food-specific methods
    def query_food_events(self, product_name: Optional[str] = None,
                          industry: Optional[str] = None) -> Dict:
        """Query food adverse events; with no filter, any record with a
        report_number matches."""
        if product_name:
            search = f"products.name_brand:*{product_name}*"
        elif industry:
            search = f"products.industry_name:*{industry}*"
        else:
            search = "_exists_:report_number"
        return self.query("food", "event", search=search, limit=100)

    def query_food_recalls(self, product: Optional[str] = None,
                           reason: Optional[str] = None,
                           classification: Optional[str] = None) -> Dict:
        """Query food recalls, most recent first; filters are AND-ed."""
        search_parts = []
        if product:
            search_parts.append(f"product_description:*{product}*")
        if reason:
            search_parts.append(f"reason_for_recall:*{reason}*")
        if classification:
            search_parts.append(f"classification:Class+{classification}")
        search = "+AND+".join(search_parts) if search_parts else "_exists_:recall_number"
        return self.query("food", "enforcement", search=search, limit=100,
                          sort="report_date:desc")

    # Animal & Veterinary methods
    def query_animal_events(self, species: Optional[str] = None,
                            drug_name: Optional[str] = None) -> Dict:
        """Query animal drug adverse events; filters are AND-ed."""
        search_parts = []
        if species:
            search_parts.append(f"animal.species:*{species}*")
        if drug_name:
            search_parts.append(f"drug.brand_name:*{drug_name}*")
        search = "+AND+".join(search_parts) if search_parts else "_exists_:unique_aer_id_number"
        return self.query("animalandveterinary", "event", search=search, limit=100)

    # Substance methods
    def query_substance_by_unii(self, unii: str) -> Dict:
        """Query substance by UNII code (exact match, single result)."""
        search = f"approvalID:{unii}"
        return self.query("other", "substance", search=search, limit=1)

    def query_substance_by_name(self, name: str) -> Dict:
        """Query substance by name (wildcard match, up to 10 results)."""
        search = f"names.name:*{name}*"
        return self.query("other", "substance", search=search, limit=10)

    # Analysis methods
    def count_by_field(self, category: str, endpoint: str,
                       search: str, field: str, exact: bool = True) -> Dict:
        """
        Count and aggregate results by a specific field.

        Args:
            category: API category
            endpoint: Specific endpoint
            search: Search query
            field: Field to count by
            exact: Use exact phrase matching (appends ".exact" unless
                the field already ends with it)

        Returns:
            Count results
        """
        count_field = f"{field}.exact" if exact and not field.endswith(".exact") else field
        return self.query(category, endpoint, search=search, count=count_field)

    def get_date_range_data(self, category: str, endpoint: str,
                            date_field: str, days_back: int = 30,
                            additional_search: Optional[str] = None) -> List[Dict]:
        """
        Get data for a specific date range ending today.

        Args:
            category: API category
            endpoint: Specific endpoint
            date_field: Date field name
            days_back: Number of days to look back
            additional_search: Additional search criteria

        Returns:
            List of results
        """
        end_date = datetime.now()
        start_date = end_date - timedelta(days=days_back)
        # openFDA date-range syntax: field:[YYYYMMDD+TO+YYYYMMDD]
        date_range = f"[{start_date.strftime('%Y%m%d')}+TO+{end_date.strftime('%Y%m%d')}]"
        search = f"{date_field}:{date_range}"
        if additional_search:
            search = f"{search}+AND+{additional_search}"
        return self.query_all(category, endpoint, search=search)
def main():
    """Example usage.

    Demonstrates the three most common call patterns: listing adverse
    events, counting by field, and fetching a drug label.
    """
    import os
    # Get API key from environment or use None
    api_key = os.environ.get("FDA_API_KEY")
    # Initialize client
    fda = FDAQuery(api_key=api_key)
    # Example 1: Query drug adverse events
    print("Querying aspirin adverse events...")
    events = fda.query_drug_events("aspirin", limit=10)
    if "results" in events:
        print(f"Found {len(events['results'])} events")
    # Example 2: Count reactions
    print("\nCounting reactions...")
    counts = fda.count_by_field(
        "drug", "event",
        search="patient.drug.medicinalproduct:aspirin",
        field="patient.reaction.reactionmeddrapt"
    )
    if "results" in counts:
        # Top five reaction terms by report count.
        for item in counts["results"][:5]:
            print(f"  {item['term']}: {item['count']}")
    # Example 3: Get drug label
    print("\nGetting drug label...")
    label = fda.query_drug_label("Lipitor", brand=True)
    if "results" in label and len(label["results"]) > 0:
        result = label["results"][0]
        if "indications_and_usage" in result:
            # Labels can be long; show only the first 200 characters.
            print(f"  Indications: {result['indications_and_usage'][0][:200]}...")


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/fda-database/scripts/fda_query.py",
"license": "MIT License",
"lines": 350,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/gene-database/scripts/batch_gene_lookup.py | #!/usr/bin/env python3
"""
Batch gene lookup using NCBI APIs.
This script efficiently processes multiple gene queries with proper
rate limiting and error handling.
"""
import argparse
import json
import sys
import time
import urllib.parse
import urllib.request
from typing import Optional, List, Dict, Any
def read_gene_list(filepath: str) -> List[str]:
    """
    Read gene identifiers from a file (one per line).

    Blank lines and surrounding whitespace are ignored. On any read
    failure an error is printed to stderr and the process exits with
    status 1.

    Args:
        filepath: Path to file containing gene symbols or IDs

    Returns:
        List of gene identifiers
    """
    try:
        with open(filepath, 'r') as handle:
            stripped = (raw.strip() for raw in handle)
            return [entry for entry in stripped if entry]
    except FileNotFoundError:
        print(f"Error: File '{filepath}' not found", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Error reading file: {e}", file=sys.stderr)
        sys.exit(1)
def batch_esearch(queries: List[str], organism: Optional[str] = None,
                  api_key: Optional[str] = None) -> Dict[str, str]:
    """
    Search for multiple gene symbols and return their IDs.

    One NCBI esearch request is issued per symbol, with a delay between
    requests to respect NCBI rate limits. Per-symbol failures are
    reported in the result mapping rather than raised.

    Args:
        queries: List of gene symbols
        organism: Optional organism filter
        api_key: Optional NCBI API key

    Returns:
        Dictionary mapping gene symbol to Gene ID (or 'NOT_FOUND' when
        the search returned no hits, or 'ERROR' on request failure)
    """
    base_url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
    results = {}
    # Rate limiting
    delay = 0.1 if api_key else 0.34  # 10 req/sec with key, 3 req/sec without
    for query in queries:
        # Build search term
        search_term = f"{query}[gene]"
        if organism:
            search_term += f" AND {organism}[organism]"
        params = {
            'db': 'gene',
            'term': search_term,
            'retmax': 1,  # only the top hit is kept per symbol
            'retmode': 'json'
        }
        if api_key:
            params['api_key'] = api_key
        url = f"{base_url}esearch.fcgi?{urllib.parse.urlencode(params)}"
        try:
            with urllib.request.urlopen(url) as response:
                data = json.loads(response.read().decode())
                if 'esearchresult' in data and 'idlist' in data['esearchresult']:
                    id_list = data['esearchresult']['idlist']
                    results[query] = id_list[0] if id_list else 'NOT_FOUND'
                else:
                    results[query] = 'ERROR'
        except Exception as e:
            print(f"Error searching for {query}: {e}", file=sys.stderr)
            results[query] = 'ERROR'
        time.sleep(delay)
    return results
def batch_esummary(gene_ids: List[str], api_key: Optional[str] = None,
                   chunk_size: int = 200) -> Dict[str, Dict[str, Any]]:
    """
    Get summaries for multiple genes in batches.

    IDs are submitted to NCBI esummary in chunks of ``chunk_size``,
    with a delay between requests to respect NCBI rate limits. A failed
    chunk is logged to stderr and its IDs are simply absent from the
    returned mapping.

    Args:
        gene_ids: List of Gene IDs
        api_key: Optional NCBI API key
        chunk_size: Number of IDs per request (max 500)

    Returns:
        Dictionary mapping Gene ID to summary data
    """
    base_url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
    all_results = {}
    # Rate limiting
    delay = 0.1 if api_key else 0.34
    # Process in chunks
    for i in range(0, len(gene_ids), chunk_size):
        chunk = gene_ids[i:i + chunk_size]
        params = {
            'db': 'gene',
            'id': ','.join(chunk),
            'retmode': 'json'
        }
        if api_key:
            params['api_key'] = api_key
        url = f"{base_url}esummary.fcgi?{urllib.parse.urlencode(params)}"
        try:
            with urllib.request.urlopen(url) as response:
                data = json.loads(response.read().decode())
                if 'result' in data:
                    # Keep only entries for the IDs we asked about.
                    for gene_id in chunk:
                        if gene_id in data['result']:
                            all_results[gene_id] = data['result'][gene_id]
        except Exception as e:
            print(f"Error fetching summaries for chunk: {e}", file=sys.stderr)
        time.sleep(delay)
    return all_results
def batch_lookup_by_ids(gene_ids: List[str], api_key: Optional[str] = None) -> List[Dict[str, Any]]:
    """
    Lookup genes by IDs and return structured data.

    Args:
        gene_ids: List of Gene IDs
        api_key: Optional NCBI API key

    Returns:
        List of gene information dictionaries, one per requested ID and
        in the same order; IDs that could not be fetched get an 'error'
        entry instead.
    """
    summaries = batch_esummary(gene_ids, api_key=api_key)

    def as_record(gid):
        # Flatten the NCBI esummary payload into a small stable dict.
        summary = summaries.get(gid)
        if summary is None:
            return {
                'gene_id': gid,
                'error': 'Not found or error fetching'
            }
        return {
            'gene_id': gid,
            'symbol': summary.get('name', 'N/A'),
            'description': summary.get('description', 'N/A'),
            'organism': summary.get('organism', {}).get('scientificname', 'N/A'),
            'chromosome': summary.get('chromosome', 'N/A'),
            'map_location': summary.get('maplocation', 'N/A'),
            'type': summary.get('geneticsource', 'N/A')
        }

    return [as_record(gid) for gid in gene_ids]
def batch_lookup_by_symbols(gene_symbols: List[str], organism: str,
                            api_key: Optional[str] = None) -> List[Dict[str, Any]]:
    """
    Lookup genes by symbols and return structured data.

    Runs a two-phase lookup: esearch to resolve each symbol to a Gene
    ID, then a batched esummary for all resolved IDs. Progress messages
    go to stderr so stdout stays clean for JSON output.

    Args:
        gene_symbols: List of gene symbols
        organism: Organism name
        api_key: Optional NCBI API key

    Returns:
        List of gene information dictionaries; unresolved symbols carry
        a 'status' of 'not_found' or 'error' instead of gene fields.
    """
    # First, search for IDs
    print(f"Searching for {len(gene_symbols)} gene symbols...", file=sys.stderr)
    symbol_to_id = batch_esearch(gene_symbols, organism=organism, api_key=api_key)
    # Filter to valid IDs
    valid_ids = [id for id in symbol_to_id.values() if id not in ['NOT_FOUND', 'ERROR']]
    if not valid_ids:
        print("No genes found", file=sys.stderr)
        return []
    print(f"Found {len(valid_ids)} genes, fetching details...", file=sys.stderr)
    # Fetch summaries
    summaries = batch_esummary(valid_ids, api_key=api_key)
    # Build results
    results = []
    for symbol, gene_id in symbol_to_id.items():
        if gene_id == 'NOT_FOUND':
            results.append({
                'query_symbol': symbol,
                'status': 'not_found'
            })
        elif gene_id == 'ERROR':
            results.append({
                'query_symbol': symbol,
                'status': 'error'
            })
        elif gene_id in summaries:
            gene = summaries[gene_id]
            results.append({
                'query_symbol': symbol,
                'gene_id': gene_id,
                'symbol': gene.get('name', 'N/A'),
                'description': gene.get('description', 'N/A'),
                'organism': gene.get('organism', {}).get('scientificname', 'N/A'),
                'chromosome': gene.get('chromosome', 'N/A'),
                'map_location': gene.get('maplocation', 'N/A'),
                'type': gene.get('geneticsource', 'N/A')
            })
    return results
def main():
    """CLI entry point: parse arguments, run the batch lookup, emit JSON.

    Exactly one input source is accepted: --ids (comma-separated Gene IDs)
    or --file (symbols, one per line, which also requires --organism).
    Results go to --output when given, otherwise to stdout; progress and
    errors go to stderr. Exits with status 1 if the output file cannot be
    written.
    """
    parser = argparse.ArgumentParser(
        description='Batch gene lookup using NCBI APIs',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Lookup by gene IDs
%(prog)s --ids 672,7157,5594
# Lookup by symbols from a file
%(prog)s --file genes.txt --organism human
# Lookup with API key and save to file
%(prog)s --ids 672,7157,5594 --api-key YOUR_KEY --output results.json
"""
    )
    parser.add_argument('--ids', '-i', help='Comma-separated Gene IDs')
    parser.add_argument('--file', '-f', help='File containing gene symbols (one per line)')
    parser.add_argument('--organism', '-o', help='Organism name (required with --file)')
    parser.add_argument('--output', '-O', help='Output file path (JSON format)')
    parser.add_argument('--api-key', '-k', help='NCBI API key')
    parser.add_argument('--pretty', '-p', action='store_true',
                        help='Pretty-print JSON output')
    args = parser.parse_args()
    # Validate mutually-dependent arguments before doing any work.
    if not args.ids and not args.file:
        parser.error("Either --ids or --file must be provided")
    if args.file and not args.organism:
        parser.error("--organism is required when using --file")
    # Process genes: direct IDs or symbols read from a file.
    if args.ids:
        gene_ids = [id.strip() for id in args.ids.split(',')]
        results = batch_lookup_by_ids(gene_ids, api_key=args.api_key)
    else:
        gene_symbols = read_gene_list(args.file)
        results = batch_lookup_by_symbols(gene_symbols, args.organism, api_key=args.api_key)
    # Output results: pretty-printed JSON when requested, compact otherwise.
    indent = 2 if args.pretty else None
    json_output = json.dumps(results, indent=indent)
    if args.output:
        try:
            with open(args.output, 'w') as f:
                f.write(json_output)
            print(f"Results written to {args.output}", file=sys.stderr)
        except Exception as e:
            print(f"Error writing output file: {e}", file=sys.stderr)
            sys.exit(1)
    else:
        print(json_output)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/gene-database/scripts/batch_gene_lookup.py",
"license": "MIT License",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/gene-database/scripts/fetch_gene_data.py | #!/usr/bin/env python3
"""
Fetch gene data from NCBI using the Datasets API.
This script provides access to the NCBI Datasets API for retrieving
comprehensive gene information including metadata and sequences.
"""
import argparse
import json
import sys
import urllib.parse
import urllib.request
from typing import Optional, Dict, Any, List
DATASETS_API_BASE = "https://api.ncbi.nlm.nih.gov/datasets/v2alpha/gene"
def get_taxon_id(taxon_name: str) -> Optional[str]:
    """
    Map a common or scientific organism name to its NCBI taxon ID.

    Args:
        taxon_name: Common or scientific name (e.g., "human", "Homo sapiens").
            Matching is case-insensitive and ignores surrounding whitespace.

    Returns:
        Taxon ID as a string, or None when the name is not in the
        built-in lookup table.
    """
    # Small built-in table of frequently used model organisms; each taxon
    # appears under both its common and scientific name.
    known_taxa = {
        'human': '9606', 'homo sapiens': '9606',
        'mouse': '10090', 'mus musculus': '10090',
        'rat': '10116', 'rattus norvegicus': '10116',
        'zebrafish': '7955', 'danio rerio': '7955',
        'fruit fly': '7227', 'drosophila melanogaster': '7227',
        'c. elegans': '6239', 'caenorhabditis elegans': '6239',
        'yeast': '4932', 'saccharomyces cerevisiae': '4932',
        'arabidopsis': '3702', 'arabidopsis thaliana': '3702',
        'e. coli': '562', 'escherichia coli': '562',
    }
    return known_taxa.get(taxon_name.strip().lower())
def fetch_gene_by_id(gene_id: str, api_key: Optional[str] = None) -> Dict[str, Any]:
    """
    Fetch gene data by Gene ID from the NCBI Datasets API.

    Args:
        gene_id: NCBI Gene ID
        api_key: Optional NCBI API key (sent via the 'api-key' header)

    Returns:
        Parsed JSON response as a dictionary; an empty dict on any error
        (errors are reported on stderr rather than raised).
    """
    url = f"{DATASETS_API_BASE}/id/{gene_id}"
    headers = {}
    if api_key:
        headers['api-key'] = api_key
    try:
        req = urllib.request.Request(url, headers=headers)
        with urllib.request.urlopen(req) as response:
            return json.loads(response.read().decode())
    except urllib.error.HTTPError as e:
        # NOTE(review): this file imports urllib.request but not urllib.error;
        # the name resolves only because urllib.request imports it internally.
        # Consider an explicit `import urllib.error` at the top of the file.
        print(f"HTTP Error {e.code}: {e.reason}", file=sys.stderr)
        if e.code == 404:
            print(f"Gene ID {gene_id} not found", file=sys.stderr)
        return {}
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return {}
def fetch_gene_by_symbol(symbol: str, taxon: str, api_key: Optional[str] = None) -> Dict[str, Any]:
    """
    Fetch gene data by gene symbol and taxon from the NCBI Datasets API.

    Args:
        symbol: Gene symbol (e.g., "BRCA1")
        taxon: Organism name (resolved via get_taxon_id) or a numeric taxon ID
        api_key: Optional NCBI API key (sent via the 'api-key' header)

    Returns:
        Parsed JSON response as a dictionary; an empty dict on any error
        (errors are reported on stderr rather than raised).
    """
    # Convert taxon name to ID if needed
    taxon_id = get_taxon_id(taxon)
    if not taxon_id:
        # Not in the built-in table — assume the caller already passed a
        # numeric taxon ID and use it as-is.
        taxon_id = taxon
    url = f"{DATASETS_API_BASE}/symbol/{symbol}/taxon/{taxon_id}"
    headers = {}
    if api_key:
        headers['api-key'] = api_key
    try:
        req = urllib.request.Request(url, headers=headers)
        with urllib.request.urlopen(req) as response:
            return json.loads(response.read().decode())
    except urllib.error.HTTPError as e:
        print(f"HTTP Error {e.code}: {e.reason}", file=sys.stderr)
        if e.code == 404:
            print(f"Gene symbol '{symbol}' not found for taxon {taxon}", file=sys.stderr)
        return {}
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return {}
def fetch_multiple_genes(gene_ids: List[str], api_key: Optional[str] = None) -> Dict[str, Any]:
    """
    Fetch data for multiple genes by ID in a single request.

    Args:
        gene_ids: List of Gene IDs
        api_key: Optional NCBI API key (sent via the 'api-key' header)

    Returns:
        Combined gene data as a dictionary; an empty dict on any error
        (errors are reported on stderr rather than raised).
    """
    # For multiple genes, the Datasets API takes a JSON body via POST.
    url = f"{DATASETS_API_BASE}/id"
    data = json.dumps({"gene_ids": gene_ids}).encode('utf-8')
    headers = {'Content-Type': 'application/json'}
    if api_key:
        headers['api-key'] = api_key
    try:
        req = urllib.request.Request(url, data=data, headers=headers, method='POST')
        with urllib.request.urlopen(req) as response:
            return json.loads(response.read().decode())
    except urllib.error.HTTPError as e:
        print(f"HTTP Error {e.code}: {e.reason}", file=sys.stderr)
        return {}
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return {}
def display_gene_info(data: Dict[str, Any], verbose: bool = False) -> None:
    """
    Display gene information in human-readable format on stdout.

    Args:
        data: Gene data dictionary from the Datasets API; genes are expected
            under the top-level 'genes' key, each wrapping a 'gene' record.
        verbose: Also show gene type, genomic locations, and up to five
            transcript accessions.
    """
    if 'genes' not in data:
        print("No gene data found in response")
        return
    for gene in data['genes']:
        gene_info = gene.get('gene', {})
        print(f"Gene ID: {gene_info.get('gene_id', 'N/A')}")
        print(f"Symbol: {gene_info.get('symbol', 'N/A')}")
        print(f"Description: {gene_info.get('description', 'N/A')}")
        if 'tax_name' in gene_info:
            print(f"Organism: {gene_info['tax_name']}")
        if 'chromosomes' in gene_info:
            chromosomes = ', '.join(gene_info['chromosomes'])
            print(f"Chromosome(s): {chromosomes}")
        # Nomenclature authority (if reported)
        if 'nomenclature_authority' in gene_info:
            auth = gene_info['nomenclature_authority']
            print(f"Nomenclature: {auth.get('authority', 'N/A')}")
        # Synonyms (skip when the list is empty)
        if 'synonyms' in gene_info and gene_info['synonyms']:
            print(f"Synonyms: {', '.join(gene_info['synonyms'])}")
        if verbose:
            # Gene type
            if 'type' in gene_info:
                print(f"Type: {gene_info['type']}")
            # Genomic locations
            if 'genomic_ranges' in gene_info:
                print("\nGenomic Locations:")
                for range_info in gene_info['genomic_ranges']:
                    accession = range_info.get('accession_version', 'N/A')
                    # Bug fix: `.get('range', [{}])[0]` raised IndexError when
                    # 'range' was present but an empty list; substitute the
                    # placeholder for any falsy value.
                    ranges = range_info.get('range') or [{}]
                    start = ranges[0].get('begin', 'N/A')
                    end = ranges[0].get('end', 'N/A')
                    strand = range_info.get('orientation', 'N/A')
                    print(f" {accession}: {start}-{end} ({strand})")
            # Transcripts
            if 'transcripts' in gene_info:
                print(f"\nTranscripts: {len(gene_info['transcripts'])}")
                for transcript in gene_info['transcripts'][:5]:  # Show first 5
                    print(f" {transcript.get('accession_version', 'N/A')}")
        print()
def main():
    """CLI entry point: fetch gene data by ID(s) or by symbol+taxon.

    Exits with status 1 when the fetch returned no data (the fetch helpers
    already reported the error on stderr). Output is either raw JSON
    (--output json) or the human-readable summary (default).
    """
    parser = argparse.ArgumentParser(
        description='Fetch gene data from NCBI Datasets API',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Fetch by Gene ID
%(prog)s --gene-id 672
# Fetch by gene symbol and organism
%(prog)s --symbol BRCA1 --taxon human
# Fetch multiple genes
%(prog)s --gene-id 672,7157,5594
# Get JSON output
%(prog)s --symbol TP53 --taxon "Homo sapiens" --output json
# Verbose output with details
%(prog)s --gene-id 672 --verbose
"""
    )
    parser.add_argument('--gene-id', '-g', help='Gene ID(s), comma-separated')
    parser.add_argument('--symbol', '-s', help='Gene symbol')
    parser.add_argument('--taxon', '-t', help='Organism name or taxon ID (required with --symbol)')
    parser.add_argument('--output', '-o', choices=['pretty', 'json'], default='pretty',
                        help='Output format (default: pretty)')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Show detailed information')
    parser.add_argument('--api-key', '-k', help='NCBI API key')
    args = parser.parse_args()
    # Validate mutually-dependent arguments before any network call.
    if not args.gene_id and not args.symbol:
        parser.error("Either --gene-id or --symbol must be provided")
    if args.symbol and not args.taxon:
        parser.error("--taxon is required when using --symbol")
    # Fetch data: single ID, batch of IDs, or symbol+taxon lookup.
    if args.gene_id:
        gene_ids = [id.strip() for id in args.gene_id.split(',')]
        if len(gene_ids) == 1:
            data = fetch_gene_by_id(gene_ids[0], api_key=args.api_key)
        else:
            data = fetch_multiple_genes(gene_ids, api_key=args.api_key)
    else:
        data = fetch_gene_by_symbol(args.symbol, args.taxon, api_key=args.api_key)
    # Empty dict signals an error already printed to stderr.
    if not data:
        sys.exit(1)
    # Output
    if args.output == 'json':
        print(json.dumps(data, indent=2))
    else:
        display_gene_info(data, verbose=args.verbose)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/gene-database/scripts/fetch_gene_data.py",
"license": "MIT License",
"lines": 220,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/gene-database/scripts/query_gene.py | #!/usr/bin/env python3
"""
Query NCBI Gene database using E-utilities.
This script provides access to ESearch, ESummary, and EFetch functions
for searching and retrieving gene information.
"""
import argparse
import json
import sys
import time
import urllib.parse
import urllib.request
from typing import Optional, Dict, List, Any
from xml.etree import ElementTree as ET
BASE_URL = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
DB = "gene"
def esearch(query: str, retmax: int = 20, api_key: Optional[str] = None) -> List[str]:
    """
    Search the NCBI Gene database (ESearch) and return matching Gene IDs.

    Args:
        query: Entrez search query (e.g., "BRCA1[gene] AND human[organism]")
        retmax: Maximum number of results to return
        api_key: Optional NCBI API key for higher rate limits

    Returns:
        List of Gene IDs as strings; an empty list on any error or
        unexpected response shape (errors are printed to stderr, never raised).
    """
    params = {
        'db': DB,
        'term': query,
        'retmax': retmax,
        'retmode': 'json'
    }
    if api_key:
        params['api_key'] = api_key
    url = f"{BASE_URL}esearch.fcgi?{urllib.parse.urlencode(params)}"
    try:
        with urllib.request.urlopen(url) as response:
            data = json.loads(response.read().decode())
            # Successful ESearch JSON nests the IDs under esearchresult.idlist.
            if 'esearchresult' in data and 'idlist' in data['esearchresult']:
                return data['esearchresult']['idlist']
            else:
                print(f"Error: Unexpected response format", file=sys.stderr)
                return []
    except urllib.error.HTTPError as e:
        # NOTE(review): urllib.error resolves here only because urllib.request
        # imports it internally; consider an explicit top-level import.
        print(f"HTTP Error {e.code}: {e.reason}", file=sys.stderr)
        return []
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return []
def esummary(gene_ids: List[str], api_key: Optional[str] = None) -> Dict[str, Any]:
    """
    Get ESummary document summaries for the given Gene IDs.

    Args:
        gene_ids: List of Gene IDs
        api_key: Optional NCBI API key

    Returns:
        Raw ESummary JSON response (per-gene records live under 'result');
        an empty dict on any error (errors are printed to stderr).
    """
    params = {
        'db': DB,
        'id': ','.join(gene_ids),  # ESummary accepts a comma-separated ID list
        'retmode': 'json'
    }
    if api_key:
        params['api_key'] = api_key
    url = f"{BASE_URL}esummary.fcgi?{urllib.parse.urlencode(params)}"
    try:
        with urllib.request.urlopen(url) as response:
            data = json.loads(response.read().decode())
            return data
    except urllib.error.HTTPError as e:
        print(f"HTTP Error {e.code}: {e.reason}", file=sys.stderr)
        return {}
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return {}
def efetch(gene_ids: List[str], retmode: str = 'xml', api_key: Optional[str] = None) -> str:
    """
    Fetch full gene records via EFetch.

    Args:
        gene_ids: List of Gene IDs
        retmode: Return format ('xml', 'text', 'asn.1')
        api_key: Optional NCBI API key

    Returns:
        Gene records as a string in the requested format; an empty string
        on any error (errors are printed to stderr).
    """
    params = {
        'db': DB,
        'id': ','.join(gene_ids),  # EFetch accepts a comma-separated ID list
        'retmode': retmode
    }
    if api_key:
        params['api_key'] = api_key
    url = f"{BASE_URL}efetch.fcgi?{urllib.parse.urlencode(params)}"
    try:
        with urllib.request.urlopen(url) as response:
            return response.read().decode()
    except urllib.error.HTTPError as e:
        print(f"HTTP Error {e.code}: {e.reason}", file=sys.stderr)
        return ""
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return ""
def search_and_summarize(query: str, organism: Optional[str] = None,
                         max_results: int = 20, api_key: Optional[str] = None) -> None:
    """
    Search for genes and print human-readable summaries to stdout.

    Args:
        query: Gene search query (Entrez syntax)
        organism: Optional organism filter, appended as "<name>[organism]"
            unless the query already contains an [organism] clause
        max_results: Maximum number of results
        api_key: Optional NCBI API key
    """
    # Add organism filter if provided (skip when the query already has one).
    if organism:
        if '[organism]' not in query.lower():
            query = f"{query} AND {organism}[organism]"
    print(f"Searching for: {query}")
    print("-" * 80)
    # Search for gene IDs
    gene_ids = esearch(query, retmax=max_results, api_key=api_key)
    if not gene_ids:
        print("No results found.")
        return
    print(f"Found {len(gene_ids)} gene(s)")
    print()
    # Get summaries for all hits in one request
    summaries = esummary(gene_ids, api_key=api_key)
    if 'result' in summaries:
        # Iterate in search-result order; skip IDs missing from the summaries.
        for gene_id in gene_ids:
            if gene_id in summaries['result']:
                gene = summaries['result'][gene_id]
                print(f"Gene ID: {gene_id}")
                print(f" Symbol: {gene.get('name', 'N/A')}")
                print(f" Description: {gene.get('description', 'N/A')}")
                print(f" Organism: {gene.get('organism', {}).get('scientificname', 'N/A')}")
                print(f" Chromosome: {gene.get('chromosome', 'N/A')}")
                print(f" Map Location: {gene.get('maplocation', 'N/A')}")
                print(f" Type: {gene.get('geneticsource', 'N/A')}")
                print()
    # Respect NCBI's rate limit for keyless requests.
    time.sleep(0.34)  # ~3 requests per second
def fetch_by_id(gene_ids: List[str], output_format: str = 'json',
                api_key: Optional[str] = None) -> None:
    """
    Fetch and print gene information for the given IDs.

    Args:
        gene_ids: List of Gene IDs
        output_format: 'json' prints ESummary output; 'xml' or 'text'
            print full EFetch records
        api_key: Optional NCBI API key
    """
    if output_format == 'json':
        # Get summaries in JSON format
        summaries = esummary(gene_ids, api_key=api_key)
        print(json.dumps(summaries, indent=2))
    else:
        # Fetch full records in the requested EFetch retmode
        data = efetch(gene_ids, retmode=output_format, api_key=api_key)
        print(data)
    # Respect NCBI's rate limit for keyless requests (~3 req/s).
    time.sleep(0.34)
def main():
    """CLI entry point: fetch gene records by ID, or search and summarize.

    --id takes precedence when both --id and --search are supplied;
    at least one of the two is required.
    """
    parser = argparse.ArgumentParser(
        description='Query NCBI Gene database using E-utilities',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Search for gene by symbol
%(prog)s --search "BRCA1" --organism "human"
# Fetch gene by ID
%(prog)s --id 672 --format json
# Complex search query
%(prog)s --search "insulin[gene] AND diabetes[disease]"
# Multiple gene IDs
%(prog)s --id 672,7157,5594
"""
    )
    parser.add_argument('--search', '-s', help='Search query')
    parser.add_argument('--organism', '-o', help='Organism filter')
    parser.add_argument('--id', '-i', help='Gene ID(s), comma-separated')
    parser.add_argument('--format', '-f', default='json',
                        choices=['json', 'xml', 'text'],
                        help='Output format (default: json)')
    parser.add_argument('--max-results', '-m', type=int, default=20,
                        help='Maximum number of search results (default: 20)')
    parser.add_argument('--api-key', '-k', help='NCBI API key for higher rate limits')
    args = parser.parse_args()
    # One of the two modes must be selected.
    if not args.search and not args.id:
        parser.error("Either --search or --id must be provided")
    if args.id:
        # Fetch by ID
        gene_ids = [id.strip() for id in args.id.split(',')]
        fetch_by_id(gene_ids, output_format=args.format, api_key=args.api_key)
    else:
        # Search and summarize
        search_and_summarize(args.search, organism=args.organism,
                             max_results=args.max_results, api_key=args.api_key)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/gene-database/scripts/query_gene.py",
"license": "MIT License",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/generate-image/scripts/generate_image.py | #!/usr/bin/env python3
"""
Generate and edit images using OpenRouter API with various image generation models.
Supports models like:
- google/gemini-3-pro-image-preview (generation and editing)
- black-forest-labs/flux.2-pro (generation and editing)
- black-forest-labs/flux.2-flex (generation)
- And more image generation models available on OpenRouter
For image editing, provide an input image along with an editing prompt.
"""
import sys
import json
import base64
import argparse
from pathlib import Path
from typing import Optional
def check_env_file() -> Optional[str]:
    """Locate the OpenRouter API key.

    Checks the OPENROUTER_API_KEY environment variable first, then searches
    for a .env file in the current directory and each parent directory.

    Bug fix: the CLI error text already tells users they may
    `export OPENROUTER_API_KEY=...`, but the environment variable was
    previously never consulted — only .env files were.

    Returns:
        The API key string, or None if not found anywhere.
    """
    import os  # local import: the rest of this module does not use os

    env_key = os.environ.get('OPENROUTER_API_KEY', '').strip()
    if env_key:
        return env_key

    # Fall back to a .env file in cwd or any parent directory (first hit wins).
    current_dir = Path.cwd()
    for parent in [current_dir] + list(current_dir.parents):
        env_file = parent / ".env"
        if env_file.exists():
            with open(env_file, 'r') as f:
                for line in f:
                    if line.startswith('OPENROUTER_API_KEY='):
                        # Tolerate values wrapped in single or double quotes.
                        api_key = line.split('=', 1)[1].strip().strip('"').strip("'")
                        if api_key:
                            return api_key
    return None
def load_image_as_base64(image_path: str) -> str:
    """Read an image from disk and encode it as a base64 data URL.

    The MIME type is inferred from the file extension, defaulting to
    image/png. Exits the process with status 1 if the file does not exist.
    """
    path = Path(image_path)
    if not path.exists():
        print(f"β Error: Image file not found: {image_path}")
        sys.exit(1)
    # Map known extensions to MIME types; anything else is treated as PNG.
    known_types = {
        '.png': 'image/png',
        '.jpg': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        '.gif': 'image/gif',
        '.webp': 'image/webp',
    }
    mime_type = known_types.get(path.suffix.lower(), 'image/png')
    encoded = base64.b64encode(path.read_bytes()).decode('utf-8')
    return f"data:{mime_type};base64,{encoded}"
def save_base64_image(base64_data: str, output_path: str) -> None:
    """Decode base64 image data (optionally a data URL) and write it to disk.

    A data URL looks like "data:image/png;base64,<payload>"; only the
    payload after the comma is decoded. Plain base64 is accepted as-is.
    """
    # rpartition keeps everything after the separator comma; with no comma
    # present, the whole string is the payload (base64 never contains ',').
    _, _, payload = base64_data.rpartition(',')
    with open(output_path, 'wb') as out:
        out.write(base64.b64decode(payload))
def generate_image(
    prompt: str,
    model: str = "google/gemini-3-pro-image-preview",
    output_path: str = "generated_image.png",
    api_key: Optional[str] = None,
    input_image: Optional[str] = None
) -> dict:
    """
    Generate or edit an image via the OpenRouter chat-completions API.

    Args:
        prompt: Text description of the image to generate, or editing instructions
        model: OpenRouter model ID (default: google/gemini-3-pro-image-preview)
        output_path: Path to save the generated image
        api_key: OpenRouter API key (falls back to .env discovery when omitted)
        input_image: Path to an input image for editing; its presence switches
            the request into edit mode

    Returns:
        dict: Raw JSON response from the OpenRouter API.

    Exits the process with status 1 on missing dependency, missing API key,
    or a non-200 HTTP response.
    """
    # 'requests' is only needed here; fail with install guidance if absent.
    try:
        import requests
    except ImportError:
        print("Error: 'requests' library not found. Install with: pip install requests")
        sys.exit(1)
    # Resolve the API key: explicit argument, then .env lookup.
    if not api_key:
        api_key = check_env_file()
    if not api_key:
        print("β Error: OPENROUTER_API_KEY not found!")
        print("\nPlease create a .env file in your project directory with:")
        print("OPENROUTER_API_KEY=your-api-key-here")
        print("\nOr set the environment variable:")
        print("export OPENROUTER_API_KEY=your-api-key-here")
        print("\nGet your API key from: https://openrouter.ai/keys")
        sys.exit(1)
    # Edit mode is selected purely by the presence of an input image.
    is_editing = input_image is not None
    if is_editing:
        print(f"βοΈ Editing image with model: {model}")
        print(f"π· Input image: {input_image}")
        print(f"π Edit prompt: {prompt}")
        # Load input image as a base64 data URL for the multimodal payload.
        image_data_url = load_image_as_base64(input_image)
        # Multimodal message: text instruction plus the image to edit.
        message_content = [
            {
                "type": "text",
                "text": prompt
            },
            {
                "type": "image_url",
                "image_url": {
                    "url": image_data_url
                }
            }
        ]
    else:
        print(f"π¨ Generating image with model: {model}")
        print(f"π Prompt: {prompt}")
        message_content = prompt
    # Make API request; "modalities" asks the model for image output.
    response = requests.post(
        url="https://openrouter.ai/api/v1/chat/completions",
        headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        },
        json={
            "model": model,
            "messages": [
                {
                    "role": "user",
                    "content": message_content
                }
            ],
            "modalities": ["image", "text"]
        }
    )
    # Surface HTTP-level errors and stop.
    if response.status_code != 200:
        print(f"β API Error ({response.status_code}): {response.text}")
        sys.exit(1)
    result = response.json()
    # Extract and save the first returned image. Two response shapes are
    # handled: a top-level 'images' list, or image parts inside 'content'.
    if result.get("choices"):
        message = result["choices"][0]["message"]
        images = []
        if message.get("images"):
            images = message["images"]
        elif message.get("content"):
            # Some models return content as an array with image parts.
            content = message["content"]
            if isinstance(content, list):
                for part in content:
                    if isinstance(part, dict) and part.get("type") == "image":
                        images.append(part)
        if images:
            # Save the first image; the URL may sit under 'image_url' or 'url'.
            image = images[0]
            if "image_url" in image:
                image_url = image["image_url"]["url"]
                save_base64_image(image_url, output_path)
                print(f"β Image saved to: {output_path}")
            elif "url" in image:
                save_base64_image(image["url"], output_path)
                print(f"β Image saved to: {output_path}")
            else:
                print(f"β οΈ Unexpected image format: {image}")
        else:
            print("β οΈ No image found in response")
            if message.get("content"):
                print(f"Response content: {message['content']}")
    else:
        print("β No choices in response")
        print(f"Response: {json.dumps(result, indent=2)}")
    return result
def main():
    """CLI entry point: parse arguments and run image generation/editing.

    Passing --input switches generate_image into edit mode.
    """
    parser = argparse.ArgumentParser(
        description="Generate or edit images using OpenRouter API",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Generate with default model (Gemini 3 Pro Image Preview)
python generate_image.py "A beautiful sunset over mountains"
# Use a specific model
python generate_image.py "A cat in space" --model "black-forest-labs/flux.2-pro"
# Specify output path
python generate_image.py "Abstract art" --output my_image.png
# Edit an existing image
python generate_image.py "Make the sky purple" --input photo.jpg --output edited.png
# Edit with a specific model
python generate_image.py "Add a hat to the person" --input portrait.png -m "black-forest-labs/flux.2-pro"
Popular image models:
- google/gemini-3-pro-image-preview (default, high quality, generation + editing)
- black-forest-labs/flux.2-pro (fast, high quality, generation + editing)
- black-forest-labs/flux.2-flex (development version)
"""
    )
    parser.add_argument(
        "prompt",
        type=str,
        help="Text description of the image to generate, or editing instructions"
    )
    parser.add_argument(
        "--model", "-m",
        type=str,
        default="google/gemini-3-pro-image-preview",
        help="OpenRouter model ID (default: google/gemini-3-pro-image-preview)"
    )
    parser.add_argument(
        "--output", "-o",
        type=str,
        default="generated_image.png",
        help="Output file path (default: generated_image.png)"
    )
    parser.add_argument(
        "--input", "-i",
        type=str,
        help="Input image path for editing (enables edit mode)"
    )
    parser.add_argument(
        "--api-key",
        type=str,
        help="OpenRouter API key (will check .env if not provided)"
    )
    args = parser.parse_args()
    generate_image(
        prompt=args.prompt,
        model=args.model,
        output_path=args.output,
        api_key=args.api_key,
        input_image=args.input
    )


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/generate-image/scripts/generate_image.py",
"license": "MIT License",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/get-available-resources/scripts/detect_resources.py | #!/usr/bin/env python3
"""
System Resource Detection Script
Detects available compute resources including CPU, GPU, memory, and disk space.
Outputs a JSON file that Claude Code can use to make informed decisions about
computational approaches (e.g., whether to use Dask, Zarr, Joblib, etc.).
Supports: macOS, Linux, Windows
GPU Detection: NVIDIA (CUDA), AMD (ROCm), Apple Silicon (Metal)
"""
import json
import os
import platform
import psutil
import subprocess
import sys
from pathlib import Path
from typing import Dict, List, Any, Optional
def get_cpu_info() -> Dict[str, Any]:
    """Collect CPU core counts, architecture, and (when available) frequency.

    Frequency keys are only populated when psutil.cpu_freq() succeeds and
    returns data; `max_frequency_mhz` stays None otherwise.
    """
    info: Dict[str, Any] = {
        "physical_cores": psutil.cpu_count(logical=False),
        "logical_cores": psutil.cpu_count(logical=True),
        "max_frequency_mhz": None,
        "architecture": platform.machine(),
        "processor": platform.processor(),
    }
    # cpu_freq() is unsupported or fails on some platforms — treat any
    # failure as "frequency unknown".
    try:
        freq = psutil.cpu_freq()
    except Exception:
        freq = None
    if freq:
        info["max_frequency_mhz"] = freq.max
        info["current_frequency_mhz"] = freq.current
    return info
def get_memory_info() -> Dict[str, Any]:
    """Report virtual-memory and swap figures in GiB (rounded to 2 decimals)."""
    virt = psutil.virtual_memory()
    swap = psutil.swap_memory()
    gib = 1024 ** 3
    return {
        "total_gb": round(virt.total / gib, 2),
        "available_gb": round(virt.available / gib, 2),
        "used_gb": round(virt.used / gib, 2),
        "percent_used": virt.percent,
        "swap_total_gb": round(swap.total / gib, 2),
        "swap_available_gb": round((swap.total - swap.used) / gib, 2),
    }
def get_disk_info(path: Optional[str] = None) -> Dict[str, Any]:
    """Report disk usage (GiB) for `path`, defaulting to the working directory.

    On failure (e.g. a nonexistent path) returns {"path": ..., "error": ...}
    instead of raising.
    """
    target = path if path is not None else os.getcwd()
    try:
        usage = psutil.disk_usage(target)
    except Exception as exc:
        return {
            "path": target,
            "error": str(exc),
        }
    gib = 1024 ** 3
    return {
        "path": target,
        "total_gb": round(usage.total / gib, 2),
        "available_gb": round(usage.free / gib, 2),
        "used_gb": round(usage.used / gib, 2),
        "percent_used": usage.percent,
    }
def detect_nvidia_gpus() -> List[Dict[str, Any]]:
    """Detect NVIDIA GPUs via `nvidia-smi`.

    Returns:
        One dict per GPU with index, name, memory figures (MB), driver
        version, compute capability, and fixed type/backend tags. An empty
        list when nvidia-smi is absent, times out, or fails — any failure
        is treated as "no NVIDIA GPUs".
    """
    gpus = []
    try:
        # CSV query keeps parsing simple: one GPU per line, fixed column order.
        result = subprocess.run(
            ["nvidia-smi", "--query-gpu=index,name,memory.total,memory.free,driver_version,compute_cap",
             "--format=csv,noheader,nounits"],
            capture_output=True,
            text=True,
            timeout=5
        )
        if result.returncode == 0:
            for line in result.stdout.strip().split('\n'):
                if line:
                    parts = [p.strip() for p in line.split(',')]
                    if len(parts) >= 6:
                        gpus.append({
                            "index": int(parts[0]),
                            "name": parts[1],
                            "memory_total_mb": float(parts[2]),
                            "memory_free_mb": float(parts[3]),
                            "driver_version": parts[4],
                            "compute_capability": parts[5],
                            "type": "NVIDIA",
                            "backend": "CUDA"
                        })
    except (subprocess.TimeoutExpired, FileNotFoundError, Exception):
        # NOTE(review): listing Exception makes the first two entries
        # redundant; kept for clarity of intent. Any failure means no GPUs.
        pass
    return gpus
def detect_amd_gpus() -> List[Dict[str, Any]]:
    """Detect AMD GPUs via `rocm-smi`.

    Parsing is heuristic: each output line containing both 'GPU' and 'DID'
    is counted as one device, with the raw line kept under 'info'.

    Returns:
        One dict per detected GPU; an empty list when rocm-smi is absent,
        times out, or fails.
    """
    gpus = []
    try:
        # Try to run rocm-smi
        result = subprocess.run(
            ["rocm-smi", "--showid", "--showmeminfo", "vram"],
            capture_output=True,
            text=True,
            timeout=5
        )
        if result.returncode == 0:
            # Parse rocm-smi output (basic parsing, may need refinement)
            lines = result.stdout.strip().split('\n')
            gpu_index = 0
            for line in lines:
                if 'GPU' in line and 'DID' in line:
                    gpus.append({
                        "index": gpu_index,
                        "name": "AMD GPU",
                        "type": "AMD",
                        "backend": "ROCm",
                        "info": line.strip()
                    })
                    gpu_index += 1
    except (subprocess.TimeoutExpired, FileNotFoundError, Exception):
        # Any failure is treated as "no AMD GPUs detected".
        pass
    return gpus
def detect_apple_silicon_gpu() -> Optional[Dict[str, Any]]:
    """Detect an Apple Silicon GPU (M1/M2/M3/M4).

    Reads the CPU brand string via `sysctl`; when it names an Apple M-series
    chip, optionally augments the result with `system_profiler` display data
    (chipset name and GPU core count).

    Returns:
        A dict describing the GPU, or None on non-macOS systems, Intel Macs,
        or any detection failure.
    """
    if platform.system() != "Darwin":
        return None
    try:
        # Check if running on Apple Silicon
        result = subprocess.run(
            ["sysctl", "-n", "machdep.cpu.brand_string"],
            capture_output=True,
            text=True,
            timeout=5
        )
        cpu_brand = result.stdout.strip()
        # Check for Apple Silicon (M1, M2, M3, etc.)
        if "Apple" in cpu_brand and any(chip in cpu_brand for chip in ["M1", "M2", "M3", "M4"]):
            # Get GPU core count if possible
            gpu_info = {
                "name": cpu_brand,
                "type": "Apple Silicon",
                "backend": "Metal",
                "unified_memory": True,  # Apple Silicon uses unified memory
            }
            # Try to get GPU core information (optional enrichment).
            try:
                result = subprocess.run(
                    ["system_profiler", "SPDisplaysDataType"],
                    capture_output=True,
                    text=True,
                    timeout=10
                )
                # Parse GPU core info from system_profiler
                for line in result.stdout.split('\n'):
                    if 'Chipset Model' in line:
                        gpu_info["chipset"] = line.split(':')[1].strip()
                    elif 'Total Number of Cores' in line:
                        try:
                            cores = line.split(':')[1].strip()
                            gpu_info["gpu_cores"] = cores
                        except:
                            # NOTE(review): bare except; the split here can
                            # only raise IndexError — consider narrowing.
                            pass
            except Exception:
                # system_profiler details are optional; keep the base info.
                pass
            return gpu_info
    except Exception:
        pass
    return None
def get_gpu_info() -> Dict[str, Any]:
    """Aggregate NVIDIA, AMD, and Apple Silicon GPU detection results.

    Returns a dict with the per-vendor results plus a total GPU count and
    the list of usable backends (CUDA / ROCm / Metal).
    """
    nvidia = detect_nvidia_gpus()
    amd = detect_amd_gpus()
    apple = detect_apple_silicon_gpu()

    backends = []
    if nvidia:
        backends.append("CUDA")
    if amd:
        backends.append("ROCm")
    if apple:
        backends.append("Metal")

    return {
        "nvidia_gpus": nvidia,
        "amd_gpus": amd,
        "apple_silicon": apple,
        "total_gpus": len(nvidia) + len(amd) + (1 if apple else 0),
        "available_backends": backends,
    }
def get_os_info() -> Dict[str, Any]:
    """Return basic OS and Python-interpreter identification."""
    return {
        "system": platform.system(),
        "release": platform.release(),
        "version": platform.version(),
        "machine": platform.machine(),
        "python_version": platform.python_version(),
    }
def detect_all_resources(output_path: str = None) -> Dict[str, Any]:
    """
    Detect all system resources, add recommendations, and save to JSON.

    Args:
        output_path: Optional path for the JSON report. Defaults to
            .claude_resources.json in the current working directory.

    Returns:
        Dictionary containing all resource information, including a
        'recommendations' section derived from the measurements.
    """
    # Idiom fix: a plain local import replaces the previous
    # `__import__("datetime")` expression, which obscured the dependency.
    from datetime import datetime

    if output_path is None:
        output_path = os.path.join(os.getcwd(), ".claude_resources.json")
    resources = {
        "timestamp": datetime.now().isoformat(),
        "os": get_os_info(),
        "cpu": get_cpu_info(),
        "memory": get_memory_info(),
        "disk": get_disk_info(),
        "gpu": get_gpu_info(),
    }
    # Derive actionable recommendations from the raw measurements.
    resources["recommendations"] = generate_recommendations(resources)
    # Persist the report so other tools (e.g. Claude Code) can read it.
    with open(output_path, 'w') as f:
        json.dump(resources, f, indent=2)
    return resources
def generate_recommendations(resources: Dict[str, Any]) -> Dict[str, Any]:
    """
    Derive computational-approach recommendations from detected resources.

    Thresholds: >=8 / >=4 logical cores select high/moderate parallelism;
    <4 / <16 GB available memory select constrained/moderate memory
    strategies; <10 / <100 GB free disk select constrained/moderate data
    handling.
    """
    # --- Parallel processing, keyed off logical core count -------------
    cores = resources["cpu"]["logical_cores"]
    if cores >= 8:
        parallel = {
            "strategy": "high_parallelism",
            "suggested_workers": max(cores - 2, 1),
            "libraries": ["joblib", "multiprocessing", "dask"],
        }
    elif cores >= 4:
        parallel = {
            "strategy": "moderate_parallelism",
            "suggested_workers": max(cores - 1, 1),
            "libraries": ["joblib", "multiprocessing"],
        }
    else:
        parallel = {
            "strategy": "sequential",
            "note": "Limited cores, prefer sequential processing",
        }

    # --- Memory strategy, keyed off available (not total) memory -------
    avail_gb = resources["memory"]["available_gb"]
    _ = resources["memory"]["total_gb"]  # read kept for input-shape parity
    if avail_gb < 4:
        memory = {
            "strategy": "memory_constrained",
            "libraries": ["zarr", "dask", "h5py"],
            "note": "Use out-of-core processing for large datasets",
        }
    elif avail_gb < 16:
        memory = {
            "strategy": "moderate_memory",
            "libraries": ["dask", "zarr"],
            "note": "Consider chunking for datasets > 2GB",
        }
    else:
        memory = {
            "strategy": "memory_abundant",
            "note": "Can load most datasets into memory",
        }

    # --- GPU acceleration: library picks follow backend priority -------
    gpu_meta = resources["gpu"]
    if gpu_meta["total_gpus"] > 0:
        backends = gpu_meta["available_backends"]
        gpu = {"available": True, "backends": backends}
        if "CUDA" in backends:
            gpu["suggested_libraries"] = [
                "pytorch", "tensorflow", "jax", "cupy", "rapids"
            ]
        elif "Metal" in backends:
            gpu["suggested_libraries"] = [
                "pytorch-mps", "tensorflow-metal", "jax-metal"
            ]
        elif "ROCm" in backends:
            gpu["suggested_libraries"] = [
                "pytorch-rocm", "tensorflow-rocm"
            ]
    else:
        gpu = {
            "available": False,
            "note": "No GPU detected, use CPU-based libraries",
        }

    # --- Large-data handling, keyed off free disk space ----------------
    free_disk_gb = resources["disk"]["available_gb"]
    if free_disk_gb < 10:
        data_handling = {
            "strategy": "disk_constrained",
            "note": "Limited disk space, use streaming or compression",
        }
    elif free_disk_gb < 100:
        data_handling = {
            "strategy": "moderate_disk",
            "libraries": ["zarr", "h5py", "parquet"],
        }
    else:
        data_handling = {
            "strategy": "disk_abundant",
            "note": "Sufficient space for large intermediate files",
        }

    return {
        "parallel_processing": parallel,
        "memory_strategy": memory,
        "gpu_acceleration": gpu,
        "large_data_handling": data_handling,
    }
def main():
    """CLI entry point: detect system resources, save them as JSON, and print a summary.

    Parses ``-o/--output`` (destination JSON path) and ``-v/--verbose``
    (dump the full resource dict to stdout), delegates detection to
    ``detect_all_resources()``, then prints a human-readable summary.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="Detect system resources for scientific computing"
    )
    parser.add_argument(
        "-o", "--output",
        default=".claude_resources.json",
        help="Output JSON file path (default: .claude_resources.json)"
    )
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="Print resources to stdout"
    )
    args = parser.parse_args()
    print("π Detecting system resources...")
    resources = detect_all_resources(args.output)
    # NOTE(review): this message was split across two source lines (a
    # mangled multi-byte character), which is a syntax error; joined into
    # a single print call.
    print(f"β Resources detected and saved to: {args.output}")
    if args.verbose:
        print("\n" + "="*60)
        print(json.dumps(resources, indent=2))
        print("="*60)
    # Print summary
    print("\nπ Resource Summary:")
    print(f"  OS: {resources['os']['system']} {resources['os']['release']}")
    print(f"  CPU: {resources['cpu']['logical_cores']} cores ({resources['cpu']['physical_cores']} physical)")
    print(f"  Memory: {resources['memory']['total_gb']} GB total, {resources['memory']['available_gb']} GB available")
    print(f"  Disk: {resources['disk']['total_gb']} GB total, {resources['disk']['available_gb']} GB available")
    if resources['gpu']['total_gpus'] > 0:
        print(f"  GPU: {resources['gpu']['total_gpus']} detected ({', '.join(resources['gpu']['available_backends'])})")
    else:
        print("  GPU: None detected")
    print("\nπ‘ Recommendations:")
    recs = resources['recommendations']
    print(f"  Parallel Processing: {recs['parallel_processing'].get('strategy', 'N/A')}")
    print(f"  Memory Strategy: {recs['memory_strategy'].get('strategy', 'N/A')}")
    print(f"  GPU Acceleration: {'Available' if recs['gpu_acceleration'].get('available') else 'Not Available'}")

if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/get-available-resources/scripts/detect_resources.py",
"license": "MIT License",
"lines": 332,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/gget/scripts/batch_sequence_analysis.py | #!/usr/bin/env python3
"""
Batch Sequence Analysis Script
Analyze multiple sequences: BLAST, alignment, and structure prediction
"""
import argparse
import sys
from pathlib import Path
import gget
def read_fasta(fasta_file):
    """Parse a FASTA file into a list of ``{'id': ..., 'seq': ...}`` dicts.

    The header line (minus the leading '>') becomes the record id; every
    following line up to the next header is concatenated into the
    sequence string.
    """
    records = []
    header = None
    chunks = []

    def _flush():
        # Emit the record accumulated so far, if any.
        if header:
            records.append({"id": header, "seq": "".join(chunks)})

    with open(fasta_file, "r") as handle:
        for raw in handle:
            raw = raw.strip()
            if raw.startswith(">"):
                _flush()
                header = raw[1:]
                chunks = []
            else:
                chunks.append(raw)
    _flush()
    return records
def analyze_sequences(
    fasta_file,
    blast_db="nr",
    align=True,
    predict_structure=False,
    output_dir="output",
):
    """
    Perform batch sequence analysis.

    Pipeline: (1) BLAST each sequence against ``blast_db`` and save the
    hits as per-sequence CSV files, (2) optionally align all sequences
    with MUSCLE, (3) optionally predict structures with AlphaFold (the
    actual prediction call is commented out by default). Errors in
    individual steps are printed and skipped, never raised.

    Args:
        fasta_file: Path to FASTA file with sequences
        blast_db: BLAST database to search (default: nr)
        align: Whether to perform multiple sequence alignment
        predict_structure: Whether to predict structures with AlphaFold
        output_dir: Output directory for results

    Returns:
        True always (per-step failures are only reported on stdout).
    """
    output_path = Path(output_dir)
    output_path.mkdir(exist_ok=True)
    print(f"Batch Sequence Analysis")
    print("=" * 60)
    print(f"Input file: {fasta_file}")
    print(f"Output directory: {output_dir}")
    print("")
    # Read sequences
    print("Reading sequences...")
    sequences = read_fasta(fasta_file)
    print(f"Found {len(sequences)} sequences\n")
    # Step 1: BLAST each sequence (remote call; each failure is isolated)
    print("Step 1: Running BLAST searches...")
    print("-" * 60)
    for i, seq_data in enumerate(sequences):
        print(f"\n{i+1}. BLASTing {seq_data['id']}...")
        try:
            blast_results = gget.blast(
                seq_data["seq"], database=blast_db, limit=10, save=False
            )
            # NOTE(review): the output filename embeds the raw FASTA header;
            # headers containing spaces or path separators may yield odd paths.
            output_file = output_path / f"{seq_data['id']}_blast.csv"
            blast_results.to_csv(output_file, index=False)
            print(f" Results saved to: {output_file}")
            if len(blast_results) > 0:
                print(f" Top hit: {blast_results.iloc[0]['Description']}")
                print(
                    f" Max Score: {blast_results.iloc[0]['Max Score']}, "
                    f"Query Coverage: {blast_results.iloc[0]['Query Coverage']}"
                )
        except Exception as e:
            print(f" Error: {e}")
    # Step 2: Multiple sequence alignment (needs at least two sequences)
    if align and len(sequences) > 1:
        print("\n\nStep 2: Multiple sequence alignment...")
        print("-" * 60)
        try:
            alignment = gget.muscle(fasta_file)
            alignment_file = output_path / "alignment.afa"
            with open(alignment_file, "w") as f:
                f.write(alignment)
            print(f"Alignment saved to: {alignment_file}")
        except Exception as e:
            print(f"Error in alignment: {e}")
    else:
        print("\n\nStep 2: Skipping alignment (only 1 sequence or disabled)")
    # Step 3: Structure prediction (optional; actual call is commented out)
    if predict_structure:
        print("\n\nStep 3: Predicting structures with AlphaFold...")
        print("-" * 60)
        print(
            "Note: This requires 'gget setup alphafold' and is computationally intensive"
        )
        for i, seq_data in enumerate(sequences):
            print(f"\n{i+1}. Predicting structure for {seq_data['id']}...")
            try:
                structure_dir = output_path / f"structure_{seq_data['id']}"
                # Uncomment to run AlphaFold prediction:
                # gget.alphafold(seq_data['seq'], out=str(structure_dir))
                # print(f" Structure saved to: {structure_dir}")
                print(
                    " (Prediction skipped - uncomment code to run AlphaFold prediction)"
                )
            except Exception as e:
                print(f" Error: {e}")
    else:
        print("\n\nStep 3: Structure prediction disabled")
    # Summary of produced artifacts
    print("\n" + "=" * 60)
    print("Batch analysis complete!")
    print(f"\nResults saved to: {output_dir}/")
    print(f" - BLAST results: *_blast.csv")
    if align and len(sequences) > 1:
        print(f" - Alignment: alignment.afa")
    if predict_structure:
        print(f" - Structures: structure_*/")
    return True
def main():
    """Parse CLI arguments and run the batch sequence analysis."""
    cli = argparse.ArgumentParser(
        description="Perform batch sequence analysis using gget"
    )
    cli.add_argument("fasta", help="Input FASTA file with sequences")
    cli.add_argument(
        "-db",
        "--database",
        default="nr",
        help="BLAST database (default: nr for proteins, nt for nucleotides)",
    )
    cli.add_argument(
        "--no-align", action="store_true", help="Skip multiple sequence alignment"
    )
    cli.add_argument(
        "--predict-structure",
        action="store_true",
        help="Predict structures with AlphaFold (requires setup)",
    )
    cli.add_argument(
        "-o", "--output", default="output", help="Output directory (default: output)"
    )
    args = cli.parse_args()

    # Fail fast when the input file does not exist.
    if not Path(args.fasta).exists():
        print(f"Error: File not found: {args.fasta}")
        sys.exit(1)

    try:
        ok = analyze_sequences(
            args.fasta,
            blast_db=args.database,
            align=not args.no_align,
            predict_structure=args.predict_structure,
            output_dir=args.output,
        )
    except KeyboardInterrupt:
        print("\n\nAnalysis interrupted by user")
        sys.exit(1)
    except Exception as e:
        print(f"\n\nError: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
    sys.exit(0 if ok else 1)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/gget/scripts/batch_sequence_analysis.py",
"license": "MIT License",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/gget/scripts/enrichment_pipeline.py | #!/usr/bin/env python3
"""
Enrichment Analysis Pipeline
Perform comprehensive enrichment analysis on a gene list
"""
import argparse
import sys
from pathlib import Path
import gget
import pandas as pd
def read_gene_list(file_path):
    """Load gene identifiers from a CSV (first column) or plain-text file.

    A ``.csv`` file is parsed with pandas and its first column returned;
    any other extension is treated as one gene symbol per line, with
    blank lines dropped.
    """
    path = Path(file_path)
    if path.suffix != ".csv":
        # Plain text: one gene per line, skipping empty lines.
        return [ln.strip() for ln in path.read_text().splitlines() if ln.strip()]
    frame = pd.read_csv(path)
    # Assume the first column holds the gene names.
    return frame.iloc[:, 0].tolist()
def enrichment_pipeline(
    gene_list,
    species="human",
    background=None,
    output_prefix="enrichment",
    plot=True,
):
    """
    Perform comprehensive enrichment analysis.

    Runs ``gget.enrichr`` against five database categories (KEGG pathways,
    GO biological process, ChEA transcription factors, GWAS disease
    associations, PanglaoDB cell-type markers), saves each non-empty
    result to ``{output_prefix}_{db}.csv``, writes a summary CSV, and
    fetches ARCHS4 tissue expression for up to the first five genes.
    Per-database errors are printed and skipped.

    Args:
        gene_list: List of gene symbols
        species: Species for analysis
        background: Background gene list (optional)
        output_prefix: Prefix for output files
        plot: Whether to generate plots

    Returns:
        True always (failures are reported on stdout only).
    """
    print("Enrichment Analysis Pipeline")
    print("=" * 60)
    print(f"Analyzing {len(gene_list)} genes")
    print(f"Species: {species}\n")
    # Database categories to analyze (gget.enrichr shortcut -> label)
    databases = {
        "pathway": "KEGG Pathways",
        "ontology": "Gene Ontology (Biological Process)",
        "transcription": "Transcription Factors (ChEA)",
        "diseases_drugs": "Disease Associations (GWAS)",
        "celltypes": "Cell Type Markers (PanglaoDB)",
    }
    results = {}
    for db_key, db_name in databases.items():
        print(f"\nAnalyzing: {db_name}")
        print("-" * 60)
        try:
            enrichment = gget.enrichr(
                gene_list,
                database=db_key,
                species=species,
                background_list=background,
                plot=plot,
            )
            if enrichment is not None and len(enrichment) > 0:
                # Save results
                output_file = f"{output_prefix}_{db_key}.csv"
                enrichment.to_csv(output_file, index=False)
                print(f"Results saved to: {output_file}")
                # Show top 5 results; column names vary between gget
                # versions, hence the chained .get() fallbacks.
                print(f"\nTop 5 enriched terms:")
                for i, row in enrichment.head(5).iterrows():
                    term = row.get("name", row.get("term", "Unknown"))
                    p_val = row.get(
                        "adjusted_p_value",
                        row.get("p_value", row.get("Adjusted P-value", 1)),
                    )
                    print(f" {i+1}. {term}")
                    print(f" P-value: {p_val:.2e}")
                results[db_key] = enrichment
            else:
                print("No significant results found")
        except Exception as e:
            print(f"Error: {e}")
    # Generate summary report (one row per database with any hits)
    print("\n" + "=" * 60)
    print("Generating summary report...")
    summary = []
    for db_key, db_name in databases.items():
        if db_key in results and len(results[db_key]) > 0:
            summary.append(
                {
                    "Database": db_name,
                    "Total Terms": len(results[db_key]),
                    "Top Term": results[db_key].iloc[0].get(
                        "name", results[db_key].iloc[0].get("term", "N/A")
                    ),
                }
            )
    if summary:
        summary_df = pd.DataFrame(summary)
        summary_file = f"{output_prefix}_summary.csv"
        summary_df.to_csv(summary_file, index=False)
        print(f"\nSummary saved to: {summary_file}")
        print("\n" + summary_df.to_string(index=False))
    else:
        print("\nNo enrichment results to summarize")
    # Get expression data for genes (capped at five to limit API calls)
    print("\n" + "=" * 60)
    print("Getting expression data for input genes...")
    try:
        # Get tissue expression for first few genes
        expr_data = []
        for gene in gene_list[:5]:  # Limit to first 5
            print(f" Getting expression for {gene}...")
            try:
                tissue_expr = gget.archs4(gene, which="tissue")
                # Keep only the single highest-expressing tissue per gene.
                top_tissue = tissue_expr.nlargest(1, "median").iloc[0]
                expr_data.append(
                    {
                        "Gene": gene,
                        "Top Tissue": top_tissue["tissue"],
                        "Median Expression": top_tissue["median"],
                    }
                )
            except Exception as e:
                print(f" Warning: {e}")
        if expr_data:
            expr_df = pd.DataFrame(expr_data)
            expr_file = f"{output_prefix}_expression.csv"
            expr_df.to_csv(expr_file, index=False)
            print(f"\nExpression data saved to: {expr_file}")
    except Exception as e:
        print(f"Error getting expression data: {e}")
    print("\n" + "=" * 60)
    print("Enrichment analysis complete!")
    print(f"\nOutput files (prefix: {output_prefix}):")
    for db_key in databases.keys():
        if db_key in results:
            print(f" - {output_prefix}_{db_key}.csv")
    print(f" - {output_prefix}_summary.csv")
    print(f" - {output_prefix}_expression.csv")
    return True
def main():
    """Parse CLI arguments, load gene lists, and run the enrichment pipeline."""
    cli = argparse.ArgumentParser(
        description="Perform comprehensive enrichment analysis using gget"
    )
    cli.add_argument(
        "genes",
        help="Gene list file (one gene per line or CSV with genes in first column)",
    )
    cli.add_argument(
        "-s",
        "--species",
        default="human",
        help="Species (human, mouse, fly, yeast, worm, fish)",
    )
    cli.add_argument(
        "-b", "--background", help="Background gene list file (optional)"
    )
    cli.add_argument(
        "-o", "--output", default="enrichment", help="Output prefix (default: enrichment)"
    )
    cli.add_argument(
        "--no-plot", action="store_true", help="Disable plotting"
    )
    args = cli.parse_args()

    # The gene-list file is mandatory; bail out early when absent.
    if not Path(args.genes).exists():
        print(f"Error: File not found: {args.genes}")
        sys.exit(1)

    try:
        gene_list = read_gene_list(args.genes)
        print(f"Read {len(gene_list)} genes from {args.genes}")
        # A missing background file is a warning, not an error.
        background = None
        if args.background:
            if Path(args.background).exists():
                background = read_gene_list(args.background)
                print(f"Read {len(background)} background genes from {args.background}")
            else:
                print(f"Warning: Background file not found: {args.background}")
        ok = enrichment_pipeline(
            gene_list,
            species=args.species,
            background=background,
            output_prefix=args.output,
            plot=not args.no_plot,
        )
        sys.exit(0 if ok else 1)
    except KeyboardInterrupt:
        print("\n\nAnalysis interrupted by user")
        sys.exit(1)
    except Exception as e:
        print(f"\n\nError: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/gget/scripts/enrichment_pipeline.py",
"license": "MIT License",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/gget/scripts/gene_analysis.py | #!/usr/bin/env python3
"""
Gene Analysis Script
Quick analysis of a gene: search, info, sequences, expression, and enrichment
"""
import argparse
import sys
import gget
def analyze_gene(gene_name, species="homo_sapiens", output_prefix=None):
    """
    Perform comprehensive analysis of a gene.

    Seven sequential steps: Ensembl search, detailed info (with PDB ids),
    nucleotide/protein sequences, ARCHS4 tissue expression, ARCHS4
    correlated genes, Open Targets disease associations, and Open Targets
    drug associations.  Steps 4-7 degrade gracefully: a failure prints a
    warning and the remaining steps still run.

    Args:
        gene_name: Gene symbol to analyze
        species: Species name (default: homo_sapiens)
        output_prefix: Prefix for output files (default: gene_name)

    Returns:
        False when the gene is not found in ``species``, True otherwise.
    """
    if output_prefix is None:
        output_prefix = gene_name.lower()
    print(f"Analyzing gene: {gene_name}")
    print("=" * 60)
    # Step 1: Search for the gene (limit=1 keeps only the best match)
    print("\n1. Searching for gene...")
    search_results = gget.search([gene_name], species=species, limit=1)
    if len(search_results) == 0:
        print(f"Error: Gene '{gene_name}' not found in {species}")
        return False
    gene_id = search_results["ensembl_id"].iloc[0]
    print(f" Found: {gene_id}")
    print(f" Description: {search_results['ensembl_description'].iloc[0]}")
    # Step 2: Get detailed information (pdb=True also fetches PDB ids)
    print("\n2. Getting detailed information...")
    gene_info = gget.info([gene_id], pdb=True)
    gene_info.to_csv(f"{output_prefix}_info.csv", index=False)
    print(f" Saved to: {output_prefix}_info.csv")
    if "uniprot_id" in gene_info.columns and gene_info["uniprot_id"].iloc[0]:
        print(f" UniProt ID: {gene_info['uniprot_id'].iloc[0]}")
    if "pdb_id" in gene_info.columns and gene_info["pdb_id"].iloc[0]:
        print(f" PDB IDs: {gene_info['pdb_id'].iloc[0]}")
    # Step 3: Get sequences (nucleotide, plus translated protein)
    print("\n3. Retrieving sequences...")
    nucleotide_seq = gget.seq([gene_id])
    protein_seq = gget.seq([gene_id], translate=True)
    with open(f"{output_prefix}_nucleotide.fasta", "w") as f:
        f.write(nucleotide_seq)
    print(f" Nucleotide sequence saved to: {output_prefix}_nucleotide.fasta")
    with open(f"{output_prefix}_protein.fasta", "w") as f:
        f.write(protein_seq)
    print(f" Protein sequence saved to: {output_prefix}_protein.fasta")
    # Step 4: Get tissue expression (ARCHS4; keyed by symbol, not Ensembl id)
    print("\n4. Getting tissue expression...")
    try:
        tissue_expr = gget.archs4(gene_name, which="tissue")
        tissue_expr.to_csv(f"{output_prefix}_tissue_expression.csv", index=False)
        print(f" Saved to: {output_prefix}_tissue_expression.csv")
        # Show top tissues
        top_tissues = tissue_expr.nlargest(5, "median")
        print("\n Top expressing tissues:")
        for _, row in top_tissues.iterrows():
            print(f" {row['tissue']}: median = {row['median']:.2f}")
    except Exception as e:
        print(f" Warning: Could not retrieve ARCHS4 data: {e}")
    # Step 5: Find correlated genes (ARCHS4 co-expression)
    print("\n5. Finding correlated genes...")
    try:
        correlated = gget.archs4(gene_name, which="correlation")
        correlated.to_csv(f"{output_prefix}_correlated_genes.csv", index=False)
        print(f" Saved to: {output_prefix}_correlated_genes.csv")
        # Show top correlated
        print("\n Top 10 correlated genes:")
        for _, row in correlated.head(10).iterrows():
            print(f" {row['gene_symbol']}: r = {row['correlation']:.3f}")
    except Exception as e:
        print(f" Warning: Could not retrieve correlation data: {e}")
    # Step 6: Get disease associations (Open Targets)
    print("\n6. Getting disease associations...")
    try:
        diseases = gget.opentargets(gene_id, resource="diseases", limit=10)
        diseases.to_csv(f"{output_prefix}_diseases.csv", index=False)
        print(f" Saved to: {output_prefix}_diseases.csv")
        print("\n Top 5 disease associations:")
        for _, row in diseases.head(5).iterrows():
            print(f" {row['disease_name']}: score = {row['overall_score']:.3f}")
    except Exception as e:
        print(f" Warning: Could not retrieve disease data: {e}")
    # Step 7: Get drug associations (Open Targets; file written only if non-empty)
    print("\n7. Getting drug associations...")
    try:
        drugs = gget.opentargets(gene_id, resource="drugs", limit=10)
        if len(drugs) > 0:
            drugs.to_csv(f"{output_prefix}_drugs.csv", index=False)
            print(f" Saved to: {output_prefix}_drugs.csv")
            print(f"\n Found {len(drugs)} drug associations")
        else:
            print(" No drug associations found")
    except Exception as e:
        print(f" Warning: Could not retrieve drug data: {e}")
    print("\n" + "=" * 60)
    print("Analysis complete!")
    print(f"\nOutput files (prefix: {output_prefix}):")
    print(f" - {output_prefix}_info.csv")
    print(f" - {output_prefix}_nucleotide.fasta")
    print(f" - {output_prefix}_protein.fasta")
    print(f" - {output_prefix}_tissue_expression.csv")
    print(f" - {output_prefix}_correlated_genes.csv")
    print(f" - {output_prefix}_diseases.csv")
    print(f" - {output_prefix}_drugs.csv (if available)")
    return True
def main():
    """CLI wrapper around analyze_gene()."""
    cli = argparse.ArgumentParser(
        description="Perform comprehensive analysis of a gene using gget"
    )
    cli.add_argument("gene", help="Gene symbol to analyze")
    cli.add_argument(
        "-s",
        "--species",
        default="homo_sapiens",
        help="Species (default: homo_sapiens)",
    )
    cli.add_argument(
        "-o", "--output", help="Output prefix for files (default: gene name)"
    )
    args = cli.parse_args()
    try:
        ok = analyze_gene(args.gene, args.species, args.output)
    except KeyboardInterrupt:
        print("\n\nAnalysis interrupted by user")
        sys.exit(1)
    except Exception as e:
        print(f"\n\nError: {e}")
        sys.exit(1)
    sys.exit(0 if ok else 1)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/gget/scripts/gene_analysis.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/kegg-database/scripts/kegg_api.py | """
KEGG REST API Helper Functions
This module provides Python functions for interacting with the KEGG REST API.
All functions return raw response text which can be parsed as needed.
API Base URL: https://rest.kegg.jp
Documentation: https://www.kegg.jp/kegg/rest/keggapi.html
IMPORTANT: KEGG API is made available only for academic use by academic users.
"""
import urllib.request
import urllib.parse
import urllib.error
from typing import Optional, List, Union
KEGG_BASE_URL = "https://rest.kegg.jp"
def kegg_info(database: str) -> str:
    """
    Return metadata and statistics for a KEGG database.

    Args:
        database: KEGG database name (e.g., 'kegg', 'pathway', 'enzyme', 'genes')

    Returns:
        str: Raw response text, or an "Error: ..." string on HTTP failure.

    Example:
        info = kegg_info('pathway')
    """
    endpoint = f"{KEGG_BASE_URL}/info/{database}"
    try:
        response = urllib.request.urlopen(endpoint)
    except urllib.error.HTTPError as err:
        return f"Error: {err.code} - {err.reason}"
    with response:
        return response.read().decode('utf-8')
def kegg_list(database: str, org: Optional[str] = None) -> str:
    """
    List entry identifiers and associated names.

    Args:
        database: KEGG database name or specific entry (e.g., 'pathway', 'enzyme', 'hsa:10458')
        org: Optional organism code for pathway/module listings (e.g., 'hsa' for human)

    Returns:
        str: Tab-delimited list of entries, or an "Error: ..." string on
        HTTP failure.

    Examples:
        pathways = kegg_list('pathway')            # All reference pathways
        hsa_pathways = kegg_list('pathway', 'hsa') # Human pathways only
        genes = kegg_list('hsa:10458+ece:Z5100')   # Specific genes
    """
    segments = [KEGG_BASE_URL, "list", database]
    if org:
        segments.append(org)
    try:
        with urllib.request.urlopen("/".join(segments)) as response:
            return response.read().decode('utf-8')
    except urllib.error.HTTPError as err:
        return f"Error: {err.code} - {err.reason}"
def kegg_find(database: str, query: str, option: Optional[str] = None) -> str:
    """
    Search a KEGG database by keyword or molecular property.

    Args:
        database: Database to search ('genes', 'compound', 'drug', etc.)
        query: Search term or molecular property value.
        option: Optional molecular-search mode:
            'formula'    - exact match to chemical formula
            'exact_mass' - range search by exact mass (e.g., '174.05-174.15')
            'mol_weight' - range search by molecular weight

    Returns:
        str: Tab-delimited search results, or an "Error: ..." string on
        HTTP failure.

    Examples:
        results = kegg_find('genes', 'shiga toxin')
        compounds = kegg_find('compound', 'C7H10N4O2', 'formula')
        drugs = kegg_find('drug', '300-310', 'exact_mass')
    """
    # The query may contain spaces etc., so it must be URL-encoded.
    segments = [KEGG_BASE_URL, "find", database, urllib.parse.quote(query)]
    if option:
        segments.append(option)
    try:
        with urllib.request.urlopen("/".join(segments)) as response:
            return response.read().decode('utf-8')
    except urllib.error.HTTPError as err:
        return f"Error: {err.code} - {err.reason}"
def kegg_get(entries: Union[str, List[str]], option: Optional[str] = None) -> str:
    """
    Retrieve full database entries or specific data formats.

    Args:
        entries: Single entry ID or list of entry IDs (only the first 10
            are sent, matching the KEGG API limit)
        option: Optional output format:
            'aaseq' or 'ntseq' - FASTA sequence
            'mol'   - MOL format (compounds)
            'kcf'   - KCF format (compounds)
            'image' - PNG image (pathway maps, single entry only)
            'kgml'  - KGML pathway XML (single entry only)
            'json'  - JSON format (pathway only, single entry only)

    Returns:
        str: Entry data in the requested format, or an "Error: ..."
        string on HTTP failure.

    Examples:
        pathway = kegg_get('hsa00010')
        genes = kegg_get(['hsa:10458', 'ece:Z5100'])
        sequence = kegg_get('hsa:10458', 'aaseq')
        pathway_json = kegg_get('hsa05130', 'json')
    """
    if isinstance(entries, list):
        ids = '+'.join(entries[:10])  # KEGG accepts at most 10 ids per call
    else:
        ids = entries
    segments = [KEGG_BASE_URL, "get", ids]
    if option:
        segments.append(option)
    try:
        with urllib.request.urlopen("/".join(segments)) as response:
            return response.read().decode('utf-8')
    except urllib.error.HTTPError as err:
        return f"Error: {err.code} - {err.reason}"
def kegg_conv(target_db: str, source_db: str) -> str:
    """
    Convert identifiers between KEGG and external databases.

    Args:
        target_db: Target database (e.g., 'ncbi-geneid', 'uniprot', 'pubchem')
        source_db: Source database or entry (e.g., 'hsa', 'compound', 'hsa:10458')

    Returns:
        str: Tab-delimited conversion table, or an "Error: ..." string on
        HTTP failure.

    Examples:
        conversions = kegg_conv('ncbi-geneid', 'hsa')
        gene_id = kegg_conv('ncbi-geneid', 'hsa:10458')
        pubchem = kegg_conv('pubchem', 'compound')
    """
    endpoint = f"{KEGG_BASE_URL}/conv/{target_db}/{source_db}"
    try:
        response = urllib.request.urlopen(endpoint)
    except urllib.error.HTTPError as err:
        return f"Error: {err.code} - {err.reason}"
    with response:
        return response.read().decode('utf-8')
def kegg_link(target_db: str, source_db: str) -> str:
    """
    Find related entries across KEGG databases.

    Args:
        target_db: Target database (e.g., 'pathway', 'enzyme', 'genes')
        source_db: Source database or entry (e.g., 'hsa', 'pathway', 'hsa:10458')

    Returns:
        str: Tab-delimited list of linked entries, or an "Error: ..."
        string on HTTP failure.

    Examples:
        links = kegg_link('pathway', 'hsa')       # Pathways for human genes
        genes = kegg_link('genes', 'hsa00010')    # Genes in one pathway
        pathways = kegg_link('pathway', 'hsa:10458')
    """
    endpoint = f"{KEGG_BASE_URL}/link/{target_db}/{source_db}"
    try:
        response = urllib.request.urlopen(endpoint)
    except urllib.error.HTTPError as err:
        return f"Error: {err.code} - {err.reason}"
    with response:
        return response.read().decode('utf-8')
def kegg_ddi(drug_entries: Union[str, List[str]]) -> str:
    """
    Check drug-drug interactions.

    Args:
        drug_entries: One drug entry id, or a list of ids (only the first
            10 are sent, matching the KEGG API limit)

    Returns:
        str: Drug interaction information, or an "Error: ..." string on
        HTTP failure.

    Example:
        interactions = kegg_ddi(['D00001', 'D00002'])
    """
    if isinstance(drug_entries, list):
        joined = '+'.join(drug_entries[:10])  # API caps at 10 entries
    else:
        joined = drug_entries
    try:
        with urllib.request.urlopen(f"{KEGG_BASE_URL}/ddi/{joined}") as response:
            return response.read().decode('utf-8')
    except urllib.error.HTTPError as err:
        return f"Error: {err.code} - {err.reason}"
if __name__ == "__main__":
    # Example usage: smoke-test the helpers against the live KEGG API
    # (requires network access; output is truncated for readability).
    print("KEGG Info Example:")
    print(kegg_info('pathway')[:200] + "...\n")
    print("KEGG List Example (first 3 pathways):")
    pathways = kegg_list('pathway')
    print('\n'.join(pathways.split('\n')[:3]) + "\n")
    print("KEGG Find Example:")
    print(kegg_find('genes', 'p53')[:200] + "...")
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/kegg-database/scripts/kegg_api.py",
"license": "MIT License",
"lines": 191,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/scientific/labarchive-integration/scripts/entry_operations.py | #!/usr/bin/env python3
"""
LabArchives Entry Operations
Utilities for creating entries, uploading attachments, and managing notebook content.
"""
import argparse
import sys
import yaml
import os
from pathlib import Path
from datetime import datetime
def load_config(config_path='config.yaml'):
    """Read the YAML configuration file, exiting with a message on failure."""
    try:
        with open(config_path, 'r') as handle:
            return yaml.safe_load(handle)
    except FileNotFoundError:
        # Most common failure: the user has not run the setup step yet.
        print(f"β Configuration file not found: {config_path}")
        print(" Run setup_config.py first to create configuration")
        sys.exit(1)
    except Exception as exc:
        print(f"β Error loading configuration: {exc}")
        sys.exit(1)
def init_client(config):
    """Construct a LabArchives API client from the loaded configuration.

    Exits the process with an installation hint when labarchives-py is
    not importable.
    """
    try:
        from labarchivespy.client import Client
    except ImportError:
        print("β labarchives-py package not installed")
        print(" Install with: pip install git+https://github.com/mcmero/labarchives-py")
        sys.exit(1)
    return Client(
        config['api_url'],
        config['access_key_id'],
        config['access_password']
    )
def get_user_id(client, config):
    """Authenticate against LabArchives and return the user's uid.

    Calls the 'user_access_info' API method with the configured
    email/external password and parses the uid out of the XML response.
    Exits the process on authentication failure or transport error.
    """
    import xml.etree.ElementTree as ET
    login_params = {
        'login_or_email': config['user_email'],
        'password': config['user_external_password']
    }
    try:
        response = client.make_call('users', 'user_access_info', params=login_params)
        if response.status_code == 200:
            # The uid is the text of the first child element of the XML root.
            uid = ET.fromstring(response.content)[0].text
            return uid
        else:
            print(f"β Authentication failed: HTTP {response.status_code}")
            print(f" Response: {response.content.decode('utf-8')[:200]}")
            sys.exit(1)
    except Exception as e:
        # Covers network errors and malformed XML alike.
        print(f"β Error during authentication: {e}")
        sys.exit(1)
def create_entry(client, uid, nbid, title, content=None, date=None):
    """Create a new entry in a notebook.

    Args:
        client: LabArchives API client.
        uid: Authenticated user id.
        nbid: Target notebook id.
        title: Entry title.
        content: Optional body; wrapped in <p> tags when it does not
            already look like HTML.
        date: Optional entry date string.

    Returns:
        The new entry id (str) when it can be parsed from the response,
        True when the call succeeded but no id was found, or None on
        failure.
    """
    print(f"\nπ Creating entry: {title}")
    # Prepare parameters
    params = {
        'uid': uid,
        'nbid': nbid,
        'title': title
    }
    if content:
        # Ensure content is HTML formatted
        if not content.startswith('<'):
            content = f'<p>{content}</p>'
        params['content'] = content
    if date:
        params['date'] = date
    try:
        response = client.make_call('entries', 'create_entry', params=params)
    except Exception as e:
        print(f"β Error creating entry: {e}")
        return None
    if response.status_code != 200:
        print(f"β Entry creation failed: HTTP {response.status_code}")
        print(f" Response: {response.content.decode('utf-8')[:200]}")
        return None
    # NOTE(review): the original source had this success message split
    # across two lines (a mangled multi-byte character, a syntax error);
    # joined into a single print call.
    print("β Entry created successfully")
    # Try to extract entry ID from response
    try:
        import xml.etree.ElementTree as ET
        root = ET.fromstring(response.content)
        entry_id = root.find('.//entry_id')
        if entry_id is not None:
            print(f" Entry ID: {entry_id.text}")
            return entry_id.text
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed while parsing the response.
        pass
    return True
def create_comment(client, uid, nbid, entry_id, comment):
    """Add a comment to an existing entry.

    Returns:
        True on HTTP 200, False on any other status or on a transport
        exception.
    """
    print(f"\n㪠Adding comment to entry {entry_id}")
    params = {
        'uid': uid,
        'nbid': nbid,
        'entry_id': entry_id,
        'comment': comment
    }
    try:
        response = client.make_call('entries', 'create_comment', params=params)
        if response.status_code == 200:
            # NOTE(review): this success message was split across two
            # source lines (a mangled multi-byte character, a syntax
            # error); joined into one print call.
            print("β Comment added successfully")
            return True
        else:
            print(f"β Comment creation failed: HTTP {response.status_code}")
            return False
    except Exception as e:
        print(f"β Error creating comment: {e}")
        return False
def upload_attachment(client, config, uid, nbid, entry_id, file_path):
    """Upload a file attachment to an entry.

    Sends a multipart POST directly to the LabArchives upload endpoint
    (the API client object is not used for the transfer).
    NOTE(review): assumes the endpoint accepts credentials as form
    fields — confirm against the LabArchives API documentation.

    Returns:
        True on HTTP 200, False otherwise (missing file, non-200 status,
        or transport exception).
    """
    import requests
    file_path = Path(file_path)
    if not file_path.exists():
        print(f"β File not found: {file_path}")
        return False
    print(f"\nπ Uploading attachment: {file_path.name}")
    print(f" Size: {file_path.stat().st_size / 1024:.2f} KB")
    url = f"{config['api_url']}/entries/upload_attachment"
    try:
        with open(file_path, 'rb') as f:
            files = {'file': f}
            data = {
                'uid': uid,
                'nbid': nbid,
                'entry_id': entry_id,
                'filename': file_path.name,
                'access_key_id': config['access_key_id'],
                'access_password': config['access_password']
            }
            response = requests.post(url, files=files, data=data)
        if response.status_code == 200:
            # NOTE(review): this success message was split across two
            # source lines (a mangled multi-byte character, a syntax
            # error); joined into one print call.
            print("β Attachment uploaded successfully")
            return True
        print(f"β Upload failed: HTTP {response.status_code}")
        print(f" Response: {response.content.decode('utf-8')[:200]}")
        return False
    except Exception as e:
        print(f"β Error uploading attachment: {e}")
        return False
def batch_upload(client, config, uid, nbid, entry_id, directory):
    """Upload every regular file in *directory* as an attachment to one entry.

    Prints a per-file result via upload_attachment() and a final
    success/failure tally.
    """
    directory = Path(directory)
    if not directory.is_dir():
        print(f"β Directory not found: {directory}")
        return
    regular_files = [item for item in directory.glob('*') if item.is_file()]
    if not regular_files:
        print(f"β No files found in {directory}")
        return
    print(f"\nπ¦ Batch uploading {len(regular_files)} files from {directory}")
    outcomes = [
        upload_attachment(client, config, uid, nbid, entry_id, path)
        for path in regular_files
    ]
    successful = sum(1 for ok in outcomes if ok)
    failed = len(outcomes) - successful
    print("\n" + "="*60)
    print(f"Batch upload complete: {successful} successful, {failed} failed")
    print("="*60)
def create_entry_with_attachments(client, config, uid, nbid, title, content,
                                  attachments):
    """Create an entry, then attach each file in *attachments* to it.

    Returns False when the entry could not be created (attachments are
    skipped in that case), True otherwise.
    """
    new_entry = create_entry(client, uid, nbid, title, content)
    if not new_entry:
        print("β Cannot upload attachments without entry ID")
        return False
    for path in attachments:
        upload_attachment(client, config, uid, nbid, new_entry, path)
    return True
def main():
    """Command-line entry point: parse arguments and dispatch to a subcommand.

    Subcommands: create, upload, batch-upload, comment. All of them share the
    top-level --config and --nbid options.
    """
    parser = argparse.ArgumentParser(
        description='LabArchives Entry Operations',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Create simple entry
  python3 entry_operations.py create --nbid 12345 --title "Experiment Results"
  # Create entry with content
  python3 entry_operations.py create --nbid 12345 --title "Results" \\
    --content "PCR amplification successful"
  # Create entry with HTML content
  python3 entry_operations.py create --nbid 12345 --title "Results" \\
    --content "<p>Results:</p><ul><li>Sample A: Positive</li></ul>"
  # Upload attachment to existing entry
  python3 entry_operations.py upload --nbid 12345 --entry-id 67890 \\
    --file data.csv
  # Batch upload multiple files
  python3 entry_operations.py batch-upload --nbid 12345 --entry-id 67890 \\
    --directory ./experiment_data/
  # Add comment to entry
  python3 entry_operations.py comment --nbid 12345 --entry-id 67890 \\
    --text "Follow-up analysis needed"
"""
    )
    # --nbid is required at the top level, so every subcommand needs it.
    parser.add_argument('--config', default='config.yaml',
                        help='Path to configuration file (default: config.yaml)')
    parser.add_argument('--nbid', required=True,
                        help='Notebook ID')
    subparsers = parser.add_subparsers(dest='command', help='Command to execute')
    # Create entry command
    create_parser = subparsers.add_parser('create', help='Create new entry')
    create_parser.add_argument('--title', required=True, help='Entry title')
    create_parser.add_argument('--content', help='Entry content (HTML supported)')
    create_parser.add_argument('--date', help='Entry date (YYYY-MM-DD)')
    create_parser.add_argument('--attachments', nargs='+',
                               help='Files to attach to the new entry')
    # Upload attachment command
    upload_parser = subparsers.add_parser('upload', help='Upload attachment to entry')
    upload_parser.add_argument('--entry-id', required=True, help='Entry ID')
    upload_parser.add_argument('--file', required=True, help='File to upload')
    # Batch upload command
    batch_parser = subparsers.add_parser('batch-upload',
                                         help='Upload all files from directory')
    batch_parser.add_argument('--entry-id', required=True, help='Entry ID')
    batch_parser.add_argument('--directory', required=True,
                              help='Directory containing files to upload')
    # Comment command
    comment_parser = subparsers.add_parser('comment', help='Add comment to entry')
    comment_parser.add_argument('--entry-id', required=True, help='Entry ID')
    comment_parser.add_argument('--text', required=True, help='Comment text')
    args = parser.parse_args()
    if not args.command:
        # Subcommands are optional by default in argparse; show usage and bail.
        parser.print_help()
        sys.exit(1)
    # Load configuration and initialize the API client, then authenticate.
    config = load_config(args.config)
    client = init_client(config)
    uid = get_user_id(client, config)
    # Execute command
    if args.command == 'create':
        if args.attachments:
            # NOTE(review): args.date is not forwarded on this branch, unlike
            # the plain create_entry call below — confirm whether intended.
            create_entry_with_attachments(
                client, config, uid, args.nbid, args.title,
                args.content, args.attachments
            )
        else:
            create_entry(client, uid, args.nbid, args.title,
                         args.content, args.date)
    elif args.command == 'upload':
        upload_attachment(client, config, uid, args.nbid,
                          args.entry_id, args.file)
    elif args.command == 'batch-upload':
        batch_upload(client, config, uid, args.nbid,
                     args.entry_id, args.directory)
    elif args.command == 'comment':
        create_comment(client, uid, args.nbid, args.entry_id, args.text)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/labarchive-integration/scripts/entry_operations.py",
"license": "MIT License",
"lines": 259,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/labarchive-integration/scripts/notebook_operations.py | #!/usr/bin/env python3
"""
LabArchives Notebook Operations
Utilities for listing, backing up, and managing LabArchives notebooks.
"""
import argparse
import sys
import yaml
from datetime import datetime
from pathlib import Path
def load_config(config_path='config.yaml'):
    """Load the YAML configuration file, exiting the process on failure."""
    try:
        with open(config_path, 'r') as cfg_file:
            return yaml.safe_load(cfg_file)
    except FileNotFoundError:
        # A missing config is the common first-run mistake: point at the fix.
        print(f"β Configuration file not found: {config_path}")
        print(" Run setup_config.py first to create configuration")
    except Exception as err:
        print(f"β Error loading configuration: {err}")
    # Only reachable on the error paths above.
    sys.exit(1)
def init_client(config):
    """Build a LabArchives API client from the loaded configuration dict."""
    try:
        from labarchivespy.client import Client
    except ImportError:
        print("β labarchives-py package not installed")
        print(" Install with: pip install git+https://github.com/mcmero/labarchives-py")
        sys.exit(1)
    return Client(
        config['api_url'],
        config['access_key_id'],
        config['access_password'],
    )
def get_user_id(client, config):
    """Authenticate against the API and return the user's UID string."""
    import xml.etree.ElementTree as ET
    credentials = {
        'login_or_email': config['user_email'],
        'password': config['user_external_password'],
    }
    try:
        response = client.make_call('users', 'user_access_info', params=credentials)
        if response.status_code != 200:
            print(f"β Authentication failed: HTTP {response.status_code}")
            print(f" Response: {response.content.decode('utf-8')[:200]}")
            # SystemExit is a BaseException, so the handler below does not
            # swallow it.
            sys.exit(1)
        # First child of the response XML carries the UID — presumably the
        # <id> element; confirm against the API's response schema.
        return ET.fromstring(response.content)[0].text
    except Exception as e:
        print(f"β Error during authentication: {e}")
        sys.exit(1)
def list_notebooks(client, uid):
    """Print and return the notebooks the user can access.

    Returns a list of {'nbid', 'name', 'role'} dicts; an empty list on any
    failure (HTTP error, network error, or no notebooks).
    """
    import xml.etree.ElementTree as ET

    def _child_text(node, tag, fallback):
        # Text of the named child element, or the fallback when absent.
        found = node.find(tag)
        return found.text if found is not None else fallback

    print(f"\nπ Listing notebooks for user ID: {uid}\n")
    try:
        # The user_access_info call also carries the notebook list.
        response = client.make_call('users', 'user_access_info', params={'uid': uid})
        if response.status_code != 200:
            print(f"β Failed to list notebooks: HTTP {response.status_code}")
            return []
        notebooks = ET.fromstring(response.content).findall('.//notebook')
        if not notebooks:
            print("No notebooks found")
            return []
        print(f"{'Notebook ID':<15} {'Name':<40} {'Role':<10}")
        print("-" * 70)
        notebook_list = []
        for nb in notebooks:
            record = {
                'nbid': _child_text(nb, 'nbid', 'N/A'),
                'name': _child_text(nb, 'name', 'Unnamed'),
                'role': _child_text(nb, 'role', 'N/A'),
            }
            notebook_list.append(record)
            print(f"{record['nbid']:<15} {record['name']:<40} {record['role']:<10}")
        print(f"\nTotal notebooks: {len(notebooks)}")
        return notebook_list
    except Exception as e:
        print(f"β Error listing notebooks: {e}")
        return []
def backup_notebook(client, uid, nbid, output_dir='backups', json_format=False,
                    no_attachments=False):
    """Download a backup of one notebook into *output_dir*.

    Args:
        client: Initialized LabArchives API client.
        uid: Authenticated user ID.
        nbid: Notebook ID to back up.
        output_dir: Directory for the backup file (created if missing).
        json_format: Request JSON instead of XML (only relevant when
            no_attachments is True — with attachments the API returns a 7z).
        no_attachments: Exclude attachments; the payload is then plain
            JSON/XML rather than a 7z archive.

    Returns:
        Path of the written backup file as a str, or None on failure.
    """
    print(f"\nπΎ Backing up notebook {nbid}...")
    # Create output directory; parents=True so a nested path also works.
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)
    # The API expects string 'true'/'false' flags, not Python booleans.
    params = {
        'uid': uid,
        'nbid': nbid,
        'json': 'true' if json_format else 'false',
        'no_attachments': 'true' if no_attachments else 'false'
    }
    try:
        response = client.make_call('notebooks', 'notebook_backup', params=params)
        if response.status_code == 200:
            # Timestamped filename so repeated backups never clobber each other.
            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            if no_attachments:
                ext = 'json' if json_format else 'xml'
                filename = f"notebook_{nbid}_{timestamp}.{ext}"
            else:
                filename = f"notebook_{nbid}_{timestamp}.7z"
            output_file = output_path / filename
            with open(output_file, 'wb') as f:
                f.write(response.content)
            file_size = output_file.stat().st_size / (1024 * 1024)  # MB
            # Fixed: this literal was previously split across two lines,
            # which is a syntax error in Python.
            print(f"β Backup saved: {output_file}")
            print(f" File size: {file_size:.2f} MB")
            return str(output_file)
        else:
            print(f"β Backup failed: HTTP {response.status_code}")
            print(f" Response: {response.content.decode('utf-8')[:200]}")
            return None
    except Exception as e:
        print(f"β Error during backup: {e}")
        return None
def backup_all_notebooks(client, uid, output_dir='backups', json_format=False,
                         no_attachments=False):
    """Back up every notebook the user can access, one after another."""
    print("\nπ¦ Backing up all notebooks...\n")
    notebooks = list_notebooks(client, uid)
    if not notebooks:
        print("No notebooks to backup")
        return
    # Collect per-notebook results; backup_notebook returns a path on
    # success and None on failure.
    outcomes = []
    for nb in notebooks:
        print(f"\n--- Backing up: {nb['name']} (ID: {nb['nbid']}) ---")
        outcomes.append(
            backup_notebook(client, uid, nb['nbid'], output_dir,
                            json_format, no_attachments)
        )
    successful = sum(1 for result in outcomes if result)
    failed = len(outcomes) - successful
    print("\n" + "=" * 60)
    print(f"Backup complete: {successful} successful, {failed} failed")
    print("=" * 60)
def main():
    """Command-line entry point: parse arguments and dispatch to a subcommand.

    Subcommands: list, backup, backup-all. Authentication happens once here
    and the resulting uid is threaded into every operation.
    """
    parser = argparse.ArgumentParser(
        description='LabArchives Notebook Operations',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # List all notebooks
  python3 notebook_operations.py list
  # Backup specific notebook
  python3 notebook_operations.py backup --nbid 12345
  # Backup all notebooks (JSON format, no attachments)
  python3 notebook_operations.py backup-all --json --no-attachments
  # Backup to custom directory
  python3 notebook_operations.py backup --nbid 12345 --output my_backups/
"""
    )
    parser.add_argument('--config', default='config.yaml',
                        help='Path to configuration file (default: config.yaml)')
    subparsers = parser.add_subparsers(dest='command', help='Command to execute')
    # List command (no extra options)
    subparsers.add_parser('list', help='List all accessible notebooks')
    # Backup command
    backup_parser = subparsers.add_parser('backup', help='Backup a specific notebook')
    backup_parser.add_argument('--nbid', required=True, help='Notebook ID to backup')
    backup_parser.add_argument('--output', default='backups',
                               help='Output directory (default: backups)')
    backup_parser.add_argument('--json', action='store_true',
                               help='Return data in JSON format instead of XML')
    backup_parser.add_argument('--no-attachments', action='store_true',
                               help='Exclude attachments from backup')
    # Backup all command (same options as backup minus --nbid)
    backup_all_parser = subparsers.add_parser('backup-all',
                                              help='Backup all accessible notebooks')
    backup_all_parser.add_argument('--output', default='backups',
                                   help='Output directory (default: backups)')
    backup_all_parser.add_argument('--json', action='store_true',
                                   help='Return data in JSON format instead of XML')
    backup_all_parser.add_argument('--no-attachments', action='store_true',
                                   help='Exclude attachments from backup')
    args = parser.parse_args()
    if not args.command:
        # Subcommands are optional by default in argparse; show usage and bail.
        parser.print_help()
        sys.exit(1)
    # Load configuration, build the client, authenticate once.
    config = load_config(args.config)
    client = init_client(config)
    uid = get_user_id(client, config)
    # Execute command
    if args.command == 'list':
        list_notebooks(client, uid)
    elif args.command == 'backup':
        backup_notebook(client, uid, args.nbid, args.output, args.json, args.no_attachments)
    elif args.command == 'backup-all':
        backup_all_notebooks(client, uid, args.output, args.json, args.no_attachments)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/labarchive-integration/scripts/notebook_operations.py",
"license": "MIT License",
"lines": 203,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/labarchive-integration/scripts/setup_config.py | #!/usr/bin/env python3
"""
LabArchives Configuration Setup Script
This script helps create a config.yaml file with necessary credentials
for LabArchives API access.
"""
import yaml
import os
from pathlib import Path
def get_regional_endpoint():
    """Interactively choose the regional API endpoint URL."""
    print("\nSelect your regional API endpoint:")
    print("1. US/International (mynotebook.labarchives.com)")
    print("2. Australia (aunotebook.labarchives.com)")
    print("3. UK (uknotebook.labarchives.com)")
    print("4. Custom endpoint")
    choice = input("\nEnter choice (1-4): ").strip()
    endpoints = {
        '1': 'https://api.labarchives.com/api',
        '2': 'https://auapi.labarchives.com/api',
        '3': 'https://ukapi.labarchives.com/api'
    }
    if choice == '4':
        return input("Enter custom API endpoint URL: ").strip()
    if choice in endpoints:
        return endpoints[choice]
    # Anything else falls back to the US endpoint.
    print("Invalid choice, defaulting to US/International")
    return endpoints['1']
def get_credentials():
    """Interactively collect institutional and user API credentials."""
    banner = "=" * 60
    print("\n" + banner)
    print("LabArchives API Credentials")
    print(banner)
    print("\nYou need two sets of credentials:")
    print("1. Institutional API credentials (from LabArchives administrator)")
    print("2. User authentication credentials (from your account settings)")
    print()
    # Institutional credentials
    print("Institutional Credentials:")
    key_id = input(" Access Key ID: ").strip()
    key_pw = input(" Access Password: ").strip()
    # User credentials
    print("\nUser Credentials:")
    email = input(" Your LabArchives email: ").strip()
    print("\nExternal Applications Password:")
    print("(Set this in your LabArchives Account Settings β Security & Privacy)")
    ext_pw = input(" External Applications Password: ").strip()
    return {
        'access_key_id': key_id,
        'access_password': key_pw,
        'user_email': email,
        'user_external_password': ext_pw,
    }
def create_config_file(config_data, output_path='config.yaml'):
    """Write the configuration dict to a YAML file with restrictive permissions.

    Args:
        config_data: Dict of configuration values to serialize.
        output_path: Destination YAML file (default: config.yaml).
    """
    with open(output_path, 'w') as f:
        yaml.dump(config_data, f, default_flow_style=False, sort_keys=False)
    # Set file permissions to user read/write only for security —
    # this file contains credentials.
    os.chmod(output_path, 0o600)
    # Fixed: this literal was previously split across two lines,
    # which is a syntax error in Python.
    print(f"\nβ Configuration saved to: {os.path.abspath(output_path)}")
    print(" File permissions set to 600 (user read/write only)")
def verify_config(config_path='config.yaml'):
    """Check that the configuration file loads and has all required fields.

    Args:
        config_path: Path to the YAML configuration file.

    Returns:
        True when the file parses and every required key has a non-empty
        value; False otherwise (including read/parse errors, which are
        caught and printed rather than raised).
    """
    try:
        with open(config_path, 'r') as f:
            config = yaml.safe_load(f)
        required_keys = ['api_url', 'access_key_id', 'access_password',
                         'user_email', 'user_external_password']
        # A key counts as missing when absent OR present but empty/falsy.
        missing = [key for key in required_keys if key not in config or not config[key]]
        if missing:
            print(f"\nβ οΈ Warning: Missing required fields: {', '.join(missing)}")
            return False
        # Fixed: this literal was previously split across two lines,
        # which is a syntax error in Python.
        print("\nβ Configuration file verified successfully")
        return True
    except Exception as e:
        print(f"\nβ Error verifying configuration: {e}")
        return False
def test_authentication(config_path='config.yaml'):
    """Optionally test authentication against the LabArchives API.

    Prompts the user first; does nothing unless they answer 'y'. All
    failures (missing package, bad credentials, network errors) are
    printed, never raised.

    Args:
        config_path: Path to the YAML configuration file to test with.
    """
    print("\nWould you like to test the connection? (requires labarchives-py package)")
    test = input("Test connection? (y/n): ").strip().lower()
    if test != 'y':
        return
    try:
        # Try to import labarchives-py
        from labarchivespy.client import Client
        import xml.etree.ElementTree as ET
        # Load config
        with open(config_path, 'r') as f:
            config = yaml.safe_load(f)
        # Initialize client
        print("\nInitializing client...")
        client = Client(
            config['api_url'],
            config['access_key_id'],
            config['access_password']
        )
        # Test authentication
        print("Testing authentication...")
        login_params = {
            'login_or_email': config['user_email'],
            'password': config['user_external_password']
        }
        response = client.make_call('users', 'user_access_info', params=login_params)
        if response.status_code == 200:
            # Parse the XML once; the first child element carries the UID.
            root = ET.fromstring(response.content)
            uid = root[0].text
            # Fixed: this literal was previously split across two lines,
            # which is a syntax error in Python.
            print("\nβ Authentication successful!")
            print(f" User ID: {uid}")
            # Get notebook count
            notebooks = root.findall('.//notebook')
            print(f" Accessible notebooks: {len(notebooks)}")
        else:
            print(f"\nβ Authentication failed: HTTP {response.status_code}")
            print(f" Response: {response.content.decode('utf-8')[:200]}")
    except ImportError:
        print("\nβ οΈ labarchives-py package not installed")
        print(" Install with: pip install git+https://github.com/mcmero/labarchives-py")
    except Exception as e:
        print(f"\nβ Connection test failed: {e}")
def main():
    """Interactive setup workflow: prompt, write config.yaml, verify, test.

    Steps: (1) refuse to overwrite an existing config unless confirmed,
    (2) collect endpoint + credentials interactively, (3) write the file
    with 600 permissions, (4) verify required fields, (5) optionally test
    authentication.
    """
    print("="*60)
    print("LabArchives API Configuration Setup")
    print("="*60)
    # Check if config already exists — never silently overwrite credentials.
    if os.path.exists('config.yaml'):
        print("\nβ οΈ config.yaml already exists")
        overwrite = input("Overwrite existing configuration? (y/n): ").strip().lower()
        if overwrite != 'y':
            print("Setup cancelled")
            return
    # Get configuration interactively
    api_url = get_regional_endpoint()
    credentials = get_credentials()
    # Combine configuration
    config_data = {
        'api_url': api_url,
        **credentials
    }
    # Create config file
    create_config_file(config_data)
    # Verify required fields are present and non-empty
    verify_config()
    # Test connection (prompts before doing anything)
    test_authentication()
    print("\n" + "="*60)
    print("Setup complete!")
    print("="*60)
    print("\nNext steps:")
    print("1. Add config.yaml to .gitignore if using version control")
    print("2. Use notebook_operations.py to list and backup notebooks")
    print("3. Use entry_operations.py to create entries and upload files")
    print("\nFor more information, see references/authentication_guide.md")


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/labarchive-integration/scripts/setup_config.py",
"license": "MIT License",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/literature-review/scripts/generate_pdf.py | #!/usr/bin/env python3
"""
PDF Generation Script for Literature Reviews
Converts markdown files to professionally formatted PDFs with proper styling.
"""
import subprocess
import sys
import os
from pathlib import Path
def generate_pdf(
    markdown_file: str,
    output_pdf: str = None,
    citation_style: str = "apa",
    template: str = None,
    toc: bool = True,
    number_sections: bool = True
) -> bool:
    """
    Generate a PDF from a markdown file using pandoc.

    Args:
        markdown_file: Path to the markdown file
        output_pdf: Path for output PDF (defaults to same name as markdown)
        citation_style: Citation style (apa, nature, chicago, etc.)
        template: Path to custom LaTeX template
        toc: Include table of contents
        number_sections: Number the sections

    Returns:
        True if successful, False otherwise
    """
    # Bail out early when the input does not exist.
    if not os.path.exists(markdown_file):
        print(f"Error: Markdown file not found: {markdown_file}")
        return False
    if output_pdf is None:
        output_pdf = Path(markdown_file).with_suffix('.pdf')
    # pandoc must be on PATH before anything else is worth doing.
    try:
        subprocess.run(['pandoc', '--version'], capture_output=True, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("Error: pandoc is not installed.")
        print("Install with: brew install pandoc (macOS) or apt-get install pandoc (Linux)")
        return False
    # Assemble the pandoc invocation (xelatex for better Unicode support).
    cmd = [
        'pandoc', markdown_file, '-o', str(output_pdf),
        '--pdf-engine=xelatex',
        '-V', 'geometry:margin=1in',
        '-V', 'fontsize=11pt',
        '-V', 'colorlinks=true',
        '-V', 'linkcolor=blue',
        '-V', 'urlcolor=blue',
        '-V', 'citecolor=blue',
    ]
    if toc:
        cmd += ['--toc', '--toc-depth=3']
    if number_sections:
        cmd.append('--number-sections')
    # Wire up citeproc when a sibling .bib file is present.
    bib_file = Path(markdown_file).with_suffix('.bib')
    if bib_file.exists():
        csl = citation_style if citation_style.endswith('.csl') else f'{citation_style}.csl'
        cmd += ['--citeproc', '--bibliography', str(bib_file), '--csl', csl]
    if template and os.path.exists(template):
        cmd += ['--template', template]
    try:
        print(f"Generating PDF: {output_pdf}")
        print(f"Command: {' '.join(cmd)}")
        subprocess.run(cmd, capture_output=True, text=True, check=True)
        print(f"β PDF generated successfully: {output_pdf}")
        return True
    except subprocess.CalledProcessError as e:
        print("Error generating PDF:")
        print(f"STDOUT: {e.stdout}")
        print(f"STDERR: {e.stderr}")
        return False
def check_dependencies():
    """Report whether pandoc and xelatex are installed; True when both are."""
    dependencies = {
        'pandoc': 'pandoc --version',
        'xelatex': 'xelatex --version'
    }
    missing = []
    for name, cmd in dependencies.items():
        try:
            subprocess.run(cmd.split(), capture_output=True, check=True)
        except (subprocess.CalledProcessError, FileNotFoundError):
            print(f"β {name} is NOT installed")
            missing.append(name)
        else:
            print(f"β {name} is installed")
    if not missing:
        return True
    # Print per-tool installation hints for whatever is absent.
    print("\n" + "=" * 60)
    print("Missing dependencies:")
    hints = {
        'pandoc': " - pandoc: brew install pandoc (macOS) or apt-get install pandoc (Linux)",
        'xelatex': " - xelatex: brew install --cask mactex (macOS) or apt-get install texlive-xetex (Linux)",
    }
    for dep in missing:
        print(hints[dep])
    return False
def main():
    """Command-line interface: hand-rolled sys.argv parsing, then generate.

    Exit status 0 on success (or --check-deps run), 1 on usage error or
    failed PDF generation.
    """
    if len(sys.argv) < 2:
        print("Usage: python generate_pdf.py <markdown_file> [output_pdf] [--citation-style STYLE]")
        print("\nOptions:")
        print(" --citation-style STYLE Citation style (default: apa)")
        print(" --no-toc Disable table of contents")
        print(" --no-numbers Disable section numbering")
        print(" --check-deps Check if dependencies are installed")
        sys.exit(1)
    # Check dependencies mode — runs and exits regardless of other args.
    if '--check-deps' in sys.argv:
        check_dependencies()
        sys.exit(0)
    # Parse positional arguments: markdown file, then optional output path
    # (only when the second argument is not a flag).
    markdown_file = sys.argv[1]
    output_pdf = sys.argv[2] if len(sys.argv) > 2 and not sys.argv[2].startswith('--') else None
    citation_style = 'apa'
    toc = True
    number_sections = True
    # Parse optional flags
    if '--citation-style' in sys.argv:
        idx = sys.argv.index('--citation-style')
        if idx + 1 < len(sys.argv):
            citation_style = sys.argv[idx + 1]
    if '--no-toc' in sys.argv:
        toc = False
    if '--no-numbers' in sys.argv:
        number_sections = False
    # Generate PDF and map the boolean result onto the exit status.
    success = generate_pdf(
        markdown_file,
        output_pdf,
        citation_style=citation_style,
        toc=toc,
        number_sections=number_sections
    )
    sys.exit(0 if success else 1)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/literature-review/scripts/generate_pdf.py",
"license": "MIT License",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/literature-review/scripts/search_databases.py | #!/usr/bin/env python3
"""
Literature Database Search Script
Searches multiple literature databases and aggregates results.
"""
import json
import sys
from typing import Dict, List
from datetime import datetime
def format_search_results(results: List[Dict], output_format: str = 'json') -> str:
    """
    Format search results for output.

    Args:
        results: List of search results
        output_format: Format (json, markdown, or bibtex)

    Returns:
        Formatted string

    Raises:
        ValueError: If output_format is not one of json/markdown/bibtex.
    """
    if output_format == 'json':
        return json.dumps(results, indent=2)
    elif output_format == 'markdown':
        # Header carries the generation timestamp and result count.
        md = "# Literature Search Results\n\n"
        md += f"**Search Date**: {datetime.now().strftime('%Y-%m-%d %H:%M')}\n"
        md += f"**Total Results**: {len(results)}\n\n"
        for i, result in enumerate(results, 1):
            md += f"## {i}. {result.get('title', 'Untitled')}\n\n"
            md += f"**Authors**: {result.get('authors', 'Unknown')}\n\n"
            md += f"**Year**: {result.get('year', 'N/A')}\n\n"
            md += f"**Source**: {result.get('source', 'Unknown')}\n\n"
            # Optional fields are emitted only when present and non-empty.
            if result.get('abstract'):
                md += f"**Abstract**: {result['abstract']}\n\n"
            if result.get('doi'):
                md += f"**DOI**: [{result['doi']}](https://doi.org/{result['doi']})\n\n"
            if result.get('url'):
                md += f"**URL**: {result['url']}\n\n"
            if result.get('citations'):
                md += f"**Citations**: {result['citations']}\n\n"
            md += "---\n\n"
        return md
    elif output_format == 'bibtex':
        bibtex = ""
        # The enumerate index here was unused; iterate directly.
        for result in results:
            entry_type = result.get('type', 'article')
            # Cite key is "<first_author><year>", e.g. smith2020.
            cite_key = f"{result.get('first_author', 'unknown')}{result.get('year', '0000')}"
            bibtex += f"@{entry_type}{{{cite_key},\n"
            bibtex += f"  title = {{{result.get('title', '')}}},\n"
            bibtex += f"  author = {{{result.get('authors', '')}}},\n"
            bibtex += f"  year = {{{result.get('year', '')}}},\n"
            if result.get('journal'):
                bibtex += f"  journal = {{{result['journal']}}},\n"
            if result.get('volume'):
                bibtex += f"  volume = {{{result['volume']}}},\n"
            if result.get('pages'):
                bibtex += f"  pages = {{{result['pages']}}},\n"
            if result.get('doi'):
                bibtex += f"  doi = {{{result['doi']}}},\n"
            bibtex += "}\n\n"
        return bibtex
    else:
        raise ValueError(f"Unknown format: {output_format}")
def deduplicate_results(results: List[Dict]) -> List[Dict]:
    """
    Remove duplicate results, preferring DOI identity over title match.

    A result with a DOI is a duplicate when that DOI (case-folded) was
    already seen; a result without a DOI falls back to its title.

    Args:
        results: List of search results

    Returns:
        Deduplicated list (original order preserved)
    """
    seen_dois, seen_titles = set(), set()
    unique = []
    for item in results:
        doi = item.get('doi', '').lower().strip()
        title = item.get('title', '').lower().strip()
        # DOI is the reliable key; title is only consulted when DOI is absent.
        is_duplicate = (doi in seen_dois) if doi else (title in seen_titles)
        if is_duplicate:
            continue
        if doi:
            seen_dois.add(doi)
        if title:
            seen_titles.add(title)
        unique.append(item)
    return unique
def rank_results(results: List[Dict], criteria: str = 'citations') -> List[Dict]:
    """
    Rank results by specified criteria, descending.

    Args:
        results: List of search results
        criteria: Ranking criteria (citations, year, relevance)

    Returns:
        Ranked new list; unknown criteria return the input unchanged.
        Values are coerced numerically, so lists mixing str and int
        fields (e.g. year '2021' vs 2019) no longer raise TypeError;
        unparsable values sort as 0.
    """
    def _as_number(value):
        # Sources deliver numeric fields as str or int; coerce defensively.
        try:
            return float(value)
        except (TypeError, ValueError):
            return 0.0

    if criteria == 'citations':
        return sorted(results, key=lambda x: _as_number(x.get('citations', 0)), reverse=True)
    elif criteria == 'year':
        return sorted(results, key=lambda x: _as_number(x.get('year', 0)), reverse=True)
    elif criteria == 'relevance':
        return sorted(results, key=lambda x: _as_number(x.get('relevance_score', 0)), reverse=True)
    else:
        return results
def filter_by_year(results: List[Dict], start_year: int = None, end_year: int = None) -> List[Dict]:
    """
    Filter results by publication year range.

    Results whose year cannot be parsed as an integer are kept, matching
    the permissive behavior of the original pipeline.

    Args:
        results: List of search results
        start_year: Minimum year (inclusive)
        end_year: Maximum year (inclusive)

    Returns:
        Filtered list
    """
    kept = []
    for item in results:
        try:
            year = int(item.get('year', 0))
        except (ValueError, TypeError):
            # Unparsable year: include rather than silently drop.
            kept.append(item)
            continue
        if start_year and year < start_year:
            continue
        if end_year and year > end_year:
            continue
        kept.append(item)
    return kept
def generate_search_summary(results: List[Dict]) -> Dict:
    """
    Generate summary statistics for search results.

    Args:
        results: List of search results

    Returns:
        Dict with total_results, per-source and per-year counts, and
        citation totals/average (averaged over results whose citation
        count parsed as an integer).
    """
    summary = {
        'total_results': len(results),
        'sources': {},
        'year_distribution': {},
        'avg_citations': 0,
        'total_citations': 0
    }
    citation_counts = []
    for item in results:
        # Tally by source and by year, defaulting unknowns.
        src = item.get('source', 'Unknown')
        summary['sources'][src] = summary['sources'].get(src, 0) + 1
        yr = item.get('year', 'Unknown')
        summary['year_distribution'][yr] = summary['year_distribution'].get(yr, 0) + 1
        # Citation counts: only non-empty values that parse as int.
        raw = item.get('citations')
        if raw:
            try:
                citation_counts.append(int(raw))
            except (ValueError, TypeError):
                pass
    if citation_counts:
        summary['total_citations'] = sum(citation_counts)
        summary['avg_citations'] = summary['total_citations'] / len(citation_counts)
    return summary
def main():
    """Command-line interface for search result processing.

    Pipeline order is fixed: load JSON -> deduplicate -> year filter ->
    rank -> optional summary -> format -> write/print. The manual
    index-based argv loop below is order-sensitive; keep flag handling
    in sync with the usage text.
    """
    if len(sys.argv) < 2:
        print("Usage: python search_databases.py <results.json> [options]")
        print("\nOptions:")
        print(" --format FORMAT Output format (json, markdown, bibtex)")
        print(" --output FILE Output file (default: stdout)")
        print(" --rank CRITERIA Rank by (citations, year, relevance)")
        print(" --year-start YEAR Filter by start year")
        print(" --year-end YEAR Filter by end year")
        print(" --deduplicate Remove duplicates")
        print(" --summary Show summary statistics")
        sys.exit(1)
    # Load results from the JSON file given as the first positional arg.
    results_file = sys.argv[1]
    try:
        with open(results_file, 'r', encoding='utf-8') as f:
            results = json.load(f)
    except Exception as e:
        print(f"Error loading results: {e}")
        sys.exit(1)
    # Parse options (defaults below; note the default format is markdown
    # here, unlike the function-level default of json).
    output_format = 'markdown'
    output_file = None
    rank_criteria = None
    year_start = None
    year_end = None
    do_dedup = False
    show_summary = False
    i = 2
    while i < len(sys.argv):
        arg = sys.argv[i]
        # Value-taking flags consume two argv slots; boolean flags one.
        if arg == '--format' and i + 1 < len(sys.argv):
            output_format = sys.argv[i + 1]
            i += 2
        elif arg == '--output' and i + 1 < len(sys.argv):
            output_file = sys.argv[i + 1]
            i += 2
        elif arg == '--rank' and i + 1 < len(sys.argv):
            rank_criteria = sys.argv[i + 1]
            i += 2
        elif arg == '--year-start' and i + 1 < len(sys.argv):
            year_start = int(sys.argv[i + 1])
            i += 2
        elif arg == '--year-end' and i + 1 < len(sys.argv):
            year_end = int(sys.argv[i + 1])
            i += 2
        elif arg == '--deduplicate':
            do_dedup = True
            i += 1
        elif arg == '--summary':
            show_summary = True
            i += 1
        else:
            # Unrecognized tokens are silently skipped.
            i += 1
    # Process results in a fixed order: dedup, then year filter, then rank.
    if do_dedup:
        results = deduplicate_results(results)
        print(f"After deduplication: {len(results)} results")
    if year_start or year_end:
        results = filter_by_year(results, year_start, year_end)
        print(f"After year filter: {len(results)} results")
    if rank_criteria:
        results = rank_results(results, rank_criteria)
        print(f"Ranked by: {rank_criteria}")
    # Show summary
    if show_summary:
        summary = generate_search_summary(results)
        print("\n" + "="*60)
        print("SEARCH SUMMARY")
        print("="*60)
        print(json.dumps(summary, indent=2))
        print()
    # Format output
    output = format_search_results(results, output_format)
    # Write output to the file when --output was given, else stdout.
    if output_file:
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(output)
        print(f"β Results saved to: {output_file}")
    else:
        print(output)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/literature-review/scripts/search_databases.py",
"license": "MIT License",
"lines": 244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/literature-review/scripts/verify_citations.py | #!/usr/bin/env python3
"""
Citation Verification Script
Verifies DOIs, URLs, and citation metadata for accuracy.
"""
import re
import requests
import json
from typing import Dict, List, Tuple
from urllib.parse import urlparse
import time
class CitationVerifier:
def __init__(self):
self.session = requests.Session()
self.session.headers.update({
'User-Agent': 'CitationVerifier/1.0 (Literature Review Tool)'
})
def extract_dois(self, text: str) -> List[str]:
"""Extract all DOIs from text."""
doi_pattern = r'10\.\d{4,}/[^\s\]\)"]+'
return re.findall(doi_pattern, text)
def verify_doi(self, doi: str) -> Tuple[bool, Dict]:
"""
Verify a DOI and retrieve metadata.
Returns (is_valid, metadata)
"""
try:
url = f"https://doi.org/api/handles/{doi}"
response = self.session.get(url, timeout=10)
if response.status_code == 200:
# DOI exists, now get metadata from CrossRef
metadata = self._get_crossref_metadata(doi)
return True, metadata
else:
return False, {}
except Exception as e:
return False, {"error": str(e)}
def _get_crossref_metadata(self, doi: str) -> Dict:
"""Get metadata from CrossRef API."""
try:
url = f"https://api.crossref.org/works/{doi}"
response = self.session.get(url, timeout=10)
if response.status_code == 200:
data = response.json()
message = data.get('message', {})
# Extract key metadata
metadata = {
'title': message.get('title', [''])[0],
'authors': self._format_authors(message.get('author', [])),
'year': self._extract_year(message),
'journal': message.get('container-title', [''])[0],
'volume': message.get('volume', ''),
'pages': message.get('page', ''),
'doi': doi
}
return metadata
return {}
except Exception as e:
return {"error": str(e)}
def _format_authors(self, authors: List[Dict]) -> str:
"""Format author list."""
if not authors:
return ""
formatted = []
for author in authors[:3]: # First 3 authors
given = author.get('given', '')
family = author.get('family', '')
if family:
formatted.append(f"{family}, {given[0]}." if given else family)
if len(authors) > 3:
formatted.append("et al.")
return ", ".join(formatted)
def _extract_year(self, message: Dict) -> str:
"""Extract publication year."""
date_parts = message.get('published-print', {}).get('date-parts', [[]])
if not date_parts or not date_parts[0]:
date_parts = message.get('published-online', {}).get('date-parts', [[]])
if date_parts and date_parts[0]:
return str(date_parts[0][0])
return ""
def verify_url(self, url: str) -> Tuple[bool, int]:
"""
Verify a URL is accessible.
Returns (is_accessible, status_code)
"""
try:
response = self.session.head(url, timeout=10, allow_redirects=True)
is_accessible = response.status_code < 400
return is_accessible, response.status_code
except Exception as e:
return False, 0
def verify_citations_in_file(self, filepath: str) -> Dict:
    """
    Verify all citations in a markdown file.

    Reads the file, extracts its DOIs, and checks each against the DOI
    resolver, pausing briefly between lookups to respect API rate
    limits. Returns a report dict with 'total_dois', 'verified',
    'failed', and per-DOI 'metadata'.
    """
    with open(filepath, 'r', encoding='utf-8') as handle:
        text = handle.read()

    dois = self.extract_dois(text)
    report = {
        'total_dois': len(dois),
        'verified': [],
        'failed': [],
        'metadata': {},
    }

    for doi in dois:
        print(f"Verifying DOI: {doi}")
        ok, meta = self.verify_doi(doi)
        if ok:
            report['verified'].append(doi)
            report['metadata'][doi] = meta
        else:
            report['failed'].append(doi)
        time.sleep(0.5)  # Rate limiting

    return report
def format_citation_apa(self, metadata: Dict) -> str:
    """Render *metadata* as an APA-style reference string.

    Missing fields are simply omitted; a missing year renders as "n.d.".
    """
    pieces = [
        f"{metadata.get('authors', '')} ({metadata.get('year', 'n.d.')}). "
        f"{metadata.get('title', '')}. "
    ]
    journal = metadata.get('journal', '')
    if journal:
        pieces.append(f"*{journal}*")
    volume = metadata.get('volume', '')
    if volume:
        pieces.append(f", *{volume}*")
    pages = metadata.get('pages', '')
    if pages:
        pieces.append(f", {pages}")
    doi = metadata.get('doi', '')
    if doi:
        pieces.append(f". https://doi.org/{doi}")
    return "".join(pieces)
def format_citation_nature(self, metadata: Dict) -> str:
    """Render *metadata* as a Nature-style reference string.

    Missing fields are simply omitted from the output.
    """
    out = f"{metadata.get('authors', '')} {metadata.get('title', '')}. "
    # Append each optional field using its Nature-style markup.
    for template, key in (
        ("*{}* ", 'journal'),
        ("**{}**, ", 'volume'),
        ("{} ", 'pages'),
        ("({})", 'year'),
    ):
        value = metadata.get(key, '')
        if value:
            out += template.format(value)
    return out
def main():
    """CLI entry point: verify every DOI citation in one markdown file.

    Prints a human-readable report and writes a JSON copy of it next to
    the input file.
    """
    import sys
    if len(sys.argv) < 2:
        print("Usage: python verify_citations.py <markdown_file>")
        sys.exit(1)

    filepath = sys.argv[1]
    verifier = CitationVerifier()

    print(f"Verifying citations in: {filepath}")
    report = verifier.verify_citations_in_file(filepath)

    print("\n" + "="*60)
    print("CITATION VERIFICATION REPORT")
    print("="*60)
    print(f"\nTotal DOIs found: {report['total_dois']}")
    print(f"Verified: {len(report['verified'])}")
    print(f"Failed: {len(report['failed'])}")

    if report['failed']:
        print("\nFailed DOIs:")
        for doi in report['failed']:
            print(f"  - {doi}")

    if report['metadata']:
        print("\n\nVerified Citations (APA format):")
        for doi, metadata in report['metadata'].items():
            citation = verifier.format_citation_apa(metadata)
            print(f"\n{citation}")

    # Save detailed report.
    # BUGFIX: str.replace is a no-op when the input has no ".md" suffix,
    # in which case json.dump would have overwritten the input file itself.
    output_file = filepath.replace('.md', '_citation_report.json')
    if output_file == filepath:
        output_file = filepath + '_citation_report.json'
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(report, f, indent=2)
    print(f"\n\nDetailed report saved to: {output_file}")


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/literature-review/scripts/verify_citations.py",
"license": "MIT License",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/market-research-reports/scripts/generate_market_visuals.py | #!/usr/bin/env python3
"""
Market Research Report Visual Generator
Batch generates visuals for a market research report using
scientific-schematics and generate-image skills.
Default behavior: Generate 5-6 core visuals only
Use --all flag to generate all 28 extended visuals
Usage:
# Generate core 5-6 visuals (recommended for starting a report)
python generate_market_visuals.py --topic "Electric Vehicle Charging" --output-dir figures/
# Generate all 28 visuals (for comprehensive coverage)
python generate_market_visuals.py --topic "AI in Healthcare" --output-dir figures/ --all
# Skip existing files
python generate_market_visuals.py --topic "Topic" --output-dir figures/ --skip-existing
"""
import argparse
import os
import subprocess
import sys
from pathlib import Path
# Visual definitions with prompts.
# Each tuple: (filename, tool, prompt_template) — the prompt is formatted
# with {topic} at generation time.
# CORE_VISUALS holds the essential visuals to generate first;
# EXTENDED_VISUALS (below) holds the optional remainder.
CORE_VISUALS = [
    # Priority 1: Market Growth Trajectory
    (
        "01_market_growth_trajectory.png",
        "scientific-schematics",
        "Bar chart {topic} market growth 2020 to 2034. Historical bars 2020-2024 in dark blue, "
        "projected bars 2025-2034 in light blue. Y-axis billions USD, X-axis years. "
        "CAGR annotation. Data labels on each bar. Vertical dashed line "
        "between 2024 and 2025. Title: Market Growth Trajectory. Professional white background"
    ),
    # Priority 2: TAM/SAM/SOM
    (
        "02_tam_sam_som.png",
        "scientific-schematics",
        "TAM SAM SOM concentric circles for {topic} market. Outer circle TAM Total Addressable "
        "Market. Middle circle SAM Serviceable Addressable Market. Inner circle SOM Serviceable "
        "Obtainable Market. Each labeled with acronym, full name. "
        "Blue gradient darkest outer to lightest inner. White background professional appearance"
    ),
    # Priority 3: Porter's Five Forces
    (
        "03_porters_five_forces.png",
        "scientific-schematics",
        "Porter's Five Forces diagram for {topic}. Center box Competitive Rivalry with rating. "
        "Four surrounding boxes with arrows to center: Top Threat of New Entrants, "
        "Left Bargaining Power Suppliers, Right Bargaining Power Buyers, "
        "Bottom Threat of Substitutes. Color code HIGH red, MEDIUM yellow, LOW green. "
        "Include 2-3 key factors per box. Professional appearance"
    ),
    # Priority 4: Competitive Positioning Matrix
    (
        "04_competitive_positioning.png",
        "scientific-schematics",
        "2x2 competitive positioning matrix {topic}. X-axis Market Focus Niche to Broad. "
        "Y-axis Solution Approach Product to Platform. Quadrants: Upper-right Platform Leaders, "
        "Upper-left Niche Platforms, Lower-right Product Leaders, Lower-left Specialists. "
        "Plot 8-10 company circles with names. Circle size = market share. "
        "Legend for sizes. Professional appearance"
    ),
    # Priority 5: Risk Heatmap
    (
        "05_risk_heatmap.png",
        "scientific-schematics",
        "Risk heatmap matrix {topic}. X-axis Impact Low Medium High Critical. "
        "Y-axis Probability Unlikely Possible Likely Very Likely. "
        "Cell colors: Green low risk, Yellow medium, Orange high, Red critical. "
        "Plot 10-12 numbered risks R1 R2 etc as labeled points. "
        "Legend with risk names. Professional clear"
    ),
    # Priority 6: Executive Summary Infographic (Optional)
    (
        "06_exec_summary_infographic.png",
        "generate-image",
        "Executive summary infographic for {topic} market research, one page layout, "
        "central large metric showing market size, four quadrants showing growth rate "
        "key players top segments regional leaders, modern flat design, professional "
        "blue and green color scheme, clean white background, corporate business aesthetic"
    ),
]
# Optional extended visuals, same (filename, tool, prompt_template) shape
# as CORE_VISUALS.
# BUGFIX: the 08_regional_breakdown entry contained a duplicated
# "scientific-schematics" string, turning it into a 4-tuple and crashing
# the (filename, tool, prompt) unpacking loops downstream.
EXTENDED_VISUALS = [
    # Industry Ecosystem
    (
        "07_industry_ecosystem.png",
        "scientific-schematics",
        "Industry ecosystem value chain diagram for {topic} market. Horizontal flow left "
        "to right: Suppliers box β Manufacturers box β Distributors box β End Users box. "
        "Below each main box show 3-4 smaller boxes with example player types. Solid arrows "
        "for product flow, dashed arrows for money flow. Regulatory oversight layer above. "
        "Professional blue color scheme, white background, clear labels"
    ),
    # Regional Breakdown
    (
        "08_regional_breakdown.png",
        "scientific-schematics",
        "Pie chart regional market breakdown for {topic}. North America 40% dark blue, "
        "Europe 28% medium blue, Asia-Pacific 22% teal, Latin America 6% light blue, "
        "Middle East Africa 4% gray blue. Show percentage for each slice. Legend on right. "
        "Title: Market Size by Region. Professional appearance"
    ),
    # Segment Growth
    (
        "09_segment_growth.png",
        "scientific-schematics",
        "Horizontal bar chart {topic} segment growth comparison. Y-axis 5-6 segment names, "
        "X-axis CAGR percentage 0-30%. Bars colored green highest to blue lowest. "
        "Data labels with percentages. Sorted highest to lowest. "
        "Title: Segment Growth Rate Comparison. Include market average line"
    ),
    # Driver Impact Matrix
    (
        "10_driver_impact_matrix.png",
        "scientific-schematics",
        "2x2 matrix driver impact assessment for {topic}. X-axis Impact Low to High, "
        "Y-axis Probability Low to High. Quadrants: Upper-right CRITICAL DRIVERS red, "
        "Upper-left MONITOR yellow, Lower-right WATCH CAREFULLY yellow, "
        "Lower-left LOWER PRIORITY green. Plot 8 labeled driver circles at positions. "
        "Circle size indicates current impact. Professional clear labels"
    ),
    # PESTLE Analysis
    (
        "11_pestle_analysis.png",
        "scientific-schematics",
        "PESTLE hexagonal diagram for {topic} market. Center hexagon labeled Market Analysis. "
        "Six surrounding hexagons: Political red, Economic blue, Social green, "
        "Technological orange, Legal purple, Environmental teal. Each outer hexagon "
        "has 2-3 bullet points of key factors. Lines connecting center to each. "
        "Professional appearance clear readable text"
    ),
    # Trends Timeline
    (
        "12_trends_timeline.png",
        "scientific-schematics",
        "Horizontal timeline {topic} trends 2024 to 2030. Plot 6-8 emerging trends at "
        "different years. Each trend with icon, name, brief description. Color code: "
        "Technology trends blue, Market trends green, Regulatory trends orange. "
        "Current marker at 2024. Professional clear labels"
    ),
    # Market Share Chart
    (
        "13_market_share.png",
        "scientific-schematics",
        "Pie chart market share {topic} top 10 companies. Company A 18% dark blue, "
        "Company B 15% medium blue, Company C 12% teal, Company D 10% light blue, "
        "5 more companies 5-8% each various blues, Others 15% gray. "
        "Percentage labels on slices. Legend with company names. "
        "Title: Market Share by Company. Colorblind-friendly colors professional"
    ),
    # Strategic Groups Map
    (
        "14_strategic_groups.png",
        "scientific-schematics",
        "Strategic group map {topic}. X-axis Geographic Scope Regional to Global. "
        "Y-axis Product Breadth Narrow to Broad. Draw 4-5 oval bubbles for strategic groups. "
        "Each bubble contains 2-4 company names. Bubble size = collective market share. "
        "Label groups: Global Generalists, Regional Specialists, Focused Innovators. "
        "Different colors per group. Professional clear labels"
    ),
    # Customer Segments
    (
        "15_customer_segments.png",
        "scientific-schematics",
        "Treemap customer segmentation {topic}. Large Enterprise 45% dark blue, "
        "Mid-Market 30% medium blue, SMB 18% light blue, Consumer 7% teal. "
        "Each segment shows name and percentage. Title: Customer Segmentation by Market Share. "
        "Professional appearance clear labels"
    ),
    # Segment Attractiveness
    (
        "16_segment_attractiveness.png",
        "scientific-schematics",
        "2x2 segment attractiveness matrix {topic}. X-axis Segment Size Small to Large. "
        "Y-axis Growth Rate Low to High. Quadrants: Upper-right PRIORITY Invest Heavily green, "
        "Upper-left INVEST TO GROW yellow, Lower-right HARVEST orange, "
        "Lower-left DEPRIORITIZE gray. Plot customer segments as circles. "
        "Circle size = profitability. Different colors. Professional"
    ),
    # Customer Journey
    (
        "17_customer_journey.png",
        "scientific-schematics",
        "Customer journey horizontal flowchart {topic}. 5 stages left to right: Awareness, "
        "Consideration, Decision, Implementation, Advocacy. Each stage shows Key Activities, "
        "Pain Points, Touchpoints in rows below. Icons for each stage. "
        "Color gradient light to dark. Professional clear labels"
    ),
    # Technology Roadmap
    (
        "18_technology_roadmap.png",
        "scientific-schematics",
        "Technology roadmap {topic} 2024 to 2030. Three parallel horizontal tracks: "
        "Core Technology blue, Emerging Technology green, Enabling Technology orange. "
        "Milestones and tech introductions marked on each track. Vertical lines connect "
        "related tech. Year markers. Technology names labeled. Professional appearance"
    ),
    # Innovation Curve
    (
        "19_innovation_curve.png",
        "scientific-schematics",
        "Gartner Hype Cycle curve for {topic} technologies. Five phases: Innovation Trigger "
        "rising, Peak of Inflated Expectations at top, Trough of Disillusionment at bottom, "
        "Slope of Enlightenment rising, Plateau of Productivity stable. "
        "Plot 6-8 technologies on curve with labels. Color by category. Professional clear labels"
    ),
    # Regulatory Timeline
    (
        "20_regulatory_timeline.png",
        "scientific-schematics",
        "Regulatory timeline {topic} 2020 to 2028. Past regulations dark blue solid markers, "
        "current green marker, upcoming light blue dashed. Each shows regulation name, date, "
        "brief description. Vertical NOW line at 2024. Professional appearance clear labels"
    ),
    # Risk Mitigation Matrix
    (
        "21_risk_mitigation.png",
        "scientific-schematics",
        "Risk mitigation diagram {topic}. Left column risks in orange/red boxes. "
        "Right column mitigation strategies in green/blue boxes. Arrows connecting "
        "risks to mitigations. Group by category. Risk severity by color intensity. "
        "Include prevention and response. Professional clear labels"
    ),
    # Opportunity Matrix
    (
        "22_opportunity_matrix.png",
        "scientific-schematics",
        "2x2 opportunity matrix {topic}. X-axis Market Attractiveness Low to High. "
        "Y-axis Ability to Win Low to High. Quadrants: Upper-right PURSUE AGGRESSIVELY green, "
        "Upper-left BUILD CAPABILITIES yellow, Lower-right SELECTIVE INVESTMENT yellow, "
        "Lower-left AVOID red. Plot 6-8 opportunity circles with labels. "
        "Size = opportunity value. Professional"
    ),
    # Recommendation Priority Matrix
    (
        "23_recommendation_priority.png",
        "scientific-schematics",
        "2x2 priority matrix {topic} recommendations. X-axis Effort Low to High. "
        "Y-axis Impact Low to High. Quadrants: Upper-left QUICK WINS green Do First, "
        "Upper-right MAJOR PROJECTS blue Plan Carefully, Lower-left FILL-INS gray Do If Time, "
        "Lower-right THANKLESS TASKS red Avoid. Plot 6-8 numbered recommendations. Professional"
    ),
    # Implementation Timeline
    (
        "24_implementation_timeline.png",
        "scientific-schematics",
        "Gantt chart implementation {topic} 24 months. Phase 1 Foundation months 1-6 dark blue. "
        "Phase 2 Build months 4-12 medium blue. Phase 3 Scale months 10-18 teal. "
        "Phase 4 Optimize months 16-24 light blue. Overlapping bars. "
        "Key milestones as diamonds. Month markers X-axis. Professional"
    ),
    # Milestone Tracker
    (
        "25_milestone_tracker.png",
        "scientific-schematics",
        "Milestone tracker {topic} horizontal timeline 8-10 milestones. "
        "Each shows date, name, status: Completed green check, In Progress yellow circle, "
        "Upcoming gray circle. Group by phase. Phase labels above. "
        "Connected timeline line. Professional"
    ),
    # Financial Projections
    (
        "26_financial_projections.png",
        "scientific-schematics",
        "Combined bar and line chart {topic} 5-year projections. Bar chart revenue "
        "primary Y-axis dollars. Line chart growth rate secondary Y-axis percent. "
        "Three scenarios: Conservative gray, Base Case blue, Optimistic green. "
        "X-axis Year 1-5. Data labels. Legend. Title Financial Projections 5-Year. Professional"
    ),
    # Scenario Analysis
    (
        "27_scenario_analysis.png",
        "scientific-schematics",
        "Grouped bar chart {topic} scenario comparison. X-axis metrics: Revenue Y5, "
        "EBITDA Y5, Market Share, ROI. Three bars per metric: Conservative gray, "
        "Base Case blue, Optimistic green. Data labels. Legend. "
        "Title Scenario Analysis Comparison. Professional clear labels"
    ),
]
def get_script_path(tool: str) -> Path:
    """Return the generator script path for *tool*.

    Raises ValueError for any tool other than 'scientific-schematics'
    or 'generate-image'.
    """
    skills_root = Path(__file__).parent.parent.parent  # skills directory
    scripts = {
        "scientific-schematics": skills_root / "scientific-schematics" / "scripts" / "generate_schematic.py",
        "generate-image": skills_root / "generate-image" / "scripts" / "generate_image.py",
    }
    if tool not in scripts:
        raise ValueError(f"Unknown tool: {tool}")
    return scripts[tool]
def generate_visual(
    filename: str,
    tool: str,
    prompt: str,
    output_dir: Path,
    topic: str,
    skip_existing: bool = False,
    verbose: bool = False
) -> bool:
    """Generate a single visual by shelling out to the tool's script.

    Args:
        filename: Output image file name (written under *output_dir*).
        tool: Either 'scientific-schematics' or 'generate-image'.
        prompt: Prompt template; '{topic}' is substituted with *topic*.
        output_dir: Directory where the image is written.
        topic: Market topic inserted into the prompt.
        skip_existing: If True, succeed immediately when the file exists.
        verbose: Print progress details.

    Returns:
        True on success (or skip), False on any failure.

    BUGFIX: progress/error messages printed the literal "(unknown)"
    instead of the file being processed; they now include *filename*.
    """
    output_path = output_dir / filename

    # Skip if exists and skip_existing is True
    if skip_existing and output_path.exists():
        if verbose:
            print(f"  [SKIP] {filename} already exists")
        return True

    # Format prompt with topic
    formatted_prompt = prompt.format(topic=topic)

    # Get script path
    script_path = get_script_path(tool)
    if not script_path.exists():
        print(f"  [ERROR] Script not found: {script_path}")
        return False

    # Build command
    if tool == "scientific-schematics":
        cmd = [
            sys.executable,
            str(script_path),
            formatted_prompt,
            "-o", str(output_path),
            "--doc-type", "report"
        ]
    else:  # generate-image
        cmd = [
            sys.executable,
            str(script_path),
            formatted_prompt,
            "--output", str(output_path)
        ]

    if verbose:
        print(f"  [GEN] {filename}")
        print(f"    Tool: {tool}")
        print(f"    Prompt: {formatted_prompt[:80]}...")

    try:
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=120  # 2 minute timeout per image
        )
        if result.returncode == 0:
            if verbose:
                print(f"  [OK] {filename} generated successfully")
            return True
        else:
            print(f"  [ERROR] {filename} failed:")
            if result.stderr:
                print(f"    {result.stderr[:200]}")
            return False
    except subprocess.TimeoutExpired:
        print(f"  [TIMEOUT] {filename} generation timed out")
        return False
    except Exception as e:
        print(f"  [ERROR] {filename}: {str(e)}")
        return False
def main():
    """CLI entry point: batch-generate market research report visuals."""
    parser = argparse.ArgumentParser(
        description="Generate visuals for a market research report (default: 5-6 core visuals)"
    )
    parser.add_argument(
        "--topic", "-t",
        required=True,
        help="Market topic (e.g., 'Electric Vehicle Charging Infrastructure')"
    )
    parser.add_argument(
        "--output-dir", "-o",
        default="figures",
        help="Output directory for generated images (default: figures)"
    )
    parser.add_argument(
        "--all", "-a",
        action="store_true",
        help="Generate all 27 visuals, core + extended (default: only core 5-6)"
    )
    parser.add_argument(
        "--skip-existing", "-s",
        action="store_true",
        help="Skip generation if file already exists"
    )
    parser.add_argument(
        "--verbose", "-v",
        action="store_true",
        help="Show detailed output"
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Show what would be generated without actually generating"
    )
    parser.add_argument(
        "--only",
        type=str,
        help="Only generate visuals matching this pattern (e.g., '01_', 'porter')"
    )

    args = parser.parse_args()

    # Create output directory
    output_dir = Path(args.output_dir)
    if not args.dry_run:
        output_dir.mkdir(parents=True, exist_ok=True)

    print(f"\n{'='*60}")
    print(f"Market Research Visual Generator")
    print(f"{'='*60}")
    print(f"Topic: {args.topic}")
    print(f"Output Directory: {output_dir.absolute()}")
    print(f"Mode: {'All Visuals (27)' if args.all else 'Core Visuals Only (5-6)'}")
    print(f"Skip Existing: {args.skip_existing}")
    print(f"{'='*60}\n")

    # Select visual set based on --all flag
    if args.all:
        visuals_to_generate = CORE_VISUALS + EXTENDED_VISUALS
        print("Generating ALL visuals (core + extended)\n")
    else:
        visuals_to_generate = CORE_VISUALS
        print("Generating CORE visuals only (use --all for extended set)\n")

    # Filter visuals if --only specified.
    # BUGFIX: the filter previously referenced an undefined name VISUALS,
    # so any --only invocation raised NameError.
    if args.only:
        pattern = args.only.lower()
        visuals_to_generate = [
            v for v in visuals_to_generate
            if pattern in v[0].lower() or pattern in v[2].lower()
        ]
        print(f"Filtered to {len(visuals_to_generate)} visuals matching '{args.only}'\n")

    if args.dry_run:
        print("DRY RUN - The following visuals would be generated:\n")
        for filename, tool, prompt in visuals_to_generate:
            formatted = prompt.format(topic=args.topic)
            print(f"  {filename}")
            print(f"    Tool: {tool}")
            print(f"    Prompt: {formatted[:60]}...")
            print()
        return

    # Generate all visuals
    total = len(visuals_to_generate)
    success = 0
    failed = 0
    skipped = 0

    for i, (filename, tool, prompt) in enumerate(visuals_to_generate, 1):
        # BUGFIX: classify "skipped" BEFORE generating. The old code checked
        # for the file after generate_visual() had run, so with
        # --skip-existing every freshly generated file was miscounted as
        # skipped (it exists by then).
        if args.skip_existing and (output_dir / filename).exists():
            print(f"\n[{i}/{total}] Skipping {filename} (already exists)")
            skipped += 1
            continue

        print(f"\n[{i}/{total}] Generating {filename}...")
        result = generate_visual(
            filename=filename,
            tool=tool,
            prompt=prompt,
            output_dir=output_dir,
            topic=args.topic,
            skip_existing=args.skip_existing,
            verbose=args.verbose
        )
        if result:
            success += 1
        else:
            failed += 1

    # Print summary
    print(f"\n{'='*60}")
    print(f"Generation Complete")
    print(f"{'='*60}")
    print(f"Total: {total}")
    print(f"Success: {success}")
    print(f"Skipped: {skipped}")
    print(f"Failed: {failed}")
    print(f"{'='*60}")

    if failed > 0:
        print(f"\nWARNING: {failed} visuals failed to generate.")
        print("Check the output above for error details.")
        print("You may need to generate failed visuals manually.")

    print(f"\nOutput directory: {output_dir.absolute()}")


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/market-research-reports/scripts/generate_market_visuals.py",
"license": "MIT License",
"lines": 466,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/markitdown/scripts/batch_convert.py | #!/usr/bin/env python3
"""
Batch convert multiple files to Markdown using MarkItDown.
This script demonstrates how to efficiently convert multiple files
in a directory to Markdown format.
"""
import argparse
from pathlib import Path
from typing import List, Optional
from markitdown import MarkItDown
from concurrent.futures import ThreadPoolExecutor, as_completed
import sys
def convert_file(md: MarkItDown, file_path: Path, output_dir: Path, verbose: bool = False) -> tuple[bool, str, str]:
    """
    Convert a single file to Markdown.

    Args:
        md: MarkItDown instance
        file_path: Path to input file
        output_dir: Directory for output files
        verbose: Print detailed messages

    Returns:
        Tuple of (success, input_path, message)
    """
    try:
        if verbose:
            print(f"Converting: {file_path}")

        converted = md.convert(str(file_path))
        destination = output_dir / f"{file_path.stem}.md"

        # Prepend a small metadata header before the converted body.
        header = (
            f"# {converted.title or file_path.stem}\n\n"
            f"**Source**: {file_path.name}\n"
            f"**Format**: {file_path.suffix}\n\n"
            "---\n\n"
        )
        destination.write_text(header + converted.text_content, encoding='utf-8')

        return True, str(file_path), f"β Converted to {destination.name}"
    except Exception as e:
        return False, str(file_path), f"β Error: {str(e)}"
def batch_convert(
    input_dir: Path,
    output_dir: Path,
    extensions: Optional[List[str]] = None,
    recursive: bool = False,
    workers: int = 4,
    verbose: bool = False,
    enable_plugins: bool = False
) -> dict:
    """
    Batch convert files in a directory.

    Args:
        input_dir: Input directory
        output_dir: Output directory
        extensions: List of file extensions to convert (e.g., ['.pdf', '.docx'])
        recursive: Search subdirectories
        workers: Number of parallel workers
        verbose: Print detailed messages
        enable_plugins: Enable MarkItDown plugins

    Returns:
        Dictionary with conversion statistics
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    # Default extensions if not specified
    if extensions is None:
        extensions = ['.pdf', '.docx', '.pptx', '.xlsx', '.html', '.jpg', '.png']

    # Collect matching files (rglob descends into subdirectories).
    globber = input_dir.rglob if recursive else input_dir.glob
    files = [match for ext in extensions for match in globber(f"*{ext}")]

    if not files:
        print(f"No files found with extensions: {', '.join(extensions)}")
        return {'total': 0, 'success': 0, 'failed': 0}

    print(f"Found {len(files)} file(s) to convert")

    # NOTE(review): a single MarkItDown instance is shared by all worker
    # threads; this assumes md.convert is thread-safe — confirm upstream.
    md = MarkItDown(enable_plugins=enable_plugins)

    results = {
        'total': len(files),
        'success': 0,
        'failed': 0,
        'details': []
    }

    with ThreadPoolExecutor(max_workers=workers) as pool:
        pending = {
            pool.submit(convert_file, md, path, output_dir, verbose): path
            for path in files
        }
        for done in as_completed(pending):
            ok, path_str, message = done.result()
            results['success' if ok else 'failed'] += 1
            results['details'].append({
                'file': path_str,
                'success': ok,
                'message': message
            })
            print(message)

    return results
def main():
    """CLI entry point: parse arguments, run the batch conversion, and
    print a summary; exits non-zero when any conversion failed."""
    parser = argparse.ArgumentParser(
        description="Batch convert files to Markdown using MarkItDown",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Convert all PDFs in a directory
  python batch_convert.py papers/ output/ --extensions .pdf

  # Convert multiple formats recursively
  python batch_convert.py documents/ markdown/ --extensions .pdf .docx .pptx -r

  # Use 8 parallel workers
  python batch_convert.py input/ output/ --workers 8

  # Enable plugins
  python batch_convert.py input/ output/ --plugins
        """
    )
    parser.add_argument('input_dir', type=Path, help='Input directory')
    parser.add_argument('output_dir', type=Path, help='Output directory')
    parser.add_argument(
        '--extensions', '-e',
        nargs='+',
        help='File extensions to convert (e.g., .pdf .docx)'
    )
    parser.add_argument(
        '--recursive', '-r',
        action='store_true',
        help='Search subdirectories recursively'
    )
    parser.add_argument(
        '--workers', '-w',
        type=int,
        default=4,
        help='Number of parallel workers (default: 4)'
    )
    parser.add_argument(
        '--verbose', '-v',
        action='store_true',
        help='Verbose output'
    )
    parser.add_argument(
        '--plugins', '-p',
        action='store_true',
        help='Enable MarkItDown plugins'
    )

    args = parser.parse_args()

    # Validate input directory
    if not args.input_dir.exists():
        print(f"Error: Input directory '{args.input_dir}' does not exist")
        sys.exit(1)
    if not args.input_dir.is_dir():
        print(f"Error: '{args.input_dir}' is not a directory")
        sys.exit(1)

    # Run batch conversion
    results = batch_convert(
        input_dir=args.input_dir,
        output_dir=args.output_dir,
        extensions=args.extensions,
        recursive=args.recursive,
        workers=args.workers,
        verbose=args.verbose,
        enable_plugins=args.plugins
    )

    # Print summary
    print("\n" + "="*50)
    print("CONVERSION SUMMARY")
    print("="*50)
    print(f"Total files: {results['total']}")
    print(f"Successful: {results['success']}")
    print(f"Failed: {results['failed']}")
    # BUGFIX: the old one-liner printed a bare "N/A" with no label when
    # there were zero files; keep the label consistent with the other rows.
    if results['total'] > 0:
        print(f"Success rate: {results['success']/results['total']*100:.1f}%")
    else:
        print("Success rate: N/A")

    # Show failed files if any
    if results['failed'] > 0:
        print("\nFailed conversions:")
        for detail in results['details']:
            if not detail['success']:
                print(f"  - {detail['file']}: {detail['message']}")

    sys.exit(0 if results['failed'] == 0 else 1)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/markitdown/scripts/batch_convert.py",
"license": "MIT License",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/markitdown/scripts/convert_literature.py | #!/usr/bin/env python3
"""
Convert scientific literature PDFs to Markdown for analysis and review.
This script is specifically designed for converting academic papers,
organizing them, and preparing them for literature review workflows.
"""
import argparse
import json
import re
import sys
from pathlib import Path
from typing import List, Dict, Optional
from markitdown import MarkItDown
from datetime import datetime
def extract_metadata_from_filename(filename: str) -> Dict[str, str]:
"""
Try to extract metadata from filename.
Supports patterns like: Author_Year_Title.pdf
"""
metadata = {}
# Remove extension
name = Path(filename).stem
# Try to extract year
year_match = re.search(r'\b(19|20)\d{2}\b', name)
if year_match:
metadata['year'] = year_match.group()
# Split by underscores or dashes
parts = re.split(r'[_\-]', name)
if len(parts) >= 2:
metadata['author'] = parts[0].replace('_', ' ')
metadata['title'] = ' '.join(parts[1:]).replace('_', ' ')
else:
metadata['title'] = name.replace('_', ' ')
return metadata
def convert_paper(
    md: MarkItDown,
    input_file: Path,
    output_dir: Path,
    organize_by_year: bool = False
) -> tuple[bool, Dict]:
    """
    Convert a single paper to Markdown with metadata extraction.

    Args:
        md: MarkItDown instance
        input_file: Path to PDF file
        output_dir: Output directory
        organize_by_year: Organize into year subdirectories

    Returns:
        Tuple of (success, metadata_dict)
    """
    try:
        print(f"Converting: {input_file.name}")

        converted = md.convert(str(input_file))

        # Filename-derived metadata, plus provenance fields.
        meta = extract_metadata_from_filename(input_file.name)
        meta['source_file'] = input_file.name
        meta['converted_date'] = datetime.now().isoformat()

        # Fall back to the document's own title when the filename had none.
        if 'title' not in meta and converted.title:
            meta['title'] = converted.title

        # Optionally bucket output into a per-year subdirectory.
        if organize_by_year and 'year' in meta:
            target_dir = output_dir / meta['year']
        else:
            target_dir = output_dir
        target_dir.mkdir(parents=True, exist_ok=True)
        out_path = target_dir / f"{input_file.stem}.md"

        title = meta.get('title', input_file.stem)

        # YAML front matter.
        parts = ["---\n", f"title: \"{title}\"\n"]
        if 'author' in meta:
            parts.append(f"author: \"{meta['author']}\"\n")
        if 'year' in meta:
            parts.append(f"year: {meta['year']}\n")
        parts.append(f"source: \"{meta['source_file']}\"\n")
        parts.append(f"converted: \"{meta['converted_date']}\"\n")
        parts.append("---\n\n")

        # Title heading and a human-readable metadata section.
        parts.append(f"# {title}\n\n")
        parts.append("## Document Information\n\n")
        if 'author' in meta:
            parts.append(f"**Author**: {meta['author']}\n")
        if 'year' in meta:
            parts.append(f"**Year**: {meta['year']}\n")
        parts.append(f"**Source File**: {meta['source_file']}\n")
        parts.append(f"**Converted**: {meta['converted_date']}\n\n")
        parts.append("---\n\n")

        # Converted document body.
        parts.append(converted.text_content)

        out_path.write_text("".join(parts), encoding='utf-8')
        print(f"β Saved to: {out_path}")
        return True, meta
    except Exception as e:
        print(f"β Error converting {input_file.name}: {str(e)}")
        return False, {'source_file': input_file.name, 'error': str(e)}
def create_index(papers: List[Dict], output_dir: Path):
    """Write INDEX.md and catalog.json cataloguing all converted papers."""
    # Stable ordering: unknown years ('9999') sort last, then by title.
    ordered = sorted(
        papers,
        key=lambda p: (p.get('year', '9999'), p.get('title', ''))
    )

    lines = [
        "# Literature Review Index\n\n",
        f"**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n",
        f"**Total Papers**: {len(papers)}\n\n",
        "---\n\n",
    ]

    # Bucket papers by publication year.
    by_year: Dict[str, list] = {}
    for paper in ordered:
        by_year.setdefault(paper.get('year', 'Unknown'), []).append(paper)

    for year in sorted(by_year):
        lines.append(f"## {year}\n\n")
        for paper in by_year[year]:
            title = paper.get('title', paper.get('source_file', 'Unknown'))
            author = paper.get('author', 'Unknown Author')
            source = paper.get('source_file', '')

            # Link target mirrors the on-disk layout (year subdir if known).
            md_file = Path(source).stem + ".md"
            if 'year' in paper and paper['year'] != 'Unknown':
                md_file = f"{paper['year']}/{md_file}"

            lines.append(f"- **{title}**\n")
            lines.append(f"  - Author: {author}\n")
            lines.append(f"  - Source: {source}\n")
            lines.append(f"  - [Read Markdown]({md_file})\n\n")

    index_file = output_dir / "INDEX.md"
    index_file.write_text("".join(lines), encoding='utf-8')
    print(f"\nβ Created index: {index_file}")

    # Machine-readable catalog alongside the human-readable index.
    catalog_file = output_dir / "catalog.json"
    with open(catalog_file, 'w', encoding='utf-8') as f:
        json.dump(ordered, f, indent=2, ensure_ascii=False)
    print(f"β Created catalog: {catalog_file}")
def main():
    """Command-line entry point: batch-convert PDFs and optionally index them."""
    parser = argparse.ArgumentParser(
        description="Convert scientific literature PDFs to Markdown",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Convert all PDFs in a directory
  python convert_literature.py papers/ output/
  # Organize by year
  python convert_literature.py papers/ output/ --organize-by-year
  # Create index of all papers
  python convert_literature.py papers/ output/ --create-index
Filename Conventions:
  For best results, name your PDFs using this pattern:
  Author_Year_Title.pdf
  Examples:
  Smith_2023_Machine_Learning_Applications.pdf
  Jones_2022_Climate_Change_Analysis.pdf
"""
    )
    parser.add_argument('input_dir', type=Path, help='Directory with PDF files')
    parser.add_argument('output_dir', type=Path, help='Output directory for Markdown files')
    parser.add_argument('--organize-by-year', '-y', action='store_true',
                        help='Organize output into year subdirectories')
    parser.add_argument('--create-index', '-i', action='store_true',
                        help='Create an index/catalog of all papers')
    parser.add_argument('--recursive', '-r', action='store_true',
                        help='Search subdirectories recursively')
    args = parser.parse_args()

    # Fail fast on a bad input path.
    if not args.input_dir.exists():
        print(f"Error: Input directory '{args.input_dir}' does not exist")
        sys.exit(1)
    if not args.input_dir.is_dir():
        print(f"Error: '{args.input_dir}' is not a directory")
        sys.exit(1)

    # Collect the work list; rglob descends into subdirectories.
    globber = args.input_dir.rglob if args.recursive else args.input_dir.glob
    pdf_files = list(globber("*.pdf"))
    if not pdf_files:
        print("No PDF files found")
        sys.exit(1)
    print(f"Found {len(pdf_files)} PDF file(s)")

    # One shared converter instance for the whole batch.
    converter = MarkItDown()
    converted = []
    ok = 0
    for pdf in pdf_files:
        succeeded, meta = convert_paper(
            converter,
            pdf,
            args.output_dir,
            args.organize_by_year
        )
        if succeeded:
            ok += 1
            converted.append(meta)

    # Only successfully converted papers are indexed.
    if args.create_index and converted:
        create_index(converted, args.output_dir)

    print("\n" + "="*50)
    print("CONVERSION SUMMARY")
    print("="*50)
    print(f"Total papers: {len(pdf_files)}")
    print(f"Successful: {ok}")
    print(f"Failed: {len(pdf_files) - ok}")
    print(f"Success rate: {ok/len(pdf_files)*100:.1f}%")
    # Non-zero exit status signals that at least one conversion failed.
    sys.exit(0 if ok == len(pdf_files) else 1)
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/markitdown/scripts/convert_literature.py",
"license": "MIT License",
"lines": 225,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/markitdown/scripts/convert_with_ai.py | #!/usr/bin/env python3
"""
Convert documents to Markdown with AI-enhanced image descriptions.
This script demonstrates how to use MarkItDown with OpenRouter to generate
detailed descriptions of images in documents (PowerPoint, PDFs with images, etc.)
"""
import argparse
import os
import sys
from pathlib import Path
from markitdown import MarkItDown
from openai import OpenAI
# Predefined prompts for different use cases
# Mapping of prompt-type name -> LLM instruction text. Each value is sent
# verbatim to the vision model when describing images, so keep wording stable.
PROMPTS = {
    # For scientific figures: graphs, charts, microscopy, diagrams.
    'scientific': """
Analyze this scientific image or diagram. Provide:
1. Type of visualization (graph, chart, microscopy, diagram, etc.)
2. Key data points, trends, or patterns
3. Axes labels, legends, and scales
4. Notable features or findings
5. Scientific context and significance
Be precise, technical, and detailed.
""".strip(),
    # For slides exported from presentation decks.
    'presentation': """
Describe this presentation slide image. Include:
1. Main visual elements and their arrangement
2. Key points or messages conveyed
3. Data or information presented
4. Visual hierarchy and emphasis
Keep the description clear and informative.
""".strip(),
    # Default fallback for arbitrary images.
    'general': """
Describe this image in detail. Include:
1. Main subjects and objects
2. Visual composition and layout
3. Text content (if any)
4. Notable details
5. Overall context and purpose
Be comprehensive and accurate.
""".strip(),
    # For quantitative charts where numeric accuracy matters most.
    'data_viz': """
Analyze this data visualization. Provide:
1. Type of chart/graph (bar, line, scatter, pie, etc.)
2. Variables and axes
3. Data ranges and scales
4. Key patterns, trends, or outliers
5. Statistical insights
Focus on quantitative accuracy.
""".strip(),
    # For clinical/diagnostic imagery.
    'medical': """
Describe this medical image. Include:
1. Type of medical imaging (X-ray, MRI, CT, microscopy, etc.)
2. Anatomical structures visible
3. Notable findings or abnormalities
4. Image quality and contrast
5. Clinical relevance
Be professional and precise.
""".strip()
}
def convert_with_ai(
    input_file: Path,
    output_file: Path,
    api_key: str,
    model: str = "anthropic/claude-sonnet-4.5",
    prompt_type: str = "general",
    custom_prompt: str = None
) -> bool:
    """
    Convert a file to Markdown with AI image descriptions.

    Args:
        input_file: Path to input file
        output_file: Path to output Markdown file
        api_key: OpenRouter API key
        model: Model name (default: anthropic/claude-sonnet-4.5)
        prompt_type: Type of prompt to use
        custom_prompt: Custom prompt (overrides prompt_type)

    Returns:
        True if successful, False otherwise
    """
    try:
        # OpenRouter speaks the OpenAI wire protocol, so the OpenAI client
        # works with a swapped base URL.
        llm_client = OpenAI(
            api_key=api_key,
            base_url="https://openrouter.ai/api/v1"
        )
        # A custom prompt always wins; otherwise fall back to the named
        # template (or 'general' if the name is unknown).
        if custom_prompt:
            selected_prompt = custom_prompt
            prompt_label = 'custom'
        else:
            selected_prompt = PROMPTS.get(prompt_type, PROMPTS['general'])
            prompt_label = prompt_type
        print(f"Using model: {model}")
        print(f"Prompt type: {prompt_label}")
        print(f"Converting: {input_file}")
        # MarkItDown will call the LLM for each embedded image.
        converter = MarkItDown(
            llm_client=llm_client,
            llm_model=model,
            llm_prompt=selected_prompt
        )
        conversion = converter.convert(str(input_file))
        # Prepend a metadata header, then the converted body.
        header = (
            f"# {conversion.title or input_file.stem}\n\n"
            f"**Source**: {input_file.name}\n"
            f"**Format**: {input_file.suffix}\n"
            f"**AI Model**: {model}\n"
            f"**Prompt Type**: {prompt_label}\n\n"
            "---\n\n"
        )
        output_file.parent.mkdir(parents=True, exist_ok=True)
        output_file.write_text(header + conversion.text_content, encoding='utf-8')
        print(f"β Successfully converted to: {output_file}")
        return True
    except Exception as e:
        # Report and signal failure instead of crashing the batch.
        print(f"β Error: {str(e)}", file=sys.stderr)
        return False
def main():
    """Command-line entry point for AI-assisted document conversion."""
    parser = argparse.ArgumentParser(
        description="Convert documents to Markdown with AI-enhanced image descriptions",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # NOTE(review): f-string contains no placeholders; kept as written.
        epilog=f"""
Available prompt types:
  scientific - For scientific diagrams, graphs, and charts
  presentation - For presentation slides
  general - General-purpose image description
  data_viz - For data visualizations and charts
  medical - For medical imaging
Examples:
  # Convert a scientific paper
  python convert_with_ai.py paper.pdf output.md --prompt-type scientific
  # Convert a presentation with custom model
  python convert_with_ai.py slides.pptx slides.md --model anthropic/claude-sonnet-4.5 --prompt-type presentation
  # Use custom prompt with advanced vision model
  python convert_with_ai.py diagram.png diagram.md --model anthropic/claude-sonnet-4.5 --custom-prompt "Describe this technical diagram"
  # Set API key via environment variable
  export OPENROUTER_API_KEY="sk-or-v1-..."
  python convert_with_ai.py image.jpg image.md
Environment Variables:
  OPENROUTER_API_KEY OpenRouter API key (required if not passed via --api-key)
Popular Models (use with --model):
  anthropic/claude-sonnet-4.5 - Recommended for scientific vision
  anthropic/claude-opus-4.5 - Advanced vision model
  openai/gpt-4o - GPT-4 Omni (vision support)
  openai/gpt-4-vision - GPT-4 Vision
  google/gemini-pro-vision - Gemini Pro Vision
"""
    )
    parser.add_argument('input', type=Path, help='Input file')
    parser.add_argument('output', type=Path, help='Output Markdown file')
    parser.add_argument(
        '--api-key', '-k',
        help='OpenRouter API key (or set OPENROUTER_API_KEY env var)'
    )
    parser.add_argument(
        '--model', '-m',
        default='anthropic/claude-sonnet-4.5',
        help='Model to use via OpenRouter (default: anthropic/claude-sonnet-4.5)'
    )
    parser.add_argument(
        '--prompt-type', '-t',
        choices=list(PROMPTS.keys()),
        default='general',
        help='Type of prompt to use (default: general)'
    )
    parser.add_argument(
        '--custom-prompt', '-p',
        help='Custom prompt (overrides --prompt-type)'
    )
    parser.add_argument(
        '--list-prompts', '-l',
        action='store_true',
        help='List available prompt types and exit'
    )
    args = parser.parse_args()
    # --list-prompts short-circuits: dump every prompt template and exit 0.
    if args.list_prompts:
        print("Available prompt types:\n")
        for name, prompt in PROMPTS.items():
            print(f"[{name}]")
            print(prompt)
            print("\n" + "="*60 + "\n")
        sys.exit(0)
    # API key resolution: explicit flag takes precedence over the environment.
    api_key = args.api_key or os.environ.get('OPENROUTER_API_KEY')
    if not api_key:
        print("Error: OpenRouter API key required. Set OPENROUTER_API_KEY environment variable or use --api-key")
        print("Get your API key at: https://openrouter.ai/keys")
        sys.exit(1)
    # Fail fast if the input path is missing.
    if not args.input.exists():
        print(f"Error: Input file '{args.input}' does not exist")
        sys.exit(1)
    # Delegate the actual conversion; exit status mirrors success.
    success = convert_with_ai(
        input_file=args.input,
        output_file=args.output,
        api_key=api_key,
        model=args.model,
        prompt_type=args.prompt_type,
        custom_prompt=args.custom_prompt
    )
    sys.exit(0 if success else 1)
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/markitdown/scripts/convert_with_ai.py",
"license": "MIT License",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/scientific/matplotlib/scripts/plot_template.py | #!/usr/bin/env python3
"""
Matplotlib Plot Template
Comprehensive template demonstrating various plot types and best practices.
Use this as a starting point for creating publication-quality visualizations.
Usage:
python plot_template.py [--plot-type TYPE] [--style STYLE] [--output FILE]
Plot types:
line, scatter, bar, histogram, heatmap, contour, box, violin, 3d, all
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import argparse
def set_publication_style():
    """Apply rcParams tuned for publication-quality figures (300 dpi export,
    tight bounding box, moderate font sizes, slightly heavier lines)."""
    settings = {
        # Canvas and export
        'figure.figsize': (10, 6),
        'figure.dpi': 100,
        'savefig.dpi': 300,
        'savefig.bbox': 'tight',
        # Typography
        'font.size': 11,
        'axes.labelsize': 12,
        'axes.titlesize': 14,
        'xtick.labelsize': 10,
        'ytick.labelsize': 10,
        'legend.fontsize': 10,
        # Line weights
        'lines.linewidth': 2,
        'axes.linewidth': 1.5,
    }
    plt.rcParams.update(settings)
def generate_sample_data():
    """Build a deterministic bundle of arrays used by the demo plots.

    Returns a dict with line data ('x', 'y1', 'y2'), scatter data,
    categorical bar data, histogram samples, a random matrix, and a
    meshgrid surface ('X', 'Y', 'Z').
    """
    np.random.seed(42)  # fixed seed -> reproducible demo data
    xs = np.linspace(0, 10, 100)
    cats = ['A', 'B', 'C', 'D', 'E']
    # NOTE: draw order matters for reproducibility with the fixed seed.
    sx = np.random.randn(200)
    sy = np.random.randn(200)
    bars = np.random.randint(10, 100, len(cats))
    hist = np.random.normal(0, 1, 1000)
    grid_vals = np.random.rand(10, 10)
    # Radial sine surface for contour demos.
    mesh_x, mesh_y = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
    mesh_z = np.sin(np.sqrt(mesh_x**2 + mesh_y**2))
    return {
        'x': xs, 'y1': np.sin(xs), 'y2': np.cos(xs),
        'scatter_x': sx, 'scatter_y': sy,
        'categories': cats, 'bar_values': bars,
        'hist_data': hist, 'matrix': grid_vals,
        'X': mesh_x, 'Y': mesh_y, 'Z': mesh_z,
    }
def create_line_plot(data, ax=None):
    """Create a line plot of the sin/cos sample data.

    Args:
        data: dict with 'x', 'y1', 'y2' arrays (see generate_sample_data).
        ax: existing Axes to draw on; if None a new figure is created.

    Returns:
        The new Figure when ax was None, otherwise the Axes drawn on.
    """
    # Bug fix: the original re-tested `ax is None` AFTER assigning ax, so the
    # standalone branch could never return the figure it created. Track the
    # created figure explicitly instead.
    fig = None
    if ax is None:
        fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
    ax.plot(data['x'], data['y1'], label='sin(x)', linewidth=2, marker='o',
            markevery=10, markersize=6)
    ax.plot(data['x'], data['y2'], label='cos(x)', linewidth=2, linestyle='--')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('Line Plot Example')
    ax.legend(loc='best', framealpha=0.9)
    ax.grid(True, alpha=0.3, linestyle='--')
    # Remove top and right spines for cleaner look
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    return fig if fig is not None else ax
def create_scatter_plot(data, ax=None):
    """Create a scatter plot where color encodes distance from origin and
    marker size scales with |x|.

    Args:
        data: dict with 'scatter_x', 'scatter_y' arrays.
        ax: existing Axes to draw on; if None a new figure is created.

    Returns:
        The new Figure when ax was None, otherwise the Axes drawn on.
    """
    # Bug fix: `ax is None` was re-tested after ax had been assigned, so the
    # freshly created figure was never returned.
    fig = None
    if ax is None:
        fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
    # Color based on distance from origin
    colors = np.sqrt(data['scatter_x']**2 + data['scatter_y']**2)
    sizes = 50 * (1 + np.abs(data['scatter_x']))
    scatter = ax.scatter(data['scatter_x'], data['scatter_y'],
                         c=colors, s=sizes, alpha=0.6,
                         cmap='viridis', edgecolors='black', linewidth=0.5)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_title('Scatter Plot Example')
    ax.grid(True, alpha=0.3, linestyle='--')
    # Add colorbar
    cbar = plt.colorbar(scatter, ax=ax)
    cbar.set_label('Distance from origin')
    return fig if fig is not None else ax
def create_bar_chart(data, ax=None):
    """Create a bar chart with error bars and value-mapped colors.

    Args:
        data: dict with 'categories' and 'bar_values'.
        ax: existing Axes to draw on; if None a new figure is created.

    Returns:
        The new Figure when ax was None, otherwise the Axes drawn on.
    """
    # Bug fix: `ax is None` was re-tested after assignment; the created
    # figure was unreachable as a return value.
    fig = None
    if ax is None:
        fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
    x_pos = np.arange(len(data['categories']))
    # Random error bars (unseeded here; seeded upstream by generate_sample_data).
    errors = np.random.randint(5, 15, len(data['categories']))
    bars = ax.bar(x_pos, data['bar_values'], yerr=errors,
                  color='steelblue', edgecolor='black', linewidth=1.5,
                  capsize=5, alpha=0.8)
    # Color bars by value
    colors = plt.cm.viridis(data['bar_values'] / data['bar_values'].max())
    for bar, color in zip(bars, colors):
        bar.set_facecolor(color)
    ax.set_xlabel('Category')
    ax.set_ylabel('Values')
    ax.set_title('Bar Chart Example')
    ax.set_xticks(x_pos)
    ax.set_xticklabels(data['categories'])
    ax.grid(True, axis='y', alpha=0.3, linestyle='--')
    # Remove top and right spines
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    return fig if fig is not None else ax
def create_histogram(data, ax=None):
    """Create a density histogram with a fitted normal curve overlaid.

    Args:
        data: dict with 'hist_data' samples.
        ax: existing Axes to draw on; if None a new figure is created.

    Returns:
        The new Figure when ax was None, otherwise the Axes drawn on.
    """
    # Bug fix: `ax is None` was re-tested after assignment; the created
    # figure was unreachable as a return value.
    fig = None
    if ax is None:
        fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
    n, bins, patches = ax.hist(data['hist_data'], bins=30, density=True,
                               alpha=0.7, edgecolor='black', color='steelblue')
    # Overlay theoretical normal distribution (local import keeps scipy optional
    # for the other plot types).
    from scipy.stats import norm
    mu, std = norm.fit(data['hist_data'])
    x_theory = np.linspace(data['hist_data'].min(), data['hist_data'].max(), 100)
    ax.plot(x_theory, norm.pdf(x_theory, mu, std), 'r-', linewidth=2,
            label=f'Normal fit (ΞΌ={mu:.2f}, Ο={std:.2f})')
    ax.set_xlabel('Value')
    ax.set_ylabel('Density')
    ax.set_title('Histogram with Normal Fit')
    ax.legend()
    ax.grid(True, axis='y', alpha=0.3, linestyle='--')
    return fig if fig is not None else ax
def create_heatmap(data, ax=None):
    """Create a heatmap of the sample matrix with a colorbar.

    Args:
        data: dict with a 2D 'matrix' array.
        ax: existing Axes to draw on; if None a new figure is created.

    Returns:
        The new Figure when ax was None, otherwise the Axes drawn on.
    """
    # Bug fix: `ax is None` was re-tested after assignment; the created
    # figure was unreachable as a return value.
    fig = None
    if ax is None:
        fig, ax = plt.subplots(figsize=(10, 8), constrained_layout=True)
    im = ax.imshow(data['matrix'], cmap='coolwarm', aspect='auto',
                   vmin=0, vmax=1)
    # Add colorbar
    cbar = plt.colorbar(im, ax=ax)
    cbar.set_label('Value')
    # Optional: Add text annotations
    # for i in range(data['matrix'].shape[0]):
    #     for j in range(data['matrix'].shape[1]):
    #         text = ax.text(j, i, f'{data["matrix"][i, j]:.2f}',
    #                        ha='center', va='center', color='black', fontsize=8)
    ax.set_xlabel('X Index')
    ax.set_ylabel('Y Index')
    ax.set_title('Heatmap Example')
    return fig if fig is not None else ax
def create_contour_plot(data, ax=None):
    """Create filled + labeled line contours of the sample surface.

    Args:
        data: dict with meshgrid arrays 'X', 'Y', 'Z'.
        ax: existing Axes to draw on; if None a new figure is created.

    Returns:
        The new Figure when ax was None, otherwise the Axes drawn on.
    """
    # Bug fix: `ax is None` was re-tested after assignment; the created
    # figure was unreachable as a return value.
    fig = None
    if ax is None:
        fig, ax = plt.subplots(figsize=(10, 8), constrained_layout=True)
    # Filled contours
    contourf = ax.contourf(data['X'], data['Y'], data['Z'],
                           levels=20, cmap='viridis', alpha=0.8)
    # Contour lines
    contour = ax.contour(data['X'], data['Y'], data['Z'],
                         levels=10, colors='black', linewidths=0.5, alpha=0.4)
    # Add labels to contour lines
    ax.clabel(contour, inline=True, fontsize=8)
    # Add colorbar
    cbar = plt.colorbar(contourf, ax=ax)
    cbar.set_label('Z value')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_title('Contour Plot Example')
    ax.set_aspect('equal')
    return fig if fig is not None else ax
def create_box_plot(data, ax=None):
    """Create a box plot comparing four generated normal distributions.

    Args:
        data: unused here (kept for interface consistency with siblings).
        ax: existing Axes to draw on; if None a new figure is created.

    Returns:
        The new Figure when ax was None, otherwise the Axes drawn on.
    """
    # Bug fix: `ax is None` was re-tested after assignment; the created
    # figure was unreachable as a return value.
    fig = None
    if ax is None:
        fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
    # Generate multiple distributions with increasing spread.
    box_data = [np.random.normal(0, std, 100) for std in range(1, 5)]
    bp = ax.boxplot(box_data, labels=['Group 1', 'Group 2', 'Group 3', 'Group 4'],
                    patch_artist=True, showmeans=True,
                    boxprops=dict(facecolor='lightblue', edgecolor='black'),
                    medianprops=dict(color='red', linewidth=2),
                    meanprops=dict(marker='D', markerfacecolor='green', markersize=8))
    ax.set_xlabel('Groups')
    ax.set_ylabel('Values')
    ax.set_title('Box Plot Example')
    ax.grid(True, axis='y', alpha=0.3, linestyle='--')
    return fig if fig is not None else ax
def create_violin_plot(data, ax=None):
    """Create a violin plot showing the shapes of four normal distributions.

    Args:
        data: unused here (kept for interface consistency with siblings).
        ax: existing Axes to draw on; if None a new figure is created.

    Returns:
        The new Figure when ax was None, otherwise the Axes drawn on.
    """
    # Bug fix: `ax is None` was re-tested after assignment; the created
    # figure was unreachable as a return value.
    fig = None
    if ax is None:
        fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
    # Generate multiple distributions with increasing spread.
    violin_data = [np.random.normal(0, std, 100) for std in range(1, 5)]
    parts = ax.violinplot(violin_data, positions=range(1, 5),
                          showmeans=True, showmedians=True)
    # Customize colors
    for pc in parts['bodies']:
        pc.set_facecolor('lightblue')
        pc.set_alpha(0.7)
        pc.set_edgecolor('black')
    ax.set_xlabel('Groups')
    ax.set_ylabel('Values')
    ax.set_title('Violin Plot Example')
    ax.set_xticks(range(1, 5))
    ax.set_xticklabels(['Group 1', 'Group 2', 'Group 3', 'Group 4'])
    ax.grid(True, axis='y', alpha=0.3, linestyle='--')
    return fig if fig is not None else ax
def create_3d_plot():
    """Render a 3D surface plot of sin(r) on a 50x50 grid and return the figure."""
    # Import registers the '3d' projection with matplotlib.
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure(figsize=(12, 9))
    ax = fig.add_subplot(111, projection='3d')
    # Radial sine surface.
    axis_pts = np.linspace(-5, 5, 50)
    X, Y = np.meshgrid(axis_pts, axis_pts)
    Z = np.sin(np.sqrt(X**2 + Y**2))
    surf = ax.plot_surface(X, Y, Z, cmap='viridis',
                           edgecolor='none', alpha=0.9)
    fig.colorbar(surf, ax=ax, shrink=0.5)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    ax.set_title('3D Surface Plot Example')
    # Pleasant default camera angle.
    ax.view_init(elev=30, azim=45)
    plt.tight_layout()
    return fig
def create_comprehensive_figure():
    """Create a comprehensive figure with multiple subplots.

    Lays out seven of the demo plots on a 3x3 GridSpec and returns the
    assembled Figure.
    """
    data = generate_sample_data()
    fig = plt.figure(figsize=(16, 12), constrained_layout=True)
    gs = GridSpec(3, 3, figure=fig)
    # Each helper draws onto the Axes passed in; positions are fixed by GridSpec.
    ax1 = fig.add_subplot(gs[0, :2])  # Line plot - top left, spans 2 columns
    create_line_plot(data, ax1)
    ax2 = fig.add_subplot(gs[0, 2])  # Bar chart - top right
    create_bar_chart(data, ax2)
    ax3 = fig.add_subplot(gs[1, 0])  # Scatter plot - middle left
    create_scatter_plot(data, ax3)
    ax4 = fig.add_subplot(gs[1, 1])  # Histogram - middle center
    create_histogram(data, ax4)
    ax5 = fig.add_subplot(gs[1, 2])  # Box plot - middle right
    create_box_plot(data, ax5)
    ax6 = fig.add_subplot(gs[2, :2])  # Contour plot - bottom left, spans 2 columns
    create_contour_plot(data, ax6)
    ax7 = fig.add_subplot(gs[2, 2])  # Heatmap - bottom right
    create_heatmap(data, ax7)
    fig.suptitle('Comprehensive Matplotlib Template', fontsize=18, fontweight='bold')
    return fig
def main():
    """Parse CLI arguments, render the requested plot, and save/show it."""
    parser = argparse.ArgumentParser(description='Matplotlib plot template')
    parser.add_argument('--plot-type', type=str, default='all',
                        choices=['line', 'scatter', 'bar', 'histogram', 'heatmap',
                                 'contour', 'box', 'violin', '3d', 'all'],
                        help='Type of plot to create')
    parser.add_argument('--style', type=str, default='default',
                        help='Matplotlib style to use')
    parser.add_argument('--output', type=str, default='plot.png',
                        help='Output filename')
    args = parser.parse_args()

    # Styling: a named matplotlib style, or this module's publication defaults.
    if args.style == 'default':
        set_publication_style()
    else:
        plt.style.use(args.style)

    data = generate_sample_data()

    # '3d' and 'all' build their own figures; the rest share a dispatch table.
    dispatch = {
        'line': create_line_plot,
        'scatter': create_scatter_plot,
        'bar': create_bar_chart,
        'histogram': create_histogram,
        'heatmap': create_heatmap,
        'contour': create_contour_plot,
        'box': create_box_plot,
        'violin': create_violin_plot,
    }
    if args.plot_type == '3d':
        fig = create_3d_plot()
    elif args.plot_type == 'all':
        fig = create_comprehensive_figure()
    else:
        fig = dispatch[args.plot_type](data)

    # Export at print resolution, then display interactively.
    plt.savefig(args.output, dpi=300, bbox_inches='tight')
    print(f"Plot saved to {args.output}")
    plt.show()
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/matplotlib/scripts/plot_template.py",
"license": "MIT License",
"lines": 304,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/matplotlib/scripts/style_configurator.py | #!/usr/bin/env python3
"""
Matplotlib Style Configurator
Interactive utility to configure matplotlib style preferences and generate
custom style sheets. Creates a preview of the style and optionally saves
it as a .mplstyle file.
Usage:
python style_configurator.py [--preset PRESET] [--output FILE] [--preview]
Presets:
publication, presentation, web, dark, minimal
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import argparse
import os
# Predefined style presets
# Mapping of preset name -> dict of matplotlib rcParams overrides.
# Each preset is applied via plt.rcParams.update() or exported to .mplstyle.
STYLE_PRESETS = {
    # High-DPI export, inward ticks, open top/right spines - journal figures.
    'publication': {
        'figure.figsize': (8, 6),
        'figure.dpi': 100,
        'savefig.dpi': 300,
        'savefig.bbox': 'tight',
        'font.family': 'sans-serif',
        'font.sans-serif': ['Arial', 'Helvetica'],
        'font.size': 11,
        'axes.labelsize': 12,
        'axes.titlesize': 14,
        'axes.linewidth': 1.5,
        'axes.grid': False,
        'axes.spines.top': False,
        'axes.spines.right': False,
        'lines.linewidth': 2,
        'lines.markersize': 8,
        'xtick.labelsize': 10,
        'ytick.labelsize': 10,
        'xtick.direction': 'in',
        'ytick.direction': 'in',
        'xtick.major.size': 6,
        'ytick.major.size': 6,
        'xtick.major.width': 1.5,
        'ytick.major.width': 1.5,
        'legend.fontsize': 10,
        'legend.frameon': True,
        'legend.framealpha': 1.0,
        'legend.edgecolor': 'black',
    },
    # Oversized fonts and thick lines for projector visibility.
    'presentation': {
        'figure.figsize': (12, 8),
        'figure.dpi': 100,
        'savefig.dpi': 150,
        'font.size': 16,
        'axes.labelsize': 20,
        'axes.titlesize': 24,
        'axes.linewidth': 2,
        'lines.linewidth': 3,
        'lines.markersize': 12,
        'xtick.labelsize': 16,
        'ytick.labelsize': 16,
        'legend.fontsize': 16,
        'axes.grid': True,
        'grid.alpha': 0.3,
    },
    # Screen-resolution output with a subtle dashed grid.
    'web': {
        'figure.figsize': (10, 6),
        'figure.dpi': 96,
        'savefig.dpi': 150,
        'font.size': 11,
        'axes.labelsize': 12,
        'axes.titlesize': 14,
        'lines.linewidth': 2,
        'axes.grid': True,
        'grid.alpha': 0.2,
        'grid.linestyle': '--',
    },
    # Dark background with white text/ticks; saved figures keep the dark bg.
    'dark': {
        'figure.facecolor': '#1e1e1e',
        'figure.edgecolor': '#1e1e1e',
        'axes.facecolor': '#1e1e1e',
        'axes.edgecolor': 'white',
        'axes.labelcolor': 'white',
        'text.color': 'white',
        'xtick.color': 'white',
        'ytick.color': 'white',
        'grid.color': 'gray',
        'grid.alpha': 0.3,
        'axes.grid': True,
        'legend.facecolor': '#1e1e1e',
        'legend.edgecolor': 'white',
        'savefig.facecolor': '#1e1e1e',
    },
    # All spines hidden, no grid - bare-bones look.
    'minimal': {
        'figure.figsize': (10, 6),
        'axes.spines.top': False,
        'axes.spines.right': False,
        'axes.spines.left': False,
        'axes.spines.bottom': False,
        'axes.grid': False,
        'xtick.bottom': True,
        'ytick.left': True,
        'axes.axisbelow': True,
        'lines.linewidth': 2.5,
        'font.size': 12,
    }
}
def generate_preview_data():
    """Produce deterministic sample data for the style preview figure."""
    np.random.seed(42)  # fixed seed -> identical preview every run
    xs = np.linspace(0, 10, 100)
    # NOTE: the order of random draws is part of the reproducibility contract.
    noisy_sin = np.sin(xs) + 0.1 * np.random.randn(100)
    noisy_cos = np.cos(xs) + 0.1 * np.random.randn(100)
    sx = np.random.randn(100)
    sy = 2 * sx + np.random.randn(100)
    return {
        'x': xs, 'y1': noisy_sin, 'y2': noisy_cos,
        'scatter_x': sx, 'scatter_y': sy,
        'categories': ['A', 'B', 'C', 'D', 'E'],
        'bar_values': [25, 40, 30, 55, 45],
    }
def create_style_preview(style_dict=None):
    """Create a preview figure demonstrating the style.

    Args:
        style_dict: optional mapping of rcParam overrides; applied globally
            via plt.rcParams.update before drawing.

    Returns:
        The assembled 2x2 preview Figure (line, scatter, bar, uncertainty band).
    """
    if style_dict:
        plt.rcParams.update(style_dict)
    data = generate_preview_data()
    fig = plt.figure(figsize=(14, 10))
    gs = GridSpec(2, 2, figure=fig, hspace=0.3, wspace=0.3)
    # Line plot
    ax1 = fig.add_subplot(gs[0, 0])
    ax1.plot(data['x'], data['y1'], label='sin(x)', marker='o', markevery=10)
    ax1.plot(data['x'], data['y2'], label='cos(x)', linestyle='--')
    ax1.set_xlabel('X axis')
    ax1.set_ylabel('Y axis')
    ax1.set_title('Line Plot')
    ax1.legend()
    ax1.grid(True, alpha=0.3)
    # Scatter plot, colored by distance from origin.
    ax2 = fig.add_subplot(gs[0, 1])
    colors = np.sqrt(data['scatter_x']**2 + data['scatter_y']**2)
    scatter = ax2.scatter(data['scatter_x'], data['scatter_y'],
                          c=colors, cmap='viridis', alpha=0.6, s=50)
    ax2.set_xlabel('X axis')
    ax2.set_ylabel('Y axis')
    ax2.set_title('Scatter Plot')
    cbar = plt.colorbar(scatter, ax=ax2)
    cbar.set_label('Distance')
    ax2.grid(True, alpha=0.3)
    # Bar chart
    ax3 = fig.add_subplot(gs[1, 0])
    bars = ax3.bar(data['categories'], data['bar_values'],
                   edgecolor='black', linewidth=1)
    # Color bars with gradient
    colors = plt.cm.viridis(np.linspace(0.2, 0.8, len(bars)))
    for bar, color in zip(bars, colors):
        bar.set_facecolor(color)
    ax3.set_xlabel('Categories')
    ax3.set_ylabel('Values')
    ax3.set_title('Bar Chart')
    ax3.grid(True, axis='y', alpha=0.3)
    # Multiple line plot with shaded +/- 0.2 uncertainty bands.
    ax4 = fig.add_subplot(gs[1, 1])
    ax4.plot(data['x'], data['y1'], label='Signal 1', linewidth=2)
    ax4.fill_between(data['x'], data['y1'] - 0.2, data['y1'] + 0.2,
                     alpha=0.3, label='Β±1 std')
    ax4.plot(data['x'], data['y2'], label='Signal 2', linewidth=2)
    ax4.fill_between(data['x'], data['y2'] - 0.2, data['y2'] + 0.2,
                     alpha=0.3)
    ax4.set_xlabel('X axis')
    ax4.set_ylabel('Y axis')
    ax4.set_title('Time Series with Uncertainty')
    ax4.legend()
    ax4.grid(True, alpha=0.3)
    fig.suptitle('Style Preview', fontsize=16, fontweight='bold')
    return fig
def save_style_file(style_dict, filename):
    """Save a style dictionary as a .mplstyle file.

    Args:
        style_dict: mapping of rcParam name -> value.
        filename: path of the .mplstyle file to write.
    """
    with open(filename, 'w') as f:
        f.write("# Custom matplotlib style\n")
        f.write("# Generated by style_configurator.py\n\n")
        # Group settings by category for a readable file layout.
        categories = {
            'Figure': ['figure.'],
            'Font': ['font.'],
            'Axes': ['axes.'],
            'Lines': ['lines.'],
            'Markers': ['markers.'],
            'Ticks': ['tick.', 'xtick.', 'ytick.'],
            'Grid': ['grid.'],
            'Legend': ['legend.'],
            'Savefig': ['savefig.'],
            'Text': ['text.'],
        }
        for category, prefixes in categories.items():
            category_items = {k: v for k, v in style_dict.items()
                              if any(k.startswith(p) for p in prefixes)}
            if category_items:
                f.write(f"# {category}\n")
                for key, value in sorted(category_items.items()):
                    # Sequences become comma-separated lists per mplstyle
                    # syntax; everything else uses its str() form. (The
                    # original had a redundant bool branch identical to the
                    # fallback - merged.)
                    if isinstance(value, (list, tuple)):
                        value_str = ', '.join(str(v) for v in value)
                    else:
                        value_str = str(value)
                    f.write(f"{key}: {value_str}\n")
                f.write("\n")
    # Bug fix: the original printed the literal text "(unknown)" instead of
    # interpolating the destination path.
    print(f"Style saved to {filename}")
def print_style_info(style_dict):
    """Pretty-print the style configuration, grouped by setting category."""
    print("\n" + "="*60)
    print("STYLE CONFIGURATION")
    print("="*60)
    # Each heading shows only the rcParams whose name starts with its prefix.
    sections = (
        ('Figure Settings', ('figure.',)),
        ('Font Settings', ('font.',)),
        ('Axes Settings', ('axes.',)),
        ('Line Settings', ('lines.',)),
        ('Grid Settings', ('grid.',)),
        ('Legend Settings', ('legend.',)),
    )
    for heading, prefixes in sections:
        matching = {name: val for name, val in style_dict.items()
                    if name.startswith(prefixes)}
        if not matching:
            continue
        print(f"\n{heading}:")
        for name in sorted(matching):
            print(f" {name}: {matching[name]}")
    print("\n" + "="*60 + "\n")
def list_available_presets():
    """Print the preset names, each with a one-line description."""
    print("\nAvailable style presets:")
    print("-" * 40)
    blurbs = (
        ('publication', 'Optimized for academic publications'),
        ('presentation', 'Large fonts for presentations'),
        ('web', 'Optimized for web display'),
        ('dark', 'Dark background theme'),
        ('minimal', 'Minimal, clean style'),
    )
    for name, blurb in blurbs:
        print(f" {name:15s} - {blurb}")
    print("-" * 40 + "\n")
def interactive_mode():
    """Run interactive mode to customize style settings.

    Prompts on stdin for a starting preset and then loops over a small menu
    of customizations until the user picks "Done".

    Returns:
        dict of rcParam overrides accumulated from the user's choices.
    """
    print("\n" + "="*60)
    print("MATPLOTLIB STYLE CONFIGURATOR - Interactive Mode")
    print("="*60)
    list_available_presets()
    preset = input("Choose a preset to start from (or 'custom' for default): ").strip().lower()
    # Any unrecognized answer (including 'custom') starts from an empty dict.
    if preset in STYLE_PRESETS:
        style_dict = STYLE_PRESETS[preset].copy()
        print(f"\nStarting from '{preset}' preset")
    else:
        style_dict = {}
        print("\nStarting from default matplotlib style")
    print("\nCommon settings you might want to customize:")
    print(" 1. Figure size")
    print(" 2. Font sizes")
    print(" 3. Line widths")
    print(" 4. Grid settings")
    print(" 5. Color scheme")
    print(" 6. Done, show preview")
    # Menu loop: each branch mutates style_dict; option 6 exits.
    while True:
        choice = input("\nSelect option (1-6): ").strip()
        if choice == '1':
            width = input(" Figure width (inches, default 10): ").strip() or '10'
            height = input(" Figure height (inches, default 6): ").strip() or '6'
            style_dict['figure.figsize'] = (float(width), float(height))
        elif choice == '2':
            # Label/title sizes are derived from the base size (+2 / +4).
            base = input(" Base font size (default 12): ").strip() or '12'
            style_dict['font.size'] = float(base)
            style_dict['axes.labelsize'] = float(base) + 2
            style_dict['axes.titlesize'] = float(base) + 4
        elif choice == '3':
            lw = input(" Line width (default 2): ").strip() or '2'
            style_dict['lines.linewidth'] = float(lw)
        elif choice == '4':
            grid = input(" Enable grid? (y/n): ").strip().lower()
            style_dict['axes.grid'] = grid == 'y'
            if style_dict['axes.grid']:
                alpha = input(" Grid transparency (0-1, default 0.3): ").strip() or '0.3'
                style_dict['grid.alpha'] = float(alpha)
        elif choice == '5':
            print(" Theme options: 1=Light, 2=Dark")
            theme = input(" Select theme (1-2): ").strip()
            # Only the dark theme changes anything; light is the default.
            if theme == '2':
                style_dict.update(STYLE_PRESETS['dark'])
        elif choice == '6':
            break
    return style_dict
def main():
    """Main function: parse CLI flags and run list/preview/save/interactive flows."""
    parser = argparse.ArgumentParser(
        description='Matplotlib style configurator',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Show available presets
  python style_configurator.py --list
  # Preview a preset
  python style_configurator.py --preset publication --preview
  # Save a preset as .mplstyle file
  python style_configurator.py --preset publication --output my_style.mplstyle
  # Interactive mode
  python style_configurator.py --interactive
"""
    )
    parser.add_argument('--preset', type=str, choices=list(STYLE_PRESETS.keys()),
                        help='Use a predefined style preset')
    parser.add_argument('--output', type=str,
                        help='Save style to .mplstyle file')
    parser.add_argument('--preview', action='store_true',
                        help='Show style preview')
    parser.add_argument('--list', action='store_true',
                        help='List available presets')
    parser.add_argument('--interactive', action='store_true',
                        help='Run in interactive mode')
    args = parser.parse_args()
    # --list short-circuits: show our presets plus matplotlib's built-ins.
    if args.list:
        list_available_presets()
        # Also show currently available matplotlib styles
        print("\nBuilt-in matplotlib styles:")
        print("-" * 40)
        for style in sorted(plt.style.available):
            print(f" {style}")
        return
    # Resolve the working style dict: interactive > preset > empty default.
    if args.interactive:
        style_dict = interactive_mode()
    elif args.preset:
        style_dict = STYLE_PRESETS[args.preset].copy()
        print(f"Using '{args.preset}' preset")
    else:
        print("No preset or interactive mode specified. Showing default preview.")
        style_dict = {}
    if style_dict:
        print_style_info(style_dict)
    if args.output:
        save_style_file(style_dict, args.output)
    # Interactive mode always previews; otherwise only with --preview.
    if args.preview or args.interactive:
        print("Creating style preview...")
        fig = create_style_preview(style_dict if style_dict else None)
        if args.output:
            # Save a PNG rendering next to the .mplstyle file.
            preview_filename = args.output.replace('.mplstyle', '_preview.png')
            plt.savefig(preview_filename, dpi=150, bbox_inches='tight')
            print(f"Preview saved to {preview_filename}")
        plt.show()
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/matplotlib/scripts/style_configurator.py",
"license": "MIT License",
"lines": 349,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/medchem/scripts/filter_molecules.py | #!/usr/bin/env python3
"""
Batch molecular filtering using medchem library.
This script provides a production-ready workflow for filtering compound libraries
using medchem rules, structural alerts, and custom constraints.
Usage:
python filter_molecules.py input.csv --rules rule_of_five,rule_of_cns --alerts nibr --output filtered.csv
python filter_molecules.py input.sdf --rules rule_of_drug --lilly --complexity 400 --output results.csv
python filter_molecules.py smiles.txt --nibr --pains --n-jobs -1 --output clean.csv
"""
import argparse
import sys
from pathlib import Path
from typing import List, Dict, Optional, Tuple
import json
# Fail fast with an actionable install hint if any scientific dependency
# is missing, instead of a bare ImportError traceback.
try:
    import pandas as pd
    import datamol as dm
    import medchem as mc
    from rdkit import Chem
    from tqdm import tqdm
except ImportError as e:
    print(f"Error: Missing required package: {e}")
    print("Install dependencies: pip install medchem datamol pandas tqdm")
    sys.exit(1)
def load_molecules(input_file: Path, smiles_column: str = "smiles") -> Tuple[pd.DataFrame, List[Chem.Mol]]:
    """
    Load molecules from various file formats.

    Supports:
    - CSV/TSV with SMILES column
    - SDF files
    - Plain text files with one SMILES per line

    Args:
        input_file: Path to the input file; the format is inferred from the
            file suffix (.sdf, .csv, .tsv, .txt).
        smiles_column: Name of the SMILES column for CSV/TSV inputs.

    Returns:
        Tuple of (DataFrame with metadata, list of RDKit molecules). Rows
        whose SMILES could not be parsed are dropped from both, keeping the
        DataFrame and molecule list row-aligned.

    Exits the process (sys.exit(1)) on an unsupported format or a missing
    SMILES column.
    """
    suffix = input_file.suffix.lower()
    if suffix == ".sdf":
        print(f"Loading SDF file: {input_file}")
        supplier = Chem.SDMolSupplier(str(input_file))
        # SDMolSupplier yields None for records RDKit cannot parse.
        mols = [mol for mol in supplier if mol is not None]
        # Create DataFrame from SDF properties
        data = []
        for mol in mols:
            props = mol.GetPropsAsDict()
            props["smiles"] = Chem.MolToSmiles(mol)
            data.append(props)
        df = pd.DataFrame(data)
    elif suffix in [".csv", ".tsv"]:
        print(f"Loading CSV/TSV file: {input_file}")
        sep = "\t" if suffix == ".tsv" else ","
        df = pd.read_csv(input_file, sep=sep)
        if smiles_column not in df.columns:
            print(f"Error: Column '{smiles_column}' not found in file")
            print(f"Available columns: {', '.join(df.columns)}")
            sys.exit(1)
        print(f"Converting SMILES to molecules...")
        mols = [dm.to_mol(smi) for smi in tqdm(df[smiles_column], desc="Parsing")]
    elif suffix == ".txt":
        print(f"Loading text file: {input_file}")
        with open(input_file) as f:
            # One SMILES per line; blank lines are ignored.
            smiles_list = [line.strip() for line in f if line.strip()]
        df = pd.DataFrame({"smiles": smiles_list})
        print(f"Converting SMILES to molecules...")
        mols = [dm.to_mol(smi) for smi in tqdm(smiles_list, desc="Parsing")]
    else:
        print(f"Error: Unsupported file format: {suffix}")
        print("Supported formats: .csv, .tsv, .sdf, .txt")
        sys.exit(1)
    # Filter out invalid molecules
    # (dm.to_mol returns None for unparseable SMILES; drop matching df rows
    # so the DataFrame stays row-aligned with the molecule list).
    valid_indices = [i for i, mol in enumerate(mols) if mol is not None]
    if len(valid_indices) < len(mols):
        n_invalid = len(mols) - len(valid_indices)
        print(f"Warning: {n_invalid} invalid molecules removed")
        df = df.iloc[valid_indices].reset_index(drop=True)
        mols = [mols[i] for i in valid_indices]
    print(f"Loaded {len(mols)} valid molecules")
    return df, mols
def apply_rule_filters(mols: List[Chem.Mol], rules: List[str], n_jobs: int) -> pd.DataFrame:
    """Run the requested medchem rule filters over the molecule list.

    Returns a DataFrame with one boolean column per rule plus a
    ``passes_all_rules`` summary column (True when every rule passes).
    """
    print(f"\nApplying rule filters: {', '.join(rules)}")
    rule_filter = mc.rules.RuleFilters(rule_list=rules)
    outcome = pd.DataFrame(rule_filter(mols=mols, n_jobs=n_jobs, progress=True))
    # Row-wise AND across all rule columns gives the overall verdict.
    outcome["passes_all_rules"] = outcome.all(axis=1)
    return outcome
def apply_structural_alerts(mols: List[Chem.Mol], alert_type: str, n_jobs: int) -> pd.DataFrame:
    """Apply structural alert filters.

    Supported alert_type values: "common", "nibr", "lilly", "pains". Each
    branch normalizes the filter-specific result structure into a flat
    per-molecule DataFrame.

    Raises:
        ValueError: if alert_type is not one of the supported names.
    """
    print(f"\nApplying {alert_type} structural alerts...")
    if alert_type == "common":
        alert_filter = mc.structural.CommonAlertsFilters()
        results = alert_filter(mols=mols, n_jobs=n_jobs, progress=True)
        # Assumes each result is a dict with has_alerts / num_alerts /
        # alert_details keys — TODO confirm against the installed medchem version.
        df_results = pd.DataFrame({
            "has_common_alerts": [r["has_alerts"] for r in results],
            "num_common_alerts": [r["num_alerts"] for r in results],
            "common_alert_details": [", ".join(r["alert_details"]) if r["alert_details"] else "" for r in results]
        })
    elif alert_type == "nibr":
        nibr_filter = mc.structural.NIBRFilters()
        results = nibr_filter(mols=mols, n_jobs=n_jobs, progress=True)
        # NIBR filter is treated here as yielding one pass/fail flag per molecule.
        df_results = pd.DataFrame({
            "passes_nibr": results
        })
    elif alert_type == "lilly":
        lilly_filter = mc.structural.LillyDemeritsFilters()
        results = lilly_filter(mols=mols, n_jobs=n_jobs, progress=True)
        df_results = pd.DataFrame({
            "lilly_demerits": [r["demerits"] for r in results],
            "passes_lilly": [r["passes"] for r in results],
            "lilly_patterns": [", ".join([p["pattern"] for p in r["matched_patterns"]]) for r in results]
        })
    elif alert_type == "pains":
        # PAINS check runs per-molecule and is not parallelized here.
        results = [mc.rules.basic_rules.pains_filter(mol) for mol in tqdm(mols, desc="PAINS")]
        df_results = pd.DataFrame({
            "passes_pains": results
        })
    else:
        raise ValueError(f"Unknown alert type: {alert_type}")
    return df_results
def apply_complexity_filter(mols: List[Chem.Mol], max_complexity: float, method: str = "bertz") -> pd.DataFrame:
    """Score molecular complexity and flag molecules within the threshold.

    Returns a DataFrame with a ``complexity_score`` column and a boolean
    ``passes_complexity`` column (score <= max_complexity).
    """
    print(f"\nCalculating molecular complexity (method={method}, max={max_complexity})...")
    scores = []
    for mol in tqdm(mols, desc="Complexity"):
        scores.append(mc.complexity.calculate_complexity(mol, method=method))
    return pd.DataFrame({
        "complexity_score": scores,
        "passes_complexity": [score <= max_complexity for score in scores],
    })
def apply_constraints(mols: List[Chem.Mol], constraints: Dict, n_jobs: int) -> pd.DataFrame:
    """Evaluate custom property constraints for every molecule.

    Returns a DataFrame with a boolean ``passes_constraints`` column and a
    comma-joined ``constraint_violations`` description (empty when clean).
    """
    print(f"\nApplying constraints: {constraints}")
    checker = mc.constraints.Constraints(**constraints)
    outcomes = checker(mols=mols, n_jobs=n_jobs, progress=True)
    passes = [entry["passes"] for entry in outcomes]
    violations = [", ".join(entry["violations"]) if entry["violations"] else "" for entry in outcomes]
    return pd.DataFrame({
        "passes_constraints": passes,
        "constraint_violations": violations,
    })
def apply_chemical_groups(mols: List[Chem.Mol], groups: List[str]) -> pd.DataFrame:
    """Flag the presence of each requested chemical group per molecule.

    Returns a DataFrame with one boolean ``has_<group>`` column per group.
    """
    print(f"\nDetecting chemical groups: {', '.join(groups)}")
    detector = mc.groups.ChemicalGroup(groups=groups)
    matches = detector.get_all_matches(mols)
    columns = {
        f"has_{group}": [bool(match.get(group)) for match in matches]
        for group in groups
    }
    return pd.DataFrame(columns)
def generate_summary(df: pd.DataFrame, output_file: Path):
    """Generate filtering summary report.

    Writes ``<output_stem>_summary.txt`` next to ``output_file``, summarizing
    pass rates for every filter family present in ``df``. The input DataFrame
    is left unmodified (the overall pass rate is computed on a local series
    instead of adding a column to the caller's frame), and an empty DataFrame
    no longer raises ZeroDivisionError.

    Args:
        df: Combined results table (input metadata plus filter columns).
        output_file: Path of the results CSV; used only to derive the report
            file name and directory.
    """
    summary_file = output_file.parent / f"{output_file.stem}_summary.txt"
    n_total = len(df)

    def pct(n_pass) -> float:
        # Guard against an empty input so the report never divides by zero.
        return 100 * n_pass / n_total if n_total else 0.0

    with open(summary_file, "w") as f:
        f.write("=" * 80 + "\n")
        f.write("MEDCHEM FILTERING SUMMARY\n")
        f.write("=" * 80 + "\n\n")
        f.write(f"Total molecules processed: {n_total}\n\n")
        # Rule results
        rule_cols = [col for col in df.columns if col.startswith("rule_") or col == "passes_all_rules"]
        if rule_cols:
            f.write("RULE FILTERS:\n")
            f.write("-" * 40 + "\n")
            for col in rule_cols:
                # Only boolean columns are pass/fail flags worth summarizing.
                if df[col].dtype == bool:
                    n_pass = df[col].sum()
                    f.write(f" {col}: {n_pass} passed ({pct(n_pass):.1f}%)\n")
            f.write("\n")
        # Structural alerts
        alert_cols = [col for col in df.columns if "alert" in col.lower() or "nibr" in col.lower() or "lilly" in col.lower() or "pains" in col.lower()]
        if alert_cols:
            f.write("STRUCTURAL ALERTS:\n")
            f.write("-" * 40 + "\n")
            if "has_common_alerts" in df.columns:
                n_clean = (~df["has_common_alerts"]).sum()
                f.write(f" No common alerts: {n_clean} ({pct(n_clean):.1f}%)\n")
            if "passes_nibr" in df.columns:
                n_pass = df["passes_nibr"].sum()
                f.write(f" Passes NIBR: {n_pass} ({pct(n_pass):.1f}%)\n")
            if "passes_lilly" in df.columns:
                n_pass = df["passes_lilly"].sum()
                f.write(f" Passes Lilly: {n_pass} ({pct(n_pass):.1f}%)\n")
                avg_demerits = df["lilly_demerits"].mean()
                f.write(f" Average Lilly demerits: {avg_demerits:.1f}\n")
            if "passes_pains" in df.columns:
                n_pass = df["passes_pains"].sum()
                f.write(f" Passes PAINS: {n_pass} ({pct(n_pass):.1f}%)\n")
            f.write("\n")
        # Complexity
        if "complexity_score" in df.columns:
            f.write("COMPLEXITY:\n")
            f.write("-" * 40 + "\n")
            avg_complexity = df["complexity_score"].mean()
            f.write(f" Average complexity: {avg_complexity:.1f}\n")
            if "passes_complexity" in df.columns:
                n_pass = df["passes_complexity"].sum()
                f.write(f" Within threshold: {n_pass} ({pct(n_pass):.1f}%)\n")
            f.write("\n")
        # Constraints
        if "passes_constraints" in df.columns:
            f.write("CONSTRAINTS:\n")
            f.write("-" * 40 + "\n")
            n_pass = df["passes_constraints"].sum()
            f.write(f" Passes all constraints: {n_pass} ({pct(n_pass):.1f}%)\n")
            f.write("\n")
        # Overall pass rate, computed locally so the caller's df is not mutated.
        pass_cols = [col for col in df.columns if col.startswith("passes_")]
        if pass_cols:
            passes_all = df[pass_cols].all(axis=1)
            n_pass = passes_all.sum()
            f.write("OVERALL:\n")
            f.write("-" * 40 + "\n")
            f.write(f" Molecules passing all filters: {n_pass} ({pct(n_pass):.1f}%)\n")
        f.write("\n" + "=" * 80 + "\n")
    print(f"\nSummary report saved to: {summary_file}")
def main():
    """CLI entry point: load a compound library, apply the requested filters,
    and write the combined results (plus an optional summary report).

    Fix: numeric optional arguments are now compared against None instead of
    relying on truthiness, so explicit zero limits (e.g. ``--hbd-max 0`` or
    ``--complexity 0``) are honored instead of being silently skipped.
    """
    parser = argparse.ArgumentParser(
        description="Batch molecular filtering using medchem",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__
    )
    # Input/Output
    parser.add_argument("input", type=Path, help="Input file (CSV, TSV, SDF, or TXT)")
    parser.add_argument("--output", "-o", type=Path, required=True, help="Output CSV file")
    parser.add_argument("--smiles-column", default="smiles", help="Name of SMILES column (default: smiles)")
    # Rule filters
    parser.add_argument("--rules", help="Comma-separated list of rules (e.g., rule_of_five,rule_of_cns)")
    # Structural alerts
    parser.add_argument("--common-alerts", action="store_true", help="Apply common structural alerts")
    parser.add_argument("--nibr", action="store_true", help="Apply NIBR filters")
    parser.add_argument("--lilly", action="store_true", help="Apply Lilly demerits filter")
    parser.add_argument("--pains", action="store_true", help="Apply PAINS filter")
    # Complexity
    parser.add_argument("--complexity", type=float, help="Maximum complexity threshold")
    parser.add_argument("--complexity-method", default="bertz", choices=["bertz", "whitlock", "barone"],
                        help="Complexity calculation method")
    # Constraints
    parser.add_argument("--mw-range", help="Molecular weight range (e.g., 200,500)")
    parser.add_argument("--logp-range", help="LogP range (e.g., -2,5)")
    parser.add_argument("--tpsa-max", type=float, help="Maximum TPSA")
    parser.add_argument("--hbd-max", type=int, help="Maximum H-bond donors")
    parser.add_argument("--hba-max", type=int, help="Maximum H-bond acceptors")
    parser.add_argument("--rotatable-bonds-max", type=int, help="Maximum rotatable bonds")
    # Chemical groups
    parser.add_argument("--groups", help="Comma-separated chemical groups to detect")
    # Processing options
    parser.add_argument("--n-jobs", type=int, default=-1, help="Number of parallel jobs (-1 = all cores)")
    parser.add_argument("--no-summary", action="store_true", help="Don't generate summary report")
    parser.add_argument("--filter-output", action="store_true", help="Only output molecules passing all filters")
    args = parser.parse_args()
    # Load molecules
    df, mols = load_molecules(args.input, args.smiles_column)
    # Apply filters; each step appends a row-aligned results DataFrame.
    result_dfs = [df]
    # Rules
    if args.rules:
        rule_list = [r.strip() for r in args.rules.split(",")]
        df_rules = apply_rule_filters(mols, rule_list, args.n_jobs)
        result_dfs.append(df_rules)
    # Structural alerts
    if args.common_alerts:
        df_alerts = apply_structural_alerts(mols, "common", args.n_jobs)
        result_dfs.append(df_alerts)
    if args.nibr:
        df_nibr = apply_structural_alerts(mols, "nibr", args.n_jobs)
        result_dfs.append(df_nibr)
    if args.lilly:
        df_lilly = apply_structural_alerts(mols, "lilly", args.n_jobs)
        result_dfs.append(df_lilly)
    if args.pains:
        df_pains = apply_structural_alerts(mols, "pains", args.n_jobs)
        result_dfs.append(df_pains)
    # Complexity: compare against None so a 0.0 threshold is still applied.
    if args.complexity is not None:
        df_complexity = apply_complexity_filter(mols, args.complexity, args.complexity_method)
        result_dfs.append(df_complexity)
    # Constraints (same None-checks: 0 is a valid numeric limit)
    constraints = {}
    if args.mw_range:
        mw_min, mw_max = map(float, args.mw_range.split(","))
        constraints["mw_range"] = (mw_min, mw_max)
    if args.logp_range:
        logp_min, logp_max = map(float, args.logp_range.split(","))
        constraints["logp_range"] = (logp_min, logp_max)
    if args.tpsa_max is not None:
        constraints["tpsa_max"] = args.tpsa_max
    if args.hbd_max is not None:
        constraints["hbd_max"] = args.hbd_max
    if args.hba_max is not None:
        constraints["hba_max"] = args.hba_max
    if args.rotatable_bonds_max is not None:
        constraints["rotatable_bonds_max"] = args.rotatable_bonds_max
    if constraints:
        df_constraints = apply_constraints(mols, constraints, args.n_jobs)
        result_dfs.append(df_constraints)
    # Chemical groups
    if args.groups:
        group_list = [g.strip() for g in args.groups.split(",")]
        df_groups = apply_chemical_groups(mols, group_list)
        result_dfs.append(df_groups)
    # Combine results column-wise (all frames are row-aligned with df).
    df_final = pd.concat(result_dfs, axis=1)
    # Filter output if requested
    if args.filter_output:
        pass_cols = [col for col in df_final.columns if col.startswith("passes_")]
        if pass_cols:
            df_final["passes_all"] = df_final[pass_cols].all(axis=1)
            df_final = df_final[df_final["passes_all"]]
            print(f"\nFiltered to {len(df_final)} molecules passing all filters")
    # Save results
    args.output.parent.mkdir(parents=True, exist_ok=True)
    df_final.to_csv(args.output, index=False)
    print(f"\nResults saved to: {args.output}")
    # Generate summary
    if not args.no_summary:
        generate_summary(df_final, args.output)
    print("\nDone!")
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/medchem/scripts/filter_molecules.py",
"license": "MIT License",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/neuropixels-analysis/assets/analysis_template.py | #!/usr/bin/env python
"""
Neuropixels Analysis Template
Complete analysis workflow from raw data to curated units.
Copy and customize this template for your analysis.
Usage:
1. Copy this file to your analysis directory
2. Update the PARAMETERS section
3. Run: python analysis_template.py
"""
# =============================================================================
# PARAMETERS - Customize these for your analysis
# =============================================================================
# Input/Output paths
DATA_PATH = '/path/to/your/spikeglx/data/'  # raw recording directory
OUTPUT_DIR = 'analysis_output/'  # all pipeline outputs are written here
DATA_FORMAT = 'spikeglx'  # 'spikeglx', 'openephys', or 'nwb'
STREAM_ID = 'imec0.ap'  # For multi-probe recordings
# Preprocessing parameters
FREQ_MIN = 300  # Highpass filter (Hz)
FREQ_MAX = 6000  # Lowpass filter (Hz)
APPLY_PHASE_SHIFT = True  # inter-channel sample-time alignment
APPLY_CMR = True  # common median reference
DETECT_BAD_CHANNELS = True  # drop channels flagged by detect_bad_channels
# Motion correction
CORRECT_MOTION = True
MOTION_PRESET = 'nonrigid_accurate'  # 'kilosort_like', 'nonrigid_fast_and_accurate'
# Spike sorting
SORTER = 'kilosort4'  # 'kilosort4', 'spykingcircus2', 'mountainsort5'
SORTER_PARAMS = {
    'batch_size': 30000,
    'nblocks': 1,  # Increase for long recordings with drift
}
# Quality metrics and curation
CURATION_METHOD = 'allen'  # 'allen', 'ibl', 'strict'
# Processing
N_JOBS = -1  # -1 = all cores
# =============================================================================
# ANALYSIS PIPELINE - Usually no need to modify below
# =============================================================================
from pathlib import Path
import json
import spikeinterface.full as si
from spikeinterface.exporters import export_to_phy
def main():
    """Run the full analysis pipeline.

    Steps: load -> preprocess -> (optional) motion correction -> spike
    sorting -> postprocessing -> quality metrics -> curation -> Phy export.
    All settings come from the module-level PARAMETERS section above; every
    stage writes its artifacts under OUTPUT_DIR.
    """
    output_path = Path(OUTPUT_DIR)
    output_path.mkdir(parents=True, exist_ok=True)
    # =========================================================================
    # 1. LOAD DATA
    # =========================================================================
    print("=" * 60)
    print("1. LOADING DATA")
    print("=" * 60)
    if DATA_FORMAT == 'spikeglx':
        recording = si.read_spikeglx(DATA_PATH, stream_id=STREAM_ID)
    elif DATA_FORMAT == 'openephys':
        recording = si.read_openephys(DATA_PATH)
    elif DATA_FORMAT == 'nwb':
        recording = si.read_nwb(DATA_PATH)
    else:
        raise ValueError(f"Unknown format: {DATA_FORMAT}")
    print(f"Recording: {recording.get_num_channels()} channels")
    print(f"Duration: {recording.get_total_duration():.1f} seconds")
    print(f"Sampling rate: {recording.get_sampling_frequency()} Hz")
    # =========================================================================
    # 2. PREPROCESSING
    # =========================================================================
    print("\n" + "=" * 60)
    print("2. PREPROCESSING")
    print("=" * 60)
    # Each preprocessing step wraps the previous recording lazily.
    rec = recording
    # Bandpass filter
    print(f"Applying bandpass filter ({FREQ_MIN}-{FREQ_MAX} Hz)...")
    rec = si.bandpass_filter(rec, freq_min=FREQ_MIN, freq_max=FREQ_MAX)
    # Phase shift correction
    if APPLY_PHASE_SHIFT:
        print("Applying phase shift correction...")
        rec = si.phase_shift(rec)
    # Bad channel detection (bad channels are removed before referencing)
    if DETECT_BAD_CHANNELS:
        print("Detecting bad channels...")
        bad_ids, _ = si.detect_bad_channels(rec)
        if len(bad_ids) > 0:
            print(f" Removing {len(bad_ids)} bad channels")
            rec = rec.remove_channels(bad_ids)
    # Common median reference
    if APPLY_CMR:
        print("Applying common median reference...")
        rec = si.common_reference(rec, operator='median', reference='global')
    # Save preprocessed
    print("Saving preprocessed recording...")
    rec.save(folder=output_path / 'preprocessed', n_jobs=N_JOBS)
    # =========================================================================
    # 3. MOTION CORRECTION
    # =========================================================================
    if CORRECT_MOTION:
        print("\n" + "=" * 60)
        print("3. MOTION CORRECTION")
        print("=" * 60)
        print(f"Estimating and correcting motion (preset: {MOTION_PRESET})...")
        rec = si.correct_motion(
            rec,
            preset=MOTION_PRESET,
            folder=output_path / 'motion',
        )
    # =========================================================================
    # 4. SPIKE SORTING
    # =========================================================================
    print("\n" + "=" * 60)
    print("4. SPIKE SORTING")
    print("=" * 60)
    print(f"Running {SORTER}...")
    sorting = si.run_sorter(
        SORTER,
        rec,
        output_folder=output_path / f'{SORTER}_output',
        verbose=True,
        **SORTER_PARAMS,
    )
    print(f"Found {len(sorting.unit_ids)} units")
    # =========================================================================
    # 5. POSTPROCESSING
    # =========================================================================
    print("\n" + "=" * 60)
    print("5. POSTPROCESSING")
    print("=" * 60)
    print("Creating SortingAnalyzer...")
    analyzer = si.create_sorting_analyzer(
        sorting,
        rec,
        format='binary_folder',
        folder=output_path / 'analyzer',
        sparse=True,
    )
    print("Computing extensions...")
    # random_spikes is computed first; waveform-based extensions build on it.
    analyzer.compute('random_spikes', max_spikes_per_unit=500)
    analyzer.compute('waveforms', ms_before=1.0, ms_after=2.0)
    analyzer.compute('templates', operators=['average', 'std'])
    analyzer.compute('noise_levels')
    analyzer.compute('spike_amplitudes')
    analyzer.compute('correlograms', window_ms=50.0, bin_ms=1.0)
    analyzer.compute('unit_locations', method='monopolar_triangulation')
    # =========================================================================
    # 6. QUALITY METRICS
    # =========================================================================
    print("\n" + "=" * 60)
    print("6. QUALITY METRICS")
    print("=" * 60)
    print("Computing quality metrics...")
    metrics = si.compute_quality_metrics(
        analyzer,
        metric_names=[
            'snr', 'isi_violations_ratio', 'presence_ratio',
            'amplitude_cutoff', 'firing_rate', 'amplitude_cv',
        ],
        n_jobs=N_JOBS,
    )
    metrics.to_csv(output_path / 'quality_metrics.csv')
    print(f"Saved metrics to: {output_path / 'quality_metrics.csv'}")
    # Print summary
    print("\nMetrics summary:")
    for col in ['snr', 'isi_violations_ratio', 'presence_ratio', 'firing_rate']:
        if col in metrics.columns:
            print(f" {col}: {metrics[col].median():.4f} (median)")
    # =========================================================================
    # 7. CURATION
    # =========================================================================
    print("\n" + "=" * 60)
    print("7. CURATION")
    print("=" * 60)
    # Curation criteria: snr / presence_ratio are minima,
    # isi_violations_ratio is a maximum.
    criteria = {
        'allen': {'snr': 3.0, 'isi_violations_ratio': 0.1, 'presence_ratio': 0.9},
        'ibl': {'snr': 4.0, 'isi_violations_ratio': 0.5, 'presence_ratio': 0.5},
        'strict': {'snr': 5.0, 'isi_violations_ratio': 0.01, 'presence_ratio': 0.95},
    }[CURATION_METHOD]
    print(f"Applying {CURATION_METHOD} criteria: {criteria}")
    labels = {}
    for unit_id in metrics.index:
        row = metrics.loc[unit_id]
        is_good = (
            row.get('snr', 0) >= criteria['snr'] and
            row.get('isi_violations_ratio', 1) <= criteria['isi_violations_ratio'] and
            row.get('presence_ratio', 0) >= criteria['presence_ratio']
        )
        if is_good:
            labels[int(unit_id)] = 'good'
        elif row.get('snr', 0) < 2:
            # Very low SNR units are labeled noise rather than MUA.
            labels[int(unit_id)] = 'noise'
        else:
            labels[int(unit_id)] = 'mua'
    # Save labels (JSON keys are the integer unit ids)
    with open(output_path / 'curation_labels.json', 'w') as f:
        json.dump(labels, f, indent=2)
    # Count
    good_count = sum(1 for v in labels.values() if v == 'good')
    mua_count = sum(1 for v in labels.values() if v == 'mua')
    noise_count = sum(1 for v in labels.values() if v == 'noise')
    print(f"\nCuration results:")
    print(f" Good: {good_count}")
    print(f" MUA: {mua_count}")
    print(f" Noise: {noise_count}")
    print(f" Total: {len(labels)}")
    # =========================================================================
    # 8. EXPORT
    # =========================================================================
    print("\n" + "=" * 60)
    print("8. EXPORT")
    print("=" * 60)
    print("Exporting to Phy...")
    export_to_phy(
        analyzer,
        output_folder=output_path / 'phy_export',
        copy_binary=True,
    )
    print(f"\nAnalysis complete!")
    print(f"Results saved to: {output_path}")
    print(f"\nTo open in Phy:")
    print(f" phy template-gui {output_path / 'phy_export' / 'params.py'}")
# Script entry point: run the full pipeline with the PARAMETERS above.
if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/neuropixels-analysis/assets/analysis_template.py",
"license": "MIT License",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/neuropixels-analysis/scripts/compute_metrics.py | #!/usr/bin/env python
"""
Compute quality metrics and curate units.
Usage:
python compute_metrics.py sorting/ preprocessed/ --output metrics/
"""
import argparse
from pathlib import Path
import json
import pandas as pd
import spikeinterface.full as si
# Curation criteria presets.
# Per preset: 'snr' and 'presence_ratio' are minimum acceptable values;
# 'isi_violations_ratio' and 'amplitude_cutoff' are maximums. A value that
# evaluates falsy (e.g. the 'ibl' preset's amplitude_cutoff of None)
# disables that check in compute_metrics.
CURATION_CRITERIA = {
    'allen': {
        'snr': 3.0,
        'isi_violations_ratio': 0.1,
        'presence_ratio': 0.9,
        'amplitude_cutoff': 0.1,
    },
    'ibl': {
        'snr': 4.0,
        'isi_violations_ratio': 0.5,
        'presence_ratio': 0.5,
        'amplitude_cutoff': None,
    },
    'strict': {
        'snr': 5.0,
        'isi_violations_ratio': 0.01,
        'presence_ratio': 0.95,
        'amplitude_cutoff': 0.05,
    },
}
def compute_metrics(
    sorting_path: str,
    recording_path: str,
    output_dir: str,
    curation_method: str = 'allen',
    n_jobs: int = -1,
):
    """Compute quality metrics and apply curation.

    Loads a saved sorting and preprocessed recording, builds a
    SortingAnalyzer, computes waveform-based extensions and quality metrics,
    then labels each unit 'good', 'mua', or 'noise' via the chosen preset.

    Fix: criterion checks now compare against None instead of truthiness, so
    a legitimate threshold of 0 / 0.0 in a preset is enforced rather than
    silently skipped; None still disables a check (e.g. the 'ibl' preset's
    amplitude_cutoff).

    Args:
        sorting_path: Directory containing the saved sorting (a 'sorting'
            subfolder is expected).
        recording_path: Directory containing the preprocessed recording
            (a 'preprocessed' subfolder is expected).
        output_dir: Destination for the analyzer, metrics CSV, labels JSON.
        curation_method: One of the CURATION_CRITERIA presets; unknown names
            fall back to 'allen'.
        n_jobs: Parallel jobs for metric computation (-1 = all cores).

    Returns:
        Tuple of (analyzer, metrics DataFrame, labels dict keyed by unit id).
    """
    print(f"Loading sorting from: {sorting_path}")
    sorting = si.load_extractor(Path(sorting_path) / 'sorting')
    print(f"Loading recording from: {recording_path}")
    recording = si.load_extractor(Path(recording_path) / 'preprocessed')
    print(f"Units: {len(sorting.unit_ids)}")
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)
    # Create analyzer
    print("Creating SortingAnalyzer...")
    analyzer = si.create_sorting_analyzer(
        sorting,
        recording,
        format='binary_folder',
        folder=output_path / 'analyzer',
        sparse=True,
    )
    # Compute extensions (random_spikes first; waveforms/templates depend on it)
    print("Computing waveforms...")
    analyzer.compute('random_spikes', max_spikes_per_unit=500)
    analyzer.compute('waveforms', ms_before=1.0, ms_after=2.0)
    analyzer.compute('templates', operators=['average', 'std'])
    print("Computing additional extensions...")
    analyzer.compute('noise_levels')
    analyzer.compute('spike_amplitudes')
    analyzer.compute('correlograms', window_ms=50.0, bin_ms=1.0)
    analyzer.compute('unit_locations', method='monopolar_triangulation')
    # Compute quality metrics
    print("Computing quality metrics...")
    metrics = si.compute_quality_metrics(
        analyzer,
        metric_names=[
            'snr',
            'isi_violations_ratio',
            'presence_ratio',
            'amplitude_cutoff',
            'firing_rate',
            'amplitude_cv',
            'sliding_rp_violation',
        ],
        n_jobs=n_jobs,
    )
    # Save metrics
    metrics.to_csv(output_path / 'quality_metrics.csv')
    print(f"Saved metrics to: {output_path / 'quality_metrics.csv'}")
    # Apply curation
    criteria = CURATION_CRITERIA.get(curation_method, CURATION_CRITERIA['allen'])
    print(f"\nApplying {curation_method} curation criteria: {criteria}")
    labels = {}
    for unit_id in metrics.index:
        row = metrics.loc[unit_id]
        # Check each criterion; None disables the check, and an explicit 0
        # threshold is still applied (hence the `is not None` comparisons).
        is_good = True
        if criteria.get('snr') is not None and row.get('snr', 0) < criteria['snr']:
            is_good = False
        if criteria.get('isi_violations_ratio') is not None and row.get('isi_violations_ratio', 1) > criteria['isi_violations_ratio']:
            is_good = False
        if criteria.get('presence_ratio') is not None and row.get('presence_ratio', 0) < criteria['presence_ratio']:
            is_good = False
        if criteria.get('amplitude_cutoff') is not None and row.get('amplitude_cutoff', 1) > criteria['amplitude_cutoff']:
            is_good = False
        # Classify: failing units with very low SNR are noise, otherwise MUA.
        if is_good:
            labels[int(unit_id)] = 'good'
        elif row.get('snr', 0) < 2:
            labels[int(unit_id)] = 'noise'
        else:
            labels[int(unit_id)] = 'mua'
    # Save labels
    with open(output_path / 'curation_labels.json', 'w') as f:
        json.dump(labels, f, indent=2)
    # Summary
    label_counts = {}
    for label in labels.values():
        label_counts[label] = label_counts.get(label, 0) + 1
    print(f"\nCuration summary:")
    print(f" Good: {label_counts.get('good', 0)}")
    print(f" MUA: {label_counts.get('mua', 0)}")
    print(f" Noise: {label_counts.get('noise', 0)}")
    print(f" Total: {len(labels)}")
    # Metrics summary
    print(f"\nMetrics summary:")
    for col in ['snr', 'isi_violations_ratio', 'presence_ratio', 'firing_rate']:
        if col in metrics.columns:
            print(f" {col}: {metrics[col].median():.4f} (median)")
    return analyzer, metrics, labels
def main():
    """Parse command-line arguments and run the metrics/curation pipeline."""
    parser = argparse.ArgumentParser(description='Compute quality metrics')
    parser.add_argument('sorting', help='Path to sorting directory')
    parser.add_argument('recording', help='Path to preprocessed recording')
    parser.add_argument('--output', '-o', default='metrics/', help='Output directory')
    parser.add_argument('--curation', '-c', default='allen',
                        choices=['allen', 'ibl', 'strict'])
    parser.add_argument('--n-jobs', type=int, default=-1, help='Number of parallel jobs')
    opts = parser.parse_args()
    # Positional paths map straight through; options go in as keywords.
    compute_metrics(opts.sorting, opts.recording, opts.output,
                    curation_method=opts.curation, n_jobs=opts.n_jobs)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/neuropixels-analysis/scripts/compute_metrics.py",
"license": "MIT License",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/neuropixels-analysis/scripts/explore_recording.py | #!/usr/bin/env python3
"""
Quick exploration of Neuropixels recording.
Usage:
python explore_recording.py /path/to/spikeglx/data
"""
import argparse
import spikeinterface.full as si
import matplotlib.pyplot as plt
import numpy as np
def explore_recording(data_path: str, stream_id: str = 'imec0.ap'):
    """Explore a Neuropixels recording.

    Prints basic recording/probe information, runs bad-channel detection,
    and reports simple signal statistics over the first second of data.

    Args:
        data_path: Path to a SpikeGLX recording folder.
        stream_id: Stream to load (e.g. 'imec0.ap' for probe 0, AP band).

    Returns:
        The loaded (unmodified) SpikeInterface recording object.
    """
    print(f"Loading: {data_path}")
    recording = si.read_spikeglx(data_path, stream_id=stream_id)
    # Basic info
    print("\n" + "="*50)
    print("RECORDING INFO")
    print("="*50)
    print(f"Channels: {recording.get_num_channels()}")
    print(f"Duration: {recording.get_total_duration():.2f} s ({recording.get_total_duration()/60:.2f} min)")
    print(f"Sampling rate: {recording.get_sampling_frequency()} Hz")
    print(f"Total samples: {recording.get_num_samples()}")
    # Probe info (not every probe object exposes model_name, hence the guard)
    probe = recording.get_probe()
    print(f"\nProbe: {probe.manufacturer} {probe.model_name if hasattr(probe, 'model_name') else ''}")
    print(f"Probe shape: {probe.ndim}D")
    # Channel groups — presumably one group per shank; verify for your probe.
    if recording.get_channel_groups() is not None:
        groups = np.unique(recording.get_channel_groups())
        print(f"Channel groups (shanks): {len(groups)}")
    # Check for bad channels
    print("\n" + "="*50)
    print("BAD CHANNEL DETECTION")
    print("="*50)
    bad_ids, labels = si.detect_bad_channels(recording)
    if len(bad_ids) > 0:
        print(f"Bad channels found: {len(bad_ids)}")
        # NOTE(review): detect_bad_channels may return labels for ALL
        # channels, not just the bad ones — confirm this zip pairs each
        # bad channel with its own label.
        for ch, label in zip(bad_ids, labels):
            print(f" Channel {ch}: {label}")
    else:
        print("No bad channels detected")
    # Sample traces
    print("\n" + "="*50)
    print("SIGNAL STATISTICS")
    print("="*50)
    # Get 1 second of data (sample count == sampling rate)
    n_samples = int(recording.get_sampling_frequency())
    traces = recording.get_traces(start_frame=0, end_frame=n_samples)
    print(f"Sample mean: {np.mean(traces):.2f}")
    print(f"Sample std: {np.std(traces):.2f}")
    print(f"Sample min: {np.min(traces):.2f}")
    print(f"Sample max: {np.max(traces):.2f}")
    return recording
def plot_probe(recording, output_path=None):
    """Plot the probe channel layout; save to output_path or show interactively."""
    fig, ax = plt.subplots(figsize=(4, 12))
    si.plot_probe_map(recording, ax=ax, with_channel_ids=False)
    ax.set_title('Probe Layout')
    if not output_path:
        plt.show()
    else:
        plt.savefig(output_path, dpi=150, bbox_inches='tight')
        print(f"Saved: {output_path}")
def plot_traces(recording, duration=1.0, output_path=None):
    """Plot a stacked snippet of raw traces from up to 20 evenly spaced channels."""
    fs = recording.get_sampling_frequency()
    n_samples = int(duration * fs)
    traces = recording.get_traces(start_frame=0, end_frame=n_samples)
    fig, ax = plt.subplots(figsize=(12, 8))
    # Evenly spaced subset of channels, vertically offset for visibility.
    n_channels = min(20, recording.get_num_channels())
    channel_idx = np.linspace(0, recording.get_num_channels() - 1, n_channels, dtype=int)
    time = np.arange(n_samples) / fs
    for row, ch in enumerate(channel_idx):
        ax.plot(time, traces[:, ch] + row * 200, 'k', linewidth=0.5)
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('Channel (offset)')
    ax.set_title(f'Raw Traces ({n_channels} channels)')
    if not output_path:
        plt.show()
    else:
        plt.savefig(output_path, dpi=150, bbox_inches='tight')
        print(f"Saved: {output_path}")
def plot_power_spectrum(recording, output_path=None):
    """Plot the Welch power spectrum of the middle channel (up to 10 s of data)."""
    from scipy import signal
    mid_ch = recording.get_num_channels() // 2
    n_samples = min(int(10 * recording.get_sampling_frequency()), recording.get_num_samples())
    traces = recording.get_traces(
        start_frame=0,
        end_frame=n_samples,
        channel_ids=[recording.channel_ids[mid_ch]]
    ).flatten()
    fs = recording.get_sampling_frequency()
    freqs, psd = signal.welch(traces, fs, nperseg=4096)
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.semilogy(freqs, psd)
    ax.set_xlabel('Frequency (Hz)')
    ax.set_ylabel('Power Spectral Density')
    ax.set_title(f'Power Spectrum (Channel {mid_ch})')
    ax.set_xlim(0, 5000)
    # Mark the usual AP-band corner frequencies.
    for freq, tag in ((300, '300 Hz'), (6000, '6000 Hz')):
        ax.axvline(freq, color='r', linestyle='--', alpha=0.5, label=tag)
    ax.legend()
    ax.grid(True, alpha=0.3)
    if not output_path:
        plt.show()
    else:
        plt.savefig(output_path, dpi=150, bbox_inches='tight')
        print(f"Saved: {output_path}")
# Script entry point: print recording info, optionally render diagnostic plots.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Explore Neuropixels recording')
    parser.add_argument('data_path', help='Path to SpikeGLX recording')
    parser.add_argument('--stream', default='imec0.ap', help='Stream ID')
    parser.add_argument('--plot', action='store_true', help='Generate plots')
    parser.add_argument('--output', default=None, help='Output directory for plots')
    cli = parser.parse_args()
    recording = explore_recording(cli.data_path, cli.stream)
    if cli.plot:
        if cli.output:
            import os
            os.makedirs(cli.output, exist_ok=True)
            # Save each figure into the output directory.
            plot_probe(recording, f"{cli.output}/probe_map.png")
            plot_traces(recording, output_path=f"{cli.output}/raw_traces.png")
            plot_power_spectrum(recording, f"{cli.output}/power_spectrum.png")
        else:
            # No output directory: show figures interactively.
            plot_probe(recording)
            plot_traces(recording)
            plot_power_spectrum(recording)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/neuropixels-analysis/scripts/explore_recording.py",
"license": "MIT License",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/neuropixels-analysis/scripts/export_to_phy.py | #!/usr/bin/env python
"""
Export sorting results to Phy for manual curation.
Usage:
python export_to_phy.py metrics/analyzer --output phy_export/
"""
import argparse
from pathlib import Path
import spikeinterface.full as si
from spikeinterface.exporters import export_to_phy
def export_phy(
    analyzer_path: str,
    output_dir: str,
    copy_binary: bool = True,
    compute_amplitudes: bool = True,
    compute_pc_features: bool = True,
    n_jobs: int = -1,
):
    """Export a sorting analyzer to Phy's template-gui format.

    Any extensions Phy relies on (spike amplitudes, principal components)
    are computed on the fly if missing before the export runs.

    Args:
        analyzer_path: Folder containing a saved sorting analyzer.
        output_dir: Destination folder for the Phy export.
        copy_binary: Copy the recording binary alongside the export.
        compute_amplitudes: Ensure/export spike amplitudes.
        compute_pc_features: Ensure/export PC features.
        n_jobs: Parallel jobs forwarded to the exporter (-1 = all cores).
    """
    print(f"Loading analyzer from: {analyzer_path}")
    analyzer = si.load_sorting_analyzer(analyzer_path)
    print(f"Units: {len(analyzer.sorting.unit_ids)}")
    destination = Path(output_dir)
    # Fill in missing extensions only when the caller asked for them.
    if compute_amplitudes and analyzer.get_extension('spike_amplitudes') is None:
        print("Computing spike amplitudes...")
        analyzer.compute('spike_amplitudes')
    if compute_pc_features and analyzer.get_extension('principal_components') is None:
        print("Computing principal components...")
        analyzer.compute('principal_components', n_components=5, mode='by_channel_local')
    print(f"Exporting to Phy: {destination}")
    export_to_phy(
        analyzer,
        output_folder=destination,
        copy_binary=copy_binary,
        compute_amplitudes=compute_amplitudes,
        compute_pc_features=compute_pc_features,
        n_jobs=n_jobs,
    )
    print("\nExport complete!")
    print("To open in Phy, run:")
    print(f" phy template-gui {destination / 'params.py'}")
def main():
    """Parse CLI arguments and run the Phy export."""
    cli = argparse.ArgumentParser(description='Export to Phy')
    cli.add_argument('analyzer', help='Path to sorting analyzer')
    cli.add_argument('--output', '-o', default='phy_export/', help='Output directory')
    cli.add_argument('--no-binary', action='store_true', help='Skip copying binary file')
    cli.add_argument('--no-amplitudes', action='store_true', help='Skip amplitude computation')
    cli.add_argument('--no-pc', action='store_true', help='Skip PC feature computation')
    cli.add_argument('--n-jobs', type=int, default=-1, help='Number of parallel jobs')
    opts = cli.parse_args()
    # The CLI exposes negative flags; invert them into the positive
    # booleans that export_phy expects.
    export_phy(
        opts.analyzer,
        opts.output,
        copy_binary=not opts.no_binary,
        compute_amplitudes=not opts.no_amplitudes,
        compute_pc_features=not opts.no_pc,
        n_jobs=opts.n_jobs,
    )


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/neuropixels-analysis/scripts/export_to_phy.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/neuropixels-analysis/scripts/neuropixels_pipeline.py | #!/usr/bin/env python3
"""
Neuropixels Data Analysis Pipeline (Best Practices Version)
Based on SpikeInterface, Allen Institute, and IBL recommendations.
Usage:
python neuropixels_pipeline.py /path/to/spikeglx/data /path/to/output
References:
- https://spikeinterface.readthedocs.io/en/stable/how_to/analyze_neuropixels.html
- https://github.com/AllenInstitute/ecephys_spike_sorting
"""
import argparse
from pathlib import Path
import json
import spikeinterface.full as si
import numpy as np
def load_recording(data_path: str, stream_id: str = 'imec0.ap') -> si.BaseRecording:
    """Load a SpikeGLX or Open Ephys recording, auto-detecting the format.

    SpikeGLX datasets are recognized by *.ap.bin / *.ap.meta files,
    Open Ephys datasets by their *.oebin structure file.
    """
    data_path = Path(data_path)
    looks_like_spikeglx = (
        any(data_path.rglob('*.ap.bin')) or any(data_path.rglob('*.ap.meta'))
    )
    if looks_like_spikeglx:
        streams, _ = si.get_neo_streams('spikeglx', data_path)
        print(f"Available streams: {streams}")
        rec = si.read_spikeglx(data_path, stream_id=stream_id)
    elif any(data_path.rglob('*.oebin')):
        rec = si.read_openephys(data_path)
    else:
        raise ValueError(f"Unknown format in {data_path}")
    print(f"Loaded recording:")
    print(f" Channels: {rec.get_num_channels()}")
    print(f" Duration: {rec.get_total_duration():.2f} s")
    print(f" Sampling rate: {rec.get_sampling_frequency()} Hz")
    return rec
def preprocess(
    recording: si.BaseRecording,
    apply_phase_shift: bool = True,
    freq_min: float = 400.,
) -> tuple:
    """
    Apply standard Neuropixels preprocessing.

    Following SpikeInterface recommendations:
      1. High-pass filter at 400 Hz (not 300)
      2. Detect and remove bad channels
      3. Phase shift (NP 1.0 only)
      4. Common median reference

    Returns:
        (preprocessed_recording, bad_channel_ids)
    """
    print("Preprocessing...")
    # 1. High-pass filter to strip LFP content before sorting.
    processed = si.highpass_filter(recording, freq_min=freq_min)
    print(f" Applied high-pass filter at {freq_min} Hz")
    # 2. Drop channels flagged as bad by SpikeInterface's detector.
    bad_channel_ids, channel_labels = si.detect_bad_channels(processed)
    n_bad = len(bad_channel_ids)
    if n_bad == 0:
        print(" No bad channels detected")
    else:
        print(f" Detected {n_bad} bad channels: {bad_channel_ids}")
        processed = processed.remove_channels(bad_channel_ids)
    # 3. Per-channel ADC phase-shift correction (Neuropixels 1.0).
    if apply_phase_shift:
        processed = si.phase_shift(processed)
        print(" Applied phase shift correction")
    # 4. Common median reference across the whole probe.
    processed = si.common_reference(processed, operator='median', reference='global')
    print(" Applied common median reference")
    return processed, bad_channel_ids
def check_drift(recording: si.BaseRecording, output_folder: str) -> dict:
    """
    Detect peaks and check for drift before spike sorting.

    Detects threshold crossings across the probe, localizes them in depth,
    writes a time-vs-depth scatter plot (drift_check.png) for visual
    inspection, and returns a coarse drift-magnitude estimate.

    Args:
        recording: Preprocessed recording to scan for peaks.
        output_folder: Directory where the drift plot is saved.

    Returns:
        dict with keys 'peaks', 'peak_locations' and 'drift_estimate'
        (the 5th-to-95th percentile spread of peak depths).
    """
    print("Checking for drift...")
    from spikeinterface.sortingcomponents.peak_detection import detect_peaks
    from spikeinterface.sortingcomponents.peak_localization import localize_peaks
    job_kwargs = dict(n_jobs=8, chunk_duration='1s', progress_bar=True)
    # Noise levels feed the peak detector's per-channel threshold.
    noise_levels = si.get_noise_levels(recording, return_in_uV=False)
    # Detect peaks (spike-like threshold crossings) over the whole recording.
    peaks = detect_peaks(
        recording,
        method='locally_exclusive',
        noise_levels=noise_levels,
        detect_threshold=5,
        radius_um=50.,
        **job_kwargs
    )
    print(f" Detected {len(peaks)} peaks")
    # Estimate each peak's depth on the probe.
    peak_locations = localize_peaks(
        recording, peaks,
        method='center_of_mass',
        **job_kwargs
    )
    # Save drift plot: activity depth over time; drift appears as slow
    # vertical movement of the bands.
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(figsize=(12, 6))
    # Subsample for plotting to keep the figure manageable.
    n_plot = min(100000, len(peaks))
    idx = np.random.choice(len(peaks), n_plot, replace=False)
    ax.scatter(
        peaks['sample_index'][idx] / recording.get_sampling_frequency(),
        peak_locations['y'][idx],
        s=1, alpha=0.1, c='k'
    )
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('Depth (ΞΌm)')
    ax.set_title('Peak Activity (Check for Drift)')
    plt.savefig(f'{output_folder}/drift_check.png', dpi=150, bbox_inches='tight')
    plt.close()
    print(f" Saved drift plot to {output_folder}/drift_check.png")
    # Estimate drift magnitude as the central 90% spread of peak depths.
    y_positions = peak_locations['y']
    drift_estimate = np.percentile(y_positions, 95) - np.percentile(y_positions, 5)
    print(f" Estimated drift range: {drift_estimate:.1f} ΞΌm")
    return {
        'peaks': peaks,
        'peak_locations': peak_locations,
        'drift_estimate': drift_estimate
    }
def correct_motion(
    recording: si.BaseRecording,
    output_folder: str,
    preset: str = 'nonrigid_fast_and_accurate'
) -> si.BaseRecording:
    """Run SpikeInterface motion (drift) correction on a recording.

    Motion-estimation artifacts are written to `<output_folder>/motion`.
    Returns the motion-corrected recording.
    """
    print(f"Applying motion correction (preset: {preset})...")
    corrected = si.correct_motion(
        recording,
        preset=preset,
        folder=f'{output_folder}/motion',
        output_motion_info=True,
        n_jobs=8,
        chunk_duration='1s',
        progress_bar=True
    )
    print(" Motion correction complete")
    return corrected
def run_spike_sorting(
    recording: si.BaseRecording,
    output_folder: str,
    sorter: str = 'kilosort4'
) -> si.BaseSorting:
    """Run a spike sorter and report unit/spike counts.

    Sorter output is written to `<output_folder>/sorting_<sorter>`.
    """
    print(f"Running spike sorting with {sorter}...")
    sorter_folder = f'{output_folder}/sorting_{sorter}'
    result = si.run_sorter(
        sorter,
        recording,
        folder=sorter_folder,
        verbose=True
    )
    print(f" Found {len(result.unit_ids)} units")
    print(f" Total spikes: {result.get_total_num_spikes()}")
    return result
def postprocess(
    sorting: si.BaseSorting,
    recording: si.BaseRecording,
    output_folder: str
) -> tuple:
    """Run post-processing and compute quality metrics.

    Builds a SortingAnalyzer on disk, computes the standard extension chain
    (waveforms, templates, amplitudes, correlograms, locations, similarity)
    and finally the per-unit quality metrics.

    Args:
        sorting: Spike-sorting result.
        recording: Recording the sorting was produced from.
        output_folder: Directory where the analyzer folder is created.

    Returns:
        (analyzer, quality_metrics) where quality_metrics is the table
        returned by the 'quality_metrics' extension.
    """
    print("Post-processing...")
    job_kwargs = dict(n_jobs=8, chunk_duration='1s', progress_bar=True)
    # Create analyzer (persisted to disk as a binary folder).
    analyzer = si.create_sorting_analyzer(
        sorting, recording,
        sparse=True,
        format='binary_folder',
        folder=f'{output_folder}/analyzer'
    )
    # Compute extensions (order matters: random_spikes/waveforms/templates
    # must exist before amplitudes, locations and metrics can be derived).
    print(" Computing waveforms...")
    analyzer.compute('random_spikes', method='uniform', max_spikes_per_unit=500)
    analyzer.compute('waveforms', ms_before=1.5, ms_after=2.0, **job_kwargs)
    analyzer.compute('templates', operators=['average', 'std'])
    analyzer.compute('noise_levels')
    print(" Computing spike features...")
    analyzer.compute('spike_amplitudes', **job_kwargs)
    analyzer.compute('correlograms', window_ms=100, bin_ms=1)
    analyzer.compute('unit_locations', method='monopolar_triangulation')
    analyzer.compute('template_similarity')
    print(" Computing quality metrics...")
    analyzer.compute('quality_metrics')
    qm = analyzer.get_extension('quality_metrics').get_data()
    return analyzer, qm
def curate_units(qm, method: str = 'allen') -> dict:
    """
    Classify units based on quality metrics.

    Args:
        qm: Quality-metrics table (pandas DataFrame indexed by unit id) with
            at least the columns: snr, presence_ratio, isi_violations_ratio,
            amplitude_cutoff, firing_rate.
        method: Curation standard to apply:
            'allen': Allen Institute defaults (more permissive)
            'ibl': IBL standards
            'strict': Strict single-unit criteria

    Returns:
        dict mapping unit_id -> label ('noise', 'good', 'mua' or 'unsorted').

    Raises:
        ValueError: If `method` is not a supported curation standard.
            (Previously an unknown method silently left non-noise units
            unlabeled, which broke downstream consumers of the dict.)
    """
    if method not in ('allen', 'ibl', 'strict'):
        raise ValueError(f"Unknown curation method: {method!r}")
    print(f"Curating units (method: {method})...")
    labels = {}
    for unit_id in qm.index:
        row = qm.loc[unit_id]
        # Noise detection (universal): very low SNR units are never usable.
        if row['snr'] < 1.5:
            labels[unit_id] = 'noise'
            continue
        if method == 'allen':
            # Allen Institute defaults
            if (row['presence_ratio'] > 0.9 and
                    row['isi_violations_ratio'] < 0.5 and
                    row['amplitude_cutoff'] < 0.1):
                labels[unit_id] = 'good'
            elif row['isi_violations_ratio'] > 0.5:
                labels[unit_id] = 'mua'
            else:
                labels[unit_id] = 'unsorted'
        elif method == 'ibl':
            # IBL standards
            if (row['presence_ratio'] > 0.9 and
                    row['isi_violations_ratio'] < 0.1 and
                    row['amplitude_cutoff'] < 0.1 and
                    row['firing_rate'] > 0.1):
                labels[unit_id] = 'good'
            elif row['isi_violations_ratio'] > 0.1:
                labels[unit_id] = 'mua'
            else:
                labels[unit_id] = 'unsorted'
        elif method == 'strict':
            # Strict single-unit criteria
            if (row['snr'] > 5 and
                    row['presence_ratio'] > 0.95 and
                    row['isi_violations_ratio'] < 0.01 and
                    row['amplitude_cutoff'] < 0.01):
                labels[unit_id] = 'good'
            elif row['isi_violations_ratio'] > 0.05:
                labels[unit_id] = 'mua'
            else:
                labels[unit_id] = 'unsorted'
    # Summary of label counts for quick inspection.
    from collections import Counter
    counts = Counter(labels.values())
    print(f" Classification: {dict(counts)}")
    return labels
def export_results(
    analyzer,
    sorting,
    recording,
    labels: dict,
    output_folder: str
):
    """Export sorting results: Phy folder, figure report, metrics and summary.

    Args:
        analyzer: SortingAnalyzer with computed extensions (incl. quality_metrics).
        sorting: The sorting object (all units).
        recording: Recording used for sorting (only for summary stats).
        labels: Mapping unit_id -> curation label ('good', 'mua', ...).
        output_folder: Destination directory for all exports.
    """
    print("Exporting results...")
    # Units that passed curation; used for the summary counts below.
    # (A dead `sorting.select_units(good_ids)` result that was never used
    # has been removed.)
    good_ids = [u for u, l in labels.items() if l == 'good']
    # Export to Phy for manual curation.
    phy_folder = f'{output_folder}/phy_export'
    si.export_to_phy(analyzer, phy_folder,
                     compute_pc_features=True,
                     compute_amplitudes=True)
    print(f" Phy export: {phy_folder}")
    # Generate static figure report.
    report_folder = f'{output_folder}/report'
    si.export_report(analyzer, report_folder, format='png')
    print(f" Report: {report_folder}")
    # Save quality metrics table.
    qm = analyzer.get_extension('quality_metrics').get_data()
    qm.to_csv(f'{output_folder}/quality_metrics.csv')
    # Persist curation labels (JSON object keys must be strings).
    with open(f'{output_folder}/unit_labels.json', 'w') as f:
        json.dump({str(k): v for k, v in labels.items()}, f, indent=2)
    # High-level run summary for quick inspection.
    summary = {
        'total_units': len(sorting.unit_ids),
        'good_units': len(good_ids),
        'total_spikes': int(sorting.get_total_num_spikes()),
        'duration_s': float(recording.get_total_duration()),
        'n_channels': int(recording.get_num_channels()),
    }
    with open(f'{output_folder}/summary.json', 'w') as f:
        json.dump(summary, f, indent=2)
    print(f" Summary: {summary}")
def run_pipeline(
    data_path: str,
    output_path: str,
    sorter: str = 'kilosort4',
    stream_name: str = 'imec0.ap',
    apply_motion_correction: bool = True,
    curation_method: str = 'allen'
):
    """Run complete Neuropixels analysis pipeline.

    Steps: load -> preprocess -> drift check -> (optional) motion correction
    -> spike sorting -> post-processing -> curation -> export.

    Args:
        data_path: SpikeGLX/Open Ephys recording directory.
        output_path: Directory for all pipeline outputs (created if missing).
        sorter: Spike sorter name passed to SpikeInterface.
        stream_name: Stream to load (e.g. 'imec0.ap').
        apply_motion_correction: Allow motion correction when estimated
            drift exceeds 20 um.
        curation_method: Unit-labeling standard ('allen', 'ibl' or 'strict').

    Returns:
        (analyzer, sorting, quality_metrics, labels)
    """
    output_path = Path(output_path)
    output_path.mkdir(parents=True, exist_ok=True)
    # 1. Load data
    recording = load_recording(data_path, stream_name)
    # 2. Preprocess
    rec_preprocessed, bad_channels = preprocess(recording)
    # Save preprocessed copy so later steps read a fast binary cache.
    preproc_folder = output_path / 'preprocessed'
    job_kwargs = dict(n_jobs=8, chunk_duration='1s', progress_bar=True)
    rec_preprocessed = rec_preprocessed.save(
        folder=str(preproc_folder),
        format='binary',
        **job_kwargs
    )
    # 3. Check drift
    drift_info = check_drift(rec_preprocessed, str(output_path))
    # 4. Motion correction — only applied above a 20 um drift estimate.
    if apply_motion_correction and drift_info['drift_estimate'] > 20:
        print(f"Drift > 20 ΞΌm detected, applying motion correction...")
        rec_final = correct_motion(rec_preprocessed, str(output_path))
    else:
        print("Skipping motion correction (low drift)")
        rec_final = rec_preprocessed
    # 5. Spike sorting
    sorting = run_spike_sorting(rec_final, str(output_path), sorter)
    # 6. Post-processing
    analyzer, qm = postprocess(sorting, rec_final, str(output_path))
    # 7. Curation
    labels = curate_units(qm, method=curation_method)
    # 8. Export
    export_results(analyzer, sorting, rec_final, labels, str(output_path))
    print("\n" + "="*50)
    print("Pipeline complete!")
    print(f"Output directory: {output_path}")
    print("="*50)
    return analyzer, sorting, qm, labels
if __name__ == '__main__':
    # Command-line front end for run_pipeline().
    cli = argparse.ArgumentParser(
        description='Neuropixels analysis pipeline (best practices)'
    )
    cli.add_argument('data_path', help='Path to SpikeGLX/OpenEphys recording')
    cli.add_argument('output_path', help='Output directory')
    cli.add_argument('--sorter', default='kilosort4',
                     choices=['kilosort4', 'kilosort3', 'spykingcircus2', 'mountainsort5'],
                     help='Spike sorter to use')
    cli.add_argument('--stream', default='imec0.ap', help='Stream name')
    cli.add_argument('--no-motion-correction', action='store_true',
                     help='Skip motion correction')
    cli.add_argument('--curation', default='allen',
                     choices=['allen', 'ibl', 'strict'],
                     help='Curation method')
    opts = cli.parse_args()
    run_pipeline(
        opts.data_path,
        opts.output_path,
        sorter=opts.sorter,
        stream_name=opts.stream,
        apply_motion_correction=not opts.no_motion_correction,
        curation_method=opts.curation
    )
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/neuropixels-analysis/scripts/neuropixels_pipeline.py",
"license": "MIT License",
"lines": 344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/neuropixels-analysis/scripts/preprocess_recording.py | #!/usr/bin/env python
"""
Preprocess Neuropixels recording.
Usage:
python preprocess_recording.py /path/to/data --output preprocessed/ --format spikeglx
"""
import argparse
from pathlib import Path
import spikeinterface.full as si
def preprocess_recording(
    input_path: str,
    output_dir: str,
    format: str = 'auto',
    stream_id: str = None,
    freq_min: float = 300,
    freq_max: float = 6000,
    phase_shift: bool = True,
    common_ref: bool = True,
    detect_bad: bool = True,
    n_jobs: int = -1,
):
    """Preprocess a Neuropixels recording.

    Loads the recording (auto-detecting the format when possible), applies
    a bandpass filter, optional phase-shift correction, bad-channel removal
    and common median referencing, then saves the result plus the probe
    geometry.

    Args:
        input_path: Path to the raw recording.
        output_dir: Directory where 'preprocessed/' and 'probe.json' are written.
        format: 'auto', 'spikeglx', 'openephys' or 'nwb'.
            (Parameter name shadows the builtin; kept for API compatibility.)
        stream_id: Stream to load for multi-probe SpikeGLX recordings.
        freq_min: Highpass cutoff in Hz.
        freq_max: Lowpass cutoff in Hz.
        phase_shift: Apply per-channel ADC phase-shift correction.
        common_ref: Apply global common median reference.
        detect_bad: Detect and remove bad channels.
        n_jobs: Parallel jobs for the save step (-1 = all cores).

    Returns:
        The preprocessed recording object.
    """
    print(f"Loading recording from: {input_path}")
    # Load recording
    if format == 'spikeglx' or (format == 'auto' and 'imec' in str(input_path).lower()):
        recording = si.read_spikeglx(input_path, stream_id=stream_id or 'imec0.ap')
    elif format == 'openephys':
        recording = si.read_openephys(input_path)
    elif format == 'nwb':
        recording = si.read_nwb(input_path)
    else:
        # Try auto-detection: SpikeGLX first, then a saved extractor folder.
        try:
            recording = si.read_spikeglx(input_path, stream_id=stream_id or 'imec0.ap')
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            recording = si.load_extractor(input_path)
    print(f"Recording: {recording.get_num_channels()} channels, {recording.get_total_duration():.1f}s")
    # Preprocessing chain
    rec = recording
    # Bandpass filter
    print(f"Applying bandpass filter ({freq_min}-{freq_max} Hz)...")
    rec = si.bandpass_filter(rec, freq_min=freq_min, freq_max=freq_max)
    # Phase shift correction (for Neuropixels ADC)
    if phase_shift:
        print("Applying phase shift correction...")
        rec = si.phase_shift(rec)
    # Bad channel detection
    if detect_bad:
        print("Detecting bad channels...")
        bad_channel_ids, bad_labels = si.detect_bad_channels(rec)
        if len(bad_channel_ids) > 0:
            print(f" Removing {len(bad_channel_ids)} bad channels: {bad_channel_ids[:10]}...")
            rec = rec.remove_channels(bad_channel_ids)
    # Common median reference
    if common_ref:
        print("Applying common median reference...")
        rec = si.common_reference(rec, operator='median', reference='global')
    # Save preprocessed recording to disk
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)
    print(f"Saving preprocessed recording to: {output_path}")
    rec.save(folder=output_path / 'preprocessed', n_jobs=n_jobs)
    # Save probe geometry alongside the data for downstream tools
    probe = rec.get_probe()
    if probe is not None:
        from probeinterface import write_probeinterface
        write_probeinterface(output_path / 'probe.json', probe)
    print("Done!")
    print(f" Output channels: {rec.get_num_channels()}")
    print(f" Output duration: {rec.get_total_duration():.1f}s")
    return rec
def main():
    """CLI wrapper around preprocess_recording()."""
    cli = argparse.ArgumentParser(description='Preprocess Neuropixels recording')
    cli.add_argument('input', help='Path to input recording')
    cli.add_argument('--output', '-o', default='preprocessed/', help='Output directory')
    cli.add_argument('--format', '-f', default='auto', choices=['auto', 'spikeglx', 'openephys', 'nwb'])
    cli.add_argument('--stream-id', default=None, help='Stream ID for multi-probe recordings')
    cli.add_argument('--freq-min', type=float, default=300, help='Highpass cutoff (Hz)')
    cli.add_argument('--freq-max', type=float, default=6000, help='Lowpass cutoff (Hz)')
    cli.add_argument('--no-phase-shift', action='store_true', help='Skip phase shift correction')
    cli.add_argument('--no-cmr', action='store_true', help='Skip common median reference')
    cli.add_argument('--no-bad-channel', action='store_true', help='Skip bad channel detection')
    cli.add_argument('--n-jobs', type=int, default=-1, help='Number of parallel jobs')
    opts = cli.parse_args()
    # Negative CLI flags become positive feature toggles here.
    preprocess_recording(
        opts.input,
        opts.output,
        format=opts.format,
        stream_id=opts.stream_id,
        freq_min=opts.freq_min,
        freq_max=opts.freq_max,
        phase_shift=not opts.no_phase_shift,
        common_ref=not opts.no_cmr,
        detect_bad=not opts.no_bad_channel,
        n_jobs=opts.n_jobs,
    )


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/neuropixels-analysis/scripts/preprocess_recording.py",
"license": "MIT License",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/neuropixels-analysis/scripts/run_sorting.py | #!/usr/bin/env python
"""
Run spike sorting on preprocessed recording.
Usage:
python run_sorting.py preprocessed/ --sorter kilosort4 --output sorting/
"""
import argparse
from pathlib import Path
import spikeinterface.full as si
# Default parameters for each sorter
# Per-sorter parameter overrides applied on top of each sorter's own defaults.
# The recordings fed to these sorters are already filtered/referenced by the
# preprocessing step, so sorter-internal preprocessing is disabled.
SORTER_DEFAULTS = {
    'kilosort4': {
        'batch_size': 30000,  # samples per processing batch
        'nblocks': 1,
        # Detection thresholds — presumably Kilosort4's learned/universal
        # template thresholds; verify against the Kilosort4 docs.
        'Th_learned': 8,
        'Th_universal': 9,
    },
    'kilosort3': {
        'do_CAR': False,  # Already done in preprocessing
    },
    'spykingcircus2': {
        'apply_preprocessing': False,  # recording is already preprocessed
    },
    'mountainsort5': {
        'filter': False,  # already bandpass-filtered upstream
        'whiten': False,
    },
}
def run_sorting(
    input_path: str,
    output_dir: str,
    sorter: str = 'kilosort4',
    sorter_params: dict = None,
    n_jobs: int = -1,
):
    """Run spike sorting on a preprocessed recording.

    Args:
        input_path: Directory containing a 'preprocessed/' saved recording.
        output_dir: Directory for sorter output and the saved sorting.
        sorter: Sorter name ('kilosort4', 'kilosort3', ...).
        sorter_params: Overrides merged on top of SORTER_DEFAULTS.
        n_jobs: Parallel jobs for SpikeInterface's chunked operations.

    Returns:
        The sorting object.
    """
    print(f"Loading preprocessed recording from: {input_path}")
    recording = si.load_extractor(Path(input_path) / 'preprocessed')
    print(f"Recording: {recording.get_num_channels()} channels, {recording.get_total_duration():.1f}s")
    # BUGFIX: n_jobs was accepted but never used; make it effective for all
    # parallel SpikeInterface steps via the global job kwargs.
    si.set_global_job_kwargs(n_jobs=n_jobs)
    # Start from the per-sorter defaults and apply caller overrides.
    params = SORTER_DEFAULTS.get(sorter, {}).copy()
    if sorter_params:
        params.update(sorter_params)
    print(f"Running {sorter} with params: {params}")
    output_path = Path(output_dir)
    # Run sorter (note: parameter is 'folder' not 'output_folder' in newer SpikeInterface)
    sorting = si.run_sorter(
        sorter,
        recording,
        folder=output_path / f'{sorter}_output',
        verbose=True,
        **params,
    )
    print(f"\nSorting complete!")
    print(f" Units found: {len(sorting.unit_ids)}")
    print(f" Total spikes: {sum(len(sorting.get_unit_spike_train(uid)) for uid in sorting.unit_ids)}")
    # Save sorting in SpikeInterface's native format
    sorting.save(folder=output_path / 'sorting')
    print(f" Saved to: {output_path / 'sorting'}")
    return sorting
def main():
    """CLI wrapper around run_sorting()."""
    cli = argparse.ArgumentParser(description='Run spike sorting')
    cli.add_argument('input', help='Path to preprocessed recording')
    cli.add_argument('--output', '-o', default='sorting/', help='Output directory')
    cli.add_argument('--sorter', '-s', default='kilosort4',
                     choices=['kilosort4', 'kilosort3', 'spykingcircus2', 'mountainsort5'])
    cli.add_argument('--n-jobs', type=int, default=-1, help='Number of parallel jobs')
    opts = cli.parse_args()
    run_sorting(
        opts.input,
        opts.output,
        sorter=opts.sorter,
        n_jobs=opts.n_jobs,
    )


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/neuropixels-analysis/scripts/run_sorting.py",
"license": "MIT License",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/openalex-database/scripts/openalex_client.py | #!/usr/bin/env python3
"""
OpenAlex API Client with rate limiting and error handling.
Provides a robust client for interacting with the OpenAlex API with:
- Automatic rate limiting (polite pool: 10 req/sec)
- Exponential backoff retry logic
- Pagination support
- Batch operations support
"""
import time
import requests
from typing import Dict, List, Optional, Any
from urllib.parse import urljoin
class OpenAlexClient:
    """Client for OpenAlex API with rate limiting and error handling."""

    BASE_URL = "https://api.openalex.org"

    def __init__(self, email: Optional[str] = None, requests_per_second: int = 10):
        """
        Initialize OpenAlex client.

        Args:
            email: Email for polite pool (10x rate limit boost)
            requests_per_second: Max requests per second (default: 10 for polite pool)
        """
        self.email = email
        self.requests_per_second = requests_per_second
        # Minimum spacing between consecutive requests; enforced by _rate_limit().
        self.min_delay = 1.0 / requests_per_second
        self.last_request_time = 0

    def _rate_limit(self):
        """Sleep just long enough to keep under the configured request rate."""
        current_time = time.time()
        time_since_last = current_time - self.last_request_time
        if time_since_last < self.min_delay:
            time.sleep(self.min_delay - time_since_last)
        self.last_request_time = time.time()

    def _make_request(
        self,
        endpoint: str,
        params: Optional[Dict] = None,
        max_retries: int = 5
    ) -> Dict[str, Any]:
        """
        Make API request with retry logic.

        Retries with exponential backoff on rate limiting (403/429),
        server errors (5xx) and timeouts; other HTTP errors raise at once.

        Args:
            endpoint: API endpoint (e.g., '/works', '/authors')
            params: Query parameters (the caller's dict is not mutated)
            max_retries: Maximum number of retry attempts

        Returns:
            JSON response as dictionary

        Raises:
            requests.HTTPError: For non-retryable HTTP status codes.
            Exception: When all retries are exhausted.
        """
        # BUGFIX: copy instead of mutating the caller's dict (callers such
        # as paginate_all reuse and mutate their params across requests).
        params = dict(params) if params else {}
        # Add email to params for polite pool
        if self.email:
            params['mailto'] = self.email
        url = urljoin(self.BASE_URL, endpoint)
        for attempt in range(max_retries):
            try:
                self._rate_limit()
                response = requests.get(url, params=params, timeout=30)
                if response.status_code == 200:
                    return response.json()
                elif response.status_code in (403, 429):
                    # Rate limited (OpenAlex uses 403; 429 is the standard code)
                    wait_time = 2 ** attempt
                    print(f"Rate limited. Waiting {wait_time}s before retry...")
                    time.sleep(wait_time)
                elif response.status_code >= 500:
                    # Server error — transient, retry with backoff
                    wait_time = 2 ** attempt
                    print(f"Server error. Waiting {wait_time}s before retry...")
                    time.sleep(wait_time)
                else:
                    # Other error - don't retry
                    response.raise_for_status()
            except requests.exceptions.Timeout:
                if attempt < max_retries - 1:
                    wait_time = 2 ** attempt
                    print(f"Request timeout. Waiting {wait_time}s before retry...")
                    time.sleep(wait_time)
                else:
                    raise
        raise Exception(f"Failed after {max_retries} retries")

    def search_works(
        self,
        search: Optional[str] = None,
        filter_params: Optional[Dict] = None,
        per_page: int = 200,
        page: int = 1,
        sort: Optional[str] = None,
        select: Optional[List[str]] = None
    ) -> Dict[str, Any]:
        """
        Search works with filters.

        Args:
            search: Full-text search query
            filter_params: Dictionary of filter parameters
            per_page: Results per page (max: 200)
            page: Page number
            sort: Sort parameter (e.g., 'cited_by_count:desc')
            select: List of fields to return

        Returns:
            API response with meta and results
        """
        params = {
            'per-page': min(per_page, 200),  # API caps page size at 200
            'page': page
        }
        if search:
            params['search'] = search
        if filter_params:
            # OpenAlex expects a single comma-joined "key:value" filter string
            filter_str = ','.join([f"{k}:{v}" for k, v in filter_params.items()])
            params['filter'] = filter_str
        if sort:
            params['sort'] = sort
        if select:
            params['select'] = ','.join(select)
        return self._make_request('/works', params)

    def get_entity(self, entity_type: str, entity_id: str) -> Dict[str, Any]:
        """
        Get single entity by ID.

        Args:
            entity_type: Type of entity ('works', 'authors', 'institutions', etc.)
            entity_id: OpenAlex ID or external ID (DOI, ORCID, etc.)

        Returns:
            Entity object
        """
        endpoint = f"/{entity_type}/{entity_id}"
        return self._make_request(endpoint)

    def batch_lookup(
        self,
        entity_type: str,
        ids: List[str],
        id_field: str = 'openalex_id'
    ) -> List[Dict[str, Any]]:
        """
        Look up multiple entities by ID efficiently.

        Args:
            entity_type: Type of entity ('works', 'authors', etc.)
            ids: List of IDs (processed in batches of 50)
            id_field: ID field name ('openalex_id', 'doi', 'orcid', etc.)

        Returns:
            List of entity objects
        """
        all_results = []
        # Process in batches of 50 using OR-joined ("|") filter values
        for i in range(0, len(ids), 50):
            batch = ids[i:i+50]
            filter_value = '|'.join(batch)
            params = {
                'filter': f"{id_field}:{filter_value}",
                'per-page': 50
            }
            response = self._make_request(f"/{entity_type}", params)
            all_results.extend(response.get('results', []))
        return all_results

    def paginate_all(
        self,
        endpoint: str,
        params: Optional[Dict] = None,
        max_results: Optional[int] = None
    ) -> List[Dict[str, Any]]:
        """
        Paginate through all results.

        Args:
            endpoint: API endpoint
            params: Query parameters (the caller's dict is not mutated)
            max_results: Maximum number of results to retrieve (None for all)

        Returns:
            List of all results
        """
        # Copy so the caller's dict is not mutated while paging.
        params = dict(params) if params else {}
        params['per-page'] = 200  # Use maximum page size
        params['page'] = 1
        all_results = []
        while True:
            response = self._make_request(endpoint, params)
            results = response.get('results', [])
            all_results.extend(results)
            # Stop early once max_results is reached
            if max_results and len(all_results) >= max_results:
                return all_results[:max_results]
            # Stop when the reported total has been collected
            meta = response.get('meta', {})
            total_count = meta.get('count', 0)
            if len(all_results) >= total_count:
                break
            params['page'] += 1
        return all_results

    def sample_works(
        self,
        sample_size: int,
        seed: Optional[int] = None,
        filter_params: Optional[Dict] = None
    ) -> List[Dict[str, Any]]:
        """
        Get random sample of works.

        Args:
            sample_size: Number of samples to retrieve
            seed: Random seed for reproducibility
            filter_params: Optional filters to apply

        Returns:
            List of sampled works
        """
        params = {
            'sample': min(sample_size, 10000),  # API limit per request
            'per-page': 200
        }
        if seed is not None:
            params['seed'] = seed
        if filter_params:
            filter_str = ','.join([f"{k}:{v}" for k, v in filter_params.items()])
            params['filter'] = filter_str
        # For large samples, need multiple requests with different seeds
        if sample_size > 10000:
            all_samples = []
            seen_ids = set()
            for i in range((sample_size // 10000) + 1):
                # BUGFIX: `if seed` treated seed=0 as "no seed"; test for None.
                current_seed = seed + i if seed is not None else i
                params['seed'] = current_seed
                params['sample'] = min(10000, sample_size - len(all_samples))
                response = self._make_request('/works', params)
                results = response.get('results', [])
                # Deduplicate — samples drawn with different seeds can overlap
                for result in results:
                    work_id = result.get('id')
                    if work_id not in seen_ids:
                        seen_ids.add(work_id)
                        all_samples.append(result)
                if len(all_samples) >= sample_size:
                    break
            return all_samples[:sample_size]
        else:
            response = self._make_request('/works', params)
            return response.get('results', [])

    def group_by(
        self,
        entity_type: str,
        group_field: str,
        filter_params: Optional[Dict] = None
    ) -> List[Dict[str, Any]]:
        """
        Aggregate results by field.

        Args:
            entity_type: Type of entity ('works', 'authors', etc.)
            group_field: Field to group by
            filter_params: Optional filters

        Returns:
            List of grouped results with counts
        """
        params = {
            'group_by': group_field
        }
        if filter_params:
            filter_str = ','.join([f"{k}:{v}" for k, v in filter_params.items()])
            params['filter'] = filter_str
        response = self._make_request(f"/{entity_type}", params)
        return response.get('group_by', [])
if __name__ == "__main__":
    # Example usage: search 2023 machine-learning works via the polite pool.
    demo = OpenAlexClient(email="your-email@example.com")
    response = demo.search_works(
        search="machine learning",
        filter_params={"publication_year": "2023"},
        per_page=10
    )
    print(f"Found {response['meta']['count']} works")
    for work in response['results']:
        print(f"- {work['title']}")
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/openalex-database/scripts/openalex_client.py",
"license": "MIT License",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/openalex-database/scripts/query_helpers.py | #!/usr/bin/env python3
"""
Helper functions for common OpenAlex query patterns.
Provides high-level functions for typical research queries.
"""
from typing import List, Dict, Optional, Any
from openalex_client import OpenAlexClient
def find_author_works(
    author_name: str,
    client: OpenAlexClient,
    limit: Optional[int] = None
) -> List[Dict[str, Any]]:
    """
    Find all works by an author (two-step pattern).

    First resolves the author name to an OpenAlex author ID, then fetches
    the works filtered by that ID.

    Args:
        author_name: Author name to search for
        client: OpenAlexClient instance
        limit: Maximum number of works to return

    Returns:
        List of works by the author (empty if no author matched).
    """
    # Step 1: resolve the name to an author record.
    matches = client._make_request(
        '/authors',
        params={'search': author_name, 'per-page': 1}
    ).get('results')
    if not matches:
        print(f"No author found for: {author_name}")
        return []
    author = matches[0]
    author_id = author['id'].split('/')[-1]  # OpenAlex IDs are URL-shaped
    print(f"Found author: {author['display_name']} (ID: {author_id})")
    # Step 2: fetch works filtered on that author ID.
    works_params = {
        'filter': f'authorships.author.id:{author_id}',
        'per-page': 200
    }
    if not limit or limit > 200:
        # Unbounded, or more than one page: delegate to pagination.
        return client.paginate_all('/works', works_params, max_results=limit)
    works_params['per-page'] = limit
    return client._make_request('/works', works_params).get('results', [])
def find_institution_works(
    institution_name: str,
    client: OpenAlexClient,
    limit: Optional[int] = None
) -> List[Dict[str, Any]]:
    """
    Find all works from an institution (two-step pattern).

    First resolves the institution name to an OpenAlex ID, then fetches
    works filtered by that institution ID.

    Args:
        institution_name: Institution name to search for
        client: OpenAlexClient instance
        limit: Maximum number of works to return

    Returns:
        List of works from the institution (empty if no match).
    """
    # Step 1: resolve the institution name to an ID.
    matches = client._make_request(
        '/institutions',
        params={'search': institution_name, 'per-page': 1}
    ).get('results')
    if not matches:
        print(f"No institution found for: {institution_name}")
        return []
    institution = matches[0]
    inst_id = institution['id'].split('/')[-1]  # OpenAlex IDs are URL-shaped
    print(f"Found institution: {institution['display_name']} (ID: {inst_id})")
    # Step 2: fetch the institution's works.
    works_params = {
        'filter': f'authorships.institutions.id:{inst_id}',
        'per-page': 200
    }
    if not limit or limit > 200:
        # Unbounded, or more than one page: delegate to pagination.
        return client.paginate_all('/works', works_params, max_results=limit)
    works_params['per-page'] = limit
    return client._make_request('/works', works_params).get('results', [])
def find_highly_cited_recent_papers(
    topic: Optional[str] = None,
    years: str = ">2020",
    client: Optional[OpenAlexClient] = None,
    limit: int = 100
) -> List[Dict[str, Any]]:
    """
    Find highly cited recent papers, optionally filtered by topic.

    Args:
        topic: Optional search term for topic filtering
        years: Year filter (e.g., ">2020", "2020-2023")
        client: OpenAlexClient instance (created on demand if omitted)
        limit: Maximum number of papers to return

    Returns:
        List of highly cited papers sorted by citation count (descending).
    """
    if client is None:
        client = OpenAlexClient()
    query = {
        'filter': f'publication_year:{years}',
        'sort': 'cited_by_count:desc',
        'per-page': min(limit, 200)
    }
    if topic:
        query['search'] = topic
    if limit > 200:
        # More results than one page can hold: paginate.
        return client.paginate_all('/works', query, max_results=limit)
    return client._make_request('/works', query).get('results', [])
def get_open_access_papers(
    search_term: str,
    client: OpenAlexClient,
    oa_status: str = "any",  # one of "any", "gold", "green", "hybrid", "bronze"
    limit: int = 100
) -> List[Dict[str, Any]]:
    """
    Find open access papers on a topic.

    Args:
        search_term: Search query
        client: OpenAlexClient instance
        oa_status: Type of OA ("any" for is_oa:true, or specific status)
        limit: Maximum number of papers to return

    Returns:
        List of open access papers
    """
    # "any" matches every OA flavor; otherwise filter on the specific status.
    oa_filter = "is_oa:true" if oa_status == "any" else f"open_access.oa_status:{oa_status}"
    query = {
        'search': search_term,
        'filter': oa_filter,
        'per-page': min(limit, 200)
    }
    if limit > 200:
        return client.paginate_all('/works', query, max_results=limit)
    page = client._make_request('/works', query)
    return page.get('results', [])
def get_publication_trends(
    search_term: Optional[str] = None,
    filter_params: Optional[Dict] = None,
    client: Optional[OpenAlexClient] = None
) -> List[Dict[str, Any]]:
    """
    Get publication counts by year.

    Args:
        search_term: Optional search query
        filter_params: Optional additional filters
        client: OpenAlexClient instance (one is created when omitted)

    Returns:
        List of {year, count} dictionaries
    """
    if client is None:
        client = OpenAlexClient()

    query: Dict[str, Any] = {'group_by': 'publication_year'}
    if search_term:
        query['search'] = search_term
    if filter_params:
        # OpenAlex expects filters as a comma-separated "key:value" list.
        query['filter'] = ','.join(f"{key}:{value}" for key, value in filter_params.items())
    return client._make_request('/works', query).get('group_by', [])
def analyze_research_output(
    entity_type: str,  # 'author' or 'institution'
    entity_name: str,
    client: OpenAlexClient,
    years: str = ">2020"
) -> Dict[str, Any]:
    """
    Analyze research output for an author or institution.

    Resolves the name to an OpenAlex entity, then gathers total work count,
    per-year publication counts, top topics, and open-access share, all
    scoped to the given year filter.

    Args:
        entity_type: 'author' or 'institution'
        entity_name: Name to search for
        client: OpenAlexClient instance
        years: Year filter

    Returns:
        Dictionary with analysis results, or {'error': ...} when no entity
        matches the name
    """
    # Map the entity kind to its search endpoint and works-filter key;
    # anything other than 'author' is treated as an institution.
    if entity_type == 'author':
        endpoint, id_filter_key = '/authors', 'authorships.author.id'
    else:
        endpoint, id_filter_key = '/institutions', 'authorships.institutions.id'

    # Step 1: resolve the name to an OpenAlex entity.
    match_page = client._make_request(
        endpoint,
        params={'search': entity_name, 'per-page': 1}
    )
    matches = match_page.get('results')
    if not matches:
        return {'error': f'No {entity_type} found for: {entity_name}'}
    record = matches[0]
    record_id = record['id'].split('/')[-1]

    # Step 2: gather statistics scoped to this entity and year range.
    base_filters = {
        id_filter_key: record_id,
        'publication_year': years
    }

    # Total works: a per_page=1 query is enough to read meta.count.
    total = client.search_works(
        filter_params=base_filters,
        per_page=1
    )['meta']['count']

    # Works grouped by publication year.
    yearly = client.group_by(
        'works',
        'publication_year',
        filter_params={id_filter_key: record_id, 'publication_year': years}
    )

    # Topic distribution.
    topic_counts = client.group_by(
        'works',
        'topics.id',
        filter_params=base_filters
    )

    # Open-access share of the same work set.
    oa_total = client.search_works(
        filter_params={**base_filters, 'is_oa': 'true'},
        per_page=1
    )['meta']['count']
    oa_share = (oa_total / total * 100) if total > 0 else 0

    return {
        'entity_name': record['display_name'],
        'entity_id': record_id,
        'total_works': total,
        'open_access_works': oa_total,
        'open_access_percentage': round(oa_share, 1),
        'publications_by_year': yearly[:10],  # first 10 buckets as returned
        'top_topics': topic_counts[:10]  # top 10 topics
    }
if __name__ == "__main__":
    # Example usage -- performs live OpenAlex API calls, so network access is
    # required. NOTE(review): the email is passed to OpenAlexClient, presumably
    # to identify the caller to the API; replace with a real address.
    import json
    client = OpenAlexClient(email="your-email@example.com")
    # Find works by author (find_author_works is defined earlier in this file).
    print("\n=== Finding works by author ===")
    works = find_author_works("Einstein", client, limit=5)
    print(f"Found {len(works)} works")
    # Analyze research output for an institution.
    print("\n=== Analyzing institution research output ===")
    analysis = analyze_research_output('institution', 'MIT', client)
    print(json.dumps(analysis, indent=2))
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/openalex-database/scripts/query_helpers.py",
"license": "MIT License",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/opentargets-database/scripts/query_opentargets.py | #!/usr/bin/env python3
"""
Open Targets Platform GraphQL Query Helper
This script provides reusable functions for querying the Open Targets Platform
GraphQL API. Use these functions to retrieve target, disease, drug, and
association data.
Dependencies: requests (pip install requests)
"""
import requests
import json
from typing import Dict, List, Optional, Any
# API endpoint
BASE_URL = "https://api.platform.opentargets.org/api/v4/graphql"
def execute_query(query: str, variables: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """
    Execute a GraphQL query against the Open Targets Platform API.

    Args:
        query: GraphQL query string
        variables: Optional dictionary of variables for the query

    Returns:
        The "data" payload of the API response (empty dict when absent)

    Raises:
        Exception: if the HTTP request fails or the API reports GraphQL
            errors. For transport failures the original requests exception
            is chained as ``__cause__`` so the full traceback is preserved.
    """
    payload: Dict[str, Any] = {"query": query}
    if variables:
        payload["variables"] = variables
    try:
        response = requests.post(BASE_URL, json=payload, timeout=30)
        response.raise_for_status()
        data = response.json()
    except requests.exceptions.RequestException as e:
        # Chain the underlying exception instead of discarding it, so callers
        # and logs can still see the root cause (timeout, DNS, HTTP status...).
        raise Exception(f"API request failed: {str(e)}") from e
    # GraphQL reports query-level problems in an "errors" key alongside an
    # HTTP 200, so this check is separate from the transport handling above.
    if "errors" in data:
        raise Exception(f"GraphQL errors: {data['errors']}")
    return data.get("data", {})
def search_entities(query_string: str, entity_types: Optional[List[str]] = None) -> List[Dict[str, Any]]:
    """
    Search for targets, diseases, or drugs by name or identifier.

    Args:
        query_string: Search term (e.g., "BRCA1", "alzheimer", "aspirin")
        entity_types: Optional list to filter by entity type ["target", "disease", "drug"]

    Returns:
        List of search results with id, entity type, name, and description.
        At most 10 hits are returned -- the page size is fixed in the query.
    """
    query = """
    query search($queryString: String!, $entityNames: [String!]) {
      search(queryString: $queryString, entityNames: $entityNames, page: {size: 10}) {
        hits {
          id
          entity
          name
          description
        }
      }
    }
    """
    variables: Dict[str, Any] = {"queryString": query_string}
    # $entityNames is optional in the query; omit it entirely when no filter
    # was requested so the API searches across all entity types.
    if entity_types:
        variables["entityNames"] = entity_types
    result = execute_query(query, variables)
    return result.get("search", {}).get("hits", [])
def get_target_info(ensembl_id: str, include_diseases: bool = False) -> Dict[str, Any]:
    """
    Retrieve comprehensive information about a target gene.

    Args:
        ensembl_id: Ensembl gene ID (e.g., "ENSG00000157764")
        include_diseases: Whether to include top associated diseases
            (top 10 by association score)

    Returns:
        Dictionary with target information including tractability, safety
        liabilities, and genetic constraint; empty dict when the ID is unknown
    """
    # Optional sub-query, spliced into the request only when the caller asks
    # for associated diseases.
    disease_fragment = """
    associatedDiseases(page: {size: 10}) {
      rows {
        disease {
          id
          name
        }
        score
        datatypeScores {
          componentId
          score
        }
      }
    }
    """ if include_diseases else ""
    # Doubled braces below render as literal braces in the f-string output.
    query = f"""
    query targetInfo($ensemblId: String!) {{
      target(ensemblId: $ensemblId) {{
        id
        approvedSymbol
        approvedName
        biotype
        functionDescriptions
        tractability {{
          label
          modality
          value
        }}
        safetyLiabilities {{
          event
          effects {{
            dosing
            organsAffected
          }}
          biosamples {{
            tissue {{
              label
            }}
          }}
        }}
        geneticConstraint {{
          constraintType
          score
          exp
          obs
        }}
        {disease_fragment}
      }}
    }}
    """
    result = execute_query(query, {"ensemblId": ensembl_id})
    return result.get("target", {})
def get_disease_info(efo_id: str, include_targets: bool = False) -> Dict[str, Any]:
    """
    Retrieve information about a disease.

    Args:
        efo_id: EFO disease identifier (e.g., "EFO_0000249")
        include_targets: Whether to include top associated targets
            (top 10 by association score)

    Returns:
        Dictionary with disease information (name, description, therapeutic
        areas, synonyms); empty dict when the ID is unknown
    """
    # Optional sub-query, spliced into the request only on demand.
    target_fragment = """
    associatedTargets(page: {size: 10}) {
      rows {
        target {
          id
          approvedSymbol
          approvedName
        }
        score
        datatypeScores {
          componentId
          score
        }
      }
    }
    """ if include_targets else ""
    # Doubled braces render as literal braces in the f-string output.
    query = f"""
    query diseaseInfo($efoId: String!) {{
      disease(efoId: $efoId) {{
        id
        name
        description
        therapeuticAreas {{
          id
          name
        }}
        synonyms {{
          terms
        }}
        {target_fragment}
      }}
    }}
    """
    result = execute_query(query, {"efoId": efo_id})
    return result.get("disease", {})
def get_target_disease_evidence(ensembl_id: str, efo_id: str,
                                data_types: Optional[List[str]] = None) -> List[Dict[str, Any]]:
    """
    Retrieve evidence linking a target to a disease.

    Args:
        ensembl_id: Ensembl gene ID
        efo_id: EFO disease identifier
        data_types: Optional filter for evidence types
            (e.g., ["genetic_association", "known_drug"])

    Returns:
        List of evidence records with scores and sources.
        At most 100 rows are returned -- the size is fixed in the query.
    """
    query = """
    query evidences($ensemblId: String!, $efoId: String!, $dataTypes: [String!]) {
      disease(efoId: $efoId) {
        evidences(ensemblIds: [$ensemblId], datatypes: $dataTypes, size: 100) {
          rows {
            datasourceId
            datatypeId
            score
            targetFromSourceId
            studyId
            literature
            cohortPhenotypes
          }
        }
      }
    }
    """
    variables = {"ensemblId": ensembl_id, "efoId": efo_id}
    # $dataTypes is optional; omit it entirely when no filter was requested.
    if data_types:
        variables["dataTypes"] = data_types
    result = execute_query(query, variables)
    return result.get("disease", {}).get("evidences", {}).get("rows", [])
def get_known_drugs_for_disease(efo_id: str) -> Dict[str, Any]:
    """
    Get drugs known to be used for a disease.

    Args:
        efo_id: EFO disease identifier

    Returns:
        Dictionary with keys "uniqueDrugs", "uniqueTargets", and "rows"
        (per-drug phase, targets, status, mechanism of action); empty dict
        when the ID is unknown
    """
    query = """
    query knownDrugs($efoId: String!) {
      disease(efoId: $efoId) {
        knownDrugs {
          uniqueDrugs
          uniqueTargets
          rows {
            drug {
              id
              name
              drugType
              maximumClinicalTrialPhase
            }
            targets {
              id
              approvedSymbol
            }
            phase
            status
            mechanismOfAction
          }
        }
      }
    }
    """
    result = execute_query(query, {"efoId": efo_id})
    return result.get("disease", {}).get("knownDrugs", {})
def get_drug_info(chembl_id: str) -> Dict[str, Any]:
    """
    Retrieve information about a drug.

    Args:
        chembl_id: ChEMBL identifier (e.g., "CHEMBL25")

    Returns:
        Dictionary with drug information (type, max trial phase, withdrawal
        notice, mechanisms of action, indications); empty dict when the ID
        is unknown
    """
    query = """
    query drugInfo($chemblId: String!) {
      drug(chemblId: $chemblId) {
        id
        name
        synonyms
        drugType
        maximumClinicalTrialPhase
        hasBeenWithdrawn
        withdrawnNotice {
          reasons
          countries
        }
        mechanismsOfAction {
          actionType
          mechanismOfAction
          targetName
          targets {
            id
            approvedSymbol
          }
        }
        indications {
          disease
          efoId
          maxPhaseForIndication
        }
      }
    }
    """
    result = execute_query(query, {"chemblId": chembl_id})
    return result.get("drug", {})
def get_target_associations(ensembl_id: str, min_score: float = 0.0) -> List[Dict[str, Any]]:
    """
    Get all disease associations for a target, filtered by minimum score.

    Args:
        ensembl_id: Ensembl gene ID
        min_score: Minimum association score (0-1) to include

    Returns:
        List of disease associations with scores.
        Note: the query fetches at most the first 100 associations; the
        score threshold is then applied client-side to that page only.
    """
    query = """
    query targetAssociations($ensemblId: String!) {
      target(ensemblId: $ensemblId) {
        associatedDiseases(page: {size: 100}) {
          count
          rows {
            disease {
              id
              name
            }
            score
            datatypeScores {
              componentId
              score
            }
          }
        }
      }
    }
    """
    result = execute_query(query, {"ensemblId": ensembl_id})
    associations = result.get("target", {}).get("associatedDiseases", {}).get("rows", [])
    # Filter by minimum score (client-side; the API call itself is unfiltered).
    return [assoc for assoc in associations if assoc.get("score", 0) >= min_score]
# Example usage -- all three examples perform live API calls against the
# Open Targets Platform, so network access is required.
if __name__ == "__main__":
    # Example 1: Search for a gene
    print("Searching for BRCA1...")
    results = search_entities("BRCA1", entity_types=["target"])
    for result in results[:3]:
        print(f"  {result['name']} ({result['id']})")
    # Example 2: Get target information for the best hit from example 1
    if results:
        ensembl_id = results[0]['id']
        print(f"\nGetting info for {ensembl_id}...")
        target_info = get_target_info(ensembl_id, include_diseases=True)
        print(f"  Symbol: {target_info.get('approvedSymbol')}")
        print(f"  Name: {target_info.get('approvedName')}")
        # Show top diseases (only present because include_diseases=True)
        diseases = target_info.get('associatedDiseases', {}).get('rows', [])
        if diseases:
            print(f"\n  Top associated diseases:")
            for disease in diseases[:3]:
                print(f"    - {disease['disease']['name']} (score: {disease['score']:.2f})")
    # Example 3: Search for a disease and list known drugs for it
    print("\n\nSearching for Alzheimer's disease...")
    disease_results = search_entities("alzheimer", entity_types=["disease"])
    if disease_results:
        efo_id = disease_results[0]['id']
        print(f"  Found: {disease_results[0]['name']} ({efo_id})")
        # Get known drugs
        print(f"\n  Known drugs for {disease_results[0]['name']}:")
        drugs = get_known_drugs_for_disease(efo_id)
        for drug in drugs.get('rows', [])[:5]:
            print(f"    - {drug['drug']['name']} (Phase {drug['phase']})")
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/opentargets-database/scripts/query_opentargets.py",
"license": "MIT License",
"lines": 339,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/scientific/opentrons-integration/scripts/basic_protocol_template.py | #!/usr/bin/env python3
"""
Basic Opentrons Protocol Template
This template provides a minimal starting point for creating Opentrons protocols.
Replace the placeholder values and add your specific protocol logic.
"""
from opentrons import protocol_api
# Metadata shown in the Opentrons App protocol listing.
metadata = {
    'protocolName': 'Basic Protocol Template',
    'author': 'Your Name <email@example.com>',
    'description': 'A basic protocol template for Opentrons',
    'apiLevel': '2.19'
}
# Requirements: target robot model and the Python Protocol API version.
requirements = {
    'robotType': 'Flex',  # or 'OT-2'
    'apiLevel': '2.19'
}
def run(protocol: protocol_api.ProtocolContext):
    """
    Main protocol function.

    Loads one tip rack, a source and a destination plate, and a
    single-channel pipette, then performs one 50 uL transfer from source
    A1 to destination B1 as a minimal working example.

    Args:
        protocol: The protocol context provided by Opentrons
    """
    # Deck setup: tips on D1, plates on D2/D3.
    tip_rack = protocol.load_labware('opentrons_flex_96_tiprack_200ul', 'D1')
    src = protocol.load_labware(
        'nest_96_wellplate_200ul_flat',
        'D2',
        label='Source Plate'
    )
    dst = protocol.load_labware(
        'nest_96_wellplate_200ul_flat',
        'D3',
        label='Destination Plate'
    )
    # Single-channel pipette on the left mount, fed from the rack above.
    p300 = protocol.load_instrument(
        'p300_single_flex',
        'left',
        tip_racks=[tip_rack]
    )

    protocol.comment('Starting protocol...')
    # Example transfer: a fresh tip is picked up for the liquid move.
    p300.transfer(
        volume=50,
        source=src['A1'],
        dest=dst['B1'],
        new_tip='always'
    )
    protocol.comment('Protocol complete!')
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/opentrons-integration/scripts/basic_protocol_template.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/opentrons-integration/scripts/pcr_setup_template.py | #!/usr/bin/env python3
"""
PCR Setup Protocol Template
This template demonstrates how to set up PCR reactions using the Thermocycler module.
Includes master mix distribution, sample addition, and PCR cycling.
"""
from opentrons import protocol_api
# Metadata shown in the Opentrons App protocol listing.
metadata = {
    'protocolName': 'PCR Setup with Thermocycler',
    'author': 'Opentrons',
    'description': 'Automated PCR setup and cycling protocol',
    'apiLevel': '2.19'
}
# Requirements: Flex robot running Python Protocol API 2.19.
requirements = {
    'robotType': 'Flex',
    'apiLevel': '2.19'
}
def run(protocol: protocol_api.ProtocolContext):
    """
    Sets up PCR reactions and runs thermocycler.

    Protocol performs:
    1. Distributes master mix to PCR plate
    2. Adds DNA samples
    3. Runs PCR cycling program (initial denaturation, 35 cycles,
       final extension, 4 C hold)
    """
    # Load thermocycler module; the PCR plate sits on the module itself.
    tc_mod = protocol.load_module('thermocyclerModuleV2')
    tc_plate = tc_mod.load_labware('nest_96_wellplate_100ul_pcr_full_skirt')
    # Load tips and reagents.
    # NOTE(review): the rack on C1 is a 50 uL Flex tip rack although the
    # variable is named tips_20 -- confirm the intended tip size.
    tips_20 = protocol.load_labware('opentrons_flex_96_tiprack_50ul', 'C1')
    tips_200 = protocol.load_labware('opentrons_flex_96_tiprack_200ul', 'C2')
    reagent_rack = protocol.load_labware(
        'opentrons_24_tuberack_nest_1.5ml_snapcap',
        'D1',
        label='Reagents'
    )
    # Load pipettes.
    # NOTE(review): "p20" loads a p50_single_flex and "p300" a
    # p300_single_flex; the variable names do not match the instruments --
    # verify before reusing this template.
    p20 = protocol.load_instrument('p50_single_flex', 'left', tip_racks=[tips_20])
    p300 = protocol.load_instrument('p300_single_flex', 'right', tip_racks=[tips_200])
    # Define liquids (used for deck-map visualization in the Opentrons App).
    master_mix = protocol.define_liquid(
        name='PCR Master Mix',
        description='2x PCR master mix',
        display_color='#FFB6C1'
    )
    template_dna = protocol.define_liquid(
        name='Template DNA',
        description='DNA samples',
        display_color='#90EE90'
    )
    # Declare starting liquid locations/volumes: master mix in A1, then the
    # 8 sample tubes in the following rack positions (wells index 1..8).
    reagent_rack['A1'].load_liquid(liquid=master_mix, volume=1000)
    for i in range(8):  # 8 samples
        reagent_rack.wells()[i + 1].load_liquid(liquid=template_dna, volume=50)
    # PCR setup parameters (volumes in uL).
    num_samples = 8
    master_mix_volume = 20  # per reaction
    template_volume = 5  # per reaction
    total_reaction_volume = 25  # master mix + template
    protocol.comment('Starting PCR setup...')
    # Open thermocycler lid so the plate wells are reachable.
    tc_mod.open_lid()
    protocol.comment('Thermocycler lid opened')
    # Step 1: Distribute master mix with a single tip.
    protocol.comment(f'Distributing {master_mix_volume}Β΅L master mix to {num_samples} wells...')
    p300.distribute(
        master_mix_volume,
        reagent_rack['A1'],
        tc_plate.wells()[:num_samples],
        new_tip='once',
        disposal_volume=10  # Extra volume to prevent shortage
    )
    # Step 2: Add template DNA, one fresh tip per sample to avoid
    # cross-contamination.
    protocol.comment('Adding template DNA to each well...')
    for i in range(num_samples):
        p20.transfer(
            template_volume,
            reagent_rack.wells()[i + 1],  # Sample tubes
            tc_plate.wells()[i],  # PCR plate wells
            mix_after=(3, 10),  # Mix 3x with 10 uL
            new_tip='always'
        )
    protocol.comment('PCR reactions prepared')
    # Close lid and start PCR.
    tc_mod.close_lid()
    protocol.comment('Thermocycler lid closed')
    # Heated lid limits condensation on the plate seal during cycling.
    tc_mod.set_lid_temperature(celsius=105)
    protocol.comment('Lid heating to 105Β°C')
    # Initial denaturation: 95 C for 3 minutes.
    protocol.comment('Initial denaturation...')
    tc_mod.set_block_temperature(
        temperature=95,
        hold_time_seconds=180,
        block_max_volume=total_reaction_volume
    )
    # PCR cycling profile: denature / anneal / extend, repeated num_cycles times.
    protocol.comment('Starting PCR cycling...')
    profile = [
        {'temperature': 95, 'hold_time_seconds': 15},  # Denaturation
        {'temperature': 60, 'hold_time_seconds': 30},  # Annealing
        {'temperature': 72, 'hold_time_seconds': 30}  # Extension
    ]
    num_cycles = 35
    tc_mod.execute_profile(
        steps=profile,
        repetitions=num_cycles,
        block_max_volume=total_reaction_volume
    )
    # Final extension: 72 C for 5 minutes.
    protocol.comment('Final extension...')
    tc_mod.set_block_temperature(
        temperature=72,
        hold_time_minutes=5,
        block_max_volume=total_reaction_volume
    )
    # Hold at 4 C until the plate is removed.
    protocol.comment('Cooling to 4Β°C for storage...')
    tc_mod.set_block_temperature(
        temperature=4,
        block_max_volume=total_reaction_volume
    )
    # Deactivate the lid heater and open for plate removal; the block stays
    # at its last set temperature.
    tc_mod.deactivate_lid()
    tc_mod.open_lid()
    protocol.comment('PCR complete! Plate ready for removal.')
    protocol.comment(f'Completed {num_cycles} cycles for {num_samples} samples')
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/opentrons-integration/scripts/pcr_setup_template.py",
"license": "MIT License",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/opentrons-integration/scripts/serial_dilution_template.py | #!/usr/bin/env python3
"""
Serial Dilution Protocol Template
This template demonstrates how to perform a serial dilution across a plate row.
Useful for creating concentration gradients for assays.
"""
from opentrons import protocol_api
# Metadata shown in the Opentrons App protocol listing.
metadata = {
    'protocolName': 'Serial Dilution Template',
    'author': 'Opentrons',
    'description': 'Serial dilution protocol for creating concentration gradients',
    'apiLevel': '2.19'
}
# Requirements: Flex robot running Python Protocol API 2.19.
requirements = {
    'robotType': 'Flex',
    'apiLevel': '2.19'
}
def run(protocol: protocol_api.ProtocolContext):
    """
    Performs a serial dilution across plate rows.

    Protocol performs:
    1. Adds diluent to all wells except the first column
    2. Transfers stock solution to first column
    3. Performs serial dilutions across rows (1:2 per step)
    """
    # Load labware: tips on D1, reagent reservoir on D2, dilution plate on D3.
    tips = protocol.load_labware('opentrons_flex_96_tiprack_200ul', 'D1')
    reservoir = protocol.load_labware('nest_12_reservoir_15ml', 'D2', label='Reservoir')
    plate = protocol.load_labware('corning_96_wellplate_360ul_flat', 'D3', label='Dilution Plate')
    # Load pipette (single-channel, left mount).
    p300 = protocol.load_instrument('p300_single_flex', 'left', tip_racks=[tips])
    # Define liquids (optional, for deck-map visualization).
    diluent = protocol.define_liquid(
        name='Diluent',
        description='Buffer or growth media',
        display_color='#B0E0E6'
    )
    stock = protocol.define_liquid(
        name='Stock Solution',
        description='Concentrated stock',
        display_color='#FF6347'
    )
    # Declare starting liquid locations/volumes in the reservoir.
    reservoir['A1'].load_liquid(liquid=diluent, volume=15000)
    reservoir['A2'].load_liquid(liquid=stock, volume=5000)
    # Protocol parameters: transferring 100 uL into 100 uL of diluent gives
    # the 1:2 dilution per step.
    dilution_factor = 2  # 1:2 dilution
    transfer_volume = 100  # uL per transfer
    num_dilutions = 11  # Number of dilution steps
    protocol.comment('Starting serial dilution protocol')
    # Step 1: Add diluent to all wells except the first column.
    # NOTE(review): rows()[:8] is every row of a 96-well plate, so the slice
    # is redundant but harmless.
    protocol.comment('Adding diluent to wells...')
    for row in plate.rows()[:8]:  # For each row (A-H)
        p300.transfer(
            transfer_volume,
            reservoir['A1'],  # Diluent source
            row[1:],  # All wells except first (columns 2-12)
            new_tip='once'
        )
    # Step 2: Add stock solution to the first column. Double volume so that
    # column 1 matches the other columns' post-transfer volume.
    protocol.comment('Adding stock solution to first column...')
    p300.transfer(
        transfer_volume * 2,  # Double volume for first well
        reservoir['A2'],  # Stock source
        [row[0] for row in plate.rows()[:8]],  # First column (wells A1-H1)
        new_tip='always'
    )
    # Step 3: Perform the serial dilution, carrying 100 uL from each well
    # into the next one along the row.
    # NOTE(review): no volume is discarded from the last column, so column 12
    # ends with double the volume of the others -- confirm this is intended.
    protocol.comment('Performing serial dilutions...')
    for row in plate.rows()[:8]:  # For each row
        p300.transfer(
            transfer_volume,
            row[:num_dilutions],  # Source wells (1-11)
            row[1:num_dilutions + 1],  # Destination wells (2-12)
            mix_after=(3, 50),  # Mix 3x with 50 uL after each transfer
            new_tip='always'
        )
    protocol.comment('Serial dilution complete!')
    protocol.comment(f'Created {num_dilutions} dilutions with {dilution_factor}x dilution factor')
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/opentrons-integration/scripts/serial_dilution_template.py",
"license": "MIT License",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/perplexity-search/scripts/perplexity_search.py | #!/usr/bin/env python3
"""
Perplexity Search via LitLLM and OpenRouter
This script performs AI-powered web searches using Perplexity models through
LiteLLM and OpenRouter. It provides real-time, grounded answers with source citations.
Usage:
python perplexity_search.py "search query" [options]
Requirements:
- OpenRouter API key set in OPENROUTER_API_KEY environment variable
- LiteLLM installed: uv pip install litellm
Author: Scientific Skills
License: MIT
"""
import os
import sys
import json
import argparse
from typing import Optional, Dict, Any, List
def check_dependencies():
    """Return True when LiteLLM can be imported; otherwise print install help and return False."""
    try:
        import litellm  # noqa: F401 -- imported only to probe availability
    except ImportError:
        print("Error: LiteLLM is not installed.", file=sys.stderr)
        print("Install it with: uv pip install litellm", file=sys.stderr)
        return False
    return True
def check_api_key() -> Optional[str]:
    """Return the configured OpenRouter API key, or None (printing setup guidance)."""
    configured = os.environ.get("OPENROUTER_API_KEY")
    if configured:
        return configured
    # Key missing (or empty): walk the user through configuration on stderr.
    guidance = (
        "Error: OPENROUTER_API_KEY environment variable is not set.",
        "\nTo set up your API key:",
        "1. Get an API key from https://openrouter.ai/keys",
        "2. Set the environment variable:",
        "   export OPENROUTER_API_KEY='your-api-key-here'",
        "\nOr create a .env file with:",
        "   OPENROUTER_API_KEY=your-api-key-here",
    )
    for line in guidance:
        print(line, file=sys.stderr)
    return None
def search_with_perplexity(
    query: str,
    model: str = "openrouter/perplexity/sonar-pro",
    max_tokens: int = 4000,
    temperature: float = 0.2,
    verbose: bool = False
) -> Dict[str, Any]:
    """
    Perform a search using Perplexity models via LiteLLM and OpenRouter.

    Args:
        query: The search query
        model: Model to use (default: sonar-pro)
        max_tokens: Maximum tokens in response
        temperature: Response temperature (0.0-1.0)
        verbose: Print detailed information to stderr

    Returns:
        Dictionary containing the search results and metadata. On failure,
        the dictionary has "success": False and an "error" message instead.
    """
    # Import lazily so a missing dependency yields a structured error rather
    # than crashing at module import time.
    try:
        from litellm import completion
    except ImportError:
        return {
            "success": False,
            "error": "LiteLLM not installed. Run: uv pip install litellm"
        }

    if not check_api_key():
        return {
            "success": False,
            "error": "OpenRouter API key not configured"
        }

    if verbose:
        print(f"Model: {model}", file=sys.stderr)
        print(f"Query: {query}", file=sys.stderr)
        print(f"Max tokens: {max_tokens}", file=sys.stderr)
        print(f"Temperature: {temperature}", file=sys.stderr)
        print("", file=sys.stderr)

    try:
        # Perform the search; the whole response handling stays inside the
        # try so any malformed response is reported as an error dict too.
        reply = completion(
            model=model,
            messages=[{"role": "user", "content": query}],
            max_tokens=max_tokens,
            temperature=temperature
        )
        message = reply.choices[0].message
        outcome: Dict[str, Any] = {
            "success": True,
            "query": query,
            "model": model,
            "answer": message.content,
            "usage": {
                "prompt_tokens": reply.usage.prompt_tokens,
                "completion_tokens": reply.usage.completion_tokens,
                "total_tokens": reply.usage.total_tokens
            }
        }
        # Some models attach source citations to the message; pass them through.
        if hasattr(message, 'citations'):
            outcome["citations"] = message.citations
        return outcome
    except Exception as exc:
        return {
            "success": False,
            "error": str(exc),
            "query": query,
            "model": model
        }
def main():
    """Main entry point: parse CLI arguments, run the search, and return a
    process exit code (0 on success, 1 on any failure)."""
    parser = argparse.ArgumentParser(
        description="Perform AI-powered web searches using Perplexity via LiteLLM and OpenRouter",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Basic search
  python perplexity_search.py "What are the latest developments in CRISPR?"
  # Use Sonar Pro Search for deeper analysis
  python perplexity_search.py "Compare mRNA and viral vector vaccines" --model sonar-pro-search
  # Use Sonar Reasoning for complex queries
  python perplexity_search.py "Explain quantum entanglement" --model sonar-reasoning-pro
  # Save output to file
  python perplexity_search.py "COVID-19 vaccine efficacy studies" --output results.json
  # Verbose mode
  python perplexity_search.py "Machine learning trends 2024" --verbose
Available Models:
  - sonar-pro (default): General-purpose search with good balance
  - sonar-pro-search: Most advanced agentic search with multi-step reasoning
  - sonar: Standard model for basic searches
  - sonar-reasoning-pro: Advanced reasoning capabilities
  - sonar-reasoning: Basic reasoning model
        """
    )
    parser.add_argument(
        "query",
        help="The search query"
    )
    parser.add_argument(
        "--model",
        default="sonar-pro",
        choices=[
            "sonar-pro",
            "sonar-pro-search",
            "sonar",
            "sonar-reasoning-pro",
            "sonar-reasoning"
        ],
        help="Perplexity model to use (default: sonar-pro)"
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=4000,
        help="Maximum tokens in response (default: 4000)"
    )
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.2,
        help="Response temperature 0.0-1.0 (default: 0.2)"
    )
    parser.add_argument(
        "--output",
        help="Save results to JSON file"
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Print detailed information"
    )
    parser.add_argument(
        "--check-setup",
        action="store_true",
        help="Check if dependencies and API key are configured"
    )
    args = parser.parse_args()
    # --check-setup short-circuits: report configuration status and exit
    # without performing a search.
    if args.check_setup:
        print("Checking setup...")
        deps_ok = check_dependencies()
        api_key_ok = check_api_key() is not None
        if deps_ok and api_key_ok:
            print("\nβ Setup complete! Ready to search.")
            return 0
        else:
            print("\nβ Setup incomplete. Please fix the issues above.")
            return 1
    # Check dependencies before attempting any search.
    if not check_dependencies():
        return 1
    # Prepend openrouter/perplexity/ to bare model names so LiteLLM routes
    # the request through OpenRouter.
    model = args.model
    if not model.startswith("openrouter/"):
        model = f"openrouter/perplexity/{model}"
    # Perform the search.
    result = search_with_perplexity(
        query=args.query,
        model=model,
        max_tokens=args.max_tokens,
        temperature=args.temperature,
        verbose=args.verbose
    )
    # Handle failure: the error message goes to stderr, exit code 1.
    if not result["success"]:
        print(f"Error: {result['error']}", file=sys.stderr)
        return 1
    # Print the answer between separator rules on stdout.
    print("\n" + "="*80)
    print("ANSWER")
    print("="*80)
    print(result["answer"])
    print("="*80)
    # Print token-usage stats on stderr when verbose.
    if args.verbose:
        print(f"\nUsage:", file=sys.stderr)
        print(f"  Prompt tokens: {result['usage']['prompt_tokens']}", file=sys.stderr)
        print(f"  Completion tokens: {result['usage']['completion_tokens']}", file=sys.stderr)
        print(f"  Total tokens: {result['usage']['total_tokens']}", file=sys.stderr)
    # Save the full result dict (answer + metadata) as JSON if requested.
    if args.output:
        with open(args.output, 'w') as f:
            json.dump(result, f, indent=2)
        print(f"\nβ Results saved to {args.output}", file=sys.stderr)
    return 0


# Script entry point: propagate main()'s return value as the exit code.
if __name__ == "__main__":
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/perplexity-search/scripts/perplexity_search.py",
"license": "MIT License",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/perplexity-search/scripts/setup_env.py | #!/usr/bin/env python3
"""
Setup script for Perplexity Search environment configuration.
This script helps users configure their OpenRouter API key and validates the setup.
Usage:
python setup_env.py [--api-key YOUR_KEY] [--env-file .env]
Author: Scientific Skills
License: MIT
"""
import os
import sys
import argparse
from pathlib import Path
def create_env_file(api_key: str, env_file: str = ".env") -> bool:
    """
    Create or update .env file with OpenRouter API key.

    Any existing OPENROUTER_API_KEY line is removed and replaced; every
    other line in the file is preserved.

    Args:
        api_key: The OpenRouter API key
        env_file: Path to .env file (default: .env)

    Returns:
        True if successful, False otherwise
    """
    try:
        target = Path(env_file)
        # Keep every existing line except a previously written API key entry.
        retained = []
        if target.exists():
            with open(target, 'r') as handle:
                retained = [
                    entry for entry in handle.readlines()
                    if not entry.startswith('OPENROUTER_API_KEY=')
                ]
        with open(target, 'w') as handle:
            handle.writelines(retained)
            # Guarantee the key starts on its own line.
            if retained and not retained[-1].endswith('\n'):
                handle.write('\n')
            handle.write(f'OPENROUTER_API_KEY={api_key}\n')
        print(f"β API key saved to {env_file}")
        return True
    except Exception as e:
        print(f"Error creating .env file: {e}", file=sys.stderr)
        return False
def validate_setup() -> bool:
    """
    Validate that the environment is properly configured.

    Checks, in order, that OPENROUTER_API_KEY is set and that LiteLLM can
    be imported, printing a status line (or remediation steps) for each.

    Returns:
        True if setup is valid, False otherwise
    """
    print("Validating setup...")
    print()

    # Check for the API key first.
    key = os.environ.get("OPENROUTER_API_KEY")
    if not key:
        print("β OPENROUTER_API_KEY environment variable not set")
        print()
        print("To set up your API key:")
        print("1. Get an API key from https://openrouter.ai/keys")
        print("2. Run this script with --api-key flag:")
        print("   python setup_env.py --api-key YOUR_KEY")
        print()
        return False
    # Show only a prefix/suffix of the key so output never leaks the secret.
    masked = f"{key[:8]}...{key[-4:]}" if len(key) > 12 else "***"
    print(f"β OPENROUTER_API_KEY is set ({masked})")

    # Check for LiteLLM.
    try:
        import litellm
    except ImportError:
        print("β LiteLLM is not installed")
        print()
        print("Install LiteLLM with:")
        print("   uv pip install litellm")
        print()
        return False
    print(f"β LiteLLM is installed (version {litellm.__version__})")

    print()
    print("β Setup is complete! You're ready to use Perplexity Search.")
    return True
def main():
    """Main entry point for the setup script.

    Returns:
        int: 0 on success, 1 when --validate finds a problem.
    """
    parser = argparse.ArgumentParser(
        description="Setup Perplexity Search environment configuration",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Set up API key
  python setup_env.py --api-key sk-or-v1-xxxxx
  # Validate existing setup
  python setup_env.py --validate
  # Use custom .env file location
  python setup_env.py --api-key sk-or-v1-xxxxx --env-file /path/to/.env
Get your OpenRouter API key from:
  https://openrouter.ai/keys
        """
    )
    parser.add_argument(
        "--api-key",
        help="Your OpenRouter API key"
    )
    parser.add_argument(
        "--env-file",
        default=".env",
        help="Path to .env file (default: .env)"
    )
    parser.add_argument(
        "--validate",
        action="store_true",
        help="Validate existing setup"
    )
    args = parser.parse_args()
    # Default behavior with no flags: run validation only.
    if not args.api_key and not args.validate:
        args.validate = True
    # Handle API key setup: persist to the .env file, then print next steps.
    if args.api_key:
        print("Setting up OpenRouter API key...")
        if create_env_file(args.api_key, args.env_file):
            print()
            print("Next steps:")
            print(f"1. Load the environment variables:")
            print(f"   source {args.env_file}")
            print("2. Or export directly:")
            print(f"   export OPENROUTER_API_KEY={args.api_key}")
            print("3. Test the setup:")
            print("   python perplexity_search.py --check-setup")
            print()
    # Validate setup (may run after --api-key as well, when both flags given).
    if args.validate:
        if not validate_setup():
            return 1
    return 0
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/perplexity-search/scripts/setup_env.py",
"license": "MIT License",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pubchem-database/scripts/bioactivity_query.py | #!/usr/bin/env python3
"""
PubChem Bioactivity Data Retrieval
This script provides functions for retrieving biological activity data
from PubChem for compounds and assays.
"""
import sys
import json
import time
from typing import Dict, List, Optional
# requests is a hard dependency; abort with install guidance if it is absent.
try:
    import requests
except ImportError:
    print("Error: requests is not installed. Install it with: pip install requests")
    sys.exit(1)
# REST endpoints for PubChem's PUG and PUG-View APIs.
BASE_URL = "https://pubchem.ncbi.nlm.nih.gov/rest/pug"
PUG_VIEW_URL = "https://pubchem.ncbi.nlm.nih.gov/rest/pug_view"
# Rate limiting: 5 requests per second maximum
REQUEST_DELAY = 0.21  # seconds between requests
def rate_limited_request(url: str, method: str = 'GET', **kwargs) -> Optional[requests.Response]:
    """
    Issue a throttled HTTP request against the PubChem API.

    Sleeps REQUEST_DELAY before every call so the script stays under
    PubChem's request-rate ceiling.

    Args:
        url: Request URL
        method: HTTP method ('GET' or 'POST')
        **kwargs: Additional arguments for requests

    Returns:
        Response object or None on error
    """
    time.sleep(REQUEST_DELAY)
    # Anything that is not an explicit GET is sent as POST, as before.
    issue = requests.get if method.upper() == 'GET' else requests.post
    try:
        reply = issue(url, **kwargs)
        reply.raise_for_status()
        return reply
    except requests.exceptions.RequestException as e:
        print(f"Request error: {e}")
        return None
def get_bioassay_summary(cid: int) -> Optional[Dict]:
    """
    Fetch the assay-summary table for one compound.

    Args:
        cid: PubChem Compound ID

    Returns:
        Parsed JSON dictionary, or None if the request failed
    """
    endpoint = f"{BASE_URL}/compound/cid/{cid}/assaysummary/JSON"
    reply = rate_limited_request(endpoint)
    if reply is None or reply.status_code != 200:
        return None
    return reply.json()
def get_compound_bioactivities(
    cid: int,
    activity_outcome: Optional[str] = None,
    data: Optional[Dict] = None
) -> List[Dict]:
    """
    Get bioactivity data for a compound.

    Args:
        cid: PubChem Compound ID
        activity_outcome: Filter by activity ('active', 'inactive', 'inconclusive');
            matched case-insensitively against the 'Activity Outcome' column.
        data: Pre-fetched assay-summary JSON (as returned by
            get_bioassay_summary). When None, the summary is downloaded.

    Returns:
        List of bioactivity records, one dict per assay row keyed by the
        table's column names. Empty list when no data is available.
    """
    if data is None:
        data = get_bioassay_summary(cid)
    if not data:
        return []

    table = data.get('Table', {})
    columns = table.get('Columns', {}).get('Column', [])

    activities = []
    for row in table.get('Row', []):
        # zip() tolerates ragged rows: surplus cells (or columns) are dropped
        # instead of raising IndexError as direct positional indexing would.
        activity = dict(zip(columns, row.get('Cell', [])))
        if (activity_outcome is None or
                activity.get('Activity Outcome', '').lower() == activity_outcome.lower()):
            activities.append(activity)
    return activities
def get_assay_description(aid: int) -> Optional[Dict]:
    """
    Fetch the full description record for one assay.

    Args:
        aid: PubChem Assay ID (AID)

    Returns:
        Parsed JSON dictionary, or None if the request failed
    """
    reply = rate_limited_request(f"{BASE_URL}/assay/aid/{aid}/description/JSON")
    if reply is not None and reply.status_code == 200:
        return reply.json()
    return None
def get_assay_targets(aid: int) -> List[str]:
    """
    List the biological targets recorded for an assay.

    Args:
        aid: PubChem Assay ID

    Returns:
        Target names; entries fall back to "GI:<mol_id>" when a target
        record carries an identifier but no name. Empty list when the
        assay cannot be retrieved.
    """
    description = get_assay_description(aid)
    if not description:
        return []

    # Target records live under PC_AssayContainer[0].assay.descr.target.
    container = description.get('PC_AssayContainer', [{}])[0]
    descr = container.get('assay', {}).get('descr', {})

    names = []
    for entry in descr.get('target', []):
        label = entry.get('name', '')
        if not label:
            gi = entry.get('mol_id', '')
            label = f"GI:{gi}" if gi else ''
        if label:
            names.append(label)
    return names
def search_assays_by_target(
    target_name: str,
    max_results: int = 100
) -> List[int]:
    """
    Search for assays targeting a specific protein or gene.

    Args:
        target_name: Name of the target (e.g., 'EGFR', 'p53')
        max_results: Maximum number of results

    Returns:
        List of Assay IDs (AIDs); empty on failure
    """
    # PubChem's text search over assay targets.
    reply = rate_limited_request(f"{BASE_URL}/assay/target/{target_name}/aids/JSON")
    if reply is None or reply.status_code != 200:
        return []
    found = reply.json().get('IdentifierList', {}).get('AID', [])
    return found[:max_results]
def get_active_compounds_in_assay(aid: int, max_results: int = 1000) -> List[int]:
    """
    List compounds scored as active in an assay.

    Args:
        aid: PubChem Assay ID
        max_results: Maximum number of results

    Returns:
        List of Compound IDs (CIDs) that showed activity; empty on failure
    """
    # cids_type=active restricts the ID list to active outcomes server-side.
    endpoint = f"{BASE_URL}/assay/aid/{aid}/cids/JSON?cids_type=active"
    reply = rate_limited_request(endpoint)
    if reply is None or reply.status_code != 200:
        return []
    return reply.json().get('IdentifierList', {}).get('CID', [])[:max_results]
def get_compound_annotations(cid: int, section: Optional[str] = None) -> Optional[Dict]:
    """
    Fetch PUG-View annotations for a compound.

    Args:
        cid: PubChem Compound ID
        section: Specific section to retrieve (e.g., 'Pharmacology and Biochemistry');
            when None the full record is returned.

    Returns:
        Parsed annotation JSON, or None if the request failed
    """
    endpoint = f"{PUG_VIEW_URL}/data/compound/{cid}/JSON"
    if section:
        endpoint = f"{endpoint}?heading={section}"
    reply = rate_limited_request(endpoint)
    if reply is not None and reply.status_code == 200:
        return reply.json()
    return None
def get_drug_information(cid: int) -> Optional[Dict]:
    """
    Get drug and medication information for a compound.

    Args:
        cid: PubChem Compound ID

    Returns:
        Dictionary containing drug information, or None on failure
    """
    # Thin wrapper: fixes the PUG-View heading to the drug/medication section.
    return get_compound_annotations(cid, section="Drug and Medication Information")
def get_safety_hazards(cid: int) -> Optional[Dict]:
    """
    Get safety and hazard information for a compound.

    Args:
        cid: PubChem Compound ID

    Returns:
        Dictionary containing safety information, or None on failure
    """
    # Thin wrapper: fixes the PUG-View heading to the safety/hazards section.
    return get_compound_annotations(cid, section="Safety and Hazards")
def summarize_bioactivities(cid: int, activities: Optional[List[Dict]] = None) -> Dict:
    """
    Generate a summary of bioactivity data for a compound.

    Args:
        cid: PubChem Compound ID
        activities: Pre-fetched bioactivity records (as returned by
            get_compound_bioactivities). When None, they are downloaded.

    Returns:
        Dictionary with bioactivity summary statistics. The 'assay_types'
        key is retained for interface compatibility but is not populated.
    """
    if activities is None:
        activities = get_compound_bioactivities(cid)

    summary = {
        'total_assays': len(activities),
        'active': 0,
        'inactive': 0,
        'inconclusive': 0,
        'unspecified': 0,
        'assay_types': {}
    }

    for activity in activities:
        outcome = activity.get('Activity Outcome', '').lower()
        # Order matters: 'active' is a substring of 'inactive', so the
        # inactive test must come first or every inactive record would be
        # miscounted as active (the previous ordering had exactly that bug).
        if 'inactive' in outcome:
            summary['inactive'] += 1
        elif 'active' in outcome:
            summary['active'] += 1
        elif 'inconclusive' in outcome:
            summary['inconclusive'] += 1
        else:
            summary['unspecified'] += 1

    return summary
def find_compounds_by_bioactivity(
    target: str,
    threshold: Optional[float] = None,
    max_compounds: int = 100
) -> List[Dict]:
    """
    Find compounds with bioactivity against a specific target.

    Args:
        target: Target name (e.g., 'EGFR')
        threshold: Activity threshold (accepted for API compatibility;
            currently unused by the implementation)
        max_compounds: Maximum number of compounds to return

    Returns:
        List of dictionaries with compound information and activity data
    """
    # Step 1: locate assays screening this target.
    assay_ids = search_assays_by_target(target, max_results=10)
    if not assay_ids:
        print(f"No assays found for target: {target}")
        return []

    # Step 2: harvest active CIDs from up to five assays, de-duplicating.
    seen = set()
    hits = []
    for aid in assay_ids[:5]:
        for cid in get_active_compounds_in_assay(aid, max_results=max_compounds):
            if len(hits) >= max_compounds:
                break
            if cid in seen:
                continue
            seen.add(cid)
            hits.append({'cid': cid, 'aid': aid, 'target': target})
        if len(hits) >= max_compounds:
            break
    return hits
def main():
    """Example usage of bioactivity query functions (requires network access)."""
    # Example 1: Get bioassay summary for aspirin (CID 2244)
    print("Example 1: Getting bioassay summary for aspirin (CID 2244)...")
    summary = summarize_bioactivities(2244)
    print(json.dumps(summary, indent=2))
    # Example 2: Get active bioactivities for a compound
    print("\nExample 2: Getting active bioactivities for aspirin...")
    activities = get_compound_bioactivities(2244, activity_outcome='active')
    print(f"Found {len(activities)} active bioactivities")
    if activities:
        print(f"First activity: {activities[0].get('Assay Name', 'N/A')}")
    # Example 3: Get assay information
    print("\nExample 3: Getting assay description...")
    if activities:
        # NOTE(review): 'AID' cells may come back as strings in the summary
        # table — confirm get_assay_targets tolerates non-int AIDs.
        aid = activities[0].get('AID', 0)
        targets = get_assay_targets(aid)
        print(f"Assay {aid} targets: {', '.join(targets) if targets else 'N/A'}")
    # Example 4: Search for compounds targeting EGFR
    print("\nExample 4: Searching for EGFR inhibitors...")
    egfr_compounds = find_compounds_by_bioactivity('EGFR', max_compounds=5)
    print(f"Found {len(egfr_compounds)} compounds with EGFR activity")
    for comp in egfr_compounds[:5]:
        print(f"  CID {comp['cid']} (from AID {comp['aid']})")
# Run the demo queries when executed as a script.
if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pubchem-database/scripts/bioactivity_query.py",
"license": "MIT License",
"lines": 277,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pubchem-database/scripts/compound_search.py | #!/usr/bin/env python3
"""
PubChem Compound Search Utility
This script provides functions for searching and retrieving compound information
from PubChem using the PubChemPy library.
"""
import sys
import json
from typing import List, Dict, Optional, Union
# PubChemPy is a hard dependency; abort with install guidance if it is absent.
try:
    import pubchempy as pcp
except ImportError:
    print("Error: pubchempy is not installed. Install it with: pip install pubchempy")
    sys.exit(1)
def search_by_name(name: str, max_results: int = 10) -> List[pcp.Compound]:
    """
    Search for compounds by name.

    Args:
        name: Chemical name to search for
        max_results: Maximum number of results to return

    Returns:
        List of Compound objects (empty on lookup failure)
    """
    try:
        matches = pcp.get_compounds(name, 'name')
    except Exception as e:
        print(f"Error searching for '{name}': {e}")
        return []
    return matches[:max_results]
def search_by_smiles(smiles: str) -> Optional[pcp.Compound]:
    """
    Search for a compound by SMILES string.

    Args:
        smiles: SMILES string

    Returns:
        Compound object or None if not found (or on lookup failure)
    """
    try:
        matches = pcp.get_compounds(smiles, 'smiles')
    except Exception as e:
        print(f"Error searching for SMILES '{smiles}': {e}")
        return None
    return matches[0] if matches else None
def get_compound_by_cid(cid: int) -> Optional[pcp.Compound]:
    """
    Retrieve a compound by its CID (Compound ID).

    Args:
        cid: PubChem Compound ID

    Returns:
        Compound object or None if not found (or on lookup failure)
    """
    try:
        return pcp.Compound.from_cid(cid)
    except Exception as e:
        # Any network/parse failure is reported and mapped to None.
        print(f"Error retrieving CID {cid}: {e}")
        return None
def get_compound_properties(
    identifier: Union[str, int],
    namespace: str = 'name',
    properties: Optional[List[str]] = None
) -> Dict:
    """
    Get specific properties for a compound.

    Args:
        identifier: Compound identifier (name, SMILES, CID, etc.)
        namespace: Type of identifier ('name', 'smiles', 'cid', 'inchi', etc.)
        properties: List of properties to retrieve. If None, returns common properties.

    Returns:
        Dictionary of properties (empty on failure)
    """
    # Default to a common set of physicochemical descriptors.
    requested = properties if properties is not None else [
        'MolecularFormula',
        'MolecularWeight',
        'CanonicalSMILES',
        'IUPACName',
        'XLogP',
        'TPSA',
        'HBondDonorCount',
        'HBondAcceptorCount'
    ]
    try:
        records = pcp.get_properties(requested, identifier, namespace)
    except Exception as e:
        print(f"Error getting properties for '{identifier}': {e}")
        return {}
    return records[0] if records else {}
def similarity_search(
    smiles: str,
    threshold: int = 90,
    max_records: int = 10
) -> List[pcp.Compound]:
    """
    Perform similarity search for compounds similar to the query structure.

    Args:
        smiles: Query SMILES string
        threshold: Similarity threshold (0-100)
        max_records: Maximum number of results

    Returns:
        List of similar Compound objects (empty on failure)
    """
    try:
        # Threshold/MaxRecords are capitalized per PubChemPy's search options.
        return pcp.get_compounds(
            smiles,
            'smiles',
            searchtype='similarity',
            Threshold=threshold,
            MaxRecords=max_records
        )
    except Exception as e:
        print(f"Error in similarity search: {e}")
        return []
def substructure_search(
    smiles: str,
    max_records: int = 100
) -> List[pcp.Compound]:
    """
    Perform substructure search for compounds containing the query structure.

    Args:
        smiles: Query SMILES string (substructure)
        max_records: Maximum number of results

    Returns:
        List of Compound objects containing the substructure (empty on failure)
    """
    try:
        return pcp.get_compounds(
            smiles,
            'smiles',
            searchtype='substructure',
            MaxRecords=max_records
        )
    except Exception as e:
        print(f"Error in substructure search: {e}")
        return []
def get_synonyms(identifier: Union[str, int], namespace: str = 'name') -> List[str]:
    """
    Get all synonyms for a compound.

    Args:
        identifier: Compound identifier
        namespace: Type of identifier

    Returns:
        List of synonym strings (empty if none found or on failure)
    """
    try:
        records = pcp.get_synonyms(identifier, namespace)
    except Exception as e:
        print(f"Error getting synonyms: {e}")
        return []
    # get_synonyms returns one record per match; use the first match's list.
    return records[0].get('Synonym', []) if records else []
def batch_search(
    identifiers: List[str],
    namespace: str = 'name',
    properties: Optional[List[str]] = None
) -> List[Dict]:
    """
    Batch search for multiple compounds.

    Args:
        identifiers: List of compound identifiers
        namespace: Type of identifiers
        properties: List of properties to retrieve

    Returns:
        List of dictionaries containing properties for each compound; each
        entry also records the originating identifier under 'query'.
    """
    collected = []
    for query in identifiers:
        record = get_compound_properties(query, namespace, properties)
        if not record:
            continue  # skip identifiers that resolved to nothing
        record['query'] = query
        collected.append(record)
    return collected
def download_structure(
    identifier: Union[str, int],
    namespace: str = 'name',
    format: str = 'SDF',
    filename: Optional[str] = None
) -> Optional[str]:
    """
    Download compound structure in specified format.

    Args:
        identifier: Compound identifier
        namespace: Type of identifier
        format: Output format ('SDF', 'JSON', 'PNG', etc.)
        filename: Output filename (if None, returns data as string)

    Returns:
        Data string if filename is None, else None
    """
    try:
        if not filename:
            # No destination given: hand the raw data back to the caller.
            return pcp.download(format, identifier, namespace)
        # Persist directly to disk; existing files are overwritten.
        pcp.download(format, identifier, namespace, filename, overwrite=True)
        return None
    except Exception as e:
        print(f"Error downloading structure: {e}")
        return None
def print_compound_info(compound: pcp.Compound) -> None:
    """
    Print formatted compound information.

    Missing attributes are rendered as 'N/A'.

    Args:
        compound: PubChemPy Compound object
    """
    print(f"\n{'='*60}")
    print(f"Compound CID: {compound.cid}")
    print(f"{'='*60}")
    print(f"IUPAC Name: {compound.iupac_name or 'N/A'}")
    print(f"Molecular Formula: {compound.molecular_formula or 'N/A'}")
    print(f"Molecular Weight: {compound.molecular_weight or 'N/A'} g/mol")
    print(f"Canonical SMILES: {compound.canonical_smiles or 'N/A'}")
    print(f"InChI: {compound.inchi or 'N/A'}")
    print(f"InChI Key: {compound.inchikey or 'N/A'}")
    print(f"XLogP: {compound.xlogp or 'N/A'}")
    # NOTE(review): "Ε²" below looks like a mangled "Å²" (square ångströms,
    # the usual TPSA unit) — confirm the file's encoding before changing it.
    print(f"TPSA: {compound.tpsa or 'N/A'} Ε²")
    print(f"H-Bond Donors: {compound.h_bond_donor_count or 'N/A'}")
    print(f"H-Bond Acceptors: {compound.h_bond_acceptor_count or 'N/A'}")
    print(f"{'='*60}\n")
def main():
    """Example usage of PubChem search functions (requires network access)."""
    # Example 1: Search by name
    print("Example 1: Searching for 'aspirin'...")
    compounds = search_by_name('aspirin', max_results=1)
    if compounds:
        print_compound_info(compounds[0])
    # Example 2: Get properties
    print("\nExample 2: Getting properties for caffeine...")
    props = get_compound_properties('caffeine', 'name')
    print(json.dumps(props, indent=2))
    # Example 3: Similarity search (95% Tanimoto threshold, 5 hits max)
    print("\nExample 3: Finding compounds similar to benzene...")
    benzene_smiles = 'c1ccccc1'
    similar = similarity_search(benzene_smiles, threshold=95, max_records=5)
    print(f"Found {len(similar)} similar compounds:")
    for comp in similar:
        print(f"  CID {comp.cid}: {comp.iupac_name or 'N/A'}")
    # Example 4: Batch search
    print("\nExample 4: Batch search for multiple compounds...")
    names = ['aspirin', 'ibuprofen', 'paracetamol']
    results = batch_search(names, properties=['MolecularFormula', 'MolecularWeight'])
    for result in results:
        print(f"  {result.get('query')}: {result.get('MolecularFormula')} "
              f"({result.get('MolecularWeight')} g/mol)")
# Run the demo queries when executed as a script.
if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pubchem-database/scripts/compound_search.py",
"license": "MIT License",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pufferlib/scripts/env_template.py | #!/usr/bin/env python3
"""
PufferLib Environment Template
This template provides a starting point for creating custom PufferEnv environments.
Customize the observation space, action space, and environment logic for your task.
"""
import numpy as np
import pufferlib
from pufferlib import PufferEnv
class MyEnvironment(PufferEnv):
    """
    Custom PufferLib environment template.

    This is a simple grid world example: a single agent starts at (0, 0)
    and must walk to a goal in the opposite corner. Customize it for your
    specific task.
    """
    def __init__(self, buf=None, grid_size=10, max_steps=1000):
        """
        Initialize environment.

        Args:
            buf: Shared memory buffer (managed by PufferLib)
            grid_size: Size of the grid world
            max_steps: Maximum steps per episode
        """
        super().__init__(buf)
        self.grid_size = grid_size
        self.max_steps = max_steps
        # Define observation space
        # NOTE(review): make_space/make_discrete come from the PufferEnv base
        # class — their exact semantics are assumed from usage here.
        # Option 1: Flat vector observation
        self.observation_space = self.make_space((4,))  # [x, y, goal_x, goal_y]
        # Option 2: Dict observation with multiple components
        # self.observation_space = self.make_space({
        #     'position': (2,),
        #     'goal': (2,),
        #     'grid': (grid_size, grid_size)
        # })
        # Option 3: Image observation
        # self.observation_space = self.make_space((grid_size, grid_size, 3))
        # Define action space
        # Option 1: Discrete actions
        self.action_space = self.make_discrete(4)  # 0: up, 1: right, 2: down, 3: left
        # Option 2: Continuous actions
        # self.action_space = self.make_space((2,))  # [dx, dy]
        # Option 3: Multi-discrete actions
        # self.action_space = self.make_multi_discrete([3, 3])  # Two 3-way choices
        # Initialize state (populated by reset() below)
        self.agent_pos = None
        self.goal_pos = None
        self.step_count = 0
        self.reset()
    def reset(self):
        """
        Reset environment to initial state.

        Returns:
            observation: Initial observation
        """
        # Agent starts in one corner, goal fixed in the opposite corner.
        self.agent_pos = np.array([0, 0], dtype=np.float32)
        self.goal_pos = np.array([self.grid_size - 1, self.grid_size - 1], dtype=np.float32)
        self.step_count = 0
        # Return initial observation
        return self._get_observation()
    def step(self, action):
        """
        Execute one environment step.

        Args:
            action: Action to take (0: up, 1: right, 2: down, 3: left)

        Returns:
            observation: New observation
            reward: Reward for this step
            done: Whether episode is complete
            info: Additional information
        """
        self.step_count += 1
        # Execute action
        self._apply_action(action)
        # Compute reward
        reward = self._compute_reward()
        # Check if episode is done
        done = self._is_done()
        # Get new observation
        observation = self._get_observation()
        # Additional info
        info = {}
        if done:
            # Include episode statistics when episode ends.
            # NOTE(review): 'r' records only the final step's reward, not the
            # cumulative episode return — confirm against the trainer's
            # expectations before relying on it.
            info['episode'] = {
                'r': reward,
                'l': self.step_count
            }
        return observation, reward, done, info
    def _apply_action(self, action):
        """Apply action to update environment state."""
        # Discrete actions: 0=up, 1=right, 2=down, 3=left; moves are clamped
        # to the grid so the agent can never leave [0, grid_size - 1].
        if action == 0:  # Up
            self.agent_pos[1] = min(self.agent_pos[1] + 1, self.grid_size - 1)
        elif action == 1:  # Right
            self.agent_pos[0] = min(self.agent_pos[0] + 1, self.grid_size - 1)
        elif action == 2:  # Down
            self.agent_pos[1] = max(self.agent_pos[1] - 1, 0)
        elif action == 3:  # Left
            self.agent_pos[0] = max(self.agent_pos[0] - 1, 0)
    def _compute_reward(self):
        """Compute reward for current state."""
        # Distance to goal
        distance = np.linalg.norm(self.agent_pos - self.goal_pos)
        # Reward shaping: negative normalized distance + bonus for reaching goal
        reward = -distance / self.grid_size
        # Goal reached (within half a cell)
        if distance < 0.5:
            reward += 10.0
        return reward
    def _is_done(self):
        """Check if episode is complete."""
        # Episode ends if goal reached or max steps exceeded
        distance = np.linalg.norm(self.agent_pos - self.goal_pos)
        goal_reached = distance < 0.5
        timeout = self.step_count >= self.max_steps
        return goal_reached or timeout
    def _get_observation(self):
        """Generate observation from current state."""
        # Flat float32 vector: [agent_x, agent_y, goal_x, goal_y]
        observation = np.concatenate([
            self.agent_pos,
            self.goal_pos
        ]).astype(np.float32)
        return observation
class MultiAgentEnvironment(PufferEnv):
    """
    Multi-agent environment template.

    Example: Cooperative navigation task where agents must reach individual
    goals. Agent IDs are strings of the form 'agent_<index>'.
    """
    def __init__(self, buf=None, num_agents=4, grid_size=10, max_steps=1000):
        """
        Args:
            buf: Shared memory buffer (managed by PufferLib)
            num_agents: Number of agents in the environment
            grid_size: Size of the grid world
            max_steps: Maximum steps per episode
        """
        super().__init__(buf)
        self.num_agents = num_agents
        self.grid_size = grid_size
        self.max_steps = max_steps
        # Per-agent observation space: own position, own goal, and a flat
        # vector with the positions of all other agents.
        self.single_observation_space = self.make_space({
            'position': (2,),
            'goal': (2,),
            'others': (2 * (num_agents - 1),)  # Positions of other agents
        })
        # Per-agent action space
        self.single_action_space = self.make_discrete(5)  # 4 directions + stay
        # Initialize state (populated by reset() below)
        self.agent_positions = None
        self.goal_positions = None
        self.step_count = 0
        self.reset()
    def reset(self):
        """Reset all agents and return initial per-agent observations."""
        # Random initial positions sampled uniformly in [0, grid_size).
        # NOTE(review): spawns may exceed grid_size - 1, which is the clamp
        # bound used in _apply_action — confirm this asymmetry is intended.
        self.agent_positions = np.random.rand(self.num_agents, 2) * self.grid_size
        # Random goal positions
        self.goal_positions = np.random.rand(self.num_agents, 2) * self.grid_size
        self.step_count = 0
        # Return observations for all agents
        return {
            f'agent_{i}': self._get_obs(i)
            for i in range(self.num_agents)
        }
    def step(self, actions):
        """
        Step all agents.

        Args:
            actions: Dict of {agent_id: action}

        Returns:
            observations: Dict of {agent_id: observation}
            rewards: Dict of {agent_id: reward}
            dones: Dict of {agent_id: done}; also carries the '__all__' key
                (every agent done, or max_steps reached)
            infos: Dict of {agent_id: info}
        """
        self.step_count += 1
        observations = {}
        rewards = {}
        dones = {}
        infos = {}
        # Update all agents
        for agent_id, action in actions.items():
            # Agent index is the numeric suffix of 'agent_<index>'.
            agent_idx = int(agent_id.split('_')[1])
            # Apply action
            self._apply_action(agent_idx, action)
            # Generate outputs
            observations[agent_id] = self._get_obs(agent_idx)
            rewards[agent_id] = self._compute_reward(agent_idx)
            dones[agent_id] = self._is_done(agent_idx)
            infos[agent_id] = {}
        # Global done condition: everyone finished, or episode timed out.
        dones['__all__'] = all(dones.values()) or self.step_count >= self.max_steps
        return observations, rewards, dones, infos
    def _apply_action(self, agent_idx, action):
        """Apply action for specific agent."""
        if action == 0:  # Up
            self.agent_positions[agent_idx, 1] += 1
        elif action == 1:  # Right
            self.agent_positions[agent_idx, 0] += 1
        elif action == 2:  # Down
            self.agent_positions[agent_idx, 1] -= 1
        elif action == 3:  # Left
            self.agent_positions[agent_idx, 0] -= 1
        # action == 4: Stay
        # Clip to grid bounds
        self.agent_positions[agent_idx] = np.clip(
            self.agent_positions[agent_idx],
            0,
            self.grid_size - 1
        )
    def _compute_reward(self, agent_idx):
        """Compute reward for specific agent: negative normalized goal distance."""
        distance = np.linalg.norm(
            self.agent_positions[agent_idx] - self.goal_positions[agent_idx]
        )
        return -distance / self.grid_size
    def _is_done(self, agent_idx):
        """Check if specific agent is done (within half a cell of its goal)."""
        distance = np.linalg.norm(
            self.agent_positions[agent_idx] - self.goal_positions[agent_idx]
        )
        return distance < 0.5
    def _get_obs(self, agent_idx):
        """Get observation for specific agent."""
        # Positions of every other agent, flattened in index order.
        other_positions = np.concatenate([
            self.agent_positions[i]
            for i in range(self.num_agents)
            if i != agent_idx
        ])
        return {
            'position': self.agent_positions[agent_idx].astype(np.float32),
            'goal': self.goal_positions[agent_idx].astype(np.float32),
            'others': other_positions.astype(np.float32)
        }
def test_environment():
    """Smoke-test both template environments with random actions."""
    print("Testing single-agent environment...")
    env = MyEnvironment()
    obs = env.reset()
    print(f"Initial observation shape: {obs.shape}")
    for step in range(10):
        obs, reward, done, info = env.step(env.action_space.sample())
        print(f"Step {step}: reward={reward:.3f}, done={done}")
        if done:
            obs = env.reset()
            print("Episode finished, resetting...")
    print("\nTesting multi-agent environment...")
    multi_env = MultiAgentEnvironment(num_agents=4)
    obs = multi_env.reset()
    print(f"Number of agents: {len(obs)}")
    for step in range(10):
        # One random action per currently-observed agent.
        joint_action = {
            agent_id: multi_env.single_action_space.sample()
            for agent_id in obs.keys()
        }
        obs, rewards, dones, infos = multi_env.step(joint_action)
        print(f"Step {step}: mean_reward={np.mean(list(rewards.values())):.3f}")
        if dones.get('__all__', False):
            obs = multi_env.reset()
            print("Episode finished, resetting...")
    print("\nβ Environment tests passed!")
# Run the smoke tests when executed as a script.
if __name__ == '__main__':
    test_environment()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pufferlib/scripts/env_template.py",
"license": "MIT License",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pufferlib/scripts/train_template.py | #!/usr/bin/env python3
"""
PufferLib Training Template
This template provides a complete training script for reinforcement learning
with PufferLib. Customize the environment, policy, and training configuration
as needed for your use case.
"""
import argparse
import torch
import torch.nn as nn
import pufferlib
from pufferlib import PuffeRL
from pufferlib.pytorch import layer_init
class Policy(nn.Module):
    """Example actor-critic policy network.

    A two-layer MLP encoder feeding separate policy (actor) and value
    (critic) heads, as used by PPO-style trainers.
    """
    def __init__(self, observation_space, action_space, hidden_size=256):
        """
        Args:
            observation_space: Env observation space; assumed flat — the first
                encoder layer reads observation_space.shape[0].
            action_space: Discrete action space; the actor emits action_space.n logits.
            hidden_size: Width of both hidden layers.
        """
        super().__init__()
        self.observation_space = observation_space
        self.action_space = action_space
        # Encoder network
        # NOTE(review): layer_init comes from pufferlib.pytorch; the std=
        # keyword presumably scales the head initialization — confirm.
        self.encoder = nn.Sequential(
            layer_init(nn.Linear(observation_space.shape[0], hidden_size)),
            nn.ReLU(),
            layer_init(nn.Linear(hidden_size, hidden_size)),
            nn.ReLU()
        )
        # Policy head (actor)
        self.actor = layer_init(nn.Linear(hidden_size, action_space.n), std=0.01)
        # Value head (critic)
        self.critic = layer_init(nn.Linear(hidden_size, 1), std=1.0)
    def forward(self, observations):
        """Forward pass through policy.

        Returns:
            (logits, value): action logits and state-value estimate.
        """
        features = self.encoder(observations)
        logits = self.actor(features)
        value = self.critic(features)
        return logits, value
def make_env():
    """Create environment. Customize this for your task.

    NOTE(review): train() below builds its own env from CLI args and does
    not call this helper — it is provided as a customization example.
    """
    # Option 1: Use Ocean environment
    return pufferlib.make('procgen-coinrun', num_envs=256)
    # Option 2: Use Gymnasium environment
    # return pufferlib.make('gym-CartPole-v1', num_envs=256)
    # Option 3: Use custom environment
    # from my_envs import MyEnvironment
    # return pufferlib.emulate(MyEnvironment, num_envs=256)
def create_policy(env):
    """Create policy network sized to the given environment's spaces."""
    return Policy(
        observation_space=env.observation_space,
        action_space=env.action_space,
        hidden_size=256
    )
def train(args):
    """Main training function.

    Builds the vectorized environment, policy, logger, and PuffeRL trainer
    from the parsed CLI args, then runs the iteration loop with periodic
    logging and checkpointing.

    Args:
        args: argparse.Namespace produced by main()'s parser.
    """
    # Set random seeds
    torch.manual_seed(args.seed)
    # Create environment
    print(f"Creating environment with {args.num_envs} parallel environments...")
    env = pufferlib.make(
        args.env_name,
        num_envs=args.num_envs,
        num_workers=args.num_workers
    )
    # Create policy
    print("Initializing policy...")
    policy = create_policy(env)
    if args.device == 'cuda' and torch.cuda.is_available():
        policy = policy.cuda()
        print(f"Using GPU: {torch.cuda.get_device_name(0)}")
    else:
        print("Using CPU")
    # Create logger — imported lazily so unused backends need not be installed.
    if args.logger == 'wandb':
        from pufferlib import WandbLogger
        logger = WandbLogger(
            project=args.project,
            name=args.exp_name,
            config=vars(args)
        )
    elif args.logger == 'neptune':
        from pufferlib import NeptuneLogger
        logger = NeptuneLogger(
            project=args.project,
            name=args.exp_name,
            api_token=args.neptune_token
        )
    else:
        from pufferlib import NoLogger
        logger = NoLogger()
    # Create trainer
    print("Creating trainer...")
    trainer = PuffeRL(
        env=env,
        policy=policy,
        device=args.device,
        learning_rate=args.learning_rate,
        batch_size=args.batch_size,
        n_epochs=args.n_epochs,
        gamma=args.gamma,
        gae_lambda=args.gae_lambda,
        clip_coef=args.clip_coef,
        ent_coef=args.ent_coef,
        vf_coef=args.vf_coef,
        max_grad_norm=args.max_grad_norm,
        logger=logger,
        compile=args.compile
    )
    # Training loop
    print(f"Starting training for {args.num_iterations} iterations...")
    for iteration in range(1, args.num_iterations + 1):
        # Collect rollouts
        rollout_data = trainer.evaluate()
        # Train on batch
        # NOTE(review): train_metrics is currently unused; trainer.mean_and_log()
        # presumably handles metric aggregation — confirm against PuffeRL docs.
        train_metrics = trainer.train()
        # Log results
        trainer.mean_and_log()
        # Save checkpoint
        if iteration % args.save_freq == 0:
            checkpoint_path = f"{args.checkpoint_dir}/checkpoint_{iteration}.pt"
            trainer.save_checkpoint(checkpoint_path)
            print(f"Saved checkpoint to {checkpoint_path}")
        # Print progress
        if iteration % args.log_freq == 0:
            mean_reward = rollout_data.get('mean_reward', 0)
            sps = rollout_data.get('sps', 0)  # steps per second
            print(f"Iteration {iteration}/{args.num_iterations} | "
                  f"Mean Reward: {mean_reward:.2f} | "
                  f"SPS: {sps:,.0f}")
    print("Training complete!")
    # Save final model
    final_path = f"{args.checkpoint_dir}/final_model.pt"
    trainer.save_checkpoint(final_path)
    print(f"Saved final model to {final_path}")
def main():
    """Parse CLI arguments, prepare the checkpoint directory, and run training."""
    parser = argparse.ArgumentParser(description='PufferLib Training')
    # Environment
    parser.add_argument('--env-name', type=str, default='procgen-coinrun',
                        help='Environment name')
    parser.add_argument('--num-envs', type=int, default=256,
                        help='Number of parallel environments')
    parser.add_argument('--num-workers', type=int, default=8,
                        help='Number of vectorization workers')
    # Training
    parser.add_argument('--num-iterations', type=int, default=10000,
                        help='Number of training iterations')
    parser.add_argument('--learning-rate', type=float, default=3e-4,
                        help='Learning rate')
    parser.add_argument('--batch-size', type=int, default=32768,
                        help='Batch size for training')
    parser.add_argument('--n-epochs', type=int, default=4,
                        help='Number of training epochs per batch')
    parser.add_argument('--device', type=str, default='cuda',
                        choices=['cuda', 'cpu'], help='Device to use')
    # PPO Parameters
    parser.add_argument('--gamma', type=float, default=0.99,
                        help='Discount factor')
    parser.add_argument('--gae-lambda', type=float, default=0.95,
                        help='GAE lambda')
    parser.add_argument('--clip-coef', type=float, default=0.2,
                        help='PPO clipping coefficient')
    parser.add_argument('--ent-coef', type=float, default=0.01,
                        help='Entropy coefficient')
    parser.add_argument('--vf-coef', type=float, default=0.5,
                        help='Value function coefficient')
    parser.add_argument('--max-grad-norm', type=float, default=0.5,
                        help='Maximum gradient norm')
    # Logging
    parser.add_argument('--logger', type=str, default='none',
                        choices=['wandb', 'neptune', 'none'],
                        help='Logger to use')
    parser.add_argument('--project', type=str, default='pufferlib-training',
                        help='Project name for logging')
    parser.add_argument('--exp-name', type=str, default='experiment',
                        help='Experiment name')
    parser.add_argument('--neptune-token', type=str, default=None,
                        help='Neptune API token')
    parser.add_argument('--log-freq', type=int, default=10,
                        help='Logging frequency (iterations)')
    # Checkpointing
    parser.add_argument('--checkpoint-dir', type=str, default='checkpoints',
                        help='Directory to save checkpoints')
    parser.add_argument('--save-freq', type=int, default=100,
                        help='Checkpoint save frequency (iterations)')
    # Misc
    parser.add_argument('--seed', type=int, default=42,
                        help='Random seed')
    parser.add_argument('--compile', action='store_true',
                        help='Use torch.compile for faster training')
    args = parser.parse_args()
    # Create checkpoint directory so save_checkpoint never fails on a missing path.
    import os
    os.makedirs(args.checkpoint_dir, exist_ok=True)
    # Run training
    train(args)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pufferlib/scripts/train_template.py",
"license": "MIT License",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pydeseq2/scripts/run_deseq2_analysis.py | #!/usr/bin/env python3
"""
PyDESeq2 Analysis Script
This script performs a complete differential expression analysis using PyDESeq2.
It can be used as a template for standard RNA-seq DEA workflows.
Usage:
python run_deseq2_analysis.py --counts counts.csv --metadata metadata.csv \
--design "~condition" --contrast condition treated control \
--output results/
Requirements:
- pydeseq2
- pandas
- matplotlib (optional, for plots)
"""
import argparse
import os
import pickle
import sys
from pathlib import Path
import pandas as pd
try:
from pydeseq2.dds import DeseqDataSet
from pydeseq2.ds import DeseqStats
except ImportError:
print("Error: pydeseq2 not installed. Install with: pip install pydeseq2")
sys.exit(1)
def load_and_validate_data(counts_path, metadata_path, transpose_counts=True):
    """Load count matrix and metadata, perform basic validation.

    Args:
        counts_path: CSV of raw counts; first column is used as the index.
        metadata_path: CSV of sample annotations; first column is the index.
        transpose_counts: If True, the counts file is genes-by-samples and is
            transposed into the samples-by-genes layout PyDESeq2 expects.

    Returns:
        (counts_df, metadata), restricted to their common samples when the
        two indexes do not match exactly.

    Raises:
        ValueError: If the count matrix contains negative values.
    """
    print(f"Loading count data from {counts_path}...")
    counts_df = pd.read_csv(counts_path, index_col=0)
    if transpose_counts:
        print("Transposing count matrix to samples Γ genes format...")
        counts_df = counts_df.T
    print(f"Loading metadata from {metadata_path}...")
    metadata = pd.read_csv(metadata_path, index_col=0)
    print(f"\nData loaded:")
    print(f" Counts shape: {counts_df.shape} (samples Γ genes)")
    print(f" Metadata shape: {metadata.shape} (samples Γ variables)")
    # Validate sample alignment.
    # Bug fix: `all(counts_df.index == metadata.index)` raises ValueError when
    # the indexes differ in length ("Lengths must match"), which is exactly the
    # case the intersection fallback below must handle. Index.equals() compares
    # values and order and is safe for any pair of indexes.
    if not counts_df.index.equals(metadata.index):
        print("\nWarning: Sample indices don't match perfectly. Taking intersection...")
        common_samples = counts_df.index.intersection(metadata.index)
        counts_df = counts_df.loc[common_samples]
        metadata = metadata.loc[common_samples]
        print(f" Using {len(common_samples)} common samples")
    # Check for negative or non-integer values
    if (counts_df < 0).any().any():
        raise ValueError("Count matrix contains negative values")
    return counts_df, metadata
def filter_data(counts_df, metadata, min_counts=10, condition_col=None):
    """Drop low-count genes, then drop samples lacking the condition label.

    Args:
        counts_df: Samples-by-genes count DataFrame.
        metadata: Sample annotation DataFrame aligned with counts_df.
        min_counts: Genes whose total count across all samples is below this
            threshold are removed.
        condition_col: Optional metadata column; samples with a missing (NaN)
            value in it are removed from both frames.

    Returns:
        The filtered (counts_df, metadata) pair.
    """
    print(f"\nFiltering data...")
    n_genes_before = counts_df.shape[1]
    n_samples_before = counts_df.shape[0]

    # Keep only genes whose total count reaches the threshold.
    gene_mask = counts_df.sum(axis=0) >= min_counts
    counts_df = counts_df.loc[:, gene_mask]
    print(f" Removed {n_genes_before - counts_df.shape[1]} genes with < {min_counts} total counts")

    # Drop samples whose condition label is missing in the metadata.
    if condition_col and condition_col in metadata.columns:
        has_label = metadata[condition_col].notna()
        counts_df = counts_df.loc[has_label]
        metadata = metadata.loc[has_label]
        n_dropped = n_samples_before - counts_df.shape[0]
        if n_dropped > 0:
            print(f" Removed {n_dropped} samples with missing '{condition_col}' data")

    print(f" Final data shape: {counts_df.shape[0]} samples Γ {counts_df.shape[1]} genes")
    return counts_df, metadata
def run_deseq2(counts_df, metadata, design, n_cpus=1):
    """Run DESeq2 normalization and fitting.

    Args:
        counts_df: Samples-by-genes matrix of non-negative raw counts.
        metadata: Sample annotation DataFrame indexed like ``counts_df``.
        design: Design formula string, e.g. "~condition".
        n_cpus: Number of CPUs passed through to PyDESeq2.

    Returns:
        The fitted DeseqDataSet.
    """
    print(f"\nInitializing DeseqDataSet with design: {design}")
    dds = DeseqDataSet(
        counts=counts_df,
        metadata=metadata,
        design=design,
        refit_cooks=True,  # refit genes after removing Cook's-distance outliers
        n_cpus=n_cpus,
        quiet=False
    )
    print("\nRunning DESeq2 pipeline...")
    # The step list below is purely informational; dds.deseq2() performs
    # the whole pipeline in one call.
    print(" Step 1/7: Computing size factors...")
    print(" Step 2/7: Fitting genewise dispersions...")
    print(" Step 3/7: Fitting dispersion trend curve...")
    print(" Step 4/7: Computing dispersion priors...")
    print(" Step 5/7: Fitting MAP dispersions...")
    print(" Step 6/7: Fitting log fold changes...")
    print(" Step 7/7: Calculating Cook's distances...")
    dds.deseq2()
    print("\nβ DESeq2 fitting complete")
    return dds
def run_statistical_tests(dds, contrast, alpha=0.05, shrink_lfc=True):
    """Perform Wald tests and compute p-values.

    Args:
        dds: A fitted DeseqDataSet (output of run_deseq2).
        contrast: [variable, test_level, reference_level] specification.
        alpha: Significance threshold used by independent filtering.
        shrink_lfc: If True, also apply LFC shrinkage afterwards
            (affects fold-change estimates for visualization, not p-values).

    Returns:
        The DeseqStats object with ``results_df`` populated.
    """
    print(f"\nPerforming statistical tests...")
    print(f" Contrast: {contrast}")
    print(f" Significance threshold: {alpha}")
    ds = DeseqStats(
        dds,
        contrast=contrast,
        alpha=alpha,
        cooks_filter=True,  # exclude outliers flagged by Cook's distance
        independent_filter=True,  # filter low-information genes before BH
        quiet=False
    )
    print("\n Running Wald tests...")
    print(" Filtering outliers based on Cook's distance...")
    print(" Applying independent filtering...")
    print(" Adjusting p-values (Benjamini-Hochberg)...")
    ds.summary()
    print("\nβ Statistical testing complete")
    # Optional LFC shrinkage
    if shrink_lfc:
        print("\nApplying LFC shrinkage for visualization...")
        ds.lfc_shrink()
        print("β LFC shrinkage complete")
    return ds
def save_results(ds, dds, output_dir, shrink_lfc=True):
    """Save results and intermediate objects.

    Writes the full results table, the padj < 0.05 subset, a padj-sorted
    copy, and a pickled AnnData snapshot of the dataset, then prints a
    summary to stdout.

    Args:
        ds: DeseqStats with ``results_df`` populated.
        dds: Fitted DeseqDataSet (pickled via ``to_picklable_anndata()``).
        output_dir: Directory to create and write into.
        shrink_lfc: NOTE(review): currently unused in this function; kept
            for call-site compatibility — confirm before removing.

    Returns:
        Path to the main results CSV.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    print(f"\nSaving results to {output_dir}/")
    # Save statistical results
    results_path = output_dir / "deseq2_results.csv"
    ds.results_df.to_csv(results_path)
    print(f" Saved: {results_path}")
    # Save significant genes (fixed padj < 0.05 cutoff, independent of alpha)
    significant = ds.results_df[ds.results_df.padj < 0.05]
    sig_path = output_dir / "significant_genes.csv"
    significant.to_csv(sig_path)
    print(f" Saved: {sig_path} ({len(significant)} significant genes)")
    # Save sorted results
    sorted_results = ds.results_df.sort_values("padj")
    sorted_path = output_dir / "results_sorted_by_padj.csv"
    sorted_results.to_csv(sorted_path)
    print(f" Saved: {sorted_path}")
    # Save DeseqDataSet as pickle
    dds_path = output_dir / "deseq_dataset.pkl"
    with open(dds_path, "wb") as f:
        pickle.dump(dds.to_picklable_anndata(), f)
    print(f" Saved: {dds_path}")
    # Print summary
    print(f"\n{'='*60}")
    print("ANALYSIS SUMMARY")
    print(f"{'='*60}")
    print(f"Total genes tested: {len(ds.results_df)}")
    print(f"Significant genes (padj < 0.05): {len(significant)}")
    print(f"Upregulated: {len(significant[significant.log2FoldChange > 0])}")
    print(f"Downregulated: {len(significant[significant.log2FoldChange < 0])}")
    print(f"{'='*60}")
    # Show top genes
    print("\nTop 10 most significant genes:")
    print(sorted_results.head(10)[["baseMean", "log2FoldChange", "pvalue", "padj"]])
    return results_path
def create_plots(ds, output_dir):
    """Create basic visualization plots (volcano and MA) as PNG files.

    Skips plotting with a notice when matplotlib is not installed.

    Args:
        ds: DeseqStats with ``results_df`` populated.
        output_dir: Directory where the PNG files are written.
    """
    try:
        import matplotlib.pyplot as plt
        import numpy as np
    except ImportError:
        print("\nNote: matplotlib not installed. Skipping plot generation.")
        return
    output_dir = Path(output_dir)
    results = ds.results_df.copy()
    print("\nGenerating plots...")
    # Volcano plot: log2 fold change vs -log10(padj); missing padj treated as 1
    results["-log10(padj)"] = -np.log10(results.padj.fillna(1))
    plt.figure(figsize=(10, 6))
    significant = results.padj < 0.05
    plt.scatter(
        results.loc[~significant, "log2FoldChange"],
        results.loc[~significant, "-log10(padj)"],
        alpha=0.3, s=10, c='gray', label='Not significant'
    )
    plt.scatter(
        results.loc[significant, "log2FoldChange"],
        results.loc[significant, "-log10(padj)"],
        alpha=0.6, s=10, c='red', label='Significant (padj < 0.05)'
    )
    # Reference lines: significance threshold and |LFC| = 1
    plt.axhline(-np.log10(0.05), color='blue', linestyle='--', linewidth=1, alpha=0.5)
    plt.axvline(1, color='gray', linestyle='--', linewidth=1, alpha=0.5)
    plt.axvline(-1, color='gray', linestyle='--', linewidth=1, alpha=0.5)
    plt.xlabel("Log2 Fold Change", fontsize=12)
    plt.ylabel("-Log10(Adjusted P-value)", fontsize=12)
    plt.title("Volcano Plot", fontsize=14, fontweight='bold')
    plt.legend()
    plt.tight_layout()
    volcano_path = output_dir / "volcano_plot.png"
    plt.savefig(volcano_path, dpi=300)
    plt.close()
    print(f" Saved: {volcano_path}")
    # MA plot: mean expression (log10) vs log2 fold change
    plt.figure(figsize=(10, 6))
    plt.scatter(
        np.log10(results.loc[~significant, "baseMean"] + 1),
        results.loc[~significant, "log2FoldChange"],
        alpha=0.3, s=10, c='gray', label='Not significant'
    )
    plt.scatter(
        np.log10(results.loc[significant, "baseMean"] + 1),
        results.loc[significant, "log2FoldChange"],
        alpha=0.6, s=10, c='red', label='Significant (padj < 0.05)'
    )
    plt.axhline(0, color='blue', linestyle='--', linewidth=1, alpha=0.5)
    plt.xlabel("Log10(Base Mean + 1)", fontsize=12)
    plt.ylabel("Log2 Fold Change", fontsize=12)
    plt.title("MA Plot", fontsize=14, fontweight='bold')
    plt.legend()
    plt.tight_layout()
    ma_path = output_dir / "ma_plot.png"
    plt.savefig(ma_path, dpi=300)
    plt.close()
    print(f" Saved: {ma_path}")
def main():
    """CLI entry point: parse arguments and run the full DEA workflow."""
    parser = argparse.ArgumentParser(
        description="Run PyDESeq2 differential expression analysis",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Basic analysis
  python run_deseq2_analysis.py \\
    --counts counts.csv \\
    --metadata metadata.csv \\
    --design "~condition" \\
    --contrast condition treated control \\
    --output results/
  # Multi-factor analysis
  python run_deseq2_analysis.py \\
    --counts counts.csv \\
    --metadata metadata.csv \\
    --design "~batch + condition" \\
    --contrast condition treated control \\
    --output results/ \\
    --n-cpus 4
        """
    )
    parser.add_argument("--counts", required=True, help="Path to count matrix CSV file")
    parser.add_argument("--metadata", required=True, help="Path to metadata CSV file")
    parser.add_argument("--design", required=True, help="Design formula (e.g., '~condition')")
    parser.add_argument("--contrast", nargs=3, required=True,
                        metavar=("VARIABLE", "TEST", "REFERENCE"),
                        help="Contrast specification: variable test_level reference_level")
    parser.add_argument("--output", default="results", help="Output directory (default: results)")
    parser.add_argument("--min-counts", type=int, default=10,
                        help="Minimum total counts for gene filtering (default: 10)")
    parser.add_argument("--alpha", type=float, default=0.05,
                        help="Significance threshold (default: 0.05)")
    parser.add_argument("--no-transpose", action="store_true",
                        help="Don't transpose count matrix (use if already samples Γ genes)")
    parser.add_argument("--no-shrink", action="store_true",
                        help="Skip LFC shrinkage")
    parser.add_argument("--n-cpus", type=int, default=1,
                        help="Number of CPUs for parallel processing (default: 1)")
    parser.add_argument("--plots", action="store_true",
                        help="Generate volcano and MA plots")
    args = parser.parse_args()
    # Load data
    counts_df, metadata = load_and_validate_data(
        args.counts,
        args.metadata,
        transpose_counts=not args.no_transpose
    )
    # Filter data (the contrast variable doubles as the column used to drop
    # samples with missing labels)
    condition_col = args.contrast[0]
    counts_df, metadata = filter_data(
        counts_df,
        metadata,
        min_counts=args.min_counts,
        condition_col=condition_col
    )
    # Run DESeq2
    dds = run_deseq2(counts_df, metadata, args.design, n_cpus=args.n_cpus)
    # Statistical testing
    ds = run_statistical_tests(
        dds,
        contrast=args.contrast,
        alpha=args.alpha,
        shrink_lfc=not args.no_shrink
    )
    # Save results
    save_results(ds, dds, args.output, shrink_lfc=not args.no_shrink)
    # Create plots if requested
    if args.plots:
        create_plots(ds, args.output)
    print(f"\nβ Analysis complete! Results saved to {args.output}/")


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pydeseq2/scripts/run_deseq2_analysis.py",
"license": "MIT License",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pydicom/scripts/anonymize_dicom.py | #!/usr/bin/env python3
"""
Anonymize DICOM files by removing or replacing Protected Health Information (PHI).
Usage:
python anonymize_dicom.py input.dcm output.dcm
python anonymize_dicom.py input.dcm output.dcm --patient-id ANON001
"""
import argparse
import sys
from pathlib import Path
try:
import pydicom
except ImportError:
print("Error: pydicom is not installed. Install it with: pip install pydicom")
sys.exit(1)
# Tags commonly containing PHI (Protected Health Information)
# Note: entries are pydicom keyword attribute names, matched via hasattr().
PHI_TAGS = [
    # Patient identity, demographics, and contact details
    'PatientName', 'PatientID', 'PatientBirthDate', 'PatientBirthTime',
    'PatientSex', 'PatientAge', 'PatientSize', 'PatientWeight',
    'PatientAddress', 'PatientTelephoneNumbers', 'PatientMotherBirthName',
    'MilitaryRank', 'EthnicGroup', 'Occupation', 'PatientComments',
    # Institution that produced the study
    'InstitutionName', 'InstitutionAddress', 'InstitutionalDepartmentName',
    # Physicians and operators involved in the study
    'ReferringPhysicianName', 'ReferringPhysicianAddress',
    'ReferringPhysicianTelephoneNumbers', 'ReferringPhysicianIdentificationSequence',
    'PerformingPhysicianName', 'PerformingPhysicianIdentificationSequence',
    'OperatorsName', 'PhysiciansOfRecord', 'PhysiciansOfRecordIdentificationSequence',
    'NameOfPhysiciansReadingStudy', 'PhysiciansReadingStudyIdentificationSequence',
    # Free-text descriptions and scheduling fields that may embed PHI
    'StudyDescription', 'SeriesDescription', 'AdmittingDiagnosesDescription',
    'DerivationDescription', 'RequestingPhysician', 'RequestingService',
    'RequestedProcedureDescription', 'ScheduledPerformingPhysicianName',
    'PerformedLocation', 'PerformedStationName',
]
def anonymize_dicom(input_path, output_path, patient_id='ANONYMOUS', patient_name='ANONYMOUS'):
    """
    Strip PHI from a DICOM file, writing the sanitized dataset to a new path.

    PatientName, PatientID and PatientBirthDate are overwritten with
    placeholder values; every other PHI tag present on the dataset is
    deleted outright. Instance UIDs are left untouched so referential
    integrity across files is preserved.

    Args:
        input_path: Path to input DICOM file
        output_path: Path to output anonymized DICOM file
        patient_id: Replacement patient ID (default: 'ANONYMOUS')
        patient_name: Replacement patient name (default: 'ANONYMOUS')

    Returns:
        (True, list_of_actions) on success, or (False, error_message)
        when reading/writing the file fails.
    """
    try:
        ds = pydicom.dcmread(input_path)
        # Tags that receive a stand-in value instead of being deleted.
        replacements = {
            'PatientName': patient_name,
            'PatientID': patient_id,
            'PatientBirthDate': '19000101',
        }
        actions = []
        for tag in PHI_TAGS:
            if not hasattr(ds, tag):
                continue
            if tag in replacements:
                setattr(ds, tag, replacements[tag])
                actions.append(f"{tag}: replaced with '{replacements[tag]}'")
            else:
                delattr(ds, tag)
                actions.append(f"{tag}: removed")
        # To also regenerate UIDs (breaking cross-file references), assign
        # pydicom.uid.generate_uid() to StudyInstanceUID / SeriesInstanceUID /
        # SOPInstanceUID here before saving.
        ds.save_as(output_path)
        return True, actions
    except Exception as e:
        return False, str(e)
def main():
    """CLI entry point: validate arguments and anonymize a single DICOM file."""
    parser = argparse.ArgumentParser(
        description='Anonymize DICOM files by removing or replacing PHI',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python anonymize_dicom.py input.dcm output.dcm
  python anonymize_dicom.py input.dcm output.dcm --patient-id ANON001
  python anonymize_dicom.py input.dcm output.dcm --patient-id ANON001 --patient-name "Anonymous^Patient"
        """
    )
    parser.add_argument('input', type=str, help='Input DICOM file')
    parser.add_argument('output', type=str, help='Output anonymized DICOM file')
    parser.add_argument('--patient-id', type=str, default='ANONYMOUS',
                        help='Replacement patient ID (default: ANONYMOUS)')
    parser.add_argument('--patient-name', type=str, default='ANONYMOUS',
                        help='Replacement patient name (default: ANONYMOUS)')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Show detailed anonymization information')
    args = parser.parse_args()
    # Validate input file exists
    input_path = Path(args.input)
    if not input_path.exists():
        print(f"Error: Input file '{args.input}' not found")
        sys.exit(1)
    # Anonymize the file
    print(f"Anonymizing: {args.input}")
    success, result = anonymize_dicom(args.input, args.output,
                                      args.patient_id, args.patient_name)
    if success:
        print(f"β Successfully anonymized DICOM file: {args.output}")
        if args.verbose:
            # On success, `result` is the list of per-tag actions taken.
            print(f"\nAnonymized {len(result)} fields:")
            for item in result:
                print(f" - {item}")
    else:
        # On failure, `result` is the error string.
        print(f"β Error: {result}")
        sys.exit(1)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pydicom/scripts/anonymize_dicom.py",
"license": "MIT License",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pydicom/scripts/dicom_to_image.py | #!/usr/bin/env python3
"""
Convert DICOM files to common image formats (PNG, JPEG, TIFF).
Usage:
python dicom_to_image.py input.dcm output.png
python dicom_to_image.py input.dcm output.jpg --format JPEG
python dicom_to_image.py input.dcm output.tiff --apply-windowing
"""
import argparse
import sys
from pathlib import Path
try:
import pydicom
import numpy as np
from PIL import Image
except ImportError as e:
print(f"Error: Required package not installed: {e}")
print("Install with: pip install pydicom pillow numpy")
sys.exit(1)
def apply_windowing(pixel_array, ds):
    """Apply VOI LUT windowing if available.

    Falls back to returning the array unchanged when the pydicom helper
    cannot be imported or the dataset lacks the attributes it needs.

    Args:
        pixel_array: Decoded pixel data from the dataset.
        ds: pydicom Dataset supplying the VOI LUT / windowing attributes.
    """
    try:
        from pydicom.pixel_data_handlers.util import apply_voi_lut
        return apply_voi_lut(pixel_array, ds)
    except (ImportError, AttributeError):
        return pixel_array
def normalize_to_uint8(pixel_array):
    """Rescale an array linearly into the 0-255 range and cast to uint8.

    Arrays that are already uint8 are returned untouched; a constant
    array (max == min) maps to all zeros.
    """
    if pixel_array.dtype == np.uint8:
        return pixel_array
    lo, hi = pixel_array.min(), pixel_array.max()
    if hi > lo:
        scaled = (pixel_array - lo) / (hi - lo)
    else:
        scaled = np.zeros_like(pixel_array, dtype=float)
    return (scaled * 255).astype(np.uint8)
def convert_dicom_to_image(input_path, output_path, image_format='PNG',
                           apply_window=False, frame=0):
    """
    Convert DICOM file to standard image format.

    Args:
        input_path: Path to input DICOM file
        output_path: Path to output image file
        image_format: Output format (PNG, JPEG, TIFF, etc.)
        apply_window: Whether to apply VOI LUT windowing
        frame: Frame number for multi-frame DICOM files

    Returns:
        (True, info_dict) on success, or (False, error_message) on failure.
    """
    try:
        # Read DICOM file
        ds = pydicom.dcmread(input_path)
        # Get pixel array
        pixel_array = ds.pixel_array
        # Handle multi-frame DICOM.
        # Bug fix: the old shape-based check (ndim == 3 and shape[0] > 1) also
        # matched single-frame RGB images of shape (rows, cols, 3) and sliced
        # off their first row as "frame 0". Trust the dataset's declared
        # NumberOfFrames instead (defaults to 1 when absent).
        n_frames = int(getattr(ds, 'NumberOfFrames', 1))
        if n_frames > 1:
            if frame >= n_frames:
                return False, f"Frame {frame} out of range (0-{n_frames-1})"
            pixel_array = pixel_array[frame]
            print(f"Extracting frame {frame} of {n_frames}")
        # Apply windowing if requested
        if apply_window and hasattr(ds, 'WindowCenter'):
            pixel_array = apply_windowing(pixel_array, ds)
        # Handle color images
        if len(pixel_array.shape) == 3 and pixel_array.shape[2] in [3, 4]:
            # RGB or RGBA image
            if ds.PhotometricInterpretation in ['YBR_FULL', 'YBR_FULL_422']:
                # Convert from YBR to RGB
                try:
                    from pydicom.pixel_data_handlers.util import convert_color_space
                    pixel_array = convert_color_space(pixel_array,
                                                      ds.PhotometricInterpretation, 'RGB')
                except ImportError:
                    print("Warning: Could not convert color space, using as-is")
            image = Image.fromarray(pixel_array)
        else:
            # Grayscale image - normalize to uint8
            pixel_array = normalize_to_uint8(pixel_array)
            image = Image.fromarray(pixel_array, mode='L')
        # Save image
        image.save(output_path, format=image_format)
        return True, {
            'shape': ds.pixel_array.shape,
            'modality': ds.Modality if hasattr(ds, 'Modality') else 'Unknown',
            'bits_allocated': ds.BitsAllocated if hasattr(ds, 'BitsAllocated') else 'Unknown',
        }
    except Exception as e:
        return False, str(e)
def main():
    """CLI entry point: convert one DICOM file to a standard image format."""
    parser = argparse.ArgumentParser(
        description='Convert DICOM files to common image formats',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python dicom_to_image.py input.dcm output.png
  python dicom_to_image.py input.dcm output.jpg --format JPEG
  python dicom_to_image.py input.dcm output.tiff --apply-windowing
  python dicom_to_image.py multiframe.dcm frame5.png --frame 5
        """
    )
    parser.add_argument('input', type=str, help='Input DICOM file')
    parser.add_argument('output', type=str, help='Output image file')
    parser.add_argument('--format', type=str, choices=['PNG', 'JPEG', 'TIFF', 'BMP'],
                        help='Output image format (default: inferred from extension)')
    parser.add_argument('--apply-windowing', action='store_true',
                        help='Apply VOI LUT windowing if available')
    parser.add_argument('--frame', type=int, default=0,
                        help='Frame number for multi-frame DICOM files (default: 0)')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Show detailed conversion information')
    args = parser.parse_args()
    # Validate input file exists
    input_path = Path(args.input)
    if not input_path.exists():
        print(f"Error: Input file '{args.input}' not found")
        sys.exit(1)
    # Determine output format
    if args.format:
        image_format = args.format
    else:
        # Infer from extension.
        # Bug fix: Pillow registers the format name 'JPEG', not 'JPG', so a
        # .jpg extension must be normalized — Image.save(format='JPG') fails.
        ext = Path(args.output).suffix.upper().lstrip('.')
        if ext == 'JPG':
            ext = 'JPEG'
        image_format = ext if ext in ['PNG', 'JPEG', 'TIFF', 'BMP'] else 'PNG'
    # Convert the file
    print(f"Converting: {args.input} -> {args.output}")
    success, result = convert_dicom_to_image(args.input, args.output,
                                             image_format, args.apply_windowing,
                                             args.frame)
    if success:
        print(f"β Successfully converted to {image_format}")
        if args.verbose:
            print(f"\nImage information:")
            print(f" - Shape: {result['shape']}")
            print(f" - Modality: {result['modality']}")
            print(f" - Bits Allocated: {result['bits_allocated']}")
    else:
        print(f"β Error: {result}")
        sys.exit(1)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pydicom/scripts/dicom_to_image.py",
"license": "MIT License",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pydicom/scripts/extract_metadata.py | #!/usr/bin/env python3
"""
Extract and display DICOM metadata in a readable format.
Usage:
python extract_metadata.py file.dcm
python extract_metadata.py file.dcm --output metadata.txt
python extract_metadata.py file.dcm --format json --output metadata.json
"""
import argparse
import sys
import json
from pathlib import Path
try:
import pydicom
except ImportError:
print("Error: pydicom is not installed. Install it with: pip install pydicom")
sys.exit(1)
def format_value(value):
    """Format a DICOM element value for human-readable display.

    Args:
        value: Raw element value (bytes, MultiValue, Sequence, or scalar).

    Returns:
        str: bytes are decoded as UTF-8 (undecodable bytes dropped),
        multi-values are comma-joined, sequences are summarized by their
        length, and everything else is str()-converted.
    """
    if isinstance(value, bytes):
        try:
            return value.decode('utf-8', errors='ignore')
        # Bug fix: the bare `except:` here also swallowed SystemExit and
        # KeyboardInterrupt; catch only genuine errors from the decode.
        except Exception:
            return str(value)
    elif isinstance(value, pydicom.multival.MultiValue):
        return ', '.join(str(v) for v in value)
    elif isinstance(value, pydicom.sequence.Sequence):
        return f"Sequence with {len(value)} item(s)"
    else:
        return str(value)
def extract_metadata_text(ds, show_sequences=False):
    """Extract metadata as formatted text.

    Builds a sectioned report (file meta, patient, study, series, image)
    listing only the tags actually present on the dataset.

    Args:
        ds: pydicom Dataset to summarize.
        show_sequences: If True, also dump every data element at the end.

    Returns:
        A single newline-joined report string.
    """
    lines = []
    lines.append("=" * 80)
    lines.append("DICOM Metadata")
    lines.append("=" * 80)
    # File Meta Information
    if hasattr(ds, 'file_meta'):
        lines.append("\n[File Meta Information]")
        for elem in ds.file_meta:
            lines.append(f"{elem.name:40s} {format_value(elem.value)}")
    # Patient Information
    lines.append("\n[Patient Information]")
    patient_tags = ['PatientName', 'PatientID', 'PatientBirthDate',
                    'PatientSex', 'PatientAge', 'PatientWeight']
    for tag in patient_tags:
        if hasattr(ds, tag):
            value = getattr(ds, tag)
            lines.append(f"{tag:40s} {format_value(value)}")
    # Study Information
    lines.append("\n[Study Information]")
    study_tags = ['StudyInstanceUID', 'StudyDate', 'StudyTime',
                  'StudyDescription', 'AccessionNumber', 'StudyID']
    for tag in study_tags:
        if hasattr(ds, tag):
            value = getattr(ds, tag)
            lines.append(f"{tag:40s} {format_value(value)}")
    # Series Information
    lines.append("\n[Series Information]")
    series_tags = ['SeriesInstanceUID', 'SeriesNumber', 'SeriesDescription',
                   'Modality', 'SeriesDate', 'SeriesTime']
    for tag in series_tags:
        if hasattr(ds, tag):
            value = getattr(ds, tag)
            lines.append(f"{tag:40s} {format_value(value)}")
    # Image Information
    lines.append("\n[Image Information]")
    image_tags = ['SOPInstanceUID', 'InstanceNumber', 'ImageType',
                  'Rows', 'Columns', 'BitsAllocated', 'BitsStored',
                  'PhotometricInterpretation', 'SamplesPerPixel',
                  'PixelSpacing', 'SliceThickness', 'ImagePositionPatient',
                  'ImageOrientationPatient', 'WindowCenter', 'WindowWidth']
    for tag in image_tags:
        if hasattr(ds, tag):
            value = getattr(ds, tag)
            lines.append(f"{tag:40s} {format_value(value)}")
    # All other elements.
    # NOTE(review): both branches below append the identical line, so
    # sequences are NOT actually skipped despite the inline comment —
    # format_value() merely renders them as a length summary.
    if show_sequences:
        lines.append("\n[All Elements]")
        for elem in ds:
            if elem.VR != 'SQ':  # Skip sequences for brevity
                lines.append(f"{elem.name:40s} {format_value(elem.value)}")
            else:
                lines.append(f"{elem.name:40s} {format_value(elem.value)}")
    return '\n'.join(lines)
def extract_metadata_json(ds):
    """Extract metadata as JSON.

    Sequence elements (VR == 'SQ') are omitted from the 'dataset' section.

    Args:
        ds: pydicom Dataset to serialize.

    Returns:
        A pretty-printed JSON string with 'file_meta' (when present) and
        'dataset' sections keyed by element keyword.
    """
    metadata = {}
    # File Meta Information
    if hasattr(ds, 'file_meta'):
        metadata['file_meta'] = {}
        for elem in ds.file_meta:
            metadata['file_meta'][elem.keyword] = format_value(elem.value)
    # All data elements (excluding sequences for simplicity)
    metadata['dataset'] = {}
    for elem in ds:
        if elem.VR != 'SQ':
            metadata['dataset'][elem.keyword] = format_value(elem.value)
    return json.dumps(metadata, indent=2)
def main():
    """CLI entry point: read a DICOM file and emit its metadata as text or JSON."""
    parser = argparse.ArgumentParser(
        description='Extract and display DICOM metadata',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python extract_metadata.py file.dcm
  python extract_metadata.py file.dcm --output metadata.txt
  python extract_metadata.py file.dcm --format json --output metadata.json
  python extract_metadata.py file.dcm --show-sequences
        """
    )
    parser.add_argument('input', type=str, help='Input DICOM file')
    parser.add_argument('--output', '-o', type=str, help='Output file (default: print to console)')
    parser.add_argument('--format', type=str, choices=['text', 'json'], default='text',
                        help='Output format (default: text)')
    parser.add_argument('--show-sequences', action='store_true',
                        help='Include all data elements including sequences')
    args = parser.parse_args()
    # Validate input file exists
    input_path = Path(args.input)
    if not input_path.exists():
        print(f"Error: Input file '{args.input}' not found")
        sys.exit(1)
    try:
        # Read DICOM file
        ds = pydicom.dcmread(args.input)
        # Extract metadata in the requested format
        if args.format == 'json':
            output = extract_metadata_json(ds)
        else:
            output = extract_metadata_text(ds, args.show_sequences)
        # Write or print output
        if args.output:
            with open(args.output, 'w') as f:
                f.write(output)
            print(f"β Metadata extracted to: {args.output}")
        else:
            print(output)
    except Exception as e:
        print(f"β Error: {e}")
        sys.exit(1)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pydicom/scripts/extract_metadata.py",
"license": "MIT License",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pymatgen/scripts/phase_diagram_generator.py | #!/usr/bin/env python3
"""
Phase diagram generator using Materials Project data.
This script generates phase diagrams for chemical systems using data from the
Materials Project database via pymatgen's MPRester.
Usage:
python phase_diagram_generator.py chemical_system [options]
Examples:
python phase_diagram_generator.py Li-Fe-O
python phase_diagram_generator.py Li-Fe-O --output li_fe_o_pd.png
python phase_diagram_generator.py Fe-O --show
python phase_diagram_generator.py Li-Fe-O --analyze "LiFeO2"
"""
import argparse
import os
import sys
from pathlib import Path
try:
from pymatgen.core import Composition
from pymatgen.analysis.phase_diagram import PhaseDiagram, PDPlotter
except ImportError:
print("Error: pymatgen is not installed. Install with: pip install pymatgen")
sys.exit(1)
try:
from mp_api.client import MPRester
except ImportError:
print("Error: mp-api is not installed. Install with: pip install mp-api")
sys.exit(1)
def get_api_key() -> str:
    """Return the Materials Project API key, exiting if it is not configured."""
    key = os.environ.get("MP_API_KEY")
    if key:
        return key
    # Missing or empty key: tell the user how to obtain and export one, then bail.
    print("Error: MP_API_KEY environment variable not set.")
    print("Get your API key from https://next-gen.materialsproject.org/")
    print("Then set it with: export MP_API_KEY='your_key_here'")
    sys.exit(1)
def generate_phase_diagram(chemsys: str, args):
    """
    Generate and analyze phase diagram for a chemical system.

    Fetches all entries for the system from the Materials Project, builds
    the convex-hull phase diagram, optionally analyzes one composition's
    stability, and writes/shows a plot according to `args`.

    Args:
        chemsys: Chemical system (e.g., "Li-Fe-O")
        args: Parsed command line arguments (see main())
    """
    api_key = get_api_key()
    print(f"\n{'='*60}")
    print(f"PHASE DIAGRAM: {chemsys}")
    print(f"{'='*60}\n")
    # Get entries from Materials Project
    print("Fetching data from Materials Project...")
    with MPRester(api_key) as mpr:
        entries = mpr.get_entries_in_chemsys(chemsys)
    print(f"β Retrieved {len(entries)} entries")
    if len(entries) == 0:
        print(f"Error: No entries found for chemical system {chemsys}")
        sys.exit(1)
    # Build phase diagram (convex hull over the fetched entries)
    print("Building phase diagram...")
    pd = PhaseDiagram(entries)
    # Get stable entries
    stable_entries = pd.stable_entries
    print(f"β Phase diagram constructed with {len(stable_entries)} stable phases")
    # Print stable phases
    print("\n--- STABLE PHASES ---")
    for entry in stable_entries:
        formula = entry.composition.reduced_formula
        energy = entry.energy_per_atom
        print(f" {formula:<20} E = {energy:.4f} eV/atom")
    # Analyze specific composition if requested
    if args.analyze:
        print(f"\n--- STABILITY ANALYSIS: {args.analyze} ---")
        try:
            comp = Composition(args.analyze)
            # Find the first entry matching the reduced formula
            closest_entry = None
            min_distance = float('inf')  # NOTE(review): unused; kept as-is
            for entry in entries:
                if entry.composition.reduced_formula == comp.reduced_formula:
                    closest_entry = entry
                    break
            if closest_entry:
                # Calculate energy above hull (0 means on the hull)
                e_above_hull = pd.get_e_above_hull(closest_entry)
                print(f"Energy above hull: {e_above_hull:.4f} eV/atom")
                if e_above_hull < 0.001:
                    print(f"Status: STABLE (on convex hull)")
                elif e_above_hull < 0.05:
                    print(f"Status: METASTABLE (nearly stable)")
                else:
                    print(f"Status: UNSTABLE")
                # Get decomposition into stable hull phases
                decomp = pd.get_decomposition(comp)
                print(f"\nDecomposes to:")
                for entry, fraction in decomp.items():
                    formula = entry.composition.reduced_formula
                    print(f" {fraction:.3f} Γ {formula}")
                # Get reaction energy
                rxn_energy = pd.get_equilibrium_reaction_energy(closest_entry)
                print(f"\nDecomposition energy: {rxn_energy:.4f} eV/atom")
            else:
                print(f"No entry found for composition {args.analyze}")
                print("Checking stability of hypothetical composition...")
                # Analyze hypothetical composition against the hull
                decomp = pd.get_decomposition(comp)
                print(f"\nWould decompose to:")
                for entry, fraction in decomp.items():
                    formula = entry.composition.reduced_formula
                    print(f" {fraction:.3f} Γ {formula}")
        except Exception as e:
            print(f"Error analyzing composition: {e}")
    # Get chemical potentials
    if args.chemical_potentials:
        print("\n--- CHEMICAL POTENTIALS ---")
        print("(at stability regions)")
        try:
            chempots = pd.get_all_chempots()
            for element, potentials in chempots.items():
                print(f"\n{element}:")
                for potential in potentials[:5]:  # Show first 5
                    print(f" {potential:.4f} eV")
        except Exception as e:
            print(f"Could not calculate chemical potentials: {e}")
    # Plot phase diagram
    print("\n--- GENERATING PLOT ---")
    plotter = PDPlotter(pd, show_unstable=args.show_unstable)
    if args.output:
        output_path = Path(args.output)
        # Image format is taken from the output file's extension
        plotter.write_image(str(output_path), image_format=output_path.suffix[1:])
        print(f"β Phase diagram saved to {output_path}")
    if args.show:
        print("Opening interactive plot...")
        plotter.show()
    print(f"\n{'='*60}\n")
def main():
    """Command-line entry point.

    Parses the chemical-system argument plus plotting/analysis options,
    validates the system string, and delegates to generate_phase_diagram().
    """
    parser = argparse.ArgumentParser(
        description="Generate phase diagrams using Materials Project data",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Requirements:
- Materials Project API key (set MP_API_KEY environment variable)
- mp-api package: pip install mp-api
Examples:
%(prog)s Li-Fe-O
%(prog)s Li-Fe-O --output li_fe_o_phase_diagram.png
%(prog)s Fe-O --show --analyze "Fe2O3"
%(prog)s Li-Fe-O --analyze "LiFeO2" --show-unstable
"""
    )
    parser.add_argument(
        "chemsys",
        help="Chemical system (e.g., Li-Fe-O, Fe-O)"
    )
    parser.add_argument(
        "--output", "-o",
        help="Output file for phase diagram plot (PNG, PDF, SVG)"
    )
    parser.add_argument(
        "--show", "-s",
        action="store_true",
        help="Show interactive plot"
    )
    parser.add_argument(
        "--analyze", "-a",
        help="Analyze stability of specific composition (e.g., LiFeO2)"
    )
    parser.add_argument(
        "--show-unstable",
        action="store_true",
        help="Include unstable phases in plot"
    )
    parser.add_argument(
        "--chemical-potentials",
        action="store_true",
        help="Calculate chemical potentials"
    )
    args = parser.parse_args()

    # Validate chemical system format: a phase diagram needs >= 2 elements
    elements = args.chemsys.split("-")
    if len(elements) < 2:
        print("Error: Chemical system must contain at least 2 elements")
        print("Example: Li-Fe-O")
        sys.exit(1)

    # Generate phase diagram
    generate_phase_diagram(args.chemsys, args)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pymatgen/scripts/phase_diagram_generator.py",
"license": "MIT License",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pymatgen/scripts/structure_analyzer.py | #!/usr/bin/env python3
"""
Structure analysis tool using pymatgen.
Analyzes crystal structures and provides comprehensive information including:
- Composition and formula
- Space group and symmetry
- Lattice parameters
- Density
- Coordination environment
- Bond lengths and angles
Usage:
python structure_analyzer.py structure_file [options]
Examples:
python structure_analyzer.py POSCAR
python structure_analyzer.py structure.cif --symmetry --neighbors
python structure_analyzer.py POSCAR --export json
"""
import argparse
import json
import sys
from pathlib import Path
try:
from pymatgen.core import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.local_env import CrystalNN
except ImportError:
print("Error: pymatgen is not installed. Install with: pip install pymatgen")
sys.exit(1)
def analyze_structure(struct: Structure, args) -> dict:
    """
    Perform comprehensive structure analysis and print a human-readable report.

    Args:
        struct: Pymatgen Structure object to analyze.
        args: Parsed command-line arguments (uses .symmetry, .neighbors,
            .distances flags).

    Returns:
        Dictionary with 'composition' and 'lattice' entries, plus 'symmetry'
        when --symmetry was requested and the analysis succeeded.
    """
    results = {}

    # Basic information
    print("\n" + "="*60)
    print("STRUCTURE ANALYSIS")
    print("="*60)

    print("\n--- COMPOSITION ---")
    print(f"Formula (reduced): {struct.composition.reduced_formula}")
    print(f"Formula (full): {struct.composition.formula}")
    print(f"Formula (Hill): {struct.composition.hill_formula}")
    print(f"Chemical system: {struct.composition.chemical_system}")
    print(f"Number of sites: {len(struct)}")
    print(f"Number of species: {len(struct.composition.elements)}")
    print(f"Molecular weight: {struct.composition.weight:.2f} amu")

    results['composition'] = {
        'reduced_formula': struct.composition.reduced_formula,
        'formula': struct.composition.formula,
        'hill_formula': struct.composition.hill_formula,
        'chemical_system': struct.composition.chemical_system,
        'num_sites': len(struct),
        'molecular_weight': struct.composition.weight,
    }

    # Lattice information.
    # NOTE: the unit strings here were mojibake-corrupted in the original
    # source (some with embedded control characters); restored to UTF-8.
    print("\n--- LATTICE ---")
    print(f"a = {struct.lattice.a:.4f} Å")
    print(f"b = {struct.lattice.b:.4f} Å")
    print(f"c = {struct.lattice.c:.4f} Å")
    print(f"α = {struct.lattice.alpha:.2f}°")
    print(f"β = {struct.lattice.beta:.2f}°")
    print(f"γ = {struct.lattice.gamma:.2f}°")
    print(f"Volume: {struct.volume:.2f} Å³")
    print(f"Density: {struct.density:.3f} g/cm³")

    results['lattice'] = {
        'a': struct.lattice.a,
        'b': struct.lattice.b,
        'c': struct.lattice.c,
        'alpha': struct.lattice.alpha,
        'beta': struct.lattice.beta,
        'gamma': struct.lattice.gamma,
        'volume': struct.volume,
        'density': struct.density,
    }

    # Symmetry analysis. Run the (potentially slow) spglib-backed analysis
    # ONCE and keep the symmetrized structure so the site table below can
    # reuse it — the original re-ran SpacegroupAnalyzer for every site.
    sym_struct = None
    if args.symmetry:
        print("\n--- SYMMETRY ---")
        try:
            sga = SpacegroupAnalyzer(struct)
            spacegroup_symbol = sga.get_space_group_symbol()
            spacegroup_number = sga.get_space_group_number()
            crystal_system = sga.get_crystal_system()
            point_group = sga.get_point_group_symbol()

            print(f"Space group: {spacegroup_symbol} (#{spacegroup_number})")
            print(f"Crystal system: {crystal_system}")
            print(f"Point group: {point_group}")

            # Get symmetry operations
            symm_ops = sga.get_symmetry_operations()
            print(f"Symmetry operations: {len(symm_ops)}")

            results['symmetry'] = {
                'spacegroup_symbol': spacegroup_symbol,
                'spacegroup_number': spacegroup_number,
                'crystal_system': crystal_system,
                'point_group': point_group,
                'num_symmetry_ops': len(symm_ops),
            }

            # Show equivalent sites
            sym_struct = sga.get_symmetrized_structure()
            print(f"Symmetry-equivalent site groups: {len(sym_struct.equivalent_sites)}")
        except Exception as e:
            print(f"Could not determine symmetry: {e}")

    # Site information
    print("\n--- SITES ---")
    print(f"{'Index':<6} {'Species':<10} {'Wyckoff':<10} {'Frac Coords':<30}")
    print("-" * 60)
    for i, site in enumerate(struct):
        coords_str = f"[{site.frac_coords[0]:.4f}, {site.frac_coords[1]:.4f}, {site.frac_coords[2]:.4f}]"
        # Per-site Wyckoff letter from the symmetrized structure computed
        # above. (The original printed a species string of the first
        # equivalent-site group for every row, which is not a Wyckoff label.)
        wyckoff = "N/A"
        if sym_struct is not None:
            try:
                wyckoff = sym_struct.wyckoff_letters[i]
            except Exception:
                pass  # keep "N/A" if the analyzer did not provide letters
        print(f"{i:<6} {site.species_string:<10} {wyckoff:<10} {coords_str:<30}")

    # Neighbor analysis
    if args.neighbors:
        print("\n--- COORDINATION ENVIRONMENT ---")
        try:
            cnn = CrystalNN()
            for i, site in enumerate(struct):
                neighbors = cnn.get_nn_info(struct, i)
                print(f"\nSite {i} ({site.species_string}):")
                print(f" Coordination number: {len(neighbors)}")
                # Only list individual neighbors for reasonable CNs
                if len(neighbors) > 0 and len(neighbors) <= 12:
                    print(f" Neighbors:")
                    for neighbor in neighbors:
                        neighbor_site = struct[neighbor['site_index']]
                        distance = site.distance(neighbor_site)
                        print(f" {neighbor_site.species_string} at {distance:.3f} Å")
        except Exception as e:
            print(f"Could not analyze coordination: {e}")

    # Distance matrix (small structures only — the table is O(n^2) wide)
    if args.distances and len(struct) <= 20:
        print("\n--- DISTANCE MATRIX (Å) ---")
        distance_matrix = struct.distance_matrix

        # Print header row of site indices
        print(f"{'':>4}", end="")
        for i in range(len(struct)):
            print(f"{i:>8}", end="")
        print()

        # Print matrix; '---' marks the zero diagonal
        for i in range(len(struct)):
            print(f"{i:>4}", end="")
            for j in range(len(struct)):
                if i == j:
                    print(f"{'---':>8}", end="")
                else:
                    print(f"{distance_matrix[i][j]:>8.3f}", end="")
            print()

    print("\n" + "="*60)
    return results
def main():
    """Parse CLI arguments, analyze the structure, and optionally export results.

    Exits with status 1 when the structure file cannot be read or when a
    requested YAML export fails because PyYAML is missing.
    """
    parser = argparse.ArgumentParser(
        description="Analyze crystal structures using pymatgen",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "structure_file",
        help="Structure file to analyze (CIF, POSCAR, etc.)"
    )
    parser.add_argument(
        "--symmetry", "-s",
        action="store_true",
        help="Perform symmetry analysis"
    )
    parser.add_argument(
        "--neighbors", "-n",
        action="store_true",
        help="Analyze coordination environment"
    )
    parser.add_argument(
        "--distances", "-d",
        action="store_true",
        help="Show distance matrix (for structures with ≤20 atoms)"
    )
    parser.add_argument(
        "--export", "-e",
        choices=["json", "yaml"],
        help="Export analysis results to file"
    )
    parser.add_argument(
        "--output", "-o",
        help="Output file for exported results"
    )
    args = parser.parse_args()

    # Read structure (format auto-detected by pymatgen from the filename)
    try:
        struct = Structure.from_file(args.structure_file)
    except Exception as e:
        print(f"Error reading structure file: {e}")
        sys.exit(1)

    # Analyze structure
    results = analyze_structure(struct, args)

    # Export results (default filename derives from the chosen format)
    if args.export:
        output_file = args.output or f"analysis.{args.export}"
        if args.export == "json":
            with open(output_file, "w") as f:
                json.dump(results, f, indent=2)
            print(f"\n✓ Analysis exported to {output_file}")
        elif args.export == "yaml":
            try:
                import yaml
                with open(output_file, "w") as f:
                    yaml.dump(results, f, default_flow_style=False)
                print(f"\n✓ Analysis exported to {output_file}")
            except ImportError:
                print("Error: PyYAML is not installed. Install with: pip install pyyaml")
                # Signal the failure to the calling shell (was a silent exit 0)
                sys.exit(1)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pymatgen/scripts/structure_analyzer.py",
"license": "MIT License",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pymatgen/scripts/structure_converter.py | #!/usr/bin/env python3
"""
Structure file format converter using pymatgen.
This script converts between different structure file formats supported by pymatgen.
Supports automatic format detection and batch conversion.
Usage:
python structure_converter.py input_file output_file
python structure_converter.py input_file --format cif
python structure_converter.py *.cif --output-dir ./converted --format poscar
Examples:
python structure_converter.py POSCAR structure.cif
python structure_converter.py structure.cif --format json
python structure_converter.py *.vasp --output-dir ./cif_files --format cif
"""
import argparse
import sys
from pathlib import Path
from typing import List
try:
from pymatgen.core import Structure
except ImportError:
print("Error: pymatgen is not installed. Install with: pip install pymatgen")
sys.exit(1)
def convert_structure(input_path: Path, output_path: Path = None, output_format: str = None) -> bool:
    """
    Convert a structure file to a different format.

    Args:
        input_path: Path to input structure file
        output_path: Path to output file (optional if output_format is specified)
        output_format: Target format (e.g., 'cif', 'poscar', 'json', 'yaml')

    Returns:
        True if conversion succeeded, False otherwise
    """
    try:
        # Read structure with automatic format detection
        struct = Structure.from_file(str(input_path))
        # Status glyphs restored from mojibake (✓ / ✗ in the original).
        print(f"✓ Read structure: {struct.composition.reduced_formula} from {input_path}")

        # Derive the output path from the requested format when not given
        if output_path is None and output_format:
            output_path = input_path.with_suffix(f".{output_format}")
        elif output_path is None:
            print("Error: Must specify either output_path or output_format")
            return False

        # Write structure (format inferred by pymatgen from the suffix)
        struct.to(filename=str(output_path))
        print(f"✓ Wrote structure to {output_path}")
        return True

    except Exception as e:
        print(f"✗ Error converting {input_path}: {e}")
        return False
def batch_convert(input_files: List[Path], output_dir: Path, output_format: str) -> None:
    """
    Convert multiple structure files to a common format.

    Args:
        input_files: List of input structure files
        output_dir: Directory for output files (created if absent)
        output_format: Target format for all files
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    # Each target file keeps the source stem; tally the successes.
    converted = sum(
        1
        for src in input_files
        if convert_structure(src, output_dir / f"{src.stem}.{output_format}")
    )

    print(f"\n{'='*60}")
    print(f"Conversion complete: {converted}/{len(input_files)} files converted successfully")
def main():
    """CLI entry point: dispatch between single-file and batch conversion."""
    parser = argparse.ArgumentParser(
        description="Convert structure files between different formats using pymatgen",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Supported formats:
Input: CIF, POSCAR, CONTCAR, XYZ, PDB, JSON, YAML, and many more
Output: CIF, POSCAR, XYZ, PDB, JSON, YAML, XSF, and many more
Examples:
%(prog)s POSCAR structure.cif
%(prog)s structure.cif --format json
%(prog)s *.cif --output-dir ./poscar_files --format poscar
"""
    )
    parser.add_argument(
        "input",
        nargs="+",
        help="Input structure file(s). Supports wildcards for batch conversion."
    )
    parser.add_argument(
        "output",
        nargs="?",
        help="Output structure file (ignored if --output-dir is used)"
    )
    parser.add_argument(
        "--format", "-f",
        help="Output format (e.g., cif, poscar, json, yaml, xyz)"
    )
    parser.add_argument(
        "--output-dir", "-o",
        type=Path,
        help="Output directory for batch conversion"
    )
    args = parser.parse_args()

    # Expand shell-style wildcards relative to the CWD; keep literal names
    # that match nothing so the is_file() filter below drops missing paths.
    candidates = []
    for pattern in args.input:
        hits = list(Path.cwd().glob(pattern))
        candidates.extend(hits if hits else [Path(pattern)])
    sources = [p for p in candidates if p.is_file()]

    if not sources:
        print("Error: No input files found")
        sys.exit(1)

    if args.output_dir or len(sources) > 1:
        # Batch mode: a common output format is mandatory.
        if not args.format:
            print("Error: --format is required for batch conversion")
            sys.exit(1)
        batch_convert(sources, args.output_dir or Path("./converted"), args.format)
    else:
        # Exactly one input file remains at this point.
        source = sources[0]
        if args.output:
            convert_structure(source, Path(args.output))
        elif args.format:
            convert_structure(source, output_format=args.format)
        else:
            print("Error: Must specify output file or --format")
            parser.print_help()
            sys.exit(1)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pymatgen/scripts/structure_converter.py",
"license": "MIT License",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pymc/assets/hierarchical_model_template.py | """
PyMC Hierarchical/Multilevel Model Template
This template provides a complete workflow for Bayesian hierarchical models,
useful for grouped/nested data (e.g., students within schools, patients within hospitals).
Customize the sections marked with # TODO
"""
import pymc as pm
import arviz as az
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# =============================================================================
# 1. DATA PREPARATION
# =============================================================================
# TODO: Load your data with group structure
# Example:
# df = pd.read_csv('data.csv')
# groups = df['group_id'].values
# X = df['predictor'].values
# y = df['outcome'].values

# For demonstration: Generate hierarchical data
np.random.seed(42)
n_groups = 10
n_per_group = 20
n_obs = n_groups * n_per_group

# True hierarchical structure (used only to simulate the demo data)
true_mu_alpha = 5.0
true_sigma_alpha = 2.0
true_mu_beta = 1.5
true_sigma_beta = 0.5
true_sigma = 1.0

# Per-group intercepts/slopes drawn from the population distributions
group_alphas = np.random.normal(true_mu_alpha, true_sigma_alpha, n_groups)
group_betas = np.random.normal(true_mu_beta, true_sigma_beta, n_groups)

# Generate data: observation i belongs to group groups[i]
groups = np.repeat(np.arange(n_groups), n_per_group)
X = np.random.randn(n_obs)
y = group_alphas[groups] + group_betas[groups] * X + np.random.randn(n_obs) * true_sigma

# TODO: Customize group names
group_names = [f'Group_{i}' for i in range(n_groups)]

# =============================================================================
# 2. BUILD HIERARCHICAL MODEL
# =============================================================================
print("Building hierarchical model...")

coords = {
    'groups': group_names,
    'obs': np.arange(n_obs)
}

with pm.Model(coords=coords) as hierarchical_model:
    # Data containers (for later predictions)
    X_data = pm.Data('X_data', X)
    groups_data = pm.Data('groups_data', groups)

    # Hyperpriors (population-level parameters)
    # TODO: Adjust hyperpriors based on your domain knowledge
    mu_alpha = pm.Normal('mu_alpha', mu=0, sigma=10)
    sigma_alpha = pm.HalfNormal('sigma_alpha', sigma=5)
    mu_beta = pm.Normal('mu_beta', mu=0, sigma=10)
    sigma_beta = pm.HalfNormal('sigma_beta', sigma=5)

    # Group-level parameters (non-centered parameterization)
    # Non-centered parameterization improves sampling efficiency
    alpha_offset = pm.Normal('alpha_offset', mu=0, sigma=1, dims='groups')
    alpha = pm.Deterministic('alpha', mu_alpha + sigma_alpha * alpha_offset, dims='groups')
    beta_offset = pm.Normal('beta_offset', mu=0, sigma=1, dims='groups')
    beta = pm.Deterministic('beta', mu_beta + sigma_beta * beta_offset, dims='groups')

    # Observation-level model: each observation uses its group's parameters
    mu = alpha[groups_data] + beta[groups_data] * X_data

    # Observation noise
    sigma = pm.HalfNormal('sigma', sigma=5)

    # Likelihood
    y_obs = pm.Normal('y_obs', mu=mu, sigma=sigma, observed=y, dims='obs')

print("Model built successfully!")
print(f"Groups: {n_groups}")
print(f"Observations: {n_obs}")
# =============================================================================
# 3. PRIOR PREDICTIVE CHECK
# =============================================================================
print("\nRunning prior predictive check...")

with hierarchical_model:
    prior_pred = pm.sample_prior_predictive(samples=500, random_seed=42)

# Visualize prior predictions
fig, ax = plt.subplots(figsize=(10, 6))
az.plot_ppc(prior_pred, group='prior', num_pp_samples=100, ax=ax)
ax.set_title('Prior Predictive Check')
plt.tight_layout()
plt.savefig('hierarchical_prior_check.png', dpi=300, bbox_inches='tight')
print("Prior predictive check saved to 'hierarchical_prior_check.png'")

# =============================================================================
# 4. FIT MODEL
# =============================================================================
print("\nFitting hierarchical model...")
print("(This may take a few minutes due to model complexity)")

with hierarchical_model:
    # MCMC sampling with higher target_accept for hierarchical models
    idata = pm.sample(
        draws=2000,
        tune=2000,  # More tuning for hierarchical models
        chains=4,
        target_accept=0.95,  # Higher for better convergence
        random_seed=42,
        idata_kwargs={'log_likelihood': True}  # needed for LOO/WAIC later
    )

print("Sampling complete!")

# =============================================================================
# 5. CHECK DIAGNOSTICS
# =============================================================================
print("\n" + "="*60)
print("DIAGNOSTICS")
print("="*60)

# Summary for key parameters
summary = az.summary(
    idata,
    var_names=['mu_alpha', 'sigma_alpha', 'mu_beta', 'sigma_beta', 'sigma', 'alpha', 'beta']
)
print("\nParameter Summary:")
print(summary)

# Check convergence (R-hat > 1.01 flags disagreement between chains)
bad_rhat = summary[summary['r_hat'] > 1.01]
if len(bad_rhat) > 0:
    print(f"\nβ οΈ WARNING: {len(bad_rhat)} parameters with R-hat > 1.01")
    print(bad_rhat[['r_hat']])
else:
    print("\nβ All R-hat values < 1.01 (good convergence)")

# Check effective sample size
low_ess = summary[summary['ess_bulk'] < 400]
if len(low_ess) > 0:
    print(f"\nβ οΈ WARNING: {len(low_ess)} parameters with ESS < 400")
    print(low_ess[['ess_bulk']].head(10))
else:
    print("\nβ All ESS values > 400 (sufficient samples)")

# Check divergences
divergences = idata.sample_stats.diverging.sum().item()
if divergences > 0:
    print(f"\nβ οΈ WARNING: {divergences} divergent transitions")
    print(" This is common in hierarchical models - non-centered parameterization already applied")
    print(" Consider even higher target_accept or stronger hyperpriors")
else:
    print("\nβ No divergences")

# Trace plots for hyperparameters
fig, axes = plt.subplots(5, 2, figsize=(12, 12))
az.plot_trace(
    idata,
    var_names=['mu_alpha', 'sigma_alpha', 'mu_beta', 'sigma_beta', 'sigma'],
    axes=axes
)
plt.tight_layout()
plt.savefig('hierarchical_trace_plots.png', dpi=300, bbox_inches='tight')
print("\nTrace plots saved to 'hierarchical_trace_plots.png'")
# =============================================================================
# 6. POSTERIOR PREDICTIVE CHECK
# =============================================================================
print("\nRunning posterior predictive check...")

with hierarchical_model:
    pm.sample_posterior_predictive(idata, extend_inferencedata=True, random_seed=42)

# Visualize fit
fig, ax = plt.subplots(figsize=(10, 6))
az.plot_ppc(idata, num_pp_samples=100, ax=ax)
ax.set_title('Posterior Predictive Check')
plt.tight_layout()
plt.savefig('hierarchical_posterior_check.png', dpi=300, bbox_inches='tight')
print("Posterior predictive check saved to 'hierarchical_posterior_check.png'")

# =============================================================================
# 7. ANALYZE HIERARCHICAL STRUCTURE
# =============================================================================
print("\n" + "="*60)
print("POPULATION-LEVEL (HYPERPARAMETER) ESTIMATES")
print("="*60)

# Population-level estimates
hyper_summary = summary.loc[['mu_alpha', 'sigma_alpha', 'mu_beta', 'sigma_beta', 'sigma']]
print(hyper_summary[['mean', 'sd', 'hdi_3%', 'hdi_97%']])

# Forest plot for group-level parameters
fig, axes = plt.subplots(1, 2, figsize=(14, 8))

# Group intercepts
az.plot_forest(idata, var_names=['alpha'], combined=True, ax=axes[0])
axes[0].set_title('Group-Level Intercepts (Ξ±)')
axes[0].set_yticklabels(group_names)
axes[0].axvline(idata.posterior['mu_alpha'].mean().item(), color='red', linestyle='--', label='Population mean')
axes[0].legend()

# Group slopes
az.plot_forest(idata, var_names=['beta'], combined=True, ax=axes[1])
axes[1].set_title('Group-Level Slopes (Ξ²)')
axes[1].set_yticklabels(group_names)
axes[1].axvline(idata.posterior['mu_beta'].mean().item(), color='red', linestyle='--', label='Population mean')
axes[1].legend()

plt.tight_layout()
plt.savefig('group_level_estimates.png', dpi=300, bbox_inches='tight')
print("\nGroup-level estimates saved to 'group_level_estimates.png'")

# Shrinkage visualization: partial pooling pulls each group's estimate
# toward the population mean
fig, axes = plt.subplots(1, 2, figsize=(12, 5))

# Intercepts
alpha_samples = idata.posterior['alpha'].values.reshape(-1, n_groups)
alpha_means = alpha_samples.mean(axis=0)
mu_alpha_mean = idata.posterior['mu_alpha'].mean().item()

axes[0].scatter(range(n_groups), alpha_means, alpha=0.6)
axes[0].axhline(mu_alpha_mean, color='red', linestyle='--', label='Population mean')
axes[0].set_xlabel('Group')
axes[0].set_ylabel('Intercept')
axes[0].set_title('Group Intercepts (showing shrinkage to population mean)')
axes[0].legend()

# Slopes
beta_samples = idata.posterior['beta'].values.reshape(-1, n_groups)
beta_means = beta_samples.mean(axis=0)
mu_beta_mean = idata.posterior['mu_beta'].mean().item()

axes[1].scatter(range(n_groups), beta_means, alpha=0.6)
axes[1].axhline(mu_beta_mean, color='red', linestyle='--', label='Population mean')
axes[1].set_xlabel('Group')
axes[1].set_ylabel('Slope')
axes[1].set_title('Group Slopes (showing shrinkage to population mean)')
axes[1].legend()

plt.tight_layout()
plt.savefig('shrinkage_plot.png', dpi=300, bbox_inches='tight')
print("Shrinkage plot saved to 'shrinkage_plot.png'")
# =============================================================================
# 8. PREDICTIONS FOR NEW DATA
# =============================================================================
# TODO: Specify new data
# For existing groups:
# new_X = np.array([...])
# new_groups = np.array([0, 1, 2, ...])  # Existing group indices
# For a new group (predict using population-level parameters):
# Just use mu_alpha and mu_beta
print("\n" + "="*60)
print("PREDICTIONS FOR NEW DATA")
print("="*60)

# Example: Predict for existing groups
new_X = np.array([-2, -1, 0, 1, 2])
new_groups = np.array([0, 2, 4, 6, 8])  # Select some groups

with hierarchical_model:
    # 'obs' is a model *coordinate*, not a pm.Data variable, so it must be
    # resized via the coords kwarg. The original passed it inside the data
    # dict, which makes pm.set_data fail (no shared variable named 'obs').
    pm.set_data(
        {'X_data': new_X, 'groups_data': new_groups},
        coords={'obs': np.arange(len(new_X))},
    )
    post_pred = pm.sample_posterior_predictive(
        idata.posterior,
        var_names=['y_obs'],
        random_seed=42
    )

# Posterior-predictive mean and 95% HDI per new observation
y_pred_samples = post_pred.posterior_predictive['y_obs']
y_pred_mean = y_pred_samples.mean(dim=['chain', 'draw']).values
y_pred_hdi = az.hdi(y_pred_samples, hdi_prob=0.95).values

print(f"Predictions for existing groups:")
print(f"{'Group':<10} {'X':<10} {'Mean':<15} {'95% HDI Lower':<15} {'95% HDI Upper':<15}")
print("-"*65)
for i, g in enumerate(new_groups):
    print(f"{group_names[g]:<10} {new_X[i]:<10.2f} {y_pred_mean[i]:<15.3f} {y_pred_hdi[i, 0]:<15.3f} {y_pred_hdi[i, 1]:<15.3f}")

# Predict for a new group (using population parameters)
print(f"\nPrediction for a NEW group (using population-level parameters):")
new_X_newgroup = np.array([0.0])

# Manually compute using population parameters
mu_alpha_samples = idata.posterior['mu_alpha'].values.flatten()
mu_beta_samples = idata.posterior['mu_beta'].values.flatten()
sigma_samples = idata.posterior['sigma'].values.flatten()

# Predicted mean for new group (expected value; observation noise excluded)
y_pred_newgroup = mu_alpha_samples + mu_beta_samples * new_X_newgroup[0]
y_pred_mean_newgroup = y_pred_newgroup.mean()
y_pred_hdi_newgroup = az.hdi(y_pred_newgroup, hdi_prob=0.95)

print(f"X = {new_X_newgroup[0]:.2f}")
print(f"Predicted mean: {y_pred_mean_newgroup:.3f}")
print(f"95% HDI: [{y_pred_hdi_newgroup[0]:.3f}, {y_pred_hdi_newgroup[1]:.3f}]")

# =============================================================================
# 9. SAVE RESULTS
# =============================================================================
idata.to_netcdf('hierarchical_model_results.nc')
print("\nResults saved to 'hierarchical_model_results.nc'")

summary.to_csv('hierarchical_model_summary.csv')
print("Summary saved to 'hierarchical_model_summary.csv'")

print("\n" + "="*60)
print("ANALYSIS COMPLETE")
print("="*60)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pymc/assets/hierarchical_model_template.py",
"license": "MIT License",
"lines": 264,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pymc/assets/linear_regression_template.py | """
PyMC Linear Regression Template
This template provides a complete workflow for Bayesian linear regression,
including data preparation, model building, diagnostics, and predictions.
Customize the sections marked with # TODO
"""
import pymc as pm
import arviz as az
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# =============================================================================
# 1. DATA PREPARATION
# =============================================================================
# TODO: Load your data
# Example:
# df = pd.read_csv('data.csv')
# X = df[['predictor1', 'predictor2', 'predictor3']].values
# y = df['outcome'].values

# For demonstration:
np.random.seed(42)
n_samples = 100
n_predictors = 3
X = np.random.randn(n_samples, n_predictors)
true_beta = np.array([1.5, -0.8, 2.1])
true_alpha = 0.5
y = true_alpha + X @ true_beta + np.random.randn(n_samples) * 0.5

# Standardize predictors for better sampling (z-score each column; the
# saved mean/std are reused in section 8 to scale new data identically)
X_mean = X.mean(axis=0)
X_std = X.std(axis=0)
X_scaled = (X - X_mean) / X_std
# =============================================================================
# 2. BUILD MODEL
# =============================================================================
# TODO: Customize predictor names
predictor_names = ['predictor1', 'predictor2', 'predictor3']

coords = {
    'predictors': predictor_names,
    'obs_id': np.arange(len(y))
}

with pm.Model(coords=coords) as linear_model:
    # Register the design matrix as mutable model data so that section 8's
    # pm.set_data({'X_scaled': ...}) can swap in new predictor values for
    # out-of-sample prediction. (The original used the raw numpy array,
    # which pm.set_data cannot update — the prediction step would fail.)
    X_data = pm.Data('X_scaled', X_scaled, dims=('obs_id', 'predictors'))

    # Priors
    # TODO: Adjust prior parameters based on your domain knowledge
    alpha = pm.Normal('alpha', mu=0, sigma=1)
    beta = pm.Normal('beta', mu=0, sigma=1, dims='predictors')
    sigma = pm.HalfNormal('sigma', sigma=1)

    # Linear predictor
    mu = alpha + pm.math.dot(X_data, beta)

    # Likelihood
    y_obs = pm.Normal('y_obs', mu=mu, sigma=sigma, observed=y, dims='obs_id')
# =============================================================================
# 3. PRIOR PREDICTIVE CHECK
# =============================================================================
print("Running prior predictive check...")

with linear_model:
    prior_pred = pm.sample_prior_predictive(samples=1000, random_seed=42)

# Visualize prior predictions
fig, ax = plt.subplots(figsize=(10, 6))
az.plot_ppc(prior_pred, group='prior', num_pp_samples=100, ax=ax)
ax.set_title('Prior Predictive Check')
plt.tight_layout()
plt.savefig('prior_predictive_check.png', dpi=300, bbox_inches='tight')
print("Prior predictive check saved to 'prior_predictive_check.png'")

# =============================================================================
# 4. FIT MODEL
# =============================================================================
print("\nFitting model...")

with linear_model:
    # Optional: Quick ADVI exploration
    # approx = pm.fit(n=20000, random_seed=42)

    # MCMC sampling
    idata = pm.sample(
        draws=2000,
        tune=1000,
        chains=4,
        target_accept=0.9,
        random_seed=42,
        idata_kwargs={'log_likelihood': True}  # needed for LOO/WAIC later
    )

print("Sampling complete!")

# =============================================================================
# 5. CHECK DIAGNOSTICS
# =============================================================================
print("\n" + "="*60)
print("DIAGNOSTICS")
print("="*60)

# Summary statistics
summary = az.summary(idata, var_names=['alpha', 'beta', 'sigma'])
print("\nParameter Summary:")
print(summary)

# Check convergence (R-hat > 1.01 flags disagreement between chains)
bad_rhat = summary[summary['r_hat'] > 1.01]
if len(bad_rhat) > 0:
    print(f"\nβ οΈ WARNING: {len(bad_rhat)} parameters with R-hat > 1.01")
    print(bad_rhat[['r_hat']])
else:
    print("\nβ All R-hat values < 1.01 (good convergence)")

# Check effective sample size
low_ess = summary[summary['ess_bulk'] < 400]
if len(low_ess) > 0:
    print(f"\nβ οΈ WARNING: {len(low_ess)} parameters with ESS < 400")
    print(low_ess[['ess_bulk', 'ess_tail']])
else:
    print("\nβ All ESS values > 400 (sufficient samples)")

# Check divergences
divergences = idata.sample_stats.diverging.sum().item()
if divergences > 0:
    print(f"\nβ οΈ WARNING: {divergences} divergent transitions")
    print(" Consider increasing target_accept or reparameterizing")
else:
    print("\nβ No divergences")

# Trace plots
fig, axes = plt.subplots(len(['alpha', 'beta', 'sigma']), 2, figsize=(12, 8))
az.plot_trace(idata, var_names=['alpha', 'beta', 'sigma'], axes=axes)
plt.tight_layout()
plt.savefig('trace_plots.png', dpi=300, bbox_inches='tight')
print("\nTrace plots saved to 'trace_plots.png'")

# =============================================================================
# 6. POSTERIOR PREDICTIVE CHECK
# =============================================================================
print("\nRunning posterior predictive check...")

with linear_model:
    pm.sample_posterior_predictive(idata, extend_inferencedata=True, random_seed=42)

# Visualize fit
fig, ax = plt.subplots(figsize=(10, 6))
az.plot_ppc(idata, num_pp_samples=100, ax=ax)
ax.set_title('Posterior Predictive Check')
plt.tight_layout()
plt.savefig('posterior_predictive_check.png', dpi=300, bbox_inches='tight')
print("Posterior predictive check saved to 'posterior_predictive_check.png'")

# =============================================================================
# 7. ANALYZE RESULTS
# =============================================================================
# Posterior distributions
fig, axes = plt.subplots(1, 3, figsize=(15, 4))
az.plot_posterior(idata, var_names=['alpha', 'beta', 'sigma'], ax=axes)
plt.tight_layout()
plt.savefig('posterior_distributions.png', dpi=300, bbox_inches='tight')
print("Posterior distributions saved to 'posterior_distributions.png'")

# Forest plot for coefficients
fig, ax = plt.subplots(figsize=(8, 6))
az.plot_forest(idata, var_names=['beta'], combined=True, ax=ax)
ax.set_title('Coefficient Estimates (95% HDI)')
ax.set_yticklabels(predictor_names)
plt.tight_layout()
plt.savefig('coefficient_forest_plot.png', dpi=300, bbox_inches='tight')
print("Forest plot saved to 'coefficient_forest_plot.png'")

# Print coefficient estimates (posterior mean and 95% HDI per predictor)
print("\n" + "="*60)
print("COEFFICIENT ESTIMATES")
print("="*60)
beta_samples = idata.posterior['beta']
for i, name in enumerate(predictor_names):
    mean = beta_samples.sel(predictors=name).mean().item()
    hdi = az.hdi(beta_samples.sel(predictors=name), hdi_prob=0.95)
    print(f"{name:20s}: {mean:7.3f} [95% HDI: {hdi.values[0]:7.3f}, {hdi.values[1]:7.3f}]")
# =============================================================================
# 8. PREDICTIONS FOR NEW DATA
# =============================================================================
# TODO: Provide new data for predictions
# X_new = np.array([[...], [...], ...])  # New predictor values

# For demonstration, use some test data (scaled with the training mean/std)
X_new = np.random.randn(10, n_predictors)
X_new_scaled = (X_new - X_mean) / X_std

# Update model data and predict.
# NOTE(review): this requires the design matrix to be registered in the
# model as pm.Data('X_scaled', ...). 'obs_id' is a *coordinate*, so it is
# resized via the coords kwarg — the original passed it inside the data
# dict, which makes pm.set_data fail.
with linear_model:
    pm.set_data(
        {'X_scaled': X_new_scaled},
        coords={'obs_id': np.arange(len(X_new))},
    )
    post_pred = pm.sample_posterior_predictive(
        idata.posterior,
        var_names=['y_obs'],
        random_seed=42
    )

# Extract predictions (posterior-predictive mean and 95% HDI per row)
y_pred_samples = post_pred.posterior_predictive['y_obs']
y_pred_mean = y_pred_samples.mean(dim=['chain', 'draw']).values
y_pred_hdi = az.hdi(y_pred_samples, hdi_prob=0.95).values

print("\n" + "="*60)
print("PREDICTIONS FOR NEW DATA")
print("="*60)
print(f"{'Index':<10} {'Mean':<15} {'95% HDI Lower':<15} {'95% HDI Upper':<15}")
print("-"*60)
for i in range(len(X_new)):
    print(f"{i:<10} {y_pred_mean[i]:<15.3f} {y_pred_hdi[i, 0]:<15.3f} {y_pred_hdi[i, 1]:<15.3f}")

# =============================================================================
# 9. SAVE RESULTS
# =============================================================================
# Save InferenceData
idata.to_netcdf('linear_regression_results.nc')
print("\nResults saved to 'linear_regression_results.nc'")

# Save summary to CSV
summary.to_csv('model_summary.csv')
print("Summary saved to 'model_summary.csv'")

print("\n" + "="*60)
print("ANALYSIS COMPLETE")
print("="*60)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pymc/assets/linear_regression_template.py",
"license": "MIT License",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pymc/scripts/model_comparison.py | """
PyMC Model Comparison Script
Utilities for comparing multiple Bayesian models using information criteria
and cross-validation metrics.
Usage:
from scripts.model_comparison import compare_models, plot_model_comparison
# Compare multiple models
comparison = compare_models(
{'model1': idata1, 'model2': idata2, 'model3': idata3},
ic='loo'
)
# Visualize comparison
plot_model_comparison(comparison, output_path='model_comparison.png')
"""
import arviz as az
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from typing import Dict
def compare_models(models_dict: Dict[str, az.InferenceData],
                   ic='loo',
                   scale='deviance',
                   verbose=True):
    """
    Compare multiple models using information criteria.

    Parameters
    ----------
    models_dict : dict
        Dictionary mapping model names to InferenceData objects.
        All models must have log_likelihood computed.
    ic : str
        Information criterion to use: 'loo' (default) or 'waic'
    scale : str
        Scale for IC: 'deviance' (default), 'log', or 'negative_log'
    verbose : bool
        Print detailed comparison results (default: True)

    Returns
    -------
    pd.DataFrame
        Comparison DataFrame with model rankings and statistics

    Notes
    -----
    Models must be fit with idata_kwargs={'log_likelihood': True} or
    log-likelihood computed afterwards with pm.compute_log_likelihood().
    """
    if verbose:
        print("="*70)
        print(f" " * 25 + f"MODEL COMPARISON ({ic.upper()})")
        print("="*70)
    # Perform comparison
    comparison = az.compare(models_dict, ic=ic, scale=scale)
    if verbose:
        print("\nModel Rankings:")
        print("-"*70)
        print(comparison.to_string())
        print("\n" + "="*70)
        print("INTERPRETATION GUIDE")
        print("="*70)
        print(f"β’ rank: Model ranking (0 = best)")
        print(f"β’ {ic}: {ic.upper()} estimate (lower is better)")
        print(f"β’ p_{ic}: Effective number of parameters")
        print(f"β’ d{ic}: Difference from best model")
        print(f"β’ weight: Model probability (pseudo-BMA)")
        print(f"β’ se: Standard error of {ic.upper()}")
        print(f"β’ dse: Standard error of the difference")
        print(f"β’ warning: True if model has reliability issues")
        print(f"β’ scale: {scale}")
        print("\n" + "="*70)
        print("MODEL SELECTION GUIDELINES")
        print("="*70)
        # az.compare returns rows sorted best-first, so index 0 is the winner.
        best_model = comparison.index[0]
        print(f"\nβ Best model: {best_model}")
        # Check for clear winner
        if len(comparison) > 1:
            # NOTE(review): assumes the difference column is literally f'd{ic}'
            # (e.g. 'dloo'); ArviZ releases have used 'd_loo'/'elpd_diff' —
            # confirm against the installed ArviZ version.
            delta = comparison.iloc[1][f'd{ic}']
            delta_se = comparison.iloc[1]['dse']
            # Rule-of-thumb thresholds on the IC difference to the runner-up.
            if delta > 10:
                print(f" β STRONG evidence for {best_model} (Ξ{ic} > 10)")
            elif delta > 4:
                print(f" β MODERATE evidence for {best_model} (4 < Ξ{ic} < 10)")
            elif delta > 2:
                print(f" β WEAK evidence for {best_model} (2 < Ξ{ic} < 4)")
            else:
                print(f" β Models are SIMILAR (Ξ{ic} < 2)")
                print(f" Consider model averaging or choose based on simplicity")
            # Check if difference is significant relative to SE
            if delta > 2 * delta_se:
                print(f" β Difference is > 2 SE, likely reliable")
            else:
                print(f" β Difference is < 2 SE, uncertain distinction")
        # Check for warnings
        if comparison['warning'].any():
            print("\nβ οΈ WARNING: Some models have reliability issues")
            warned_models = comparison[comparison['warning']].index.tolist()
            print(f" Models with warnings: {', '.join(warned_models)}")
            print(f" β Check Pareto-k diagnostics with check_loo_reliability()")
    return comparison
def check_loo_reliability(models_dict: Dict[str, az.InferenceData],
                          threshold=0.7,
                          verbose=True):
    """
    Check LOO-CV reliability using Pareto-k diagnostics.

    Parameters
    ----------
    models_dict : dict
        Dictionary mapping model names to InferenceData objects
    threshold : float
        Pareto-k threshold for flagging observations (default: 0.7)
    verbose : bool
        Print detailed diagnostics (default: True)

    Returns
    -------
    dict
        Dictionary with Pareto-k diagnostics for each model
        (keys: 'pareto_k', 'n_high', 'n_very_high', 'max_k', 'loo')
    """
    if verbose:
        print("="*70)
        print(" " * 20 + "LOO RELIABILITY CHECK")
        print("="*70)
    results = {}
    for name, idata in models_dict.items():
        if verbose:
            print(f"\n{name}:")
            print("-"*70)
        # Compute LOO with pointwise results (needed for per-observation k)
        loo_result = az.loo(idata, pointwise=True)
        pareto_k = loo_result.pareto_k.values
        # Count problematic observations
        n_high = (pareto_k > threshold).sum()
        n_very_high = (pareto_k > 1.0).sum()
        results[name] = {
            'pareto_k': pareto_k,
            'n_high': n_high,
            'n_very_high': n_very_high,
            'max_k': pareto_k.max(),
            'loo': loo_result
        }
        if verbose:
            # Standard Pareto-k bands: <0.5 good, <0.7 ok, <1.0 bad, >=1.0 very bad
            print(f"Pareto-k diagnostics:")
            print(f" β’ Good (k < 0.5): {(pareto_k < 0.5).sum()} observations")
            print(f" β’ OK (0.5 β€ k < 0.7): {((pareto_k >= 0.5) & (pareto_k < 0.7)).sum()} observations")
            print(f" β’ Bad (0.7 β€ k < 1.0): {((pareto_k >= 0.7) & (pareto_k < 1.0)).sum()} observations")
            print(f" β’ Very bad (k β₯ 1.0): {(pareto_k >= 1.0).sum()} observations")
            print(f" β’ Maximum k: {pareto_k.max():.3f}")
            if n_high > 0:
                print(f"\nβ οΈ {n_high} observations with k > {threshold}")
                print(" LOO approximation may be unreliable for these points")
                print(" Solutions:")
                print(" β Use WAIC instead (less sensitive to outliers)")
                print(" β Investigate influential observations")
                print(" β Consider more flexible model")
                # k > 1.0 is a strict subset of k > threshold (threshold <= 1.0)
                if n_very_high > 0:
                    print(f"\nβ οΈ {n_very_high} observations with k > 1.0")
                    print(" These points have very high influence")
                    print(" β Strongly consider K-fold CV or other validation")
            else:
                print(f"β All Pareto-k values < {threshold}")
                print(" LOO estimates are reliable")
    return results
def plot_model_comparison(comparison, output_path=None, show=True):
    """
    Render a model-comparison DataFrame as an ArviZ comparison plot.

    Parameters
    ----------
    comparison : pd.DataFrame
        Comparison DataFrame produced by az.compare()
    output_path : str, optional
        When given, the figure is also written to this path
    show : bool
        Display the figure interactively; otherwise close it (default: True)

    Returns
    -------
    matplotlib.figure.Figure
        The figure the comparison was drawn on
    """
    figure = plt.figure(figsize=(10, 6))
    az.plot_compare(comparison)
    plt.title('Model Comparison', fontsize=14, fontweight='bold')
    plt.tight_layout()
    if output_path:
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"Comparison plot saved to {output_path}")
    # Either hand control to the GUI backend or release the figure.
    plt.show() if show else plt.close()
    return figure
def model_averaging(models_dict: "Dict[str, az.InferenceData]",
                    weights=None,
                    var_name='y_obs',
                    ic='loo'):
    """
    Perform Bayesian model averaging using model weights.

    Parameters
    ----------
    models_dict : dict
        Dictionary mapping model names to InferenceData objects
    weights : array-like, optional
        Model weights. If None, computed from IC (pseudo-BMA weights)
    var_name : str
        Name of the predicted variable (default: 'y_obs')
    ic : str
        Information criterion for computing weights if not provided

    Returns
    -------
    np.ndarray
        Averaged predictions across models
    np.ndarray
        Normalized weights, one per model in models_dict (as before)

    Raises
    ------
    ValueError
        If no model has posterior_predictive samples to average.
    """
    if weights is None:
        # Derive pseudo-BMA weights from the IC comparison table.
        comparison = az.compare(models_dict, ic=ic)
        weights = comparison['weight'].values
        model_names = comparison.index.tolist()
    else:
        model_names = list(models_dict.keys())
        weights = np.array(weights, dtype=float)
        weights = weights / weights.sum()  # Normalize
    print("="*70)
    print(" " * 22 + "BAYESIAN MODEL AVERAGING")
    print("="*70)
    print("\nModel weights:")
    for name, weight in zip(model_names, weights):
        print(f" {name}: {weight:.4f} ({weight*100:.2f}%)")
    # Extract predictions, keeping each weight paired with its prediction.
    # BUG FIX: the old code zipped the full weight vector with a possibly
    # shorter prediction list, so skipping a model silently paired wrong
    # weights with the remaining models and left the weights unnormalized.
    predictions = []
    used_weights = []
    for name, weight in zip(model_names, weights):
        idata = models_dict[name]
        if 'posterior_predictive' in idata:
            predictions.append(idata.posterior_predictive[var_name].values)
            used_weights.append(weight)
        else:
            print(f"Warning: {name} missing posterior_predictive, skipping")
    if not predictions:
        # Previously this fell through and returned 0 — fail loudly instead.
        raise ValueError("No models with posterior_predictive samples to average")
    # Re-normalize over the models actually averaged so weights sum to 1.
    used_weights = np.array(used_weights)
    used_weights = used_weights / used_weights.sum()
    averaged = sum(w * p for w, p in zip(used_weights, predictions))
    print(f"\nβ Model averaging complete")
    print(f" Combined predictions using {len(predictions)} models")
    return averaged, weights
def cross_validation_comparison(models_dict: "Dict[str, az.InferenceData]",
                                k=10,
                                verbose=True):
    """
    Perform k-fold cross-validation comparison (conceptual guide).

    Note: This function provides guidance. Full k-fold CV requires
    re-fitting models k times, which should be done in the main script.

    Parameters
    ----------
    models_dict : dict
        Dictionary of model names to InferenceData (unused; kept for
        interface symmetry with the other comparison helpers)
    k : int
        Number of folds (default: 10)
    verbose : bool
        Print guidance

    Returns
    -------
    None
    """
    if verbose:
        print("="*70)
        print(" " * 20 + "K-FOLD CROSS-VALIDATION GUIDE")
        print("="*70)
        print(f"\nTo perform {k}-fold CV:")
        # The snippet below is illustrative pseudo-code, not executed here.
        # BUG FIX: sklearn's KFold takes `random_state`, not `random_seed` —
        # the previous example raised TypeError when copied verbatim.
        print("""
1. Split data into k folds
2. For each fold:
   - Train all models on k-1 folds
   - Compute log-likelihood on held-out fold
3. Sum log-likelihoods across folds for each model
4. Compare models using total CV score

Example code:
-------------
from sklearn.model_selection import KFold

kf = KFold(n_splits=k, shuffle=True, random_state=42)
cv_scores = {name: [] for name in models_dict.keys()}

for train_idx, test_idx in kf.split(X):
    X_train, X_test = X[train_idx], X[test_idx]
    y_train, y_test = y[train_idx], y[test_idx]
    for name in models_dict.keys():
        # Fit model on train set
        with create_model(name, X_train, y_train) as model:
            idata = pm.sample()
        # Compute log-likelihood on test set
        with model:
            pm.set_data({'X': X_test, 'y': y_test})
            log_lik = pm.compute_log_likelihood(idata).sum()
        cv_scores[name].append(log_lik)

# Compare total CV scores
for name, scores in cv_scores.items():
    print(f"{name}: {np.sum(scores):.2f}")
""")
        print("\nNote: K-fold CV is expensive but most reliable for model comparison")
        print(" Use when LOO has reliability issues (high Pareto-k values)")
# Example usage
if __name__ == '__main__':
    # This module is meant to be imported; running it only prints a usage guide.
    print("This script provides model comparison utilities for PyMC.")
    print("\nExample usage:")
    print("""
import pymc as pm
from scripts.model_comparison import compare_models, check_loo_reliability

# Fit multiple models (must include log_likelihood)
with pm.Model() as model1:
    # ... define model 1 ...
    idata1 = pm.sample(idata_kwargs={'log_likelihood': True})

with pm.Model() as model2:
    # ... define model 2 ...
    idata2 = pm.sample(idata_kwargs={'log_likelihood': True})

# Compare models
models = {'Simple': idata1, 'Complex': idata2}
comparison = compare_models(models, ic='loo')

# Check reliability
reliability = check_loo_reliability(models)

# Visualize
plot_model_comparison(comparison, output_path='comparison.png')

# Model averaging
averaged_pred, weights = model_averaging(models, var_name='y_obs')
""")
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pymc/scripts/model_comparison.py",
"license": "MIT License",
"lines": 317,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pymc/scripts/model_diagnostics.py | """
PyMC Model Diagnostics Script
Comprehensive diagnostic checks for PyMC models.
Run this after sampling to validate results before interpretation.
Usage:
from scripts.model_diagnostics import check_diagnostics, create_diagnostic_report
# Quick check
check_diagnostics(idata)
# Full report with plots
create_diagnostic_report(idata, var_names=['alpha', 'beta', 'sigma'], output_dir='diagnostics/')
"""
import arviz as az
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
def check_diagnostics(idata, var_names=None, ess_threshold=400, rhat_threshold=1.01):
    """
    Perform comprehensive diagnostic checks on MCMC samples.

    Parameters
    ----------
    idata : arviz.InferenceData
        InferenceData object from pm.sample()
    var_names : list, optional
        Variables to check. If None, checks all model parameters
    ess_threshold : int
        Minimum acceptable effective sample size (default: 400)
    rhat_threshold : float
        Maximum acceptable R-hat value (default: 1.01)

    Returns
    -------
    dict
        Dictionary with diagnostic results and flags
        (keys: 'summary', 'has_issues', 'issues', optionally 'n_divergences')
    """
    print("="*70)
    print(" " * 20 + "MCMC DIAGNOSTICS REPORT")
    print("="*70)
    # Get summary statistics
    summary = az.summary(idata, var_names=var_names)
    results = {
        'summary': summary,
        'has_issues': False,
        'issues': []
    }
    # 1. Check R-hat (convergence)
    print("\n1. CONVERGENCE CHECK (R-hat)")
    print("-" * 70)
    bad_rhat = summary[summary['r_hat'] > rhat_threshold]
    if len(bad_rhat) > 0:
        print(f"β οΈ WARNING: {len(bad_rhat)} parameters have R-hat > {rhat_threshold}")
        print("\nTop 10 worst R-hat values:")
        print(bad_rhat[['r_hat']].sort_values('r_hat', ascending=False).head(10))
        print("\nβ οΈ Chains may not have converged!")
        print(" β Run longer chains or check for multimodality")
        results['has_issues'] = True
        results['issues'].append('convergence')
    else:
        print(f"β All R-hat values β€ {rhat_threshold}")
        print(" Chains have converged successfully")
    # 2. Check Effective Sample Size (bulk and tail separately)
    print("\n2. EFFECTIVE SAMPLE SIZE (ESS)")
    print("-" * 70)
    low_ess_bulk = summary[summary['ess_bulk'] < ess_threshold]
    low_ess_tail = summary[summary['ess_tail'] < ess_threshold]
    if len(low_ess_bulk) > 0 or len(low_ess_tail) > 0:
        print(f"β οΈ WARNING: Some parameters have ESS < {ess_threshold}")
        if len(low_ess_bulk) > 0:
            print(f"\n Bulk ESS issues ({len(low_ess_bulk)} parameters):")
            print(low_ess_bulk[['ess_bulk']].sort_values('ess_bulk').head(10))
        if len(low_ess_tail) > 0:
            print(f"\n Tail ESS issues ({len(low_ess_tail)} parameters):")
            print(low_ess_tail[['ess_tail']].sort_values('ess_tail').head(10))
        print("\nβ οΈ High autocorrelation detected!")
        print(" β Sample more draws or reparameterize to reduce correlation")
        results['has_issues'] = True
        results['issues'].append('low_ess')
    else:
        print(f"β All ESS values β₯ {ess_threshold}")
        print(" Sufficient effective samples")
    # 3. Check Divergences (biased exploration of the posterior)
    print("\n3. DIVERGENT TRANSITIONS")
    print("-" * 70)
    divergences = idata.sample_stats.diverging.sum().item()
    if divergences > 0:
        total_samples = len(idata.posterior.draw) * len(idata.posterior.chain)
        divergence_rate = divergences / total_samples * 100
        print(f"β οΈ WARNING: {divergences} divergent transitions ({divergence_rate:.2f}% of samples)")
        print("\n Divergences indicate biased sampling in difficult posterior regions")
        print(" Solutions:")
        print(" β Increase target_accept (e.g., target_accept=0.95 or 0.99)")
        print(" β Use non-centered parameterization for hierarchical models")
        print(" β Add stronger/more informative priors")
        print(" β Check for model misspecification")
        results['has_issues'] = True
        results['issues'].append('divergences')
        results['n_divergences'] = divergences
    else:
        print("β No divergences detected")
        print(" NUTS explored the posterior successfully")
    # 4. Check Tree Depth (sampler efficiency, not validity)
    print("\n4. TREE DEPTH")
    print("-" * 70)
    tree_depth = idata.sample_stats.tree_depth
    max_tree_depth = tree_depth.max().item()
    # Typical max_treedepth is 10 (default in PyMC)
    hits_max = (tree_depth >= 10).sum().item()
    if hits_max > 0:
        total_samples = len(idata.posterior.draw) * len(idata.posterior.chain)
        hit_rate = hits_max / total_samples * 100
        print(f"β οΈ WARNING: Hit maximum tree depth {hits_max} times ({hit_rate:.2f}% of samples)")
        print("\n Model may be difficult to explore efficiently")
        print(" Solutions:")
        print(" β Reparameterize model to improve geometry")
        print(" β Increase max_treedepth (if necessary)")
        # NOTE(review): 'max_treedepth' is recorded in issues without setting
        # has_issues=True, so the summary still reports "all passed" —
        # possibly deliberate (efficiency, not correctness); confirm intent.
        results['issues'].append('max_treedepth')
    else:
        print(f"β No maximum tree depth issues")
        print(f" Maximum tree depth reached: {max_tree_depth}")
    # 5. Check Energy (if available)
    if hasattr(idata.sample_stats, 'energy'):
        print("\n5. ENERGY DIAGNOSTICS")
        print("-" * 70)
        print("β Energy statistics available")
        print(" Use az.plot_energy(idata) to visualize energy transitions")
        print(" Good separation indicates healthy HMC sampling")
    # Summary: only checks 1-3 flip has_issues (see NOTE above).
    print("\n" + "="*70)
    print("SUMMARY")
    print("="*70)
    if not results['has_issues']:
        print("β All diagnostics passed!")
        print(" Your model has sampled successfully.")
        print(" Proceed with inference and interpretation.")
    else:
        print("β οΈ Some diagnostics failed!")
        print(f" Issues found: {', '.join(results['issues'])}")
        print(" Review warnings above and consider re-running with adjustments.")
    print("="*70)
    return results
def create_diagnostic_report(idata, var_names=None, output_dir='diagnostics/', show=False):
    """
    Create comprehensive diagnostic report with plots.

    Parameters
    ----------
    idata : arviz.InferenceData
        InferenceData object from pm.sample()
    var_names : list, optional
        Variables to plot. If None, uses all model parameters
    output_dir : str
        Directory to save diagnostic plots
    show : bool
        Whether to display plots (default: False, just save)

    Returns
    -------
    dict
        Diagnostic results from check_diagnostics
    """
    # Create output directory
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)
    # Run the textual diagnostic checks first.
    results = check_diagnostics(idata, var_names=var_names)
    print(f"\nGenerating diagnostic plots in '{output_dir}'...")

    def _finish(filename, label):
        # Save the current (ArviZ-created) figure, then show or release it.
        plt.tight_layout()
        plt.savefig(output_path / filename, dpi=300, bbox_inches='tight')
        print(f" β Saved {label}")
        if show:
            plt.show()
        else:
            plt.close()

    # 1. Trace plots.
    # BUG FIX: let ArviZ size the axes grid itself. The previous code
    # pre-allocated a hard-coded 5x2 grid whenever var_names was None,
    # which fails for models whose parameter count is not 5.
    az.plot_trace(idata, var_names=var_names)
    _finish('trace_plots.png', 'trace plots')
    # 2. Rank plots (check mixing). ArviZ creates its own figure; the old
    # bare plt.figure(...) calls only produced leaked empty figures.
    az.plot_rank(idata, var_names=var_names)
    _finish('rank_plots.png', 'rank plots')
    # 3. Autocorrelation plots
    az.plot_autocorr(idata, var_names=var_names, combined=True)
    _finish('autocorr_plots.png', 'autocorrelation plots')
    # 4. Energy plot (only for samplers that record energy, e.g. NUTS)
    if hasattr(idata.sample_stats, 'energy'):
        az.plot_energy(idata)
        _finish('energy_plot.png', 'energy plot')
    # 5. ESS evolution plot
    az.plot_ess(idata, var_names=var_names, kind='evolution')
    _finish('ess_evolution.png', 'ESS evolution plot')
    # Persist the numeric summary alongside the figures.
    results['summary'].to_csv(output_path / 'summary_statistics.csv')
    print(f" β Saved summary statistics")
    print(f"\nDiagnostic report complete! Files saved in '{output_dir}'")
    return results
def compare_prior_posterior(idata, prior_idata, var_names=None, output_path=None):
    """
    Overlay prior and posterior marginal distributions, one axis per variable.

    Parameters
    ----------
    idata : arviz.InferenceData
        InferenceData with posterior samples
    prior_idata : arviz.InferenceData
        InferenceData with prior samples
    var_names : list, optional
        Variables to compare; defaults to the first three posterior variables
    output_path : str, optional
        If provided, save plot to this path instead of showing it

    Returns
    -------
    None
    """
    # Which variables to draw: caller's list, or the first three by default.
    selected = var_names if var_names else list(idata.posterior.data_vars)[:3]
    n_rows = len(var_names) if var_names else 3
    fig, axes = plt.subplots(n_rows, 1, figsize=(10, 8))
    # A single-row figure yields a bare Axes; normalize to a sequence.
    if not isinstance(axes, np.ndarray):
        axes = [axes]
    for ax, var in zip(axes, selected):
        # Prior in blue, posterior in green, both translucent for overlap.
        az.plot_dist(
            prior_idata.prior[var].values.flatten(),
            label='Prior',
            ax=ax,
            color='blue',
            alpha=0.3
        )
        az.plot_dist(
            idata.posterior[var].values.flatten(),
            label='Posterior',
            ax=ax,
            color='green',
            alpha=0.3
        )
        ax.set_title(f'{var}: Prior vs Posterior')
        ax.legend()
    plt.tight_layout()
    if output_path:
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"Prior-posterior comparison saved to {output_path}")
    else:
        plt.show()
# Example usage
if __name__ == '__main__':
    # This module is meant to be imported; running it only prints a usage guide.
    print("This script provides diagnostic functions for PyMC models.")
    print("\nExample usage:")
    print("""
import pymc as pm
from scripts.model_diagnostics import check_diagnostics, create_diagnostic_report

# After sampling
with pm.Model() as model:
    # ... define model ...
    idata = pm.sample()

# Quick diagnostic check
results = check_diagnostics(idata)

# Full diagnostic report with plots
create_diagnostic_report(
    idata,
    var_names=['alpha', 'beta', 'sigma'],
    output_dir='my_diagnostics/'
)
""")
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pymc/scripts/model_diagnostics.py",
"license": "MIT License",
"lines": 291,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pymoo/scripts/custom_problem_example.py | """
Custom problem definition example using pymoo.
This script demonstrates how to define a custom optimization problem
and solve it using pymoo.
"""
from pymoo.core.problem import ElementwiseProblem
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.optimize import minimize
from pymoo.visualization.scatter import Scatter
import numpy as np
class MyBiObjectiveProblem(ElementwiseProblem):
    """
    Custom bi-objective optimization problem.

    Minimize:
        f1(x) = x1^2 + x2^2            (squared distance from the origin)
        f2(x) = (x1-1)^2 + (x2-1)^2    (squared distance from (1, 1))

    Subject to:
        0 <= x1 <= 5
        0 <= x2 <= 5
    """

    def __init__(self):
        # Two decision variables, two objectives, no constraints of any kind.
        super().__init__(
            n_var=2,
            n_obj=2,
            n_ieq_constr=0,
            n_eq_constr=0,
            xl=np.array([0, 0]),
            xu=np.array([5, 5]),
        )

    def _evaluate(self, x, out, *args, **kwargs):
        """Compute both objectives for a single candidate solution."""
        x1, x2 = x[0], x[1]
        out["F"] = [
            x1 * x1 + x2 * x2,              # distance^2 to the origin
            (x1 - 1) ** 2 + (x2 - 1) ** 2,  # distance^2 to (1, 1)
        ]
class ConstrainedProblem(ElementwiseProblem):
    """
    Custom constrained bi-objective problem.

    Minimize:
        f1(x) = x1
        f2(x) = (1 + x2) / x1

    Subject to:
        x2 + 9*x1 >= 6    (encoded as g1 <= 0)
        -x2 + 9*x1 >= 1   (encoded as g2 <= 0)
        0.1 <= x1 <= 1
        0 <= x2 <= 5
    """

    def __init__(self):
        # Two variables, two objectives, two inequality constraints.
        super().__init__(
            n_var=2,
            n_obj=2,
            n_ieq_constr=2,
            xl=np.array([0.1, 0.0]),
            xu=np.array([1.0, 5.0]),
        )

    def _evaluate(self, x, out, *args, **kwargs):
        """Compute objectives and inequality constraints for one solution."""
        x1, x2 = x[0], x[1]
        out["F"] = [x1, (1 + x2) / x1]
        # pymoo expects g(x) <= 0, so each ">=" constraint is negated:
        #   x2 + 9*x1 >= 6  ->  6 - (x2 + 9*x1) <= 0
        #   -x2 + 9*x1 >= 1 ->  1 - (9*x1 - x2) <= 0
        out["G"] = [6 - (x2 + 9 * x1), 1 - (9 * x1 - x2)]
def solve_custom_problem():
    """Optimize the unconstrained custom problem and plot its Pareto front."""
    print("="*60)
    print("CUSTOM PROBLEM - UNCONSTRAINED")
    print("="*60)
    # NSGA-II with a population of 100, run for 200 generations (seeded).
    res = minimize(
        MyBiObjectiveProblem(),
        NSGA2(pop_size=100),
        ('n_gen', 200),
        seed=1,
        verbose=False
    )
    print(f"Number of solutions: {len(res.F)}")
    print(f"Objective space range:")
    print(f" f1: [{res.F[:, 0].min():.3f}, {res.F[:, 0].max():.3f}]")
    print(f" f2: [{res.F[:, 1].min():.3f}, {res.F[:, 1].max():.3f}]")
    # Scatter view of the obtained non-dominated set.
    scatter = Scatter(title="Custom Bi-Objective Problem")
    scatter.add(res.F, color="blue", alpha=0.7)
    scatter.show()
    return res
def solve_constrained_problem():
    """Optimize the constrained custom problem and plot feasible solutions."""
    print("\n" + "="*60)
    print("CUSTOM PROBLEM - CONSTRAINED")
    print("="*60)
    res = minimize(
        ConstrainedProblem(),
        NSGA2(pop_size=100),
        ('n_gen', 200),
        seed=1,
        verbose=False
    )
    # A solution is feasible when its total constraint violation is zero.
    feasible = res.CV[:, 0] == 0
    print(f"Total solutions: {len(res.F)}")
    print(f"Feasible solutions: {np.sum(feasible)}")
    print(f"Infeasible solutions: {np.sum(~feasible)}")
    if np.any(feasible):
        F_ok = res.F[feasible]
        print(f"\nFeasible objective space range:")
        print(f" f1: [{F_ok[:, 0].min():.3f}, {F_ok[:, 0].max():.3f}]")
        print(f" f2: [{F_ok[:, 1].min():.3f}, {F_ok[:, 1].max():.3f}]")
        # Feasible points in green; infeasible (if any) faint and red.
        plot = Scatter(title="Constrained Problem - Feasible Solutions")
        plot.add(F_ok, color="green", alpha=0.7, label="Feasible")
        if np.any(~feasible):
            plot.add(res.F[~feasible], color="red", alpha=0.3, s=10, label="Infeasible")
        plot.show()
    return res
if __name__ == "__main__":
    # Run both examples when executed as a script.
    result1 = solve_custom_problem()
    result2 = solve_constrained_problem()
    print("\n" + "="*60)
    print("EXAMPLES COMPLETED")
    print("="*60)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pymoo/scripts/custom_problem_example.py",
"license": "MIT License",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pymoo/scripts/decision_making_example.py | """
Multi-criteria decision making example using pymoo.
This script demonstrates how to select preferred solutions from
a Pareto front using various MCDM methods.
"""
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.problems import get_problem
from pymoo.optimize import minimize
from pymoo.mcdm.pseudo_weights import PseudoWeights
from pymoo.visualization.scatter import Scatter
from pymoo.visualization.petal import Petal
import numpy as np
def run_optimization_for_decision_making():
    """Solve ZDT1 with NSGA-II and return (problem, result) for MCDM use."""
    print("Running optimization to obtain Pareto front...")
    zdt1 = get_problem("zdt1")
    # Seeded NSGA-II run: 100 individuals, 200 generations.
    res = minimize(
        zdt1,
        NSGA2(pop_size=100),
        ('n_gen', 200),
        seed=1,
        verbose=False
    )
    print(f"Obtained {len(res.F)} solutions in Pareto front\n")
    return zdt1, res
def apply_pseudo_weights(result, weights):
    """Select one Pareto solution via the pseudo-weights MCDM method."""
    print(f"Applying Pseudo-Weights with weights: {weights}")
    # Min-max normalize each objective to [0, 1] before applying MCDM.
    f_lo, f_hi = result.F.min(axis=0), result.F.max(axis=0)
    F_norm = (result.F - f_lo) / (f_hi - f_lo)
    chosen = PseudoWeights(weights).do(F_norm)
    x_sel = result.X[chosen]
    f_sel = result.F[chosen]
    print(f"Selected solution (decision variables): {x_sel}")
    print(f"Selected solution (objectives): {f_sel}")
    print()
    return chosen, x_sel, f_sel
def compare_different_preferences(result):
    """Select solutions under several preference weights and plot them."""
    print("="*60)
    print("COMPARING DIFFERENT PREFERENCE WEIGHTS")
    print("="*60 + "\n")
    # Three preference scenarios: balanced, f1-leaning, f2-leaning.
    scenarios = [
        ("Equal preference", np.array([0.5, 0.5])),
        ("Prefer f1", np.array([0.8, 0.2])),
        ("Prefer f2", np.array([0.2, 0.8])),
    ]
    selections = {}
    for label, w in scenarios:
        print(f"Scenario: {label}")
        idx, x, f = apply_pseudo_weights(result, w)
        selections[label] = (idx, f)
    # Gray background front with one starred marker per scenario.
    plot = Scatter(title="Decision Making - Different Preferences")
    plot.add(result.F, color="lightgray", alpha=0.5, s=20, label="Pareto Front")
    for (label, (idx, f)), hue in zip(selections.items(), ["red", "blue", "green"]):
        plot.add(f, color=hue, s=100, marker="*", label=label)
    plot.show()
    return selections
def visualize_selected_solutions(result, selections):
    """Draw the selected solutions side by side on a petal diagram."""
    # Objective bounds give the petal plot its normalization range.
    lo = result.F.min(axis=0)
    hi = result.F.max(axis=0)
    petal = Petal(
        title="Selected Solutions Comparison",
        bounds=[lo, hi],
        labels=["f1", "f2"]
    )
    for (label, (idx, f)), hue in zip(selections.items(), ["red", "blue", "green"]):
        petal.add(f, color=hue, label=label)
    petal.show()
def find_extreme_solutions(result):
    """Report the solutions that individually minimize each objective."""
    print("\n" + "="*60)
    print("EXTREME SOLUTIONS")
    print("="*60 + "\n")
    # Index of the minimizer of objective 1.
    idx_f1 = np.argmin(result.F[:, 0])
    print(f"Best f1 solution: {result.F[idx_f1]}")
    print(f" Decision variables: {result.X[idx_f1]}\n")
    # Index of the minimizer of objective 2.
    idx_f2 = np.argmin(result.F[:, 1])
    print(f"Best f2 solution: {result.F[idx_f2]}")
    print(f" Decision variables: {result.X[idx_f2]}\n")
    return idx_f1, idx_f2
def main():
    """Run the full decision-making walkthrough end to end."""
    # Pipeline: optimize -> extremes -> weighted selections -> petal view.
    problem, result = run_optimization_for_decision_making()
    find_extreme_solutions(result)
    selections = compare_different_preferences(result)
    visualize_selected_solutions(result, selections)
    print("="*60)
    print("DECISION MAKING EXAMPLE COMPLETED")
    print("="*60)
    print("\nKey Takeaways:")
    for takeaway in (
        "1. Different weights lead to different selected solutions",
        "2. Higher weight on an objective selects solutions better in that objective",
        "3. Visualization helps understand trade-offs",
        "4. MCDM methods help formalize decision maker preferences",
    ):
        print(takeaway)
if __name__ == "__main__":
    # Run the demo only when executed directly as a script.
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pymoo/scripts/decision_making_example.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pymoo/scripts/many_objective_example.py | """
Many-objective optimization example using pymoo.
This script demonstrates many-objective optimization (4+ objectives)
using NSGA-III on the DTLZ2 benchmark problem.
"""
from pymoo.algorithms.moo.nsga3 import NSGA3
from pymoo.problems import get_problem
from pymoo.optimize import minimize
from pymoo.util.ref_dirs import get_reference_directions
from pymoo.visualization.pcp import PCP
import numpy as np
def run_many_objective_optimization():
    """Solve 5-objective DTLZ2 with NSGA-III and show a parallel-coordinate plot."""
    # Define the problem - DTLZ2 with 5 objectives
    n_obj = 5
    problem = get_problem("dtlz2", n_obj=n_obj)
    # Das-Dennis reference directions give NSGA-III a uniform guide set.
    ref_dirs = get_reference_directions("das-dennis", n_obj, n_partitions=12)
    print(f"Number of reference directions: {len(ref_dirs)}")
    algorithm = NSGA3(ref_dirs=ref_dirs, eliminate_duplicates=True)
    # Seeded run over 300 generations with per-generation output.
    result = minimize(problem, algorithm, ('n_gen', 300), seed=1, verbose=True)
    print("\n" + "="*60)
    print("MANY-OBJECTIVE OPTIMIZATION RESULTS")
    print("="*60)
    print(f"Number of objectives: {n_obj}")
    print(f"Number of solutions: {len(result.F)}")
    print(f"Number of generations: {result.algorithm.n_gen}")
    print(f"Number of function evaluations: {result.algorithm.evaluator.n_eval}")
    print("\nObjective space statistics:")
    print(f"Minimum values per objective: {result.F.min(axis=0)}")
    print(f"Maximum values per objective: {result.F.max(axis=0)}")
    print("="*60)
    # With 5 objectives a scatter plot is useless; use parallel coordinates.
    pcp = PCP(
        title=f"DTLZ2 ({n_obj} objectives) - NSGA-III Results",
        labels=[f"f{i+1}" for i in range(n_obj)],
        normalize_each_axis=True
    )
    pcp.add(result.F, alpha=0.3, color="blue")
    pcp.show()
    return result
if __name__ == "__main__":
    # Run the demo only when executed directly as a script.
    result = run_many_objective_optimization()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pymoo/scripts/many_objective_example.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pymoo/scripts/multi_objective_example.py | """
Multi-objective optimization example using pymoo.
This script demonstrates multi-objective optimization using
NSGA-II on the ZDT1 benchmark problem.
"""
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.problems import get_problem
from pymoo.optimize import minimize
from pymoo.visualization.scatter import Scatter
import matplotlib.pyplot as plt
def run_multi_objective_optimization():
    """Solve the bi-objective ZDT1 benchmark with NSGA-II and plot the front.

    Returns:
        The pymoo result object (Pareto set in .X, front in .F).
    """
    # ZDT1: classic two-objective test problem with a known Pareto front.
    problem = get_problem("zdt1")

    # NSGA-II with a population of 100; duplicate individuals are discarded.
    algorithm = NSGA2(pop_size=100, eliminate_duplicates=True)

    # Fixed seed for reproducibility; stop after 200 generations.
    res = minimize(problem, algorithm, ('n_gen', 200), seed=1, verbose=True)

    banner = "=" * 60
    print("\n" + banner)
    print("MULTI-OBJECTIVE OPTIMIZATION RESULTS")
    print(banner)
    print(f"Number of solutions in Pareto front: {len(res.F)}")
    print(f"Number of generations: {res.algorithm.n_gen}")
    print(f"Number of function evaluations: {res.algorithm.evaluator.n_eval}")
    print("\nFirst 5 solutions (decision variables):")
    print(res.X[:5])
    print("\nFirst 5 solutions (objective values):")
    print(res.F[:5])
    print(banner)

    # Scatter plot of the obtained front against the analytical one.
    plot = Scatter(title="ZDT1 - NSGA-II Results")
    plot.add(res.F, color="red", alpha=0.7, s=30, label="Obtained Pareto Front")
    pf = problem.pareto_front()
    plot.add(pf, color="black", alpha=0.3, label="True Pareto Front")
    plot.show()
    return res
# Entry point: run the multi-objective demo when executed as a script.
if __name__ == "__main__":
    result = run_multi_objective_optimization()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pymoo/scripts/multi_objective_example.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pymoo/scripts/single_objective_example.py | """
Single-objective optimization example using pymoo.
This script demonstrates basic single-objective optimization
using the Genetic Algorithm on the Sphere function.
"""
from pymoo.algorithms.soo.nonconvex.ga import GA
from pymoo.problems import get_problem
from pymoo.optimize import minimize
from pymoo.operators.crossover.sbx import SBX
from pymoo.operators.mutation.pm import PM
from pymoo.operators.sampling.rnd import FloatRandomSampling
from pymoo.termination import get_termination
import numpy as np
def run_single_objective_optimization():
    """Minimize the 10-variable Sphere function with a genetic algorithm.

    Returns:
        The pymoo result object (best solution in .X, objective in .F).
    """
    # Sphere: convex sum-of-squares benchmark with its optimum at the origin.
    problem = get_problem("sphere", n_var=10)

    # GA with SBX crossover and polynomial mutation over a float encoding.
    algorithm = GA(
        pop_size=100,
        sampling=FloatRandomSampling(),
        crossover=SBX(prob=0.9, eta=15),
        mutation=PM(eta=20),
        eliminate_duplicates=True,
    )

    # Fixed budget: stop after 100 generations.
    termination = get_termination("n_gen", 100)

    # Seeded run for reproducibility; verbose prints per-generation progress.
    res = minimize(problem, algorithm, termination, seed=1, verbose=True)

    banner = "=" * 60
    print("\n" + banner)
    print("OPTIMIZATION RESULTS")
    print(banner)
    print(f"Best solution: {res.X}")
    print(f"Best objective value: {res.F[0]:.6f}")
    print(f"Number of generations: {res.algorithm.n_gen}")
    print(f"Number of function evaluations: {res.algorithm.evaluator.n_eval}")
    print(banner)
    return res
# Entry point: run the single-objective demo when executed as a script.
if __name__ == "__main__":
    result = run_single_objective_optimization()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pymoo/scripts/single_objective_example.py",
"license": "MIT License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pytdc/scripts/benchmark_evaluation.py | #!/usr/bin/env python3
"""
TDC Benchmark Group Evaluation Template
This script demonstrates how to use TDC benchmark groups for systematic
model evaluation following the required 5-seed protocol.
Usage:
python benchmark_evaluation.py
"""
from tdc.benchmark_group import admet_group
from tdc import Evaluator
import numpy as np
import pandas as pd
def load_benchmark_group():
    """Load the TDC ADMET benchmark group and list its datasets.

    Returns:
        The initialized admet_group instance.
    """
    header = "=" * 60
    print(header)
    print("Loading ADMET Benchmark Group")
    print(header)

    # Data files are downloaded/cached under data/ on first use.
    group = admet_group(path='data/')

    names = group.dataset_names
    print("\nAvailable benchmarks in ADMET group:")
    print(f"Total: {len(names)} datasets")
    # Show at most the first ten dataset names to keep the listing short.
    for i, name in enumerate(names[:10], 1):
        print(f"  {i}. {name}")
    if len(names) > 10:
        print(f"  ... and {len(names) - 10} more")
    return group
def single_dataset_evaluation(group, dataset_name='Caco2_Wang'):
    """
    Example: Evaluate on a single dataset with the required 5-seed protocol.

    Args:
        group: TDC benchmark group (e.g. from admet_group).
        dataset_name: Name of a dataset within the group.

    Returns:
        Tuple of (per-seed predictions dict, group evaluation results).
    """
    print("\n" + "=" * 60)
    print(f"Example 1: Single Dataset Evaluation ({dataset_name})")
    print("=" * 60)
    # Get dataset benchmarks (one train/valid/test split per seed)
    benchmark = group.get(dataset_name)
    print(f"\nBenchmark structure:")
    print(f"  Seeds: {list(benchmark.keys())}")
    # Required: the leaderboard protocol mandates exactly seeds 1-5
    predictions = {}
    for seed in [1, 2, 3, 4, 5]:
        print(f"\n--- Seed {seed} ---")
        # Get train/valid data for this seed
        train = benchmark[seed]['train']
        valid = benchmark[seed]['valid']
        print(f"Train size: {len(train)}")
        print(f"Valid size: {len(valid)}")
        # TODO: Replace with your model training
        # model = YourModel()
        # model.fit(train['Drug'], train['Y'])
        # For demonstration, create dummy predictions
        # Replace with: predictions[seed] = model.predict(benchmark[seed]['test'])
        test = benchmark[seed]['test']
        y_true = test['Y'].values
        # Simulate predictions (ground truth plus controlled Gaussian noise)
        np.random.seed(seed)
        y_pred = y_true + np.random.normal(0, 0.3, len(y_true))
        predictions[seed] = y_pred
        # Evaluate this seed individually with mean absolute error
        evaluator = Evaluator(name='MAE')
        score = evaluator(y_true, y_pred)
        print(f"MAE for seed {seed}: {score:.4f}")
    # Evaluate across all seeds (group.evaluate aggregates mean and std)
    print("\n--- Overall Evaluation ---")
    results = group.evaluate(predictions)
    print(f"\nResults for {dataset_name}:")
    mean_score, std_score = results[dataset_name]
    print(f"  Mean MAE: {mean_score:.4f}")
    print(f"  Std MAE: {std_score:.4f}")
    return predictions, results
def multiple_datasets_evaluation(group):
    """
    Example: Evaluate on multiple datasets of the benchmark group.

    Args:
        group: TDC benchmark group.

    Returns:
        Tuple of (per-dataset predictions, per-dataset (mean, std) results).
    """
    print("\n" + "=" * 60)
    print("Example 2: Multiple Datasets Evaluation")
    print("=" * 60)
    # Select a subset of datasets for demonstration
    selected_datasets = ['Caco2_Wang', 'HIA_Hou', 'Bioavailability_Ma']
    all_predictions = {}
    all_results = {}
    for dataset_name in selected_datasets:
        print(f"\n{'='*40}")
        print(f"Evaluating: {dataset_name}")
        print(f"{'='*40}")
        benchmark = group.get(dataset_name)
        predictions = {}
        # Train and predict for each seed (leaderboard protocol: seeds 1-5)
        for seed in [1, 2, 3, 4, 5]:
            train = benchmark[seed]['train']
            test = benchmark[seed]['test']
            # TODO: Replace with your model
            # model = YourModel()
            # model.fit(train['Drug'], train['Y'])
            # predictions[seed] = model.predict(test['Drug'])
            # Dummy predictions for demonstration (truth + Gaussian noise)
            np.random.seed(seed)
            y_true = test['Y'].values
            y_pred = y_true + np.random.normal(0, 0.3, len(y_true))
            predictions[seed] = y_pred
        all_predictions[dataset_name] = predictions
        # Evaluate this dataset across its five seeds
        results = group.evaluate({dataset_name: predictions})
        all_results[dataset_name] = results[dataset_name]
        mean_score, std_score = results[dataset_name]
        print(f"  {dataset_name}: {mean_score:.4f} Β± {std_score:.4f}")
    # Summary table across all evaluated datasets
    print("\n" + "=" * 60)
    print("Summary of Results")
    print("=" * 60)
    results_df = pd.DataFrame([
        {
            'Dataset': name,
            'Mean MAE': f"{mean:.4f}",
            'Std MAE': f"{std:.4f}"
        }
        for name, (mean, std) in all_results.items()
    ])
    print(results_df.to_string(index=False))
    return all_predictions, all_results
def custom_model_template():
    """
    Print a copy-paste template for plugging your own model into TDC benchmarks.

    Returns:
        The template source code as a string.
    """
    print("\n" + "=" * 60)
    print("Example 3: Custom Model Template")
    print("=" * 60)
    # NOTE: the template below is a display-only string literal, not executed.
    code_template = '''
# Template for using your own model with TDC benchmarks
from tdc.benchmark_group import admet_group
from your_library import YourModel  # Replace with your model

# Initialize benchmark group
group = admet_group(path='data/')
benchmark = group.get('Caco2_Wang')

predictions = {}
for seed in [1, 2, 3, 4, 5]:
    # Get data for this seed
    train = benchmark[seed]['train']
    valid = benchmark[seed]['valid']
    test = benchmark[seed]['test']

    # Extract features and labels
    X_train, y_train = train['Drug'], train['Y']
    X_valid, y_valid = valid['Drug'], valid['Y']
    X_test = test['Drug']

    # Initialize and train model
    model = YourModel(random_state=seed)
    model.fit(X_train, y_train)

    # Optionally use validation set for early stopping
    # model.fit(X_train, y_train, validation_data=(X_valid, y_valid))

    # Make predictions on test set
    predictions[seed] = model.predict(X_test)

# Evaluate with TDC
results = group.evaluate(predictions)
print(f"Results: {results}")
'''
    print("\nCustom Model Integration Template:")
    print("=" * 60)
    print(code_template)
    return code_template
def multi_seed_statistics(predictions_dict):
    """
    Example: Analyzing multi-seed prediction statistics.

    Summarizes how much the predictions vary across seeds (overall and
    per test sample).

    Args:
        predictions_dict: Mapping of seed -> 1-D array-like of predictions.
            All seeds must hold the same number of predictions. Any set of
            seed keys is accepted (previously hard-coded to [1, 2, 3, 4, 5]).

    Returns:
        None. Statistics are printed to stdout.
    """
    print("\n" + "=" * 60)
    print("Example 4: Multi-Seed Statistics Analysis")
    print("=" * 60)
    # Generalized: iterate whatever seeds are present, in sorted order,
    # instead of assuming the fixed list [1, 2, 3, 4, 5]. Output for the
    # original 5-seed input is unchanged.
    seeds = sorted(predictions_dict)
    # Stack to shape (n_seeds, n_samples) for vectorized statistics
    all_preds = np.array([predictions_dict[seed] for seed in seeds])
    print(f"\nPrediction statistics across {len(seeds)} seeds:")
    print(f"  Shape: {all_preds.shape}")
    print(f"  Mean prediction: {all_preds.mean():.4f}")
    print(f"  Std across seeds: {all_preds.std(axis=0).mean():.4f}")
    print(f"  Min prediction: {all_preds.min():.4f}")
    print(f"  Max prediction: {all_preds.max():.4f}")
    # Per-sample variance: std over seeds for each individual sample
    per_sample_std = all_preds.std(axis=0)
    print(f"\nPer-sample prediction std:")
    print(f"  Mean: {per_sample_std.mean():.4f}")
    print(f"  Median: {np.median(per_sample_std):.4f}")
    print(f"  Max: {per_sample_std.max():.4f}")
def leaderboard_submission_guide():
    """
    Print step-by-step guidance for submitting results to TDC leaderboards.
    """
    print("\n" + "=" * 60)
    print("Example 5: Leaderboard Submission Guide")
    print("=" * 60)
    # Static guidance text, displayed verbatim.
    guide = """
To submit results to TDC leaderboards:

1. Evaluate your model following the 5-seed protocol:
   - Use seeds [1, 2, 3, 4, 5] exactly as provided
   - Do not modify the train/valid/test splits
   - Report mean Β± std across all 5 seeds

2. Format your results:
   results = group.evaluate(predictions)
   # Returns: {'dataset_name': [mean_score, std_score]}

3. Submit to leaderboard:
   - Visit: https://tdcommons.ai/benchmark/admet_group/
   - Click on your dataset of interest
   - Submit your results with:
     * Model name and description
     * Mean score Β± standard deviation
     * Reference to paper/code (if available)

4. Best practices:
   - Report all datasets in the benchmark group
   - Include model hyperparameters
   - Share code for reproducibility
   - Compare against baseline models

5. Evaluation metrics:
   - ADMET Group uses MAE by default
   - Other groups may use different metrics
   - Check benchmark-specific requirements
"""
    print(guide)
def main():
    """
    Run all benchmark-evaluation examples end to end.
    """
    print("\n" + "=" * 60)
    print("TDC Benchmark Group Evaluation Examples")
    print("=" * 60)
    # Load benchmark group (downloads/caches data on first run)
    group = load_benchmark_group()
    # Example 1: Single dataset evaluation (5-seed protocol)
    predictions, results = single_dataset_evaluation(group)
    # Example 2: Multiple datasets evaluation
    all_predictions, all_results = multiple_datasets_evaluation(group)
    # Example 3: Custom model template
    custom_model_template()
    # Example 4: Multi-seed statistics on the Example 1 predictions
    multi_seed_statistics(predictions)
    # Example 5: Leaderboard submission guide
    leaderboard_submission_guide()
    print("\n" + "=" * 60)
    print("Benchmark evaluation examples completed!")
    print("=" * 60)
    print("\nNext steps:")
    print("1. Replace dummy predictions with your model")
    print("2. Run full evaluation on all benchmark datasets")
    print("3. Submit results to TDC leaderboard")
    print("=" * 60)
# Entry point: run all benchmark evaluation examples as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pytdc/scripts/benchmark_evaluation.py",
"license": "MIT License",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pytdc/scripts/load_and_split_data.py | #!/usr/bin/env python3
"""
TDC Data Loading and Splitting Template
This script demonstrates how to load TDC datasets and apply different
splitting strategies for model training and evaluation.
Usage:
python load_and_split_data.py
"""
from tdc.single_pred import ADME
from tdc.multi_pred import DTI
from tdc import Evaluator
import pandas as pd
def load_single_pred_example():
    """
    Example: Loading and splitting a single-prediction dataset (ADME).

    Returns:
        dict with 'train'/'valid'/'test' DataFrames from the scaffold split.
    """
    print("=" * 60)
    print("Example 1: Single-Prediction Task (ADME)")
    print("=" * 60)
    # Load Caco2 dataset (intestinal permeability)
    print("\nLoading Caco2_Wang dataset...")
    data = ADME(name='Caco2_Wang')
    # Get basic dataset info
    print(f"\nDataset size: {len(data.get_data())} molecules")
    data.print_stats()
    # Method 1: Scaffold split (default, recommended for chemistry because
    # it keeps molecules with similar scaffolds out of the test set)
    print("\n--- Scaffold Split ---")
    split = data.get_split(method='scaffold', seed=42, frac=[0.7, 0.1, 0.2])
    train = split['train']
    valid = split['valid']
    test = split['test']
    print(f"Train: {len(train)} molecules")
    print(f"Valid: {len(valid)} molecules")
    print(f"Test: {len(test)} molecules")
    # Display sample data
    print("\nSample training data:")
    print(train.head(3))
    # Method 2: Random split (simpler, but can overestimate performance)
    print("\n--- Random Split ---")
    split_random = data.get_split(method='random', seed=42, frac=[0.8, 0.1, 0.1])
    print(f"Train: {len(split_random['train'])} molecules")
    print(f"Valid: {len(split_random['valid'])} molecules")
    print(f"Test: {len(split_random['test'])} molecules")
    return split
def load_multi_pred_example():
    """
    Example: Loading and splitting a multi-prediction dataset (DTI).

    Demonstrates random, cold-drug, and cold-target splits and verifies
    the cold splits share no entities between train and test.

    Returns:
        dict with the cold-drug split ('train'/'valid'/'test' DataFrames).
    """
    print("\n" + "=" * 60)
    print("Example 2: Multi-Prediction Task (DTI)")
    print("=" * 60)
    # Load BindingDB Kd dataset (drug-target binding affinities)
    print("\nLoading BindingDB_Kd dataset...")
    data = DTI(name='BindingDB_Kd')
    # Get basic dataset info
    full_data = data.get_data()
    print(f"\nDataset size: {len(full_data)} drug-target pairs")
    print(f"Unique drugs: {full_data['Drug_ID'].nunique()}")
    print(f"Unique targets: {full_data['Target_ID'].nunique()}")
    # Method 1: Random split over pairs
    print("\n--- Random Split ---")
    split_random = data.get_split(method='random', seed=42)
    print(f"Train: {len(split_random['train'])} pairs")
    print(f"Valid: {len(split_random['valid'])} pairs")
    print(f"Test: {len(split_random['test'])} pairs")
    # Method 2: Cold drug split (test drugs never seen in training)
    print("\n--- Cold Drug Split ---")
    split_cold_drug = data.get_split(method='cold_drug', seed=42)
    train = split_cold_drug['train']
    test = split_cold_drug['test']
    # Verify no drug overlap between train and test
    train_drugs = set(train['Drug_ID'])
    test_drugs = set(test['Drug_ID'])
    overlap = train_drugs & test_drugs
    print(f"Train: {len(train)} pairs, {len(train_drugs)} unique drugs")
    print(f"Test: {len(test)} pairs, {len(test_drugs)} unique drugs")
    print(f"Drug overlap: {len(overlap)} (should be 0)")
    # Method 3: Cold target split (test targets never seen in training)
    print("\n--- Cold Target Split ---")
    split_cold_target = data.get_split(method='cold_target', seed=42)
    train = split_cold_target['train']
    test = split_cold_target['test']
    train_targets = set(train['Target_ID'])
    test_targets = set(test['Target_ID'])
    overlap = train_targets & test_targets
    print(f"Train: {len(train)} pairs, {len(train_targets)} unique targets")
    print(f"Test: {len(test)} pairs, {len(test_targets)} unique targets")
    print(f"Target overlap: {len(overlap)} (should be 0)")
    # Display sample data
    print("\nSample DTI data:")
    print(full_data.head(3))
    return split_cold_drug
def evaluation_example(split):
    """
    Example: Evaluating model predictions with TDC evaluators.

    Args:
        split: dict with a 'test' DataFrame containing a 'Y' label column.
    """
    print("\n" + "=" * 60)
    print("Example 3: Model Evaluation")
    print("=" * 60)
    test = split['test']
    # For demonstration, create dummy predictions
    # In practice, replace with your model's predictions
    import numpy as np
    np.random.seed(42)
    # Simulate predictions (replace with model.predict(test['Drug']))
    y_true = test['Y'].values
    y_pred = y_true + np.random.normal(0, 0.5, len(y_true))  # Add noise
    # Evaluate with several regression metrics
    print("\nEvaluating predictions...")
    # Regression metrics
    mae_evaluator = Evaluator(name='MAE')
    mae = mae_evaluator(y_true, y_pred)
    print(f"MAE: {mae:.4f}")
    rmse_evaluator = Evaluator(name='RMSE')
    rmse = rmse_evaluator(y_true, y_pred)
    print(f"RMSE: {rmse:.4f}")
    r2_evaluator = Evaluator(name='R2')
    r2 = r2_evaluator(y_true, y_pred)
    print(f"RΒ²: {r2:.4f}")
    spearman_evaluator = Evaluator(name='Spearman')
    spearman = spearman_evaluator(y_true, y_pred)
    print(f"Spearman: {spearman:.4f}")
def custom_split_example():
    """Demonstrate scaffold splits with several custom train/valid/test fractions."""
    banner = "=" * 60
    print("\n" + banner)
    print("Example 4: Custom Split Fractions")
    print(banner)
    data = ADME(name='HIA_Hou')
    # (fractions, human-readable label) pairs to try
    scenarios = (
        ([0.6, 0.2, 0.2], "60/20/20 split"),
        ([0.8, 0.1, 0.1], "80/10/10 split"),
        ([0.7, 0.15, 0.15], "70/15/15 split"),
    )
    for fractions, label in scenarios:
        parts = data.get_split(method='scaffold', seed=42, frac=fractions)
        print(f"\n{label}:")
        # Report each partition's size alongside its requested share
        for part_name, share in zip(("Train", "Valid", "Test"), fractions):
            print(f"  {part_name}: {len(parts[part_name.lower()])} ({share*100:.0f}%)")
def main():
    """
    Run all data loading/splitting examples end to end.
    """
    print("\n" + "=" * 60)
    print("TDC Data Loading and Splitting Examples")
    print("=" * 60)
    # Example 1: Single prediction with scaffold split
    split = load_single_pred_example()
    # Example 2: Multi prediction with cold splits
    dti_split = load_multi_pred_example()
    # Example 3: Model evaluation (uses the split from Example 1)
    evaluation_example(split)
    # Example 4: Custom split fractions
    custom_split_example()
    print("\n" + "=" * 60)
    print("Examples completed!")
    print("=" * 60)
# Entry point: run all data loading/splitting examples as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pytdc/scripts/load_and_split_data.py",
"license": "MIT License",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pytdc/scripts/molecular_generation.py | #!/usr/bin/env python3
"""
TDC Molecular Generation with Oracles Template
This script demonstrates how to use TDC oracles for molecular generation
tasks including goal-directed generation and distribution learning.
Usage:
python molecular_generation.py
"""
from tdc.generation import MolGen
from tdc import Oracle
import numpy as np
def load_generation_dataset():
    """Load the ChEMBL molecular-generation dataset and return training SMILES.

    Returns:
        list[str]: SMILES strings from the training partition.
    """
    banner = "=" * 60
    print(banner)
    print("Loading Molecular Generation Dataset")
    print(banner)
    # Download/cache the ChEMBL v29 generation corpus
    data = MolGen(name='ChEMBL_V29')
    # Only the training partition is needed for generator fitting
    train_smiles = data.get_split()['train']['Drug'].tolist()
    print(f"\nDataset: ChEMBL_V29")
    print(f"Training molecules: {len(train_smiles)}")
    # Preview the first few molecules
    print("\nSample SMILES:")
    for idx, smiles in enumerate(train_smiles[:5], 1):
        print(f"  {idx}. {smiles}")
    return train_smiles
def single_oracle_example():
    """Score a few well-known drugs with the GSK3B bioactivity oracle."""
    banner = "=" * 60
    print("\n" + banner)
    print("Example 1: Single Oracle Evaluation")
    print(banner)
    # Oracle wrapping a pretrained GSK3B activity predictor
    oracle = Oracle(name='GSK3B')
    reference_drugs = [
        'CC(C)Cc1ccc(cc1)C(C)C(O)=O',  # Ibuprofen
        'CC(=O)Oc1ccccc1C(=O)O',  # Aspirin
        'Cn1c(=O)c2c(ncn2C)n(C)c1=O',  # Caffeine
        'CN1C=NC2=C1C(=O)N(C(=O)N2C)C'  # Theophylline
    ]
    print("\nEvaluating molecules with GSK3B oracle:")
    print("-" * 60)
    for smiles in reference_drugs:
        activity = oracle(smiles)
        print(f"SMILES: {smiles}")
        print(f"GSK3B score: {activity:.4f}\n")
def multiple_oracles_example():
    """
    Example: Using multiple oracles for multi-objective optimization.

    Scores one molecule against four properties and combines them into a
    single weighted objective.
    """
    print("\n" + "=" * 60)
    print("Example 2: Multiple Oracles (Multi-Objective)")
    print("=" * 60)
    # Initialize one oracle per property of interest
    oracles = {
        'QED': Oracle(name='QED'),  # Drug-likeness
        'SA': Oracle(name='SA'),  # Synthetic accessibility
        'GSK3B': Oracle(name='GSK3B'),  # Target binding
        'LogP': Oracle(name='LogP')  # Lipophilicity
    }
    # Test molecule (ibuprofen)
    test_smiles = 'CC(C)Cc1ccc(cc1)C(C)C(O)=O'
    print(f"\nEvaluating: {test_smiles}")
    print("-" * 60)
    scores = {}
    for name, oracle in oracles.items():
        score = oracle(test_smiles)
        scores[name] = score
        print(f"{name:10s}: {score:.4f}")
    # Multi-objective score (weighted combination)
    print("\n--- Multi-Objective Scoring ---")
    # Invert SA (lower is better, so we invert for maximization)
    sa_score = 1.0 / (1.0 + scores['SA'])
    # Weighted combination; weights sum to 1.0
    weights = {'QED': 0.3, 'SA': 0.2, 'GSK3B': 0.4, 'LogP': 0.1}
    multi_score = (
        weights['QED'] * scores['QED'] +
        weights['SA'] * sa_score +
        weights['GSK3B'] * scores['GSK3B'] +
        weights['LogP'] * (scores['LogP'] / 5.0)  # Normalize LogP
    )
    print(f"Multi-objective score: {multi_score:.4f}")
    print(f"Weights: {weights}")
def batch_evaluation_example():
    """
    Example: Batch evaluation of multiple molecules with one oracle call.
    """
    print("\n" + "=" * 60)
    print("Example 3: Batch Evaluation")
    print("=" * 60)
    # Sample molecules to score
    molecules = [
        'CC(C)Cc1ccc(cc1)C(C)C(O)=O',
        'CC(=O)Oc1ccccc1C(=O)O',
        'Cn1c(=O)c2c(ncn2C)n(C)c1=O',
        'CN1C=NC2=C1C(=O)N(C(=O)N2C)C',
        'CC(C)NCC(COc1ccc(cc1)COCCOC(C)C)O'
    ]
    # Oracle for dopamine receptor D2 activity
    oracle = Oracle(name='DRD2')
    print(f"\nBatch evaluating {len(molecules)} molecules with DRD2 oracle...")
    # Batch evaluation (more efficient than individual calls)
    scores = oracle(molecules)
    print("\nResults:")
    print("-" * 60)
    for smiles, score in zip(molecules, scores):
        print(f"{smiles[:40]:40s}... Score: {score:.4f}")
    # Summary statistics over the batch
    print(f"\nStatistics:")
    print(f"  Mean score: {np.mean(scores):.4f}")
    print(f"  Std score: {np.std(scores):.4f}")
    print(f"  Min score: {np.min(scores):.4f}")
    print(f"  Max score: {np.max(scores):.4f}")
def goal_directed_generation_template():
    """
    Print a copy-paste template for goal-directed molecular generation.
    """
    print("\n" + "=" * 60)
    print("Example 4: Goal-Directed Generation Template")
    print("=" * 60)
    # NOTE: the template below is a display-only string literal, not executed.
    template = '''
# Template for goal-directed molecular generation
from tdc.generation import MolGen
from tdc import Oracle
import numpy as np

# 1. Load training data
data = MolGen(name='ChEMBL_V29')
train_smiles = data.get_split()['train']['Drug'].tolist()

# 2. Initialize oracle(s)
oracle = Oracle(name='GSK3B')

# 3. Initialize your generative model
# model = YourGenerativeModel()
# model.fit(train_smiles)

# 4. Generation loop
num_iterations = 100
num_molecules_per_iter = 100
best_molecules = []

for iteration in range(num_iterations):
    # Generate candidate molecules
    # candidates = model.generate(num_molecules_per_iter)

    # Evaluate with oracle
    scores = oracle(candidates)

    # Select top molecules
    top_indices = np.argsort(scores)[-10:]
    top_molecules = [candidates[i] for i in top_indices]
    top_scores = [scores[i] for i in top_indices]

    # Store best molecules
    best_molecules.extend(zip(top_molecules, top_scores))

    # Optional: Fine-tune model on top molecules
    # model.fine_tune(top_molecules)

    # Print progress
    print(f"Iteration {iteration}: Best score = {max(scores):.4f}")

# Sort and display top molecules
best_molecules.sort(key=lambda x: x[1], reverse=True)
print("\\nTop 10 molecules:")
for smiles, score in best_molecules[:10]:
    print(f"{smiles}: {score:.4f}")
'''
    print("\nGoal-Directed Generation Template:")
    print("=" * 60)
    print(template)
def distribution_learning_example(train_smiles):
    """
    Example: Distribution learning evaluation.

    Compares the QED score distribution of the training molecules against
    a (simulated) generated set using a Kolmogorov-Smirnov test.

    Args:
        train_smiles: list of training SMILES strings.
    """
    print("\n" + "=" * 60)
    print("Example 5: Distribution Learning")
    print("=" * 60)
    # Use a subset for demonstration to keep oracle calls fast
    train_subset = train_smiles[:1000]
    # Oracle for quantitative estimate of drug-likeness
    oracle = Oracle(name='QED')
    print("\nEvaluating property distribution...")
    # Evaluate training set
    print("Computing training set distribution...")
    train_scores = oracle(train_subset)
    # Simulate generated molecules (in practice, use your generative model)
    # For demo: perturb the training scores with Gaussian noise
    print("Computing generated set distribution...")
    generated_scores = train_scores + np.random.normal(0, 0.1, len(train_scores))
    generated_scores = np.clip(generated_scores, 0, 1)  # QED is [0, 1]
    # Compare distributions
    print("\n--- Distribution Statistics ---")
    print(f"Training set (n={len(train_subset)}):")
    print(f"  Mean: {np.mean(train_scores):.4f}")
    print(f"  Std: {np.std(train_scores):.4f}")
    print(f"  Median: {np.median(train_scores):.4f}")
    print(f"\nGenerated set (n={len(generated_scores)}):")
    print(f"  Mean: {np.mean(generated_scores):.4f}")
    print(f"  Std: {np.std(generated_scores):.4f}")
    print(f"  Median: {np.median(generated_scores):.4f}")
    # Distribution similarity via two-sample Kolmogorov-Smirnov test
    from scipy.stats import ks_2samp
    ks_statistic, p_value = ks_2samp(train_scores, generated_scores)
    print(f"\nKolmogorov-Smirnov Test:")
    print(f"  KS statistic: {ks_statistic:.4f}")
    print(f"  P-value: {p_value:.4f}")
    if p_value > 0.05:
        print("  β Distributions are similar (p > 0.05)")
    else:
        print("  β Distributions are significantly different (p < 0.05)")
def available_oracles_info():
    """
    Display a categorized overview of the oracles TDC provides.
    """
    print("\n" + "=" * 60)
    print("Example 6: Available Oracles")
    print("=" * 60)
    # Static catalog grouped by oracle category
    oracle_info = {
        'Biochemical Targets': [
            'DRD2', 'GSK3B', 'JNK3', '5HT2A', 'ACE',
            'MAPK', 'CDK', 'P38', 'PARP1', 'PIK3CA'
        ],
        'Physicochemical Properties': [
            'QED', 'SA', 'LogP', 'MW', 'Lipinski'
        ],
        'Composite Metrics': [
            'Isomer_Meta', 'Median1', 'Median2',
            'Rediscovery', 'Similarity', 'Uniqueness', 'Novelty'
        ],
        'Specialized': [
            'ASKCOS', 'Docking', 'Vina'
        ]
    }
    print("\nAvailable Oracle Categories:")
    print("-" * 60)
    for category, oracles in oracle_info.items():
        print(f"\n{category}:")
        for oracle_name in oracles:
            print(f"  - {oracle_name}")
    print("\nFor detailed oracle documentation, see:")
    print("  references/oracles.md")
def constraint_satisfaction_example():
    """
    Example: Checking molecules against multi-property constraints.

    Each molecule passes only if every property score falls inside its
    allowed [min, max] range.
    """
    print("\n" + "=" * 60)
    print("Example 7: Constraint Satisfaction")
    print("=" * 60)
    # Define property constraints as name -> (min, max) ranges
    constraints = {
        'QED': (0.5, 1.0),  # Drug-likeness >= 0.5
        'SA': (1.0, 5.0),  # Easy to synthesize
        'MW': (200, 500),  # Molecular weight 200-500 Da
        'LogP': (0, 3)  # Lipophilicity 0-3
    }
    # One oracle per constrained property
    oracles = {name: Oracle(name=name) for name in constraints.keys()}
    # Test molecules
    test_molecules = [
        'CC(C)Cc1ccc(cc1)C(C)C(O)=O',
        'CC(=O)Oc1ccccc1C(=O)O',
        'Cn1c(=O)c2c(ncn2C)n(C)c1=O'
    ]
    print("\nConstraints:")
    for prop, (min_val, max_val) in constraints.items():
        print(f"  {prop}: [{min_val}, {max_val}]")
    print("\n" + "-" * 60)
    print("Evaluating molecules against constraints:")
    print("-" * 60)
    for smiles in test_molecules:
        print(f"\nSMILES: {smiles}")
        satisfies_all = True
        for prop, (min_val, max_val) in constraints.items():
            score = oracles[prop](smiles)
            satisfies = min_val <= score <= max_val
            status = "β" if satisfies else "β"
            print(f"  {prop:10s}: {score:7.2f} [{min_val:5.1f}, {max_val:5.1f}] {status}")
            # A single failed property fails the whole molecule
            satisfies_all = satisfies_all and satisfies
        result = "PASS" if satisfies_all else "FAIL"
        print(f"  Overall: {result}")
def main():
    """
    Run all molecular-generation examples end to end.
    """
    print("\n" + "=" * 60)
    print("TDC Molecular Generation with Oracles Examples")
    print("=" * 60)
    # Load generation dataset (training SMILES feed Example 5 below)
    train_smiles = load_generation_dataset()
    # Example 1: Single oracle
    single_oracle_example()
    # Example 2: Multiple oracles
    multiple_oracles_example()
    # Example 3: Batch evaluation
    batch_evaluation_example()
    # Example 4: Goal-directed generation template
    goal_directed_generation_template()
    # Example 5: Distribution learning
    distribution_learning_example(train_smiles)
    # Example 6: Available oracles
    available_oracles_info()
    # Example 7: Constraint satisfaction
    constraint_satisfaction_example()
    print("\n" + "=" * 60)
    print("Molecular generation examples completed!")
    print("=" * 60)
    print("\nNext steps:")
    print("1. Implement your generative model")
    print("2. Use oracles to guide generation")
    print("3. Evaluate generated molecules")
    print("4. Iterate and optimize")
    print("=" * 60)
# Entry point: run all molecular generation examples as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pytdc/scripts/molecular_generation.py",
"license": "MIT License",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pytorch-lightning/scripts/template_datamodule.py | """
Template for creating a PyTorch Lightning DataModule.
This template provides a complete boilerplate for building a LightningDataModule
with all essential methods and best practices for data handling.
"""
import lightning as L
from torch.utils.data import Dataset, DataLoader, random_split
import torch
class CustomDataset(Dataset):
    """
    Placeholder map-style dataset.

    Replace the synthetic tensors below with real data loading for your task.

    Args:
        data_path: Path to data directory (unused by the placeholder data).
        transform: Optional callable applied to each sample before returning.
    """

    def __init__(self, data_path, transform=None):
        self.data_path = data_path
        self.transform = transform
        # Load your data here
        # self.data = load_data(data_path)
        # self.labels = load_labels(data_path)
        # Placeholder: 1000 random RGB images (3x224x224), labels in [0, 10)
        self.data = torch.randn(1000, 3, 224, 224)
        self.labels = torch.randint(0, 10, (1000,))

    def __len__(self):
        """Number of samples in the dataset."""
        return self.data.shape[0]

    def __getitem__(self, idx):
        """Return the (optionally transformed) sample and its label at ``idx``."""
        item, target = self.data[idx], self.labels[idx]
        if self.transform is not None:
            item = self.transform(item)
        return item, target
class TemplateDataModule(L.LightningDataModule):
"""
Template LightningDataModule for data handling.
This class encapsulates all data processing steps:
1. Download/prepare data (prepare_data)
2. Create datasets (setup)
3. Create dataloaders (train/val/test/predict_dataloader)
Args:
data_dir: Directory containing the data
batch_size: Batch size for dataloaders
num_workers: Number of workers for data loading
train_val_split: Train/validation split ratio
pin_memory: Whether to pin memory for faster GPU transfer
"""
    def __init__(
        self,
        data_dir: str = "./data",
        batch_size: int = 32,
        num_workers: int = 4,
        train_val_split: float = 0.8,
        pin_memory: bool = True,
    ):
        """Store hyperparameters and dataset placeholders.

        Arguments mirror the class docstring; values are captured via
        ``save_hyperparameters`` and later read as ``self.hparams.<name>``.
        """
        super().__init__()
        # Save hyperparameters (accessible via self.hparams; also logged)
        self.save_hyperparameters()
        # Datasets are created lazily in setup() for the requested stage
        self.train_dataset = None
        self.val_dataset = None
        self.test_dataset = None
        self.predict_dataset = None
    def prepare_data(self):
        """
        Download and prepare data.

        Called only once and on a single process, so it is safe for
        one-time work such as downloads. Do NOT set state here
        (e.g. ``self.x = y``) because it is not transferred to other
        processes in distributed training.

        Use this for:
        - Downloading datasets
        - Tokenizing text
        - Saving processed data to disk
        """
        # Example: Download data if not exists
        # if not os.path.exists(self.hparams.data_dir):
        #     download_dataset(self.hparams.data_dir)
        # Example: Process and save data
        # process_and_save(self.hparams.data_dir)
        pass
    def setup(self, stage: str = None):
        """
        Create datasets for each stage.

        Called on every process in distributed training, so assigning
        state here (``self.train_dataset = ...``) is safe.

        Args:
            stage: Current stage ('fit', 'validate', 'test', or 'predict');
                None sets up fit and test datasets.
        """
        # Define transforms (augmented for training, plain for eval)
        train_transform = self._get_train_transforms()
        test_transform = self._get_test_transforms()
        # Setup for training and validation
        if stage == "fit" or stage is None:
            # Load full dataset
            full_dataset = CustomDataset(
                self.hparams.data_dir, transform=train_transform
            )
            # Split into train and validation
            train_size = int(self.hparams.train_val_split * len(full_dataset))
            val_size = len(full_dataset) - train_size
            # Fixed generator seed keeps the split reproducible across runs
            self.train_dataset, self.val_dataset = random_split(
                full_dataset,
                [train_size, val_size],
                generator=torch.Generator().manual_seed(42),
            )
            # Apply test transforms to validation set
            # (Note: random_split doesn't support different transforms,
            # you may need to implement a custom wrapper)
        # Setup for testing
        if stage == "test" or stage is None:
            self.test_dataset = CustomDataset(
                self.hparams.data_dir, transform=test_transform
            )
        # Setup for prediction
        if stage == "predict":
            self.predict_dataset = CustomDataset(
                self.hparams.data_dir, transform=test_transform
            )
def _get_train_transforms(self):
"""
Define training transforms/augmentations.
Returns:
Training transforms
"""
# Example with torchvision:
# from torchvision import transforms
# return transforms.Compose([
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(10),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
return None
def _get_test_transforms(self):
"""
Define test/validation transforms (no augmentation).
Returns:
Test/validation transforms
"""
# Example with torchvision:
# from torchvision import transforms
# return transforms.Compose([
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
return None
def train_dataloader(self):
"""
Create training dataloader.
Returns:
Training DataLoader
"""
return DataLoader(
self.train_dataset,
batch_size=self.hparams.batch_size,
shuffle=True,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory,
persistent_workers=True if self.hparams.num_workers > 0 else False,
drop_last=True, # Drop last incomplete batch
)
def val_dataloader(self):
"""
Create validation dataloader.
Returns:
Validation DataLoader
"""
return DataLoader(
self.val_dataset,
batch_size=self.hparams.batch_size,
shuffle=False,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory,
persistent_workers=True if self.hparams.num_workers > 0 else False,
)
def test_dataloader(self):
"""
Create test dataloader.
Returns:
Test DataLoader
"""
return DataLoader(
self.test_dataset,
batch_size=self.hparams.batch_size,
shuffle=False,
num_workers=self.hparams.num_workers,
)
def predict_dataloader(self):
"""
Create prediction dataloader.
Returns:
Prediction DataLoader
"""
return DataLoader(
self.predict_dataset,
batch_size=self.hparams.batch_size,
shuffle=False,
num_workers=self.hparams.num_workers,
)
# Optional: State management for checkpointing
def state_dict(self):
"""
Save DataModule state for checkpointing.
Returns:
State dictionary
"""
return {"train_val_split": self.hparams.train_val_split}
def load_state_dict(self, state_dict):
"""
Restore DataModule state from checkpoint.
Args:
state_dict: State dictionary
"""
self.hparams.train_val_split = state_dict["train_val_split"]
# Optional: Teardown for cleanup
def teardown(self, stage: str = None):
"""
Cleanup after training/testing/prediction.
Args:
stage: Current stage ('fit', 'validate', 'test', or 'predict')
"""
# Clean up resources
if stage == "fit":
self.train_dataset = None
self.val_dataset = None
elif stage == "test":
self.test_dataset = None
elif stage == "predict":
self.predict_dataset = None
# Example usage
if __name__ == "__main__":
    # Build the DataModule with explicit settings
    dm = TemplateDataModule(
        data_dir="./data",
        batch_size=64,
        num_workers=8,
        train_val_split=0.8,
    )
    # Prepare and set up for the "fit" stage
    dm.prepare_data()
    dm.setup(stage="fit")
    # Instantiate the dataloaders
    train_loader = dm.train_dataloader()
    val_loader = dm.val_dataloader()
    # Report sizes in a single loop to keep the labels next to their objects
    for label, sized in (
        ("Train dataset size", dm.train_dataset),
        ("Validation dataset size", dm.val_dataset),
        ("Train batches", train_loader),
        ("Validation batches", val_loader),
    ):
        print(f"{label}: {len(sized)}")
    # Example: Use with Trainer
    # from template_lightning_module import TemplateLightningModule
    # model = TemplateLightningModule()
    # trainer = L.Trainer(max_epochs=10)
    # trainer.fit(model, datamodule=dm)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pytorch-lightning/scripts/template_datamodule.py",
"license": "MIT License",
"lines": 264,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/pytorch-lightning/scripts/template_lightning_module.py | """
Template for creating a PyTorch Lightning Module.
This template provides a complete boilerplate for building a LightningModule
with all essential methods and best practices.
"""
import lightning as L
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
class TemplateLightningModule(L.LightningModule):
    """
    Template LightningModule for building deep learning models.

    Wraps a small MLP classifier (784 -> hidden_dim -> 10) together with
    training/validation/test/predict steps and optimizer configuration.

    Args:
        learning_rate: Learning rate for optimizer
        hidden_dim: Hidden dimension size
        dropout: Dropout probability
    """
    def __init__(
        self,
        learning_rate: float = 0.001,
        hidden_dim: int = 256,
        dropout: float = 0.1,
    ):
        super().__init__()
        # Save hyperparameters (accessible via self.hparams)
        self.save_hyperparameters()
        # Define your model architecture
        self.model = nn.Sequential(
            nn.Linear(784, self.hparams.hidden_dim),
            nn.ReLU(),
            nn.Dropout(self.hparams.dropout),
            nn.Linear(self.hparams.hidden_dim, 10),
        )
        # Optional: Define metrics
        # from torchmetrics import Accuracy
        # self.train_accuracy = Accuracy(task="multiclass", num_classes=10)
        # self.val_accuracy = Accuracy(task="multiclass", num_classes=10)
    def forward(self, x):
        """
        Forward pass of the model.

        Args:
            x: Input tensor

        Returns:
            Model output (logits)
        """
        return self.model(x)
    def training_step(self, batch, batch_idx):
        """
        Training step (called for each training batch).

        Args:
            batch: Current batch of data (inputs, targets)
            batch_idx: Index of the current batch

        Returns:
            Loss tensor
        """
        x, y = batch
        # Forward pass
        logits = self(x)
        loss = F.cross_entropy(logits, y)
        # Calculate accuracy (optional)
        preds = torch.argmax(logits, dim=1)
        acc = (preds == y).float().mean()
        # Log metrics
        self.log("train/loss", loss, on_step=True, on_epoch=True, prog_bar=True)
        self.log("train/acc", acc, on_step=True, on_epoch=True)
        self.log("learning_rate", self.optimizers().param_groups[0]["lr"])
        return loss
    def validation_step(self, batch, batch_idx):
        """
        Validation step (called for each validation batch).

        Args:
            batch: Current batch of data (inputs, targets)
            batch_idx: Index of the current batch
        """
        x, y = batch
        # Forward pass
        logits = self(x)
        loss = F.cross_entropy(logits, y)
        # Calculate accuracy
        preds = torch.argmax(logits, dim=1)
        acc = (preds == y).float().mean()
        # Log metrics (automatically aggregated across batches)
        self.log("val/loss", loss, on_epoch=True, prog_bar=True, sync_dist=True)
        self.log("val/acc", acc, on_epoch=True, prog_bar=True, sync_dist=True)
    def test_step(self, batch, batch_idx):
        """
        Test step (called for each test batch).

        Args:
            batch: Current batch of data (inputs, targets)
            batch_idx: Index of the current batch
        """
        x, y = batch
        # Forward pass
        logits = self(x)
        loss = F.cross_entropy(logits, y)
        # Calculate accuracy
        preds = torch.argmax(logits, dim=1)
        acc = (preds == y).float().mean()
        # Log metrics
        self.log("test/loss", loss, on_epoch=True)
        self.log("test/acc", acc, on_epoch=True)
    def predict_step(self, batch, batch_idx, dataloader_idx=0):
        """
        Prediction step (called for each prediction batch).

        Args:
            batch: Current batch of data (inputs, targets)
            batch_idx: Index of the current batch
            dataloader_idx: Index of the dataloader (if multiple)

        Returns:
            Predicted class indices
        """
        x, y = batch
        logits = self(x)
        preds = torch.argmax(logits, dim=1)
        return preds
    def configure_optimizers(self):
        """
        Configure optimizers and learning rate schedulers.

        Returns:
            Optimizer and scheduler configuration
        """
        # Define optimizer
        optimizer = Adam(
            self.parameters(),
            lr=self.hparams.learning_rate,
            weight_decay=1e-5,
        )
        # Define scheduler.
        # NOTE: the `verbose` argument of ReduceLROnPlateau is deprecated in
        # recent PyTorch releases (and removed in newer ones), so it is
        # intentionally not passed here.
        scheduler = ReduceLROnPlateau(
            optimizer,
            mode="min",
            factor=0.5,
            patience=5,
        )
        # Return configuration
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "monitor": "val/loss",
                "interval": "epoch",
                "frequency": 1,
            },
        }
    # Optional: Add custom methods for model-specific logic
    def on_train_epoch_end(self):
        """Called at the end of each training epoch."""
        # Example: Log custom metrics
        pass
    def on_validation_epoch_end(self):
        """Called at the end of each validation epoch."""
        # Example: Compute epoch-level metrics
        pass
# Example usage
if __name__ == "__main__":
    # Create model
    model = TemplateLightningModule(
        learning_rate=0.001,
        hidden_dim=256,
        dropout=0.1,
    )
    # Create trainer
    trainer = L.Trainer(
        max_epochs=10,
        accelerator="auto",
        devices="auto",
        logger=True,
    )
    # Train (you need to provide train_dataloader and val_dataloader)
    # trainer.fit(model, train_dataloader, val_dataloader)
    # Fix: LightningModule has no `num_parameters` attribute — the original
    # `model.num_parameters` raised AttributeError. Count parameters directly.
    num_params = sum(p.numel() for p in model.parameters())
    print(f"Model created with {num_params:,} parameters")
    print(f"Hyperparameters: {model.hparams}")
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/pytorch-lightning/scripts/template_lightning_module.py",
"license": "MIT License",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/rdkit/scripts/molecular_properties.py | #!/usr/bin/env python3
"""
Molecular Properties Calculator
Calculate comprehensive molecular properties and descriptors for molecules.
Supports single molecules or batch processing from files.
Usage:
python molecular_properties.py "CCO"
python molecular_properties.py --file molecules.smi --output properties.csv
"""
import argparse
import sys
from pathlib import Path
try:
from rdkit import Chem
from rdkit.Chem import Descriptors, Lipinski
except ImportError:
print("Error: RDKit not installed. Install with: conda install -c conda-forge rdkit")
sys.exit(1)
def calculate_properties(mol):
    """Calculate comprehensive molecular properties.

    Args:
        mol: RDKit Mol object (None is tolerated).

    Returns:
        Dict of descriptor name -> value, plus the boolean drug-likeness
        flags 'Lipinski_Pass' and 'Lead-like'; None if mol is None.
    """
    if mol is None:
        return None
    properties = {
        # Basic properties
        'SMILES': Chem.MolToSmiles(mol),
        'Molecular_Formula': Chem.rdMolDescriptors.CalcMolFormula(mol),
        # Molecular weight
        'MW': Descriptors.MolWt(mol),
        'ExactMW': Descriptors.ExactMolWt(mol),
        # Lipophilicity
        'LogP': Descriptors.MolLogP(mol),
        'MR': Descriptors.MolMR(mol),
        # Polar surface area
        'TPSA': Descriptors.TPSA(mol),
        'LabuteASA': Descriptors.LabuteASA(mol),
        # Hydrogen bonding
        'HBD': Descriptors.NumHDonors(mol),
        'HBA': Descriptors.NumHAcceptors(mol),
        # Atom counts
        'Heavy_Atoms': Descriptors.HeavyAtomCount(mol),
        'Heteroatoms': Descriptors.NumHeteroatoms(mol),
        'Valence_Electrons': Descriptors.NumValenceElectrons(mol),
        # Ring information
        'Rings': Descriptors.RingCount(mol),
        'Aromatic_Rings': Descriptors.NumAromaticRings(mol),
        'Saturated_Rings': Descriptors.NumSaturatedRings(mol),
        'Aliphatic_Rings': Descriptors.NumAliphaticRings(mol),
        'Aromatic_Heterocycles': Descriptors.NumAromaticHeterocycles(mol),
        # Flexibility
        'Rotatable_Bonds': Descriptors.NumRotatableBonds(mol),
        # Fix: the RDKit descriptor is named FractionCSP3 (uppercase CSP3);
        # Descriptors.FractionCsp3 does not exist and raised AttributeError.
        'Fraction_Csp3': Descriptors.FractionCSP3(mol),
        # Complexity
        'BertzCT': Descriptors.BertzCT(mol),
        # Drug-likeness
        'QED': Descriptors.qed(mol),
    }
    # Lipinski's Rule of Five
    properties['Lipinski_Pass'] = (
        properties['MW'] <= 500 and
        properties['LogP'] <= 5 and
        properties['HBD'] <= 5 and
        properties['HBA'] <= 10
    )
    # Lead-likeness
    properties['Lead-like'] = (
        250 <= properties['MW'] <= 350 and
        properties['LogP'] <= 3.5 and
        properties['Rotatable_Bonds'] <= 7
    )
    return properties
def process_single_molecule(smiles):
    """Parse a single SMILES string and compute its property table.

    Args:
        smiles: SMILES string.

    Returns:
        Property dict, or None when the SMILES does not parse.
    """
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        print(f"Error: Failed to parse SMILES: {smiles}")
        return None
    return calculate_properties(mol)
def process_file(input_file, output_file=None):
    """Compute properties for every molecule in a file.

    Args:
        input_file: Path to an SDF or SMILES file.
        output_file: Optional CSV destination; results are printed to
            stdout when omitted.

    Returns:
        List of property dicts for the successfully parsed molecules.
    """
    input_path = Path(input_file)
    if not input_path.exists():
        print(f"Error: File not found: {input_file}")
        return
    # Pick a supplier based on the file extension
    ext = input_path.suffix.lower()
    if ext in ('.sdf', '.mol'):
        suppl = Chem.SDMolSupplier(str(input_path))
    elif ext in ('.smi', '.smiles', '.txt'):
        suppl = Chem.SmilesMolSupplier(str(input_path), titleLine=False)
    else:
        print(f"Error: Unsupported file format: {input_path.suffix}")
        return
    results = []
    for idx, mol in enumerate(suppl):
        if mol is None:
            print(f"Warning: Failed to parse molecule {idx+1}")
            continue
        props = calculate_properties(mol)
        if props:
            props['Index'] = idx + 1
            results.append(props)
    # Either persist to CSV or dump everything to the console
    if output_file:
        write_csv(results, output_file)
        print(f"Results written to: {output_file}")
    else:
        for props in results:
            print("\n" + "="*60)
            for key, value in props.items():
                print(f"{key:25s}: {value}")
    return results
def write_csv(results, output_file):
    """Write a list of property dicts to a CSV file.

    Args:
        results: List of dicts sharing the same keys; the first dict's
            keys become the CSV header.
        output_file: Destination path.
    """
    import csv
    if not results:
        print("No results to write")
        return
    with open(output_file, 'w', newline='') as handle:
        writer = csv.DictWriter(handle, fieldnames=results[0].keys())
        writer.writeheader()
        writer.writerows(results)
def print_properties(props):
    """Print a property dict (from calculate_properties) as a grouped,
    human-readable report on stdout.

    Args:
        props: Property dict; must contain every key produced by
            calculate_properties() — a missing key raises KeyError.
    """
    print("\nMolecular Properties:")
    print("="*60)
    # Group related properties
    print("\n[Basic Information]")
    print(f"  SMILES: {props['SMILES']}")
    print(f"  Formula: {props['Molecular_Formula']}")
    print("\n[Size & Weight]")
    print(f"  Molecular Weight: {props['MW']:.2f}")
    print(f"  Exact MW: {props['ExactMW']:.4f}")
    print(f"  Heavy Atoms: {props['Heavy_Atoms']}")
    print(f"  Heteroatoms: {props['Heteroatoms']}")
    print("\n[Lipophilicity]")
    print(f"  LogP: {props['LogP']:.2f}")
    print(f"  Molar Refractivity: {props['MR']:.2f}")
    print("\n[Polarity]")
    print(f"  TPSA: {props['TPSA']:.2f}")
    print(f"  Labute ASA: {props['LabuteASA']:.2f}")
    print(f"  H-bond Donors: {props['HBD']}")
    print(f"  H-bond Acceptors: {props['HBA']}")
    print("\n[Ring Systems]")
    print(f"  Total Rings: {props['Rings']}")
    print(f"  Aromatic Rings: {props['Aromatic_Rings']}")
    print(f"  Saturated Rings: {props['Saturated_Rings']}")
    print(f"  Aliphatic Rings: {props['Aliphatic_Rings']}")
    print(f"  Aromatic Heterocycles: {props['Aromatic_Heterocycles']}")
    print("\n[Flexibility & Complexity]")
    print(f"  Rotatable Bonds: {props['Rotatable_Bonds']}")
    print(f"  Fraction Csp3: {props['Fraction_Csp3']:.3f}")
    print(f"  Bertz Complexity: {props['BertzCT']:.1f}")
    print("\n[Drug-likeness]")
    print(f"  QED Score: {props['QED']:.3f}")
    # Boolean flags rendered as Yes/No for readability
    print(f"  Lipinski Pass: {'Yes' if props['Lipinski_Pass'] else 'No'}")
    print(f"  Lead-like: {'Yes' if props['Lead-like'] else 'No'}")
    print("="*60)
def main():
    """CLI entry point: parse arguments, then handle either a single
    SMILES string or a whole molecule file."""
    parser = argparse.ArgumentParser(
        description='Calculate molecular properties for molecules',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Single molecule
  python molecular_properties.py "CCO"
  # From file
  python molecular_properties.py --file molecules.smi
  # Save to CSV
  python molecular_properties.py --file molecules.sdf --output properties.csv
        """
    )
    parser.add_argument('smiles', nargs='?', help='SMILES string to analyze')
    parser.add_argument('--file', '-f', help='Input file (SDF or SMILES)')
    parser.add_argument('--output', '-o', help='Output CSV file')
    args = parser.parse_args()
    # At least one input source is required
    if not args.smiles and not args.file:
        parser.print_help()
        sys.exit(1)
    if args.smiles:
        # Process single molecule
        props = process_single_molecule(args.smiles)
        if props:
            print_properties(props)
    elif args.file:
        # Process file
        process_file(args.file, args.output)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/rdkit/scripts/molecular_properties.py",
"license": "MIT License",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/rdkit/scripts/similarity_search.py | #!/usr/bin/env python3
"""
Molecular Similarity Search
Perform fingerprint-based similarity screening against a database of molecules.
Supports multiple fingerprint types and similarity metrics.
Usage:
python similarity_search.py "CCO" database.smi --threshold 0.7
python similarity_search.py query.smi database.sdf --method morgan --output hits.csv
"""
import argparse
import sys
from pathlib import Path
try:
from rdkit import Chem
from rdkit.Chem import AllChem, MACCSkeys
from rdkit import DataStructs
except ImportError:
print("Error: RDKit not installed. Install with: conda install -c conda-forge rdkit")
sys.exit(1)
# Human-readable labels for the supported fingerprint generators; the keys
# double as the valid values for the --method CLI option and for
# generate_fingerprint()'s `method` argument.
FINGERPRINT_METHODS = {
    'morgan': 'Morgan fingerprint (ECFP-like)',
    'rdkit': 'RDKit topological fingerprint',
    'maccs': 'MACCS structural keys',
    'atompair': 'Atom pair fingerprint',
    'torsion': 'Topological torsion fingerprint'
}
def generate_fingerprint(mol, method='morgan', radius=2, n_bits=2048):
    """Generate a molecular fingerprint based on the specified method.

    Args:
        mol: RDKit Mol object (None is tolerated and returns None).
        method: One of the FINGERPRINT_METHODS keys (case-insensitive).
        radius: Radius for the Morgan fingerprint.
        n_bits: Bit-vector length (ignored for MACCS keys, which are fixed).

    Returns:
        An explicit bit-vector fingerprint, or None for a None molecule.

    Raises:
        ValueError: For an unrecognized method name.
    """
    if mol is None:
        return None
    method = method.lower()
    if method == 'morgan':
        return AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=n_bits)
    elif method == 'rdkit':
        return Chem.RDKFingerprint(mol, maxPath=7, fpSize=n_bits)
    elif method == 'maccs':
        return MACCSkeys.GenMACCSKeys(mol)
    elif method == 'atompair':
        # Fix: Pairs.GetAtomPairFingerprintAsBitVect takes no nBits argument;
        # the hashed rdMolDescriptors variant produces the fixed-length bit
        # vector that the similarity metrics used here expect.
        from rdkit.Chem import rdMolDescriptors
        return rdMolDescriptors.GetHashedAtomPairFingerprintAsBitVect(mol, nBits=n_bits)
    elif method == 'torsion':
        # Fix: the hashed topological-torsion bit vector also lives in
        # rdMolDescriptors, not in the AtomPairs.Torsions module.
        from rdkit.Chem import rdMolDescriptors
        return rdMolDescriptors.GetHashedTopologicalTorsionFingerprintAsBitVect(mol, nBits=n_bits)
    else:
        raise ValueError(f"Unknown fingerprint method: {method}")
def load_molecules(file_path):
    """Read molecules from an SDF or SMILES file into search records.

    Args:
        file_path: Path to the input file.

    Returns:
        List of dicts with keys 'index', 'name', 'smiles' and 'mol';
        unparseable records are skipped with a warning.
    """
    path = Path(file_path)
    if not path.exists():
        print(f"Error: File not found: {file_path}")
        return []
    ext = path.suffix.lower()
    if ext in ('.sdf', '.mol'):
        supplier = Chem.SDMolSupplier(str(path))
    elif ext in ('.smi', '.smiles', '.txt'):
        supplier = Chem.SmilesMolSupplier(str(path), titleLine=False)
    else:
        print(f"Error: Unsupported file format: {path.suffix}")
        return []
    records = []
    for pos, mol in enumerate(supplier, 1):
        if mol is None:
            print(f"Warning: Failed to parse molecule {pos}")
            continue
        # Fall back to a positional name when the record carries none
        label = mol.GetProp('_Name') if mol.HasProp('_Name') else f"Mol_{pos}"
        records.append({
            'index': pos,
            'name': label,
            'smiles': Chem.MolToSmiles(mol),
            'mol': mol,
        })
    return records
def similarity_search(query_mol, database, method='morgan', threshold=0.7,
                      radius=2, n_bits=2048, metric='tanimoto'):
    """Screen a molecule database for entries similar to the query.

    Args:
        query_mol: Query molecule (RDKit Mol object)
        database: Records produced by load_molecules()
        method: Fingerprint method
        threshold: Similarity threshold (0-1)
        radius: Morgan fingerprint radius
        n_bits: Fingerprint size
        metric: Similarity metric (tanimoto, dice, cosine)

    Returns:
        Hits sorted by decreasing similarity, each a dict with keys
        'index', 'name', 'smiles' and 'similarity'.
    """
    if query_mol is None:
        print("Error: Invalid query molecule")
        return []
    # Fingerprint the query once up front
    query_fp = generate_fingerprint(query_mol, method, radius, n_bits)
    if query_fp is None:
        print("Error: Failed to generate query fingerprint")
        return []
    # Dispatch table replaces the if/elif chain for metric selection
    metric_table = {
        'tanimoto': DataStructs.TanimotoSimilarity,
        'dice': DataStructs.DiceSimilarity,
        'cosine': DataStructs.CosineSimilarity,
    }
    key = metric.lower()
    if key not in metric_table:
        raise ValueError(f"Unknown similarity metric: {metric}")
    sim_func = metric_table[key]
    hits = []
    for entry in database:
        entry_fp = generate_fingerprint(entry['mol'], method, radius, n_bits)
        if entry_fp is None:
            continue
        score = sim_func(query_fp, entry_fp)
        if score >= threshold:
            hits.append({
                'index': entry['index'],
                'name': entry['name'],
                'smiles': entry['smiles'],
                'similarity': score,
            })
    # Best matches first
    return sorted(hits, key=lambda h: h['similarity'], reverse=True)
def write_results(hits, output_file):
    """Write ranked similarity hits to a CSV file.

    Args:
        hits: Hit dicts as returned by similarity_search().
        output_file: Destination path.
    """
    import csv
    header = ['Rank', 'Index', 'Name', 'SMILES', 'Similarity']
    with open(output_file, 'w', newline='') as handle:
        writer = csv.DictWriter(handle, fieldnames=header)
        writer.writeheader()
        for rank, hit in enumerate(hits, 1):
            writer.writerow({
                'Rank': rank,
                'Index': hit['index'],
                'Name': hit['name'],
                'SMILES': hit['smiles'],
                # Similarity is rendered with four decimals, as in reports
                'Similarity': f"{hit['similarity']:.4f}",
            })
def print_results(hits, max_display=20):
    """Print a ranked table of similarity hits to the console.

    Args:
        hits: Hit dicts as returned by similarity_search() (already sorted).
        max_display: Maximum number of rows to show; the remainder is
            summarized with a "... and N more" line.
    """
    if not hits:
        print("\nNo hits found above threshold")
        return
    print(f"\nFound {len(hits)} similar molecules:")
    print("="*80)
    print(f"{'Rank':<6} {'Index':<8} {'Similarity':<12} {'Name':<20} {'SMILES'}")
    print("-"*80)
    for rank, hit in enumerate(hits[:max_display], 1):
        # Truncate long names/SMILES so columns stay aligned
        name = hit['name'][:18] + '..' if len(hit['name']) > 20 else hit['name']
        smiles = hit['smiles'][:40] + '...' if len(hit['smiles']) > 43 else hit['smiles']
        print(f"{rank:<6} {hit['index']:<8} {hit['similarity']:<12.4f} {name:<20} {smiles}")
    if len(hits) > max_display:
        print(f"\n... and {len(hits) - max_display} more")
    print("="*80)
def main():
    """CLI entry point: parse arguments, load query and database, run the
    similarity search, and emit the results."""
    parser = argparse.ArgumentParser(
        description='Molecular similarity search using fingerprints',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=f"""
Available fingerprint methods:
{chr(10).join(f'  {k:12s} - {v}' for k, v in FINGERPRINT_METHODS.items())}
Similarity metrics:
  tanimoto - Tanimoto coefficient (default)
  dice - Dice coefficient
  cosine - Cosine similarity
Examples:
  # Search with SMILES query
  python similarity_search.py "CCO" database.smi --threshold 0.7
  # Use different fingerprint
  python similarity_search.py query.smi database.sdf --method maccs
  # Save results
  python similarity_search.py "c1ccccc1" database.smi --output hits.csv
  # Adjust Morgan radius
  python similarity_search.py "CCO" database.smi --method morgan --radius 3
        """
    )
    parser.add_argument('query', help='Query SMILES or file')
    parser.add_argument('database', help='Database file (SDF or SMILES)')
    parser.add_argument('--method', '-m', default='morgan',
                        choices=FINGERPRINT_METHODS.keys(),
                        help='Fingerprint method (default: morgan)')
    parser.add_argument('--threshold', '-t', type=float, default=0.7,
                        help='Similarity threshold (default: 0.7)')
    parser.add_argument('--radius', '-r', type=int, default=2,
                        help='Morgan fingerprint radius (default: 2)')
    parser.add_argument('--bits', '-b', type=int, default=2048,
                        help='Fingerprint size (default: 2048)')
    parser.add_argument('--metric', default='tanimoto',
                        choices=['tanimoto', 'dice', 'cosine'],
                        help='Similarity metric (default: tanimoto)')
    parser.add_argument('--output', '-o', help='Output CSV file')
    parser.add_argument('--max-display', type=int, default=20,
                        help='Maximum hits to display (default: 20)')
    args = parser.parse_args()
    # Load query — a path on disk wins over interpreting it as SMILES
    query_path = Path(args.query)
    if query_path.exists():
        # Query is a file; only the first molecule in it is used
        query_mols = load_molecules(args.query)
        if not query_mols:
            print("Error: No valid molecules in query file")
            sys.exit(1)
        query_mol = query_mols[0]['mol']
        query_smiles = query_mols[0]['smiles']
    else:
        # Query is SMILES string
        query_mol = Chem.MolFromSmiles(args.query)
        query_smiles = args.query
        if query_mol is None:
            print(f"Error: Failed to parse query SMILES: {args.query}")
            sys.exit(1)
    print(f"Query: {query_smiles}")
    print(f"Method: {args.method}")
    print(f"Threshold: {args.threshold}")
    print(f"Loading database: {args.database}...")
    # Load database
    database = load_molecules(args.database)
    if not database:
        print("Error: No valid molecules in database")
        sys.exit(1)
    print(f"Loaded {len(database)} molecules")
    print("Searching...")
    # Perform search
    hits = similarity_search(
        query_mol, database,
        method=args.method,
        threshold=args.threshold,
        radius=args.radius,
        n_bits=args.bits,
        metric=args.metric
    )
    # Output results (CSV file and/or console table)
    if args.output:
        write_results(hits, args.output)
        print(f"\nResults written to: {args.output}")
    print_results(hits, args.max_display)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/rdkit/scripts/similarity_search.py",
"license": "MIT License",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/rdkit/scripts/substructure_filter.py | #!/usr/bin/env python3
"""
Substructure Filter
Filter molecules based on substructure patterns using SMARTS.
Supports inclusion and exclusion filters, and custom pattern libraries.
Usage:
python substructure_filter.py molecules.smi --pattern "c1ccccc1" --output filtered.smi
python substructure_filter.py database.sdf --exclude "C(=O)Cl" --filter-type functional-groups
"""
import argparse
import sys
from pathlib import Path
try:
from rdkit import Chem
except ImportError:
print("Error: RDKit not installed. Install with: conda install -c conda-forge rdkit")
sys.exit(1)
# Common SMARTS pattern libraries, keyed by the --filter-type CLI option.
# Each library maps a human-readable pattern name to a SMARTS string:
#   functional-groups: common organic functional groups
#   rings:             frequent (hetero)aromatic ring systems
#   pains:             Pan-Assay INterference compoundS substructures
#   privileged:        scaffolds recurrent in bioactive molecules
PATTERN_LIBRARIES = {
    'functional-groups': {
        'alcohol': '[OH][C]',
        'aldehyde': '[CH1](=O)',
        'ketone': '[C](=O)[C]',
        'carboxylic_acid': 'C(=O)[OH]',
        'ester': 'C(=O)O[C]',
        'amide': 'C(=O)N',
        'amine': '[NX3]',
        'ether': '[C][O][C]',
        'nitrile': 'C#N',
        'nitro': '[N+](=O)[O-]',
        'halide': '[C][F,Cl,Br,I]',
        'thiol': '[C][SH]',
        'sulfide': '[C][S][C]',
    },
    'rings': {
        'benzene': 'c1ccccc1',
        'pyridine': 'n1ccccc1',
        'pyrrole': 'n1cccc1',
        'furan': 'o1cccc1',
        'thiophene': 's1cccc1',
        'imidazole': 'n1cncc1',
        'indole': 'c1ccc2[nH]ccc2c1',
        'naphthalene': 'c1ccc2ccccc2c1',
    },
    'pains': {
        'rhodanine': 'S1C(=O)NC(=S)C1',
        'catechol': 'c1ccc(O)c(O)c1',
        'quinone': 'O=C1C=CC(=O)C=C1',
        'michael_acceptor': 'C=CC(=O)',
        'alkyl_halide': '[C][I,Br]',
    },
    'privileged': {
        'biphenyl': 'c1ccccc1-c2ccccc2',
        'piperazine': 'N1CCNCC1',
        'piperidine': 'N1CCCCC1',
        'morpholine': 'N1CCOCC1',
    }
}
def load_molecules(file_path, keep_props=True):
    """Read molecules from an SDF or SMILES file.

    Args:
        file_path: Path to the input file.
        keep_props: Currently unused; kept for interface compatibility.

    Returns:
        List of RDKit Mol objects; unparseable records are skipped with
        a warning.
    """
    path = Path(file_path)
    if not path.exists():
        print(f"Error: File not found: {file_path}")
        return []
    ext = path.suffix.lower()
    if ext in ('.sdf', '.mol'):
        supplier = Chem.SDMolSupplier(str(path))
    elif ext in ('.smi', '.smiles', '.txt'):
        supplier = Chem.SmilesMolSupplier(str(path), titleLine=False)
    else:
        print(f"Error: Unsupported file format: {path.suffix}")
        return []
    loaded = []
    for pos, mol in enumerate(supplier, 1):
        if mol is None:
            print(f"Warning: Failed to parse molecule {pos}")
            continue
        loaded.append(mol)
    return loaded
def create_pattern_query(pattern_string):
    """Build an RDKit query molecule from a pattern string.

    SMARTS interpretation is attempted first, then plain SMILES.

    Args:
        pattern_string: SMARTS or SMILES text.

    Returns:
        Query Mol, or None (with an error message) when neither parse works.
    """
    for parse in (Chem.MolFromSmarts, Chem.MolFromSmiles):
        query = parse(pattern_string)
        if query is not None:
            return query
    print(f"Error: Invalid pattern: {pattern_string}")
    return None
def filter_molecules(molecules, include_patterns=None, exclude_patterns=None,
                     match_all_include=False):
    """Apply substructure include/exclude filters to a molecule list.

    Args:
        molecules: List of RDKit Mol objects (None entries are skipped
            but still consume an index).
        include_patterns: List of (name, pattern) tuples a molecule must match.
        exclude_patterns: List of (name, pattern) tuples that reject a molecule.
        match_all_include: If True, a molecule must match ALL include
            patterns; otherwise any single match suffices.

    Returns:
        Tuple of (kept_molecules, per_molecule_match_records).
    """
    kept = []
    records = []
    for pos, mol in enumerate(molecules, 1):
        if mol is None:
            continue
        smiles = Chem.MolToSmiles(mol)
        # Exclusions take priority: any hit rejects the molecule outright
        rejected_by = [name for name, pattern in (exclude_patterns or [])
                       if mol.HasSubstructMatch(pattern)]
        if rejected_by:
            records.append({
                'index': pos,
                'smiles': smiles,
                'status': 'excluded',
                'matches': rejected_by,
            })
            continue
        if include_patterns:
            found = [name for name, pattern in include_patterns
                     if mol.HasSubstructMatch(pattern)]
            # AND semantics when match_all_include, OR semantics otherwise
            if match_all_include:
                passed = len(found) == len(include_patterns)
            else:
                passed = bool(found)
            if passed:
                kept.append(mol)
                records.append({
                    'index': pos,
                    'smiles': smiles,
                    'status': 'included',
                    'matches': found,
                })
            else:
                records.append({
                    'index': pos,
                    'smiles': smiles,
                    'status': 'no_match',
                    'matches': [],
                })
        else:
            # No inclusion patterns: keep everything that was not excluded
            kept.append(mol)
            records.append({
                'index': pos,
                'smiles': smiles,
                'status': 'included',
                'matches': [],
            })
    return kept, records
def write_molecules(molecules, output_file):
    """Persist molecules to an SDF or SMILES file, chosen by extension.

    Args:
        molecules: RDKit Mol objects to write.
        output_file: Destination path (.sdf, .smi, .smiles or .txt).
    """
    output_path = Path(output_file)
    ext = output_path.suffix.lower()
    if ext == '.sdf':
        writer = Chem.SDWriter(str(output_path))
        for mol in molecules:
            writer.write(mol)
        writer.close()
    elif ext in ('.smi', '.smiles', '.txt'):
        with open(output_path, 'w') as handle:
            for mol in molecules:
                smiles = Chem.MolToSmiles(mol)
                name = mol.GetProp('_Name') if mol.HasProp('_Name') else ''
                handle.write(f"{smiles} {name}\n")
    else:
        print(f"Error: Unsupported output format: {output_path.suffix}")
        return
    print(f"Wrote {len(molecules)} molecules to {output_file}")
def write_report(match_info, output_file):
    """Write one CSV row per molecule describing its filter outcome.

    Args:
        match_info: Records produced by filter_molecules().
        output_file: Destination CSV path.
    """
    import csv
    header = ['Index', 'SMILES', 'Status', 'Matches']
    with open(output_file, 'w', newline='') as handle:
        writer = csv.DictWriter(handle, fieldnames=header)
        writer.writeheader()
        for record in match_info:
            writer.writerow({
                'Index': record['index'],
                'SMILES': record['smiles'],
                'Status': record['status'],
                'Matches': ', '.join(record['matches']),
            })
def print_summary(total, filtered, match_info):
    """Print filtering statistics to the console.

    Args:
        total: Number of molecules examined.
        filtered: List of molecules that passed the filters.
        match_info: Per-molecule status records from filter_molecules().
    """
    print("\n" + "="*60)
    print("Filtering Summary")
    print("="*60)
    print(f"Total molecules: {total}")
    print(f"Passed filter: {len(filtered)}")
    print(f"Filtered out: {total - len(filtered)}")
    # Fix: guard against ZeroDivisionError when the input held no molecules
    pass_rate = (len(filtered) / total * 100) if total else 0.0
    print(f"Pass rate: {pass_rate:.1f}%")
    # Count by status
    status_counts = {}
    for info in match_info:
        status = info['status']
        status_counts[status] = status_counts.get(status, 0) + 1
    print("\nBreakdown:")
    for status, count in status_counts.items():
        print(f" {status:15s}: {count}")
    print("="*60)
def main():
    """Command-line entry point.

    Parses arguments, loads molecules, builds include/exclude pattern
    lists (custom and/or predefined library patterns), runs the
    substructure filter, and writes the filtered molecules plus an
    optional per-molecule CSV report.
    """
    parser = argparse.ArgumentParser(
        description='Filter molecules by substructure patterns',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=f"""
Pattern libraries:
--filter-type functional-groups Common functional groups
--filter-type rings Ring systems
--filter-type pains PAINS (Pan-Assay Interference)
--filter-type privileged Privileged structures
Examples:
# Include molecules with benzene ring
python substructure_filter.py molecules.smi --pattern "c1ccccc1" -o filtered.smi
# Exclude reactive groups
python substructure_filter.py database.sdf --exclude "C(=O)Cl" -o clean.sdf
# Filter by functional groups
python substructure_filter.py molecules.smi --filter-type functional-groups -o fg.smi
# Remove PAINS
python substructure_filter.py compounds.smi --filter-type pains --exclude-mode -o clean.smi
# Multiple patterns
python substructure_filter.py mol.smi --pattern "c1ccccc1" --pattern "N" -o aromatic_amines.smi
"""
    )
    parser.add_argument('input', help='Input file (SDF or SMILES)')
    parser.add_argument('--pattern', '-p', action='append',
                        help='SMARTS/SMILES pattern to include (can specify multiple)')
    parser.add_argument('--exclude', '-e', action='append',
                        help='SMARTS/SMILES pattern to exclude (can specify multiple)')
    parser.add_argument('--filter-type', choices=PATTERN_LIBRARIES.keys(),
                        help='Use predefined pattern library')
    parser.add_argument('--exclude-mode', action='store_true',
                        help='Use filter-type patterns for exclusion instead of inclusion')
    parser.add_argument('--match-all', action='store_true',
                        help='Molecule must match ALL include patterns')
    parser.add_argument('--output', '-o', help='Output file')
    parser.add_argument('--report', '-r', help='Write detailed report to CSV')
    parser.add_argument('--list-patterns', action='store_true',
                        help='List available pattern libraries and exit')
    args = parser.parse_args()
    # List patterns mode: informational dump, then exit successfully.
    if args.list_patterns:
        print("\nAvailable Pattern Libraries:")
        print("="*60)
        for lib_name, patterns in PATTERN_LIBRARIES.items():
            print(f"\n{lib_name}:")
            for name, pattern in patterns.items():
                print(f"  {name:25s}: {pattern}")
        sys.exit(0)
    # Load molecules
    print(f"Loading molecules from: {args.input}")
    molecules = load_molecules(args.input)
    if not molecules:
        print("Error: No valid molecules loaded")
        sys.exit(1)
    print(f"Loaded {len(molecules)} molecules")
    # Prepare patterns: lists of (name, query-mol) pairs.
    include_patterns = []
    exclude_patterns = []
    # Add custom include patterns
    if args.pattern:
        for pattern_str in args.pattern:
            query = create_pattern_query(pattern_str)
            if query:  # silently skip patterns that failed to parse
                include_patterns.append(('custom', query))
    # Add custom exclude patterns
    if args.exclude:
        for pattern_str in args.exclude:
            query = create_pattern_query(pattern_str)
            if query:
                exclude_patterns.append(('custom', query))
    # Add library patterns; --exclude-mode flips them to the exclude list.
    if args.filter_type:
        lib_patterns = PATTERN_LIBRARIES[args.filter_type]
        for name, pattern_str in lib_patterns.items():
            query = create_pattern_query(pattern_str)
            if query:
                if args.exclude_mode:
                    exclude_patterns.append((name, query))
                else:
                    include_patterns.append((name, query))
    if not include_patterns and not exclude_patterns:
        print("Error: No patterns specified")
        sys.exit(1)
    # Print filter configuration
    print(f"\nFilter configuration:")
    if include_patterns:
        print(f"  Include patterns: {len(include_patterns)}")
        if args.match_all:
            print("  Mode: Match ALL")
        else:
            print("  Mode: Match ANY")
    if exclude_patterns:
        print(f"  Exclude patterns: {len(exclude_patterns)}")
    # Perform filtering
    print("\nFiltering...")
    filtered, match_info = filter_molecules(
        molecules,
        include_patterns=include_patterns if include_patterns else None,
        exclude_patterns=exclude_patterns if exclude_patterns else None,
        match_all_include=args.match_all
    )
    # Print summary
    print_summary(len(molecules), filtered, match_info)
    # Write output
    if args.output:
        write_molecules(filtered, args.output)
    if args.report:
        write_report(match_info, args.report)
        print(f"Detailed report written to: {args.report}")
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/rdkit/scripts/substructure_filter.py",
"license": "MIT License",
"lines": 320,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/reactome-database/scripts/reactome_query.py | #!/usr/bin/env python3
"""
Reactome Database Query Helper Script
This script provides convenient command-line access to common Reactome operations.
Usage:
python reactome_query.py version
python reactome_query.py query <pathway_id>
python reactome_query.py analyze <gene_list_file>
python reactome_query.py search <term>
python reactome_query.py entities <pathway_id>
Examples:
python reactome_query.py version
python reactome_query.py query R-HSA-69278
python reactome_query.py analyze genes.txt
python reactome_query.py search "cell cycle"
python reactome_query.py entities R-HSA-69278
"""
import sys
import json
import requests
from typing import List, Dict, Optional
class ReactomeClient:
    """Client for interacting with Reactome REST APIs.

    Wraps the ContentService (pathway/entity queries) and the
    AnalysisService (enrichment analysis). Every method raises
    requests.HTTPError on a non-2xx response.
    """
    CONTENT_BASE = "https://reactome.org/ContentService"
    ANALYSIS_BASE = "https://reactome.org/AnalysisService"
    # requests has no default timeout; without one a hung server would
    # block the caller forever.
    REQUEST_TIMEOUT = 60

    def get_version(self) -> str:
        """Get Reactome database version"""
        response = requests.get(
            f"{self.CONTENT_BASE}/data/database/version",
            timeout=self.REQUEST_TIMEOUT,
        )
        response.raise_for_status()
        return response.text.strip()

    def query_pathway(self, pathway_id: str) -> Dict:
        """Query pathway information by ID"""
        response = requests.get(
            f"{self.CONTENT_BASE}/data/query/{pathway_id}",
            timeout=self.REQUEST_TIMEOUT,
        )
        response.raise_for_status()
        return response.json()

    def get_pathway_entities(self, pathway_id: str) -> List[Dict]:
        """Get participating entities in a pathway"""
        response = requests.get(
            f"{self.CONTENT_BASE}/data/event/{pathway_id}/participatingPhysicalEntities",
            timeout=self.REQUEST_TIMEOUT,
        )
        response.raise_for_status()
        return response.json()

    def search_pathways(self, term: str) -> List[Dict]:
        """Search for pathways by name"""
        response = requests.get(
            f"{self.CONTENT_BASE}/data/query",
            params={"name": term},
            timeout=self.REQUEST_TIMEOUT,
        )
        response.raise_for_status()
        return response.json()

    def analyze_genes(self, gene_list: List[str]) -> Dict:
        """Perform pathway enrichment analysis on gene list.

        Identifiers are submitted one per line as plain text, per the
        AnalysisService API.
        """
        data = "\n".join(gene_list)
        response = requests.post(
            f"{self.ANALYSIS_BASE}/identifiers/",
            headers={"Content-Type": "text/plain"},
            data=data,
            timeout=self.REQUEST_TIMEOUT,
        )
        response.raise_for_status()
        return response.json()

    def get_analysis_by_token(self, token: str) -> Dict:
        """Retrieve analysis results by token"""
        response = requests.get(
            f"{self.ANALYSIS_BASE}/token/{token}",
            timeout=self.REQUEST_TIMEOUT,
        )
        response.raise_for_status()
        return response.json()
def print_json(data):
    """Pretty-print a JSON-serializable object to stdout."""
    formatted = json.dumps(data, indent=2)
    print(formatted)
def command_version():
    """Fetch and display the current Reactome database version."""
    release = ReactomeClient().get_version()
    print(f"Reactome Database Version: {release}")
def command_query(pathway_id: str):
    """Query a pathway by stable ID and print its details."""
    client = ReactomeClient()
    try:
        record = client.query_pathway(pathway_id)
    except requests.HTTPError as err:
        if err.response.status_code == 404:
            print(f"Error: Pathway '{pathway_id}' not found")
        else:
            print(f"Error: {err}")
        sys.exit(1)
    print(f"Pathway: {record['displayName']}")
    print(f"ID: {record['stId']}")
    print(f"Type: {record['schemaClass']}")
    if record.get('species'):
        print(f"Species: {record['species'][0]['displayName']}")
    if record.get('summation'):
        print(f"\nDescription: {record['summation'][0]['text']}")
    print("\nFull JSON response:")
    print_json(record)
def command_entities(pathway_id: str):
    """List the physical entities of a pathway, grouped by schema class."""
    client = ReactomeClient()
    try:
        entities = client.get_pathway_entities(pathway_id)
    except requests.HTTPError as err:
        if err.response.status_code == 404:
            print(f"Error: Pathway '{pathway_id}' not found")
        else:
            print(f"Error: {err}")
        sys.exit(1)
    print(f"Entities in pathway {pathway_id}: {len(entities)} total\n")
    # Group entities by their schema class.
    grouped = {}
    for item in entities:
        grouped.setdefault(item['schemaClass'], []).append(item)
    # Display each group, truncated to the first ten members.
    for kind in sorted(grouped):
        members = grouped[kind]
        print(f"{kind} ({len(members)}):")
        for item in members[:10]:
            print(f"  - {item['stId']}: {item['displayName']}")
        if len(members) > 10:
            print(f"  ... and {len(members) - 10} more")
        print()
def command_search(term: str):
    """Search Reactome for pathways matching a term and print matches."""
    client = ReactomeClient()
    try:
        hits = client.search_pathways(term)
    except requests.HTTPError as err:
        print(f"Error: {err}")
        sys.exit(1)
    print(f"Search results for '{term}': {len(hits)} found\n")
    # Show at most the first 20 results.
    for hit in hits[:20]:
        print(f"{hit['stId']}: {hit['displayName']}")
        if hit.get('species'):
            print(f"  Species: {hit['species'][0]['displayName']}")
        print(f"  Type: {hit['schemaClass']}")
        print()
    if len(hits) > 20:
        print(f"... and {len(hits) - 20} more results")
def command_analyze(gene_file: str):
    """Run pathway enrichment analysis on the identifiers in gene_file.

    Reads one identifier per line, submits them to the AnalysisService,
    prints an enrichment summary, and saves the full JSON results next
    to the input file as '<name>_results.json'.
    """
    client = ReactomeClient()
    # Read gene list (blank lines ignored)
    try:
        with open(gene_file, 'r') as f:
            genes = [line.strip() for line in f if line.strip()]
    except FileNotFoundError:
        print(f"Error: File '{gene_file}' not found")
        sys.exit(1)
    print(f"Analyzing {len(genes)} genes...")
    try:
        result = client.analyze_genes(genes)
        # Display summary
        summary = result['summary']
        print(f"\nAnalysis Type: {summary['type']}")
        print(f"Token: {summary['token']} (valid for 7 days)")
        print(f"Species: {summary.get('species', 'N/A')}")
        # Display pathways
        pathways = result.get('pathways', [])
        print(f"\nEnriched Pathways: {len(pathways)} found")
        # Show significant pathways (FDR < 0.05)
        significant = [p for p in pathways if p['entities']['fdr'] < 0.05]
        print(f"Significant (FDR < 0.05): {len(significant)}\n")
        # Display top 10 pathways
        print("Top 10 Pathways:")
        for i, pathway in enumerate(pathways[:10], 1):
            print(f"\n{i}. {pathway['name']}")
            print(f"   ID: {pathway['stId']}")
            entities = pathway['entities']
            print(f"   Found: {entities['found']}/{entities['total']} entities")
            print(f"   p-value: {entities['pValue']:.6e}")
            print(f"   FDR: {entities['fdr']:.6e}")
        # Generate browser URL for top pathway
        if pathways:
            token = summary['token']
            top_pathway = pathways[0]['stId']
            url = f"https://reactome.org/PathwayBrowser/#{top_pathway}&DTAB=AN&ANALYSIS={token}"
            print(f"\nView top result in browser:")
            print(url)
        # Bug fix: the old gene_file.replace('.txt', '_results.json') produced
        # a name equal to the input file when it did not end in '.txt' (and
        # replaced '.txt' anywhere in the path), silently overwriting the
        # gene list with the JSON results.
        if gene_file.endswith('.txt'):
            output_file = gene_file[:-4] + '_results.json'
        else:
            output_file = gene_file + '_results.json'
        with open(output_file, 'w') as f:
            json.dump(result, f, indent=2)
        print(f"\nFull results saved to: {output_file}")
    except requests.HTTPError as e:
        print(f"Error: {e}")
        sys.exit(1)
def print_usage():
    """Print the module docstring as usage information."""
    usage_text = __doc__
    print(usage_text)
def main():
    """Dispatch the requested sub-command from sys.argv."""
    if len(sys.argv) < 2:
        print_usage()
        sys.exit(1)
    command = sys.argv[1].lower()
    # 'version' takes no extra arguments.
    if command == "version":
        command_version()
        return
    # Every remaining command needs one positional argument; table maps
    # command -> (missing-argument message, usage suffix).
    requirements = {
        "query": ("pathway_id required", "query <pathway_id>"),
        "entities": ("pathway_id required", "entities <pathway_id>"),
        "search": ("search term required", "search <term>"),
        "analyze": ("gene list file required", "analyze <gene_list_file>"),
    }
    if command not in requirements:
        print(f"Error: Unknown command '{command}'")
        print_usage()
        sys.exit(1)
    if len(sys.argv) < 3:
        missing, usage = requirements[command]
        print(f"Error: {missing}")
        print(f"Usage: python reactome_query.py {usage}")
        sys.exit(1)
    if command == "query":
        command_query(sys.argv[2])
    elif command == "entities":
        command_entities(sys.argv[2])
    elif command == "search":
        # Search joins all remaining arguments into one term.
        command_search(" ".join(sys.argv[2:]))
    else:
        command_analyze(sys.argv[2])
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/reactome-database/scripts/reactome_query.py",
"license": "MIT License",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/research-lookup/examples.py | #!/usr/bin/env python3
"""
Example usage of the Research Lookup skill with automatic model selection.
This script demonstrates:
1. Automatic model selection based on query complexity
2. Manual model override options
3. Batch query processing
4. Integration with scientific writing workflows
"""
import os
from research_lookup import ResearchLookup
def example_automatic_selection():
    """Demonstrate automatic model selection."""
    banner = "=" * 80
    print(banner)
    print("EXAMPLE 1: Automatic Model Selection")
    print(banner)
    print()
    research = ResearchLookup()
    # (query, model we expect the auto-selector to pick)
    cases = [
        ("Recent advances in CRISPR gene editing 2024",
         "Sonar Pro Search (fast lookup)"),
        ("Compare and contrast the efficacy of mRNA vaccines versus traditional vaccines",
         "Sonar Reasoning Pro (analytical)"),
    ]
    for query, expected in cases:
        print(f"Query: {query}")
        print(f"Expected model: {expected}")
        outcome = research.lookup(query)
        print(f"Actual model: {outcome.get('model')}")
        print()
def example_manual_override():
    """Demonstrate manual model override."""
    banner = "=" * 80
    print(banner)
    print("EXAMPLE 2: Manual Model Override")
    print(banner)
    print()
    query = "Explain the mechanism of CRISPR-Cas9"
    # Force each model in turn, regardless of query complexity.
    overrides = (("pro", "Sonar Pro Search"), ("reasoning", "Sonar Reasoning Pro"))
    for forced, label in overrides:
        tool = ResearchLookup(force_model=forced)
        print(f"Query: {query}")
        print(f"Forced model: {label}")
        outcome = tool.lookup(query)
        print(f"Model used: {outcome.get('model')}")
        print()
def example_batch_queries():
    """Demonstrate batch query processing."""
    banner = "=" * 80
    print(banner)
    print("EXAMPLE 3: Batch Query Processing")
    print(banner)
    print()
    research = ResearchLookup()
    # Mix of simple and complex queries so both models get exercised.
    queries = [
        "Recent clinical trials for Alzheimer's disease",
        "Compare deep learning vs traditional ML in drug discovery",
        "Statistical power analysis methods",
    ]
    print("Processing batch queries...")
    print("Each query will automatically select the appropriate model")
    print()
    outcomes = research.batch_lookup(queries, delay=1.0)
    for position, outcome in enumerate(outcomes, start=1):
        print(f"Query {position}: {outcome['query'][:50]}...")
        print(f"  Model: {outcome.get('model')}")
        print(f"  Type: {outcome.get('model_type')}")
        print()
def example_scientific_writing_workflow():
    """Demonstrate integration with scientific writing workflow."""
    banner = "=" * 80
    print(banner)
    print("EXAMPLE 4: Scientific Writing Workflow")
    print(banner)
    print()
    # Instantiated as in a real workflow; no queries are issued here.
    research = ResearchLookup()
    phases = [
        ("PHASE 1: Literature Review (Breadth)", [
            "Recent papers on machine learning in genomics 2024",
            "Clinical applications of AI in radiology",
            "RNA sequencing analysis methods",
        ]),
        ("PHASE 2: Discussion (Synthesis & Analysis)", [
            "Compare the advantages and limitations of different ML approaches in genomics",
            "Explain the relationship between model interpretability and clinical adoption",
            "Analyze the ethical implications of AI in medical diagnosis",
        ]),
    ]
    for title, queries in phases:
        print(title)
        for query in queries:
            print(f"  - {query}")
        print()
def main():
    """Run all examples (requires OPENROUTER_API_KEY to be set).

    Without an API key this only demonstrates the complexity-assessment
    logic, which needs no network access.
    """
    if not os.getenv("OPENROUTER_API_KEY"):
        print("Note: Set OPENROUTER_API_KEY environment variable to run live queries")
        print("These examples show the structure without making actual API calls")
        print()
    # Uncomment to run examples (requires API key)
    # example_automatic_selection()
    # example_manual_override()
    # example_batch_queries()
    # example_scientific_writing_workflow()
    # Show complexity assessment without API calls
    print("=" * 80)
    print("COMPLEXITY ASSESSMENT EXAMPLES (No API calls required)")
    print("=" * 80)
    print()
    # Dummy key so the constructor's env-var check passes offline.
    os.environ.setdefault("OPENROUTER_API_KEY", "test")
    research = ResearchLookup()
    test_queries = [
        ("Recent CRISPR studies", "pro"),
        ("Compare CRISPR vs TALENs", "reasoning"),
        ("Explain how CRISPR works", "reasoning"),
        ("Western blot protocol", "pro"),
        ("Pros and cons of different sequencing methods", "reasoning"),
    ]
    for query, expected in test_queries:
        complexity = research._assess_query_complexity(query)
        model_name = "Sonar Reasoning Pro" if complexity == "reasoning" else "Sonar Pro Search"
        # Fix: the pass/fail marker was mojibake ("β" in both ternary
        # branches), so the comparison against `expected` produced the
        # same output either way. Use distinct, unambiguous markers.
        status = "PASS" if complexity == expected else "FAIL"
        print(f"{status} '{query}'")
        print(f"  -> {model_name}")
        print()
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/research-lookup/examples.py",
"license": "MIT License",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/research-lookup/lookup.py | #!/usr/bin/env python3
"""
Research Lookup Tool for Claude Code
Performs research queries using Perplexity Sonar Pro Search via OpenRouter.
"""
import os
import sys
import json
from typing import Dict, List, Optional
# Import the main research lookup class
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'scripts'))
from research_lookup import ResearchLookup
def format_response(result: Dict) -> str:
    """Render a research-lookup result dict as display text.

    Failed lookups become a one-line error message; successful ones get
    a header, the model response, extracted citations, and token usage.
    """
    if not result["success"]:
        return f"β Research lookup failed: {result['error']}"
    body = result["response"]
    refs = result["citations"]
    # Header block followed by the model's answer.
    output = f"""π **Research Results**
**Query:** {result['query']}
**Model:** {result['model']}
**Timestamp:** {result['timestamp']}
---
{body}
"""
    if refs:
        output += f"\n**Extracted Citations ({len(refs)}):**\n"
        for idx, ref in enumerate(refs, 1):
            if ref.get("doi"):
                output += f"{idx}. DOI: {ref['doi']}\n"
            elif ref.get("authors") and ref.get("year"):
                output += f"{idx}. {ref['authors']} ({ref['year']})\n"
            else:
                output += f"{idx}. {ref}\n"
    usage = result.get("usage")
    if usage:
        output += f"\n**Usage:** {usage.get('total_tokens', 'N/A')} tokens"
    return output
def main():
    """Main entry point for Claude Code tool."""
    # Refuse to run without credentials.
    if not os.getenv("OPENROUTER_API_KEY"):
        print("β Error: OPENROUTER_API_KEY environment variable not set")
        print("Please set it in your .env file or export it:")
        print("  export OPENROUTER_API_KEY='your_openrouter_api_key'")
        return 1
    # Everything after the script name is the query.
    if len(sys.argv) < 2:
        print("β Error: No query provided")
        print("Usage: python lookup.py 'your research query here'")
        return 1
    query = " ".join(sys.argv[1:])
    try:
        tool = ResearchLookup()
        print(f"π Researching: {query}")
        outcome = tool.lookup(query)
        print(format_response(outcome))
        return 0 if outcome["success"] else 1
    except Exception as exc:
        print(f"β Error: {str(exc)}")
        return 1
if __name__ == "__main__":
exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/research-lookup/lookup.py",
"license": "MIT License",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/research-lookup/research_lookup.py | #!/usr/bin/env python3
"""
Research Information Lookup Tool
Uses Perplexity's Sonar Pro Search model through OpenRouter for academic research queries.
"""
import os
import json
import requests
import time
from datetime import datetime
from typing import Dict, List, Optional, Any
from urllib.parse import quote
class ResearchLookup:
    """Research information lookup using Perplexity Sonar models via OpenRouter.

    Auto-selects between a fast search model and a reasoning model based on
    query complexity, and always requests academic/scholarly search mode.
    """
    # Available models (OpenRouter model identifiers).
    MODELS = {
        "pro": "perplexity/sonar-pro-search",  # Fast lookup with search, cost-effective
        "reasoning": "perplexity/sonar-reasoning-pro",  # Deep analysis with reasoning and online search
    }
    # Keywords that indicate complex queries requiring reasoning model
    REASONING_KEYWORDS = [
        "compare", "contrast", "analyze", "analysis", "evaluate", "critique",
        "versus", "vs", "vs.", "compared to", "differences between", "similarities",
        "meta-analysis", "systematic review", "synthesis", "integrate",
        "mechanism", "why", "how does", "how do", "explain", "relationship",
        "theoretical framework", "implications", "interpret", "reasoning",
        "controversy", "conflicting", "paradox", "debate", "reconcile",
        "pros and cons", "advantages and disadvantages", "trade-off", "tradeoff",
    ]
    def __init__(self, force_model: Optional[str] = None):
        """
        Initialize the research lookup tool.

        Args:
            force_model: Optional model override ('pro' or 'reasoning').
                If None, model is auto-selected based on query complexity.

        Raises:
            ValueError: If OPENROUTER_API_KEY is not set in the environment.
        """
        self.api_key = os.getenv("OPENROUTER_API_KEY")
        if not self.api_key:
            raise ValueError("OPENROUTER_API_KEY environment variable not set")
        self.base_url = "https://openrouter.ai/api/v1"
        self.force_model = force_model
        # HTTP-Referer / X-Title appear to identify this client to
        # OpenRouter (presumably for attribution — TODO confirm).
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            "HTTP-Referer": "https://scientific-writer.local",
            "X-Title": "Scientific Writer Research Tool"
        }
    def _select_model(self, query: str) -> str:
        """
        Select the appropriate model based on query complexity.

        Args:
            query: The research query

        Returns:
            Model identifier string
        """
        if self.force_model:
            # Unknown override values fall back to the reasoning model.
            return self.MODELS.get(self.force_model, self.MODELS["reasoning"])
        # Check for reasoning keywords (case-insensitive)
        query_lower = query.lower()
        for keyword in self.REASONING_KEYWORDS:
            if keyword in query_lower:
                return self.MODELS["reasoning"]
        # Check for multiple questions or complex structure
        question_count = query.count("?")
        if question_count >= 2:
            return self.MODELS["reasoning"]
        # Check for very long queries (likely complex)
        if len(query) > 200:
            return self.MODELS["reasoning"]
        # Default to pro for simple lookups
        return self.MODELS["pro"]
    def _make_request(self, messages: List[Dict[str, str]], model: str, **kwargs) -> Dict[str, Any]:
        """Make a request to the OpenRouter API with academic search mode.

        Raises:
            Exception: wrapping any requests-level failure (timeout,
                connection error, non-2xx status).
        """
        data = {
            "model": model,
            "messages": messages,
            "max_tokens": 8000,
            "temperature": 0.1,  # Low temperature for factual research
            # Perplexity-specific parameters for academic search
            "search_mode": "academic",  # Prioritize scholarly sources (peer-reviewed papers, journals)
            "search_context_size": "high",  # Always use high context for deeper research
            **kwargs
        }
        try:
            response = requests.post(
                f"{self.base_url}/chat/completions",
                headers=self.headers,
                json=data,
                timeout=90  # Increased timeout for academic search
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            raise Exception(f"API request failed: {str(e)}")
    def _format_research_prompt(self, query: str) -> str:
        """Format the query for optimal research results."""
        # NOTE: this is the full instruction template sent as the user
        # message; the literal text must not be reflowed.
        return f"""You are an expert research assistant. Please provide comprehensive, accurate research information for the following query: "{query}"
IMPORTANT INSTRUCTIONS:
1. Focus on ACADEMIC and SCIENTIFIC sources (peer-reviewed papers, reputable journals, institutional research)
2. Include RECENT information (prioritize 2020-2026 publications)
3. Provide COMPLETE citations with authors, title, journal/conference, year, and DOI when available
4. Structure your response with clear sections and proper attribution
5. Be comprehensive but concise - aim for 800-1200 words
6. Include key findings, methodologies, and implications when relevant
7. Note any controversies, limitations, or conflicting evidence
RESPONSE FORMAT:
- Start with a brief summary (2-3 sentences)
- Present key findings and studies in organized sections
- End with future directions or research gaps if applicable
- Include 5-8 high-quality citations at the end
Remember: This is for academic research purposes. Prioritize accuracy, completeness, and proper attribution."""
    def lookup(self, query: str) -> Dict[str, Any]:
        """Perform a research lookup for the given query.

        Returns a dict with 'success'; on success it also carries
        'response', 'citations', 'sources', 'model' and 'usage', and on
        failure an 'error' message instead.
        """
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # Select model based on query complexity
        model = self._select_model(query)
        # Format the research prompt
        research_prompt = self._format_research_prompt(query)
        # Prepare messages for the API with system message for academic mode
        messages = [
            {
                "role": "system",
                "content": "You are an academic research assistant. Focus exclusively on scholarly sources: peer-reviewed journals, academic papers, research institutions, and reputable scientific publications. Prioritize recent academic literature (2020-2026) and provide complete citations with DOIs. Use academic/scholarly search mode."
            },
            {"role": "user", "content": research_prompt}
        ]
        try:
            # Make the API request
            response = self._make_request(messages, model)
            # Extract the response content
            if "choices" in response and len(response["choices"]) > 0:
                choice = response["choices"][0]
                if "message" in choice and "content" in choice["message"]:
                    content = choice["message"]["content"]
                    # Extract citations from API response (Perplexity provides these)
                    api_citations = self._extract_api_citations(response, choice)
                    # Also extract citations from text as fallback
                    text_citations = self._extract_citations_from_text(content)
                    # Combine: prioritize API citations, add text citations if no duplicates
                    citations = api_citations + text_citations
                    return {
                        "success": True,
                        "query": query,
                        "response": content,
                        "citations": citations,
                        "sources": api_citations,  # Separate field for API-provided sources
                        "timestamp": timestamp,
                        "model": model,
                        "usage": response.get("usage", {})
                    }
                else:
                    raise Exception("Invalid response format from API")
            else:
                raise Exception("No response choices received from API")
        except Exception as e:
            # Failures are reported in-band rather than raised so batch
            # callers can continue past a single bad query.
            return {
                "success": False,
                "query": query,
                "error": str(e),
                "timestamp": timestamp,
                "model": model
            }
    def _extract_api_citations(self, response: Dict[str, Any], choice: Dict[str, Any]) -> List[Dict[str, str]]:
        """Extract citations from Perplexity API response fields."""
        citations = []
        # Perplexity returns citations in search_results field (new format)
        # Check multiple possible locations where OpenRouter might place them
        search_results = (
            response.get("search_results") or
            choice.get("search_results") or
            choice.get("message", {}).get("search_results") or
            []
        )
        for result in search_results:
            citation = {
                "type": "source",
                "title": result.get("title", ""),
                "url": result.get("url", ""),
                "date": result.get("date", ""),
            }
            # Add snippet if available (newer API feature)
            if result.get("snippet"):
                citation["snippet"] = result.get("snippet")
            citations.append(citation)
        # Also check for legacy citations field (backward compatibility)
        legacy_citations = (
            response.get("citations") or
            choice.get("citations") or
            choice.get("message", {}).get("citations") or
            []
        )
        for url in legacy_citations:
            if isinstance(url, str):
                # Legacy format was just URLs
                citations.append({
                    "type": "source",
                    "url": url,
                    "title": "",
                    "date": ""
                })
            elif isinstance(url, dict):
                citations.append({
                    "type": "source",
                    "url": url.get("url", ""),
                    "title": url.get("title", ""),
                    "date": url.get("date", "")
                })
        return citations
    def _extract_citations_from_text(self, text: str) -> List[Dict[str, str]]:
        """Extract potential citations from the response text as fallback."""
        import re
        citations = []
        # Look for DOI patterns first (most reliable)
        # Matches: doi:10.xxx, DOI: 10.xxx, https://doi.org/10.xxx
        doi_pattern = r'(?:doi[:\s]*|https?://(?:dx\.)?doi\.org/)(10\.[0-9]{4,}/[^\s\)\]\,\[\<\>]+)'
        doi_matches = re.findall(doi_pattern, text, re.IGNORECASE)
        seen_dois = set()
        for doi in doi_matches:
            # Clean up DOI - remove trailing punctuation and brackets
            doi_clean = doi.strip().rstrip('.,;:)]')
            if doi_clean and doi_clean not in seen_dois:
                seen_dois.add(doi_clean)
                citations.append({
                    "type": "doi",
                    "doi": doi_clean,
                    "url": f"https://doi.org/{doi_clean}"
                })
        # Look for URLs that might be sources (restricted to known
        # scholarly hosts to avoid picking up arbitrary links).
        url_pattern = r'https?://[^\s\)\]\,\<\>\"\']+(?:arxiv\.org|pubmed|ncbi\.nlm\.nih\.gov|nature\.com|science\.org|wiley\.com|springer\.com|ieee\.org|acm\.org)[^\s\)\]\,\<\>\"\']*'
        url_matches = re.findall(url_pattern, text, re.IGNORECASE)
        seen_urls = set()
        for url in url_matches:
            url_clean = url.rstrip('.')
            if url_clean not in seen_urls:
                seen_urls.add(url_clean)
                citations.append({
                    "type": "url",
                    "url": url_clean
                })
        return citations
    def batch_lookup(self, queries: List[str], delay: float = 1.0) -> List[Dict[str, Any]]:
        """Perform multiple research lookups with optional delay between requests.

        Args:
            queries: Research queries to run in order.
            delay: Seconds to sleep between consecutive requests
                (simple client-side rate limiting).
        """
        results = []
        for i, query in enumerate(queries):
            if i > 0 and delay > 0:
                time.sleep(delay)  # Rate limiting
            result = self.lookup(query)
            results.append(result)
            # Print progress
            print(f"[Research] Completed query {i+1}/{len(queries)}: {query[:50]}...")
        return results
    def get_model_info(self) -> Dict[str, Any]:
        """Get information about available models from OpenRouter.

        Returns the API's JSON payload, or {'error': message} on failure.
        """
        try:
            response = requests.get(
                f"{self.base_url}/models",
                headers=self.headers,
                timeout=30
            )
            response.raise_for_status()
            return response.json()
        except Exception as e:
            return {"error": str(e)}
def main():
    """Command-line interface for testing the research lookup tool.

    Returns a process exit code: 0 on success, 1 on configuration or
    runtime error.
    """
    import argparse
    parser = argparse.ArgumentParser(description="Research Information Lookup Tool")
    parser.add_argument("query", nargs="?", help="Research query to look up")
    parser.add_argument("--model-info", action="store_true", help="Show available models")
    parser.add_argument("--batch", nargs="+", help="Run multiple queries")
    parser.add_argument("--force-model", choices=["pro", "reasoning"],
                        help="Force specific model: 'pro' for fast lookup, 'reasoning' for deep analysis")
    args = parser.parse_args()
    # Check for API key before doing anything else.
    if not os.getenv("OPENROUTER_API_KEY"):
        print("Error: OPENROUTER_API_KEY environment variable not set")
        print("Please set it in your .env file or export it:")
        print("  export OPENROUTER_API_KEY='your_openrouter_api_key'")
        return 1
    try:
        research = ResearchLookup(force_model=args.force_model)
        if args.model_info:
            print("Available models from OpenRouter:")
            models = research.get_model_info()
            if "data" in models:
                for model in models["data"]:
                    # Only list Perplexity-hosted models.
                    if "perplexity" in model["id"].lower():
                        print(f"  - {model['id']}: {model.get('name', 'N/A')}")
            return 0
        if not args.query and not args.batch:
            print("Error: No query provided. Use --model-info to see available models.")
            return 1
        if args.batch:
            print(f"Running batch research for {len(args.batch)} queries...")
            results = research.batch_lookup(args.batch)
        else:
            print(f"Researching: {args.query}")
            results = [research.lookup(args.query)]
        # Display results
        for i, result in enumerate(results):
            if result["success"]:
                print(f"\n{'='*80}")
                print(f"Query {i+1}: {result['query']}")
                print(f"Timestamp: {result['timestamp']}")
                print(f"Model: {result['model']}")
                print(f"{'='*80}")
                print(result["response"])
                # Display API-provided sources first (most reliable)
                sources = result.get("sources", [])
                if sources:
                    print(f"\nπ Sources ({len(sources)}):")
                    for j, source in enumerate(sources):
                        title = source.get("title", "Untitled")
                        url = source.get("url", "")
                        date = source.get("date", "")
                        date_str = f" ({date})" if date else ""
                        print(f"  [{j+1}] {title}{date_str}")
                        if url:
                            print(f"      {url}")
                # Display additional text-extracted citations
                citations = result.get("citations", [])
                text_citations = [c for c in citations if c.get("type") in ("doi", "url")]
                if text_citations:
                    print(f"\nπ Additional References ({len(text_citations)}):")
                    for j, citation in enumerate(text_citations):
                        if citation.get("type") == "doi":
                            print(f"  [{j+1}] DOI: {citation.get('doi', '')} - {citation.get('url', '')}")
                        elif citation.get("type") == "url":
                            print(f"  [{j+1}] {citation.get('url', '')}")
                if result.get("usage"):
                    print(f"\nUsage: {result['usage']}")
            else:
                print(f"\nError in query {i+1}: {result['error']}")
        return 0
    except Exception as e:
        print(f"Error: {str(e)}")
        return 1
exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/research-lookup/research_lookup.py",
"license": "MIT License",
"lines": 339,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/research-lookup/scripts/research_lookup.py | #!/usr/bin/env python3
"""
Research Information Lookup Tool
Uses Perplexity's Sonar Pro Search model through OpenRouter for academic research queries.
"""
import os
import json
import requests
import time
from datetime import datetime
from typing import Dict, List, Optional, Any
from urllib.parse import quote
class ResearchLookup:
    """Research information lookup using Perplexity Sonar models via OpenRouter."""

    # Available models
    MODELS = {
        "pro": "perplexity/sonar-pro",  # Fast lookup, cost-effective
        "reasoning": "perplexity/sonar-reasoning-pro",  # Deep analysis with reasoning
    }

    # Keywords that indicate complex queries requiring reasoning model.
    # Matched as plain substrings of the lowercased query (see _select_model).
    REASONING_KEYWORDS = [
        "compare", "contrast", "analyze", "analysis", "evaluate", "critique",
        "versus", "vs", "vs.", "compared to", "differences between", "similarities",
        "meta-analysis", "systematic review", "synthesis", "integrate",
        "mechanism", "why", "how does", "how do", "explain", "relationship",
        "theoretical framework", "implications", "interpret", "reasoning",
        "controversy", "conflicting", "paradox", "debate", "reconcile",
        "pros and cons", "advantages and disadvantages", "trade-off", "tradeoff",
    ]

    def __init__(self, force_model: Optional[str] = None):
        """
        Initialize the research lookup tool.

        Args:
            force_model: Optional model override ('pro' or 'reasoning').
                If None, model is auto-selected based on query complexity.

        Raises:
            ValueError: if the OPENROUTER_API_KEY environment variable is unset.
        """
        self.api_key = os.getenv("OPENROUTER_API_KEY")
        if not self.api_key:
            raise ValueError("OPENROUTER_API_KEY environment variable not set")
        self.base_url = "https://openrouter.ai/api/v1"
        self.force_model = force_model
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            "HTTP-Referer": "https://scientific-writer.local",
            "X-Title": "Scientific Writer Research Tool"
        }

    def _select_model(self, query: str) -> str:
        """
        Select the appropriate model based on query complexity.

        Args:
            query: The research query

        Returns:
            Model identifier string
        """
        if self.force_model:
            return self.MODELS.get(self.force_model, self.MODELS["reasoning"])
        # Check for reasoning keywords (case-insensitive substring match)
        query_lower = query.lower()
        for keyword in self.REASONING_KEYWORDS:
            if keyword in query_lower:
                return self.MODELS["reasoning"]
        # Check for multiple questions or complex structure
        question_count = query.count("?")
        if question_count >= 2:
            return self.MODELS["reasoning"]
        # Check for very long queries (likely complex)
        if len(query) > 200:
            return self.MODELS["reasoning"]
        # Default to pro for simple lookups
        return self.MODELS["pro"]

    def _make_request(self, messages: List[Dict[str, str]], model: str, **kwargs) -> Dict[str, Any]:
        """Make a request to the OpenRouter API with academic search mode."""
        data = {
            "model": model,
            "messages": messages,
            "max_tokens": 8000,
            "temperature": 0.1,  # Low temperature for factual research
            # Perplexity-specific parameters for academic search
            "search_mode": "academic",  # Prioritize scholarly sources (peer-reviewed papers, journals)
            "search_context_size": "high",  # Always use high context for deeper research
            **kwargs
        }
        try:
            response = requests.post(
                f"{self.base_url}/chat/completions",
                headers=self.headers,
                json=data,
                timeout=90  # Increased timeout for academic search
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            raise Exception(f"API request failed: {str(e)}")

    def _format_research_prompt(self, query: str) -> str:
        """Format the query for optimal research results."""
        return f"""You are an expert research assistant. Please provide comprehensive, accurate research information for the following query: "{query}"
IMPORTANT INSTRUCTIONS:
1. Focus on ACADEMIC and SCIENTIFIC sources (peer-reviewed papers, reputable journals, institutional research)
2. Include RECENT information (prioritize 2020-2026 publications)
3. Provide COMPLETE citations with authors, title, journal/conference, year, and DOI when available
4. Structure your response with clear sections and proper attribution
5. Be comprehensive but concise - aim for 800-1200 words
6. Include key findings, methodologies, and implications when relevant
7. Note any controversies, limitations, or conflicting evidence
RESPONSE FORMAT:
- Start with a brief summary (2-3 sentences)
- Present key findings and studies in organized sections
- End with future directions or research gaps if applicable
- Include 5-8 high-quality citations at the end
Remember: This is for academic research purposes. Prioritize accuracy, completeness, and proper attribution."""

    def lookup(self, query: str) -> Dict[str, Any]:
        """Perform a research lookup for the given query.

        Returns a dict with success flag, response text, citations/sources,
        timestamp, model used, and token usage; on failure, an error string.
        """
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # Select model based on query complexity
        model = self._select_model(query)
        # Format the research prompt
        research_prompt = self._format_research_prompt(query)
        # Prepare messages for the API with system message for academic mode
        messages = [
            {
                "role": "system",
                "content": "You are an academic research assistant. Focus exclusively on scholarly sources: peer-reviewed journals, academic papers, research institutions, and reputable scientific publications. Prioritize recent academic literature (2020-2026) and provide complete citations with DOIs. Use academic/scholarly search mode."
            },
            {"role": "user", "content": research_prompt}
        ]
        try:
            # Make the API request
            response = self._make_request(messages, model)
            # Extract the response content
            if "choices" in response and len(response["choices"]) > 0:
                choice = response["choices"][0]
                if "message" in choice and "content" in choice["message"]:
                    content = choice["message"]["content"]
                    # Extract citations from API response (Perplexity provides these)
                    api_citations = self._extract_api_citations(response, choice)
                    # Also extract citations from text as fallback
                    text_citations = self._extract_citations_from_text(content)
                    # Combine: prioritize API citations; drop text-extracted
                    # entries whose URL duplicates an API-provided source.
                    # (BUG FIX: the lists were previously concatenated blindly,
                    # so the same reference could appear twice.)
                    seen_urls = {c.get("url") for c in api_citations if c.get("url")}
                    citations = api_citations + [
                        c for c in text_citations if c.get("url") not in seen_urls
                    ]
                    return {
                        "success": True,
                        "query": query,
                        "response": content,
                        "citations": citations,
                        "sources": api_citations,  # Separate field for API-provided sources
                        "timestamp": timestamp,
                        "model": model,
                        "usage": response.get("usage", {})
                    }
                else:
                    raise Exception("Invalid response format from API")
            else:
                raise Exception("No response choices received from API")
        except Exception as e:
            return {
                "success": False,
                "query": query,
                "error": str(e),
                "timestamp": timestamp,
                "model": model
            }

    def _extract_api_citations(self, response: Dict[str, Any], choice: Dict[str, Any]) -> List[Dict[str, str]]:
        """Extract citations from Perplexity API response fields."""
        citations = []
        # Perplexity returns citations in search_results field (new format)
        # Check multiple possible locations where OpenRouter might place them
        search_results = (
            response.get("search_results") or
            choice.get("search_results") or
            choice.get("message", {}).get("search_results") or
            []
        )
        for result in search_results:
            citation = {
                "type": "source",
                "title": result.get("title", ""),
                "url": result.get("url", ""),
                "date": result.get("date", ""),
            }
            # Add snippet if available (newer API feature)
            if result.get("snippet"):
                citation["snippet"] = result.get("snippet")
            citations.append(citation)
        # Also check for legacy citations field (backward compatibility)
        legacy_citations = (
            response.get("citations") or
            choice.get("citations") or
            choice.get("message", {}).get("citations") or
            []
        )
        for url in legacy_citations:
            if isinstance(url, str):
                # Legacy format was just URLs
                citations.append({
                    "type": "source",
                    "url": url,
                    "title": "",
                    "date": ""
                })
            elif isinstance(url, dict):
                citations.append({
                    "type": "source",
                    "url": url.get("url", ""),
                    "title": url.get("title", ""),
                    "date": url.get("date", "")
                })
        return citations

    def _extract_citations_from_text(self, text: str) -> List[Dict[str, str]]:
        """Extract potential citations from the response text as fallback."""
        import re
        citations = []
        # Look for DOI patterns first (most reliable)
        # Matches: doi:10.xxx, DOI: 10.xxx, https://doi.org/10.xxx
        doi_pattern = r'(?:doi[:\s]*|https?://(?:dx\.)?doi\.org/)(10\.[0-9]{4,}/[^\s\)\]\,\[\<\>]+)'
        doi_matches = re.findall(doi_pattern, text, re.IGNORECASE)
        seen_dois = set()
        for doi in doi_matches:
            # Clean up DOI - remove trailing punctuation and brackets
            doi_clean = doi.strip().rstrip('.,;:)]')
            if doi_clean and doi_clean not in seen_dois:
                seen_dois.add(doi_clean)
                citations.append({
                    "type": "doi",
                    "doi": doi_clean,
                    "url": f"https://doi.org/{doi_clean}"
                })
        # Look for URLs that might be sources (restricted to known academic hosts)
        url_pattern = r'https?://[^\s\)\]\,\<\>\"\']+(?:arxiv\.org|pubmed|ncbi\.nlm\.nih\.gov|nature\.com|science\.org|wiley\.com|springer\.com|ieee\.org|acm\.org)[^\s\)\]\,\<\>\"\']*'
        url_matches = re.findall(url_pattern, text, re.IGNORECASE)
        seen_urls = set()
        for url in url_matches:
            url_clean = url.rstrip('.')
            if url_clean not in seen_urls:
                seen_urls.add(url_clean)
                citations.append({
                    "type": "url",
                    "url": url_clean
                })
        return citations

    def batch_lookup(self, queries: List[str], delay: float = 1.0) -> List[Dict[str, Any]]:
        """Perform multiple research lookups with optional delay between requests."""
        results = []
        for i, query in enumerate(queries):
            if i > 0 and delay > 0:
                time.sleep(delay)  # Rate limiting
            result = self.lookup(query)
            results.append(result)
            # Print progress
            print(f"[Research] Completed query {i+1}/{len(queries)}: {query[:50]}...")
        return results

    def get_model_info(self) -> Dict[str, Any]:
        """Get information about available models from OpenRouter."""
        try:
            response = requests.get(
                f"{self.base_url}/models",
                headers=self.headers,
                timeout=30
            )
            response.raise_for_status()
            return response.json()
        except Exception as e:
            return {"error": str(e)}
def main():
    """Command-line interface for testing the research lookup tool.

    Returns a process exit code: 0 on success, 1 on configuration or
    runtime error (missing API key, no query, request failure).
    """
    import argparse
    parser = argparse.ArgumentParser(description="Research Information Lookup Tool")
    parser.add_argument("query", nargs="?", help="Research query to look up")
    parser.add_argument("--model-info", action="store_true", help="Show available models")
    parser.add_argument("--batch", nargs="+", help="Run multiple queries")
    parser.add_argument("--force-model", choices=["pro", "reasoning"],
                        help="Force specific model: 'pro' for fast lookup, 'reasoning' for deep analysis")
    args = parser.parse_args()

    # Check for API key up front so the user gets a clear message.
    if not os.getenv("OPENROUTER_API_KEY"):
        print("Error: OPENROUTER_API_KEY environment variable not set")
        print("Please set it in your .env file or export it:")
        print("  export OPENROUTER_API_KEY='your_openrouter_api_key'")
        return 1

    try:
        research = ResearchLookup(force_model=args.force_model)

        if args.model_info:
            print("Available models from OpenRouter:")
            models = research.get_model_info()
            if "data" in models:
                for model in models["data"]:
                    if "perplexity" in model["id"].lower():
                        print(f"  - {model['id']}: {model.get('name', 'N/A')}")
            return 0

        if not args.query and not args.batch:
            print("Error: No query provided. Use --model-info to see available models.")
            return 1

        if args.batch:
            print(f"Running batch research for {len(args.batch)} queries...")
            results = research.batch_lookup(args.batch)
        else:
            print(f"Researching: {args.query}")
            results = [research.lookup(args.query)]

        # Display results
        for i, result in enumerate(results):
            if result["success"]:
                print(f"\n{'='*80}")
                print(f"Query {i+1}: {result['query']}")
                print(f"Timestamp: {result['timestamp']}")
                print(f"Model: {result['model']}")
                print(f"{'='*80}")
                print(result["response"])
                # Display API-provided sources first (most reliable).
                # BUG FIX: removed mojibake character ('π', residue of a
                # mis-encoded emoji) from the section headers below.
                sources = result.get("sources", [])
                if sources:
                    print(f"\nSources ({len(sources)}):")
                    for j, source in enumerate(sources):
                        title = source.get("title", "Untitled")
                        url = source.get("url", "")
                        date = source.get("date", "")
                        date_str = f" ({date})" if date else ""
                        print(f"  [{j+1}] {title}{date_str}")
                        if url:
                            print(f"      {url}")
                # Display additional text-extracted citations
                citations = result.get("citations", [])
                text_citations = [c for c in citations if c.get("type") in ("doi", "url")]
                if text_citations:
                    print(f"\nAdditional References ({len(text_citations)}):")
                    for j, citation in enumerate(text_citations):
                        if citation.get("type") == "doi":
                            print(f"  [{j+1}] DOI: {citation.get('doi', '')} - {citation.get('url', '')}")
                        elif citation.get("type") == "url":
                            print(f"  [{j+1}] {citation.get('url', '')}")
                if result.get("usage"):
                    print(f"\nUsage: {result['usage']}")
            else:
                print(f"\nError in query {i+1}: {result['error']}")
        return 0
    except Exception as e:
        print(f"Error: {str(e)}")
        return 1
if __name__ == "__main__":
    # Use sys.exit instead of the builtin exit(): exit() is injected by the
    # site module for interactive use and is not guaranteed under `python -S`.
    import sys
    sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/research-lookup/scripts/research_lookup.py",
"license": "MIT License",
"lines": 339,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scanpy/assets/analysis_template.py | #!/usr/bin/env python3
"""
Complete Single-Cell Analysis Template
This template provides a complete workflow for single-cell RNA-seq analysis
using scanpy, from data loading through clustering and cell type annotation.
Customize the parameters and sections as needed for your specific dataset.
"""
import scanpy as sc
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# ============================================================================
# CONFIGURATION
# ============================================================================

# File paths
INPUT_FILE = 'data/raw_counts.h5ad'  # Change to your input file
OUTPUT_DIR = 'results/'
FIGURES_DIR = 'figures/'

# QC parameters
MIN_GENES = 200    # Minimum genes per cell
MIN_CELLS = 3      # Minimum cells per gene
MT_THRESHOLD = 5   # Maximum mitochondrial percentage

# Analysis parameters
N_TOP_GENES = 2000       # Number of highly variable genes
N_PCS = 40               # Number of principal components
N_NEIGHBORS = 10         # Number of neighbors for graph
LEIDEN_RESOLUTION = 0.5  # Clustering resolution

# Scanpy settings
sc.settings.verbosity = 3
sc.settings.set_figure_params(dpi=80, facecolor='white')
sc.settings.figdir = FIGURES_DIR

# ============================================================================
# 1. LOAD DATA
# ============================================================================
print("=" * 80)
print("LOADING DATA")
print("=" * 80)

# Load data (adjust based on your file format)
adata = sc.read_h5ad(INPUT_FILE)
# adata = sc.read_10x_mtx('data/filtered_gene_bc_matrices/')  # For 10X data
# adata = sc.read_csv('data/counts.csv')  # For CSV data
print(f"Loaded: {adata.n_obs} cells x {adata.n_vars} genes")

# ============================================================================
# 2. QUALITY CONTROL
# ============================================================================
print("\n" + "=" * 80)
print("QUALITY CONTROL")
print("=" * 80)

# Identify mitochondrial genes (human 'MT-' convention; adjust the prefix
# for other organisms, e.g. 'mt-' for mouse).
adata.var['mt'] = adata.var_names.str.startswith('MT-')

# Calculate QC metrics
sc.pp.calculate_qc_metrics(adata, qc_vars=['mt'], percent_top=None,
                           log1p=False, inplace=True)

# Visualize QC metrics before filtering
sc.pl.violin(adata, ['n_genes_by_counts', 'total_counts', 'pct_counts_mt'],
             jitter=0.4, multi_panel=True, save='_qc_before_filtering')
sc.pl.scatter(adata, x='total_counts', y='pct_counts_mt', save='_qc_mt')
sc.pl.scatter(adata, x='total_counts', y='n_genes_by_counts', save='_qc_genes')

# Filter cells and genes
print(f"\nBefore filtering: {adata.n_obs} cells, {adata.n_vars} genes")
sc.pp.filter_cells(adata, min_genes=MIN_GENES)
sc.pp.filter_genes(adata, min_cells=MIN_CELLS)
# BUG FIX: boolean subsetting returns a *view*; take an explicit copy so
# later in-place operations (normalize, regress_out, scale, adata.raw = ...)
# do not trigger implicit-modification copies/warnings on a view.
adata = adata[adata.obs.pct_counts_mt < MT_THRESHOLD, :].copy()
print(f"After filtering: {adata.n_obs} cells, {adata.n_vars} genes")

# ============================================================================
# 3. NORMALIZATION
# ============================================================================
print("\n" + "=" * 80)
print("NORMALIZATION")
print("=" * 80)

# Normalize to 10,000 counts per cell
sc.pp.normalize_total(adata, target_sum=1e4)
# Log-transform
sc.pp.log1p(adata)
# Store normalized data (full gene set) before subsetting to HVGs
adata.raw = adata

# ============================================================================
# 4. FEATURE SELECTION
# ============================================================================
print("\n" + "=" * 80)
print("FEATURE SELECTION")
print("=" * 80)

# Identify highly variable genes
sc.pp.highly_variable_genes(adata, n_top_genes=N_TOP_GENES)
# Visualize
sc.pl.highly_variable_genes(adata, save='_hvg')
print(f"Selected {sum(adata.var.highly_variable)} highly variable genes")

# Subset to highly variable genes
# BUG FIX: as above, copy the view so downstream mutations are safe.
adata = adata[:, adata.var.highly_variable].copy()

# ============================================================================
# 5. SCALING AND REGRESSION
# ============================================================================
print("\n" + "=" * 80)
print("SCALING AND REGRESSION")
print("=" * 80)

# Regress out unwanted sources of variation
sc.pp.regress_out(adata, ['total_counts', 'pct_counts_mt'])
# Scale data
sc.pp.scale(adata, max_value=10)

# ============================================================================
# 6. DIMENSIONALITY REDUCTION
# ============================================================================
print("\n" + "=" * 80)
print("DIMENSIONALITY REDUCTION")
print("=" * 80)

# PCA
sc.tl.pca(adata, svd_solver='arpack')
sc.pl.pca_variance_ratio(adata, log=True, save='_pca_variance')
# Compute neighborhood graph
sc.pp.neighbors(adata, n_neighbors=N_NEIGHBORS, n_pcs=N_PCS)
# UMAP
sc.tl.umap(adata)

# ============================================================================
# 7. CLUSTERING
# ============================================================================
print("\n" + "=" * 80)
print("CLUSTERING")
print("=" * 80)

# Leiden clustering
sc.tl.leiden(adata, resolution=LEIDEN_RESOLUTION)
# Visualize
sc.pl.umap(adata, color='leiden', legend_loc='on data', save='_leiden')
print(f"Identified {len(adata.obs['leiden'].unique())} clusters")

# ============================================================================
# 8. MARKER GENE IDENTIFICATION
# ============================================================================
print("\n" + "=" * 80)
print("MARKER GENE IDENTIFICATION")
print("=" * 80)

# Find marker genes
sc.tl.rank_genes_groups(adata, 'leiden', method='wilcoxon')
# Visualize top markers
sc.pl.rank_genes_groups(adata, n_genes=25, sharey=False, save='_markers')
sc.pl.rank_genes_groups_heatmap(adata, n_genes=10, save='_markers_heatmap')
sc.pl.rank_genes_groups_dotplot(adata, n_genes=5, save='_markers_dotplot')

# Get top markers for each cluster
for cluster in adata.obs['leiden'].unique():
    print(f"\nCluster {cluster} top markers:")
    markers = sc.get.rank_genes_groups_df(adata, group=cluster).head(10)
    print(markers[['names', 'scores', 'pvals_adj']].to_string(index=False))

# ============================================================================
# 9. CELL TYPE ANNOTATION (CUSTOMIZE THIS SECTION)
# ============================================================================
print("\n" + "=" * 80)
print("CELL TYPE ANNOTATION")
print("=" * 80)

# Example marker genes for common cell types (customize for your data)
marker_genes = {
    'T cells': ['CD3D', 'CD3E', 'CD3G'],
    'B cells': ['MS4A1', 'CD79A', 'CD79B'],
    'Monocytes': ['CD14', 'LYZ', 'S100A8'],
    'NK cells': ['NKG7', 'GNLY', 'KLRD1'],
    'Dendritic cells': ['FCER1A', 'CST3'],
}

# Visualize marker genes (use .raw so the full gene set is available)
for cell_type, genes in marker_genes.items():
    available_genes = [g for g in genes if g in adata.raw.var_names]
    if available_genes:
        sc.pl.umap(adata, color=available_genes, use_raw=True,
                   save=f'_{cell_type.replace(" ", "_")}')

# Manual annotation based on marker expression (customize this mapping)
cluster_to_celltype = {
    '0': 'CD4 T cells',
    '1': 'CD14+ Monocytes',
    '2': 'B cells',
    '3': 'CD8 T cells',
    '4': 'NK cells',
    # Add more mappings based on your marker analysis
}

# Apply annotations; clusters without a mapping become 'Unknown'
adata.obs['cell_type'] = adata.obs['leiden'].map(cluster_to_celltype)
adata.obs['cell_type'] = adata.obs['cell_type'].fillna('Unknown')
# Visualize annotated cell types
sc.pl.umap(adata, color='cell_type', legend_loc='on data', save='_celltypes')

# ============================================================================
# 10. ADDITIONAL ANALYSES (OPTIONAL)
# ============================================================================
print("\n" + "=" * 80)
print("ADDITIONAL ANALYSES")
print("=" * 80)

# PAGA trajectory analysis (optional)
sc.tl.paga(adata, groups='leiden')
sc.pl.paga(adata, color='leiden', save='_paga')

# Gene set scoring (optional)
# example_gene_set = ['CD3D', 'CD3E', 'CD3G']
# sc.tl.score_genes(adata, example_gene_set, score_name='T_cell_score')
# sc.pl.umap(adata, color='T_cell_score', save='_gene_set_score')

# ============================================================================
# 11. SAVE RESULTS
# ============================================================================
print("\n" + "=" * 80)
print("SAVING RESULTS")
print("=" * 80)

import os
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Save processed AnnData object
adata.write(f'{OUTPUT_DIR}/processed_data.h5ad')
print(f"Saved processed data to {OUTPUT_DIR}/processed_data.h5ad")

# Export metadata
adata.obs.to_csv(f'{OUTPUT_DIR}/cell_metadata.csv')
adata.var.to_csv(f'{OUTPUT_DIR}/gene_metadata.csv')
print(f"Saved metadata to {OUTPUT_DIR}/")

# Export marker genes
for cluster in adata.obs['leiden'].unique():
    markers = sc.get.rank_genes_groups_df(adata, group=cluster)
    markers.to_csv(f'{OUTPUT_DIR}/markers_cluster_{cluster}.csv', index=False)
print(f"Saved marker genes to {OUTPUT_DIR}/")

# ============================================================================
# 12. SUMMARY
# ============================================================================
print("\n" + "=" * 80)
print("ANALYSIS SUMMARY")
print("=" * 80)
print(f"\nFinal dataset:")
print(f"  Cells: {adata.n_obs}")
print(f"  Genes: {adata.n_vars}")
print(f"  Clusters: {len(adata.obs['leiden'].unique())}")
print(f"\nCell type distribution:")
print(adata.obs['cell_type'].value_counts())
print("\n" + "=" * 80)
print("ANALYSIS COMPLETE")
print("=" * 80)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scanpy/assets/analysis_template.py",
"license": "MIT License",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scanpy/scripts/qc_analysis.py | #!/usr/bin/env python3
"""
Quality Control Analysis Script for Scanpy
Performs comprehensive quality control on single-cell RNA-seq data,
including calculating metrics, generating QC plots, and filtering cells.
Usage:
python qc_analysis.py <input_file> [--output <output_file>]
"""
import argparse
import scanpy as sc
import matplotlib.pyplot as plt
def calculate_qc_metrics(adata, mt_threshold=5, min_genes=200, min_cells=3):
    """
    Annotate mitochondrial genes and compute per-cell/per-gene QC metrics.

    NOTE(review): despite what the original docstring claimed, this function
    does NOT filter anything -- filtering is done by filter_data(). The
    mt_threshold/min_genes/min_cells parameters are currently unused here
    and are kept only so existing callers keep working.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix
    mt_threshold : float
        Unused in this function (filtering happens in filter_data)
    min_genes : int
        Unused in this function (filtering happens in filter_data)
    min_cells : int
        Unused in this function (filtering happens in filter_data)

    Returns
    -------
    AnnData
        The same object, with QC metric columns added to .obs and .var
        (n_genes_by_counts, total_counts, pct_counts_mt, ...)
    """
    # Identify mitochondrial genes (assumes gene names follow standard
    # conventions; covers human 'MT-' and mouse 'mt-'/'Mt-' prefixes)
    adata.var['mt'] = adata.var_names.str.startswith(('MT-', 'mt-', 'Mt-'))
    # Calculate QC metrics in place
    sc.pp.calculate_qc_metrics(adata, qc_vars=['mt'], percent_top=None,
                               log1p=False, inplace=True)
    print("\n=== QC Metrics Summary ===")
    print(f"Total cells: {adata.n_obs}")
    print(f"Total genes: {adata.n_vars}")
    print(f"Mean genes per cell: {adata.obs['n_genes_by_counts'].mean():.2f}")
    print(f"Mean counts per cell: {adata.obs['total_counts'].mean():.2f}")
    print(f"Mean mitochondrial %: {adata.obs['pct_counts_mt'].mean():.2f}")
    return adata
def generate_qc_plots(adata, output_prefix='qc'):
    """
    Generate comprehensive QC plots.

    Produces violin, scatter, and highest-expressed-gene figures under the
    figures/ directory, embedding *output_prefix* in every file name.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix (QC metrics must already be computed)
    output_prefix : str
        Prefix for saved figure files
    """
    # Make sure the scanpy figure directory exists before saving anything.
    import os
    os.makedirs('figures', exist_ok=True)

    # Distributions of the three core per-cell QC metrics.
    qc_metrics = ['n_genes_by_counts', 'total_counts', 'pct_counts_mt']
    sc.pl.violin(adata, qc_metrics, jitter=0.4, multi_panel=True,
                 save=f'_{output_prefix}_violin.pdf')

    # Scatter views of total counts vs. mito fraction and vs. gene counts.
    scatter_specs = (
        ('pct_counts_mt', 'mt_scatter'),
        ('n_genes_by_counts', 'genes_scatter'),
    )
    for y_metric, tag in scatter_specs:
        sc.pl.scatter(adata, x='total_counts', y=y_metric,
                      save=f'_{output_prefix}_{tag}.pdf')

    # Top 20 genes by fraction of total counts.
    sc.pl.highest_expr_genes(adata, n_top=20,
                             save=f'_{output_prefix}_highest_expr.pdf')

    print(f"\nQC plots saved to figures/ directory with prefix '{output_prefix}'")
def filter_data(adata, mt_threshold=5, min_genes=200, max_genes=None,
                min_counts=None, max_counts=None, min_cells=3):
    """
    Filter cells and genes based on QC thresholds.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix
    mt_threshold : float
        Maximum percentage of mitochondrial genes
    min_genes : int
        Minimum number of genes per cell
    max_genes : int, optional
        Maximum number of genes per cell
    min_counts : int, optional
        Minimum number of counts per cell
    max_counts : int, optional
        Maximum number of counts per cell
    min_cells : int
        Minimum number of cells per gene

    Returns
    -------
    AnnData
        Filtered annotated data matrix
    """
    cells_initial = adata.n_obs
    genes_initial = adata.n_vars

    # Mandatory cell filter: drop cells expressing too few genes.
    sc.pp.filter_cells(adata, min_genes=min_genes)

    # Optional per-cell filters, applied only when a threshold was given.
    if max_genes:
        adata = adata[adata.obs['n_genes_by_counts'] < max_genes, :]
    if min_counts:
        adata = adata[adata.obs['total_counts'] >= min_counts, :]
    if max_counts:
        adata = adata[adata.obs['total_counts'] < max_counts, :]

    # Remove cells dominated by mitochondrial reads (dying/damaged cells).
    adata = adata[adata.obs['pct_counts_mt'] < mt_threshold, :]

    # Finally drop genes detected in too few of the remaining cells.
    sc.pp.filter_genes(adata, min_cells=min_cells)

    print(f"\n=== Filtering Results ===")
    print(f"Cells: {cells_initial} -> {adata.n_obs} ({adata.n_obs/cells_initial*100:.1f}% retained)")
    print(f"Genes: {genes_initial} -> {adata.n_vars} ({adata.n_vars/genes_initial*100:.1f}% retained)")
    return adata
def main():
    """CLI entry point: load data, compute QC metrics, plot, filter, save.

    Pipeline order matters: scanpy settings are configured before any
    plotting, metrics are computed before the before/after plot pair, and
    filtering happens between the two plot passes.
    """
    parser = argparse.ArgumentParser(description='QC analysis for single-cell data')
    parser.add_argument('input', help='Input file (h5ad, 10X mtx, csv, etc.)')
    parser.add_argument('--output', default='qc_filtered.h5ad',
                        help='Output file name (default: qc_filtered.h5ad)')
    parser.add_argument('--mt-threshold', type=float, default=5,
                        help='Max mitochondrial percentage (default: 5)')
    parser.add_argument('--min-genes', type=int, default=200,
                        help='Min genes per cell (default: 200)')
    parser.add_argument('--min-cells', type=int, default=3,
                        help='Min cells per gene (default: 3)')
    parser.add_argument('--skip-plots', action='store_true',
                        help='Skip generating QC plots')
    args = parser.parse_args()
    # Configure scanpy (figure dir must be set before any sc.pl call saves)
    sc.settings.verbosity = 2
    sc.settings.set_figure_params(dpi=300, facecolor='white')
    sc.settings.figdir = './figures/'
    print(f"Loading data from: {args.input}")
    # Load data based on file extension ('.h5ad' is checked before '.h5')
    if args.input.endswith('.h5ad'):
        adata = sc.read_h5ad(args.input)
    elif args.input.endswith('.h5'):
        adata = sc.read_10x_h5(args.input)
    elif args.input.endswith('.csv'):
        adata = sc.read_csv(args.input)
    else:
        # Try reading as 10X mtx directory
        adata = sc.read_10x_mtx(args.input)
    print(f"Loaded data: {adata.n_obs} cells x {adata.n_vars} genes")
    # Calculate QC metrics (annotation only; the threshold args are unused
    # by calculate_qc_metrics -- actual filtering happens in filter_data)
    adata = calculate_qc_metrics(adata, mt_threshold=args.mt_threshold,
                                 min_genes=args.min_genes, min_cells=args.min_cells)
    # Generate QC plots (before filtering)
    if not args.skip_plots:
        print("\nGenerating QC plots (before filtering)...")
        generate_qc_plots(adata, output_prefix='qc_before')
    # Filter data
    adata = filter_data(adata, mt_threshold=args.mt_threshold,
                        min_genes=args.min_genes, min_cells=args.min_cells)
    # Generate QC plots (after filtering)
    if not args.skip_plots:
        print("\nGenerating QC plots (after filtering)...")
        generate_qc_plots(adata, output_prefix='qc_after')
    # Save filtered data
    print(f"\nSaving filtered data to: {args.output}")
    adata.write_h5ad(args.output)
    print("\n=== QC Analysis Complete ===")
# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scanpy/scripts/qc_analysis.py",
"license": "MIT License",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.